index | package | name | docstring | code | signature |
---|---|---|---|---|---|
17,330 | imodels.tree.hierarchical_shrinkage | __repr__ | null | def __repr__(self):
# s = self.__class__.__name__
# s += "("
# s += "estimator_="
# s += repr(self.estimator_)
# s += ", "
# s += "reg_param="
# s += str(self.reg_param)
# s += ", "
# s += "shrinkage_scheme_="
# s += self.shrinkage_scheme_
# s += ")"
# return s
attr_list = ["estimator_", "reg_param", "shrinkage_scheme_"]
s = self.__class__.__name__
s += "("
for attr in attr_list:
s += attr + "=" + repr(getattr(self, attr)) + ", "
s = s[:-2] + ")"
return s
| (self) |
17,333 | imodels.tree.hierarchical_shrinkage | __str__ | null | def __str__(self):
# check if fitted
if not checks.check_is_fitted(self.estimator_):
s = self.__class__.__name__
s += "("
s += "est="
s += repr(self.estimator_)
s += ", "
s += "reg_param="
s += str(self.reg_param)
s += ")"
return s
else:
s = "> ------------------------------\n"
s += "> Decision Tree with Hierarchical Shrinkage\n"
s += "> \tPrediction is made by looking at the value in the appropriate leaf of the tree\n"
s += "> ------------------------------" + "\n"
if hasattr(self, "feature_names") and self.feature_names is not None:
return s + export_text(
self.estimator_, feature_names=self.feature_names, show_weights=True
)
else:
return s + export_text(self.estimator_, show_weights=True)
| (self) |
17,342 | imodels.tree.hierarchical_shrinkage | _shrink | null | def _shrink(self):
if hasattr(self.estimator_, "tree_"):
self._shrink_tree(self.estimator_.tree_, self.reg_param)
elif hasattr(self.estimator_, "estimators_"):
for t in self.estimator_.estimators_:
if isinstance(t, np.ndarray):
assert t.size == 1, "multiple trees stored under tree_?"
t = t[0]
self._shrink_tree(t.tree_, self.reg_param)
| (self) |
17,343 | imodels.tree.hierarchical_shrinkage | _shrink_tree | Shrink the tree | def _shrink_tree(
self, tree, reg_param, i=0, parent_val=None, parent_num=None, cum_sum=0
):
"""Shrink the tree"""
if reg_param is None:
reg_param = 1.0
left = tree.children_left[i]
right = tree.children_right[i]
is_leaf = left == right
n_samples = tree.weighted_n_node_samples[i]
if isinstance(self, RegressorMixin) or isinstance(
self.estimator_, GradientBoostingClassifier
):
val = deepcopy(tree.value[i, :, :])
else: # If classification, normalize to probability vector
val = tree.value[i, :, :] / n_samples
# Step 1: Update cum_sum
# if root
if parent_val is None and parent_num is None:
cum_sum = val
# if has parent
else:
if self.shrinkage_scheme_ == "node_based":
val_new = (val - parent_val) / (1 + reg_param / parent_num)
elif self.shrinkage_scheme_ == "constant":
val_new = (val - parent_val) / (1 + reg_param)
else: # leaf_based
val_new = 0
cum_sum += val_new
# Step 2: Update node values
if (
self.shrinkage_scheme_ == "node_based"
or self.shrinkage_scheme_ == "constant"
):
tree.value[i, :, :] = cum_sum
else: # leaf_based
if is_leaf: # update node values if leaf_based
root_val = tree.value[0, :, :]
tree.value[i, :, :] = root_val + (val - root_val) / (
1 + reg_param / n_samples
)
else:
tree.value[i, :, :] = val
# Step 3: Recurse if not leaf
if not is_leaf:
self._shrink_tree(
tree,
reg_param,
left,
parent_val=val,
parent_num=n_samples,
cum_sum=deepcopy(cum_sum),
)
self._shrink_tree(
tree,
reg_param,
right,
parent_val=val,
parent_num=n_samples,
cum_sum=deepcopy(cum_sum),
)
# edit the non-leaf nodes for later visualization (doesn't affect predictions)
return tree
| (self, tree, reg_param, i=0, parent_val=None, parent_num=None, cum_sum=0) |
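A compact restatement of the update `_shrink_tree` applies, reading λ for `reg_param`, v_a for a node's (class-normalized) value, and n_a for its weighted sample count; this only rewrites the recursion above in closed form:

```latex
% node_based: each node's stored value is a telescoped, shrunken sum along its root path
\hat{v}_i = v_{\mathrm{root}} + \sum_{a \,\in\, \mathrm{path}(\mathrm{root} \to i)} \frac{v_a - v_{\mathrm{parent}(a)}}{1 + \lambda / n_{\mathrm{parent}(a)}}
% constant: identical, but with denominator 1 + \lambda
% leaf_based: internal nodes keep v_i unchanged; each leaf is set to
\hat{v}_{\mathrm{leaf}} = v_{\mathrm{root}} + \frac{v_{\mathrm{leaf}} - v_{\mathrm{root}}}{1 + \lambda / n_{\mathrm{leaf}}}
```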
17,346 | imodels.tree.cart_ccp | fit | null | def fit(self, X, y, sample_weight=None, *args, **kwargs):
m = DecisionTreeCCPClassifier(self.estimator_, desired_complexity=self.desired_complexity)
m.fit(X, y, sample_weight, *args, **kwargs)
self.scores_ = []
for reg_param in self.reg_param_list:
est = HSTreeClassifier(deepcopy(m.estimator_), reg_param)
cv_scores = cross_val_score(est, X, y, cv=self.cv, scoring=self.scoring)
self.scores_.append(np.mean(cv_scores))
self.reg_param = self.reg_param_list[np.argmax(self.scores_)]
super().fit(X=X, y=y)
| (self, X, y, sample_weight=None, *args, **kwargs) |
17,348 | imodels.tree.hierarchical_shrinkage | get_params | null | def get_params(self, deep=True):
d = {
"reg_param": self.reg_param,
"estimator_": self.estimator_,
"shrinkage_scheme_": self.shrinkage_scheme_,
"max_leaf_nodes": self.estimator_.max_leaf_nodes,
}
if deep:
return deepcopy(d)
return d
| (self, deep=True) |
17,350 | imodels.tree.hierarchical_shrinkage | predict_proba | null | def predict_proba(self, X, *args, **kwargs):
if hasattr(self.estimator_, "predict_proba"):
return self.estimator_.predict_proba(X, *args, **kwargs)
else:
return NotImplemented
| (self, X, *args, **kwargs) |
17,351 | imodels.tree.hierarchical_shrinkage | score | null | def score(self, X, y, *args, **kwargs):
if hasattr(self.estimator_, "score"):
return self.estimator_.score(X, y, *args, **kwargs)
else:
return NotImplemented
| (self, X, y, *args, **kwargs) |
17,354 | imodels.tree.cart_ccp | HSDecisionTreeCCPRegressorCV | null | class HSDecisionTreeCCPRegressorCV(HSTreeRegressor):
def __init__(self, estimator_: BaseEstimator, reg_param_list: List[float] = [0.1, 1, 10, 50, 100, 500],
desired_complexity: int = 1, cv: int = 3, scoring=None, *args, **kwargs):
super().__init__(estimator_=estimator_, reg_param=None)
self.reg_param_list = np.array(reg_param_list)
self.cv = cv
self.scoring = scoring
self.desired_complexity = desired_complexity
def fit(self, X, y, sample_weight=None, *args, **kwargs):
m = DecisionTreeCCPRegressor(self.estimator_, desired_complexity=self.desired_complexity)
m.fit(X, y, sample_weight, *args, **kwargs)
self.scores_ = []
for reg_param in self.reg_param_list:
est = HSTreeRegressor(deepcopy(m.estimator_), reg_param)
cv_scores = cross_val_score(est, X, y, cv=self.cv, scoring=self.scoring)
self.scores_.append(np.mean(cv_scores))
self.reg_param = self.reg_param_list[np.argmax(self.scores_)]
super().fit(X=X, y=y)
| (estimator_: sklearn.base.BaseEstimator, reg_param_list: List[float] = [0.1, 1, 10, 50, 100, 500], desired_complexity: int = 1, cv: int = 3, scoring=None, *args, **kwargs) |
17,373 | imodels.tree.cart_ccp | fit | null | def fit(self, X, y, sample_weight=None, *args, **kwargs):
m = DecisionTreeCCPRegressor(self.estimator_, desired_complexity=self.desired_complexity)
m.fit(X, y, sample_weight, *args, **kwargs)
self.scores_ = []
for reg_param in self.reg_param_list:
est = HSTreeRegressor(deepcopy(m.estimator_), reg_param)
cv_scores = cross_val_score(est, X, y, cv=self.cv, scoring=self.scoring)
self.scores_.append(np.mean(cv_scores))
self.reg_param = self.reg_param_list[np.argmax(self.scores_)]
super().fit(X=X, y=y)
| (self, X, y, sample_weight=None, *args, **kwargs) |
17,381 | imodels.tree.gosdt.pygosdt_shrinkage | HSOptimalTreeClassifier | null | class HSOptimalTreeClassifier(BaseEstimator):
def __init__(self, estimator_: OptimalTreeClassifier, reg_param: float = 1, shrinkage_scheme_: str = 'node_based'):
"""
Params
------
reg_param: float
Higher is more regularization (can be arbitrarily large, should not be < 0)
shrinkage_scheme: str
Experimental: Used to experiment with different forms of shrinkage. options are:
(i) node_based shrinks based on number of samples in parent node
(ii) leaf_based only shrinks leaf nodes based on number of leaf samples
(iii) constant shrinks every node by a constant lambda
"""
super().__init__()
self.reg_param = reg_param
# print('est', estimator_)
self.estimator_ = estimator_
# self.tree_ = estimator_.tree_
self.shrinkage_scheme_ = shrinkage_scheme_
def _calc_probs(self, node):
lbls = np.array([float(l) for l in node["labels"]]) if "labels" in node else np.array(
[float(node['prediction'])])
node['probs'] = np.mean(lbls == 1)
node['n_obs'] = len(node.get('labels', []))
if "prediction" in node:
node['prediction'] = np.round(node['probs'])
return
self._calc_probs(node['true'])
self._calc_probs(node['false'])
def impute_nodes(self, X, y):
"""
Returns
---
the leaf by which this sample would be classified
"""
source_node = self.estimator_.tree_.source
for i in range(len(y)):
sample, label = X[i, ...], y[i]
_add_label(source_node, label)
nodes = [source_node]
while len(nodes) > 0:
node = nodes.pop()
if "prediction" in node:
continue
else:
value = sample[node["feature"]]
reference = node["reference"]
relation = node["relation"]
if relation == "==":
is_true = value == reference
elif relation == ">=":
is_true = value >= reference
elif relation == "<=":
is_true = value <= reference
elif relation == "<":
is_true = value < reference
elif relation == ">":
is_true = value > reference
else:
raise ValueError("Unsupported relational operator {}".format(node["relation"]))
next_node = node['true'] if is_true else node['false']
_add_label(next_node, label)
nodes.append(next_node)
self._calc_probs(source_node)
self.estimator_.tree_.source = source_node
# def fit(self, *args, **kwargs):
# X = kwargs['X'] if "X" in kwargs else args[0]
# y = kwargs['y'] if "y" in kwargs else args[1]
def shrink_tree(self):
root = self.estimator_.tree_.source
shrink_node(root, self.reg_param, None, None, 0, self.shrinkage_scheme_, 0)
def predict_proba(self, X):
probs = []
for i in range(X.shape[0]):
sample = X[i, ...]
node = self.estimator_.tree_.__find_leaf__(sample)
probs.append([1 - node["probs"], node["probs"]])
return np.array(probs)
def fit(self, *args, **kwargs):
X = kwargs['X'] if "X" in kwargs else args[0]
y = kwargs['y'] if "y" in kwargs else args[1]
if not hasattr(self.estimator_, "tree_"):
self.estimator_.fit(X, y)
self.impute_nodes(X, y)
self.shrink_tree()
def predict(self, X):
return self.estimator_.predict(X)
def score(self, X, y, weight=None):
return self.estimator_.score(X, y, weight)
@property
def complexity_(self):
return self.estimator_.complexity_
| (estimator_: imodels.tree.gosdt.pygosdt.OptimalTreeClassifier, reg_param: float = 1, shrinkage_scheme_: str = 'node_based') |
17,383 | imodels.tree.gosdt.pygosdt_shrinkage | __init__ |
Params
------
reg_param: float
Higher is more regularization (can be arbitrarily large, should not be < 0)
shrinkage_scheme: str
Experimental: Used to experiment with different forms of shrinkage. options are:
(i) node_based shrinks based on number of samples in parent node
(ii) leaf_based only shrinks leaf nodes based on number of leaf samples
(iii) constant shrinks every node by a constant lambda
| def __init__(self, estimator_: OptimalTreeClassifier, reg_param: float = 1, shrinkage_scheme_: str = 'node_based'):
"""
Params
------
reg_param: float
Higher is more regularization (can be arbitrarily large, should not be < 0)
shrinkage_scheme: str
Experimental: Used to experiment with different forms of shrinkage. options are:
(i) node_based shrinks based on number of samples in parent node
(ii) leaf_based only shrinks leaf nodes based on number of leaf samples
(iii) constant shrinks every node by a constant lambda
"""
super().__init__()
self.reg_param = reg_param
# print('est', estimator_)
self.estimator_ = estimator_
# self.tree_ = estimator_.tree_
self.shrinkage_scheme_ = shrinkage_scheme_
| (self, estimator_: imodels.tree.gosdt.pygosdt.OptimalTreeClassifier, reg_param: float = 1, shrinkage_scheme_: str = 'node_based') |
17,387 | imodels.tree.gosdt.pygosdt_shrinkage | _calc_probs | null | def _calc_probs(self, node):
lbls = np.array([float(l) for l in node["labels"]]) if "labels" in node else np.array(
[float(node['prediction'])])
node['probs'] = np.mean(lbls == 1)
node['n_obs'] = len(node.get('labels', []))
if "prediction" in node:
node['prediction'] = np.round(node['probs'])
return
self._calc_probs(node['true'])
self._calc_probs(node['false'])
| (self, node) |
17,398 | imodels.tree.gosdt.pygosdt_shrinkage | fit | null | def fit(self, *args, **kwargs):
X = kwargs['X'] if "X" in kwargs else args[0]
y = kwargs['y'] if "y" in kwargs else args[1]
if not hasattr(self.estimator_, "tree_"):
self.estimator_.fit(X, y)
self.impute_nodes(X, y)
self.shrink_tree()
| (self, *args, **kwargs) |
17,401 | imodels.tree.gosdt.pygosdt_shrinkage | impute_nodes |
Returns
---
the leaf by which this sample would be classified
| def impute_nodes(self, X, y):
"""
Returns
---
the leaf by which this sample would be classified
"""
source_node = self.estimator_.tree_.source
for i in range(len(y)):
sample, label = X[i, ...], y[i]
_add_label(source_node, label)
nodes = [source_node]
while len(nodes) > 0:
node = nodes.pop()
if "prediction" in node:
continue
else:
value = sample[node["feature"]]
reference = node["reference"]
relation = node["relation"]
if relation == "==":
is_true = value == reference
elif relation == ">=":
is_true = value >= reference
elif relation == "<=":
is_true = value <= reference
elif relation == "<":
is_true = value < reference
elif relation == ">":
is_true = value > reference
else:
raise ValueError("Unsupported relational operator {}".format(node["relation"]))
next_node = node['true'] if is_true else node['false']
_add_label(next_node, label)
nodes.append(next_node)
self._calc_probs(source_node)
self.estimator_.tree_.source = source_node
| (self, X, y) |
17,402 | imodels.tree.gosdt.pygosdt_shrinkage | predict | null | def predict(self, X):
return self.estimator_.predict(X)
| (self, X) |
17,403 | imodels.tree.gosdt.pygosdt_shrinkage | predict_proba | null | def predict_proba(self, X):
probs = []
for i in range(X.shape[0]):
sample = X[i, ...]
node = self.estimator_.tree_.__find_leaf__(sample)
probs.append([1 - node["probs"], node["probs"]])
return np.array(probs)
| (self, X) |
17,404 | imodels.tree.gosdt.pygosdt_shrinkage | score | null | def score(self, X, y, weight=None):
return self.estimator_.score(X, y, weight)
| (self, X, y, weight=None) |
17,406 | sklearn.utils._metadata_requests | set_score_request | Request metadata passed to the ``score`` method.
Note that this method is only relevant if
``enable_metadata_routing=True`` (see :func:`sklearn.set_config`).
Please see :ref:`User Guide <metadata_routing>` on how the routing
mechanism works.
The options for each parameter are:
- ``True``: metadata is requested, and passed to ``score`` if provided. The request is ignored if metadata is not provided.
- ``False``: metadata is not requested and the meta-estimator will not pass it to ``score``.
- ``None``: metadata is not requested, and the meta-estimator will raise an error if the user provides it.
- ``str``: metadata should be passed to the meta-estimator with this given alias instead of the original name.
The default (``sklearn.utils.metadata_routing.UNCHANGED``) retains the
existing request. This allows you to change the request for some
parameters and not others.
.. versionadded:: 1.3
.. note::
This method is only relevant if this estimator is used as a
sub-estimator of a meta-estimator, e.g. used inside a
:class:`~sklearn.pipeline.Pipeline`. Otherwise it has no effect.
Parameters
----------
weight : str, True, False, or None, default=sklearn.utils.metadata_routing.UNCHANGED
Metadata routing for ``weight`` parameter in ``score``.
Returns
-------
self : object
The updated object.
| def __get__(self, instance, owner):
# we would want to have a method which accepts only the expected args
def func(**kw):
"""Updates the request for provided parameters
This docstring is overwritten below.
See REQUESTER_DOC for expected functionality
"""
if not _routing_enabled():
raise RuntimeError(
"This method is only available when metadata routing is enabled."
" You can enable it using"
" sklearn.set_config(enable_metadata_routing=True)."
)
if self.validate_keys and (set(kw) - set(self.keys)):
raise TypeError(
f"Unexpected args: {set(kw) - set(self.keys)}. Accepted arguments"
f" are: {set(self.keys)}"
)
requests = instance._get_metadata_request()
method_metadata_request = getattr(requests, self.name)
for prop, alias in kw.items():
if alias is not UNCHANGED:
method_metadata_request.add_request(param=prop, alias=alias)
instance._metadata_request = requests
return instance
# Now we set the relevant attributes of the function so that it seems
# like a normal method to the end user, with known expected arguments.
func.__name__ = f"set_{self.name}_request"
params = [
inspect.Parameter(
name="self",
kind=inspect.Parameter.POSITIONAL_OR_KEYWORD,
annotation=owner,
)
]
params.extend(
[
inspect.Parameter(
k,
inspect.Parameter.KEYWORD_ONLY,
default=UNCHANGED,
annotation=Optional[Union[bool, None, str]],
)
for k in self.keys
]
)
func.__signature__ = inspect.Signature(
params,
return_annotation=owner,
)
doc = REQUESTER_DOC.format(method=self.name)
for metadata in self.keys:
doc += REQUESTER_DOC_PARAM.format(metadata=metadata, method=self.name)
doc += REQUESTER_DOC_RETURN
func.__doc__ = doc
return func
| (self: imodels.tree.gosdt.pygosdt_shrinkage.HSOptimalTreeClassifier, *, weight: Union[bool, NoneType, str] = '$UNCHANGED$') -> imodels.tree.gosdt.pygosdt_shrinkage.HSOptimalTreeClassifier |
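For context, a short sketch of how such a generated request method is used once routing is enabled (assumes scikit-learn >= 1.3; a plain sklearn tree is used as a stand-in because building the GOSDT-based classifier needs an optional dependency, and note that for `HSOptimalTreeClassifier` the generated method takes `weight` rather than `sample_weight`, per the signature above):

```python
import sklearn
from sklearn.tree import DecisionTreeClassifier

# metadata routing must be switched on, otherwise set_score_request raises RuntimeError
sklearn.set_config(enable_metadata_routing=True)

clf = DecisionTreeClassifier().set_score_request(sample_weight=True)
print(clf.get_metadata_routing())   # shows the recorded request for the `score` method
```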
17,407 | imodels.tree.gosdt.pygosdt_shrinkage | shrink_tree | null | def shrink_tree(self):
root = self.estimator_.tree_.source
shrink_node(root, self.reg_param, None, None, 0, self.shrinkage_scheme_, 0)
| (self) |
17,408 | imodels.tree.gosdt.pygosdt_shrinkage | HSOptimalTreeClassifierCV | null | class HSOptimalTreeClassifierCV(HSOptimalTreeClassifier):
def __init__(self, estimator_: OptimalTreeClassifier,
reg_param_list: List[float] = [0.1, 1, 10, 50, 100, 500], shrinkage_scheme_: str = 'node_based',
cv: int = 3, scoring="accuracy", *args, **kwargs):
"""Note: args, kwargs are not used but left so that imodels-experiments can still pass redundant args
"""
super().__init__(estimator_, reg_param=None)
self.reg_param_list = np.array(reg_param_list)
self.cv = cv
self.scoring = scoring
self.shrinkage_scheme_ = shrinkage_scheme_
# print('estimator', self.estimator_,
# 'checks.check_is_fitted(estimator)', checks.check_is_fitted(self.estimator_))
# if checks.check_is_fitted(self.estimator_):
# raise Warning('Passed an already fitted estimator,'
# 'but shrinking not applied until fit method is called.')
def fit(self, X, y, *args, **kwargs):
self.scores_ = []
opt = copy.deepcopy(self.estimator_)
for reg_param in self.reg_param_list:
est = HSOptimalTreeClassifier(opt, reg_param)
cv_scores = cross_val_score(est, X, y, cv=self.cv, scoring=self.scoring)
self.scores_.append(np.mean(cv_scores))
self.reg_param = self.reg_param_list[np.argmax(self.scores_)]
super().fit(X=X, y=y)
| (estimator_: imodels.tree.gosdt.pygosdt.OptimalTreeClassifier, reg_param_list: List[float] = [0.1, 1, 10, 50, 100, 500], shrinkage_scheme_: str = 'node_based', cv: int = 3, scoring='accuracy', *args, **kwargs) |
17,410 | imodels.tree.gosdt.pygosdt_shrinkage | __init__ | Note: args, kwargs are not used but left so that imodels-experiments can still pass redundant args
| def __init__(self, estimator_: OptimalTreeClassifier,
reg_param_list: List[float] = [0.1, 1, 10, 50, 100, 500], shrinkage_scheme_: str = 'node_based',
cv: int = 3, scoring="accuracy", *args, **kwargs):
"""Note: args, kwargs are not used but left so that imodels-experiments can still pass redundant args
"""
super().__init__(estimator_, reg_param=None)
self.reg_param_list = np.array(reg_param_list)
self.cv = cv
self.scoring = scoring
self.shrinkage_scheme_ = shrinkage_scheme_
# print('estimator', self.estimator_,
# 'checks.check_is_fitted(estimator)', checks.check_is_fitted(self.estimator_))
# if checks.check_is_fitted(self.estimator_):
# raise Warning('Passed an already fitted estimator,'
# 'but shrinking not applied until fit method is called.')
| (self, estimator_: imodels.tree.gosdt.pygosdt.OptimalTreeClassifier, reg_param_list: List[float] = [0.1, 1, 10, 50, 100, 500], shrinkage_scheme_: str = 'node_based', cv: int = 3, scoring='accuracy', *args, **kwargs) |
17,425 | imodels.tree.gosdt.pygosdt_shrinkage | fit | null | def fit(self, X, y, *args, **kwargs):
self.scores_ = []
opt = copy.deepcopy(self.estimator_)
for reg_param in self.reg_param_list:
est = HSOptimalTreeClassifier(opt, reg_param)
cv_scores = cross_val_score(est, X, y, cv=self.cv, scoring=self.scoring)
self.scores_.append(np.mean(cv_scores))
self.reg_param = self.reg_param_list[np.argmax(self.scores_)]
super().fit(X=X, y=y)
| (self, X, y, *args, **kwargs) |
17,435 | imodels.tree.hierarchical_shrinkage | HSTreeClassifier | null | class HSTreeClassifier(HSTree, ClassifierMixin):
def __init__(
self,
estimator_: BaseEstimator = DecisionTreeClassifier(max_leaf_nodes=20),
reg_param: float = 1,
shrinkage_scheme_: str = "node_based",
max_leaf_nodes: int = None,
random_state: int = None,
):
super().__init__(
estimator_=estimator_,
reg_param=reg_param,
shrinkage_scheme_=shrinkage_scheme_,
max_leaf_nodes=max_leaf_nodes,
random_state=random_state,
)
| (estimator_: sklearn.base.BaseEstimator = DecisionTreeClassifier(max_leaf_nodes=20), reg_param: float = 1, shrinkage_scheme_: str = 'node_based', max_leaf_nodes: int = None, random_state: int = None) |
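A minimal usage sketch for this class (assumes the imodels package is installed; the import path mirrors the package column of this row):

```python
from sklearn.datasets import make_classification
from sklearn.tree import DecisionTreeClassifier
from imodels.tree.hierarchical_shrinkage import HSTreeClassifier

X, y = make_classification(n_samples=200, n_features=5, random_state=0)

# fit a CART tree, then shrink each node's value toward its ancestors (node_based scheme)
model = HSTreeClassifier(estimator_=DecisionTreeClassifier(max_leaf_nodes=20), reg_param=10)
model.fit(X, y)

print(model.predict_proba(X[:3]))   # delegates to the shrunken underlying tree
print(model.complexity_)            # complexity measure computed in fit()
```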
17,437 | imodels.tree.hierarchical_shrinkage | __init__ | null | def __init__(
self,
estimator_: BaseEstimator = DecisionTreeClassifier(max_leaf_nodes=20),
reg_param: float = 1,
shrinkage_scheme_: str = "node_based",
max_leaf_nodes: int = None,
random_state: int = None,
):
super().__init__(
estimator_=estimator_,
reg_param=reg_param,
shrinkage_scheme_=shrinkage_scheme_,
max_leaf_nodes=max_leaf_nodes,
random_state=random_state,
)
| (self, estimator_: sklearn.base.BaseEstimator = DecisionTreeClassifier(max_leaf_nodes=20), reg_param: float = 1, shrinkage_scheme_: str = 'node_based', max_leaf_nodes: Optional[int] = None, random_state: Optional[int] = None) |
17,454 | imodels.tree.hierarchical_shrinkage | fit | null | def fit(self, X, y, sample_weight=None, *args, **kwargs):
# remove feature_names if it exists (note: only works as keyword-arg)
# None returned if not passed
feature_names = kwargs.pop("feature_names", None)
X, y, feature_names = check_fit_arguments(self, X, y, feature_names)
if feature_names is not None:
self.feature_names = feature_names
self.estimator_ = self.estimator_.fit(
X, y, *args, sample_weight=sample_weight, **kwargs
)
self._shrink()
# compute complexity
if hasattr(self.estimator_, "tree_"):
self.complexity_ = compute_tree_complexity(self.estimator_.tree_)
elif hasattr(self.estimator_, "estimators_"):
self.complexity_ = 0
for i in range(len(self.estimator_.estimators_)):
t = deepcopy(self.estimator_.estimators_[i])
if isinstance(t, np.ndarray):
assert t.size == 1, "multiple trees stored under tree_?"
t = t[0]
self.complexity_ += compute_tree_complexity(t.tree_)
return self
| (self, X, y, sample_weight=None, *args, **kwargs) |
17,462 | imodels.tree.hierarchical_shrinkage | HSTreeClassifierCV | null | class HSTreeClassifierCV(HSTreeClassifier):
def __init__(
self,
estimator_: BaseEstimator = None,
reg_param_list: List[float] = [0, 0.1, 1, 10, 50, 100, 500],
shrinkage_scheme_: str = "node_based",
max_leaf_nodes: int = 20,
cv: int = 3,
scoring=None,
*args,
**kwargs
):
"""Cross-validation is used to select the best regularization parameter for hierarchical shrinkage.
Params
------
estimator_
Sklearn estimator (already initialized).
If no estimator_ is passed, sklearn decision tree is used
max_leaf_nodes
If estimator is None, then max_leaf_nodes is passed to the default decision tree
args, kwargs
Note: args, kwargs are not used but left so that imodels-experiments can still pass redundant args.
"""
if estimator_ is None:
estimator_ = DecisionTreeClassifier(max_leaf_nodes=max_leaf_nodes)
super().__init__(estimator_, reg_param=None)
self.reg_param_list = np.array(reg_param_list)
self.cv = cv
self.scoring = scoring
self.shrinkage_scheme_ = shrinkage_scheme_
# print('estimator', self.estimator_,
# 'checks.check_is_fitted(estimator)', checks.check_is_fitted(self.estimator_))
# if checks.check_is_fitted(self.estimator_):
# raise Warning('Passed an already fitted estimator,'
# 'but shrinking not applied until fit method is called.')
def fit(self, X, y, *args, **kwargs):
self.scores_ = [[] for _ in self.reg_param_list]
scorer = kwargs.get("scoring", log_loss)
kf = KFold(n_splits=self.cv)
for train_index, test_index in kf.split(X):
X_out, y_out = X[test_index, :], y[test_index]
X_in, y_in = X[train_index, :], y[train_index]
base_est = deepcopy(self.estimator_)
base_est.fit(X_in, y_in)
for i, reg_param in enumerate(self.reg_param_list):
est_hs = HSTreeClassifier(base_est, reg_param)
est_hs.fit(X_in, y_in, *args, **kwargs)
self.scores_[i].append(
scorer(y_out, est_hs.predict_proba(X_out)))
self.scores_ = [np.mean(s) for s in self.scores_]
cv_criterion = _get_cv_criterion(scorer)
self.reg_param = self.reg_param_list[cv_criterion(self.scores_)]
super().fit(X=X, y=y, *args, **kwargs)
def __repr__(self):
attr_list = [
"estimator_",
"reg_param_list",
"shrinkage_scheme_",
"cv",
"scoring",
]
s = self.__class__.__name__
s += "("
for attr in attr_list:
s += attr + "=" + repr(getattr(self, attr)) + ", "
s = s[:-2] + ")"
return s
| (estimator_: sklearn.base.BaseEstimator = None, reg_param_list: List[float] = [0, 0.1, 1, 10, 50, 100, 500], shrinkage_scheme_: str = 'node_based', max_leaf_nodes: int = 20, cv: int = 3, scoring=None, *args, **kwargs) |
17,464 | imodels.tree.hierarchical_shrinkage | __init__ | Cross-validation is used to select the best regularization parameter for hierarchical shrinkage.
Params
------
estimator_
Sklearn estimator (already initialized).
If no estimator_ is passed, sklearn decision tree is used
max_leaf_nodes
If estimator is None, then max_leaf_nodes is passed to the default decision tree
args, kwargs
Note: args, kwargs are not used but left so that imodels-experiments can still pass redundant args.
| def __init__(
self,
estimator_: BaseEstimator = None,
reg_param_list: List[float] = [0, 0.1, 1, 10, 50, 100, 500],
shrinkage_scheme_: str = "node_based",
max_leaf_nodes: int = 20,
cv: int = 3,
scoring=None,
*args,
**kwargs
):
"""Cross-validation is used to select the best regularization parameter for hierarchical shrinkage.
Params
------
estimator_
Sklearn estimator (already initialized).
If no estimator_ is passed, sklearn decision tree is used
max_leaf_nodes
If estimator is None, then max_leaf_nodes is passed to the default decision tree
args, kwargs
Note: args, kwargs are not used but left so that imodels-experiments can still pass redundant args.
"""
if estimator_ is None:
estimator_ = DecisionTreeClassifier(max_leaf_nodes=max_leaf_nodes)
super().__init__(estimator_, reg_param=None)
self.reg_param_list = np.array(reg_param_list)
self.cv = cv
self.scoring = scoring
self.shrinkage_scheme_ = shrinkage_scheme_
# print('estimator', self.estimator_,
# 'checks.check_is_fitted(estimator)', checks.check_is_fitted(self.estimator_))
# if checks.check_is_fitted(self.estimator_):
# raise Warning('Passed an already fitted estimator,'
# 'but shrinking not applied until fit method is called.')
| (self, estimator_: Optional[sklearn.base.BaseEstimator] = None, reg_param_list: List[float] = [0, 0.1, 1, 10, 50, 100, 500], shrinkage_scheme_: str = 'node_based', max_leaf_nodes: int = 20, cv: int = 3, scoring=None, *args, **kwargs) |
17,465 | imodels.tree.hierarchical_shrinkage | __repr__ | null | def __repr__(self):
attr_list = [
"estimator_",
"reg_param_list",
"shrinkage_scheme_",
"cv",
"scoring",
]
s = self.__class__.__name__
s += "("
for attr in attr_list:
s += attr + "=" + repr(getattr(self, attr)) + ", "
s = s[:-2] + ")"
return s
| (self) |
17,481 | imodels.tree.hierarchical_shrinkage | fit | null | def fit(self, X, y, *args, **kwargs):
self.scores_ = [[] for _ in self.reg_param_list]
scorer = kwargs.get("scoring", log_loss)
kf = KFold(n_splits=self.cv)
for train_index, test_index in kf.split(X):
X_out, y_out = X[test_index, :], y[test_index]
X_in, y_in = X[train_index, :], y[train_index]
base_est = deepcopy(self.estimator_)
base_est.fit(X_in, y_in)
for i, reg_param in enumerate(self.reg_param_list):
est_hs = HSTreeClassifier(base_est, reg_param)
est_hs.fit(X_in, y_in, *args, **kwargs)
self.scores_[i].append(
scorer(y_out, est_hs.predict_proba(X_out)))
self.scores_ = [np.mean(s) for s in self.scores_]
cv_criterion = _get_cv_criterion(scorer)
self.reg_param = self.reg_param_list[cv_criterion(self.scores_)]
super().fit(X=X, y=y, *args, **kwargs)
| (self, X, y, *args, **kwargs) |
17,489 | imodels.tree.hierarchical_shrinkage | HSTreeRegressor | null | class HSTreeRegressor(HSTree, RegressorMixin):
def __init__(
self,
estimator_: BaseEstimator = DecisionTreeRegressor(max_leaf_nodes=20),
reg_param: float = 1,
shrinkage_scheme_: str = "node_based",
max_leaf_nodes: int = None,
random_state: int = None,
):
super().__init__(
estimator_=estimator_,
reg_param=reg_param,
shrinkage_scheme_=shrinkage_scheme_,
max_leaf_nodes=max_leaf_nodes,
random_state=random_state,
)
| (estimator_: sklearn.base.BaseEstimator = DecisionTreeRegressor(max_leaf_nodes=20), reg_param: float = 1, shrinkage_scheme_: str = 'node_based', max_leaf_nodes: int = None, random_state: int = None) |
17,491 | imodels.tree.hierarchical_shrinkage | __init__ | null | def __init__(
self,
estimator_: BaseEstimator = DecisionTreeRegressor(max_leaf_nodes=20),
reg_param: float = 1,
shrinkage_scheme_: str = "node_based",
max_leaf_nodes: int = None,
random_state: int = None,
):
super().__init__(
estimator_=estimator_,
reg_param=reg_param,
shrinkage_scheme_=shrinkage_scheme_,
max_leaf_nodes=max_leaf_nodes,
random_state=random_state,
)
| (self, estimator_: sklearn.base.BaseEstimator = DecisionTreeRegressor(max_leaf_nodes=20), reg_param: float = 1, shrinkage_scheme_: str = 'node_based', max_leaf_nodes: Optional[int] = None, random_state: Optional[int] = None) |
17,516 | imodels.tree.hierarchical_shrinkage | HSTreeRegressorCV | null | class HSTreeRegressorCV(HSTreeRegressor):
def __init__(
self,
estimator_: BaseEstimator = None,
reg_param_list: List[float] = [0, 0.1, 1, 10, 50, 100, 500],
shrinkage_scheme_: str = "node_based",
max_leaf_nodes: int = 20,
cv: int = 3,
scoring=None,
*args,
**kwargs
):
"""Cross-validation is used to select the best regularization parameter for hierarchical shrinkage.
Params
------
estimator_
Sklearn estimator (already initialized).
If no estimator_ is passed, sklearn decision tree is used
max_leaf_nodes
If estimator is None, then max_leaf_nodes is passed to the default decision tree
args, kwargs
Note: args, kwargs are not used but left so that imodels-experiments can still pass redundant args.
"""
if estimator_ is None:
estimator_ = DecisionTreeRegressor(max_leaf_nodes=max_leaf_nodes)
super().__init__(estimator_, reg_param=None)
self.reg_param_list = np.array(reg_param_list)
self.cv = cv
self.scoring = scoring
self.shrinkage_scheme_ = shrinkage_scheme_
# print('estimator', self.estimator_,
# 'checks.check_is_fitted(estimator)', checks.check_is_fitted(self.estimator_))
# if checks.check_is_fitted(self.estimator_):
# raise Warning('Passed an already fitted estimator,'
# 'but shrinking not applied until fit method is called.')
def fit(self, X, y, *args, **kwargs):
self.scores_ = [[] for _ in self.reg_param_list]
kf = KFold(n_splits=self.cv)
scorer = kwargs.get("scoring", mean_squared_error)
for train_index, test_index in kf.split(X):
X_out, y_out = X[test_index, :], y[test_index]
X_in, y_in = X[train_index, :], y[train_index]
base_est = deepcopy(self.estimator_)
base_est.fit(X_in, y_in)
for i, reg_param in enumerate(self.reg_param_list):
est_hs = HSTreeRegressor(base_est, reg_param)
est_hs.fit(X_in, y_in)
self.scores_[i].append(scorer(est_hs.predict(X_out), y_out))
self.scores_ = [np.mean(s) for s in self.scores_]
cv_criterion = _get_cv_criterion(scorer)
self.reg_param = self.reg_param_list[cv_criterion(self.scores_)]
super().fit(X=X, y=y, *args, **kwargs)
def __repr__(self):
attr_list = [
"estimator_",
"reg_param_list",
"shrinkage_scheme_",
"cv",
"scoring",
]
s = self.__class__.__name__
s += "("
for attr in attr_list:
s += attr + "=" + repr(getattr(self, attr)) + ", "
s = s[:-2] + ")"
return s
| (estimator_: sklearn.base.BaseEstimator = None, reg_param_list: List[float] = [0, 0.1, 1, 10, 50, 100, 500], shrinkage_scheme_: str = 'node_based', max_leaf_nodes: int = 20, cv: int = 3, scoring=None, *args, **kwargs) |
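A minimal usage sketch for the CV variant (assumes imodels is installed); the class sweeps `reg_param_list` with internal K-fold CV, keeps the best value in `reg_param`, and then refits on the full data:

```python
from sklearn.datasets import make_regression
from imodels.tree.hierarchical_shrinkage import HSTreeRegressorCV

X, y = make_regression(n_samples=300, n_features=8, noise=5.0, random_state=0)

model = HSTreeRegressorCV(reg_param_list=[0, 1, 10, 100], cv=3)  # default base tree: max_leaf_nodes=20
model.fit(X, y)

print(model.reg_param)   # regularization strength selected by CV
print(model.scores_)     # mean CV score per candidate (mean squared error by default)
```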
17,518 | imodels.tree.hierarchical_shrinkage | __init__ | Cross-validation is used to select the best regularization parameter for hierarchical shrinkage.
Params
------
estimator_
Sklearn estimator (already initialized).
If no estimator_ is passed, sklearn decision tree is used
max_leaf_nodes
If estimator is None, then max_leaf_nodes is passed to the default decision tree
args, kwargs
Note: args, kwargs are not used but left so that imodels-experiments can still pass redundant args.
| def __init__(
self,
estimator_: BaseEstimator = None,
reg_param_list: List[float] = [0, 0.1, 1, 10, 50, 100, 500],
shrinkage_scheme_: str = "node_based",
max_leaf_nodes: int = 20,
cv: int = 3,
scoring=None,
*args,
**kwargs
):
"""Cross-validation is used to select the best regularization parameter for hierarchical shrinkage.
Params
------
estimator_
Sklearn estimator (already initialized).
If no estimator_ is passed, sklearn decision tree is used
max_leaf_nodes
If estimator is None, then max_leaf_nodes is passed to the default decision tree
args, kwargs
Note: args, kwargs are not used but left so that imodels-experiments can still pass redundant args.
"""
if estimator_ is None:
estimator_ = DecisionTreeRegressor(max_leaf_nodes=max_leaf_nodes)
super().__init__(estimator_, reg_param=None)
self.reg_param_list = np.array(reg_param_list)
self.cv = cv
self.scoring = scoring
self.shrinkage_scheme_ = shrinkage_scheme_
# print('estimator', self.estimator_,
# 'checks.check_is_fitted(estimator)', checks.check_is_fitted(self.estimator_))
# if checks.check_is_fitted(self.estimator_):
# raise Warning('Passed an already fitted estimator,'
# 'but shrinking not applied until fit method is called.')
| (self, estimator_: Optional[sklearn.base.BaseEstimator] = None, reg_param_list: List[float] = [0, 0.1, 1, 10, 50, 100, 500], shrinkage_scheme_: str = 'node_based', max_leaf_nodes: int = 20, cv: int = 3, scoring=None, *args, **kwargs) |
17,535 | imodels.tree.hierarchical_shrinkage | fit | null | def fit(self, X, y, *args, **kwargs):
self.scores_ = [[] for _ in self.reg_param_list]
kf = KFold(n_splits=self.cv)
scorer = kwargs.get("scoring", mean_squared_error)
for train_index, test_index in kf.split(X):
X_out, y_out = X[test_index, :], y[test_index]
X_in, y_in = X[train_index, :], y[train_index]
base_est = deepcopy(self.estimator_)
base_est.fit(X_in, y_in)
for i, reg_param in enumerate(self.reg_param_list):
est_hs = HSTreeRegressor(base_est, reg_param)
est_hs.fit(X_in, y_in)
self.scores_[i].append(scorer(est_hs.predict(X_out), y_out))
self.scores_ = [np.mean(s) for s in self.scores_]
cv_criterion = _get_cv_criterion(scorer)
self.reg_param = self.reg_param_list[cv_criterion(self.scores_)]
super().fit(X=X, y=y, *args, **kwargs)
| (self, X, y, *args, **kwargs) |
17,543 | imodels.discretization.mdlp | MDLPDiscretizer | null | class MDLPDiscretizer(object):
def __init__(self, dataset, class_label, out_path_data=None, out_path_bins=None, features=None):
'''
initializes discretizer object:
saves raw copy of data and creates self._data with only features to discretize and class
computes initial entropy (before any splitting)
self._features = features to be discretized
self._classes = unique classes in raw_data
self._class_name = label of class in pandas dataframe
self._data = partition of data with only features of interest and class
self._cuts = dictionary with cut points for each feature
Params
------
dataset
pandas dataframe with data to discretize
class_label
name of the column containing class in input dataframe
features
if not None, the features that the user specifically wants to discretize
'''
if not isinstance(dataset, pd.core.frame.DataFrame): # class needs a pandas dataframe
raise AttributeError('input dataset should be a pandas data frame')
self._data_raw = dataset # copy of original input data
self._class_name = class_label
self._classes = self._data_raw[self._class_name] # .unique()
self._classes = self._classes.drop_duplicates()
# if user specifies which attributes to discretize
if features:
self._features = [f for f in features if f in self._data_raw.columns] # check if features in dataframe
missing = set(features) - set(self._features) # specified columns not in dataframe
if missing:
print('WARNING: user-specified features %s not in input dataframe' % str(missing))
else: # then we need to recognize which features are numeric
numeric_cols = self._data_raw.select_dtypes(include=np.number).columns
self._features = [f for f in numeric_cols if f != class_label]
# other features that won't be discretized
self._ignored_features = set(self._data_raw.columns) - set(self._features)
# create copy of data only including features to discretize and class
self._data = self._data_raw.loc[:, self._features + [class_label]]
self._data = self._data.infer_objects() # convert_objects(convert_numeric=True)
# pre-compute all boundary points in dataset
self._boundaries = self._compute_boundary_points_all_features()
# initialize feature bins with empty arrays
self._cuts = {f: [] for f in self._features}
# get cuts for all features
self._all_features_accepted_cutpoints()
# discretize self._data
self._apply_cutpoints(out_data_path=out_path_data, out_bins_path=out_path_bins)
def MDLPC_criterion(self, data, feature, cut_point):
'''
Determines whether a partition is accepted according to the MDLPC criterion
:param feature: feature of interest
:param cut_point: proposed cut_point
:param partition_index: index of the sample (dataframe partition) in the interval of interest
:return: True/False, whether to accept the partition
'''
# get dataframe only with desired attribute and class columns, and split by cut_point
data_partition = data.copy(deep=True)
data_left = data_partition[data_partition[feature] <= cut_point]
data_right = data_partition[data_partition[feature] > cut_point]
# compute information gain obtained when splitting data at cut_point
cut_point_gain = cut_point_information_gain(dataset=data_partition, cut_point=cut_point,
feature_label=feature, class_label=self._class_name)
# compute delta term in MDLPC criterion
N = len(data_partition) # number of examples in current partition
partition_entropy = entropy(data_partition[self._class_name])
k = len(data_partition[self._class_name].unique())
k_left = len(data_left[self._class_name].unique())
k_right = len(data_right[self._class_name].unique())
entropy_left = entropy(data_left[self._class_name]) # entropy of partition
entropy_right = entropy(data_right[self._class_name])
delta = log(3 ** k, 2) - (k * partition_entropy) + (k_left * entropy_left) + (k_right * entropy_right)
# to split or not to split
gain_threshold = (log(N - 1, 2) + delta) / N
if cut_point_gain > gain_threshold:
return True
else:
return False
def _feature_boundary_points(self, data, feature):
'''
Given an attribute, find all potential cut_points (boundary points)
:param feature: feature of interest
:param partition_index: indices of rows for which feature value falls within interval of interest
:return: array with potential cut_points
'''
# get dataframe with only rows of interest, and feature and class columns
data_partition = data.copy(deep=True)
data_partition.sort_values(feature, ascending=True, inplace=True)
boundary_points = []
# add temporary columns
data_partition['class_offset'] = data_partition[self._class_name].shift(
1) # column where first value is now second, and so forth
data_partition['feature_offset'] = data_partition[feature].shift(
1) # column where first value is now second, and so forth
data_partition['feature_change'] = (data_partition[feature] != data_partition['feature_offset'])
data_partition['mid_points'] = data_partition.loc[:, [feature, 'feature_offset']].mean(axis=1)
potential_cuts = data_partition[data_partition['feature_change'] == True].index[1:]
sorted_index = data_partition.index.tolist()
for row in potential_cuts:
old_value = data_partition.loc[sorted_index[sorted_index.index(row) - 1]][feature]
new_value = data_partition.loc[row][feature]
old_classes = data_partition[data_partition[feature] == old_value][self._class_name].unique()
new_classes = data_partition[data_partition[feature] == new_value][self._class_name].unique()
if len(set.union(set(old_classes), set(new_classes))) > 1:
boundary_points += [data_partition.loc[row]['mid_points']]
return set(boundary_points)
def _compute_boundary_points_all_features(self):
'''
Computes all possible boundary points for each attribute in self._features (features to discretize)
:return:
'''
boundaries = {}
for attr in self._features:
data_partition = self._data.loc[:, [attr, self._class_name]]
boundaries[attr] = self._feature_boundary_points(data=data_partition, feature=attr)
return boundaries
def _boundaries_in_partition(self, data, feature):
'''
From the collection of all cut points for all features, find cut points that fall within a feature-partition's
attribute-values' range
:param data: data partition (pandas dataframe)
:param feature: attribute of interest
:return: points within feature's range
'''
range_min, range_max = (data[feature].min(), data[feature].max())
return set([x for x in self._boundaries[feature] if (x > range_min) and (x < range_max)])
def _best_cut_point(self, data, feature):
'''
Selects the best cut point for a feature in a data partition based on information gain
:param data: data partition (pandas dataframe)
:param feature: target attribute
:return: value of cut point with highest information gain (if many, picks first). None if no candidates
'''
candidates = self._boundaries_in_partition(data=data, feature=feature)
# candidates = self.feature_boundary_points(data=data, feature=feature)
if not candidates:
return None
gains = [(cut, cut_point_information_gain(dataset=data, cut_point=cut, feature_label=feature,
class_label=self._class_name)) for cut in candidates]
gains = sorted(gains, key=lambda x: x[1], reverse=True)
return gains[0][0] # return cut point
def _single_feature_accepted_cutpoints(self, feature, partition_index=pd.DataFrame().index):
'''
Computes the cuts for binning a feature according to the MDLP criterion
:param feature: attribute of interest
:param partition_index: index of examples in data partition for which cuts are required
:return: list of cuts for binning feature in partition covered by partition_index
'''
if partition_index.size == 0:
partition_index = self._data.index # if not specified, full sample to be considered for partition
data_partition = self._data.loc[partition_index, [feature, self._class_name]]
# exclude missing data:
if data_partition[feature].isnull().values.any():
data_partition = data_partition[~data_partition[feature].isnull()]
# stop if constant or null feature values
if len(data_partition[feature].unique()) < 2:
return
# determine whether to cut and where
cut_candidate = self._best_cut_point(data=data_partition, feature=feature)
if cut_candidate is None:
return
decision = self.MDLPC_criterion(data=data_partition, feature=feature, cut_point=cut_candidate)
# apply decision
if not decision:
return # if partition wasn't accepted, there's nothing else to do
if decision:
# try:
# now we have two new partitions that need to be examined
left_partition = data_partition[data_partition[feature] <= cut_candidate]
right_partition = data_partition[data_partition[feature] > cut_candidate]
if left_partition.empty or right_partition.empty:
return # extreme point selected, don't partition
self._cuts[feature] += [cut_candidate] # accept partition
self._single_feature_accepted_cutpoints(feature=feature, partition_index=left_partition.index)
self._single_feature_accepted_cutpoints(feature=feature, partition_index=right_partition.index)
# order cutpoints in ascending order
self._cuts[feature] = sorted(self._cuts[feature])
return
def _all_features_accepted_cutpoints(self):
'''
Computes cut points for all numeric features (the ones in self._features)
:return:
'''
for attr in self._features:
self._single_feature_accepted_cutpoints(feature=attr)
return
def _apply_cutpoints(self, out_data_path=None, out_bins_path=None):
'''
Discretizes data by applying bins according to self._cuts. Saves a new, discretized file, and a description of
the bins
:param out_data_path: path to save discretized data
:param out_bins_path: path to save bins description
:return:
'''
bin_label_collection = {}
for attr in self._features:
if len(self._cuts[attr]) == 0:
self._data[attr] = 'All'
bin_label_collection[attr] = ['All']
else:
cuts = [-np.inf] + self._cuts[attr] + [np.inf]
start_bin_indices = range(0, len(cuts) - 1)
bin_labels = ['%s_to_%s' % (str(cuts[i]), str(cuts[i + 1])) for i in start_bin_indices]
bin_label_collection[attr] = bin_labels
self._data[attr] = pd.cut(x=self._data[attr].values, bins=cuts, right=False, labels=bin_labels,
precision=6, include_lowest=True)
# reconstitute full data, now discretized
if self._ignored_features:
to_return = pd.concat([self._data, self._data_raw[list(self._ignored_features)]], axis=1)
to_return = to_return[self._data_raw.columns] # sort columns so they have the original order
else:
to_return = self._data
# save data as csv
if out_data_path:
to_return.to_csv(out_data_path)
# save bins description
if out_bins_path:
with open(out_bins_path, 'w') as bins_file:
print('Description of bins in file: %s' % out_data_path, file=bins_file)
# print(>>bins_file, 'Description of bins in file: %s' % out_data_path)
for attr in self._features:
print('attr: %s\n\t%s' % (attr, ', '.join([bin_label for bin_label in bin_label_collection[attr]])),
file=bins_file)
| (dataset, class_label, out_path_data=None, out_path_bins=None, features=None) |
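A minimal usage sketch for the discretizer (assumes imodels is installed); note that all the work happens inside `__init__`, and `_cuts` is a private attribute inspected here only for illustration:

```python
from sklearn.datasets import load_iris
from imodels.discretization.mdlp import MDLPDiscretizer

iris = load_iris(as_frame=True).frame   # features plus a 'target' class column

disc = MDLPDiscretizer(dataset=iris, class_label='target',
                       features=['petal length (cm)', 'petal width (cm)'])

print(disc._cuts)   # accepted MDLP cut points for each requested feature
```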
17,544 | imodels.discretization.mdlp | MDLPC_criterion |
Determines whether a partition is accepted according to the MDLPC criterion
:param feature: feature of interest
:param cut_point: proposed cut_point
:param partition_index: index of the sample (dataframe partition) in the interval of interest
:return: True/False, whether to accept the partition
| def MDLPC_criterion(self, data, feature, cut_point):
'''
Determines whether a partition is accepted according to the MDLPC criterion
:param feature: feature of interest
:param cut_point: proposed cut_point
:param partition_index: index of the sample (dataframe partition) in the interval of interest
:return: True/False, whether to accept the partition
'''
# get dataframe only with desired attribute and class columns, and split by cut_point
data_partition = data.copy(deep=True)
data_left = data_partition[data_partition[feature] <= cut_point]
data_right = data_partition[data_partition[feature] > cut_point]
# compute information gain obtained when splitting data at cut_point
cut_point_gain = cut_point_information_gain(dataset=data_partition, cut_point=cut_point,
feature_label=feature, class_label=self._class_name)
# compute delta term in MDLPC criterion
N = len(data_partition) # number of examples in current partition
partition_entropy = entropy(data_partition[self._class_name])
k = len(data_partition[self._class_name].unique())
k_left = len(data_left[self._class_name].unique())
k_right = len(data_right[self._class_name].unique())
entropy_left = entropy(data_left[self._class_name]) # entropy of partition
entropy_right = entropy(data_right[self._class_name])
delta = log(3 ** k, 2) - (k * partition_entropy) + (k_left * entropy_left) + (k_right * entropy_right)
# to split or not to split
gain_threshold = (log(N - 1, 2) + delta) / N
if cut_point_gain > gain_threshold:
return True
else:
return False
| (self, data, feature, cut_point) |
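Written out, the acceptance test implemented above is the following, with N the partition size, H the entropy, and k, k_L, k_R the number of distinct classes in the partition and its two halves:

```latex
\text{accept cut } T \iff \mathrm{Gain}(S; A, T) \;>\; \frac{\log_2(N-1) + \Delta}{N},
\qquad
\Delta \;=\; \log_2\!\big(3^{k}\big) \;-\; k\,H(S) \;+\; k_L\,H(S_L) \;+\; k_R\,H(S_R)
```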
17,545 | imodels.discretization.mdlp | __init__ |
initializes discretizer object:
saves raw copy of data and creates self._data with only features to discretize and class
computes initial entropy (before any splitting)
self._features = features to be discretized
self._classes = unique classes in raw_data
self._class_name = label of class in pandas dataframe
self._data = partition of data with only features of interest and class
self._cuts = dictionary with cut points for each feature
Params
------
dataset
pandas dataframe with data to discretize
class_label
name of the column containing class in input dataframe
features
if not None, the features that the user specifically wants to discretize
| def __init__(self, dataset, class_label, out_path_data=None, out_path_bins=None, features=None):
'''
initializes discretizer object:
saves raw copy of data and creates self._data with only features to discretize and class
computes initial entropy (before any splitting)
self._features = features to be discretized
self._classes = unique classes in raw_data
self._class_name = label of class in pandas dataframe
self._data = partition of data with only features of interest and class
self._cuts = dictionary with cut points for each feature
Params
------
dataset
pandas dataframe with data to discretize
class_label
name of the column containing class in input dataframe
features
if not None, the features that the user specifically wants to discretize
'''
if not isinstance(dataset, pd.core.frame.DataFrame): # class needs a pandas dataframe
raise AttributeError('input dataset should be a pandas data frame')
self._data_raw = dataset # copy of original input data
self._class_name = class_label
self._classes = self._data_raw[self._class_name] # .unique()
self._classes = self._classes.drop_duplicates()
# if user specifies which attributes to discretize
if features:
self._features = [f for f in features if f in self._data_raw.columns] # check if features in dataframe
missing = set(features) - set(self._features) # specified columns not in dataframe
if missing:
print('WARNING: user-specified features %s not in input dataframe' % str(missing))
else: # then we need to recognize which features are numeric
numeric_cols = self._data_raw.select_dtypes(include=np.number).columns
self._features = [f for f in numeric_cols if f != class_label]
# other features that won't be discretized
self._ignored_features = set(self._data_raw.columns) - set(self._features)
# create copy of data only including features to discretize and class
self._data = self._data_raw.loc[:, self._features + [class_label]]
self._data = self._data.infer_objects() # convert_objects(convert_numeric=True)
# pre-compute all boundary points in dataset
self._boundaries = self._compute_boundary_points_all_features()
# initialize feature bins with empty arrays
self._cuts = {f: [] for f in self._features}
# get cuts for all features
self._all_features_accepted_cutpoints()
# discretize self._data
self._apply_cutpoints(out_data_path=out_path_data, out_bins_path=out_path_bins)
| (self, dataset, class_label, out_path_data=None, out_path_bins=None, features=None) |
17,546 | imodels.discretization.mdlp | _all_features_accepted_cutpoints |
Computes cut points for all numeric features (the ones in self._features)
:return:
| def _all_features_accepted_cutpoints(self):
'''
Computes cut points for all numeric features (the ones in self._features)
:return:
'''
for attr in self._features:
self._single_feature_accepted_cutpoints(feature=attr)
return
| (self) |
17,547 | imodels.discretization.mdlp | _apply_cutpoints |
Discretizes data by applying bins according to self._cuts. Saves a new, discretized file, and a description of
the bins
:param out_data_path: path to save discretized data
:param out_bins_path: path to save bins description
:return:
| def _apply_cutpoints(self, out_data_path=None, out_bins_path=None):
'''
Discretizes data by applying bins according to self._cuts. Saves a new, discretized file, and a description of
the bins
:param out_data_path: path to save discretized data
:param out_bins_path: path to save bins description
:return:
'''
bin_label_collection = {}
for attr in self._features:
if len(self._cuts[attr]) == 0:
self._data[attr] = 'All'
bin_label_collection[attr] = ['All']
else:
cuts = [-np.inf] + self._cuts[attr] + [np.inf]
start_bin_indices = range(0, len(cuts) - 1)
bin_labels = ['%s_to_%s' % (str(cuts[i]), str(cuts[i + 1])) for i in start_bin_indices]
bin_label_collection[attr] = bin_labels
self._data[attr] = pd.cut(x=self._data[attr].values, bins=cuts, right=False, labels=bin_labels,
precision=6, include_lowest=True)
# reconstitute full data, now discretized
if self._ignored_features:
to_return = pd.concat([self._data, self._data_raw[list(self._ignored_features)]], axis=1)
to_return = to_return[self._data_raw.columns] # sort columns so they have the original order
else:
to_return = self._data
# save data as csv
if out_data_path:
to_return.to_csv(out_data_path)
# save bins description
if out_bins_path:
with open(out_bins_path, 'w') as bins_file:
print('Description of bins in file: %s' % out_data_path, file=bins_file)
# print(>>bins_file, 'Description of bins in file: %s' % out_data_path)
for attr in self._features:
print('attr: %s\n\t%s' % (attr, ', '.join([bin_label for bin_label in bin_label_collection[attr]])),
file=bins_file)
| (self, out_data_path=None, out_bins_path=None) |
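A tiny, self-contained illustration of the binning step above (the cut points here are hypothetical); labels follow the same `'<low>_to_<high>'` pattern the code builds:

```python
import numpy as np
import pandas as pd

cuts = [-np.inf, 2.45, 4.75, np.inf]   # e.g. accepted cut points for one feature
labels = ['%s_to_%s' % (cuts[i], cuts[i + 1]) for i in range(len(cuts) - 1)]

binned = pd.cut([1.4, 3.0, 5.1], bins=cuts, right=False, labels=labels, include_lowest=True)
print(list(binned))   # ['-inf_to_2.45', '2.45_to_4.75', '4.75_to_inf']
```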
17,548 | imodels.discretization.mdlp | _best_cut_point |
Selects the best cut point for a feature in a data partition based on information gain
:param data: data partition (pandas dataframe)
:param feature: target attribute
:return: value of cut point with highest information gain (if many, picks first). None if no candidates
| def _best_cut_point(self, data, feature):
'''
Selects the best cut point for a feature in a data partition based on information gain
:param data: data partition (pandas dataframe)
:param feature: target attribute
:return: value of cut point with highest information gain (if many, picks first). None if no candidates
'''
candidates = self._boundaries_in_partition(data=data, feature=feature)
# candidates = self.feature_boundary_points(data=data, feature=feature)
if not candidates:
return None
gains = [(cut, cut_point_information_gain(dataset=data, cut_point=cut, feature_label=feature,
class_label=self._class_name)) for cut in candidates]
gains = sorted(gains, key=lambda x: x[1], reverse=True)
return gains[0][0] # return cut point
| (self, data, feature) |
17,549 | imodels.discretization.mdlp | _boundaries_in_partition |
From the collection of all cut points for all features, find cut points that fall within a feature-partition's
attribute-values' range
:param data: data partition (pandas dataframe)
:param feature: attribute of interest
:return: points within feature's range
| def _boundaries_in_partition(self, data, feature):
'''
From the collection of all cut points for all features, find cut points that fall within a feature-partition's
attribute-values' range
:param data: data partition (pandas dataframe)
:param feature: attribute of interest
:return: points within feature's range
'''
range_min, range_max = (data[feature].min(), data[feature].max())
return set([x for x in self._boundaries[feature] if (x > range_min) and (x < range_max)])
| (self, data, feature) |
17,550 | imodels.discretization.mdlp | _compute_boundary_points_all_features |
Computes all possible boundary points for each attribute in self._features (features to discretize)
:return:
| def _compute_boundary_points_all_features(self):
'''
Computes all possible boundary points for each attribute in self._features (features to discretize)
:return:
'''
boundaries = {}
for attr in self._features:
data_partition = self._data.loc[:, [attr, self._class_name]]
boundaries[attr] = self._feature_boundary_points(data=data_partition, feature=attr)
return boundaries
| (self) |
17,551 | imodels.discretization.mdlp | _feature_boundary_points |
Given an attribute, find all potential cut_points (boundary points)
:param feature: feature of interest
:param partition_index: indices of rows for which feature value falls within interval of interest
:return: array with potential cut_points
| def _feature_boundary_points(self, data, feature):
'''
Given an attribute, find all potential cut_points (boundary points)
:param feature: feature of interest
:param partition_index: indices of rows for which feature value falls within interval of interest
:return: array with potential cut_points
'''
# get dataframe with only rows of interest, and feature and class columns
data_partition = data.copy(deep=True)
data_partition.sort_values(feature, ascending=True, inplace=True)
boundary_points = []
# add temporary columns
data_partition['class_offset'] = data_partition[self._class_name].shift(
1) # column where first value is now second, and so forth
data_partition['feature_offset'] = data_partition[feature].shift(
1) # column where first value is now second, and so forth
data_partition['feature_change'] = (data_partition[feature] != data_partition['feature_offset'])
data_partition['mid_points'] = data_partition.loc[:, [feature, 'feature_offset']].mean(axis=1)
potential_cuts = data_partition[data_partition['feature_change'] == True].index[1:]
sorted_index = data_partition.index.tolist()
for row in potential_cuts:
old_value = data_partition.loc[sorted_index[sorted_index.index(row) - 1]][feature]
new_value = data_partition.loc[row][feature]
old_classes = data_partition[data_partition[feature] == old_value][self._class_name].unique()
new_classes = data_partition[data_partition[feature] == new_value][self._class_name].unique()
if len(set.union(set(old_classes), set(new_classes))) > 1:
boundary_points += [data_partition.loc[row]['mid_points']]
return set(boundary_points)
| (self, data, feature) |
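
The shift-based bookkeeping above implements a simple idea: after sorting by the feature, a midpoint between two consecutive distinct values is a candidate cut only if the rows carrying those two values are not all of a single class. A standalone sketch of that idea (not the method above):

```python
import pandas as pd

def boundary_points(df: pd.DataFrame, feature: str, label: str) -> set:
    d = df.sort_values(feature).reset_index(drop=True)
    points = set()
    for i in range(1, len(d)):
        prev_val, cur_val = d.loc[i - 1, feature], d.loc[i, feature]
        if prev_val == cur_val:
            continue  # no change in feature value, so no midpoint here
        classes = set(d.loc[d[feature] == prev_val, label]) | set(d.loc[d[feature] == cur_val, label])
        if len(classes) > 1:  # the classes around the change point differ
            points.add((prev_val + cur_val) / 2)
    return points

df = pd.DataFrame({"x": [1.0, 1.0, 2.0, 3.0, 3.0], "y": [0, 0, 0, 1, 1]})
print(boundary_points(df, "x", "y"))  # {2.5}; the 1.0/2.0 change point is skipped (all class 0)
```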
17,552 | imodels.discretization.mdlp | _single_feature_accepted_cutpoints |
Computes the cuts for binning a feature according to the MDLP criterion
:param feature: attribute of interest
:param partition_index: index of examples in data partition for which cuts are required
:return: None; accepted cut points for the feature are accumulated in self._cuts[feature]
| def _single_feature_accepted_cutpoints(self, feature, partition_index=pd.DataFrame().index):
'''
Computes the cuts for binning a feature according to the MDLP criterion
:param feature: attribute of interest
:param partition_index: index of examples in data partition for which cuts are required
:return: None; accepted cut points for the feature are accumulated in self._cuts[feature]
'''
if partition_index.size == 0:
partition_index = self._data.index # if not specified, full sample to be considered for partition
data_partition = self._data.loc[partition_index, [feature, self._class_name]]
# exclude missing data:
if data_partition[feature].isnull().values.any():
data_partition = data_partition[~data_partition[feature].isnull()]
# stop if constant or null feature values
if len(data_partition[feature].unique()) < 2:
return
# determine whether to cut and where
cut_candidate = self._best_cut_point(data=data_partition, feature=feature)
if cut_candidate is None:
return
decision = self.MDLPC_criterion(data=data_partition, feature=feature, cut_point=cut_candidate)
# apply decision
if not decision:
return # if partition wasn't accepted, there's nothing else to do
if decision:
# try:
# now we have two new partitions that need to be examined
left_partition = data_partition[data_partition[feature] <= cut_candidate]
right_partition = data_partition[data_partition[feature] > cut_candidate]
if left_partition.empty or right_partition.empty:
return # extreme point selected, don't partition
self._cuts[feature] += [cut_candidate] # accept partition
self._single_feature_accepted_cutpoints(feature=feature, partition_index=left_partition.index)
self._single_feature_accepted_cutpoints(feature=feature, partition_index=right_partition.index)
# order cutpoints in ascending order
self._cuts[feature] = sorted(self._cuts[feature])
return
| (self, feature, partition_index=RangeIndex(start=0, stop=0, step=1)) |
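
The recursion above accepts a cut only when `MDLPC_criterion` decides the information gain outweighs its minimum-description-length coding cost (Fayyad & Irani, 1993), then repeats on the two halves. A simplified standalone sketch of that loop; unlike the method above it considers every midpoint rather than only boundary points and ignores missing values:

```python
import numpy as np
import pandas as pd

def _ent(s: pd.Series) -> float:
    p = s.value_counts(normalize=True).to_numpy()
    return float(-(p * np.log2(p)).sum())

def mdlp_cuts(df: pd.DataFrame, feature: str, label: str, cuts=None) -> list:
    cuts = [] if cuts is None else cuts
    n = len(df)
    if df[feature].nunique() < 2:
        return cuts
    # candidate = midpoint with the largest gain (the real code restricts candidates to boundary points)
    values = np.sort(df[feature].unique())
    mids = (values[:-1] + values[1:]) / 2
    gains = []
    for c in mids:
        l, r = df[df[feature] <= c], df[df[feature] > c]
        gains.append(_ent(df[label]) - (len(l) * _ent(l[label]) + len(r) * _ent(r[label])) / n)
    best = mids[int(np.argmax(gains))]
    gain = max(gains)
    l, r = df[df[feature] <= best], df[df[feature] > best]
    k, k1, k2 = df[label].nunique(), l[label].nunique(), r[label].nunique()
    delta = np.log2(3 ** k - 2) - (k * _ent(df[label]) - k1 * _ent(l[label]) - k2 * _ent(r[label]))
    if gain <= (np.log2(n - 1) + delta) / n:
        return cuts  # MDLP criterion rejects the cut
    cuts.append(best)
    mdlp_cuts(l, feature, label, cuts)   # recurse on the left partition
    mdlp_cuts(r, feature, label, cuts)   # recurse on the right partition
    return sorted(cuts)

df = pd.DataFrame({"x": np.r_[np.arange(10), np.arange(20, 30)].astype(float),
                   "y": [0] * 10 + [1] * 10})
print(mdlp_cuts(df, "x", "y"))  # a single accepted cut at 14.5
```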
17,553 | imodels.algebraic.marginal_shrinkage_linear_model | MarginalShrinkageLinearModelRegressor | null | class MarginalShrinkageLinearModelRegressor(
MarginalShrinkageLinearModel, RegressorMixin
):
...
| (est_marginal_name='ridge', est_main_name='ridge', marginal_divide_by_d=True, marginal_sign_constraint=False, alphas=[0.001, 0.01, 0.09999999999999999, 1.0, 10.0, 100.0, 1000.0, 10000.0, 100000.0], elasticnet_ratio=0.5, random_state=None) |
17,555 | imodels.algebraic.marginal_shrinkage_linear_model | __init__ |
Params
------
est_marginal_name : str
Name of estimator to use for marginal effects (marginal regression)
If "None", then assume marginal effects are zero (standard Ridge)
est_main_name : str
Name of estimator to use for main effects
If "None", then assume marginal effects are zero (standard Ridge)
"ridge", "lasso", "elasticnet"
marginal_divide_by_d : bool
If True, then divide marginal effects by n_features
marginal_sign_constraint : bool
If True, then constrain main effects to be same sign as marginal effects
alphas: Tuple[float]
Alphas to try for regularized regression (only main, not marginal)
elasticnet_ratio : float
If using elasticnet, Ratio of l1 to l2 penalty for elastic net
random_state : int
Random seed
| def __init__(
self,
est_marginal_name="ridge",
est_main_name="ridge",
marginal_divide_by_d=True,
marginal_sign_constraint=False,
alphas=np.logspace(-3, 5, num=9).tolist(),
elasticnet_ratio=0.5,
random_state=None,
):
"""
Params
------
est_marginal_name : str
Name of estimator to use for marginal effects (marginal regression)
If "None", then assume marginal effects are zero (standard Ridge)
est_main_name : str
Name of estimator to use for main effects
If "None", then assume marginal effects are zero (standard Ridge)
"ridge", "lasso", "elasticnet"
marginal_divide_by_d : bool
If True, then divide marginal effects by n_features
marginal_sign_constraint : bool
If True, then constrain main effects to be same sign as marginal effects
alphas: Tuple[float]
Alphas to try for regularized regression (only main, not marginal)
elasticnet_ratio : float
If using elasticnet, Ratio of l1 to l2 penalty for elastic net
random_state : int
Random seed
"""
self.random_state = random_state
self.est_marginal_name = est_marginal_name
self.est_main_name = est_main_name
self.marginal_divide_by_d = marginal_divide_by_d
self.marginal_sign_constraint = marginal_sign_constraint
self.elasticnet_ratio = elasticnet_ratio
if alphas is None:
alphas = np.logspace(-3, 5, num=9).tolist()
elif isinstance(alphas, float) or isinstance(alphas, int):
alphas = [alphas]
self.alphas = alphas
| (self, est_marginal_name='ridge', est_main_name='ridge', marginal_divide_by_d=True, marginal_sign_constraint=False, alphas=[0.001, 0.01, 0.09999999999999999, 1.0, 10.0, 100.0, 1000.0, 10000.0, 100000.0], elasticnet_ratio=0.5, random_state=None) |
17,559 | imodels.algebraic.marginal_shrinkage_linear_model | __str__ | null | def __str__(self):
return (
repr(self)
.replace("MarginalShrinkageLinearModel", "MSLM")
.replace("Regressor", "Reg")
.replace("Classifier", "Clf")
)
| (self) |
17,562 | imodels.algebraic.marginal_shrinkage_linear_model | _fit_main | null | def _fit_main(self, X, y, sample_weight, coef_marginal_):
# constrain main effects to be same sign as marginal effects by flipping sign
# of X appropriately and refitting with a non-negative least squares
est_main_ = self._get_est_from_name(
self.est_main_name,
alphas=self.alphas,
marginal_sign_constraint=self.marginal_sign_constraint,
)
if self.marginal_sign_constraint:
assert self.est_marginal_name is not None, "must have marginal effects"
coef_signs = np.sign(coef_marginal_)
X = X * coef_signs
est_main_.fit(X, y, sample_weight=sample_weight)
est_main_.coef_ = est_main_.coef_ * coef_signs
# check that signs do not disagree
coef_final_signs = np.sign(est_main_.coef_)
assert np.all(
(coef_final_signs == coef_signs) | (coef_final_signs == 0)
), "signs should agree but" + str(np.sign(est_main_.coef_), coef_signs)
elif est_main_ is None:
# fit dummy clf and override coefs
est_main_ = ElasticNetCV(fit_intercept=False)
est_main_.fit(X[:5], y[:5])
est_main_.coef_ = coef_marginal_
else:
# fit main estimator
# fitting the residuals is equivalent to placing a ridge prior centered at coef_marginal,
# via the change of variables coef_new = coef - coef_marginal
preds_marginal = X @ coef_marginal_
residuals = y - preds_marginal
est_main_.fit(X, residuals, sample_weight=sample_weight)
est_main_.coef_ = est_main_.coef_ + coef_marginal_
return est_main_
| (self, X, y, sample_weight, coef_marginal_) |
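
The final `else` branch above shrinks the ridge solution toward `coef_marginal_` rather than toward zero: fit the residuals `y - X @ coef_marginal_`, then add the marginal coefficients back. A minimal standalone sketch of that trick with scikit-learn's `RidgeCV` (synthetic data and a hand-picked `coef_marginal` for illustration):

```python
import numpy as np
from sklearn.linear_model import RidgeCV

rng = np.random.default_rng(0)
X = rng.standard_normal((200, 5))
y = X @ np.array([1.0, 0.5, 0.0, -0.5, 2.0]) + 0.1 * rng.standard_normal(200)

coef_marginal = np.array([0.8, 0.4, 0.0, -0.4, 1.5])  # stand-in for coefficients from marginal fits
ridge = RidgeCV(alphas=[0.1, 1.0, 10.0], fit_intercept=False)
ridge.fit(X, y - X @ coef_marginal)                   # fit the residuals
coef_shrunk = ridge.coef_ + coef_marginal             # undo the change of variables
print(coef_shrunk.round(2))
```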
17,563 | imodels.algebraic.marginal_shrinkage_linear_model | _fit_marginal | null | def _fit_marginal(self, X, y, sample_weight):
# initialize marginal estimator
ALPHAS_MARGINAL = np.logspace(-1, 3, num=5).tolist()
est_marginal = self._get_est_from_name(
self.est_marginal_name,
alphas=ALPHAS_MARGINAL,
marginal_sign_constraint=False,
)
# fit marginal estimator to each feature
if est_marginal is None:
coef_marginal_ = np.zeros(X.shape[1])
else:
coef_marginal_ = []
for i in range(X.shape[1]):
est_marginal.fit(X[:, i].reshape(-1, 1), y,
sample_weight=sample_weight)
coef_marginal_.append(deepcopy(est_marginal.coef_))
coef_marginal_ = np.vstack(coef_marginal_).squeeze()
# evenly divide effects among features
if self.marginal_divide_by_d:
coef_marginal_ /= X.shape[1]
return coef_marginal_
| (self, X, y, sample_weight) |
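
Each marginal coefficient above comes from a univariate regularized fit of one column against `y`, stacked into a vector and optionally divided by the number of features. A standalone sketch with `RidgeCV` (not the method itself):

```python
import numpy as np
from sklearn.linear_model import RidgeCV

rng = np.random.default_rng(0)
X = rng.standard_normal((200, 4))
y = X[:, 0] + 0.5 * X[:, 1] + 0.1 * rng.standard_normal(200)

coef_marginal = np.array([
    RidgeCV(alphas=[0.1, 1.0, 10.0], fit_intercept=False).fit(X[:, [j]], y).coef_[0]
    for j in range(X.shape[1])
])
coef_marginal /= X.shape[1]  # the marginal_divide_by_d option
print(coef_marginal.round(3))
```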
17,565 | imodels.algebraic.marginal_shrinkage_linear_model | _get_est_from_name | null | def _get_est_from_name(self, est_name, alphas, marginal_sign_constraint):
L1_RATIOS = {
"ridge": 1e-6,
"lasso": 1,
"elasticnet": self.elasticnet_ratio,
}
if est_name not in L1_RATIOS:
return None
else:
if est_name == "ridge" and not marginal_sign_constraint:
# this implementation is better than ElasticNetCV with l1_ratio close to 0
return RidgeCV(
alphas=alphas,
fit_intercept=False,
)
return ElasticNetCV(
l1_ratio=L1_RATIOS[est_name],
alphas=alphas,
max_iter=10000,
fit_intercept=False,
positive=bool(marginal_sign_constraint),
)
| (self, est_name, alphas, marginal_sign_constraint) |
17,573 | imodels.algebraic.marginal_shrinkage_linear_model | fit | null | def fit(self, X, y, sample_weight=None):
# checks
X, y = check_X_y(X, y, accept_sparse=False, multi_output=False)
sample_weight = _check_sample_weight(sample_weight, X, dtype=None)
if isinstance(self, ClassifierMixin):
check_classification_targets(y)
self.classes_, y = np.unique(y, return_inverse=True)
# preprocess X and y
self.scalar_X_ = StandardScaler()
X = self.scalar_X_.fit_transform(X)
if isinstance(self, RegressorMixin):
self.scalar_y_ = StandardScaler()
y = self.scalar_y_.fit_transform(y.reshape(-1, 1)).squeeze()
# fit marginal
self.coef_marginal_ = self._fit_marginal(X, y, sample_weight)
# fit main
self.est_main_ = self._fit_main(
X, y, sample_weight, self.coef_marginal_)
return self
| (self, X, y, sample_weight=None) |
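
A minimal end-to-end usage sketch, assuming the regressor can be imported from the module path shown in these records:

```python
import numpy as np
from imodels.algebraic.marginal_shrinkage_linear_model import (
    MarginalShrinkageLinearModelRegressor,
)

rng = np.random.default_rng(0)
X = rng.standard_normal((300, 8))
y = X[:, 0] - 2 * X[:, 3] + 0.2 * rng.standard_normal(300)

m = MarginalShrinkageLinearModelRegressor(est_marginal_name="ridge", est_main_name="ridge")
m.fit(X, y)
print(m)             # abbreviated repr via __str__ ("MSLM...Reg")
print(m.predict(X[:3]))
```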
17,576 | imodels.algebraic.marginal_shrinkage_linear_model | predict | null | def predict(self, X):
X = self.scalar_X_.transform(X)
pred = self.est_main_.predict(X)
return self.scalar_y_.inverse_transform(pred.reshape(-1, 1)).squeeze()
| (self, X) |
17,577 | imodels.algebraic.marginal_shrinkage_linear_model | predict_proba | null | def predict_proba(self, X):
X = self.scalar_X_.transform(X)
return self.est_main_.predict_proba(X)
| (self, X) |
17,582 | sklearn.base | MetaEstimatorMixin | Mixin class for all meta estimators in scikit-learn.
This mixin defines the following functionality:
- define `_required_parameters` that specify the mandatory `estimator` parameter.
Examples
--------
>>> from sklearn.base import MetaEstimatorMixin
>>> from sklearn.datasets import load_iris
>>> from sklearn.linear_model import LogisticRegression
>>> class MyEstimator(MetaEstimatorMixin):
... def __init__(self, *, estimator=None):
... self.estimator = estimator
... def fit(self, X, y=None):
... if self.estimator is None:
... self.estimator_ = LogisticRegression()
... else:
... self.estimator_ = self.estimator
... return self
>>> X, y = load_iris(return_X_y=True)
>>> estimator = MyEstimator().fit(X, y)
>>> estimator.estimator_
LogisticRegression()
| class MetaEstimatorMixin:
"""Mixin class for all meta estimators in scikit-learn.
This mixin defines the following functionality:
- define `_required_parameters` that specify the mandatory `estimator` parameter.
Examples
--------
>>> from sklearn.base import MetaEstimatorMixin
>>> from sklearn.datasets import load_iris
>>> from sklearn.linear_model import LogisticRegression
>>> class MyEstimator(MetaEstimatorMixin):
... def __init__(self, *, estimator=None):
... self.estimator = estimator
... def fit(self, X, y=None):
... if self.estimator is None:
... self.estimator_ = LogisticRegression()
... else:
... self.estimator_ = self.estimator
... return self
>>> X, y = load_iris(return_X_y=True)
>>> estimator = MyEstimator().fit(X, y)
>>> estimator.estimator_
LogisticRegression()
"""
_required_parameters = ["estimator"]
| () |
17,583 | imodels.rule_list.one_r | OneRClassifier | null | class OneRClassifier(GreedyRuleListClassifier):
def __init__(self, max_depth=5, class_weight=None, criterion='gini'):
self.max_depth = max_depth
self.feature_names_ = None
self.class_weight = class_weight
self.criterion = criterion
self._estimator_type = 'classifier'
def fit(self, X, y, feature_names=None):
"""Fit oneR
"""
X, y, feature_names = check_fit_arguments(self, X, y, feature_names)
ms = []
accs = np.zeros(X.shape[1])
for col_idx in range(X.shape[1]):
x = X[:, col_idx].reshape(-1, 1)
m = GreedyRuleListClassifier(max_depth=self.max_depth, class_weight=self.class_weight,
criterion=self.criterion)
feat_names_single = [self.feature_names_[col_idx]]
m.fit(x, y, feature_names=feat_names_single)
accs[col_idx] = np.mean(m.predict(x) == y)
ms.append(m)
# print('acc', feat_names_single[0], f'{accs[col_idx]:0.2f}')
col_idx_best = np.argmax(accs)
self.rules_ = ms[col_idx_best].rules_
self.complexity_ = len(self.rules_)
# need to adjust index_col since was fitted with only 1 col
for rule in self.rules_:
if 'index_col' in rule:
rule['index_col'] += col_idx_best
self.depth = len(self.rules_)
return self
| (max_depth=5, class_weight=None, criterion='gini') |
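
A minimal usage sketch, assuming `OneRClassifier` is exported from the top-level `imodels` package (as in the project README):

```python
import numpy as np
from imodels import OneRClassifier  # assumed top-level export

rng = np.random.default_rng(0)
X = rng.integers(0, 2, size=(200, 4))
y = X[:, 2]                          # the label depends on a single column
m = OneRClassifier(max_depth=2)
m.fit(X, y, feature_names=["f0", "f1", "f2", "f3"])
print(len(m.rules_), (m.predict(X) == y).mean())  # the rules reference the single best feature
```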
17,585 | imodels.rule_list.one_r | __init__ | null | def __init__(self, max_depth=5, class_weight=None, criterion='gini'):
self.max_depth = max_depth
self.feature_names_ = None
self.class_weight = class_weight
self.criterion = criterion
self._estimator_type = 'classifier'
| (self, max_depth=5, class_weight=None, criterion='gini') |
17,607 | imodels.rule_list.one_r | fit | Fit oneR
| def fit(self, X, y, feature_names=None):
"""Fit oneR
"""
X, y, feature_names = check_fit_arguments(self, X, y, feature_names)
ms = []
accs = np.zeros(X.shape[1])
for col_idx in range(X.shape[1]):
x = X[:, col_idx].reshape(-1, 1)
m = GreedyRuleListClassifier(max_depth=self.max_depth, class_weight=self.class_weight,
criterion=self.criterion)
feat_names_single = [self.feature_names_[col_idx]]
m.fit(x, y, feature_names=feat_names_single)
accs[col_idx] = np.mean(m.predict(x) == y)
ms.append(m)
# print('acc', feat_names_single[0], f'{accs[col_idx]:0.2f}')
col_idx_best = np.argmax(accs)
self.rules_ = ms[col_idx_best].rules_
self.complexity_ = len(self.rules_)
# need to adjust index_col since was fitted with only 1 col
for rule in self.rules_:
if 'index_col' in rule:
rule['index_col'] += col_idx_best
self.depth = len(self.rules_)
return self
| (self, X, y, feature_names=None) |
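
The loop above delegates each single-feature fit to `GreedyRuleListClassifier`; the underlying OneR idea can be sketched without that dependency: predict the per-value majority class for each feature and keep the feature with the highest training accuracy.

```python
import numpy as np

def one_r_best_feature(X: np.ndarray, y: np.ndarray) -> int:
    accs = []
    for j in range(X.shape[1]):
        preds = np.empty_like(y)
        for v in np.unique(X[:, j]):
            mask = X[:, j] == v
            vals, counts = np.unique(y[mask], return_counts=True)
            preds[mask] = vals[np.argmax(counts)]  # majority class for this feature value
        accs.append((preds == y).mean())
    return int(np.argmax(accs))

rng = np.random.default_rng(0)
X = rng.integers(0, 3, size=(300, 5))
y = (X[:, 4] > 0).astype(int)
print(one_r_best_feature(X, y))  # 4
```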
17,617 | imodels.rule_list.corels_wrapper | OptimalRuleListClassifier | Certifiably Optimal RulE ListS classifier.
This class implements the CORELS algorithm, designed to produce human-interpretable, optimal
rulelists for binary feature data and binary classification. As an alternative to other
tree based algorithms such as CART, CORELS provides a certificate of optimality for its
rulelist given a training set, leveraging multiple algorithmic bounds to do so.
To run the algorithm, create an instance of the `CorelsClassifier` class,
providing any necessary parameters in its constructor, and then call `fit` to generate
a rulelist. `printrl` prints the generated rulelist, while `predict` provides
classification predictions for a separate test dataset with the same features. To determine
the algorithm's accuracy, run `score` on an evaluation dataset with labels.
To save a generated rulelist to a file, call `save`. To load it back from the file, call `load`.
Attributes
----------
c : float, optional (default=0.01)
Regularization parameter. Higher values penalize longer rulelists.
n_iter : int, optional (default=10000)
Maximum number of nodes (rulelists) to search before exiting.
map_type : str, optional (default="prefix")
The type of prefix map to use. Supported maps are "none" for no map,
"prefix" for a map that uses rule prefixes for keys, "captured" for
a map with a prefix's captured vector as keys.
policy : str, optional (default="lower_bound")
The search policy for traversing the tree (i.e. the criterion with which
to order nodes in the queue). Supported criteria are "bfs", for breadth-first
search; "curious", which attempts to find the most promising node;
"lower_bound" which is the objective function evaluated with that rulelist
minus the default prediction error; "objective" for the objective function
evaluated at that rulelist; and "dfs" for depth-first search.
verbosity : list, optional (default=["rulelist"])
The verbosity levels required. A list of strings, it can contain any
subset of ["rulelist", "rule", "label", "minor", "samples", "progress", "mine", "loud"].
An empty list ([]) indicates 'silent' mode.
- "rulelist" prints the generated rulelist at the end.
- "rule" prints a summary of each rule generated.
- "label" prints a summary of the class labels.
- "minor" prints a summary of the minority bound.
- "samples" produces a complete dump of the rules, label, and/or minor data. You must also provide at least one of "rule", "label", or "minor" to specify which data you want to dump, or "loud" for all data. The "samples" option often spits out a lot of output.
- "progress" prints periodic messages as corels runs.
- "mine" prints debug information while mining rules, including each rule as it is generated.
- "loud" is the equivalent of ["progress", "label", "rule", "mine", "minor"].
ablation : int, optional (default=0)
Specifies additional parameters for the bounds used while searching. Accepted
values are 0 (all bounds), 1 (no antecedent support bound), and 2 (no
lookahead bound).
max_card : int, optional (default=2)
Maximum cardinality allowed when mining rules. Can be any value greater than
or equal to 1. For instance, a value of 2 would only allow rules that combine
at most two features in their antecedents.
min_support : float, optional (default=0.01)
The fraction of samples that a rule must capture in order to be used. 1 minus
this value is also the maximum fraction of samples a rule can capture.
Can be any value between 0.0 and 0.5.
References
----------
Elaine Angelino, Nicholas Larus-Stone, Daniel Alabi, Margo Seltzer, and Cynthia Rudin.
Learning Certifiably Optimal Rule Lists for Categorical Data. KDD 2017.
Journal of Machine Learning Research, 2018; 19: 1-77. arXiv:1704.01701, 2017
Examples
--------
| class OptimalRuleListClassifier(GreedyRuleListClassifier if not corels_supported else CorelsClassifier):
"""Certifiably Optimal RulE ListS classifier.
This class implements the CORELS algorithm, designed to produce human-interpretable, optimal
rulelists for binary feature data and binary classification. As an alternative to other
tree based algorithms such as CART, CORELS provides a certificate of optimality for its
rulelist given a training set, leveraging multiple algorithmic bounds to do so.
To run the algorithm, create an instance of the `CorelsClassifier` class,
providing any necessary parameters in its constructor, and then call `fit` to generate
a rulelist. `printrl` prints the generated rulelist, while `predict` provides
classification predictions for a separate test dataset with the same features. To determine
the algorithm's accuracy, run `score` on an evaluation dataset with labels.
To save a generated rulelist to a file, call `save`. To load it back from the file, call `load`.
Attributes
----------
c : float, optional (default=0.01)
Regularization parameter. Higher values penalize longer rulelists.
n_iter : int, optional (default=10000)
Maximum number of nodes (rulelists) to search before exiting.
map_type : str, optional (default="prefix")
The type of prefix map to use. Supported maps are "none" for no map,
"prefix" for a map that uses rule prefixes for keys, "captured" for
a map with a prefix's captured vector as keys.
policy : str, optional (default="lower_bound")
The search policy for traversing the tree (i.e. the criterion with which
to order nodes in the queue). Supported criteria are "bfs", for breadth-first
search; "curious", which attempts to find the most promising node;
"lower_bound" which is the objective function evaluated with that rulelist
minus the default prediction error; "objective" for the objective function
evaluated at that rulelist; and "dfs" for depth-first search.
verbosity : list, optional (default=["rulelist"])
The verbosity levels required. A list of strings, it can contain any
subset of ["rulelist", "rule", "label", "minor", "samples", "progress", "mine", "loud"].
An empty list ([]) indicates 'silent' mode.
- "rulelist" prints the generated rulelist at the end.
- "rule" prints a summary of each rule generated.
- "label" prints a summary of the class labels.
- "minor" prints a summary of the minority bound.
- "samples" produces a complete dump of the rules, label, and/or minor data. You must also provide at least one of "rule", "label", or "minor" to specify which data you want to dump, or "loud" for all data. The "samples" option often spits out a lot of output.
- "progress" prints periodic messages as corels runs.
- "mine" prints debug information while mining rules, including each rule as it is generated.
- "loud" is the equivalent of ["progress", "label", "rule", "mine", "minor"].
ablation : int, optional (default=0)
Specifies additional parameters for the bounds used while searching. Accepted
values are 0 (all bounds), 1 (no antecedent support bound), and 2 (no
lookahead bound).
max_card : int, optional (default=2)
Maximum cardinality allowed when mining rules. Can be any value greater than
or equal to 1. For instance, a value of 2 would only allow rules that combine
at most two features in their antecedents.
min_support : float, optional (default=0.01)
The fraction of samples that a rule must capture in order to be used. 1 minus
this value is also the maximum fraction of samples a rule can capture.
Can be any value between 0.0 and 0.5.
References
----------
Elaine Angelino, Nicholas Larus-Stone, Daniel Alabi, Margo Seltzer, and Cynthia Rudin.
Learning Certifiably Optimal Rule Lists for Categorical Data. KDD 2017.
Journal of Machine Learning Research, 2018; 19: 1-77. arXiv:1704.01701, 2017
Examples
--------
"""
def __init__(self, c=0.01, n_iter=10000, map_type="prefix", policy="lower_bound",
verbosity=[], ablation=0, max_card=2, min_support=0.01, random_state=0):
if corels_supported:
super().__init__(c, n_iter, map_type, policy, verbosity, ablation, max_card, min_support)
else:
warnings.warn("Should install corels with pip install corels. Using GreedyRuleList instead.")
super().__init__()
self.fit = super().fit
self.predict = super().predict
self.predict_proba = super().predict_proba
self.__str__ = super().__str__
self.random_state = random_state
self.discretizer = None
self.str_print = None
self._estimator_type = 'classifier'
def fit(self, X, y, feature_names=None, prediction_name="prediction"):
"""
Build a CORELS classifier from the training set (X, y).
Parameters
----------
X : array-like, shape = [n_samples, n_features]
The training input samples. All features must be binary, and the matrix
is internally converted to dtype=np.uint8.
y : array-like, shape = [n_samples]
The target values for the training input. Must be binary.
feature_names : list, optional(default=None)
A list of strings of length n_features. Specifies the names of each
of the features. If an empty list is provided, the feature names
are set to the default of ["feature1", "feature2"... ].
prediction_name : string, optional(default="prediction")
The name of the feature that is being predicted.
Returns
-------
self : obj
"""
if isinstance(X, pd.DataFrame):
if feature_names is None:
feature_names = X.columns.tolist()
X = X.values
elif feature_names is None:
feature_names = ['X_' + str(i) for i in range(X.shape[1])]
# check if any non-binary values
if not np.isin(X, [0, 1]).all().all():
self.discretizer = KBinsDiscretizer(encode='onehot-dense')
self.discretizer.fit(X, y)
"""
feature_names = [f'{col}_{b}'
for col, bins in zip(feature_names, self.discretizer.n_bins_)
for b in range(bins)]
"""
feature_names = self.discretizer.get_feature_names_out()
X = self.discretizer.transform(X)
np.random.seed(self.random_state)
# feature_names = feature_names.tolist()
super().fit(X, y, features=feature_names, prediction_name=prediction_name)
# try:
self._traverse_rule(X, y, feature_names)
# except:
# self.str_print = None
self.complexity_ = self._get_complexity()
return self
def predict(self, X):
"""
Predict classifications of the input samples X.
Arguments
---------
X : array-like, shape = [n_samples, n_features]
The training input samples. All features must be binary, and the matrix
is internally converted to dtype=np.uint8. The features must be the same
as those of the data used to train the model.
Returns
-------
p : array[int] of shape = [n_samples].
The classifications of the input samples.
"""
if self.discretizer is not None:
X = self.discretizer.transform(X)
return super().predict(X).astype(int)
def predict_proba(self, X):
"""
Predict probabilities of the input samples X.
todo: actually calculate these from training set
Arguments
---------
X : array-like, shape = [n_samples, n_features]
The training input samples. All features must be binary, and the matrix
is internally converted to dtype=np.uint8. The features must be the same
as those of the data used to train the model.
Returns
-------
p : array[float] of shape = [n_samples, 2].
The probabilities of the input samples.
"""
preds = self.predict(X)
return np.vstack((1 - preds, preds)).transpose()
def _traverse_rule(self, X: np.ndarray, y: np.ndarray, feature_names: List[str], print_colors=False):
"""Traverse rule and build up string representation
Parameters
----------
X, y, feature_names : training data and column names used to compute per-rule counts
Returns
-------
"""
str_print = f''
df = pd.DataFrame(X, columns=feature_names)
df.loc[:, 'y'] = y
o = 'y'
str_print += f' {df[o].sum()} / {df.shape[0]} (positive class / total)\n'
if print_colors:
color_start = '\033[96m'
color_end = '\033[00m'
else:
color_start = ''
color_end = ''
if len(self.rl_.rules) > 1:
str_print += f'\t\u2193 \n'
else:
str_print += ' No rules learned\n'
for j, rule in enumerate(self.rl_.rules[:-1]):
antecedents = rule['antecedents']
query = ''
for i, feat_idx in enumerate(antecedents):
if i > 0:
query += ' & '
if feat_idx < 0:
query += f'(`{feature_names[-feat_idx - 1]}` == 0)'
else:
query += f'(`{feature_names[feat_idx - 1]}` == 1)'
df_rhs = df.query(query)
idxs_satisfying_rule = df_rhs.index
df.drop(index=idxs_satisfying_rule, inplace=True)
computed_prob = 100 * df_rhs[o].sum() / (df_rhs.shape[0] + 1e-10)
# add to str_print
query_print = query.replace('== 1', '').replace('(', '').replace(')', '').replace('`', '')
str_print += f'{color_start}If {query_print:<35}{color_end} \u2192 {df_rhs[o].sum():>3} / {df_rhs.shape[0]:>4} ({computed_prob:0.1f}%)\n\t\u2193 \n {df[o].sum():>3} / {df.shape[0]:>5}\t \n'
if not (j == len(self.rl_.rules) - 2 and i == len(antecedents) - 1):
str_print += '\t\u2193 \n'
self.str_print = str_print
def __str__(self):
if corels_supported:
if self.str_print is not None:
return 'OptimalRuleList:\n\n' + self.str_print
else:
return 'OptimalRuleList:\n\n' + self.rl_.__str__()
else:
return super().__str__()
def _get_complexity(self):
return sum([len(corule['antecedents']) for corule in self.rl_.rules])
| (c=0.01, n_iter=10000, map_type='prefix', policy='lower_bound', verbosity=[], ablation=0, max_card=2, min_support=0.01, random_state=0) |
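
A minimal usage sketch, assuming `OptimalRuleListClassifier` is exported from the top-level `imodels` package; if the optional `corels` dependency is missing, the class falls back to `GreedyRuleListClassifier` as the constructor above warns:

```python
import numpy as np
from imodels import OptimalRuleListClassifier  # assumed top-level export

rng = np.random.default_rng(0)
X = rng.integers(0, 2, size=(300, 5))               # CORELS expects binary features
y = ((X[:, 0] == 1) & (X[:, 3] == 0)).astype(int)
m = OptimalRuleListClassifier(c=0.01, max_card=2)
m.fit(X, y, feature_names=[f"x{i}" for i in range(5)])
print(m)                                            # human-readable rule list
print(m.predict_proba(X[:5]))
```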
17,619 | imodels.rule_list.corels_wrapper | __init__ | null | def __init__(self, c=0.01, n_iter=10000, map_type="prefix", policy="lower_bound",
verbosity=[], ablation=0, max_card=2, min_support=0.01, random_state=0):
if corels_supported:
super().__init__(c, n_iter, map_type, policy, verbosity, ablation, max_card, min_support)
else:
warnings.warn("Should install corels with pip install corels. Using GreedyRuleList instead.")
super().__init__()
self.fit = super().fit
self.predict = super().predict
self.predict_proba = super().predict_proba
self.__str__ = super().__str__
self.random_state = random_state
self.discretizer = None
self.str_print = None
self._estimator_type = 'classifier'
| (self, c=0.01, n_iter=10000, map_type='prefix', policy='lower_bound', verbosity=[], ablation=0, max_card=2, min_support=0.01, random_state=0) |
17,623 | imodels.rule_list.corels_wrapper | __str__ | null | def __str__(self):
if corels_supported:
if self.str_print is not None:
return 'OptimalRuleList:\n\n' + self.str_print
else:
return 'OptimalRuleList:\n\n' + self.rl_.__str__()
else:
return super().__str__()
| (self) |
17,628 | imodels.rule_list.corels_wrapper | _get_complexity | null | def _get_complexity(self):
return sum([len(corule['antecedents']) for corule in self.rl_.rules])
| (self) |
17,638 | imodels.rule_list.corels_wrapper | _traverse_rule | Traverse rule and build up string representation
Parameters
----------
X, y, feature_names : training data and column names used to compute per-rule counts
Returns
-------
| def _traverse_rule(self, X: np.ndarray, y: np.ndarray, feature_names: List[str], print_colors=False):
"""Traverse rule and build up string representation
Parameters
----------
X, y, feature_names : training data and column names used to compute per-rule counts
Returns
-------
"""
str_print = f''
df = pd.DataFrame(X, columns=feature_names)
df.loc[:, 'y'] = y
o = 'y'
str_print += f' {df[o].sum()} / {df.shape[0]} (positive class / total)\n'
if print_colors:
color_start = '\033[96m'
color_end = '\033[00m'
else:
color_start = ''
color_end = ''
if len(self.rl_.rules) > 1:
str_print += f'\t\u2193 \n'
else:
str_print += ' No rules learned\n'
for j, rule in enumerate(self.rl_.rules[:-1]):
antecedents = rule['antecedents']
query = ''
for i, feat_idx in enumerate(antecedents):
if i > 0:
query += ' & '
if feat_idx < 0:
query += f'(`{feature_names[-feat_idx - 1]}` == 0)'
else:
query += f'(`{feature_names[feat_idx - 1]}` == 1)'
df_rhs = df.query(query)
idxs_satisfying_rule = df_rhs.index
df.drop(index=idxs_satisfying_rule, inplace=True)
computed_prob = 100 * df_rhs[o].sum() / (df_rhs.shape[0] + 1e-10)
# add to str_print
query_print = query.replace('== 1', '').replace('(', '').replace(')', '').replace('`', '')
str_print += f'{color_start}If {query_print:<35}{color_end} \u2192 {df_rhs[o].sum():>3} / {df_rhs.shape[0]:>4} ({computed_prob:0.1f}%)\n\t\u2193 \n {df[o].sum():>3} / {df.shape[0]:>5}\t \n'
if not (j == len(self.rl_.rules) - 2 and i == len(antecedents) - 1):
str_print += '\t\u2193 \n'
self.str_print = str_print
| (self, X: numpy.ndarray, y: numpy.ndarray, feature_names: List[str], print_colors=False) |
17,642 | imodels.rule_list.corels_wrapper | fit |
Build a CORELS classifier from the training set (X, y).
Parameters
----------
X : array-like, shape = [n_samples, n_features]
The training input samples. All features must be binary, and the matrix
is internally converted to dtype=np.uint8.
y : array-like, shape = [n_samples]
The target values for the training input. Must be binary.
feature_names : list, optional(default=None)
A list of strings of length n_features. Specifies the names of each
of the features. If an empty list is provided, the feature names
are set to the default of ["feature1", "feature2"... ].
prediction_name : string, optional(default="prediction")
The name of the feature that is being predicted.
Returns
-------
self : obj
| def fit(self, X, y, feature_names=None, prediction_name="prediction"):
"""
Build a CORELS classifier from the training set (X, y).
Parameters
----------
X : array-like, shape = [n_samples, n_features]
The training input samples. All features must be binary, and the matrix
is internally converted to dtype=np.uint8.
y : array-like, shape = [n_samples]
The target values for the training input. Must be binary.
feature_names : list, optional(default=None)
A list of strings of length n_features. Specifies the names of each
of the features. If an empty list is provided, the feature names
are set to the default of ["feature1", "feature2"... ].
prediction_name : string, optional(default="prediction")
The name of the feature that is being predicted.
Returns
-------
self : obj
"""
if isinstance(X, pd.DataFrame):
if feature_names is None:
feature_names = X.columns.tolist()
X = X.values
elif feature_names is None:
feature_names = ['X_' + str(i) for i in range(X.shape[1])]
# check if any non-binary values
if not np.isin(X, [0, 1]).all().all():
self.discretizer = KBinsDiscretizer(encode='onehot-dense')
self.discretizer.fit(X, y)
"""
feature_names = [f'{col}_{b}'
for col, bins in zip(feature_names, self.discretizer.n_bins_)
for b in range(bins)]
"""
feature_names = self.discretizer.get_feature_names_out()
X = self.discretizer.transform(X)
np.random.seed(self.random_state)
# feature_names = feature_names.tolist()
super().fit(X, y, features=feature_names, prediction_name=prediction_name)
# try:
self._traverse_rule(X, y, feature_names)
# except:
# self.str_print = None
self.complexity_ = self._get_complexity()
return self
| (self, X, y, feature_names=None, prediction_name='prediction') |
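
When `X` is not already binary, `fit` binarizes it with `KBinsDiscretizer` using one-hot dense encoding, since CORELS requires 0/1 features. A standalone scikit-learn sketch of that step (the bin count here is chosen for brevity; the wrapper uses the discretizer's default):

```python
import numpy as np
from sklearn.preprocessing import KBinsDiscretizer

X = np.array([[0.1, 10.0], [0.4, 12.0], [0.9, 30.0], [0.7, 25.0]])
disc = KBinsDiscretizer(n_bins=2, encode="onehot-dense", strategy="quantile")
X_bin = disc.fit_transform(X)
print(disc.get_feature_names_out())  # one indicator column name per (feature, bin) pair
print(X_bin)                         # every entry is 0 or 1
```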
17,646 | imodels.rule_list.corels_wrapper | predict |
Predict classifications of the input samples X.
Arguments
---------
X : array-like, shape = [n_samples, n_features]
The training input samples. All features must be binary, and the matrix
is internally converted to dtype=np.uint8. The features must be the same
as those of the data used to train the model.
Returns
-------
p : array[int] of shape = [n_samples].
The classifications of the input samples.
| def predict(self, X):
"""
Predict classifications of the input samples X.
Arguments
---------
X : array-like, shape = [n_samples, n_features]
The training input samples. All features must be binary, and the matrix
is internally converted to dtype=np.uint8. The features must be the same
as those of the data used to train the model.
Returns
-------
p : array[int] of shape = [n_samples].
The classifications of the input samples.
"""
if self.discretizer is not None:
X = self.discretizer.transform(X)
return super().predict(X).astype(int)
| (self, X) |
17,647 | imodels.rule_list.corels_wrapper | predict_proba |
Predict probabilities of the input samples X.
todo: actually calculate these from training set
Arguments
---------
X : array-like, shape = [n_samples, n_features]
The training input samples. All features must be binary, and the matrix
is internally converted to dtype=np.uint8. The features must be the same
as those of the data used to train the model.
Returns
-------
p : array[float] of shape = [n_samples, 2].
The probabilities of the input samples.
| def predict_proba(self, X):
"""
Predict probabilities of the input samples X.
todo: actually calculate these from training set
Arguments
---------
X : array-like, shape = [n_samples, n_features]
The training input samples. All features must be binary, and the matrix
is internally converted to dtype=np.uint8. The features must be the same
as those of the data used to train the model.
Returns
-------
p : array[float] of shape = [n_samples, 2].
The probabilities of the input samples.
"""
preds = self.predict(X)
return np.vstack((1 - preds, preds)).transpose()
| (self, X) |
17,649 | sklearn.utils._metadata_requests | set_fit_request | Request metadata passed to the ``fit`` method.
Note that this method is only relevant if
``enable_metadata_routing=True`` (see :func:`sklearn.set_config`).
Please see :ref:`User Guide <metadata_routing>` on how the routing
mechanism works.
The options for each parameter are:
- ``True``: metadata is requested, and passed to ``fit`` if provided. The request is ignored if metadata is not provided.
- ``False``: metadata is not requested and the meta-estimator will not pass it to ``fit``.
- ``None``: metadata is not requested, and the meta-estimator will raise an error if the user provides it.
- ``str``: metadata should be passed to the meta-estimator with this given alias instead of the original name.
The default (``sklearn.utils.metadata_routing.UNCHANGED``) retains the
existing request. This allows you to change the request for some
parameters and not others.
.. versionadded:: 1.3
.. note::
This method is only relevant if this estimator is used as a
sub-estimator of a meta-estimator, e.g. used inside a
:class:`~sklearn.pipeline.Pipeline`. Otherwise it has no effect.
Parameters
----------
feature_names : str, True, False, or None, default=sklearn.utils.metadata_routing.UNCHANGED
Metadata routing for ``feature_names`` parameter in ``fit``.
prediction_name : str, True, False, or None, default=sklearn.utils.metadata_routing.UNCHANGED
Metadata routing for ``prediction_name`` parameter in ``fit``.
Returns
-------
self : object
The updated object.
| def __get__(self, instance, owner):
# we would want to have a method which accepts only the expected args
def func(**kw):
"""Updates the request for provided parameters
This docstring is overwritten below.
See REQUESTER_DOC for expected functionality
"""
if not _routing_enabled():
raise RuntimeError(
"This method is only available when metadata routing is enabled."
" You can enable it using"
" sklearn.set_config(enable_metadata_routing=True)."
)
if self.validate_keys and (set(kw) - set(self.keys)):
raise TypeError(
f"Unexpected args: {set(kw) - set(self.keys)}. Accepted arguments"
f" are: {set(self.keys)}"
)
requests = instance._get_metadata_request()
method_metadata_request = getattr(requests, self.name)
for prop, alias in kw.items():
if alias is not UNCHANGED:
method_metadata_request.add_request(param=prop, alias=alias)
instance._metadata_request = requests
return instance
# Now we set the relevant attributes of the function so that it seems
# like a normal method to the end user, with known expected arguments.
func.__name__ = f"set_{self.name}_request"
params = [
inspect.Parameter(
name="self",
kind=inspect.Parameter.POSITIONAL_OR_KEYWORD,
annotation=owner,
)
]
params.extend(
[
inspect.Parameter(
k,
inspect.Parameter.KEYWORD_ONLY,
default=UNCHANGED,
annotation=Optional[Union[bool, None, str]],
)
for k in self.keys
]
)
func.__signature__ = inspect.Signature(
params,
return_annotation=owner,
)
doc = REQUESTER_DOC.format(method=self.name)
for metadata in self.keys:
doc += REQUESTER_DOC_PARAM.format(metadata=metadata, method=self.name)
doc += REQUESTER_DOC_RETURN
func.__doc__ = doc
return func
| (self: imodels.rule_list.corels_wrapper.OptimalRuleListClassifier, *, feature_names: Union[bool, NoneType, str] = '$UNCHANGED$', prediction_name: Union[bool, NoneType, str] = '$UNCHANGED$') -> imodels.rule_list.corels_wrapper.OptimalRuleListClassifier |
17,652 | imodels.tree.gosdt.pygosdt | OptimalTreeClassifier | null | class OptimalTreeClassifier(GreedyTreeClassifier if not gosdt_supported else BaseEstimator):
def __init__(self,
balance=False,
cancellation=True,
look_ahead=True,
similar_support=True,
feature_exchange=True,
continuous_feature_exchange=True,
rule_list=False,
diagnostics=False,
verbose=False,
regularization=0.05,
uncertainty_tolerance=0.0,
upperbound=0.0,
model_limit=1,
precision_limit=0,
stack_limit=0,
tile_limit=0,
time_limit=0,
worker_limit=1,
random_state=None,
costs="",
model="",
profile="",
timing="",
trace="",
tree=""):
super().__init__()
self.balance = balance
self.cancellation = cancellation
self.look_ahead = look_ahead
self.similar_support = similar_support
self.feature_exchange = feature_exchange
self.continuous_feature_exchange = continuous_feature_exchange
self.rule_list = rule_list
self.diagnostics = diagnostics
self.verbose = verbose
self.regularization = regularization
self.uncertainty_tolerance = uncertainty_tolerance
self.upperbound = upperbound
self.model_limit = model_limit
self.precision_limit = precision_limit
self.stack_limit = stack_limit
self.tile_limit = tile_limit
self.time_limit = time_limit
self.worker_limit = worker_limit
self.costs = costs
self.model = model
self.profile = profile
self.timing = timing
self.trace = trace
self.tree = tree
self.tree_type = 'gosdt'
self.random_state = random_state
if random_state is not None:
np.random.seed(random_state)
def load(self, path):
"""
Parameters
---
path : string
path to a JSON file representing a model
"""
with open(path, 'r') as model_source:
result = model_source.read()
result = json.loads(result)
self.tree_ = TreeClassifier(result[0])
def fit(self, X, y, feature_names=None):
"""
Parameters
---
X : matrix-like, shape = [n_samples, m_features]
matrix containing the training samples and features
y : array-like, shape = [n_samples, 1]
column containing the correct label for each sample in X
Modifies
---
trains the model so that this model instance is ready for prediction
"""
try:
import gosdt
if not isinstance(X, pd.DataFrame):
self.feature_names_ = list(rule.get_feature_dict(X.shape[1], feature_names).keys())
X = pd.DataFrame(X, columns=self.feature_names_)
else:
self.feature_names_ = X.columns
if not isinstance(y, pd.DataFrame):
y = pd.DataFrame(y, columns=['target'])
# gosdt extension expects serialized CSV, which we generate via pandas
dataset_with_target = pd.concat((X, y), axis=1)
# Perform C++ extension calls to train the model
configuration = self._get_configuration()
gosdt.configure(json.dumps(configuration, separators=(',', ':')))
result = gosdt.fit(dataset_with_target.to_csv(index=False))
result = json.loads(result)
self.tree_ = TreeClassifier(result[0])
# Record the training time, number of iterations, and graph size required
self.time_ = gosdt.time()
self.iterations_ = gosdt.iterations()
self.size_ = gosdt.size()
except ImportError:
warnings.warn(
"Should install gosdt extension. On x86_64 linux or macOS: "
"'pip install gosdt-deprecated'. On other platforms, see "
"https://github.com/keyan3/GeneralizedOptimalSparseDecisionTrees. "
"Defaulting to Non-optimal DecisionTreeClassifier."
)
# dtree = DecisionTreeClassifierWithComplexity()
# dtree.fit(X, y)
# self.tree_ = dtree
super().fit(X, y, feature_names=feature_names)
self.tree_type = 'dt'
return self
def predict(self, X):
"""
Parameters
---
X : matrix-like, shape = [n_samples, m_features]
a matrix where each row is a sample to be predicted and each column is a feature to
be used for prediction
Returns
---
array-like, shape = [n_samples, 1] : a column where each element is the prediction
associated with each row
"""
validation.check_is_fitted(self)
if self.tree_type == 'gosdt':
if type(self.tree_) is TreeClassifier and not isinstance(X, pd.DataFrame):
X = pd.DataFrame(X, columns=self.feature_names_)
return self.tree_.predict(X)
else:
return super().predict(X)
def predict_proba(self, X):
validation.check_is_fitted(self)
if self.tree_type == 'gosdt':
if type(self.tree_) is TreeClassifier and not isinstance(X, pd.DataFrame):
X = pd.DataFrame(X, columns=self.feature_names_)
probs = np.expand_dims(self.tree_.confidence(X), axis=1)
return np.hstack((1 - probs, probs))
else:
return super().predict_proba(X)
def score(self, X, y, weight=None):
"""
Parameters
---
X : matrix-like, shape = [n_samples, m_features]
an n-by-m matrix of samples and their features
y : array-like, shape = [n_samples,]
an n-by-1 column of labels associated with each sample
weight : shape = [n_samples,]
an n-by-1 column of weights to apply to each sample's misclassification
Returns
---
real number : the accuracy produced by applying this model over the given dataset, with
optional sample weighting
"""
validation.check_is_fitted(self)
if type(self.tree_) is TreeClassifier:
if not isinstance(X, pd.DataFrame):
X = pd.DataFrame(X, columns=self.feature_names_)
return self.tree_.score(X, y, weight=weight)
else:
return self.tree_.score(X, y, sample_weight=weight)
def __len__(self):
"""
Returns
---
natural number : The number of terminal nodes present in this tree
"""
validation.check_is_fitted(self)
if type(self.tree_) is TreeClassifier:
return len(self.tree_)
else:
warnings.warn("Using DecisionTreeClassifier due to absence of gosdt package. "
"DecisionTreeClassifier does not have this method.")
return None
def leaves(self):
"""
Returns
---
natural number : The number of terminal nodes present in this tree
"""
validation.check_is_fitted(self)
if type(self.tree_) is TreeClassifier:
return self.tree_.leaves()
else:
return self.tree_.get_n_leaves()
def nodes(self):
"""
Returns
---
natural number : The number of nodes present in this tree
"""
validation.check_is_fitted(self)
if type(self.tree_) is TreeClassifier:
return self.tree_.nodes()
else:
warnings.warn("Using DecisionTreeClassifier due to absence of gosdt package. "
"DecisionTreeClassifier does not have this method.")
return None
def max_depth(self):
"""
Returns
---
natural number : the length of the longest decision path in this tree. A single-node tree
will return 1.
"""
validation.check_is_fitted(self)
if type(self.tree_) is TreeClassifier:
return self.tree_.maximum_depth()
else:
return self.tree_.get_depth()
def latex(self):
"""
Note
---
This method doesn't work well for label headers that contain underscores due to underscore
being a reserved character in LaTeX
Returns
---
string : A LaTeX string representing the model
"""
validation.check_is_fitted(self)
if type(self.tree_) is TreeClassifier:
return self.tree_.latex()
else:
warnings.warn("Using DecisionTreeClassifier due to absence of gosdt package. "
"DecisionTreeClassifier does not have this method.")
return None
def json(self):
"""
Returns
---
string : A JSON string representing the model
"""
validation.check_is_fitted(self)
if type(self.tree_) is TreeClassifier:
return self.tree_.json()
else:
warnings.warn("Using DecisionTreeClassifier due to absence of gosdt package. "
"DecisionTreeClassifier does not have this method.")
return None
def _get_configuration(self):
return {
"balance": self.balance,
"cancellation": self.cancellation,
"look_ahead": self.look_ahead,
"similar_support": self.similar_support,
"feature_exchange": self.feature_exchange,
"continuous_feature_exchange": self.continuous_feature_exchange,
"rule_list": self.rule_list,
"diagnostics": self.diagnostics,
"verbose": self.verbose,
"regularization": self.regularization,
"uncertainty_tolerance": self.uncertainty_tolerance,
"upperbound": self.upperbound,
"model_limit": self.model_limit,
"precision_limit": self.precision_limit,
"stack_limit": self.stack_limit,
"tile_limit": self.tile_limit,
"time_limit": self.time_limit,
"worker_limit": self.worker_limit,
"costs": self.costs,
"model": self.model,
"profile": self.profile,
"timing": self.timing,
"trace": self.trace,
"tree": self.tree
}
| (balance=False, cancellation=True, look_ahead=True, similar_support=True, feature_exchange=True, continuous_feature_exchange=True, rule_list=False, diagnostics=False, verbose=False, regularization=0.05, uncertainty_tolerance=0.0, upperbound=0.0, model_limit=1, precision_limit=0, stack_limit=0, tile_limit=0, time_limit=0, worker_limit=1, random_state=None, costs='', model='', profile='', timing='', trace='', tree='') |
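
A minimal usage sketch, assuming `OptimalTreeClassifier` is exported from the top-level `imodels` package; if the optional `gosdt` extension is missing, `fit` falls back to the greedy decision tree as the warning above describes:

```python
import numpy as np
from imodels import OptimalTreeClassifier  # assumed top-level export

rng = np.random.default_rng(0)
X = rng.integers(0, 2, size=(200, 6)).astype(float)
y = ((X[:, 0] > 0) & (X[:, 1] > 0)).astype(int)
m = OptimalTreeClassifier(regularization=0.1, time_limit=30)
m.fit(X, y)
print(m.predict(X[:5]))
print((m.predict(X) == y).mean())
```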
17,654 | imodels.tree.gosdt.pygosdt | __init__ | null | def __init__(self,
balance=False,
cancellation=True,
look_ahead=True,
similar_support=True,
feature_exchange=True,
continuous_feature_exchange=True,
rule_list=False,
diagnostics=False,
verbose=False,
regularization=0.05,
uncertainty_tolerance=0.0,
upperbound=0.0,
model_limit=1,
precision_limit=0,
stack_limit=0,
tile_limit=0,
time_limit=0,
worker_limit=1,
random_state=None,
costs="",
model="",
profile="",
timing="",
trace="",
tree=""):
super().__init__()
self.balance = balance
self.cancellation = cancellation
self.look_ahead = look_ahead
self.similar_support = similar_support
self.feature_exchange = feature_exchange
self.continuous_feature_exchange = continuous_feature_exchange
self.rule_list = rule_list
self.diagnostics = diagnostics
self.verbose = verbose
self.regularization = regularization
self.uncertainty_tolerance = uncertainty_tolerance
self.upperbound = upperbound
self.model_limit = model_limit
self.precision_limit = precision_limit
self.stack_limit = stack_limit
self.tile_limit = tile_limit
self.time_limit = time_limit
self.worker_limit = worker_limit
self.costs = costs
self.model = model
self.profile = profile
self.timing = timing
self.trace = trace
self.tree = tree
self.tree_type = 'gosdt'
self.random_state = random_state
if random_state is not None:
np.random.seed(random_state)
| (self, balance=False, cancellation=True, look_ahead=True, similar_support=True, feature_exchange=True, continuous_feature_exchange=True, rule_list=False, diagnostics=False, verbose=False, regularization=0.05, uncertainty_tolerance=0.0, upperbound=0.0, model_limit=1, precision_limit=0, stack_limit=0, tile_limit=0, time_limit=0, worker_limit=1, random_state=None, costs='', model='', profile='', timing='', trace='', tree='') |
17,655 | imodels.tree.gosdt.pygosdt | __len__ |
Returns
---
natural number : The number of terminal nodes present in this tree
| def __len__(self):
"""
Returns
---
natural number : The number of terminal nodes present in this tree
"""
validation.check_is_fitted(self)
if type(self.tree_) is TreeClassifier:
return len(self.tree_)
else:
warnings.warn("Using DecisionTreeClassifier due to absence of gosdt package. "
"DecisionTreeClassifier does not have this method.")
return None
| (self) |
17,664 | imodels.tree.gosdt.pygosdt | _get_configuration | null | def _get_configuration(self):
return {
"balance": self.balance,
"cancellation": self.cancellation,
"look_ahead": self.look_ahead,
"similar_support": self.similar_support,
"feature_exchange": self.feature_exchange,
"continuous_feature_exchange": self.continuous_feature_exchange,
"rule_list": self.rule_list,
"diagnostics": self.diagnostics,
"verbose": self.verbose,
"regularization": self.regularization,
"uncertainty_tolerance": self.uncertainty_tolerance,
"upperbound": self.upperbound,
"model_limit": self.model_limit,
"precision_limit": self.precision_limit,
"stack_limit": self.stack_limit,
"tile_limit": self.tile_limit,
"time_limit": self.time_limit,
"worker_limit": self.worker_limit,
"costs": self.costs,
"model": self.model,
"profile": self.profile,
"timing": self.timing,
"trace": self.trace,
"tree": self.tree
}
| (self) |
17,680 | imodels.tree.gosdt.pygosdt | fit |
Parameters
---
X : matrix-like, shape = [n_samples, m_features]
matrix containing the training samples and features
y : array-like, shape = [n_samples, 1]
column containing the correct label for each sample in X
Modifies
---
trains the model so that this model instance is ready for prediction
| def fit(self, X, y, feature_names=None):
"""
Parameters
---
X : matrix-like, shape = [n_samples, m_features]
matrix containing the training samples and features
y : array-like, shape = [n_samples, 1]
column containing the correct label for each sample in X
Modifies
---
trains the model so that this model instance is ready for prediction
"""
try:
import gosdt
if not isinstance(X, pd.DataFrame):
self.feature_names_ = list(rule.get_feature_dict(X.shape[1], feature_names).keys())
X = pd.DataFrame(X, columns=self.feature_names_)
else:
self.feature_names_ = X.columns
if not isinstance(y, pd.DataFrame):
y = pd.DataFrame(y, columns=['target'])
# gosdt extension expects serialized CSV, which we generate via pandas
dataset_with_target = pd.concat((X, y), axis=1)
# Perform C++ extension calls to train the model
configuration = self._get_configuration()
gosdt.configure(json.dumps(configuration, separators=(',', ':')))
result = gosdt.fit(dataset_with_target.to_csv(index=False))
result = json.loads(result)
self.tree_ = TreeClassifier(result[0])
# Record the training time, number of iterations, and graph size required
self.time_ = gosdt.time()
self.iterations_ = gosdt.iterations()
self.size_ = gosdt.size()
except ImportError:
warnings.warn(
"Should install gosdt extension. On x86_64 linux or macOS: "
"'pip install gosdt-deprecated'. On other platforms, see "
"https://github.com/keyan3/GeneralizedOptimalSparseDecisionTrees. "
"Defaulting to Non-optimal DecisionTreeClassifier."
)
# dtree = DecisionTreeClassifierWithComplexity()
# dtree.fit(X, y)
# self.tree_ = dtree
super().fit(X, y, feature_names=feature_names)
self.tree_type = 'dt'
return self
| (self, X, y, feature_names=None) |
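
The hand-off to the `gosdt` extension above is a compact JSON configuration plus the training data serialized as CSV with the label as the last column. Only the serialization is sketched here; the `gosdt.configure` / `gosdt.fit` calls require the optional extension:

```python
import json
import pandas as pd

X = pd.DataFrame({"f0": [0, 1, 1, 0], "f1": [1, 1, 0, 0]})
y = pd.DataFrame({"target": [1, 1, 0, 0]})
csv_payload = pd.concat((X, y), axis=1).to_csv(index=False)  # label is the last column
config_json = json.dumps({"regularization": 0.05, "time_limit": 0}, separators=(",", ":"))
print(config_json)
print(csv_payload)
```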
17,685 | imodels.tree.gosdt.pygosdt | json |
Returns
---
string : A JSON string representing the model
| def json(self):
"""
Returns
---
string : A JSON string representing the model
"""
validation.check_is_fitted(self)
if type(self.tree_) is TreeClassifier:
return self.tree_.json()
else:
warnings.warn("Using DecisionTreeClassifier due to absence of gosdt package. "
"DecisionTreeClassifier does not have this method.")
return None
| (self) |
17,686 | imodels.tree.gosdt.pygosdt | latex |
Note
---
This method doesn't work well for label headers that contain underscores due to underscore
being a reserved character in LaTeX
Returns
---
string : A LaTeX string representing the model
| def latex(self):
"""
Note
---
This method doesn't work well for label headers that contain underscores due to underscore
being a reserved character in LaTeX
Returns
---
string : A LaTeX string representing the model
"""
validation.check_is_fitted(self)
if type(self.tree_) is TreeClassifier:
return self.tree_.latex()
else:
warnings.warn("Using DecisionTreeClassifier due to absence of gosdt package. "
"DecisionTreeClassifier does not have this method.")
return None
| (self) |
17,687 | imodels.tree.gosdt.pygosdt | leaves |
Returns
---
natural number : The number of terminal nodes present in this tree
| def leaves(self):
"""
Returns
---
natural number : The number of terminal nodes present in this tree
"""
validation.check_is_fitted(self)
if type(self.tree_) is TreeClassifier:
return self.tree_.leaves()
else:
return self.tree_.get_n_leaves()
| (self) |
17,688 | imodels.tree.gosdt.pygosdt | load |
Parameters
---
path : string
path to a JSON file representing a model
| def load(self, path):
"""
Parameters
---
path : string
path to a JSON file representing a model
"""
with open(path, 'r') as model_source:
result = model_source.read()
result = json.loads(result)
self.tree_ = TreeClassifier(result[0])
| (self, path) |
17,689 | imodels.tree.gosdt.pygosdt | max_depth |
Returns
---
natural number : the length of the longest decision path in this tree. A single-node tree
will return 1.
| def max_depth(self):
"""
Returns
---
natural number : the length of the longest decision path in this tree. A single-node tree
will return 1.
"""
validation.check_is_fitted(self)
if type(self.tree_) is TreeClassifier:
return self.tree_.maximum_depth()
else:
return self.tree_.get_depth()
| (self) |
17,690 | imodels.tree.gosdt.pygosdt | nodes |
Returns
---
natural number : The number of nodes present in this tree
| def nodes(self):
"""
Returns
---
natural number : The number of nodes present in this tree
"""
validation.check_is_fitted(self)
if type(self.tree_) is TreeClassifier:
return self.tree_.nodes()
else:
warnings.warn("Using DecisionTreeClassifier due to absence of gosdt package. "
"DecisionTreeClassifier does not have this method.")
return None
| (self) |
17,691 | imodels.tree.gosdt.pygosdt | predict |
Parameters
---
X : matrix-like, shape = [n_samples, m_features]
a matrix where each row is a sample to be predicted and each column is a feature to
be used for prediction
Returns
---
array-like, shape = [n_samples, 1] : a column where each element is the prediction
associated with each row
| def predict(self, X):
"""
Parameters
---
X : matrix-like, shape = [n_samples, m_features]
a matrix where each row is a sample to be predicted and each column is a feature to
be used for prediction
Returns
---
array-like, shape = [n_samples, 1] : a column where each element is the prediction
associated with each row
"""
validation.check_is_fitted(self)
if self.tree_type == 'gosdt':
if type(self.tree_) is TreeClassifier and not isinstance(X, pd.DataFrame):
X = pd.DataFrame(X, columns=self.feature_names_)
return self.tree_.predict(X)
else:
return super().predict(X)
| (self, X) |
17,693 | imodels.tree.gosdt.pygosdt | predict_proba | null | def predict_proba(self, X):
validation.check_is_fitted(self)
if self.tree_type == 'gosdt':
if type(self.tree_) is TreeClassifier and not isinstance(X, pd.DataFrame):
X = pd.DataFrame(X, columns=self.feature_names_)
probs = np.expand_dims(self.tree_.confidence(X), axis=1)
return np.hstack((1 - probs, probs))
else:
return super().predict_proba(X)
| (self, X) |
17,694 | imodels.tree.gosdt.pygosdt | score |
Parameters
---
X : matrix-like, shape = [n_samples, m_features]
an n-by-m matrix of samples and their features
y : array-like, shape = [n_samples,]
an n-by-1 column of labels associated with each sample
weight : shape = [n_samples,]
an n-by-1 column of weights to apply to each sample's misclassification
Returns
---
real number : the accuracy produced by applying this model over the given dataset, with
optional sample weighting
| def score(self, X, y, weight=None):
"""
Parameters
---
X : matrix-like, shape = [n_samples, m_features]
an n-by-m matrix of samples and their features
y : array-like, shape = [n_samples,]
an n-by-1 column of labels associated with each sample
weight : shape = [n_samples,]
an n-by-1 column of weights to apply to each sample's misclassification
Returns
---
real number : the accuracy produced by applying this model over the given dataset, with
optional sample weighting
"""
validation.check_is_fitted(self)
if type(self.tree_) is TreeClassifier:
if not isinstance(X, pd.DataFrame):
X = pd.DataFrame(X, columns=self.feature_names_)
return self.tree_.score(X, y, weight=weight)
else:
return self.tree_.score(X, y, sample_weight=weight)
| (self, X, y, weight=None) |
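The weight argument lets individual misclassifications count more or less toward accuracy. A small, self-contained illustration of sample-weighted accuracy with scikit-learn (toy labels and weights):

```python
import numpy as np
from sklearn.metrics import accuracy_score

y_true = np.array([1, 0, 1, 1])
y_pred = np.array([1, 0, 0, 1])
weight = np.array([1.0, 1.0, 3.0, 1.0])  # the third sample counts three times

print(accuracy_score(y_true, y_pred))                        # 0.75 unweighted
print(accuracy_score(y_true, y_pred, sample_weight=weight))  # 0.5 weighted
```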
17,700 | imodels.discretization.discretizer | RFDiscretizer |
Discretize numeric data into bins using RF splits.
Parameters
----------
rf_model : RandomForestClassifier() or RandomForestRegressor()
RF model from which to extract splits for discretization.
Default is RandomForestClassifier(n_estimators = 500) or
RandomForestRegressor(n_estimators = 500)
classification : boolean; default=False
Used only if rf_model=None. If True,
rf_model=RandomForestClassifier(n_estimators = 500).
Else, rf_model=RandomForestRegressor(n_estimators = 500)
n_bins : int or array-like of shape (len(dcols),), default=2
Number of bins to discretize each feature into.
dcols : list of strings
The names of the columns to be discretized; by default,
discretize all float and int columns in X.
encode : {‘onehot’, ‘ordinal’}, default=’onehot’
Method used to encode the transformed result.
onehot - Encode the transformed result with one-hot encoding and
return a dense array.
ordinal - Return the bin identifier encoded as an integer value.
strategy : {‘uniform’, ‘quantile’}, default=’quantile’
Strategy used to choose RF split points.
uniform - RF split points chosen to be uniformly spaced out.
quantile - RF split points chosen based on equally-spaced quantiles.
backup_strategy : {‘uniform’, ‘quantile’, ‘kmeans’}, default=’quantile’
Strategy used to define the widths of the bins if no rf splits exist for
that feature. Used in KBinsDiscretizer.
uniform
All bins in each feature have identical widths.
quantile
All bins in each feature have the same number of points.
kmeans
Values in each bin have the same nearest center of a 1D
k-means cluster.
onehot_drop : {‘first’, ‘if_binary’} or array-like of shape (len(dcols),), default='if_binary'
Specifies a methodology to use to drop one of the categories
per feature when encode = "onehot".
None
Retain all features (the default).
‘first’
Drop the first category in each feature. If only one category
is present, the feature will be dropped entirely.
‘if_binary’
Drop the first category in each feature with two categories.
Features with 1 or more than 2 categories are left intact.
Attributes
----------
rf_splits : dictionary where
key = feature name
value = array of all RF split threshold values
bin_edges_ : dictionary where
key = feature name
value = array of bin edges used for discretization, taken from
RF split values
missing_rf_cols_ : array-like
List of features that were not used in RF
backup_discretizer_ : object of class BasicDiscretizer()
Discretization method used to bin numeric data for features
in missing_rf_cols_
onehot_ : object of class OneHotEncoder()
One hot encoding fit. Ignored if encode != 'onehot'
| class RFDiscretizer(AbstractDiscretizer):
"""
Discretize numeric data into bins using RF splits.
Parameters
----------
rf_model : RandomForestClassifier() or RandomForestRegressor()
RF model from which to extract splits for discretization.
Default is RandomForestClassifier(n_estimators = 500) or
RandomForestRegressor(n_estimators = 500)
classification : boolean; default=False
Used only if rf_model=None. If True,
rf_model=RandomForestClassifier(n_estimators = 500).
Else, rf_model=RandomForestRegressor(n_estimators = 500)
n_bins : int or array-like of shape (len(dcols),), default=2
Number of bins to discretize each feature into.
dcols : list of strings
The names of the columns to be discretized; by default,
discretize all float and int columns in X.
encode : {‘onehot’, ‘ordinal’}, default=’onehot’
Method used to encode the transformed result.
onehot - Encode the transformed result with one-hot encoding and
return a dense array.
ordinal - Return the bin identifier encoded as an integer value.
strategy : {‘uniform’, ‘quantile’}, default=’quantile’
Strategy used to choose RF split points.
uniform - RF split points chosen to be uniformly spaced out.
quantile - RF split points chosen based on equally-spaced quantiles.
backup_strategy : {‘uniform’, ‘quantile’, ‘kmeans’}, default=’quantile’
Strategy used to define the widths of the bins if no rf splits exist for
that feature. Used in KBinsDiscretizer.
uniform
All bins in each feature have identical widths.
quantile
All bins in each feature have the same number of points.
kmeans
Values in each bin have the same nearest center of a 1D
k-means cluster.
onehot_drop : {‘first’, ‘if_binary’} or array-like of shape (len(dcols),), default='if_binary'
Specifies a methodology to use to drop one of the categories
per feature when encode = "onehot".
None
Retain all features (the default).
‘first’
Drop the first category in each feature. If only one category
is present, the feature will be dropped entirely.
‘if_binary’
Drop the first category in each feature with two categories.
Features with 1 or more than 2 categories are left intact.
Attributes
----------
rf_splits : dictionary where
key = feature name
value = array of all RF split threshold values
bin_edges_ : dictionary where
key = feature name
value = array of bin edges used for discretization, taken from
RF split values
missing_rf_cols_ : array-like
List of features that were not used in RF
backup_discretizer_ : object of class BasicDiscretizer()
Discretization method used to bin numeric data for features
in missing_rf_cols_
onehot_ : object of class OneHotEncoder()
One hot encoding fit. Ignored if encode != 'onehot'
"""
def __init__(self, rf_model=None, classification=False,
n_bins=2, dcols=[], encode='onehot',
strategy='quantile', backup_strategy='quantile',
onehot_drop='if_binary'):
super().__init__(n_bins=n_bins, dcols=dcols,
encode=encode, strategy=strategy,
onehot_drop=onehot_drop)
self.backup_strategy = backup_strategy
self.rf_model = rf_model
if rf_model is None:
self.classification = classification
def _validate_args(self):
"""
Check if encode, strategy, backup_strategy arguments are valid.
"""
super()._validate_args()
valid_backup_strategy = ('uniform', 'quantile', 'kmeans')
if (self.backup_strategy not in valid_backup_strategy):
raise ValueError("Valid options for 'strategy' are {}. Got strategy={!r} instead."
.format(valid_backup_strategy, self.backup_strategy))
def _get_rf_splits(self, col_names):
"""
Get all splits in random forest ensemble
Parameters
----------
col_names : array-like of shape (n_features,)
Column names for X used to train rf_model
Returns
-------
rule_dict : dictionary where
key = feature name
value = array of all RF split threshold values
"""
rule_dict = {}
for model in self.rf_model.estimators_:
tree = model.tree_
tree_it = enumerate(zip(tree.children_left,
tree.children_right,
tree.feature,
tree.threshold))
for node_idx, data in tree_it:
left, right, feature, th = data
if (left != -1) | (right != -1):
feature = col_names[feature]
if feature in rule_dict:
rule_dict[feature].append(th)
else:
rule_dict[feature] = [th]
return rule_dict
def _fit_rf(self, X, y=None):
"""
Fit random forest (if necessary) and obtain RF split thresholds
Parameters
----------
X : data frame of shape (n_samples, n_features)
Training data used to fit RF
y : array-like of shape (n_samples,)
Training response vector used to fit RF
Returns
-------
rf_splits : dictionary where
key = feature name
value = array of all RF split threshold values
"""
# If no rf_model given, train default random forest model
if self.rf_model is None:
if y is None:
raise ValueError("Must provide y if rf_model is not given.")
if self.classification:
self.rf_model = RandomForestClassifier(n_estimators=500)
else:
self.rf_model = RandomForestRegressor(n_estimators=500)
self.rf_model.fit(X, y)
else:
# provided rf model has not yet been trained
if not check_is_fitted(self.rf_model):
if y is None:
raise ValueError(
"Must provide y if rf_model has not been trained.")
self.rf_model.fit(X, y)
# get all random forest split points
self.rf_splits = self._get_rf_splits(list(X.columns))
def reweight_n_bins(self, X, y=None, by="nsplits"):
"""
Reallocate number of bins per feature.
Parameters
----------
X : data frame of shape (n_samples, n_features)
(Training) data to be discretized.
y : array-like of shape (n_samples,)
(Training) response vector. Required only if
rf_model = None or rf_model has not yet been fitted
by : {'nsplits'}, default='nsplits'
Specifies how to reallocate number of bins per feature.
nsplits
Reallocate number of bins so that each feature
in dcols gets a minimum of 2 bins, with the
remaining bins distributed proportionally to the
number of RF splits using that feature
Returns
-------
self.n_bins : array of shape (len(dcols),)
number of bins per feature reallocated according to
'by' argument
"""
# initialization and error checking
self._fit_preprocessing(X)
# get all random forest split points
self._fit_rf(X=X, y=y)
# get total number of bins to reallocate
total_bins = self.n_bins.sum()
# reweight n_bins
if by == "nsplits":
# each col gets at least 2 bins; remaining bins get
# reallocated based on number of RF splits using that feature
n_rules = np.array([len(self.rf_splits[col])
for col in self.dcols_])
self.n_bins = np.round(n_rules / n_rules.sum() *
(total_bins - 2 * len(self.dcols_))) + 2
else:
valid_by = ('nsplits')
raise ValueError("Valid options for 'by' are {}. Got by={!r} instead."
.format(valid_by, by))
def fit(self, X, y=None):
"""
Fit the estimator.
Parameters
----------
X : data frame of shape (n_samples, n_features)
(Training) data to be discretized.
y : array-like of shape (n_samples,)
(Training) response vector. Required only if
rf_model = None or rf_model has not yet been fitted
Returns
-------
self
"""
# initialization and error checking
self._fit_preprocessing(X)
# get all random forest split points
self._fit_rf(X=X, y=y)
# features that were not used in the rf but need to be discretized
self.missing_rf_cols_ = list(set(self.dcols_) -
set(self.rf_splits.keys()))
if len(self.missing_rf_cols_) > 0:
print("{} did not appear in random forest so were discretized via {} discretization"
.format(self.missing_rf_cols_, self.strategy))
missing_n_bins = np.array([self.n_bins[np.array(self.dcols_) == col][0]
for col in self.missing_rf_cols_])
backup_discretizer = BasicDiscretizer(n_bins=missing_n_bins,
dcols=self.missing_rf_cols_,
encode='ordinal',
strategy=self.backup_strategy)
backup_discretizer.fit(X[self.missing_rf_cols_])
self.backup_discretizer_ = backup_discretizer
else:
self.backup_discretizer_ = None
if self.encode == 'onehot':
if len(self.missing_rf_cols_) > 0:
discretized_df = backup_discretizer.transform(
X[self.missing_rf_cols_])
else:
discretized_df = pd.DataFrame({}, index=X.index)
# do discretization based on rf split thresholds
self.bin_edges_ = dict()
for col in self.dcols_:
if col in self.rf_splits.keys():
b = self.n_bins[np.array(self.dcols_) == col]
if self.strategy == "quantile":
q_values = np.linspace(0, 1, int(b) + 1)
bin_edges = np.quantile(self.rf_splits[col], q_values)
elif self.strategy == "uniform":
width = (max(self.rf_splits[col]) -
min(self.rf_splits[col])) / b
bin_edges = width * \
np.arange(0, b + 1) + min(self.rf_splits[col])
self.bin_edges_[col] = bin_edges
if self.encode == 'onehot':
discretized_df[col] = self._discretize_to_bins(
X[col], bin_edges)
# fit onehot encoded X if specified
if self.encode == "onehot":
onehot = OneHotEncoder(drop=self.onehot_drop) # , sparse=False)
onehot.fit(discretized_df[self.dcols_].astype(str))
self.onehot_ = onehot
return self
def transform(self, X):
"""
Discretize the data.
Parameters
----------
X : data frame of shape (n_samples, n_features)
Data to be discretized.
Returns
-------
X_discretized : data frame
Data with features in dcols transformed to the
binned space. All other features remain unchanged.
"""
check_is_fitted(self)
# transform features that did not appear in RF
if len(self.missing_rf_cols_) > 0:
discretized_df = self.backup_discretizer_.transform(
X[self.missing_rf_cols_])
discretized_df = pd.DataFrame(discretized_df,
columns=self.missing_rf_cols_,
index=X.index)
else:
discretized_df = pd.DataFrame({}, index=X.index)
# do discretization based on rf split thresholds
for col in self.bin_edges_.keys():
discretized_df[col] = self._discretize_to_bins(
X[col], self.bin_edges_[col])
# return onehot encoded data if specified and
# join discretized columns with rest of X
X_discretized = self._transform_postprocessing(discretized_df, X)
return X_discretized
| (rf_model=None, classification=False, n_bins=2, dcols=[], encode='onehot', strategy='quantile', backup_strategy='quantile', onehot_drop='if_binary') |
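A usage sketch for the class above; the import path follows this entry's package column and the arguments follow its docstring, but treat both as assumptions rather than a guaranteed public API:

```python
from sklearn.datasets import load_diabetes
from imodels.discretization.discretizer import RFDiscretizer  # assumed path

X, y = load_diabetes(return_X_y=True, as_frame=True)

disc = RFDiscretizer(classification=False, n_bins=3, encode='ordinal')
disc.fit(X, y)                # trains a default RandomForestRegressor internally
X_binned = disc.transform(X)  # numeric columns replaced by bin identifiers
print(X_binned.head())
```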
17,702 | imodels.discretization.discretizer | __init__ | null | def __init__(self, rf_model=None, classification=False,
n_bins=2, dcols=[], encode='onehot',
strategy='quantile', backup_strategy='quantile',
onehot_drop='if_binary'):
super().__init__(n_bins=n_bins, dcols=dcols,
encode=encode, strategy=strategy,
onehot_drop=onehot_drop)
self.backup_strategy = backup_strategy
self.rf_model = rf_model
if rf_model is None:
self.classification = classification
| (self, rf_model=None, classification=False, n_bins=2, dcols=[], encode='onehot', strategy='quantile', backup_strategy='quantile', onehot_drop='if_binary') |
17,710 | imodels.discretization.discretizer | _fit_rf |
Fit random forest (if necessary) and obtain RF split thresholds
Parameters
----------
X : data frame of shape (n_samples, n_features)
Training data used to fit RF
y : array-like of shape (n_samples,)
Training response vector used to fit RF
Returns
-------
rf_splits : dictionary where
key = feature name
value = array of all RF split threshold values
| def _fit_rf(self, X, y=None):
"""
Fit random forest (if necessary) and obtain RF split thresholds
Parameters
----------
X : data frame of shape (n_samples, n_features)
Training data used to fit RF
y : array-like of shape (n_samples,)
Training response vector used to fit RF
Returns
-------
rf_splits : dictionary where
key = feature name
value = array of all RF split threshold values
"""
# If no rf_model given, train default random forest model
if self.rf_model is None:
if y is None:
raise ValueError("Must provide y if rf_model is not given.")
if self.classification:
self.rf_model = RandomForestClassifier(n_estimators=500)
else:
self.rf_model = RandomForestRegressor(n_estimators=500)
self.rf_model.fit(X, y)
else:
# provided rf model has not yet been trained
if not check_is_fitted(self.rf_model):
if y is None:
raise ValueError(
"Must provide y if rf_model has not been trained.")
self.rf_model.fit(X, y)
# get all random forest split points
self.rf_splits = self._get_rf_splits(list(X.columns))
| (self, X, y=None) |
17,713 | imodels.discretization.discretizer | _get_rf_splits |
Get all splits in random forest ensemble
Parameters
----------
col_names : array-like of shape (n_features,)
Column names for X used to train rf_model
Returns
-------
rule_dict : dictionary where
key = feature name
value = array of all RF split threshold values
| def _get_rf_splits(self, col_names):
"""
Get all splits in random forest ensemble
Parameters
----------
col_names : array-like of shape (n_features,)
Column names for X used to train rf_model
Returns
-------
rule_dict : dictionary where
key = feature name
value = array of all RF split threshold values
"""
rule_dict = {}
for model in self.rf_model.estimators_:
tree = model.tree_
tree_it = enumerate(zip(tree.children_left,
tree.children_right,
tree.feature,
tree.threshold))
for node_idx, data in tree_it:
left, right, feature, th = data
if (left != -1) | (right != -1):
feature = col_names[feature]
if feature in rule_dict:
rule_dict[feature].append(th)
else:
rule_dict[feature] = [th]
return rule_dict
| (self, col_names) |
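The same traversal can be reproduced with nothing but scikit-learn internals: walk each tree's parallel arrays and keep the threshold of every internal node. A standalone sketch on toy data, with the dictionary laid out like the rule_dict described above:

```python
from sklearn.datasets import load_diabetes
from sklearn.ensemble import RandomForestRegressor

X, y = load_diabetes(return_X_y=True, as_frame=True)
rf = RandomForestRegressor(n_estimators=10, random_state=0).fit(X, y)

splits = {}
for est in rf.estimators_:
    t = est.tree_
    for left, right, feat, thresh in zip(t.children_left, t.children_right,
                                         t.feature, t.threshold):
        if left != -1 or right != -1:  # internal node, i.e. an actual split
            splits.setdefault(X.columns[feat], []).append(thresh)

print({name: len(vals) for name, vals in splits.items()})  # splits per feature
```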
17,719 | imodels.discretization.discretizer | _validate_args |
Check if encode, strategy, backup_strategy arguments are valid.
| def _validate_args(self):
"""
Check if encode, strategy, backup_strategy arguments are valid.
"""
super()._validate_args()
valid_backup_strategy = ('uniform', 'quantile', 'kmeans')
if (self.backup_strategy not in valid_backup_strategy):
raise ValueError("Valid options for 'backup_strategy' are {}. Got backup_strategy={!r} instead."
.format(valid_backup_strategy, self.backup_strategy))
| (self) |
17,724 | imodels.discretization.discretizer | fit |
Fit the estimator.
Parameters
----------
X : data frame of shape (n_samples, n_features)
(Training) data to be discretized.
y : array-like of shape (n_samples,)
(Training) response vector. Required only if
rf_model = None or rf_model has not yet been fitted
Returns
-------
self
| def fit(self, X, y=None):
"""
Fit the estimator.
Parameters
----------
X : data frame of shape (n_samples, n_features)
(Training) data to be discretized.
y : array-like of shape (n_samples,)
(Training) response vector. Required only if
rf_model = None or rf_model has not yet been fitted
Returns
-------
self
"""
# initialization and error checking
self._fit_preprocessing(X)
# get all random forest split points
self._fit_rf(X=X, y=y)
# features that were not used in the rf but need to be discretized
self.missing_rf_cols_ = list(set(self.dcols_) -
set(self.rf_splits.keys()))
if len(self.missing_rf_cols_) > 0:
print("{} did not appear in random forest so were discretized via {} discretization"
.format(self.missing_rf_cols_, self.strategy))
missing_n_bins = np.array([self.n_bins[np.array(self.dcols_) == col][0]
for col in self.missing_rf_cols_])
backup_discretizer = BasicDiscretizer(n_bins=missing_n_bins,
dcols=self.missing_rf_cols_,
encode='ordinal',
strategy=self.backup_strategy)
backup_discretizer.fit(X[self.missing_rf_cols_])
self.backup_discretizer_ = backup_discretizer
else:
self.backup_discretizer_ = None
if self.encode == 'onehot':
if len(self.missing_rf_cols_) > 0:
discretized_df = backup_discretizer.transform(
X[self.missing_rf_cols_])
else:
discretized_df = pd.DataFrame({}, index=X.index)
# do discretization based on rf split thresholds
self.bin_edges_ = dict()
for col in self.dcols_:
if col in self.rf_splits.keys():
b = self.n_bins[np.array(self.dcols_) == col]
if self.strategy == "quantile":
q_values = np.linspace(0, 1, int(b) + 1)
bin_edges = np.quantile(self.rf_splits[col], q_values)
elif self.strategy == "uniform":
width = (max(self.rf_splits[col]) -
min(self.rf_splits[col])) / b
bin_edges = width * \
np.arange(0, b + 1) + min(self.rf_splits[col])
self.bin_edges_[col] = bin_edges
if self.encode == 'onehot':
discretized_df[col] = self._discretize_to_bins(
X[col], bin_edges)
# fit onehot encoded X if specified
if self.encode == "onehot":
onehot = OneHotEncoder(drop=self.onehot_drop) # , sparse=False)
onehot.fit(discretized_df[self.dcols_].astype(str))
self.onehot_ = onehot
return self
| (self, X, y=None) |
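The bin edges built inside fit() come directly from the collected split thresholds. A toy calculation showing both strategies on made-up thresholds for a single feature:

```python
import numpy as np

splits = np.array([0.5, 1.0, 1.5, 2.0, 4.0, 8.0])   # hypothetical RF thresholds
b = 3                                               # bins for this feature

quantile_edges = np.quantile(splits, np.linspace(0, 1, b + 1))
width = (splits.max() - splits.min()) / b
uniform_edges = splits.min() + width * np.arange(0, b + 1)

print(quantile_edges)   # edges at evenly spaced quantiles of the thresholds
print(uniform_edges)    # [0.5 3.  5.5 8. ]
```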
17,728 | imodels.discretization.discretizer | reweight_n_bins |
Reallocate number of bins per feature.
Parameters
----------
X : data frame of shape (n_samples, n_features)
(Training) data to be discretized.
y : array-like of shape (n_samples,)
(Training) response vector. Required only if
rf_model = None or rf_model has not yet been fitted
by : {'nsplits'}, default='nsplits'
Specifies how to reallocate number of bins per feature.
nsplits
Reallocate number of bins so that each feature
in dcols gets a minimum of 2 bins, with the
remaining bins distributed proportionally to the
number of RF splits using that feature
Returns
-------
self.n_bins : array of shape (len(dcols),)
number of bins per feature reallocated according to
'by' argument
| def reweight_n_bins(self, X, y=None, by="nsplits"):
"""
Reallocate number of bins per feature.
Parameters
----------
X : data frame of shape (n_samples, n_features)
(Training) data to be discretized.
y : array-like of shape (n_samples,)
(Training) response vector. Required only if
rf_model = None or rf_model has not yet been fitted
by : {'nsplits'}, default='nsplits'
Specifies how to reallocate number of bins per feature.
nsplits
Reallocate number of bins so that each feature
in dcols gets a minimum of 2 bins, with the
remaining bins distributed proportionally to the
number of RF splits using that feature
Returns
-------
self.n_bins : array of shape (len(dcols),)
number of bins per feature reallocated according to
'by' argument
"""
# initialization and error checking
self._fit_preprocessing(X)
# get all random forest split points
self._fit_rf(X=X, y=y)
# get total number of bins to reallocate
total_bins = self.n_bins.sum()
# reweight n_bins
if by == "nsplits":
# each col gets at least 2 bins; remaining bins get
# reallocated based on number of RF splits using that feature
n_rules = np.array([len(self.rf_splits[col])
for col in self.dcols_])
self.n_bins = np.round(n_rules / n_rules.sum() *
(total_bins - 2 * len(self.dcols_))) + 2
else:
valid_by = ('nsplits')
raise ValueError("Valid options for 'by' are {}. Got by={!r} instead."
.format(valid_by, by))
| (self, X, y=None, by='nsplits') |
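The reallocation rule is easiest to see with toy numbers: every feature keeps two bins, and the surplus is split in proportion to the RF split counts.

```python
import numpy as np

n_bins = np.array([4, 4, 4])      # initial bins per feature, total = 12
n_rules = np.array([20, 15, 5])   # hypothetical RF split counts per feature

total_bins = n_bins.sum()
surplus = total_bins - 2 * len(n_bins)              # 6 bins left to distribute
reweighted = np.round(n_rules / n_rules.sum() * surplus) + 2
print(reweighted)                                   # [5. 4. 3.] -- still 12 bins
```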
17,732 | imodels.util.rule | Rule | An object that models a logical rule and adds factorization methods.
It is used to simplify rules and deduplicate them.
Parameters
----------
rule : str
The logical rule that is interpretable by a pandas query.
args : object, optional
Arguments associated with the rule; they are not used for factorization
but are included in the output when the rule is converted to an array.
| class Rule:
""" An object modeling a logical rule and add factorization methods.
It is used to simplify rules and deduplicate them.
Parameters
----------
rule : str
The logical rule that is interpretable by a pandas query.
args : object, optional
Arguments associated with the rule; they are not used for factorization
but are included in the output when the rule is converted to an array.
"""
def __init__(self, rule, args=None, support=None):
self.rule = rule
self.args = args
self.support = support
self.terms = [t.split(' ') for t in self.rule.split(' and ')]
self.agg_dict = {}
self.factorize()
self.rule = str(self)
def __eq__(self, other):
return self.agg_dict == other.agg_dict
def __hash__(self):
# FIXME : Easier method ?
return hash(tuple(sorted(((i, j) for i, j in self.agg_dict.items()))))
def factorize(self) -> None:
for feature, symbol, value in self.terms:
if (feature, symbol) not in self.agg_dict:
if symbol != '==':
self.agg_dict[(feature, symbol)] = str(float(value))
else:
self.agg_dict[(feature, symbol)] = value
else:
if symbol[0] == '<':
self.agg_dict[(feature, symbol)] = str(min(
float(self.agg_dict[(feature, symbol)]),
float(value)))
elif symbol[0] == '>':
self.agg_dict[(feature, symbol)] = str(max(
float(self.agg_dict[(feature, symbol)]),
float(value)))
else: # Handle the c0 == c0 case
self.agg_dict[(feature, symbol)] = value
def __iter__(self):
yield str(self)
yield self.args
def __repr__(self):
return ' and '.join([' '.join(
[feature, symbol, str(self.agg_dict[(feature, symbol)])])
for feature, symbol in sorted(self.agg_dict.keys())
])
| (rule, args=None, support=None) |
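A short usage sketch of the class above (the import path follows this entry's package column; the rule strings are toy examples). Redundant bounds on the same (feature, operator) pair collapse during factorization, and equality and hashing operate on the factorized form, which is what enables deduplication.

```python
from imodels.util.rule import Rule  # path taken from this entry

r1 = Rule('X_0 <= 3.0 and X_0 <= 5.0 and X_1 > 1.0')
r2 = Rule('X_1 > 1.0 and X_0 <= 3.0')

print(r1)             # X_0 <= 3.0 and X_1 > 1.0  (looser X_0 bound dropped)
print(r1 == r2)       # True: both factorize to the same agg_dict
print(len({r1, r2}))  # 1: hashing on the factorized form deduplicates
```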
17,733 | imodels.util.rule | __eq__ | null | def __eq__(self, other):
return self.agg_dict == other.agg_dict
| (self, other) |
17,734 | imodels.util.rule | __hash__ | null | def __hash__(self):
# FIXME : Easier method ?
return hash(tuple(sorted(((i, j) for i, j in self.agg_dict.items()))))
| (self) |
17,735 | imodels.util.rule | __init__ | null | def __init__(self, rule, args=None, support=None):
self.rule = rule
self.args = args
self.support = support
self.terms = [t.split(' ') for t in self.rule.split(' and ')]
self.agg_dict = {}
self.factorize()
self.rule = str(self)
| (self, rule, args=None, support=None) |
17,736 | imodels.util.rule | __iter__ | null | def __iter__(self):
yield str(self)
yield self.args
| (self) |
17,737 | imodels.util.rule | __repr__ | null | def __repr__(self):
return ' and '.join([' '.join(
[feature, symbol, str(self.agg_dict[(feature, symbol)])])
for feature, symbol in sorted(self.agg_dict.keys())
])
| (self) |
17,738 | imodels.util.rule | factorize | null | def factorize(self) -> None:
for feature, symbol, value in self.terms:
if (feature, symbol) not in self.agg_dict:
if symbol != '==':
self.agg_dict[(feature, symbol)] = str(float(value))
else:
self.agg_dict[(feature, symbol)] = value
else:
if symbol[0] == '<':
self.agg_dict[(feature, symbol)] = str(min(
float(self.agg_dict[(feature, symbol)]),
float(value)))
elif symbol[0] == '>':
self.agg_dict[(feature, symbol)] = str(max(
float(self.agg_dict[(feature, symbol)]),
float(value)))
else: # Handle the c0 == c0 case
self.agg_dict[(feature, symbol)] = value
| (self) -> NoneType |
17,739 | imodels.rule_set.rule_fit | RuleFitClassifier | null | class RuleFitClassifier(RuleFit, ClassifierMixin):
...
| (n_estimators=100, tree_size=4, sample_fract='default', max_rules=30, memory_par=0.01, tree_generator=None, lin_trim_quantile=0.025, lin_standardise=True, exp_rand_tree_size=True, include_linear=True, alpha=None, cv=True, random_state=None) |
17,741 | imodels.rule_set.rule_fit | __init__ | null | def __init__(self,
n_estimators=100,
tree_size=4,
sample_fract='default',
max_rules=30,
memory_par=0.01,
tree_generator=None,
lin_trim_quantile=0.025,
lin_standardise=True,
exp_rand_tree_size=True,
include_linear=True,
alpha=None,
cv=True,
random_state=None):
self.n_estimators = n_estimators
self.tree_size = tree_size
self.sample_fract = sample_fract
self.max_rules = max_rules
self.memory_par = memory_par
self.tree_generator = tree_generator
self.lin_trim_quantile = lin_trim_quantile
self.lin_standardise = lin_standardise
self.exp_rand_tree_size = exp_rand_tree_size
self.include_linear = include_linear
self.alpha = alpha
self.cv = cv
self.random_state = random_state
self.winsorizer = Winsorizer(trim_quantile=self.lin_trim_quantile)
self.friedscale = FriedScale(self.winsorizer)
self.stddev = None
self.mean = None
| (self, n_estimators=100, tree_size=4, sample_fract='default', max_rules=30, memory_par=0.01, tree_generator=None, lin_trim_quantile=0.025, lin_standardise=True, exp_rand_tree_size=True, include_linear=True, alpha=None, cv=True, random_state=None) |
17,749 | imodels.rule_set.rule_fit | _extract_rules | null | def _extract_rules(self, X, y) -> List[str]:
return extract_rulefit(X, y,
feature_names=self.feature_placeholders,
n_estimators=self.n_estimators,
tree_size=self.tree_size,
memory_par=self.memory_par,
tree_generator=self.tree_generator,
exp_rand_tree_size=self.exp_rand_tree_size,
random_state=self.random_state)
| (self, X, y) -> List[str] |
17,763 | imodels.rule_set.rule_fit | fit | Fit and estimate a linear combination of the rule ensemble
| def fit(self, X, y=None, feature_names=None):
"""Fit and estimate linear combination of rule ensemble
"""
X, y, feature_names = check_fit_arguments(self, X, y, feature_names)
self.n_features_ = X.shape[1]
self.feature_dict_ = get_feature_dict(X.shape[1], feature_names)
self.feature_placeholders = np.array(list(self.feature_dict_.keys()))
self.feature_names = np.array(list(self.feature_dict_.values()))
extracted_rules = self._extract_rules(X, y)
self.rules_without_feature_names_, self.coef, self.intercept = self._score_rules(
X, y, extracted_rules)
self.rules_ = [
replace_feature_name(rule, self.feature_dict_) for rule in self.rules_without_feature_names_
]
# count total rule terms, plus nonzero linear terms
self.complexity_ = self._get_complexity()
if self.include_linear:
self.complexity_ += np.sum(
np.array(self.coef[:X.shape[1]]) != 0)
return self
| (self, X, y=None, feature_names=None) |
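A usage sketch for the public wrappers shown in this section; the top-level `from imodels import ...` export is an assumption, and the dataset is a stand-in:

```python
from sklearn.datasets import load_breast_cancer
from sklearn.model_selection import train_test_split
from imodels import RuleFitClassifier  # assumed top-level export

X, y = load_breast_cancer(return_X_y=True, as_frame=True)
X_tr, X_te, y_tr, y_te = train_test_split(X, y, random_state=0)

model = RuleFitClassifier(max_rules=10, random_state=0)
model.fit(X_tr, y_tr, feature_names=X_tr.columns)
print(model.score(X_te, y_te))
print(model.complexity_)  # rule terms + nonzero linear terms, as counted in fit()
```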
17,777 | imodels.rule_set.rule_fit | RuleFitRegressor | null | class RuleFitRegressor(RuleFit, RegressorMixin):
...
| (n_estimators=100, tree_size=4, sample_fract='default', max_rules=30, memory_par=0.01, tree_generator=None, lin_trim_quantile=0.025, lin_standardise=True, exp_rand_tree_size=True, include_linear=True, alpha=None, cv=True, random_state=None) |