code | signature | docstring | loss_without_docstring | loss_with_docstring | factor
string | string | string | float64 | float64 | float64
---|---|---|---|---|---
if msg is None:
msg = ("This %(name)s instance is not fitted yet. Call 'fit' with "
"appropriate arguments before using this method.")
if not hasattr(estimator, 'fit'):
raise TypeError("%s is not an estimator instance." % (estimator))
if not isinstance(attributes, (list, tuple)):
attributes = [attributes]
if not all_or_any([hasattr(estimator, attr) for attr in attributes]):
raise NotFittedError(msg % {'name': type(estimator).__name__}) | def check_is_fitted(estimator, attributes, msg=None, all_or_any=all) | Perform is_fitted validation for estimator.
Checks if the estimator is fitted by verifying the presence of
"all_or_any" of the passed attributes and raises a NotFittedError with the
given message.
Parameters
----------
estimator : estimator instance.
estimator instance for which the check is performed.
attributes : attribute name(s) given as string or a list/tuple of strings
Eg.:
``["coef_", "estimator_", ...], "coef_"``
msg : string
The default error message is, "This %(name)s instance is not fitted
yet. Call 'fit' with appropriate arguments before using this method."
For custom messages if "%(name)s" is present in the message string,
it is substituted for the estimator name.
Eg. : "Estimator, %(name)s, must be fitted before sparsifying".
all_or_any : callable, {all, any}, default all
Specify whether all or any of the given attributes must exist.
Returns
-------
None
Raises
------
NotFittedError
If the attributes are not found. | 1.61315 | 1.764779 | 0.914081 |
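A minimal usage sketch of this validator (the `ExampleEstimator` class below is hypothetical and not part of the listing): the check raises `NotFittedError` until `fit` has set the trailing-underscore attribute.

```python
# Hypothetical sketch of how check_is_fitted is typically used.
class ExampleEstimator(object):
    def fit(self, X):
        self.coef_ = [0.0] * len(X[0])   # fitted attributes end with an underscore
        return self

    def predict(self, X):
        check_is_fitted(self, 'coef_')   # raises NotFittedError if fit() was never called
        return [0 for _ in X]
```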
if issparse(a) or issparse(b):
ret = a * b
if dense_output and hasattr(ret, "toarray"):
ret = ret.toarray()
return ret
else:
return np.dot(a, b) | def safe_sparse_dot(a, b, dense_output=False) | Dot product that handles the sparse matrix case correctly
Uses BLAS GEMM as replacement for numpy.dot where possible
to avoid unnecessary copies.
Parameters
----------
a : array or sparse matrix
b : array or sparse matrix
dense_output : boolean, default False
When False, either ``a`` or ``b`` being sparse will yield sparse
output. When True, output will always be an array.
Returns
-------
dot_product : array or sparse matrix
sparse if ``a`` or ``b`` is sparse and ``dense_output=False``. | 2.347307 | 2.978354 | 0.788122 |
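A small sketch, assuming SciPy is available; when both inputs are sparse the product stays sparse unless `dense_output=True`.

```python
import numpy as np
from scipy.sparse import csr_matrix

a = csr_matrix(np.eye(3))
b = csr_matrix(np.arange(9.0).reshape(3, 3))

ret_sparse = safe_sparse_dot(a, b)                    # stays a sparse matrix
ret_dense = safe_sparse_dot(a, b, dense_output=True)  # converted via .toarray()
```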
check_is_fitted(self, "classes_")
X = check_array(X, accept_sparse='csr')
X_bin = self._transform_data(X)
n_classes, n_features = self.feature_log_prob_.shape
n_samples, n_features_X = X_bin.shape
if n_features_X != n_features:
raise ValueError(
"Expected input with %d features, got %d instead" %
(n_features, n_features_X))
# see chapter 4.1 of http://www.cs.columbia.edu/~mcollins/em.pdf
# implementation as in Formula 4.
jll = safe_sparse_dot(X_bin, self.feature_log_prob_.T)
jll += self.class_log_prior_
return jll | def _joint_log_likelihood(self, X) | Calculate the posterior log probability of the samples X | 3.3223 | 3.322658 | 0.999892 |
jll = self._joint_log_likelihood(X)
return self.classes_[np.argmax(jll, axis=1)] | def predict(self, X) | Perform classification on an array of test vectors X.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = [n_samples]
Predicted target values for X | 5.175931 | 7.980291 | 0.648589 |
jll = self._joint_log_likelihood(X)
# normalize by P(x) = P(f_1, ..., f_n)
log_prob_x = logsumexp(jll, axis=1)  # shape = (n_samples,)
return jll - np.atleast_2d(log_prob_x).T | def predict_log_proba(self, X) | Return log-probability estimates for the test vector X.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array-like, shape = [n_samples, n_classes]
Returns the log-probability of the samples for each class in
the model. The columns correspond to the classes in sorted
order, as they appear in the attribute `classes_`. | 4.892324 | 6.402531 | 0.764124 |
if self.binarize is not None:
X = binarize(X, threshold=self.binarize)
for i in range(X.shape[1]):
# initialise binarizer and save
binarizer = LabelBinarizer()
# fit the data to the binarizer
binarizer.fit(X[:, i])
self._binarizers.append(binarizer)
return self._transform_data(X) | def _fit_data(self, X) | Binarize the data for each column separately.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
X_transformed : array-like
Returns the data where in each columns the labels are
binarized. | 3.355282 | 3.309823 | 1.013735 |
if self._binarizers == []:
raise NotFittedError()
if self.binarize is not None:
X = binarize(X, threshold=self.binarize)
if len(self._binarizers) != X.shape[1]:
raise ValueError(
"Expected input with %d features, got %d instead" %
(len(self._binarizers), X.shape[1]))
X_parts = []
for i in range(X.shape[1]):
X_i = self._binarizers[i].transform(X[:, i])
# sklearn returns ndarray with shape (samples, 1) on binary input.
if self._binarizers[i].classes_.shape[0] == 2:
X_parts.append(1 - X_i)
X_parts.append(X_i)
return np.concatenate(X_parts, axis=1) | def _transform_data(self, X) | Binarize the data for each column separately. | 2.830775 | 2.686101 | 1.05386 |
self.feature_count_ += safe_sparse_dot(Y.T, X)
self.class_count_ += Y.sum(axis=0) | def _count(self, X, Y) | Count and smooth feature occurrences. | 5.343689 | 4.534741 | 1.178389 |
smoothed_fc = self.feature_count_ + alpha
smoothed_cc = self.class_count_ + alpha * 2
self.feature_log_prob_ = (np.log(smoothed_fc) -
np.log(smoothed_cc.reshape(-1, 1))) | def _update_feature_log_prob(self, alpha) | Apply smoothing to raw counts and recompute log probabilities | 3.801997 | 3.238249 | 1.174091 |
X, y = check_X_y(X, y, 'csr')
# Transform the data with a label binarizer. Each column gets
# transformed into N columns (one column per distinct value). For
# a situation with 0 and 1 outcome values, the result is two
# columns.
X_bin = self._fit_data(X)
_, n_features = X_bin.shape
# prepare Y
labelbin = LabelBinarizer()
Y = labelbin.fit_transform(y)
self.classes_ = labelbin.classes_
if Y.shape[1] == 1:
Y = np.concatenate((1 - Y, Y), axis=1)
# LabelBinarizer().fit_transform() returns arrays with dtype=np.int64.
# We convert it to np.float64 to support sample_weight consistently;
# this means we also don't have to cast X to floating point
Y = Y.astype(np.float64)
if sample_weight is not None:
sample_weight = np.atleast_2d(sample_weight)
Y *= check_array(sample_weight).T
class_prior = self.class_prior
# Count raw events from data before updating the class log prior
# and feature log probas
n_effective_classes = Y.shape[1]
self.class_count_ = np.zeros(n_effective_classes, dtype=np.float64)
self.feature_count_ = np.zeros((n_effective_classes, n_features),
dtype=np.float64)
self._count(X_bin, Y)
alpha = self._check_alpha()
self._update_feature_log_prob(alpha)
self._update_class_log_prior(class_prior=class_prior)
return self | def fit(self, X, y, sample_weight=None) | Fit Naive Bayes classifier according to X, y
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target values.
sample_weight : array-like, shape = [n_samples], (default=None)
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : object | 4.235739 | 4.51453 | 0.938246 |
_, n_features = X_bin.shape
# The parameter class_log_prior_ has shape (2,). The values represent
# 'match' and 'non-match'.
rand_vals = np.random.rand(2)
class_prior = rand_vals / np.sum(rand_vals)
# make empty array of feature log probs
# dimensions 2xn_features
feature_prob = np.zeros((2, n_features))
feat_i = 0
for i, bin in enumerate(self._binarizers):
bin_len = bin.classes_.shape[0]
rand_vals_0 = np.random.rand(bin_len)
feature_prob[0, feat_i:feat_i + bin_len] = \
rand_vals_0 / np.sum(rand_vals_0)
rand_vals_1 = np.random.rand(bin_len)
feature_prob[1, feat_i:feat_i + bin_len] = \
rand_vals_1 / np.sum(rand_vals_1)
feat_i += bin_len
return np.log(class_prior), np.log(feature_prob) | def _init_parameters_random(self, X_bin) | Initialise parameters for unsupervised learning. | 2.933282 | 2.832074 | 1.035736 |
_, n_features = X_bin.shape
class_prior = [.9, .1]
feature_prob = np.zeros((2, n_features))
for i, bin in enumerate(self._binarizers):
if bin.classes_.shape[0] != 2:
raise ValueError("Only binary labels are allowed for "
"'jaro'method. "
"Column {} has {} different labels.".format(
i, bin.classes_.shape[0]))
# TODO: ensure classes are [0, 1] (not [1, 0])
# TODO: check with bin.y_type_
feature_prob[0, :] = np.tile([.9, .1], int(n_features / 2))
feature_prob[1, :] = np.tile([.1, .9], int(n_features / 2))
return np.log(class_prior), np.log(feature_prob) | def _init_parameters_jaro(self, X_bin) | Initialise parameters for unsupervised learning. | 3.926342 | 3.864623 | 1.01597 |
X = check_array(X, accept_sparse='csr')
# count frequencies of elements in vector space
# based on https://stackoverflow.com/a/33235665
# faster than numpy.unique
X_unique, X_freq = np.unique(X, axis=0, return_counts=True)
X_freq = np.atleast_2d(X_freq)
# Transform the data with a label binarizer. Each column gets
# transformed into N columns (one column per distinct value). For
# a situation with 0 and 1 outcome values, the result is two
# columns.
X_unique_bin = self._fit_data(X_unique)
_, n_features = X_unique_bin.shape
# initialise parameters
self.classes_ = np.array([0, 1])
if is_string_like(self.init) and self.init == 'random':
self.class_log_prior_, self.feature_log_prob_ = \
self._init_parameters_random(X_unique_bin)
elif is_string_like(self.init) and self.init == 'jaro':
self.class_log_prior_, self.feature_log_prob_ = \
self._init_parameters_jaro(X_unique_bin)
else:
raise ValueError("'{}' is not a valid value for "
"argument 'init'".format(self.init))
iteration = 0
stop_iteration = False
self._log_class_log_prior = np.atleast_2d(self.class_log_prior_)
self._log_feature_log_prob = np.atleast_3d(self.feature_log_prob_)
while iteration < self.max_iter and not stop_iteration:
# expectation step
g = self.predict_proba(X_unique)
g_freq = g * X_freq.T
g_freq_sum = g_freq.sum(axis=0)
# maximisation step
class_log_prior_ = np.log(g_freq_sum) - np.log(X.shape[0]) # p
feature_log_prob_ = np.log(safe_sparse_dot(g_freq.T, X_unique_bin))
feature_log_prob_ -= np.log(np.atleast_2d(g_freq_sum).T)
# Stop iterating when the class prior and feature probs are close
# to the values in the previous iteration (parameters starting
# with 'self').
class_log_prior_close = np.allclose(
class_log_prior_, self.class_log_prior_, atol=self.atol)
feature_log_prob_close = np.allclose(
feature_log_prob_, self.feature_log_prob_, atol=self.atol)
if (class_log_prior_close and feature_log_prob_close):
stop_iteration = True
if np.all(np.isnan(feature_log_prob_)):
stop_iteration = True
# Update the class prior and feature probs.
self.class_log_prior_ = class_log_prior_
self.feature_log_prob_ = feature_log_prob_
# create logs
self._log_class_log_prior = np.concatenate(
[self._log_class_log_prior,
np.atleast_2d(self.class_log_prior_)]
)
self._log_feature_log_prob = np.concatenate(
[self._log_feature_log_prob,
np.atleast_3d(self.feature_log_prob_)], axis=2
)
# Increment counter
iteration += 1
return self | def fit(self, X) | Fit ECM classifier according to X
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
Returns
-------
self : object
Returns self. | 2.682482 | 2.753159 | 0.974329 |
concat_arrays = numpy.concatenate([array1, array2])
unique_values = numpy.unique(concat_arrays)
return numpy.sort(unique_values) | def _get_sorting_key_values(self, array1, array2) | return the sorting key values as a series | 3.688311 | 3.716073 | 0.992529 |
try:
import networkx as nx
except ImportError:
raise Exception("'networkx' module is needed for this operation")
G = nx.Graph()
G.add_edges_from(links.values)
connected_components = nx.connected_component_subgraphs(G)
links_result = [pd.MultiIndex.from_tuples(subgraph.edges())
for subgraph in connected_components]
return links_result | def compute(self, links) | Return the connected components.
Parameters
----------
links : pandas.MultiIndex
The links to apply one-to-one matching on.
Returns
-------
list of pandas.MultiIndex
A list with pandas.MultiIndex objects. Each MultiIndex
object represents a set of connected record pairs. | 3.726181 | 2.861037 | 1.302388 |
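A hedged usage sketch; the class wrapping this `compute` method is not shown in the listing, so the `ConnectedComponents` name below is an assumption. The result groups record pairs that share a record.

```python
import pandas as pd

links = pd.MultiIndex.from_tuples([('a', 'b'), ('b', 'c'), ('x', 'y')])

# Assumed class name; substitute the actual class that exposes this compute() method.
# components = ConnectedComponents().compute(links)
# -> [MultiIndex([('a', 'b'), ('b', 'c')]), MultiIndex([('x', 'y')])]
```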
# compute the probabilities
probs = self.kernel.predict_proba(features)
# get the position of match probabilities
classes = list(self.kernel.classes_)
match_class_position = classes.index(1)
return probs[:, match_class_position] | def _prob_match(self, features) | Compute match probabilities.
Parameters
----------
features : numpy.ndarray
The data to train the model on.
Returns
-------
numpy.ndarray
The match probabilties. | 5.287089 | 5.294407 | 0.998618 |
from sklearn.exceptions import NotFittedError
try:
prediction = self.kernel.predict_classes(features)[:, 0]
except NotFittedError:
raise NotFittedError(
"{} is not fitted yet. Call 'fit' with appropriate "
"arguments before using this method.".format(
type(self).__name__
)
)
return prediction | def _predict(self, features) | Predict matches and non-matches.
Parameters
----------
features : numpy.ndarray
The data to predict the class of.
Returns
-------
numpy.ndarray
The predicted classes. | 2.929927 | 2.902962 | 1.009289 |
index = df.index.to_series()
keys = index.str.extract(r'rec-(\d+)', expand=True)[0]
index_int = numpy.arange(len(df))
df_helper = pandas.DataFrame({
'key': keys,
'index': index_int
})
# merge the two frame and make MultiIndex.
pairs_df = df_helper.merge(
df_helper, on='key'
)[['index_x', 'index_y']]
pairs_df = pairs_df[pairs_df['index_x'] > pairs_df['index_y']]
return pandas.MultiIndex(
levels=[df.index.values, df.index.values],
labels=[pairs_df['index_x'].values, pairs_df['index_y'].values],
names=[None, None],
verify_integrity=False
) | def _febrl_links(df) | Get the links of a FEBRL dataset. | 3.462474 | 3.353767 | 1.032413 |
df = _febrl_load_data('dataset1.csv')
if return_links:
links = _febrl_links(df)
return df, links
else:
return df | def load_febrl1(return_links=False) | Load the FEBRL 1 dataset.
The Freely Extensible Biomedical Record Linkage (Febrl) package is
distributed with a dataset generator and four datasets generated
with the generator. This function returns the first Febrl dataset
as a :class:`pandas.DataFrame`.
*"This data set contains 1000 records (500 original and
500 duplicates, with exactly one duplicate per original
record."*
Parameters
----------
return_links: bool
When True, the function returns also the true links.
Returns
-------
pandas.DataFrame
A :class:`pandas.DataFrame` with Febrl dataset1.csv. When
return_links is True, the function returns also the true
links. The true links are all links in the lower triangular
part of the matrix. | 3.420375 | 3.719585 | 0.919558 |
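Typical use, assuming the package layout `recordlinkage.datasets`:

```python
from recordlinkage.datasets import load_febrl1

df = load_febrl1()
df, true_links = load_febrl1(return_links=True)
print(len(df), len(true_links))   # 1000 records, 500 true duplicate pairs
```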
df = _febrl_load_data('dataset2.csv')
if return_links:
links = _febrl_links(df)
return df, links
else:
return df | def load_febrl2(return_links=False) | Load the FEBRL 2 dataset.
The Freely Extensible Biomedical Record Linkage (Febrl) package is
distributed with a dataset generator and four datasets generated
with the generator. This function returns the second Febrl dataset
as a :class:`pandas.DataFrame`.
*"This data set contains 5000 records (4000 originals and
1000 duplicates), with a maximum of 5 duplicates based on
one original record (and a poisson distribution of
duplicate records). Distribution of duplicates:
19 originals records have 5 duplicate records
47 originals records have 4 duplicate records
107 originals records have 3 duplicate records
141 originals records have 2 duplicate records
114 originals records have 1 duplicate record
572 originals records have no duplicate record"*
Parameters
----------
return_links: bool
When True, the function returns also the true links.
Returns
-------
pandas.DataFrame
A :class:`pandas.DataFrame` with Febrl dataset2.csv. When
return_links is True, the function returns also the true
links. The true links are all links in the lower triangular
part of the matrix. | 3.502085 | 3.759706 | 0.931479 |
df = _febrl_load_data('dataset3.csv')
if return_links:
links = _febrl_links(df)
return df, links
else:
return df | def load_febrl3(return_links=False) | Load the FEBRL 3 dataset.
The Freely Extensible Biomedical Record Linkage (Febrl) package is
distributed with a dataset generator and four datasets generated
with the generator. This function returns the third Febrl dataset
as a :class:`pandas.DataFrame`.
*"This data set contains 5000 records (2000 originals and
3000 duplicates), with a maximum of 5 duplicates based on
one original record (and a Zipf distribution of duplicate
records). Distribution of duplicates:
168 originals records have 5 duplicate records
161 originals records have 4 duplicate records
212 originals records have 3 duplicate records
256 originals records have 2 duplicate records
368 originals records have 1 duplicate record
1835 originals records have no duplicate record"*
Parameters
----------
return_links: bool
When True, the function returns also the true links.
Returns
-------
pandas.DataFrame
A :class:`pandas.DataFrame` with Febrl dataset3.csv. When
return_links is True, the function returns also the true
links. The true links are all links in the lower triangular
part of the matrix. | 3.459276 | 3.797715 | 0.910884 |
df_a = _febrl_load_data('dataset4a.csv')
df_b = _febrl_load_data('dataset4b.csv')
if return_links:
links = pandas.MultiIndex.from_arrays([
["rec-{}-org".format(i) for i in range(0, 5000)],
["rec-{}-dup-0".format(i) for i in range(0, 5000)]]
)
return df_a, df_b, links
else:
return df_a, df_b | def load_febrl4(return_links=False) | Load the FEBRL 4 datasets.
The Freely Extensible Biomedical Record Linkage (Febrl) package is
distributed with a dataset generator and four datasets generated
with the generator. This function returns the fourth Febrl dataset
as a :class:`pandas.DataFrame`.
*"Generated as one data set with 10000 records (5000
originals and 5000 duplicates, with one duplicate per
original), the originals have been split from the
duplicates, into dataset4a.csv (containing the 5000
original records) and dataset4b.csv (containing the
5000 duplicate records) These two data sets can be
used for testing linkage procedures."*
Parameters
----------
return_links: bool
When True, the function returns also the true links.
Returns
-------
(pandas.DataFrame, pandas.DataFrame)
A :class:`pandas.DataFrame` with Febrl dataset4a.csv and a pandas
dataframe with Febrl dataset4b.csv. When return_links is True,
the function returns also the true links. | 3.108817 | 2.606197 | 1.192856 |
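Because the fourth dataset is split into two frames, it suits a linking (rather than deduplication) workflow; a short sketch assuming `recordlinkage.datasets`:

```python
from recordlinkage.datasets import load_febrl4

df_a, df_b, true_links = load_febrl4(return_links=True)
print(len(df_a), len(df_b), len(true_links))   # 5000, 5000, 5000
```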
# If the data is not found, download it.
for i in range(1, 11):
filepath = os.path.join(os.path.dirname(__file__),
'krebsregister', 'block_{}.zip'.format(i))
if not os.path.exists(filepath):
_download_krebsregister()
break
if isinstance(block, (list, tuple)):
data = pandas.concat([_krebsregister_block(bl) for bl in block])
else:
data = _krebsregister_block(block)
if shuffle:
data = data.sample(frac=1, random_state=535)
match_index = data.index[data['is_match']]
del data['is_match']
if pandas.notnull(missing_values):
data.fillna(missing_values, inplace=True)
return data, match_index | def load_krebsregister(block=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
missing_values=None, shuffle=True) | Load the Krebsregister dataset.
This dataset of comparison patterns was obtained in a
epidemiological cancer study in Germany. The comparison patterns
were created by the Institute for Medical Biostatistics,
Epidemiology and Informatics (IMBEI) and the University Medical
Center of Johannes Gutenberg University (Mainz, Germany). The
dataset is available for research online.
"The records represent individual data including first and
family name, sex, date of birth and postal code, which were
collected through iterative insertions in the course of
several years. The comparison patterns in this data set are
based on a sample of 100.000 records dating from 2005 to 2008.
Data pairs were classified as "match" or "non-match" during
an extensive manual review where several documentarists were
involved. The resulting classification formed the basis for
assessing the quality of the registry's own record linkage
procedure.
In order to limit the amount of patterns a blocking procedure
was applied, which selects only record pairs that meet
specific agreement conditions. The results of the following
six blocking iterations were merged together:
- Phonetic equality of first name and family name, equality of
date of birth.
- Phonetic equality of first name, equality of day of birth.
- Phonetic equality of first name, equality of month of birth.
- Phonetic equality of first name, equality of year of birth.
- Equality of complete date of birth.
- Phonetic equality of family name, equality of sex.
This procedure resulted in 5.749.132 record pairs, of which
20.931 are matches. The data set is split into 10 blocks of
(approximately) equal size and ratio of matches to
non-matches."
Parameters
----------
block : int, list
An integer or a list with integers between 1 and 10. The
blocks are the blocks explained in the description.
missing_values : object, int, float
The value of the missing values. Default NaN.
shuffle : bool
Shuffle the record pairs. Default True.
Returns
-------
(pandas.DataFrame, pandas.MultiIndex)
A pandas.DataFrame with comparison vectors and a
pandas.MultiIndex with the indices of the matches. | 2.808951 | 2.758921 | 1.018134 |
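A short usage sketch; the first call downloads the blocks if they are not cached locally.

```python
from recordlinkage.datasets import load_krebsregister

# Load two of the ten blocks and replace missing comparison values with 0.
krebs_vectors, krebs_matches = load_krebsregister(block=[1, 2], missing_values=0)
```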
# encoding
if sys.version_info[0] == 2:
s = s.apply(
lambda x: x.decode(encoding, decode_error)
if type(x) == bytes else x)
if concat:
s = s.str.replace(r"[\-\_\s]", "")
for alg in _phonetic_algorithms:
if method in alg['argument_names']:
phonetic_callback = alg['callback']
break
else:
raise ValueError("The algorithm '{}' is not known.".format(method))
return s.str.upper().apply(
lambda x: phonetic_callback(x) if pandas.notnull(x) else np.nan
) | def phonetic(s, method, concat=True, encoding='utf-8', decode_error='strict') | Convert names or strings into phonetic codes.
The implemented algorithms are `soundex
<https://en.wikipedia.org/wiki/Soundex>`_, `nysiis
<https://en.wikipedia.org/wiki/New_York_State_Identification_and_
Intelligence_System>`_, `metaphone
<https://en.wikipedia.org/wiki/Metaphone>`_ or `match_rating
<https://en.wikipedia.org/wiki/Match_rating_approach>`_.
Parameters
----------
s : pandas.Series
A pandas.Series with string values (often names) to encode.
method: str
The algorithm that is used to phonetically encode the values.
The possible options are "soundex", "nysiis", "metaphone" or
"match_rating".
concat: bool, optional
Remove whitespace before phonetic encoding.
encoding: str, optional
If bytes are given, this encoding is used to decode. Default
is 'utf-8'.
decode_error: {'strict', 'ignore', 'replace'}, optional
Instruction on what to do if a byte Series is given that
contains characters not of the given `encoding`. By default,
it is 'strict', meaning that a UnicodeDecodeError will be
raised. Other values are 'ignore' and 'replace'.
Returns
-------
pandas.Series
A Series with phonetic encoded values. | 3.830379 | 3.630778 | 1.054975 |
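A small sketch, assuming a phonetic backend such as `jellyfish` is installed:

```python
import pandas as pd
from recordlinkage.preprocessing import phonetic

names = pd.Series(['Jonathan', 'Jon', 'John'])
phonetic(names, method='soundex')   # soundex codes, e.g. 'J535', 'J500', 'J500'
```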
indexer = Block(*args, **kwargs)
self.add(indexer)
return self | def block(self, *args, **kwargs) | Add a block index.
Shortcut of :class:`recordlinkage.index.Block`::
from recordlinkage.index import Block
indexer = recordlinkage.Index()
indexer.add(Block()) | 13.941863 | 10.99478 | 1.268044 |
indexer = SortedNeighbourhood(*args, **kwargs)
self.add(indexer)
return self | def sortedneighbourhood(self, *args, **kwargs) | Add a Sorted Neighbourhood Index.
Shortcut of :class:`recordlinkage.index.SortedNeighbourhood`::
from recordlinkage.index import SortedNeighbourhood
indexer = recordlinkage.Index()
indexer.add(SortedNeighbourhood()) | 8.51985 | 7.518176 | 1.133234 |
indexer = Random()
self.add(indexer)
return self | def random(self, *args, **kwargs) | Add a random index.
Shortcut of :class:`recordlinkage.index.Random`::
from recordlinkage.index import Random
indexer = recordlinkage.Index()
indexer.add(Random()) | 26.824036 | 17.540386 | 1.529273 |
compare = Exact(*args, **kwargs)
self.add(compare)
return self | def exact(self, *args, **kwargs) | Compare attributes of pairs exactly.
Shortcut of :class:`recordlinkage.compare.Exact`::
from recordlinkage.compare import Exact
indexer = recordlinkage.Compare()
indexer.add(Exact()) | 10.522985 | 8.074376 | 1.303257 |
compare = String(*args, **kwargs)
self.add(compare)
return self | def string(self, *args, **kwargs) | Compare attributes of pairs with string algorithm.
Shortcut of :class:`recordlinkage.compare.String`::
from recordlinkage.compare import String
indexer = recordlinkage.Compare()
indexer.add(String()) | 11.558488 | 8.248907 | 1.401214 |
compare = Numeric(*args, **kwargs)
self.add(compare)
return self | def numeric(self, *args, **kwargs) | Compare attributes of pairs with numeric algorithm.
Shortcut of :class:`recordlinkage.compare.Numeric`::
from recordlinkage.compare import Numeric
indexer = recordlinkage.Compare()
indexer.add(Numeric()) | 9.589758 | 7.75968 | 1.235844 |
compare = Geographic(*args, **kwargs)
self.add(compare)
return self | def geo(self, *args, **kwargs) | Compare attributes of pairs with geo algorithm.
Shortcut of :class:`recordlinkage.compare.Geographic`::
from recordlinkage.compare import Geographic
indexer = recordlinkage.Compare()
indexer.add(Geographic()) | 13.429975 | 7.140955 | 1.880697 |
compare = Date(*args, **kwargs)
self.add(compare)
return self | def date(self, *args, **kwargs) | Compare attributes of pairs with date algorithm.
Shortcut of :class:`recordlinkage.compare.Date`::
from recordlinkage.compare import Date
indexer = recordlinkage.Compare()
indexer.add(Date()) | 11.174714 | 8.284063 | 1.348941 |
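The shortcuts above chain onto a single `Compare` object; a sketch with placeholder column names:

```python
import recordlinkage

comp = recordlinkage.Compare()
comp.exact('sex', 'sex', label='sex')
comp.string('surname', 'surname', method='jarowinkler', threshold=0.85, label='surname')
comp.date('date_of_birth', 'date_of_birth', label='date_of_birth')

# features = comp.compute(candidate_pairs, df_a, df_b)
```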
n_max = full_index_size(*total)
if isinstance(links_pred, pandas.MultiIndex):
links_pred = len(links_pred)
if links_pred > n_max:
raise ValueError("n has to be smaller of equal n_max")
return 1 - links_pred / n_max | def reduction_ratio(links_pred, *total) | Compute the reduction ratio.
The reduction ratio is 1 minus the ratio of the number of candidate matches
to the maximum number of pairs possible.
Parameters
----------
links_pred: int, pandas.MultiIndex
The number of candidate record pairs or the pandas.MultiIndex with
record pairs.
*total: pandas.DataFrame object(s)
The DataFrames are used to compute the full index size with the
full_index_size function.
Returns
-------
float
The reduction ratio. | 5.68604 | 4.038594 | 1.407926 |
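A quick arithmetic check of the formula, using two dummy frames of 1,000 records each:

```python
import pandas as pd

df_a = pd.DataFrame(index=range(1000))
df_b = pd.DataFrame(index=range(1000))

# 10,000 candidate pairs against a full index of 1,000 * 1,000 = 1,000,000 pairs.
reduction_ratio(10000, df_a, df_b)   # 1 - 10000 / 1000000 = 0.99
```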
if not isinstance(shape, (tuple, list)):
x = get_length(shape)
n = int(x * (x - 1) / 2)
elif (isinstance(shape, (tuple, list)) and len(shape) == 1):
x = get_length(shape[0])
n = int(x * (x - 1) / 2)
else:
n = numpy.prod([get_length(xi) for xi in shape])
return n | def max_pairs(shape) | [DEPRECATED] Compute the maximum number of record pairs possible. | 2.590352 | 2.393543 | 1.082225 |
# check if a list or tuple is passed as argument
if len(args) == 1 and isinstance(args[0], (list, tuple)):
args = tuple(args[0])
if len(args) == 1:
n = get_length(args[0])
size = int(n * (n - 1) / 2)
else:
size = numpy.prod([get_length(arg) for arg in args])
return size | def full_index_size(*args) | Compute the number of records in a full index.
Compute the number of records in a full index without building the index
itself. The result is the maximum number of record pairs possible. This
function is especially useful in measures like the `reduction_ratio`.
Deduplication: Given a DataFrame A with length N, the full index size is
N*(N-1)/2. Linking: Given a DataFrame A with length N and a DataFrame B
with length M, the full index size is N*M.
Parameters
----------
*args: int, pandas.MultiIndex, pandas.Series, pandas.DataFrame
A pandas object or a int representing the length of a dataset to link.
When there is one argument, it is assumed that the record linkage is
a deduplication process.
Examples
--------
Use integers:
>>> full_index_size(10) # deduplication: 45 pairs
>>> full_index_size(10, 10) # linking: 100 pairs
or pandas objects
>>> full_index_size(DF) # deduplication: len(DF)*(len(DF)-1)/2 pairs
>>> full_index_size(DF, DF) # linking: len(DF)*len(DF) pairs | 2.744424 | 2.857061 | 0.960576 |
links_true = _get_multiindex(links_true)
links_pred = _get_multiindex(links_pred)
return len(links_true & links_pred) | def true_positives(links_true, links_pred) | Count the number of True Positives.
Returns the number of correctly predicted links, also called the number of
True Positives (TP).
Parameters
----------
links_true: pandas.MultiIndex, pandas.DataFrame, pandas.Series
The true (or actual) links.
links_pred: pandas.MultiIndex, pandas.DataFrame, pandas.Series
The predicted links.
Returns
-------
int
The number of correctly predicted links. | 3.0235 | 3.699602 | 0.81725 |
links_true = _get_multiindex(links_true)
links_pred = _get_multiindex(links_pred)
if isinstance(total, pandas.MultiIndex):
total = len(total)
return int(total) - len(links_true | links_pred) | def true_negatives(links_true, links_pred, total) | Count the number of True Negatives.
Returns the number of correctly predicted non-links, also called the
number of True Negatives (TN).
Parameters
----------
links_true: pandas.MultiIndex, pandas.DataFrame, pandas.Series
The true (or actual) links.
links_pred: pandas.MultiIndex, pandas.DataFrame, pandas.Series
The predicted links.
total: int, pandas.MultiIndex
The count of all record pairs (both links and non-links). When the
argument is a pandas.MultiIndex, the length of the index is used.
Returns
-------
int
The number of correctly predicted non-links. | 3.719791 | 3.136887 | 1.185823 |
links_true = _get_multiindex(links_true)
links_pred = _get_multiindex(links_pred)
return len(links_pred.difference(links_true)) | def false_positives(links_true, links_pred) | Count the number of False Positives.
Returns the number of incorrect predictions of true non-links. (true non-
links, but predicted as links). This value is known as the number of False
Positives (FP).
Parameters
----------
links_true: pandas.MultiIndex, pandas.DataFrame, pandas.Series
The true (or actual) links.
links_pred: pandas.MultiIndex, pandas.DataFrame, pandas.Series
The predicted links.
Returns
-------
int
The number of false positives. | 3.118781 | 3.622824 | 0.86087 |
links_true = _get_multiindex(links_true)
links_pred = _get_multiindex(links_pred)
return len(links_true.difference(links_pred)) | def false_negatives(links_true, links_pred) | Count the number of False Negatives.
Returns the number of incorrect predictions of true links. (true links,
but predicted as non-links). This value is known as the number of False
Negatives (FN).
Parameters
----------
links_true: pandas.MultiIndex, pandas.DataFrame, pandas.Series
The true (or actual) links.
links_pred: pandas.MultiIndex, pandas.DataFrame, pandas.Series
The predicted links.
Returns
-------
int
The number of false negatives. | 2.978732 | 3.470212 | 0.858372 |
links_true = _get_multiindex(links_true)
links_pred = _get_multiindex(links_pred)
tp = true_positives(links_true, links_pred)
fp = false_positives(links_true, links_pred)
fn = false_negatives(links_true, links_pred)
if total is None:
tn = numpy.nan
else:
tn = true_negatives(links_true, links_pred, total)
return numpy.array([[tp, fn], [fp, tn]]) | def confusion_matrix(links_true, links_pred, total=None) | Compute the confusion matrix.
The confusion matrix is of the following form:
+----------------------+-----------------------+----------------------+
| | Predicted Positives | Predicted Negatives |
+======================+=======================+======================+
| **True Positives** | True Positives (TP) | False Negatives (FN) |
+----------------------+-----------------------+----------------------+
| **True Negatives** | False Positives (FP) | True Negatives (TN) |
+----------------------+-----------------------+----------------------+
The confusion matrix is an informative way to analyse a prediction. The
matrix can be used to compute measures like precision and recall. The count
of true positives is [0,0], false negatives is [0,1], true negatives
is [1,1] and false positives is [1,0].
Parameters
----------
links_true: pandas.MultiIndex, pandas.DataFrame, pandas.Series
The true (or actual) links.
links_pred: pandas.MultiIndex, pandas.DataFrame, pandas.Series
The predicted links.
total: int, pandas.MultiIndex
The count of all record pairs (both links and non-links). When the
argument is a pandas.MultiIndex, the length of the index is used. If
the total is None, the number of True Negatives is not computed.
Default None.
Returns
-------
numpy.array
The confusion matrix with TP, TN, FN, FP values.
Note
----
The number of True Negatives is computed based on the total argument.
This argument is the number of record pairs of the entire matrix. | 1.986081 | 2.058976 | 0.964596 |
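A worked example of the layout described above:

```python
import pandas as pd

links_true = pd.MultiIndex.from_tuples([(0, 1), (1, 2), (2, 3)])
links_pred = pd.MultiIndex.from_tuples([(0, 1), (2, 3), (3, 4)])

confusion_matrix(links_true, links_pred, total=10)
# array([[2, 1],    # TP=2, FN=1
#        [1, 6]])   # FP=1, TN=10-4=6
```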
if _isconfusionmatrix(links_true):
confusion_matrix = links_true
v = confusion_matrix[0, 0] \
/ (confusion_matrix[0, 0] + confusion_matrix[1, 0])
else:
tp = true_positives(links_true, links_pred)
fp = false_positives(links_true, links_pred)
v = tp / (tp + fp)
return float(v) | def precision(links_true, links_pred=None) | precision(links_true, links_pred)
Compute the precision.
The precision is given by TP/(TP+FP).
Parameters
----------
links_true: pandas.MultiIndex, pandas.DataFrame, pandas.Series
The true (or actual) collection of links.
links_pred: pandas.MultiIndex, pandas.DataFrame, pandas.Series
The predicted collection of links.
Returns
-------
float
The precision | 2.983305 | 3.280338 | 0.90945 |
if _isconfusionmatrix(links_true):
confusion_matrix = links_true
v = confusion_matrix[0, 0] \
/ (confusion_matrix[0, 0] + confusion_matrix[0, 1])
else:
tp = true_positives(links_true, links_pred)
fn = false_negatives(links_true, links_pred)
v = tp / (tp + fn)
return float(v) | def recall(links_true, links_pred=None) | recall(links_true, links_pred)
Compute the recall/sensitivity.
The recall is given by TP/(TP+FN).
Parameters
----------
links_true: pandas.MultiIndex, pandas.DataFrame, pandas.Series
The true (or actual) collection of links.
links_pred: pandas.MultiIndex, pandas.DataFrame, pandas.Series
The predicted collection of links.
Returns
-------
float
The recall | 2.95214 | 3.224517 | 0.915529 |
if _isconfusionmatrix(links_true):
confusion_matrix = links_true
v = (confusion_matrix[0, 0] + confusion_matrix[1, 1]) \
/ numpy.sum(confusion_matrix)
else:
tp = true_positives(links_true, links_pred)
tn = true_negatives(links_true, links_pred, total)
v = (tp + tn) / total
return float(v) | def accuracy(links_true, links_pred=None, total=None) | accuracy(links_true, links_pred, total)
Compute the accuracy.
The accuracy is given by (TP+TN)/(TP+FP+TN+FN).
Parameters
----------
links_true: pandas.MultiIndex, pandas.DataFrame, pandas.Series
The true (or actual) collection of links.
links_pred: pandas.MultiIndex, pandas.DataFrame, pandas.Series
The predicted collection of links.
total: int, pandas.MultiIndex
The count of all record pairs (both links and non-links). When the
argument is a pandas.MultiIndex, the length of the index is used.
Returns
-------
float
The accuracy | 3.290188 | 3.465945 | 0.94929 |
if _isconfusionmatrix(links_true):
confusion_matrix = links_true
v = confusion_matrix[1, 1] / \
(confusion_matrix[1, 0] + confusion_matrix[1, 1])
else:
fp = false_positives(links_true, links_pred)
tn = true_negatives(links_true, links_pred, total)
v = tn / (fp + tn)
return float(v) | def specificity(links_true, links_pred=None, total=None) | specificity(links_true, links_pred, total)
Compute the specificity.
The specificity is given by TN/(FP+TN).
Parameters
----------
links_true: pandas.MultiIndex, pandas.DataFrame, pandas.Series
The true (or actual) collection of links.
links_pred: pandas.MultiIndex, pandas.DataFrame, pandas.Series
The predicted collection of links.
total: int, pandas.MultiIndex
The count of all record pairs (both links and non-links). When the
argument is a pandas.MultiIndex, the length of the index is used.
Returns
-------
float
The specificity | 3.282323 | 3.422171 | 0.959135 |
prec = precision(links_true, links_pred)
rec = recall(links_true, links_pred)
return float(2 * prec * rec / (prec + rec)) | def fscore(links_true, links_pred=None) | fscore(links_true, links_pred)
Compute the F-score.
The F-score is given by 2*(precision*recall)/(precision+recall).
Parameters
----------
links_true: pandas.MultiIndex, pandas.DataFrame, pandas.Series
The true (or actual) collection of links.
links_pred: pandas.MultiIndex, pandas.DataFrame, pandas.Series
The predicted collection of links.
Returns
-------
float
The fscore
Note
----
If there are no pairs predicted as links, this measure will raise a
ZeroDivisionError. | 2.24964 | 2.948111 | 0.763078 |
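The same small example illustrates how precision, recall, and F-score relate:

```python
import pandas as pd

links_true = pd.MultiIndex.from_tuples([(0, 1), (1, 2), (2, 3)])
links_pred = pd.MultiIndex.from_tuples([(0, 1), (2, 3), (3, 4)])

precision(links_true, links_pred)   # 2 / 3
recall(links_true, links_pred)      # 2 / 3
fscore(links_true, links_pred)      # 2 / 3 (harmonic mean of two equal values)
```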
df_empty = pd.DataFrame(index=pairs)
return self._compute(
tuple([df_empty]),
tuple([df_empty])
) | def compute(self, pairs, x=None, x_link=None) | Return continuous random values for each record pair.
Parameters
----------
pairs : pandas.MultiIndex
A pandas MultiIndex with the record pairs to compare. The indices
in the MultiIndex are indices of the DataFrame(s) to link.
x : pandas.DataFrame
The DataFrame to link. If `x_link` is given, the comparing is a
linking problem. If `x_link` is not given, the problem is one of
deduplication.
x_link : pandas.DataFrame, optional
The second DataFrame.
Returns
-------
pandas.Series, pandas.DataFrame, numpy.ndarray
The result of comparing record pairs (the features). Can be
a tuple with multiple pandas.Series, pandas.DataFrame,
numpy.ndarray objects. | 8.256994 | 10.683131 | 0.7729 |
return class_obj._compute(pairs, x, x_link) | def _parallel_compare_helper(class_obj, pairs, x, x_link=None) | Internal function to overcome pickling problem in python2. | 5.787649 | 5.821461 | 0.994192 |
if not isinstance(chunksize, int):
raise ValueError('argument chunksize needs to be integer type')
bins = np.arange(0, len(frame_or_series), step=chunksize)
for b in bins:
yield frame_or_series[b:b + chunksize] | def chunk_pandas(frame_or_series, chunksize=None) | Chunk a frame into smaller, equal parts. | 3.404444 | 3.267076 | 1.042046 |
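A tiny sketch of the chunking behaviour; the last chunk simply holds the remainder:

```python
import pandas as pd

df = pd.DataFrame({'a': range(10)})
[len(chunk) for chunk in chunk_pandas(df, chunksize=4)]   # [4, 4, 2]
```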
if isinstance(model, list):
self.algorithms = self.algorithms + model
else:
self.algorithms.append(model) | def add(self, model) | Add a index method.
This method is used to add index algorithms. If multiple algorithms
are added, the union of the record pairs from the algorithm is taken.
Parameters
----------
model : list, class
A (list of) index algorithm(s) from
:mod:`recordlinkage.index`. | 3.588941 | 3.378699 | 1.062226 |
if not self.algorithms:
raise ValueError("No algorithms given.")
# start timing
start_time = time.time()
pairs = None
for cl_alg in self.algorithms:
pairs_i = cl_alg.index(x, x_link)
if pairs is None:
pairs = pairs_i
else:
pairs = pairs.union(pairs_i)
if x_link is not None:
n_max = max_pairs((x, x_link))
else:
n_max = max_pairs(x)
# store the number of pairs
n = pairs.shape[0]
eta = time.time() - start_time
rr = 1 - n / n_max
i_max = '?' if self._i_max is None else self._i_max
self._eta.append(eta)
self._n.append(n)
self._n_max.append(n_max)
# log
logging.info("indexing [{:d}/{}] - time: {:.2f}s - pairs: {:d}/{:d} - "
"rr: {:0.5f}".format(self._i, i_max, eta, n, n_max, rr))
# log total
if self._output_log_total:
n_total = np.sum(self._n)
n_max_total = np.sum(self._n_max)
rr_avg = 1 - n_total / n_max_total
eta_total = np.sum(self._eta)
logging.info("indexing [{:d}/{}] - time: {:.2f}s - "
"pairs_total: {:d}/{:d} - rr_total: {:0.5f}".format(
self._i, i_max, eta_total,
n_total, n_max_total, rr_avg))
self._i += 1
return pairs | def index(self, x, x_link=None) | Make an index of record pairs.
Parameters
----------
x: pandas.DataFrame
A pandas DataFrame. When `x_link` is None, the algorithm makes
record pairs within the DataFrame. When `x_link` is not empty,
the algorithm makes pairs between `x` and `x_link`.
x_link: pandas.DataFrame, optional
A second DataFrame to link with the DataFrame x.
Returns
-------
pandas.MultiIndex
A pandas.MultiIndex with record pairs. Each record pair contains
the index labels of two records. | 2.735817 | 2.726147 | 1.003547 |
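A usage sketch combining `add` and `index`; the column name is a placeholder:

```python
import recordlinkage
from recordlinkage.index import Block

indexer = recordlinkage.Index()
indexer.add(Block('surname'))            # candidate pairs must agree on 'surname'

# candidate_pairs = indexer.index(df_a, df_b)   # linking two frames
# candidate_pairs = indexer.index(df_a)         # deduplicating one frame
```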
pairs = self._link_index(df_a, df_a)
# Remove all pairs not in the lower triangular part of the matrix.
# This part can be improved by not comparing the level values, but the
# level itself.
pairs = pairs[pairs.labels[0] > pairs.labels[1]]
return pairs | def _dedup_index(self, df_a) | Build an index for deduplicating a dataset.
Parameters
----------
df_a : (tuple of) pandas.Series
The data of the DataFrame to build the index with.
Returns
-------
pandas.MultiIndex
A pandas.MultiIndex with record pairs. Each record pair
contains the index values of two records. The records are
sampled from the lower triangular part of the matrix. | 9.761801 | 9.95735 | 0.980361 |
if x is None: # error
raise ValueError("provide at least one dataframe")
elif x_link is not None: # linking (two arg)
x = (x, x_link)
elif isinstance(x, (list, tuple)): # dedup or linking (single arg)
x = tuple(x)
else: # dedup (single arg)
x = (x,)
if self.verify_integrity:
for df in x:
self._verify_integrety(df)
# linking
if not self._deduplication(x):
pairs = self._link_index(*x)
names = self._make_index_names(x[0].index.name, x[1].index.name)
# deduplication
else:
pairs = self._dedup_index(*x)
names = self._make_index_names(x[0].index.name, x[0].index.name)
pairs.rename(names, inplace=True)
return pairs | def index(self, x, x_link=None) | Make an index of record pairs.
Use a custom function to make record pairs of one or two dataframes.
Each function should return a pandas.MultiIndex with record pairs.
Parameters
----------
x: pandas.DataFrame
A pandas DataFrame. When `x_link` is None, the algorithm makes
record pairs within the DataFrame. When `x_link` is not empty,
the algorithm makes pairs between `x` and `x_link`.
x_link: pandas.DataFrame, optional
A second DataFrame to link with the DataFrame x.
Returns
-------
pandas.MultiIndex
A pandas.MultiIndex with record pairs. Each record pair contains
the index labels of two records. | 3.742158 | 3.531725 | 1.059583 |
if self._f_compare_vectorized:
return self._f_compare_vectorized(
*(args + self.args), **self.kwargs)
else:
raise NotImplementedError() | def _compute_vectorized(self, *args) | Compare attributes (vectorized)
Parameters
----------
*args : pandas.Series
pandas.Series' as arguments.
Returns
-------
pandas.Series, pandas.DataFrame, numpy.ndarray
The result of comparing record pairs (the features). Can be
a tuple with multiple pandas.Series, pandas.DataFrame,
numpy.ndarray objects. | 5.842211 | 5.807762 | 1.005932 |
result = self._compute_vectorized(*tuple(left_on + right_on))
return result | def _compute(self, left_on, right_on) | Compare the data on the left and right.
:meth:`BaseCompareFeature._compute` and
:meth:`BaseCompareFeature.compute` differ on the accepted
arguments. `_compute` accepts indexed data while `compute`
accepts the record pairs and the DataFrame's.
Parameters
----------
left_on : (tuple of) pandas.Series
Data to compare with `right_on`
right_on : (tuple of) pandas.Series
Data to compare with `left_on`
Returns
-------
pandas.Series, pandas.DataFrame, numpy.ndarray
The result of comparing record pairs (the features). Can be
a tuple with multiple pandas.Series, pandas.DataFrame,
numpy.ndarray objects. | 10.363228 | 15.822404 | 0.654972 |
if not is_pandas_2d_multiindex(pairs):
raise ValueError(
"expected pandas.MultiIndex with record pair indices "
"as first argument"
)
if not isinstance(x, pandas.DataFrame):
raise ValueError("expected pandas.DataFrame as second argument")
if x_link is not None and not isinstance(x_link, pandas.DataFrame):
raise ValueError("expected pandas.DataFrame as third argument")
labels_left = listify(self.labels_left, [])
labels_right = listify(self.labels_right, [])
if x_link is None:
df_a = frame_indexing(x[labels_left + labels_right], pairs, 0)
data1 = tuple([df_a[lbl] for lbl in listify(self.labels_left)])
data2 = tuple([df_a[lbl] for lbl in listify(self.labels_right)])
else:
df_a = frame_indexing(x[labels_left], pairs, 0)
data1 = tuple([df_a[lbl] for lbl in listify(self.labels_left)])
df_b = frame_indexing(x_link[labels_right], pairs, 1)
data2 = tuple([df_b[lbl] for lbl in listify(self.labels_right)])
results = self._compute(data1, data2)
return results | def compute(self, pairs, x, x_link=None) | Compare the records of each record pair.
Calling this method starts the comparing of records.
Parameters
----------
pairs : pandas.MultiIndex
A pandas MultiIndex with the record pairs to compare. The indices
in the MultiIndex are indices of the DataFrame(s) to link.
x : pandas.DataFrame
The DataFrame to link. If `x_link` is given, the comparing is a
linking problem. If `x_link` is not given, the problem is one of
deduplication.
x_link : pandas.DataFrame, optional
The second DataFrame.
Returns
-------
pandas.Series, pandas.DataFrame, numpy.ndarray
The result of comparing record pairs (the features). Can be
a tuple with multiple pandas.Series, pandas.DataFrame,
numpy.ndarray objects. | 2.34195 | 2.2674 | 1.032879 |
label = kwargs.pop('label', None)
if isinstance(labels_left, tuple):
labels_left = list(labels_left)
if isinstance(labels_right, tuple):
labels_right = list(labels_right)
feature = BaseCompareFeature(
labels_left, labels_right, args, kwargs, label=label)
feature._f_compare_vectorized = comp_func
self.add(feature) | def compare_vectorized(self, comp_func, labels_left, labels_right,
*args, **kwargs) | Compute the similarity between values with a callable.
This method initialises the comparing of values with a custom
function/callable. The function/callable should accept
numpy.ndarray's.
Example
-------
>>> comp = recordlinkage.Compare()
>>> comp.compare_vectorized(custom_callable, 'first_name', 'name')
>>> comp.compare(PAIRS, DATAFRAME1, DATAFRAME2)
Parameters
----------
comp_func : function
A comparison function. This function can be a built-in function
or a user defined comparison function. The function should accept
numpy.ndarray's as first two arguments.
labels_left : label, pandas.Series, pandas.DataFrame
The labels, Series or DataFrame to compare.
labels_right : label, pandas.Series, pandas.DataFrame
The labels, Series or DataFrame to compare.
*args :
Additional arguments to pass to callable comp_func.
**kwargs :
Additional keyword arguments to pass to callable comp_func.
(keyword 'label' is reserved.)
label : (list of) label(s)
The name of the feature and the name of the column. IMPORTANT:
This argument is a keyword argument and can not be part of the
arguments of comp_func. | 3.073419 | 3.778877 | 0.813315 |
labels = []
for compare_func in self.features:
labels = labels + listify(compare_func.labels_left)
# check requested labels (for better error messages)
if not is_label_dataframe(labels, validate):
error_msg = "label is not found in the dataframe"
raise KeyError(error_msg)
return unique(labels) | def _get_labels_left(self, validate=None) | Get all labels of the left dataframe. | 9.172642 | 7.963761 | 1.151798 |
labels = []
for compare_func in self.features:
labels = labels + listify(compare_func.labels_right)
# check requested labels (for better error messages)
if not is_label_dataframe(labels, validate):
error_msg = "label is not found in the dataframe"
raise KeyError(error_msg)
return unique(labels) | def _get_labels_right(self, validate=None) | Get all labels of the right dataframe. | 9.448795 | 8.127307 | 1.162599 |
feat_conc = []
for feat, label in objs:
# result is tuple of results
if isinstance(feat, tuple):
if label is None:
label = [None] * len(feat)
partial_result = self._union(
zip(feat, label), column_i=column_i)
feat_conc.append(partial_result)
column_i = column_i + partial_result.shape[1]
# result is pandas.Series.
elif isinstance(feat, pandas.Series):
feat.reset_index(drop=True, inplace=True)
if label is None:
label = column_i
feat.rename(label, inplace=True)
feat_conc.append(feat)
column_i = column_i + 1
# result is pandas.DataFrame
elif isinstance(feat, pandas.DataFrame):
feat.reset_index(drop=True, inplace=True)
if label is None:
label = np.arange(column_i, column_i + feat.shape[1])
feat.columns = label
feat_conc.append(feat)
column_i = column_i + feat.shape[1]
# result is numpy 1d array
elif is_numpy_like(feat) and len(feat.shape) == 1:
if label is None:
label = column_i
f = pandas.Series(feat, name=label, copy=False)
feat_conc.append(f)
column_i = column_i + 1
# result is numpy 2d array
elif is_numpy_like(feat) and len(feat.shape) == 2:
if label is None:
label = np.arange(column_i, column_i + feat.shape[1])
feat_df = pandas.DataFrame(feat, columns=label, copy=False)
if label is None:
feat_df.columns = [None for _ in range(feat_df.shape[1])]
feat_conc.append(feat_df)
column_i = column_i + feat.shape[1]
# other results are not (yet) supported
else:
raise ValueError("expected numpy.ndarray or "
"pandas object to be returned, "
"got '{}'".format(feat.__class__.__name__))
result = pandas.concat(feat_conc, axis=1, copy=False)
if index is not None:
result.set_index(index, inplace=True)
return result | def _union(self, objs, index=None, column_i=0) | Make a union of the features.
The term 'union' is based on the terminology of scikit-learn. | 2.077422 | 2.060889 | 1.008022 |
if not isinstance(pairs, pandas.MultiIndex):
raise ValueError(
"expected pandas.MultiIndex with record pair indices "
"as first argument"
)
if not isinstance(x, pandas.DataFrame):
raise ValueError("expected pandas.DataFrame as second argument")
if x_link is not None and not isinstance(x_link, pandas.DataFrame):
raise ValueError("expected pandas.DataFrame as third argument")
if self.n_jobs == 1:
results = self._compute(pairs, x, x_link)
elif self.n_jobs > 1:
results = self._compute_parallel(
pairs, x, x_link, n_jobs=self.n_jobs)
else:
raise ValueError("number of jobs should be positive integer")
return results | def compute(self, pairs, x, x_link=None) | Compare the records of each record pair.
Calling this method starts the comparing of records.
Parameters
----------
pairs : pandas.MultiIndex
A pandas MultiIndex with the record pairs to compare. The indices
in the MultiIndex are indices of the DataFrame(s) to link.
x : pandas.DataFrame
The DataFrame to link. If `x_link` is given, the comparing is a
linking problem. If `x_link` is not given, the problem is one of
deduplication.
x_link : pandas.DataFrame, optional
The second DataFrame.
Returns
-------
pandas.DataFrame
A pandas DataFrame with feature vectors, i.e. the result of
comparing each record pair. | 2.271972 | 2.095831 | 1.084044 |
warnings.warn("learn is deprecated, {}.fit_predict "
"instead".format(self.__class__.__name__))
return self.fit_predict(*args, **kwargs) | def learn(self, *args, **kwargs) | [DEPRECATED] Use 'fit_predict'. | 5.346018 | 3.838433 | 1.392761 |
logging.info("Classification - start training {}".format(
self.__class__.__name__)
)
self._initialise_classifier(comparison_vectors)
# start timing
start_time = time.time()
if isinstance(match_index, (pandas.MultiIndex, pandas.Index)):
try:
y = pandas.Series(0, index=comparison_vectors.index)
y.loc[match_index & comparison_vectors.index] = 1
except pandas.IndexError as err:
# There are no matches, so training is not possible.
if len(match_index & comparison_vectors.index) == 0:
raise LearningError(
"both matches and non-matches needed in the" +
"trainingsdata, only non-matches found"
)
else:
raise err
self._fit(comparison_vectors.values, y.values)
elif match_index is None:
self._fit(comparison_vectors.values)
else:
raise ValueError(
"'match_index' has incorrect type '{}'".format(
type(match_index)
)
)
# log timing
logf_time = "Classification - training computation time: ~{:.2f}s"
logging.info(logf_time.format(time.time() - start_time)) | def fit(self, comparison_vectors, match_index=None) | Train the classifier.
Parameters
----------
comparison_vectors : pandas.DataFrame
The comparison vectors (or features) to train the model with.
match_index : pandas.MultiIndex
A pandas.MultiIndex object with the true matches.
The MultiIndex contains only the true matches. Default None.
Note
----
A note in case of finding links within a single dataset (for example
deduplication). Ensure that the training record pairs are from the
lower triangular part of the dataset/matrix. See detailed information
here: link. | 4.194519 | 4.146854 | 1.011494 |
self.fit(comparison_vectors, match_index)
result = self.predict(comparison_vectors)
return result | def fit_predict(self, comparison_vectors, match_index=None) | Train the classifier.
Parameters
----------
comparison_vectors : pandas.DataFrame
The comparison vectors.
match_index : pandas.MultiIndex
The true matches.
return_type : str
Deprecated. Use recordlinkage.options instead. Use the option
`recordlinkage.set_option('classification.return_type', 'index')`
instead.
Returns
-------
pandas.Series
A pandas Series with the labels 1 (for the matches) and 0 (for the
non-matches). | 2.960048 | 4.759411 | 0.621936 |
logging.info("Classification - predict matches and non-matches")
# make the predicition
prediction = self._predict(comparison_vectors.values)
self._post_predict(prediction)
# format and return the result
return self._return_result(prediction, comparison_vectors) | def predict(self, comparison_vectors) | Predict the class of the record pairs.
Classify a set of record pairs based on their comparison vectors into
matches, non-matches and possible matches. The classifier has to be
trained to call this method.
Parameters
----------
comparison_vectors : pandas.DataFrame
Dataframe with comparison vectors.
return_type : str
Deprecated. Use recordlinkage.options instead. Use the option
`recordlinkage.set_option('classification.return_type', 'index')`
instead.
Returns
-------
pandas.Series
A pandas Series with the labels 1 (for the matches) and 0 (for the
non-matches). | 7.492128 | 7.896978 | 0.948733 |
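A hedged end-to-end sketch of the supervised workflow; `training_vectors`, `true_match_index`, and `comparison_vectors` below stand in for real feature data and are not defined here:

```python
import recordlinkage

cl = recordlinkage.NaiveBayesClassifier()

# Supervised: fit on labelled comparison vectors, then classify new pairs.
# cl.fit(training_vectors, true_match_index)
# predicted_links = cl.predict(comparison_vectors)
# match_probabilities = cl.prob(comparison_vectors)
```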
if return_type is not None:
warnings.warn("The argument 'return_type' is removed. "
"Default value is now 'series'.",
VisibleDeprecationWarning, stacklevel=2)
logging.info("Classification - compute probabilities")
prob_match = self._prob_match(comparison_vectors.values)
return pandas.Series(prob_match, index=comparison_vectors.index) | def prob(self, comparison_vectors, return_type=None) | Compute the probabilities for each record pair.
For each pair of records, estimate the probability of being a match.
Parameters
----------
comparison_vectors : pandas.DataFrame
The dataframe with comparison vectors.
return_type : str
Deprecated. (default 'series')
Returns
-------
pandas.Series or numpy.ndarray
The probability of being a match for each record pair. | 5.649849 | 4.839329 | 1.167486 |
return_type = cf.get_option('classification.return_type')
if type(result) != np.ndarray:
raise ValueError("numpy.ndarray expected.")
# return the pandas.MultiIndex
if return_type == 'index':
return comparison_vectors.index[result.astype(bool)]
# return a pandas.Series
elif return_type == 'series':
return pandas.Series(
result,
index=comparison_vectors.index,
name='classification')
# return a numpy.ndarray
elif return_type == 'array':
return result
# return_type not known
else:
raise ValueError(
"return_type {} unknown. Choose 'index', 'series' or "
"'array'".format(return_type)) | def _return_result(self, result, comparison_vectors=None) | Return different formatted classification results. | 3.472426 | 3.235108 | 1.073357 |
if len(m) != len(u):
raise ValueError("the length of 'm' is not equal the length of 'u'")
if n_match >= n or n_match < 0:
raise ValueError("the number of matches is bounded by [0, n]")
# set the random seed
np.random.seed(random_state)
matches = []
nonmatches = []
sample_set = np.array([0, 1], dtype=dtype)
for i, _ in enumerate(m):
p_mi = [1 - m[i], m[i]]
p_ui = [1 - u[i], u[i]]
comp_mi = np.random.choice(sample_set, (n_match, 1), p=p_mi)
comp_ui = np.random.choice(sample_set, (n - n_match, 1), p=p_ui)
nonmatches.append(comp_ui)
matches.append(comp_mi)
match_block = np.concatenate(matches, axis=1)
nonmatch_block = np.concatenate(nonmatches, axis=1)
data_np = np.concatenate((match_block, nonmatch_block), axis=0)
index_np = np.random.randint(1001, 1001 + n * 2, (n, 2))
data_col_names = ['c_%s' % (i + 1) for i in range(len(m))]
data_mi = pd.MultiIndex.from_arrays([index_np[:, 0], index_np[:, 1]])
data_df = pd.DataFrame(data_np, index=data_mi, columns=data_col_names)
features = data_df.sample(frac=1, random_state=random_state)
if return_links:
links = data_mi[:n_match]
return features, links
else:
return features | def binary_vectors(n, n_match, m=[0.9] * 8, u=[0.1] * 8,
random_state=None, return_links=False, dtype=np.int8) | Generate random binary comparison vectors.
This function is used to generate random comparison vectors. The
result of each comparison is a binary value (0 or 1).
Parameters
----------
n : int
The total number of comparison vectors.
n_match : int
The number of matching record pairs.
m : list, default [0.9] * 8, optional
A list of m probabilities of each partially identifying
variable. The m probability is the probability that an
identifier in matching record pairs agrees.
u : list, default [0.1] * 8, optional
A list of u probabilities of each partially identifying
variable. The u probability is the probability that an
identifier in non-matching record pairs agrees.
random_state : int or numpy.random.RandomState, optional
Seed for the random number generator with an integer or numpy
RandomState object.
return_links: bool
When True, the function also returns the true links.
dtype: numpy.dtype
The dtype of each column in the returned DataFrame.
Returns
-------
pandas.DataFrame
A dataframe with comparison vectors. | 2.259429 | 2.340621 | 0.965312 |
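A minimal usage sketch for binary_vectors, assuming it is importable from recordlinkage.datasets (the module it ships in):

from recordlinkage.datasets import binary_vectors

# 500 comparison vectors over 4 identifiers, 50 of them from matching pairs.
features, links = binary_vectors(
    n=500,
    n_match=50,
    m=[0.95] * 4,   # matching pairs agree on each identifier with probability 0.95
    u=[0.05] * 4,   # non-matching pairs agree with probability 0.05
    random_state=42,
    return_links=True)

print(features.shape)   # (500, 4)
print(len(links))       # 50 true links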
# TODO: add notfitted warnings
if self.kernel.classes_.shape[0] != 2:
raise ValueError("Number of classes is {}, expected 2.".format(
self.kernel.classes_.shape[0]))
# # get the position of match probabilities
# classes = list(self.kernel.classes_)
# return classes.index(1)
return 1 | def _match_class_pos(self) | Return the position of the match class. | 7.114592 | 6.35613 | 1.119328 |
# TODO: add notfitted warnings
if self.kernel.classes_.shape[0] != 2:
raise ValueError("Number of classes is {}, expected 2.".format(
self.kernel.classes_.shape[0]))
# # get the position of match probabilities
# classes = list(self.kernel.classes_)
# return classes.index(0)
return 0 | def _nonmatch_class_pos(self) | Return the position of the non-match class. | 7.031487 | 6.54406 | 1.074484 |
m = self.kernel.feature_log_prob_[self._match_class_pos()]
return self._prob_inverse_transform(m) | def log_m_probs(self) | Log probability P(x_i==1|Match) as described in the FS framework. | 21.882383 | 16.934679 | 1.292164 |
u = self.kernel.feature_log_prob_[self._nonmatch_class_pos()]
return self._prob_inverse_transform(u) | def log_u_probs(self) | Log probability P(x_i==1|Non-match) as described in the FS framework. | 26.623386 | 16.505678 | 1.612983 |
m = self.kernel.feature_log_prob_[self._match_class_pos()]
u = self.kernel.feature_log_prob_[self._nonmatch_class_pos()]
return self._prob_inverse_transform(m - u) | def log_weights(self) | Log weights as described in the FS framework. | 9.573521 | 8.837487 | 1.083285 |
log_m = self.kernel.feature_log_prob_[self._match_class_pos()]
return self._prob_inverse_transform(numpy.exp(log_m)) | def m_probs(self) | Probability P(x_i==1|Match) as described in the FS framework. | 17.686941 | 12.97456 | 1.363202 |
log_u = self.kernel.feature_log_prob_[self._nonmatch_class_pos()]
return self._prob_inverse_transform(numpy.exp(log_u)) | def u_probs(self) | Probability P(x_i==1|Non-match) as described in the FS framework. | 20.090752 | 13.323194 | 1.507953 |
m = self.kernel.feature_log_prob_[self._match_class_pos()]
u = self.kernel.feature_log_prob_[self._nonmatch_class_pos()]
return self._prob_inverse_transform(numpy.exp(m - u)) | def weights(self) | Weights as described in the FS framework. | 9.195189 | 8.426649 | 1.091203 |
# Set the start point of the classifier.
self.kernel.init = numpy.array(
[[0.05] * len(list(comparison_vectors)),
[0.95] * len(list(comparison_vectors))]) | def _initialise_classifier(self, comparison_vectors) | Set the centers of the clusters. | 6.327798 | 5.771084 | 1.096466 |
setdiff = set(label) - set(df.columns.tolist())
if len(setdiff) == 0:
return True
else:
return False | def is_label_dataframe(label, df) | check column label existence | 3.685113 | 3.333835 | 1.105367 |
if isinstance(x, list):
return x
elif isinstance(x, tuple):
return list(x)
elif x is None:
return none_value
else:
return [x] | def listify(x, none_value=[]) | Make a list of the argument if it is not a list. | 1.947791 | 1.743411 | 1.11723 |
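A few illustrative calls, assuming listify is defined as above:

print(listify('a'))                     # ['a']
print(listify(('a', 'b')))              # ['a', 'b']
print(listify(None))                    # [] (the default none_value)
print(listify(None, none_value=None))   # None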
return pandas.DataFrame(index.tolist(), index=index, columns=index.names) | def multi_index_to_frame(index) | Replicates MultiIndex.to_frame, which was introduced in pandas 0.21,
for the sake of backwards compatibility. | 6.642571 | 5.902788 | 1.125328 |
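A small sketch of the backport above, assuming it is defined in the current module:

import pandas as pd

mi = pd.MultiIndex.from_tuples([(1, 'a'), (2, 'b')], names=['num', 'letter'])
frame = multi_index_to_frame(mi)

print(frame.columns.tolist())   # ['num', 'letter']
print(frame.index.equals(mi))   # True: the original MultiIndex is kept as the index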
Ntotal = index.shape[0]
Nsections = int(chunks)
if Nsections <= 0:
raise ValueError('number of sections must be larger than 0.')
Neach_section, extras = divmod(Ntotal, Nsections)
section_sizes = ([0] + extras * [Neach_section + 1] +
(Nsections - extras) * [Neach_section])
div_points = numpy.array(section_sizes).cumsum()
sub_ind = []
for i in range(Nsections):
st = div_points[i]
end = div_points[i + 1]
sub_ind.append(index[st:end])
return sub_ind | def index_split(index, chunks) | Function to split pandas.Index and pandas.MultiIndex objects.
Split :class:`pandas.Index` and :class:`pandas.MultiIndex` objects
into chunks. This function is based on :func:`numpy.array_split`.
Parameters
----------
index : pandas.Index, pandas.MultiIndex
A pandas.Index or pandas.MultiIndex to split into chunks.
chunks : int
The number of parts to split the index into.
Returns
-------
list
A list with chunked pandas.Index or pandas.MultiIndex objects. | 3.336728 | 3.526477 | 0.946193 |
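A usage sketch, assuming index_split is available as defined above:

import pandas as pd

idx = pd.MultiIndex.from_product([[1, 2], ['a', 'b', 'c']])   # 6 pairs
chunks = index_split(idx, 4)

print(len(chunks))                # 4
print([len(c) for c in chunks])   # [2, 2, 1, 1]: chunk sizes differ by at most one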
if indexing_type == "label":
data = frame.loc[multi_index.get_level_values(level_i)]
data.index = multi_index
elif indexing_type == "position":
data = frame.iloc[multi_index.get_level_values(level_i)]
data.index = multi_index
else:
raise ValueError("indexing_type needs to be 'label' or 'position'")
return data | def frame_indexing(frame, multi_index, level_i, indexing_type='label') | Index dataframe based on one level of MultiIndex.
Arguments
---------
frame : pandas.DataFrame
The dataframe to select records from.
multi_index : pandas.MultiIndex
A pandas MultiIndex where one of the levels is used to sample the
dataframe with.
level_i : int, str
The level of the MultiIndex to index on.
indexing_type : str
The type of indexing. The value can be 'label' or 'position'.
Default 'label'. | 1.992462 | 2.023763 | 0.984533 |
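A small example of label-based indexing with the helper above; the record identifiers are made up for illustration:

import pandas as pd

df_a = pd.DataFrame({'name': ['alice', 'bob']}, index=['a1', 'a2'])
pairs = pd.MultiIndex.from_tuples(
    [('a1', 'b1'), ('a2', 'b2'), ('a1', 'b2')], names=['rec_a', 'rec_b'])

# Repeat the rows of df_a so that they line up with the candidate pairs.
aligned = frame_indexing(df_a, pairs, level_i='rec_a')
print(aligned['name'].tolist())     # ['alice', 'bob', 'alice']
print(aligned.index.equals(pairs))  # True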
if pandas.notnull(missing_value):
if isinstance(series_or_arr, (numpy.ndarray)):
series_or_arr[numpy.isnan(series_or_arr)] = missing_value
else:
series_or_arr.fillna(missing_value, inplace=True)
return series_or_arr | def fillna(series_or_arr, missing_value=0.0) | Fill missing values in pandas objects and numpy arrays.
Arguments
---------
series_or_arr : pandas.Series, numpy.ndarray
The numpy array or pandas series for which the missing values
need to be replaced.
missing_value : float, int, str
The value to replace the missing value with. Default 0.0.
Returns
-------
pandas.Series, numpy.ndarray
The numpy array or pandas series with the missing values
filled. | 2.00923 | 2.558303 | 0.785376 |
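A short sketch of the helper above on both supported input types; note that the input is modified in place:

import numpy as np
import pandas as pd

arr = np.array([1.0, np.nan, 3.0])
print(fillna(arr))                            # [1. 0. 3.] -- default missing_value is 0.0

s = pd.Series([1.0, np.nan, 3.0])
print(fillna(s, missing_value=-1).tolist())   # [1.0, -1.0, 3.0]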
model = None
if hasattr(field, 'related_model') and field.related_model: # pragma: no cover
model = field.related_model
# Django<1.8 doesn't have the related_model API, so we need to use rel,
# which was removed in Django 2.0
elif hasattr(field, 'rel') and field.rel: # pragma: no cover
model = field.rel.to
return model | def get_related_model(field) | Gets the related model from a related field | 3.518676 | 3.350489 | 1.050198 |
df = self.to_dataframe(fieldnames, verbose=verbose,
coerce_float=coerce_float)
return df.pivot_table(values=values, fill_value=fill_value, index=rows,
columns=cols, aggfunc=aggfunc, margins=margins,
dropna=dropna) | def to_pivot_table(self, fieldnames=(), verbose=True,
values=None, rows=None, cols=None,
aggfunc='mean', fill_value=None, margins=False,
dropna=True, coerce_float=True) | A convenience method for creating a spreadsheet-style pivot table
as a DataFrame
Parameters
----------
fieldnames: The model field names (columns) to utilise in creating
the DataFrame. You can span relationships in the usual
Django ORM way by using the foreign key field name
separated by double underscores and refer to a field
in a related model.
values: The field to use to calculate the values to aggregate.
rows: The list of field names to group on
Keys to group on the x-axis of the pivot table
cols: The list of column names or arrays to group on
Keys to group on the y-axis of the pivot table
aggfunc: How to aggregate the values. By default this would be
``numpy.mean``. A list of aggregates functions can be passed
In this case the resulting pivot table will have
hierarchical columns whose top level are the function names
(inferred from the function objects themselves)
fill_value: A scalar value to replace the missing values with
margins: Boolean, default False. Add all row / columns
(e.g. for subtotal / grand totals)
dropna: Boolean, default True.
Do not include columns whose entries are all NaN
verbose: If this is ``True`` then populate the DataFrame with the
human readable versions for foreign key fields, else use the
actual values set in the model
coerce_float: Attempt to convert values of non-string, non-numeric
objects (like decimal.Decimal) to floating point. | 2.073924 | 2.743435 | 0.755959 |
return read_frame(self, fieldnames=fieldnames, verbose=verbose,
index_col=index, coerce_float=coerce_float,
datetime_index=datetime_index) | def to_dataframe(self, fieldnames=(), verbose=True, index=None,
coerce_float=False, datetime_index=False) | Returns a DataFrame from the queryset
Parameters
----------
fieldnames: The model field names (columns) to utilise in creating
the DataFrame. You can span relationships in the usual
Django ORM way by using the foreign key field name
separated by double underscores and refer to a field
in a related model.
index: specify the field to use for the index. If the index
field is not in fieldnames it will be appended. This
is mandatory for timeseries.
verbose: If this is ``True`` then populate the DataFrame with the
human readable versions for foreign key fields else
use the actual values set in the model
coerce_float: Attempt to convert values of non-string, non-numeric
objects (like decimal.Decimal) to floating point.
datetime_index: specify whether index should be converted to a
DateTimeIndex. | 2.364197 | 3.486945 | 0.678014 |
if fieldnames:
fieldnames = pd.unique(fieldnames)
if index_col is not None and index_col not in fieldnames:
# Add it to the field names if not already there
fieldnames = tuple(fieldnames) + (index_col,)
fields = to_fields(qs, fieldnames)
elif is_values_queryset(qs):
if django.VERSION < (1, 9): # pragma: no cover
annotation_field_names = list(qs.query.annotation_select)
if annotation_field_names is None:
annotation_field_names = []
extra_field_names = qs.extra_names
if extra_field_names is None:
extra_field_names = []
select_field_names = qs.field_names
else: # pragma: no cover
annotation_field_names = list(qs.query.annotation_select)
extra_field_names = list(qs.query.extra_select)
select_field_names = list(qs.query.values_select)
fieldnames = select_field_names + annotation_field_names + \
extra_field_names
fields = [None if '__' in f else qs.model._meta.get_field(f)
for f in select_field_names] + \
[None] * (len(annotation_field_names) + len(extra_field_names))
uniq_fields = set()
fieldnames, fields = zip(
*(f for f in zip(fieldnames, fields)
if f[0] not in uniq_fields and not uniq_fields.add(f[0])))
else:
fields = qs.model._meta.fields
fieldnames = [f.name for f in fields]
fieldnames += list(qs.query.annotation_select.keys())
if is_values_queryset(qs):
recs = list(qs)
else:
recs = list(qs.values_list(*fieldnames))
df = pd.DataFrame.from_records(recs, columns=fieldnames,
coerce_float=coerce_float)
if verbose:
update_with_verbose(df, fieldnames, fields)
if index_col is not None:
df.set_index(index_col, inplace=True)
if datetime_index:
df.index = pd.to_datetime(df.index, errors="ignore")
return df | def read_frame(qs, fieldnames=(), index_col=None, coerce_float=False,
verbose=True, datetime_index=False) | Returns a dataframe from a QuerySet
Optionally specify the field names/columns to utilize and
a field as the index
Parameters
----------
qs: The Django QuerySet.
fieldnames: The model field names to use in creating the frame.
You can span a relationship in the usual Django way
by using double underscores to specify a related field
in another model
index_col: specify the field to use for the index. If the index
field is not in the field list it will be appended
coerce_float : boolean, default False
Attempt to convert values of non-string, non-numeric data (like
decimal.Decimal) to floating point, useful for SQL result sets
verbose: boolean. If this is ``True`` then populate the DataFrame with the
human readable versions of any foreign key fields, else use
the primary key values.
The human readable version of the foreign key field is
defined in the ``__unicode__`` or ``__str__``
methods of the related class definition
datetime_index: specify whether index should be converted to a
DateTimeIndex. | 2.371132 | 2.379661 | 0.996416 |
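A hedged usage sketch for read_frame; the Django app and model below (myapp, Trade) are hypothetical and only illustrate the call:

from django_pandas.io import read_frame
from myapp.models import Trade   # hypothetical model with symbol, price and traded_at fields

qs = Trade.objects.all()
df = read_frame(
    qs,
    fieldnames=['symbol', 'price', 'traded_at'],
    index_col='traded_at',
    datetime_index=True,   # convert the index to a DatetimeIndex
    verbose=True)          # render foreign keys with their human readable values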
if root is None:
return 0
left = _is_balanced(root.left)
if left < 0:
return -1
right = _is_balanced(root.right)
if right < 0:
return -1
return -1 if abs(left - right) > 1 else max(left, right) + 1 | def _is_balanced(root) | Return the height if the binary tree is balanced, -1 otherwise.
:param root: Root node of the binary tree.
:type root: binarytree.Node | None
:return: Height if the binary tree is balanced, -1 otherwise.
:rtype: int | 1.681112 | 1.710255 | 0.98296 |
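A quick check of the helper above, assuming it is defined alongside binarytree.Node:

from binarytree import Node

balanced = Node(1)
balanced.left = Node(2)
balanced.right = Node(3)
print(_is_balanced(balanced))   # non-negative: the tree is balanced

skewed = Node(1)
skewed.left = Node(2)
skewed.left.left = Node(3)
skewed.left.left.left = Node(4)
print(_is_balanced(skewed))     # -1: subtree heights differ by more than one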
if root is None:
return True
return (
min_value < root.value < max_value and
_is_bst(root.left, min_value, root.value) and
_is_bst(root.right, root.value, max_value)
) | def _is_bst(root, min_value=float('-inf'), max_value=float('inf')) | Check if the binary tree is a BST (binary search tree).
:param root: Root node of the binary tree.
:type root: binarytree.Node | None
:param min_value: Minimum node value seen.
:type min_value: int | float
:param max_value: Maximum node value seen.
:type max_value: int | float
:return: True if the binary tree is a BST, False otherwise.
:rtype: bool | 1.630854 | 1.749774 | 0.932037 |
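A quick sanity check of the recursive BST validation above, assuming binarytree.Node is available:

from binarytree import Node

good = Node(2)
good.left = Node(1)
good.right = Node(3)
print(_is_bst(good))   # True

bad = Node(2)
bad.left = Node(3)     # left child larger than its parent
print(_is_bst(bad))    # False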
max_node_count = 2 ** (height + 1) - 1
node_values = list(range(max_node_count))
return _build_bst_from_sorted_values(node_values) | def _generate_perfect_bst(height) | Generate a perfect BST (binary search tree) and return its root.
:param height: Height of the BST.
:type height: int
:return: Root node of the BST.
:rtype: binarytree.Node | 3.131277 | 4.007696 | 0.781316 |
if len(sorted_values) == 0:
return None
mid_index = len(sorted_values) // 2
root = Node(sorted_values[mid_index])
root.left = _build_bst_from_sorted_values(sorted_values[:mid_index])
root.right = _build_bst_from_sorted_values(sorted_values[mid_index + 1:])
return root | def _build_bst_from_sorted_values(sorted_values) | Recursively build a perfect BST from odd number of sorted values.
:param sorted_values: Odd number of sorted values.
:type sorted_values: [int | float]
:return: Root node of the BST.
:rtype: binarytree.Node | 1.407208 | 1.428107 | 0.985366 |
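A minimal sketch of the recursion above; the middle element of the sorted values always becomes the subtree root:

values = list(range(7))   # 2 ** 3 - 1 sorted values, enough for a perfect BST
root = _build_bst_from_sorted_values(values)

print(root.value)        # 3
print(root.left.value)   # 1
print(root.right.value)  # 5
print(root.left.left.value, root.left.right.value)   # 0 2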
max_leaf_count = 2 ** height
half_leaf_count = max_leaf_count // 2
# A very naive way of mimicking normal distribution
roll_1 = random.randint(0, half_leaf_count)
roll_2 = random.randint(0, max_leaf_count - half_leaf_count)
return roll_1 + roll_2 or half_leaf_count | def _generate_random_leaf_count(height) | Return a random leaf count for building binary trees.
:param height: Height of the binary tree.
:type height: int
:return: Random leaf count.
:rtype: int | 3.187394 | 3.350447 | 0.951334 |
max_node_count = 2 ** (height + 1) - 1
node_values = list(range(max_node_count))
random.shuffle(node_values)
return node_values | def _generate_random_node_values(height) | Return random node values for building binary trees.
:param height: Height of the binary tree.
:type height: int
:return: Randomly generated node values.
:rtype: [int] | 2.451054 | 2.615164 | 0.937247 |
is_descending = True
is_ascending = True
min_node_value = root.value
max_node_value = root.value
size = 0
leaf_count = 0
min_leaf_depth = 0
max_leaf_depth = -1
is_strict = True
is_complete = True
current_nodes = [root]
non_full_node_seen = False
while len(current_nodes) > 0:
max_leaf_depth += 1
next_nodes = []
for node in current_nodes:
size += 1
value = node.value
min_node_value = min(value, min_node_value)
max_node_value = max(value, max_node_value)
# Node is a leaf.
if node.left is None and node.right is None:
if min_leaf_depth == 0:
min_leaf_depth = max_leaf_depth
leaf_count += 1
if node.left is not None:
if node.left.value > value:
is_descending = False
elif node.left.value < value:
is_ascending = False
next_nodes.append(node.left)
is_complete = not non_full_node_seen
else:
non_full_node_seen = True
if node.right is not None:
if node.right.value > value:
is_descending = False
elif node.right.value < value:
is_ascending = False
next_nodes.append(node.right)
is_complete = not non_full_node_seen
else:
non_full_node_seen = True
# If we see a node with only one child, it is not strict
is_strict &= (node.left is None) == (node.right is None)
current_nodes = next_nodes
return {
'height': max_leaf_depth,
'size': size,
'is_max_heap': is_complete and is_descending,
'is_min_heap': is_complete and is_ascending,
'is_perfect': leaf_count == 2 ** max_leaf_depth,
'is_strict': is_strict,
'is_complete': is_complete,
'leaf_count': leaf_count,
'min_node_value': min_node_value,
'max_node_value': max_node_value,
'min_leaf_depth': min_leaf_depth,
'max_leaf_depth': max_leaf_depth,
} | def _get_tree_properties(root) | Inspect the binary tree and return its properties (e.g. height).
:param root: Root node of the binary tree.
:rtype: binarytree.Node
:return: Binary tree properties.
:rtype: dict | 1.861789 | 1.892175 | 0.983941 |
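An illustrative call to the property inspector above, assuming binarytree.Node is available:

from binarytree import Node

root = Node(1)
root.left = Node(2)
root.right = Node(3)
root.left.left = Node(4)

props = _get_tree_properties(root)
print(props['height'])       # 2
print(props['size'])         # 4
print(props['leaf_count'])   # 2
print(props['is_strict'])    # False: node 2 has a left child but no right child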
nodes = [None if v is None else Node(v) for v in values]
for index in range(1, len(nodes)):
node = nodes[index]
if node is not None:
parent_index = (index - 1) // 2
parent = nodes[parent_index]
if parent is None:
raise NodeNotFoundError(
'parent node missing at index {}'.format(parent_index))
setattr(parent, 'left' if index % 2 else 'right', node)
return nodes[0] if nodes else None | def build(values) | Build a tree from `list representation`_ and return its root node.
.. _list representation:
https://en.wikipedia.org/wiki/Binary_tree#Arrays
:param values: List representation of the binary tree, which is a list of
node values in breadth-first order starting from the root (current
node). If a node is at index i, its left child is always at 2i + 1,
right child at 2i + 2, and parent at floor((i - 1) / 2). None indicates
absence of a node at that index. See example below for an illustration.
:type values: [int | float | None]
:return: Root node of the binary tree.
:rtype: binarytree.Node
:raise binarytree.exceptions.NodeNotFoundError: If the list representation
is malformed (e.g. a parent node is missing).
**Example**:
.. doctest::
>>> from binarytree import build
>>>
>>> root = build([1, 2, 3, None, 4])
>>>
>>> print(root)
<BLANKLINE>
__1
/ \\
2 3
\\
4
<BLANKLINE>
.. doctest::
>>> from binarytree import build
>>>
>>> root = build([None, 2, 3]) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
NodeNotFoundError: parent node missing at index 0 | 2.614406 | 2.656133 | 0.98429 |
_validate_tree_height(height)
values = _generate_random_node_values(height)
if is_perfect:
return build(values)
leaf_count = _generate_random_leaf_count(height)
root = Node(values.pop(0))
leaves = set()
for value in values:
node = root
depth = 0
inserted = False
while depth < height and not inserted:
attr = random.choice(('left', 'right'))
if getattr(node, attr) is None:
setattr(node, attr, Node(value))
inserted = True
node = getattr(node, attr)
depth += 1
if inserted and depth == height:
leaves.add(node)
if len(leaves) == leaf_count:
break
return root | def tree(height=3, is_perfect=False) | Generate a random binary tree and return its root node.
:param height: Height of the tree (default: 3, range: 0 - 9 inclusive).
:type height: int
:param is_perfect: If set to True (default: False), a perfect binary tree
with all levels filled is returned. If set to False, a perfect binary
tree may still be generated by chance.
:type is_perfect: bool
:return: Root node of the binary tree.
:rtype: binarytree.Node
:raise binarytree.exceptions.TreeHeightError: If height is invalid.
**Example**:
.. doctest::
>>> from binarytree import tree
>>>
>>> root = tree()
>>>
>>> root.height
3
.. doctest::
>>> from binarytree import tree
>>>
>>> root = tree(height=5, is_perfect=True)
>>>
>>> root.height
5
>>> root.is_perfect
True
.. doctest::
>>> from binarytree import tree
>>>
>>> root = tree(height=20) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
TreeHeightError: height must be an int between 0 - 9 | 2.776908 | 3.475352 | 0.799029 |
_validate_tree_height(height)
if is_perfect:
return _generate_perfect_bst(height)
values = _generate_random_node_values(height)
leaf_count = _generate_random_leaf_count(height)
root = Node(values.pop(0))
leaves = set()
for value in values:
node = root
depth = 0
inserted = False
while depth < height and not inserted:
attr = 'left' if node.value > value else 'right'
if getattr(node, attr) is None:
setattr(node, attr, Node(value))
inserted = True
node = getattr(node, attr)
depth += 1
if inserted and depth == height:
leaves.add(node)
if len(leaves) == leaf_count:
break
return root | def bst(height=3, is_perfect=False) | Generate a random BST (binary search tree) and return its root node.
:param height: Height of the BST (default: 3, range: 0 - 9 inclusive).
:type height: int
:param is_perfect: If set to True (default: False), a perfect BST with all
levels filled is returned. If set to False, a perfect BST may still be
generated by chance.
:type is_perfect: bool
:return: Root node of the BST.
:rtype: binarytree.Node
:raise binarytree.exceptions.TreeHeightError: If height is invalid.
**Example**:
.. doctest::
>>> from binarytree import bst
>>>
>>> root = bst()
>>>
>>> root.height
3
>>> root.is_bst
True
.. doctest::
>>> from binarytree import bst
>>>
>>> root = bst(10) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
TreeHeightError: height must be an int between 0 - 9 | 2.559216 | 3.032848 | 0.843833 |
_validate_tree_height(height)
values = _generate_random_node_values(height)
if not is_perfect:
# Randomly cut some of the leaf nodes away
random_cut = random.randint(2 ** height, len(values))
values = values[:random_cut]
if is_max:
negated = [-v for v in values]
heapq.heapify(negated)
return build([-v for v in negated])
else:
heapq.heapify(values)
return build(values) | def heap(height=3, is_max=True, is_perfect=False) | Generate a random heap and return its root node.
:param height: Height of the heap (default: 3, range: 0 - 9 inclusive).
:type height: int
:param is_max: If set to True (default: True), generate a max heap. If set
to False, generate a min heap. A binary tree with only the root node is
considered both a min and max heap.
:type is_max: bool
:param is_perfect: If set to True (default: False), a perfect heap with all
levels filled is returned. If set to False, a perfect heap may still be
generated by chance.
:type is_perfect: bool
:return: Root node of the heap.
:rtype: binarytree.Node
:raise binarytree.exceptions.TreeHeightError: If height is invalid.
**Example**:
.. doctest::
>>> from binarytree import heap
>>>
>>> root = heap()
>>>
>>> root.height
3
>>> root.is_max_heap
True
.. doctest::
>>> from binarytree import heap
>>>
>>> root = heap(4, is_max=False)
>>>
>>> root.height
4
>>> root.is_min_heap
True
.. doctest::
>>> from binarytree import heap
>>>
>>> root = heap(5, is_max=False, is_perfect=True)
>>>
>>> root.height
5
>>> root.is_min_heap
True
>>> root.is_perfect
True
.. doctest::
>>> from binarytree import heap
>>>
>>> root = heap(-1) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
TreeHeightError: height must be an int between 0 - 9 | 3.804042 | 5.266496 | 0.72231 |
lines = _build_tree_string(self, 0, index, delimiter)[0]
print('\n' + '\n'.join((line.rstrip() for line in lines))) | def pprint(self, index=False, delimiter='-') | Pretty-print the binary tree.
:param index: If set to True (default: False), display level-order_
indexes using the format: ``{index}{delimiter}{value}``.
:type index: bool
:param delimiter: Delimiter character between the node index and
the node value (default: '-').
:type delimiter: str | unicode
**Example**:
.. doctest::
>>> from binarytree import Node
>>>
>>> root = Node(1) # index: 0, value: 1
>>> root.left = Node(2) # index: 1, value: 2
>>> root.right = Node(3) # index: 2, value: 3
>>> root.left.right = Node(4) # index: 4, value: 4
>>>
>>> root.pprint()
<BLANKLINE>
__1
/ \\
2 3
\\
4
<BLANKLINE>
>>> root.pprint(index=True) # Format: {index}-{value}
<BLANKLINE>
_____0-1_
/ \\
1-2_ 2-3
\\
4-4
<BLANKLINE>
.. note::
If you do not need level-order_ indexes in the output string, use
:func:`binarytree.Node.__str__` instead.
.. _level-order:
https://en.wikipedia.org/wiki/Tree_traversal#Breadth-first_search | 5.663034 | 8.855404 | 0.6395 |
has_more_nodes = True
visited = set()
to_visit = [self]
index = 0
while has_more_nodes:
has_more_nodes = False
next_nodes = []
for node in to_visit:
if node is None:
next_nodes.extend((None, None))
else:
if node in visited:
raise NodeReferenceError(
'cyclic node reference at index {}'.format(index))
if not isinstance(node, Node):
raise NodeTypeError(
'invalid node instance at index {}'.format(index))
if not isinstance(node.value, numbers.Number):
raise NodeValueError(
'invalid node value at index {}'.format(index))
if node.left is not None or node.right is not None:
has_more_nodes = True
visited.add(node)
next_nodes.extend((node.left, node.right))
index += 1
to_visit = next_nodes | def validate(self) | Check if the binary tree is malformed.
:raise binarytree.exceptions.NodeReferenceError: If there is a
cyclic reference to a node in the binary tree.
:raise binarytree.exceptions.NodeTypeError: If a node is not an
instance of :class:`binarytree.Node`.
:raise binarytree.exceptions.NodeValueError: If a node value is not a
number (e.g. int, float).
**Example**:
.. doctest::
>>> from binarytree import Node
>>>
>>> root = Node(1)
>>> root.left = Node(2)
>>> root.right = root # Cyclic reference to root
>>>
>>> root.validate() # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
NodeReferenceError: cyclic node reference at index 0 | 2.550714 | 2.310689 | 1.103876 |
current_nodes = [self]
has_more_nodes = True
values = []
while has_more_nodes:
has_more_nodes = False
next_nodes = []
for node in current_nodes:
if node is None:
values.append(None)
next_nodes.extend((None, None))
continue
if node.left is not None or node.right is not None:
has_more_nodes = True
values.append(node.value)
next_nodes.extend((node.left, node.right))
current_nodes = next_nodes
# Get rid of trailing None's
while values and values[-1] is None:
values.pop()
return values | def values(self) | Return the `list representation`_ of the binary tree.
.. _list representation:
https://en.wikipedia.org/wiki/Binary_tree#Arrays
:return: List representation of the binary tree, which is a list of
node values in breadth-first order starting from the root (current
node). If a node is at index i, its left child is always at 2i + 1,
right child at 2i + 2, and parent at index floor((i - 1) / 2). None
indicates absence of a node at that index. See example below for an
illustration.
:rtype: [int | float | None]
**Example**:
.. doctest::
>>> from binarytree import Node
>>>
>>> root = Node(1)
>>> root.left = Node(2)
>>> root.right = Node(3)
>>> root.left.right = Node(4)
>>>
>>> root.values
[1, 2, 3, None, 4] | 2.289678 | 2.631479 | 0.870111 |