code                      string
signature                 string
docstring                 string
loss_without_docstring    float64
loss_with_docstring       float64
factor                    float64
from scipy.stats import norm a = np.asarray(a) c = norm.ppf(3 / 4.) if normalize else 1 center = np.apply_over_axes(np.median, a, axis) return np.median((np.fabs(a - center)) / c, axis=axis)
def mad(a, normalize=True, axis=0)
Median Absolute Deviation along given axis of an array. Parameters ---------- a : array-like Input array. normalize : boolean If True, scale by a normalization constant (~0.67) axis : int, optional The default is 0. Can also be None. Returns ------- mad : float mad = median(abs(a - median(a))) / c References ---------- .. [1] https://en.wikipedia.org/wiki/Median_absolute_deviation Examples -------- >>> from pingouin import mad >>> a = [1.2, 5.4, 3.2, 7.8, 2.5] >>> mad(a) 2.965204437011204 >>> mad(a, normalize=False) 2.0
3.774945
5.209544
0.724621
from scipy.stats import chi2 a = np.asarray(a) k = np.sqrt(chi2.ppf(0.975, 1)) return (np.fabs(a - np.median(a)) / mad(a)) > k
def madmedianrule(a)
Outlier detection based on the MAD-median rule. Parameters ---------- a : array-like Input array. Returns ------- outliers: boolean (same shape as a) Boolean array indicating whether each sample is an outlier (True) or not (False). References ---------- .. [1] Hall, P., Welsh, A.H., 1985. Limit theorems for the median deviation. Ann. Inst. Stat. Math. 37, 27–36. https://doi.org/10.1007/BF02481078 Examples -------- >>> from pingouin import madmedianrule >>> a = [-1.09, 1., 0.28, -1.51, -0.58, 6.61, -2.43, -0.43] >>> madmedianrule(a) array([False, False, False, False, False, True, False, False])
3.700927
5.426536
0.682005
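The boolean mask that madmedianrule returns composes directly with array indexing, so outlier removal is a one-liner. A minimal sketch reusing the docstring data (assumes pingouin is installed):

import numpy as np
from pingouin import madmedianrule

a = np.array([-1.09, 1., 0.28, -1.51, -0.58, 6.61, -2.43, -0.43])
cleaned = a[~madmedianrule(a)]  # keeps all samples except the flagged 6.61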
from scipy.stats import mannwhitneyu x = np.asarray(x) y = np.asarray(y) # Remove NA x, y = remove_na(x, y, paired=False) # Compute test if tail == 'one-sided': tail = 'less' if np.median(x) < np.median(y) else 'greater' uval, pval = mannwhitneyu(x, y, use_continuity=True, alternative=tail) # Effect size 1: common language effect size (McGraw and Wong 1992) diff = x[:, None] - y cles = max((diff < 0).sum(), (diff > 0).sum()) / diff.size # Effect size 2: rank biserial correlation (Wendt 1972) rbc = 1 - (2 * uval) / diff.size # diff.size = x.size * y.size # Fill output DataFrame stats = pd.DataFrame({}, index=['MWU']) stats['U-val'] = round(uval, 3) stats['p-val'] = pval stats['RBC'] = round(rbc, 3) stats['CLES'] = round(cles, 3) col_order = ['U-val', 'p-val', 'RBC', 'CLES'] stats = stats.reindex(columns=col_order) return stats
def mwu(x, y, tail='two-sided')
Mann-Whitney U Test (= Wilcoxon rank-sum test). It is the non-parametric version of the independent T-test. Parameters ---------- x, y : array_like First and second set of observations. x and y must be independent. tail : string Specify whether to return 'one-sided' or 'two-sided' p-value. Returns ------- stats : pandas DataFrame Test summary :: 'U-val' : U-value 'p-val' : p-value 'RBC' : rank-biserial correlation (effect size) 'CLES' : common language effect size Notes ----- mwu tests the hypothesis that data in x and y are samples from continuous distributions with equal medians. The test assumes that x and y are independent. This test corrects for ties and by default uses a continuity correction (see :py:func:`scipy.stats.mannwhitneyu` for details). The rank biserial correlation is the difference between the proportion of favorable evidence minus the proportion of unfavorable evidence (see Kerby 2014). The common language effect size is the probability (from 0 to 1) that a randomly selected observation from the first sample will be greater than a randomly selected observation from the second sample. References ---------- .. [1] Mann, H. B., & Whitney, D. R. (1947). On a test of whether one of two random variables is stochastically larger than the other. The annals of mathematical statistics, 50-60. .. [2] Kerby, D. S. (2014). The simple difference formula: An approach to teaching nonparametric correlation. Comprehensive Psychology, 3, 11-IT. .. [3] McGraw, K. O., & Wong, S. P. (1992). A common language effect size statistic. Psychological bulletin, 111(2), 361. Examples -------- >>> import numpy as np >>> from pingouin import mwu >>> np.random.seed(123) >>> x = np.random.uniform(low=0, high=1, size=20) >>> y = np.random.uniform(low=0.2, high=1.2, size=20) >>> mwu(x, y, tail='two-sided') U-val p-val RBC CLES MWU 97.0 0.00556 0.515 0.758
3.675413
3.013802
1.219527
from scipy.stats import chi2, rankdata, tiecorrect # Check data _check_dataframe(dv=dv, between=between, data=data, effects='between') # Remove NaN values data = data.dropna() # Reset index (avoid duplicate axis error) data = data.reset_index(drop=True) # Extract number of groups and total sample size groups = list(data[between].unique()) n_groups = len(groups) n = data[dv].size # Rank data, dealing with ties appropriately data['rank'] = rankdata(data[dv]) # Find the total of rank per groups grp = data.groupby(between)['rank'] sum_rk_grp = grp.sum().values n_per_grp = grp.count().values # Calculate chi-square statistic (H) H = (12 / (n * (n + 1)) * np.sum(sum_rk_grp**2 / n_per_grp)) - 3 * (n + 1) # Correct for ties H /= tiecorrect(data['rank'].values) # Calculate DOF and p-value ddof1 = n_groups - 1 p_unc = chi2.sf(H, ddof1) # Create output dataframe stats = pd.DataFrame({'Source': between, 'ddof1': ddof1, 'H': np.round(H, 3), 'p-unc': p_unc, }, index=['Kruskal']) col_order = ['Source', 'ddof1', 'H', 'p-unc'] stats = stats.reindex(columns=col_order) stats.dropna(how='all', axis=1, inplace=True) # Export to .csv if export_filename is not None: _export_table(stats, export_filename) return stats
def kruskal(dv=None, between=None, data=None, detailed=False, export_filename=None)
Kruskal-Wallis H-test for independent samples. Parameters ---------- dv : string Name of column containing the dependent variable. between : string Name of column containing the between factor. data : pandas DataFrame DataFrame export_filename : string Filename (without extension) for the output file. If None, do not export the table. By default, the file will be created in the current python console directory. To change that, specify the filename with full path. Returns ------- stats : DataFrame Test summary :: 'H' : The Kruskal-Wallis H statistic, corrected for ties 'p-unc' : Uncorrected p-value 'ddof1' : degrees of freedom Notes ----- The Kruskal-Wallis H-test tests the null hypothesis that the population medians of all of the groups are equal. It is a non-parametric version of ANOVA. The test works on 2 or more independent samples, which may have different sizes. Due to the assumption that H has a chi square distribution, the number of samples in each group must not be too small. A typical rule is that each sample must have at least 5 measurements. NaN values are automatically removed. Examples -------- Compute the Kruskal-Wallis H-test for independent samples. >>> from pingouin import kruskal, read_dataset >>> df = read_dataset('anova') >>> kruskal(dv='Pain threshold', between='Hair color', data=df) Source ddof1 H p-unc Kruskal Hair color 3 10.589 0.014172
3.844047
3.459535
1.111146
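kruskal expects long-format data with one row per observation. A hedged sketch building such a frame from two made-up samples; the column names 'dv' and 'group' are illustrative, not part of the original:

import pandas as pd
from pingouin import kruskal

df = pd.DataFrame({'dv': [2.9, 3.0, 2.5, 2.6, 3.2, 3.8, 2.7, 4.0, 2.8, 3.4],
                   'group': ['a'] * 5 + ['b'] * 5})
kruskal(dv='dv', between='group', data=df)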
from scipy.stats import rankdata, chi2, find_repeats # Check data _check_dataframe(dv=dv, within=within, data=data, subject=subject, effects='within') # Collapse to the mean data = data.groupby([subject, within]).mean().reset_index() # Remove NaN if data[dv].isnull().any(): data = remove_rm_na(dv=dv, within=within, subject=subject, data=data[[subject, within, dv]]) # Extract number of groups and total sample size grp = data.groupby(within)[dv] rm = list(data[within].unique()) k = len(rm) X = np.array([grp.get_group(r).values for r in rm]).T n = X.shape[0] # Rank per subject ranked = np.zeros(X.shape) for i in range(n): ranked[i] = rankdata(X[i, :]) ssbn = (ranked.sum(axis=0)**2).sum() # Compute the test statistic Q = (12 / (n * k * (k + 1))) * ssbn - 3 * n * (k + 1) # Correct for ties ties = 0 for i in range(n): replist, repnum = find_repeats(X[i]) for t in repnum: ties += t * (t * t - 1) c = 1 - ties / float(k * (k * k - 1) * n) Q /= c # Approximate the p-value ddof1 = k - 1 p_unc = chi2.sf(Q, ddof1) # Create output dataframe stats = pd.DataFrame({'Source': within, 'ddof1': ddof1, 'Q': np.round(Q, 3), 'p-unc': p_unc, }, index=['Friedman']) col_order = ['Source', 'ddof1', 'Q', 'p-unc'] stats = stats.reindex(columns=col_order) stats.dropna(how='all', axis=1, inplace=True) # Export to .csv if export_filename is not None: _export_table(stats, export_filename) return stats
def friedman(dv=None, within=None, subject=None, data=None, export_filename=None)
Friedman test for repeated measurements. Parameters ---------- dv : string Name of column containing the dependent variable. within : string Name of column containing the within-subject factor. subject : string Name of column containing the subject identifier. data : pandas DataFrame DataFrame export_filename : string Filename (without extension) for the output file. If None, do not export the table. By default, the file will be created in the current python console directory. To change that, specify the filename with full path. Returns ------- stats : DataFrame Test summary :: 'Q' : The Friedman Q statistic, corrected for ties 'p-unc' : Uncorrected p-value 'ddof1' : degrees of freedom Notes ----- The Friedman test is used for one-way repeated measures ANOVA by ranks. Data are expected to be in long-format. Note that if the dataset contains one or more other within subject factors, an automatic collapsing to the mean is applied on the dependent variable (same behavior as the ezANOVA R package). As such, results can differ from those of JASP. If you can, always double-check the results. Due to the assumption that the test statistic has a chi squared distribution, the p-value is only reliable for n > 10 and more than 6 repeated measurements. NaN values are automatically removed. Examples -------- Compute the Friedman test for repeated measurements. >>> from pingouin import friedman, read_dataset >>> df = read_dataset('rm_anova') >>> friedman(dv='DesireToKill', within='Disgustingness', ... subject='Subject', data=df) Source ddof1 Q p-unc Friedman Disgustingness 1 9.228 0.002384
3.63799
3.428509
1.0611
from scipy.stats import chi2 # Check data _check_dataframe(dv=dv, within=within, data=data, subject=subject, effects='within') # Remove NaN if data[dv].isnull().any(): data = remove_rm_na(dv=dv, within=within, subject=subject, data=data[[subject, within, dv]]) # Groupby and extract size grp = data.groupby(within)[dv] grp_s = data.groupby(subject)[dv] k = data[within].nunique() dof = k - 1 # n = grp.count().unique()[0] # Q statistic and p-value q = (dof * (k * np.sum(grp.sum()**2) - grp.sum().sum()**2)) / \ (k * grp.sum().sum() - np.sum(grp_s.sum()**2)) p_unc = chi2.sf(q, dof) # Create output dataframe stats = pd.DataFrame({'Source': within, 'dof': dof, 'Q': np.round(q, 3), 'p-unc': p_unc, }, index=['cochran']) # Export to .csv if export_filename is not None: _export_table(stats, export_filename) return stats
def cochran(dv=None, within=None, subject=None, data=None, export_filename=None)
Cochran Q test. Special case of the Friedman test when the dependent variable is binary. Parameters ---------- dv : string Name of column containing the binary dependent variable. within : string Name of column containing the within-subject factor. subject : string Name of column containing the subject identifier. data : pandas DataFrame DataFrame export_filename : string Filename (without extension) for the output file. If None, do not export the table. By default, the file will be created in the current python console directory. To change that, specify the filename with full path. Returns ------- stats : DataFrame Test summary :: 'Q' : The Cochran Q statistic 'p-unc' : Uncorrected p-value 'dof' : degrees of freedom Notes ----- The Cochran Q Test is a non-parametric test for ANOVA with repeated measures where the dependent variable is binary. Data are expected to be in long-format. NaN values are automatically removed from the data. The Q statistic is defined as: .. math:: Q = \\frac{(r-1)(r\\sum_j^rx_j^2-N^2)}{rN-\\sum_i^nx_i^2} where :math:`N` is the total sum of all observations, :math:`j=1,...,r` where :math:`r` is the number of repeated measures, :math:`i=1,...,n` where :math:`n` is the number of observations per condition. The p-value is then approximated using a chi-square distribution with :math:`r-1` degrees of freedom: .. math:: Q \\sim \\chi^2(r-1) References ---------- .. [1] Cochran, W.G., 1950. The comparison of percentages in matched samples. Biometrika 37, 256–266. https://doi.org/10.1093/biomet/37.3-4.256 Examples -------- Compute the Cochran Q test for repeated measurements. >>> from pingouin import cochran, read_dataset >>> df = read_dataset('cochran') >>> cochran(dv='Energetic', within='Time', subject='Subject', data=df) Source dof Q p-unc cochran Time 2 6.706 0.034981
3.915335
3.714557
1.054052
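The Q formula in the notes can be checked by hand. A sketch with hypothetical binary responses (rows are subjects, columns are the repeated measures; the data and resulting values are illustrative only):

import numpy as np
from scipy.stats import chi2

X = np.array([[1, 1, 0], [1, 0, 0], [1, 1, 1], [1, 0, 0]])  # made-up binary data
k = X.shape[1]
col_sums, subj_sums, N = X.sum(axis=0), X.sum(axis=1), X.sum()
q = (k - 1) * (k * np.sum(col_sums**2) - N**2) / (k * N - np.sum(subj_sums**2))
p = chi2.sf(q, k - 1)  # q ≈ 4.667, p ≈ 0.097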
if not _isconvertible(float, string): return False elif isinstance(string, (_text_type, _binary_type)) and ( math.isinf(float(string)) or math.isnan(float(string))): return string.lower() in ['inf', '-inf', 'nan'] return True
def _isnumber(string)
>>> _isnumber("123.45") True >>> _isnumber("123") True >>> _isnumber("spam") False >>> _isnumber("123e45678") False >>> _isnumber("inf") True
3.808424
4.46587
0.852784
return isinstance(string, _bool_type) or\ (isinstance(string, (_binary_type, _text_type)) and string in ("True", "False"))
def _isbool(string)
>>> _isbool(True) True >>> _isbool("False") True >>> _isbool(1) False
5.27175
6.219698
0.847589
if has_invisible and \ (isinstance(string, _text_type) or isinstance(string, _binary_type)): string = _strip_invisible(string) if string is None: return _none_type elif hasattr(string, "isoformat"): # datetime.datetime, date, and time return _text_type elif _isbool(string): return _bool_type elif _isint(string) and numparse: return int elif _isint(string, _long_type) and numparse: return int elif _isnumber(string) and numparse: return float elif isinstance(string, _binary_type): return _binary_type else: return _text_type
def _type(string, has_invisible=True, numparse=True)
The least generic type (type(None), int, float, str, unicode). >>> _type(None) is type(None) True >>> _type("foo") is type("") True >>> _type("1") is type(1) True >>> _type('\x1b[31m42\x1b[0m') is type(42) True
2.660929
2.711758
0.981256
# optional wide-character support if wcwidth is not None and WIDE_CHARS_MODE: len_fn = wcwidth.wcswidth else: len_fn = len if isinstance(s, _text_type) or isinstance(s, _binary_type): return len_fn(_strip_invisible(s)) else: return len_fn(_text_type(s))
def _visible_width(s)
Visible width of a printed string. ANSI color codes are removed. >>> _visible_width('\x1b[31mhello\x1b[0m'), _visible_width("world") (5, 5)
4.50258
5.076517
0.886943
return max(map(line_width_fn, re.split("[\r\n]", multiline_s)))
def _multiline_width(multiline_s, line_width_fn=len)
Visible width of potentially multiline content.
3.526792
3.623228
0.973384
if has_invisible: line_width_fn = _visible_width elif enable_widechars: # optional wide-character support if available line_width_fn = wcwidth.wcswidth else: line_width_fn = len if is_multiline: def width_fn(s): return _multiline_width(s, line_width_fn) else: width_fn = line_width_fn return width_fn
def _choose_width_fn(has_invisible, enable_widechars, is_multiline)
Return a function to calculate visible cell width.
2.791327
2.700332
1.033697
strings, padfn = _align_column_choose_padfn( strings, alignment, has_invisible) width_fn = _choose_width_fn(has_invisible, enable_widechars, is_multiline) s_widths = list(map(width_fn, strings)) maxwidth = max(max(s_widths), minwidth) # TODO: refactor column alignment in single-line and multiline modes if is_multiline: if not enable_widechars and not has_invisible: padded_strings = [ "\n".join([padfn(maxwidth, s) for s in ms.splitlines()]) for ms in strings] else: # enable wide-character width corrections s_lens = [max((len(s) for s in re.split("[\r\n]", ms))) for ms in strings] visible_widths = [maxwidth - (w - l) for w, l in zip(s_widths, s_lens)] # wcswidth and _visible_width don't count invisible characters; # padfn doesn't need to apply another correction padded_strings = ["\n".join([padfn(w, s) for s in (ms.splitlines() or ms)]) for ms, w in zip(strings, visible_widths)] else: # single-line cell values if not enable_widechars and not has_invisible: padded_strings = [padfn(maxwidth, s) for s in strings] else: # enable wide-character width corrections s_lens = list(map(len, strings)) visible_widths = [maxwidth - (w - l) for w, l in zip(s_widths, s_lens)] # wcswidth and _visible_width don't count invisible characters; # padfn doesn't need to apply another correction padded_strings = [ padfn( w, s) for s, w in zip( strings, visible_widths)] return padded_strings
def _align_column(strings, alignment, minwidth=0, has_invisible=True, enable_widechars=False, is_multiline=False)
[string] -> [padded_string]
2.923502
2.878685
1.015569
types = [_type(s, has_invisible, numparse) for s in strings] return reduce(_more_generic, types, _bool_type)
def _column_type(strings, has_invisible=True, numparse=True)
The least generic type all column values are convertible to. >>> _column_type([True, False]) is _bool_type True >>> _column_type(["1", "2"]) is _int_type True >>> _column_type(["1", "2.3"]) is _float_type True >>> _column_type(["1", "2.3", "four"]) is _text_type True >>> _column_type(["four", '\u043f\u044f\u0442\u044c']) is _text_type True >>> _column_type([None, "brux"]) is _text_type True >>> _column_type([1, 2, None]) is _int_type True >>> import datetime as dt >>> _column_type([dt.datetime(1991,2,19), dt.time(17,35)]) is _text_type True
6.315858
8.598729
0.734511
"Pad string header to width chars given known visible_width of the header." if is_multiline: header_lines = re.split(_multiline_codes, header) padded_lines = [_align_header(h, alignment, width, width_fn(h)) for h in header_lines] return "\n".join(padded_lines) # else: not multiline ninvisible = len(header) - visible_width width += ninvisible if alignment == "left": return _padright(width, header) elif alignment == "center": return _padboth(width, header) elif not alignment: return "{0}".format(header) else: return _padleft(width, header)
def _align_header(header, alignment, width, visible_width, is_multiline=False, width_fn=None)
Pad string header to width chars given known visible_width of the header.
4.585926
3.345514
1.370769
if index is None or index is False: return rows if len(index) != len(rows): print('index=', index) print('rows=', rows) raise ValueError('index must be as long as the number of data rows') rows = [[v] + list(row) for v, row in zip(index, rows)] return rows
def _prepend_row_index(rows, index)
Add a left-most index column.
3.466048
3.338651
1.038158
if isinstance(disable_numparse, Iterable): numparses = [True] * column_count for index in disable_numparse: numparses[index] = False return numparses else: return [not disable_numparse] * column_count
def _expand_numparse(disable_numparse, column_count)
Return a list of bools of length `column_count` which indicates whether number parsing should be used on each column. If `disable_numparse` is a list of indices, the entries at those indices are False and everything else is True. If `disable_numparse` is a bool, then the returned list is all the same.
2.288369
2.273346
1.006608
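A quick check of the behavior described in the docstring:

_expand_numparse(disable_numparse=False, column_count=3)  # [True, True, True]
_expand_numparse(disable_numparse=True, column_count=3)   # [False, False, False]
_expand_numparse(disable_numparse=[1], column_count=3)    # [True, False, True]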
alpha = np.array(alpha) return np.remainder(alpha * n, 2 * np.pi)
def circ_axial(alpha, n)
Transforms n-axial data to a common scale. Parameters ---------- alpha : array Sample of angles in radians n : int Number of modes Returns ------- alpha : float Transformed angles Notes ----- Transform data with multiple modes (known as axial data) to a unimodal sample, for the purpose of certain analyses, such as computation of a mean resultant vector (see Berens 2009). Examples -------- Transform degrees to unimodal radians in the Berens 2009 neuro dataset. >>> import numpy as np >>> from pingouin import read_dataset >>> from pingouin.circular import circ_axial >>> df = read_dataset('circular') >>> alpha = df['Orientation'].values >>> alpha = circ_axial(np.deg2rad(alpha), 2)
4.331222
12.450227
0.347883
from scipy.stats import norm x = np.asarray(x) y = np.asarray(y) # Check size if x.size != y.size: raise ValueError('x and y must have the same length.') # Remove NA x, y = remove_na(x, y, paired=True) n = x.size # Compute correlation coefficient x_sin = np.sin(x - circmean(x)) y_sin = np.sin(y - circmean(y)) # Similar to np.corrcoef(x_sin, y_sin)[0][1] r = np.sum(x_sin * y_sin) / np.sqrt(np.sum(x_sin**2) * np.sum(y_sin**2)) # Compute T- and p-values tval = np.sqrt((n * (x_sin**2).mean() * (y_sin**2).mean()) / np.mean(x_sin**2 * y_sin**2)) * r # Approximately distributed as a standard normal pval = 2 * norm.sf(abs(tval)) pval = pval / 2 if tail == 'one-sided' else pval return np.round(r, 3), pval
def circ_corrcc(x, y, tail='two-sided')
Correlation coefficient between two circular variables. Parameters ---------- x : np.array First circular variable (expressed in radians) y : np.array Second circular variable (expressed in radians) tail : string Specify whether to return 'one-sided' or 'two-sided' p-value. Returns ------- r : float Correlation coefficient pval : float Uncorrected p-value Notes ----- Adapted from the CircStats MATLAB toolbox (Berens 2009). Use the np.deg2rad function to convert angles from degrees to radians. Please note that NaN are automatically removed. Examples -------- Compute the r and p-value of two circular variables >>> from pingouin import circ_corrcc >>> x = [0.785, 1.570, 3.141, 3.839, 5.934] >>> y = [0.593, 1.291, 2.879, 3.892, 6.108] >>> r, pval = circ_corrcc(x, y) >>> print(r, pval) 0.942 0.06579836070349088
2.946789
3.043482
0.96823
from scipy.stats import pearsonr, chi2 x = np.asarray(x) y = np.asarray(y) # Check size if x.size != y.size: raise ValueError('x and y must have the same length.') # Remove NA x, y = remove_na(x, y, paired=True) n = x.size # Compute correlation coefficient for sin and cos independently rxs = pearsonr(y, np.sin(x))[0] rxc = pearsonr(y, np.cos(x))[0] rcs = pearsonr(np.sin(x), np.cos(x))[0] # Compute angular-linear correlation (equ. 27.47) r = np.sqrt((rxc**2 + rxs**2 - 2 * rxc * rxs * rcs) / (1 - rcs**2)) # Compute p-value pval = chi2.sf(n * r**2, 2) pval = pval / 2 if tail == 'one-sided' else pval return np.round(r, 3), pval
def circ_corrcl(x, y, tail='two-sided')
Correlation coefficient between one circular and one linear random variable. Parameters ---------- x : np.array First circular variable (expressed in radians) y : np.array Second variable (linear) tail : string Specify whether to return 'one-sided' or 'two-sided' p-value. Returns ------- r : float Correlation coefficient pval : float Uncorrected p-value Notes ----- Python code borrowed from brainpipe (based on the MATLAB toolbox CircStats) Please note that NaN values are automatically removed from datasets. Examples -------- Compute the r and p-value between one circular and one linear variable. >>> from pingouin import circ_corrcl >>> x = [0.785, 1.570, 3.141, 0.839, 5.934] >>> y = [1.593, 1.291, -0.248, -2.892, 0.102] >>> r, pval = circ_corrcl(x, y) >>> print(r, pval) 0.109 0.9708899750629236
2.989195
3.133785
0.953861
alpha = np.array(alpha) if isinstance(w, (list, np.ndarray)): w = np.array(w) if alpha.shape != w.shape: raise ValueError("w must have the same shape as alpha.") else: w = np.ones_like(alpha) return np.angle(np.multiply(w, np.exp(1j * alpha)).sum(axis=axis))
def circ_mean(alpha, w=None, axis=0)
Mean direction for circular data. Parameters ---------- alpha : array Sample of angles in radians w : array Number of incidences in case of binned angle data axis : int Compute along this dimension Returns ------- mu : float Mean direction Examples -------- Mean resultant vector of circular data >>> from pingouin import circ_mean >>> alpha = [0.785, 1.570, 3.141, 0.839, 5.934] >>> circ_mean(alpha) 1.012962445838065
2.226115
3.066468
0.725954
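circ_mean is the argument of the complex mean resultant vector, so the docstring value can be reproduced with plain numpy (a sketch, not pingouin's API):

import numpy as np

alpha = np.array([0.785, 1.570, 3.141, 0.839, 5.934])
mu = np.angle(np.exp(1j * alpha).sum())  # ≈ 1.0130, matches circ_mean(alpha)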
alpha = np.array(alpha) w = np.array(w) if w is not None else np.ones(alpha.shape) if alpha.size != w.size: raise ValueError("Input dimensions do not match") # Compute weighted sum of cos and sin of angles: r = np.multiply(w, np.exp(1j * alpha)).sum(axis=axis) # Obtain length: r = np.abs(r) / w.sum(axis=axis) # For data with known spacing, apply correction factor if d is not None: c = d / 2 / np.sin(d / 2) r = c * r return r
def circ_r(alpha, w=None, d=None, axis=0)
Mean resultant vector length for circular data. Parameters ---------- alpha : array Sample of angles in radians w : array Number of incidences in case of binned angle data d : float Spacing (in radians) of bin centers for binned data. If supplied, a correction factor is used to correct for bias in the estimation of r. axis : int Compute along this dimension Returns ------- r : float Mean resultant length Notes ----- The length of the mean resultant vector is a crucial quantity for the measurement of circular spread or hypothesis testing in directional statistics. The closer it is to one, the more concentrated the data sample is around the mean direction (Berens 2009). Examples -------- Mean resultant vector length of circular data >>> from pingouin import circ_r >>> x = [0.785, 1.570, 3.141, 0.839, 5.934] >>> circ_r(x) 0.49723034495605356
3.899105
4.748212
0.821173
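Likewise, for unweighted data circ_r reduces to the modulus of the mean complex exponential; a sketch reproducing the docstring value:

import numpy as np

x = np.array([0.785, 1.570, 3.141, 0.839, 5.934])
r = np.abs(np.exp(1j * x).sum()) / x.size  # ≈ 0.4972, matches circ_r(x)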
alpha = np.array(alpha) if w is None: r = circ_r(alpha) n = len(alpha) else: if len(alpha) != len(w): raise ValueError("Input dimensions do not match") r = circ_r(alpha, w, d) n = np.sum(w) # Compute Rayleigh's statistic R = n * r z = (R**2) / n # Compute p value using approximation in Zar (1999), p. 617 pval = np.exp(np.sqrt(1 + 4 * n + 4 * (n**2 - R**2)) - (1 + 2 * n)) return np.round(z, 3), pval
def circ_rayleigh(alpha, w=None, d=None)
Rayleigh test for non-uniformity of circular data. Parameters ---------- alpha : np.array Sample of angles in radians. w : np.array Number of incidences in case of binned angle data. d : float Spacing (in radians) of bin centers for binned data. If supplied, a correction factor is used to correct for bias in the estimation of r. Returns ------- z : float Z-statistic pval : float P-value Notes ----- The Rayleigh test asks how large the resultant vector length R must be to indicate a non-uniform distribution (Fisher 1995). H0: the population is uniformly distributed around the circle HA: the population is not distributed uniformly around the circle The assumptions for the Rayleigh test are that (1) the distribution has only one mode and (2) the data is sampled from a von Mises distribution. Examples -------- 1. Simple Rayleigh test for non-uniformity of circular data. >>> from pingouin import circ_rayleigh >>> x = [0.785, 1.570, 3.141, 0.839, 5.934] >>> z, pval = circ_rayleigh(x) >>> print(z, pval) 1.236 0.3048435876500138 2. Specifying w and d >>> circ_rayleigh(x, w=[.1, .2, .3, .4, .5], d=0.2) (0.278, 0.8069972000769801)
4.608276
4.965706
0.92802
pvals = np.asarray(pvals) num_nan = np.isnan(pvals).sum() pvals_corrected = pvals * (float(pvals.size) - num_nan) pvals_corrected = np.clip(pvals_corrected, None, 1) with np.errstate(invalid='ignore'): reject = np.less(pvals_corrected, alpha) return reject, pvals_corrected
def bonf(pvals, alpha=0.05)
P-values correction with Bonferroni method. Parameters ---------- pvals : array_like Array of p-values of the individual tests. alpha : float Error rate (= alpha level). Returns ------- reject : array, bool True if a hypothesis is rejected, False if not pval_corrected : array P-values adjusted for multiple hypothesis testing using the Bonferroni procedure (= multiplied by the number of tests). See also -------- holm : Holm-Bonferroni correction fdr : Benjamini/Hochberg and Benjamini/Yekutieli FDR correction Notes ----- From Wikipedia: Statistical hypothesis testing is based on rejecting the null hypothesis if the likelihood of the observed data under the null hypotheses is low. If multiple hypotheses are tested, the chance of a rare event increases, and therefore, the likelihood of incorrectly rejecting a null hypothesis (i.e., making a Type I error) increases. The Bonferroni correction compensates for that increase by testing each individual hypothesis :math:`p_i` at a significance level of :math:`p_i = \\alpha / n` where :math:`\\alpha` is the desired overall alpha level and :math:`n` is the number of hypotheses. For example, if a trial is testing :math:`n=20` hypotheses with a desired :math:`\\alpha=0.05`, then the Bonferroni correction would test each individual hypothesis at :math:`\\alpha=0.05/20=0.0025`. The Bonferroni adjusted p-values are defined as: .. math:: \\widetilde {p}_{{(i)}}= n \\cdot p_{{(i)}} The Bonferroni correction tends to be a bit too conservative. Note that NaN values are not taken into account in the p-values correction. References ---------- - Bonferroni, C. E. (1935). Il calcolo delle assicurazioni su gruppi di teste. Studi in onore del professore salvatore ortu carboni, 13-60. - https://en.wikipedia.org/wiki/Bonferroni_correction Examples -------- >>> from pingouin import bonf >>> pvals = [.50, .003, .32, .054, .0003] >>> reject, pvals_corr = bonf(pvals, alpha=.05) >>> print(reject, pvals_corr) [False True False False True] [1. 0.015 1. 0.27 0.0015]
2.859871
3.222886
0.887363
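The adjustment is a single multiplication and clip, so the docstring output can be reproduced without calling bonf (NaN handling omitted):

import numpy as np

pvals = np.array([.50, .003, .32, .054, .0003])
pvals_corr = np.clip(pvals * pvals.size, None, 1)  # [1. 0.015 1. 0.27 0.0015]
reject = pvals_corr < .05  # [False True False False True]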
# Convert to array and save original shape pvals = np.asarray(pvals) shape_init = pvals.shape pvals = pvals.ravel() num_nan = np.isnan(pvals).sum() # Sort the (flattened) p-values pvals_sortind = np.argsort(pvals) pvals_sorted = pvals[pvals_sortind] sortrevind = pvals_sortind.argsort() ntests = pvals.size - num_nan # Now we adjust the p-values pvals_corr = np.diag(pvals_sorted * np.arange(ntests, 0, -1)[..., None]) pvals_corr = np.maximum.accumulate(pvals_corr) pvals_corr = np.clip(pvals_corr, None, 1) # And revert to the original shape and order pvals_corr = np.append(pvals_corr, np.full(num_nan, np.nan)) pvals_corrected = pvals_corr[sortrevind].reshape(shape_init) with np.errstate(invalid='ignore'): reject = np.less(pvals_corrected, alpha) return reject, pvals_corrected
def holm(pvals, alpha=.05)
P-values correction with Holm method. Parameters ---------- pvals : array_like Array of p-values of the individual tests. alpha : float Error rate (= alpha level). Returns ------- reject : array, bool True if a hypothesis is rejected, False if not pvals_corrected : array P-values adjusted for multiple hypothesis testing using the Holm procedure. See also -------- bonf : Bonferroni correction fdr : Benjamini/Hochberg and Benjamini/Yekutieli FDR correction Notes ----- From Wikipedia: In statistics, the Holm–Bonferroni method (also called the Holm method) is used to counteract the problem of multiple comparisons. It is intended to control the family-wise error rate and offers a simple test uniformly more powerful than the Bonferroni correction. The Holm adjusted p-values are the running maximum of the sorted p-values divided by the corresponding increasing alpha level: .. math:: \\frac{\\alpha}{n}, \\frac{\\alpha}{n-1}, ..., \\frac{\\alpha}{1} where :math:`n` is the number of test. The full mathematical formula is: .. math:: \\widetilde {p}_{{(i)}}=\\max _{{j\\leq i}}\\left\\{(n-j+1)p_{{(j)}} \\right\\}_{{1}} Note that NaN values are not taken into account in the p-values correction. References ---------- - Holm, S. (1979). A simple sequentially rejective multiple test procedure. Scandinavian journal of statistics, 65-70. - https://en.wikipedia.org/wiki/Holm%E2%80%93Bonferroni_method Examples -------- >>> from pingouin import holm >>> pvals = [.50, .003, .32, .054, .0003] >>> reject, pvals_corr = holm(pvals, alpha=.05) >>> print(reject, pvals_corr) [False True False False True] [0.64 0.012 0.64 0.162 0.0015]
3.086741
3.187692
0.968331
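The Holm formula in the notes maps onto a sort, a running maximum, and an unsort. A sketch reproducing the docstring output (again without the NaN handling the function above performs):

import numpy as np

pvals = np.array([.50, .003, .32, .054, .0003])
order = np.argsort(pvals)
adj = pvals[order] * np.arange(pvals.size, 0, -1)  # (n - j + 1) * p_(j)
adj = np.minimum(np.maximum.accumulate(adj), 1)    # running max, capped at 1
pvals_corr = adj[np.argsort(order)]  # [0.64 0.012 0.64 0.162 0.0015]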
from scipy.stats import f from pingouin import anova # Check dataframe if any(v is None for v in [data, groups, raters, scores]): raise ValueError('Data, groups, raters and scores must be specified') assert isinstance(data, pd.DataFrame), 'Data must be a pandas dataframe.' # Check that scores is a numeric variable assert data[scores].dtype.kind in 'fi', 'Scores must be numeric.' # Check that data are fully balanced if data.groupby(raters)[scores].count().nunique() > 1: raise ValueError('Data must be balanced.') # Extract sizes k = data[raters].nunique() # n = data[groups].nunique() # ANOVA and ICC aov = anova(dv=scores, data=data, between=groups, detailed=True) icc = (aov.loc[0, 'MS'] - aov.loc[1, 'MS']) / \ (aov.loc[0, 'MS'] + (k - 1) * aov.loc[1, 'MS']) # Confidence interval alpha = 1 - ci df_num, df_den = aov.loc[0, 'DF'], aov.loc[1, 'DF'] f_lower = aov.loc[0, 'F'] / f.isf(alpha / 2, df_num, df_den) f_upper = aov.loc[0, 'F'] * f.isf(alpha / 2, df_den, df_num) lower = (f_lower - 1) / (f_lower + k - 1) upper = (f_upper - 1) / (f_upper + k - 1) return round(icc, 6), np.round([lower, upper], 3)
def intraclass_corr(data=None, groups=None, raters=None, scores=None, ci=.95)
Intra-class correlation coefficient. Parameters ---------- data : pd.DataFrame Dataframe containing the variables groups : string Name of column in data containing the groups. raters : string Name of column in data containing the raters (scorers). scores : string Name of column in data containing the scores (ratings). ci : float Confidence interval Returns ------- icc : float Intraclass correlation coefficient ci : list Lower and upper confidence intervals Notes ----- The intraclass correlation (ICC) assesses the reliability of ratings by comparing the variability of different ratings of the same subject to the total variation across all ratings and all subjects. The ratings are quantitative (e.g. Likert scale). Inspired from: http://www.real-statistics.com/reliability/intraclass-correlation/ Examples -------- ICC of wine quality assessed by 4 judges. >>> import pingouin as pg >>> data = pg.read_dataset('icc') >>> pg.intraclass_corr(data=data, groups='Wine', raters='Judge', ... scores='Scores', ci=.95) (0.727526, array([0.434, 0.927]))
3.029181
3.12096
0.970592
# eq. 2.3 f = a[0]*math.log(r-1.) + \ a[1]*math.log(r-1.)**2 + \ a[2]*math.log(r-1.)**3 + \ a[3]*math.log(r-1.)**4 # eq. 2.7 and 2.8 corrections if r == 3: f += -0.002 / (1. + 12. * _phi(p)**2) if v <= 4.364: f += 1./517. - 1./(312.*(v,1e38)[np.isinf(v)]) else: f += 1./(191.*(v,1e38)[np.isinf(v)]) return -f
def _func(a, p, r, v)
calculates f-hat for the coefficients in a, probability p, sample mean difference r, and degrees of freedom v.
4.707823
4.664648
1.009256
# There are more generic ways of doing this but profiling # revealed that selecting these points is one of the slow # things that is easy to change. This is about 11 times # faster than the generic algorithm it is replacing. # # it is possible that different break points could yield # better estimates, but the function this is refactoring # just used linear distance. if p >= .99: return .990, .995, .999 elif p >= .975: return .975, .990, .995 elif p >= .95: return .950, .975, .990 elif p >= .9125: return .900, .950, .975 elif p >= .875: return .850, .900, .950 elif p >= .825: return .800, .850, .900 elif p >= .7625: return .750, .800, .850 elif p >= .675: return .675, .750, .800 elif p >= .500: return .500, .675, .750 else: return .100, .500, .675
def _select_ps(p)
returns the points to use for interpolating p
3.265475
3.192173
1.022963
# This one is about 30 times faster than # the generic algorithm it is replacing. if v >= 120.: return 60, 120, inf elif v >= 60.: return 40, 60, 120 elif v >= 40.: return 30, 40, 60 elif v >= 30.: return 24, 30, 40 elif v >= 24.: return 20, 24, 30 elif v >= 19.5: return 19, 20, 24 if p >= .9: if v < 2.5: return 1, 2, 3 else: if v < 3.5: return 2, 3, 4 vi = int(round(v)) return vi - 1, vi, vi + 1
def _select_vs(v, p)
returns the points to use for interpolating v
3.10561
3.155258
0.984265
# interpolate v (p should be in table) # ordinate: y**2 # abscissa: 1./v # find the 3 closest v values # only p >= .9 have table values for 1 degree of freedom. # The boolean is used to index the tuple and append 1 when # p >= .9 v0, v1, v2 = _select_vs(v, p) # y = f - 1. y0_sq = (_func(A[(p,v0)], p, r, v0) + 1.)**2. y1_sq = (_func(A[(p,v1)], p, r, v1) + 1.)**2. y2_sq = (_func(A[(p,v2)], p, r, v2) + 1.)**2. # if v2 is inf set to a big number so interpolation # calculations will work if v2 > 1e38: v2 = 1e38 # transform v v_, v0_, v1_, v2_ = 1./v, 1./v0, 1./v1, 1./v2 # calculate derivatives for quadratic interpolation d2 = 2.*((y2_sq-y1_sq)/(v2_-v1_) - \ (y0_sq-y1_sq)/(v0_-v1_)) / (v2_-v0_) if (v2_ + v0_) >= (v1_ + v1_): d1 = (y2_sq-y1_sq) / (v2_-v1_) - 0.5*d2*(v2_-v1_) else: d1 = (y1_sq-y0_sq) / (v1_-v0_) + 0.5*d2*(v1_-v0_) d0 = y1_sq # calculate y y = math.sqrt((d2/2.)*(v_-v1_)**2. + d1*(v_-v1_)+ d0) return y
def _interpolate_v(p, r, v)
interpolates v based on the values in the A table for the scalar value of r and the probability p
4.141158
4.200194
0.985945
# r is interpolated through the q to y here we only need to # account for when p and/or v are not found in the table. global A, p_keys, v_keys if p < .1 or p > .999: raise ValueError('p must be between .1 and .999') if p < .9: if v < 2: raise ValueError('v must be > 2 when p < .9') else: if v < 1: raise ValueError('v must be > 1 when p >= .9') # The easy case. A tabled value is requested. #numpy 1.4.1: TypeError: unhashable type: 'numpy.ndarray' : p = float(p) if isinstance(v, np.ndarray): v = v.item() if (p,v) in A: y = _func(A[(p,v)], p, r, v) + 1. elif p not in p_keys and v not in v_keys+([],[1])[p>=.90]: # find the 3 closest v values v0, v1, v2 = _select_vs(v, p) # find the 3 closest p values p0, p1, p2 = _select_ps(p) # calculate r0, r1, and r2 r0_sq = _interpolate_p(p, r, v0)**2 r1_sq = _interpolate_p(p, r, v1)**2 r2_sq = _interpolate_p(p, r, v2)**2 # transform v v_, v0_, v1_, v2_ = 1./v, 1./v0, 1./v1, 1./v2 # calculate derivatives for quadratic interpolation d2 = 2.*((r2_sq-r1_sq)/(v2_-v1_) - \ (r0_sq-r1_sq)/(v0_-v1_)) / (v2_-v0_) if (v2_ + v0_) >= (v1_ + v1_): d1 = (r2_sq-r1_sq) / (v2_-v1_) - 0.5*d2*(v2_-v1_) else: d1 = (r1_sq-r0_sq) / (v1_-v0_) + 0.5*d2*(v1_-v0_) d0 = r1_sq # calculate y y = math.sqrt((d2/2.)*(v_-v1_)**2. + d1*(v_-v1_)+ d0) elif v not in v_keys+([],[1])[p>=.90]: y = _interpolate_v(p, r, v) elif p not in p_keys: y = _interpolate_p(p, r, v) return math.sqrt(2) * -y * \ scipy.stats.t.isf((1. + p) / 2., min(v, 1e38))  # cap infinite dof at 1e38
def _qsturng(p, r, v)
scalar version of qsturng
3.828829
3.862076
0.991391
if all(map(_isfloat, [p, r, v])): return _qsturng(p, r, v) return _vqsturng(p, r, v)
def qsturng(p, r, v)
Approximates the quantile of a studentized range distribution having v degrees of freedom and r samples for probability p. Parameters ---------- p : (scalar, array_like) The cumulative probability value p >= .1 and p <= .999 (values under .5 are not recommended) r : (scalar, array_like) The number of samples r >= 2 and r <= 200 (values over 200 are permitted but not recommended) v : (scalar, array_like) The sample degrees of freedom if p >= .9: v >= 1 and v <= inf else: v >= 2 and v <= inf Returns ------- q : (scalar, array_like) approximation of the Studentized Range
4.22055
5.961322
0.707989
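A typical use is looking up the critical value for a Tukey-style multiple comparison; a brief sketch (3.58 is the standard table entry for these parameters):

q_crit = qsturng(p=0.95, r=3, v=20)  # ≈ 3.58, critical range for 3 means, 20 dof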
if q < 0.: raise ValueError('q should be >= 0') opt_func = lambda p, r, v : abs(_qsturng(p, r, v) - q) if v == 1: if q < _qsturng(.9, r, 1): return .1 elif q > _qsturng(.999, r, 1): return .001 return 1. - fminbound(opt_func, .9, .999, args=(r,v)) else: if q < _qsturng(.1, r, v): return .9 elif q > _qsturng(.999, r, v): return .001 return 1. - fminbound(opt_func, .1, .999, args=(r,v))
def _psturng(q, r, v)
scalar version of psturng
2.587962
2.603131
0.994173
if all(map(_isfloat, [q, r, v])): return _psturng(q, r, v) return _vpsturng(q, r, v)
def psturng(q, r, v)
Evaluates the probability from 0 to q for a studentized range having v degrees of freedom and r samples. Parameters ---------- q : (scalar, array_like) quantile value of Studentized Range q >= 0. r : (scalar, array_like) The number of samples r >= 2 and r <= 200 (values over 200 are permitted but not recommended) v : (scalar, array_like) The sample degrees of freedom if p >= .9: v >= 1 and v <= inf else: v >= 2 and v <= inf Returns ------- p : (scalar, array_like) 1. - area from zero to q under the Studentized Range distribution. When v == 1, p is bound between .001 and .1, when v > 1, p is bound between .001 and .9. Values between .5 and .9 are 1st order approximations.
4.242949
7.722878
0.5494
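And the inverse direction, turning an observed studentized range into an upper-tail probability, consistent with the qsturng sketch above:

p = psturng(q=3.58, r=3, v=20)  # ≈ .05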
if self._consumer_fn is not None: raise ValueError('Consumer function is already defined for this ' 'Stream instance') if not any([asyncio.iscoroutine(fn), asyncio.iscoroutinefunction(fn)]): raise ValueError('Consumer function must be a coroutine') self._consumer_fn = fn
def consumer(self, fn)
Consumer decorator :param fn: coroutine consumer function Example: >>> api = StreamingAPI('my_service_key') >>> stream = api.get_stream() >>> @stream.consumer >>> @asyncio.coroutine >>> def handle_event(payload): >>> print(payload)
3.60603
3.701343
0.974249
if self._consumer_fn is None: raise ValueError('Consumer function is not defined yet') logger.info('Start consuming the stream') @asyncio.coroutine def worker(conn_url): extra_headers = { 'Connection': 'upgrade', 'Upgrade': 'websocket', 'Sec-Websocket-Version': 13, } ws = yield from websockets.connect( conn_url, extra_headers=extra_headers) if ws is None: raise RuntimeError("Couldn't connect to the '%s'" % conn_url) try: while True: message = yield from ws.recv() yield from self._consumer_fn(message) finally: yield from ws.close() if loop is None: loop = asyncio.new_event_loop() asyncio.set_event_loop(loop) try: task = worker(conn_url=self._conn_url) if timeout: logger.info('Running task with timeout %s sec', timeout) loop.run_until_complete( asyncio.wait_for(task, timeout=timeout)) else: loop.run_until_complete(task) except asyncio.TimeoutError: logger.info('Timeout is reached. Closing the loop') loop.close() except KeyboardInterrupt: logger.info('Closing the loop') loop.close()
def consume(self, timeout=None, loop=None)
Start consuming the stream :param timeout: int: if given, stop the consumer after the given number of seconds :param loop: asyncio event loop; a new one is created if not given
2.492908
2.49255
1.000143
resp = requests.post(url=self.REQUEST_URL.format(**self._params), json={'rule': {'value': value, 'tag': tag}}) return resp.json()
def add_rule(self, value, tag)
Add a new rule :param value: str :param tag: str :return: dict of a json response
5.431916
5.405386
1.004908
resp = requests.delete(url=self.REQUEST_URL.format(**self._params), json={'tag': tag}) return resp.json()
def remove_rule(self, tag)
Remove a rule by tag
7.831454
7.610659
1.029011
if not isinstance(data, dict): raise ValueError('Data must be dict. %r is passed' % data) values_dict = {} for key, value in data.items(): items = [] if isinstance(value, six.string_types): items.append(value) elif isinstance(value, Iterable): for v in value: # Convert to str int values if isinstance(v, int): v = str(v) try: item = six.u(v) except TypeError: item = v items.append(item) value = ','.join(items) values_dict[key] = value return values_dict
def stringify_values(data)
Coerce iterable values to 'val1,val2,valN' Example: fields=['nickname', 'city', 'can_see_all_posts'] --> fields='nickname,city,can_see_all_posts' :param data: dict :return: converted values dict
2.836832
2.831727
1.001803
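A usage sketch matching the docstring example (the parameter names are illustrative):

params = {'fields': ['nickname', 'city'], 'user_ids': [1, 2, 3]}
stringify_values(params)  # {'fields': 'nickname,city', 'user_ids': '1,2,3'}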
parsed_url = urlparse(url) if fragment: url_query = parse_qsl(parsed_url.fragment) else: url_query = parse_qsl(parsed_url.query) # login_response_url_query can have multiple keys url_query = dict(url_query) return url_query
def parse_url_query_params(url, fragment=True)
Parse url query params :param fragment: bool: flag is used for parsing oauth url :param url: str: url string :return: dict
3.225599
3.519151
0.916584
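A sketch of both modes; the token value is made up:

url = 'https://oauth.vk.com/blank.html#access_token=abc123&expires_in=0'
parse_url_query_params(url)  # {'access_token': 'abc123', 'expires_in': '0'}
parse_url_query_params('https://example.com/?a=1&b=2', fragment=False)  # {'a': '1', 'b': '2'}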
if parser is None: parser = bs4.BeautifulSoup(html, 'html.parser') forms = parser.find_all('form') if not forms: raise VkParseError('Action form is not found in the html \n%s' % html) if len(forms) > 1: raise VkParseError('Found more than 1 form to handle:\n%s' % forms) form = forms[0] return form.get('action')
def parse_form_action_url(html, parser=None)
Parse <form action="(.+)"> url :param html: str: raw html text :param parser: bs4.BeautifulSoup: html parser :return: url str: for example: /login.php?act=security_check&to=&hash=12346
3.412153
3.502926
0.974086
if parser is None: parser = bs4.BeautifulSoup(html, 'html.parser') fields = parser.find_all('span', {'class': 'field_prefix'}) if not fields: raise VkParseError( 'No <span class="field_prefix">...</span> in the \n%s' % html) result = [] for f in fields: value = f.get_text().replace(six.u('\xa0'), '') result.append(value) return tuple(result)
def parse_masked_phone_number(html, parser=None)
Get masked phone number from security check html :param html: str: raw html text :param parser: bs4.BeautifulSoup: html parser :return: tuple of phone prefix and suffix, for example: ('+1234', '89') :rtype : tuple
3.416479
3.375423
1.012163
if parser is None: parser = bs4.BeautifulSoup(html, 'html.parser') # Check warnings warnings = parser.find_all('div', {'class': 'service_msg_warning'}) if warnings: raise VkPageWarningsError('; '.join([w.get_text() for w in warnings])) return True
def check_html_warnings(html, parser=None)
Check html warnings :param html: str: raw html text :param parser: bs4.BeautifulSoup: html parser :raise VkPageWarningsError: in case of found warnings
4.057122
2.866462
1.415376
if self._http_session is None: session = VerboseHTTPSession() session.headers.update(self.DEFAULT_HTTP_HEADERS) self._http_session = session return self._http_session
def http_session(self)
HTTP Session property :return: vk_requests.utils.VerboseHTTPSession instance
3.594725
2.652027
1.355463
response = http_session.get(self.LOGIN_URL) action_url = parse_form_action_url(response.text) # Stop login if action url is not found if not action_url: logger.debug(response.text) raise VkParseError("Can't parse form action url") login_form_data = {'email': self._login, 'pass': self._password} login_response = http_session.post(action_url, login_form_data) logger.debug('Cookies: %s', http_session.cookies) response_url_query = parse_url_query_params( login_response.url, fragment=False) logger.debug('response_url_query: %s', response_url_query) act = response_url_query.get('act') # Check response url query params firstly if 'sid' in response_url_query: self.require_auth_captcha( response=login_response, query_params=response_url_query, login_form_data=login_form_data, http_session=http_session) elif act == 'authcheck': self.require_2fa(html=login_response.text, http_session=http_session) elif act == 'security_check': self.require_phone_number(html=login_response.text, session=http_session) session_cookies = ('remixsid' in http_session.cookies, 'remixsid6' in http_session.cookies) if any(session_cookies): logger.info('VK session is established') return True else: message = 'Authorization error: incorrect password or ' \ 'authentication code' logger.error(message) raise VkAuthError(message)
def do_login(self, http_session)
Do vk login :param http_session: vk_requests.utils.VerboseHTTPSession: http session
3.286106
3.211532
1.023221
logger.info('Doing implicit flow authorization, app_id=%s', self.app_id) auth_data = { 'client_id': self.app_id, 'display': 'mobile', 'response_type': 'token', 'scope': self.scope, 'redirect_uri': 'https://oauth.vk.com/blank.html', 'v': self.api_version } response = session.post(url=self.AUTHORIZE_URL, data=stringify_values(auth_data)) url_query_params = parse_url_query_params(response.url) if 'expires_in' in url_query_params: logger.info('Token will expire in %s sec.' % url_query_params['expires_in']) if 'access_token' in url_query_params: return url_query_params # Permissions are needed logger.info('Getting permissions') action_url = parse_form_action_url(response.text) logger.debug('Response form action: %s', action_url) if action_url: response = session.get(action_url) url_query_params = parse_url_query_params(response.url) return url_query_params try: response_json = response.json() except ValueError: # not JSON in response error_message = 'OAuth2 grant access error' logger.error(response.text) else: error_message = 'VK error: [{}] {}'.format( response_json['error'], response_json['error_description']) logger.error(error_message) raise VkAuthError(error_message)
def do_implicit_flow_authorization(self, session)
Standard OAuth2 authorization method. It's used for getting access token More info: https://vk.com/dev/implicit_flow_user
2.930217
2.760979
1.061296
logger.info('Doing direct authorization, app_id=%s', self.app_id) auth_data = { 'client_id': self.app_id, 'client_secret': self._client_secret, 'username': self._login, 'password': self._password, 'grant_type': 'password', '2fa_supported': self._two_fa_supported, 'scope': self.scope, 'v': self.api_version } response = session.post(url=self.DIRECT_AUTHORIZE_URL, data=stringify_values(auth_data)) try: response_json = response.json() except ValueError: # not JSON in response error_message = 'OAuth2 grant access error' logger.error(response.text) raise VkAuthError(error_message) else: if 'access_token' in response_json: return response_json if response_json['error'] == 'need_validation': return self.direct_auth_require_2fa(session, auth_data) elif response_json['error'] == 'need_captcha': return self.direct_auth_require_captcha(session, response_json, auth_data) else: error_message = 'VK error: [{}] {}'.format( response_json['error'], response_json['error_description']) raise VkAuthError(error_message)
def do_direct_authorization(self, session)
Direct Authorization, more info: https://vk.com/dev/auth_direct
2.810893
2.547714
1.1033
logger.info('Captcha is needed. Query params: %s', query_params) form_text = response.text action_url = parse_form_action_url(form_text) logger.debug('form action url: %s', action_url) if not action_url: raise VkAuthError('Cannot find form action url') captcha_sid, captcha_url = parse_captcha_html( html=response.text, response_url=response.url) logger.info('Captcha url %s', captcha_url) login_form_data['captcha_sid'] = captcha_sid login_form_data['captcha_key'] = self.get_captcha_key(captcha_url) response = http_session.post(action_url, login_form_data) return response
def require_auth_captcha(self, response, query_params, login_form_data, http_session)
Resolve auth captcha case :param response: http response :param query_params: dict: response query params, for example: {'s': '0', 'email': 'my@email', 'dif': '1', 'role': 'fast', 'sid': '1'} :param login_form_data: dict :param http_session: requests.Session :return: :raise VkAuthError:
2.61096
2.561393
1.019352
if self._service_token: logger.info('Use service token: %s', 5 * '*' + self._service_token[50:]) return self._service_token if not all([self.app_id, self._login, self._password]): raise ValueError( 'app_id=%s, login=%s password=%s (masked) must be given' % (self.app_id, self._login, '*' * len(self._password) if self._password else 'None')) logger.info("Getting access token for user '%s'" % self._login) with self.http_session as s: if self._client_secret: url_query_params = self.do_direct_authorization(session=s) else: self.do_login(http_session=s) url_query_params = self.do_implicit_flow_authorization(session=s) logger.debug('url_query_params: %s', url_query_params) if 'access_token' in url_query_params: logger.info('Access token has been gotten') return url_query_params['access_token'] else: raise VkAuthError('OAuth2 authorization error. Url params: %s' % url_query_params)
def _get_access_token(self)
Get access token using app_id, login and password OR service token (service token docs: https://vk.com/dev/service_token)
3.691658
3.2966
1.119838
if self.interactive: print('Open CAPTCHA image url in your browser and enter it below: ', captcha_image_url) captcha_key = raw_input('Enter CAPTCHA key: ') return captcha_key else: raise VkAuthError( 'Captcha is required. Use interactive mode to enter it ' 'manually')
def get_captcha_key(self, captcha_image_url)
Read CAPTCHA key from user input
5.421278
4.923237
1.101161
logger.debug('Prepare API Method request %r', request) response = self._send_api_request(request=request, captcha_response=captcha_response) response.raise_for_status() response_or_error = json.loads(response.text) logger.debug('response: %s', response_or_error) if 'error' in response_or_error: error_data = response_or_error['error'] vk_error = VkAPIError(error_data) if vk_error.is_captcha_needed(): captcha_key = self.get_captcha_key(vk_error.captcha_img_url) if not captcha_key: raise vk_error # Retry http request with captcha info attached captcha_response = { 'sid': vk_error.captcha_sid, 'key': captcha_key, } return self.make_request( request, captcha_response=captcha_response) elif vk_error.is_access_token_incorrect(): logger.info( 'Authorization failed. Access token will be dropped') self._access_token = None return self.make_request(request) else: raise vk_error elif 'execute_errors' in response_or_error: # can take place while running .execute vk method # See more: https://vk.com/dev/execute raise VkAPIError(response_or_error['execute_errors'][0]) elif 'response' in response_or_error: return response_or_error['response']
def make_request(self, request, captcha_response=None)
Make api request helper function :param request: vk_requests.api.Request instance :param captcha_response: None or dict, e.g {'sid': <sid>, 'key': <key>} :return: dict: json decoded http response
2.970418
2.868356
1.035582
url = self.API_URL + request.method_name # Prepare request arguments method_kwargs = {'v': self.api_version} # Shape up the request data for values in (request.method_args,): method_kwargs.update(stringify_values(values)) if self.is_token_required() or self._service_token: # Auth api call if access_token hadn't been gotten earlier method_kwargs['access_token'] = self.access_token if captcha_response: method_kwargs['captcha_sid'] = captcha_response['sid'] method_kwargs['captcha_key'] = captcha_response['key'] http_params = dict(url=url, data=method_kwargs, **request.http_params) logger.debug('send_api_request:http_params: %s', http_params) response = self.http_session.post(**http_params) return response
def _send_api_request(self, request, captcha_response=None)
Prepare and send HTTP API request :param request: vk_requests.api.Request instance :param captcha_response: None or dict :return: HTTP response
4.520301
4.148309
1.089673
session = VKSession(app_id=app_id, user_login=login, user_password=password, phone_number=phone_number, scope=scope, service_token=service_token, api_version=api_version, interactive=interactive, client_secret=client_secret, two_fa_supported=two_fa_supported, two_fa_force_sms=two_fa_force_sms) return API(session=session, http_params=http_params)
def create_api(app_id=None, login=None, password=None, phone_number=None, scope='offline', api_version='5.92', http_params=None, interactive=False, service_token=None, client_secret=None, two_fa_supported=False, two_fa_force_sms=False)
Factory method to explicitly create API with app_id, login, password and phone_number parameters. If the app_id, login, password are not passed, then token-free session will be created automatically :param app_id: int: vk application id, more info: https://vk.com/dev/main :param login: str: vk login :param password: str: vk password :param phone_number: str: phone number with country code (+71234568990) :param scope: str or list of str: vk session scope :param api_version: str: vk api version, check https://vk.com/dev/versions :param interactive: bool: flag which indicates to use InteractiveVKSession :param service_token: str: new way of querying vk api, instead of getting oauth token :param http_params: dict: requests http parameters passed along :param client_secret: str: secure application key for Direct Authorization, more info: https://vk.com/dev/auth_direct :param two_fa_supported: bool: enable two-factor authentication for Direct Authorization, more info: https://vk.com/dev/auth_direct :param two_fa_force_sms: bool: force SMS two-factor authentication for Direct Authorization if two_fa_supported is True, more info: https://vk.com/dev/auth_direct :return: api instance :rtype : vk_requests.api.API
1.78163
1.947158
0.91499
if self._process_result: self._result = self._process_result(value) self._raw_result = value
def result(self, value)
The result of the command.
5.998717
5.87366
1.021291
path = '/'.join(str(v) for v in self._path) return 'coaps://{}:5684/{}'.format(host, path)
def url(self, host)
Generate url for coap client.
4.941291
3.46072
1.427822
for k, v in a.items(): if isinstance(v, dict): item = b.setdefault(k, {}) self._merge(v, item) elif isinstance(v, list): item = b.setdefault(k, [{}]) if len(v) == 1 and isinstance(v[0], dict): self._merge(v[0], item[0]) else: b[k] = v else: b[k] = v return b
def _merge(self, a, b)
Merges a into b.
1.811612
1.74703
1.036967
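A self-contained sketch of the same merge semantics (a standalone re-implementation, not the method itself); the dict keys are example attribute codes:

def merge(a, b):
    # Recurse into dicts and single-dict lists; overwrite everything else.
    for k, v in a.items():
        if isinstance(v, dict):
            merge(v, b.setdefault(k, {}))
        elif isinstance(v, list) and len(v) == 1 and isinstance(v[0], dict):
            merge(v[0], b.setdefault(k, [{}])[0])
        else:
            b[k] = v
    return b

print(merge({'3311': [{'5850': 1}]},
            {'3311': [{'5851': 254}], '9001': 'Lamp'}))
# {'3311': [{'5851': 254, '5850': 1}], '9001': 'Lamp'}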
if command2 is None:
    return
self._data = self._merge(command2._data, self._data)
def combine_data(self, command2)
Combines the data for this command with another.
5.635996
4.370011
1.289698
try:
    with open(filename, encoding='utf-8') as fdesc:
        return json.loads(fdesc.read())
except FileNotFoundError:
    # This is not a fatal error
    _LOGGER.debug('JSON file not found: %s', filename)
except ValueError as error:
    _LOGGER.exception('Could not parse JSON content: %s', filename)
    raise PytradfriError(error)
except OSError as error:
    _LOGGER.exception('JSON file reading failed: %s', filename)
    raise PytradfriError(error)
return {}
def load_json(filename: str) -> Union[List, Dict]
Load JSON data from a file and return as dict or list.

Defaults to returning empty dict if file is not found.
2.730969
2.80824
0.972484
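A minimal usage sketch; the filename and host key are placeholders:

conf = load_json('tradfri_psk.conf')  # returns {} if the file is missing
identity = conf.get('192.168.1.2', {}).get('identity')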
try:
    data = json.dumps(config, sort_keys=True, indent=4)
    with open(filename, 'w', encoding='utf-8') as fdesc:
        fdesc.write(data)
    return True
except TypeError as error:
    _LOGGER.exception('Failed to serialize to JSON: %s', filename)
    raise PytradfriError(error)
except OSError as error:
    _LOGGER.exception('Saving JSON file failed: %s', filename)
    raise PytradfriError(error)
def save_json(filename: str, config: Union[List, Dict])
Save JSON data to a file. Returns True on success.
2.788369
2.631617
1.059565
return [k for k, b in self._lookup.items() if b & selection]
def get_selected_keys(self, selection)
Return a list of keys for the given selection.
8.932598
6.595808
1.354284
return [v for b, v in self._choices if b & selection]
def get_selected_values(self, selection)
Return a list of values for the given selection.
14.955264
11.215895
1.333399
output = output.strip()
_LOGGER.debug('Received: %s', output)

if not output:
    return None
elif 'decrypt_verify' in output:
    raise RequestError(
        'Please compile coap-client without debug output. See '
        'instructions at '
        'https://github.com/ggravlingen/pytradfri#installation')
elif output.startswith(CLIENT_ERROR_PREFIX):
    raise ClientError(output)
elif output.startswith(SERVER_ERROR_PREFIX):
    raise ServerError(output)
elif not parse_json:
    return output

return json.loads(output)
def _process_output(output, parse_json=True)
Process raw coap-client output: raise on known error markers, otherwise return decoded JSON (or the raw string when parse_json is False).
5.036912
4.874663
1.033284
@wraps(api)
def retry_api(*args, **kwargs):
    for i in range(1, retries + 1):
        try:
            return api(*args, **kwargs)
        except RequestTimeout:
            if i == retries:
                raise
return retry_api
def retry_timeout(api, retries=3)
Retry API call when a timeout occurs.
2.452056
2.263923
1.0831
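A wiring sketch: wrap an API factory's blocking request method so transient timeouts are retried before RequestTimeout propagates (the host and keys below are placeholders):

api_factory = APIFactory(host='192.168.1.2', psk_id='my_id', psk='my_key')
api = retry_timeout(api_factory.request, retries=3)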
if api_command.observe:
    self._observe(api_command)
    return

method = api_command.method
path = api_command.path
data = api_command.data
parse_json = api_command.parse_json
url = api_command.url(self._host)

proc_timeout = self._timeout
if timeout is not None:
    proc_timeout = timeout

command = self._base_command(method)

kwargs = {
    'stderr': subprocess.DEVNULL,
    'timeout': proc_timeout,
    'universal_newlines': True,
}

if data is not None:
    kwargs['input'] = json.dumps(data)
    command.append('-f')
    command.append('-')
    _LOGGER.debug('Executing %s %s %s: %s', self._host, method,
                  path, data)
else:
    _LOGGER.debug('Executing %s %s %s', self._host, method, path)

command.append(url)

try:
    return_value = subprocess.check_output(command, **kwargs)
except subprocess.TimeoutExpired:
    raise RequestTimeout() from None
except subprocess.CalledProcessError as err:
    raise RequestError(
        'Error executing request: {}'.format(err)) from None

api_command.result = _process_output(return_value, parse_json)
return api_command.result
def _execute(self, api_command, *, timeout=None)
Execute the command.
2.578862
2.525974
1.020938
if not isinstance(api_commands, list):
    return self._execute(api_commands, timeout=timeout)

command_results = []
for api_command in api_commands:
    result = self._execute(api_command, timeout=timeout)
    command_results.append(result)

return command_results
def request(self, api_commands, *, timeout=None)
Make a request. Timeout is in seconds.
2.22601
2.083992
1.068147
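A batching sketch, assuming api is the bound request method and lights a list of devices with light control: passing a list runs each command in turn and returns the results in the same order:

commands = [light.light_control.set_dimmer(128) for light in lights]
results = api(commands)  # one result per command, in order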
path = api_command.path
duration = api_command.observe_duration
if duration <= 0:
    raise ValueError("Observation duration has to be greater than 0.")
url = api_command.url(self._host)
err_callback = api_command.err_callback

command = (self._base_command('get')
           + ['-s', str(duration), '-B', str(duration), url])

kwargs = {
    'stdout': subprocess.PIPE,
    'stderr': subprocess.DEVNULL,
    'universal_newlines': True
}

try:
    proc = subprocess.Popen(command, **kwargs)
except subprocess.CalledProcessError as err:
    raise RequestError(
        'Error executing request: {}'.format(err)) from None

output = ''
open_obj = 0
start = time()

for data in iter(lambda: proc.stdout.read(1), ''):
    if data == '\n':
        _LOGGER.debug('Observing stopped for %s after %.1fs',
                      path, time() - start)
        err_callback(RequestError("Observing stopped."))
        break

    if data == '{':
        open_obj += 1
    elif data == '}':
        open_obj -= 1

    output += data

    if open_obj == 0:
        api_command.result = _process_output(output)
        output = ''
def _observe(self, api_command)
Observe an endpoint.
3.511969
3.414367
1.028586
if not self._psk:
    # Backup the real identity.
    existing_psk_id = self._psk_id

    # Set the default identity and security key for generation.
    self._psk_id = 'Client_identity'
    self._psk = security_key

    # Ask the Gateway to generate the psk for the identity.
    self._psk = self.request(Gateway().generate_psk(existing_psk_id))

    # Restore the real identity.
    self._psk_id = existing_psk_id

return self._psk
def generate_psk(self, security_key)
Generate and set a psk from the security key.
4.508446
4.34705
1.037128
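A pairing sketch: the one-time security key printed on the gateway is exchanged for a PSK bound to a new identity (host, identity and key below are placeholders):

api_factory = APIFactory(host='192.168.1.2', psk_id='my_new_identity')
psk = api_factory.generate_psk('KEY_FROM_GATEWAY_LABEL')
# Persist psk together with psk_id so later sessions can skip pairing.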
return datetime.time(
    self.task_start_parameters[
        ATTR_SMART_TASK_TRIGGER_TIME_START_HOUR],
    self.task_start_parameters[
        ATTR_SMART_TASK_TRIGGER_TIME_START_MIN])
def task_start_time(self)
Return the time the task starts. Time is set according to iso8601.
5.035065
4.949285
1.017332
return TaskControl(self, self.state, self.path, self._gateway)
def task_control(self)
Method to control a task.
17.442301
15.421797
1.131016
return [StartActionItem(self._task, i, self.state, self.path, self.raw)
        for i in range(len(self.raw))]
def tasks(self)
Return task objects of the task control.
11.722322
10.533955
1.112813
# This is to calculate the difference between local time
# and the time in the gateway
d1 = self._gateway.get_gateway_info().current_time
d2 = dt.utcnow()
diff = d1 - d2
newtime = dt(100, 1, 1, hour, minute, 0) - diff

command = {
    ATTR_SMART_TASK_TRIGGER_TIME_INTERVAL: [{
        ATTR_SMART_TASK_TRIGGER_TIME_START_HOUR: newtime.hour,
        ATTR_SMART_TASK_TRIGGER_TIME_START_MIN: newtime.minute
    }]
}
return self._task.set_values(command)
def set_dimmer_start_time(self, hour, minute)
Set start time for task (hh:mm) in iso8601.

NB: the dimmer starts 30 minutes before the time set in the app.
4.753451
4.765817
0.997405
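A usage sketch, assuming gateway and api as above and an existing smart task; hour and minute are local time and converted to gateway time internally:

task = api(gateway.get_smart_task(task_id))  # task_id: an existing task id
api(task.task_control.set_dimmer_start_time(8, 15))  # trigger at 08:15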
return [StartActionItem(self.start_action, i, self.state, self.path,
                        self.raw)
        for i in range(len(self.raw[ROOT_START_ACTION]))]
def devices(self)
Return the start action items (devices) of the start action task.
11.430624
7.929393
1.441551
json_list = {}
for i, x in enumerate(self._raw[ROOT_START_ACTION]):
    if i != self.index:
        json_list.update(x)
return json_list
def devices_dict(self)
Return a dict with the other devices in the start action task (every item except the one at this index).
8.759686
6.782763
1.291463
return StartActionItemController(self, self.raw, self.state, self.path,
                                 self.devices_dict)
def item_controller(self)
Method to control a task.
18.329113
16.329796
1.122434
command = {
    ATTR_START_ACTION: {
        ATTR_DEVICE_STATE: self.state,
        ROOT_START_ACTION: [{
            ATTR_ID: self.raw[ATTR_ID],
            ATTR_LIGHT_DIMMER: dimmer,
            ATTR_TRANSITION_TIME: self.raw[ATTR_TRANSITION_TIME]
        }, self.devices_dict]
    }
}
return self.set_values(command)
def set_dimmer(self, dimmer)
Set final dimmer value for task.
6.041952
5.868448
1.029566
command = {
    ATTR_START_ACTION: {
        ATTR_DEVICE_STATE: self.state,
        ROOT_START_ACTION: [{
            ATTR_ID: self.raw[ATTR_ID],
            ATTR_LIGHT_DIMMER: self.raw[ATTR_LIGHT_DIMMER],
            ATTR_TRANSITION_TIME: transition_time * 10 * 60
        }, self.devices_dict]
    }
}
return self.set_values(command)
def set_transition_time(self, transition_time)
Set time (mins) for light transition.
6.247237
5.684238
1.099046
def observe_callback(value):
    self.raw = value
    callback(self)

return Command('get', self.path, process_result=observe_callback,
               err_callback=err_callback, observe=True,
               observe_duration=duration)
def observe(self, callback, err_callback, duration=60)
Observe resource and call callback when updated.
6.80237
6.123836
1.110802
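A usage sketch: the observation blocks inside the transport for duration seconds, so it is typically run on a worker thread; the callbacks and the light/api names are illustrative:

import threading

def change_listener(resource):
    print(resource.raw)  # called on every observed update

def err_listener(err):
    print('Observation ended:', err)

cmd = light.observe(change_listener, err_listener, duration=120)
threading.Thread(target=api, args=(cmd,), daemon=True).start()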
def process_result(result):
    self.raw = result

return Command('get', self.path, process_result=process_result)
def update(self)
Update the group. Returns a Command.
10.080923
9.733917
1.035649
def process_result(result):
    return result[ATTR_PSK]

return Command('post', [ROOT_GATEWAY, ATTR_AUTH],
               {ATTR_IDENTITY: identity},
               process_result=process_result)
def generate_psk(self, identity)
Generate the PRE_SHARED_KEY from the gateway. Returns a Command.
12.611272
9.796833
1.28728
def process_result(result):
    return [line.split(';')[0][2:-1] for line in result.split(',')]

return Command('get', ['.well-known', 'core'], parse_json=False,
               process_result=process_result)
def get_endpoints(self)
Return all available endpoints on the gateway. Returns a Command.
8.277596
7.319159
1.130949
def process_result(result):
    return [self.get_device(dev) for dev in result]

return Command('get', [ROOT_DEVICES], process_result=process_result)
def get_devices(self)
Return the devices linked to the gateway. Returns a Command.
8.475373
7.646162
1.108448
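A chaining sketch: get_devices resolves to a list of per-device Commands, so two round trips are needed before Device objects are available (gateway and api assumed as above):

devices_commands = api(gateway.get_devices())
devices = api(devices_commands)
lights = [dev for dev in devices if dev.has_light_control]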
def process_result(result):
    return Device(result)

return Command('get', [ROOT_DEVICES, device_id],
               process_result=process_result)
def get_device(self, device_id)
Return specified device. Returns a Command.
10.300258
9.698241
1.062075
def process_result(result):
    return [self.get_group(group) for group in result]

return Command('get', [ROOT_GROUPS], process_result=process_result)
def get_groups(self)
Return the groups linked to the gateway. Returns a Command.
8.620298
7.584568
1.136558
def process_result(result):
    return Group(self, result)

return Command('get', [ROOT_GROUPS, group_id],
               process_result=process_result)
def get_group(self, group_id)
Return specified group. Returns a Command.
9.293819
9.567601
0.971384
def process_result(result):
    return GatewayInfo(result)

return Command('get', [ROOT_GATEWAY, ATTR_GATEWAY_INFO],
               process_result=process_result)
def get_gateway_info(self)
Return the gateway info. Returns a Command.
11.398709
9.516211
1.19782
mood_parent = self._get_mood_parent()

def process_result(result):
    return [self.get_mood(mood, mood_parent=mood_parent)
            for mood in result]

return Command('get', [ROOT_MOODS, mood_parent],
               process_result=process_result)
def get_moods(self)
Return moods defined on the gateway. Returns a Command.
5.708956
5.086179
1.122445
if mood_parent is None:
    mood_parent = self._get_mood_parent()

def process_result(result):
    return Mood(result, mood_parent)

return Command('get', [ROOT_MOODS, mood_parent, mood_id], mood_parent,
               process_result=process_result)
def get_mood(self, mood_id, *, mood_parent=None)
Return a mood. Returns a Command.
4.269593
3.934708
1.085111
def process_result(result):
    return [self.get_smart_task(task) for task in result]

return Command('get', [ROOT_SMART_TASKS], process_result=process_result)
def get_smart_tasks(self)
Return the transitions linked to the gateway. Returns a Command.
7.285981
6.87191
1.060256
def process_result(result):
    return SmartTask(self, result)

return Command('get', [ROOT_SMART_TASKS, task_id],
               process_result=process_result)
def get_smart_task(self, task_id)
Return specified transition. Returns a Command.
8.039356
8.277949
0.971177
if ATTR_FIRST_SETUP not in self.raw:
    return None
return datetime.utcfromtimestamp(self.raw[ATTR_FIRST_SETUP])
def first_setup(self)
Return the first setup time as a UTC datetime (the meaning of this value is a guess).
5.068933
4.001746
1.26668
if DeviceInfo.ATTR_POWER_SOURCE not in self.raw:
    return None
return DeviceInfo.VALUE_POWER_SOURCES.get(self.power_source, 'Unknown')
def power_source_str(self)
String representation of current power source.
7.028918
6.134669
1.14577
return [Light(self._device, i) for i in range(len(self.raw))]
def lights(self)
Return light objects of the light control.
9.83152
7.199836
1.36552
return self.set_values({
    ATTR_DEVICE_STATE: int(state)
}, index=index)
def set_state(self, state, *, index=0)
Set state of a light.
8.374441
6.743827
1.241794
self._value_validate(dimmer, RANGE_BRIGHTNESS, "Dimmer")

values = {
    ATTR_LIGHT_DIMMER: dimmer
}

if transition_time is not None:
    values[ATTR_TRANSITION_TIME] = transition_time

return self.set_values(values, index=index)
def set_dimmer(self, dimmer, *, index=0, transition_time=None)
Set dimmer value of a light.

transition_time: Integer representing tenth of a second (default None)
3.459108
3.459369
0.999925
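A usage sketch: dimmer takes 0-254 and transition_time is in tenths of a second, so this dims to roughly half brightness over two seconds (light and api assumed as above):

api(light.light_control.set_dimmer(127, transition_time=20))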
self._value_validate(color_temp, RANGE_MIREDS, "Color temperature")

values = {
    ATTR_LIGHT_MIREDS: color_temp
}

if transition_time is not None:
    values[ATTR_TRANSITION_TIME] = transition_time

return self.set_values(values, index=index)
def set_color_temp(self, color_temp, *, index=0, transition_time=None)
Set color temp of a light.
3.85257
3.572184
1.078492
values = {
    ATTR_LIGHT_COLOR_HEX: color,
}

if transition_time is not None:
    values[ATTR_TRANSITION_TIME] = transition_time

return self.set_values(values, index=index)
def set_hex_color(self, color, *, index=0, transition_time=None)
Set hex color of the light.
3.018871
2.66609
1.132322
self._value_validate(color_x, RANGE_X, "X color")
self._value_validate(color_y, RANGE_Y, "Y color")

values = {
    ATTR_LIGHT_COLOR_X: color_x,
    ATTR_LIGHT_COLOR_Y: color_y
}

if transition_time is not None:
    values[ATTR_TRANSITION_TIME] = transition_time

return self.set_values(values, index=index)
def set_xy_color(self, color_x, color_y, *, index=0, transition_time=None)
Set xy color of the light.
2.435217
2.252311
1.081208
self._value_validate(hue, RANGE_HUE, "Hue")
self._value_validate(saturation, RANGE_SATURATION, "Saturation")

values = {
    ATTR_LIGHT_COLOR_SATURATION: saturation,
    ATTR_LIGHT_COLOR_HUE: hue
}

if brightness is not None:
    # Validate before adding so an invalid brightness never reaches the
    # command payload.
    self._value_validate(brightness, RANGE_BRIGHTNESS, "Brightness")
    values[ATTR_LIGHT_DIMMER] = brightness

if transition_time is not None:
    values[ATTR_TRANSITION_TIME] = transition_time

return self.set_values(values, index=index)
def set_hsb(self, hue, saturation, brightness=None, *, index=0, transition_time=None)
Set HSB color settings of the light.
2.218825
2.104539
1.054304
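A usage sketch with hypothetical in-range values; hue and saturation use 16-bit-style scales on this device family and brightness is optional:

api(light.light_control.set_hsb(hue=32000, saturation=65000,
                                brightness=200, transition_time=10))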
if value is not None and (value < rnge[0] or value > rnge[1]):
    raise ValueError('%s value must be between %d and %d.'
                     % (identifier, rnge[0], rnge[1]))
def _value_validate(self, value, rnge, identifier="Given")
Make sure a value is within a given range.
2.343742
2.249189
1.042039
assert len(self.raw) == 1, \
    'Only devices with 1 light supported'

return Command('put', self._device.path, {
    ATTR_LIGHT_CONTROL: [
        values
    ]
})
def set_values(self, values, *, index=0)
Set values on light control. Returns a Command.
17.206543
11.059608
1.5558