code
string
signature
string
docstring
string
loss_without_docstring
float64
loss_with_docstring
float64
factor
float64
self._check_estimated() return self._rc.cov_YY(bessel=self.bessel)
def Ctt_(self)
Covariance matrix of the time shifted data
53.507225
36.653122
1.459827
if self._default_chunksize is None: try: # TODO: if dimension is not yet fixed (eg tica var cutoff, use dim of data_producer. self.dimension() self.output_type() except: self._default_chunksize = Iterable._FALLBACK_CHUNKSIZE else: self._default_chunksize = Iterable._compute_default_cs(self.dimension(), self.output_type().itemsize, self.logger) return self._default_chunksize
def default_chunksize(self)
How much data will be processed at once, in case no chunksize has been provided. Notes ----- This variable respects your setting for maximum memory in pyemma.config.default_chunksize
10.843932
11.03409
0.982766
if self.in_memory: from pyemma.coordinates.data.data_in_memory import DataInMemory return DataInMemory(self._Y).iterator( lag=lag, chunk=chunk, stride=stride, return_trajindex=return_trajindex, skip=skip ) chunk = chunk if chunk is not None else self.chunksize if lag > 0: if chunk == 0 or lag <= chunk: it = self._create_iterator(skip=skip, chunk=chunk, stride=1, return_trajindex=return_trajindex, cols=cols) it.return_traj_index = True return _LaggedIterator(it, lag, return_trajindex, stride) else: it = self._create_iterator(skip=skip, chunk=chunk, stride=stride, return_trajindex=return_trajindex, cols=cols) it.return_traj_index = True it_lagged = self._create_iterator(skip=skip + lag, chunk=chunk, stride=stride, return_trajindex=True, cols=cols) return _LegacyLaggedIterator(it, it_lagged, return_trajindex) return self._create_iterator(skip=skip, chunk=chunk, stride=stride, return_trajindex=return_trajindex, cols=cols)
def iterator(self, stride=1, lag=0, chunk=None, return_trajindex=True, cols=None, skip=0)
Creates an iterator to stream over the (transformed) data. If your data is too large to fit into memory and you want to incrementally compute some quantities on it, you can create an iterator on a reader or transformer (e.g. TICA) to avoid memory overflows. Parameters ---------- stride : int, default=1 Take only every stride'th frame. lag: int, default=0 how many frames to omit for each file. chunk: int, default=None How many frames to process at once. If not given, obtain the chunk size from the source. return_trajindex: boolean, default=True return a chunk of data if return_trajindex is False, otherwise a tuple of (trajindex, data). cols: array like, default=None return only the given columns. skip: int, default=0 skip the 'n' first frames of each trajectory. Returns ------- iter : instance of DataSourceIterator an implementation of a DataSourceIterator to stream over the data Examples -------- >>> from pyemma.coordinates import source; import numpy as np >>> data = [np.arange(3), np.arange(4, 7)] >>> reader = source(data) >>> iterator = reader.iterator(chunk=1) >>> for array_index, chunk in iterator: ... print(array_index, chunk) 0 [[0]] 0 [[1]] 0 [[2]] 1 [[4]] 1 [[5]] 1 [[6]]
2.442713
2.623856
0.930963
alpha = self.alpha if alpha <= 0: raise ValueError("alpha should be >0, got {0!r}".format(alpha)) X = atleast2d_or_csr(X) classes, y = np.unique(y, return_inverse=True) lengths = np.asarray(lengths) Y = y.reshape(-1, 1) == np.arange(len(classes)) end = np.cumsum(lengths) start = end - lengths init_prob = np.log(Y[start].sum(axis=0) + alpha) init_prob -= logsumexp(init_prob) final_prob = np.log(Y[end - 1].sum(axis=0) + alpha) final_prob -= logsumexp(final_prob) feature_prob = np.log(safe_sparse_dot(Y.T, X) + alpha) feature_prob -= logsumexp(feature_prob, axis=0) trans_prob = np.log(count_trans(y, len(classes)) + alpha) trans_prob -= logsumexp(trans_prob, axis=0) self.coef_ = feature_prob self.intercept_init_ = init_prob self.intercept_final_ = final_prob self.intercept_trans_ = trans_prob self.classes_ = classes return self
def fit(self, X, y, lengths)
Fit HMM model to data. Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) Feature matrix of individual samples. y : array-like, shape (n_samples,) Target labels. lengths : array-like of integers, shape (n_sequences,) Lengths of the individual sequences in X, y. The sum of these should be n_samples. Notes ----- Make sure the training set (X) is one-hot encoded; if more than one feature in X is on, the emission probabilities will be multiplied. Returns ------- self : MultinomialHMM
2.623523
2.77847
0.944233
for f, sha1 in files: yield "100644 blob {}\t{}\0".format(sha1, f) for d, sha1 in dirs: yield "040000 tree {}\t{}\0".format(sha1, d)
def _lstree(files, dirs)
Make git ls-tree like output.
3.276724
2.746157
1.193203
dir_hash = {} for root, dirs, files in os.walk(path, topdown=False): f_hash = ((f, hash_file(join(root, f))) for f in files) d_hash = ((d, dir_hash[join(root, d)]) for d in dirs) # split+join normalizes paths on Windows (note the imports) dir_hash[join(*split(root))] = _mktree(f_hash, d_hash) return dir_hash[path]
def hash_dir(path)
Write directory at path to Git index, return its SHA1 as a string.
4.44608
4.265459
1.042345
word = sentence[i] yield "word:" + word.lower() if word[0].isupper(): yield "CAP" if i > 0: yield "word-1:" + sentence[i - 1].lower() if i > 1: yield "word-2:" + sentence[i - 2].lower() if i + 1 < len(sentence): yield "word+1:" + sentence[i + 1].lower() if i + 2 < len(sentence): yield "word+2:" + sentence[i + 2].lower()
def features(sentence, i)
Features for i'th token in sentence. Currently baseline named-entity recognition features, but these can easily be changed to do POS tagging or chunking.
2.199199
2.208223
0.995913
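A minimal usage sketch of the token feature generator above, assuming `features` is importable from the surrounding module; the sentence is made up and the expected output reflects the cleaned-up feature strings.

```python
# Hypothetical two-token sentence; features() yields strings for token i.
sentence = ["Hello", "world"]

# Lowercased word, a capitalization flag, and the following word as context.
print(list(features(sentence, 0)))
# ['word:hello', 'CAP', 'word+1:world']
```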
if len(y_true) != len(y_pred): msg = "Sequences not of the same length ({} != {})." raise ValueError(msg.format(len(y_true), len(y_pred))) y_true = np.asarray(y_true) y_pred = np.asarray(y_pred) is_b = partial(np.char.startswith, prefix="B") where = np.where t_starts = where(is_b(y_true))[0] p_starts = where(is_b(y_pred))[0] # These lengths are off-by-one because we skip the "B", but that's ok. # http://stackoverflow.com/q/17929499/166749 t_lengths = np.diff(where(is_b(np.r_[y_true[y_true != 'O'], ['B']]))[0]) p_lengths = np.diff(where(is_b(np.r_[y_pred[y_pred != 'O'], ['B']]))[0]) t_segments = set(zip(t_starts, t_lengths, y_true[t_starts])) p_segments = set(zip(p_starts, p_lengths, y_pred[p_starts])) # tp = len(t_segments & p_segments) # fn = len(t_segments - p_segments) # fp = len(p_segments - t_segments) tp = sum(x in t_segments for x in p_segments) fn = sum(x not in p_segments for x in t_segments) fp = sum(x not in t_segments for x in p_segments) if tp == 0: # special-cased like this in CoNLL evaluation return 0. precision = tp / float(tp + fp) recall = tp / float(tp + fn) return 2. * precision * recall / (precision + recall)
def bio_f_score(y_true, y_pred)
F-score for BIO-tagging scheme, as used by CoNLL. This F-score variant is used for evaluating named-entity recognition and related problems, where the goal is to predict segments of interest within sequences and mark these as a "B" (begin) tag followed by zero or more "I" (inside) tags. A true positive is then defined as a BI* segment in both y_true and y_pred, with false positives and false negatives defined similarly. Support for tag schemes with classes (e.g. "B-NP") is limited: reported scores may be too high for inconsistent labelings. Parameters ---------- y_true : array-like of strings, shape (n_samples,) Ground truth labeling. y_pred : array-like of strings, shape (n_samples,) Sequence classifier's predictions. Returns ------- f : float F-score.
2.489401
2.371407
1.049757
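A small worked example of the BIO F-score above, assuming `bio_f_score` is importable; the label sequences are invented for illustration.

```python
y_true = ["B", "I", "I", "O", "B", "I"]
y_pred = ["B", "I", "I", "O", "B", "O"]

# The first segment (positions 0-2) matches exactly; the second segment is
# truncated in y_pred, so it counts as one false positive and one false
# negative. precision = recall = 1/2, hence F1 = 0.5.
print(bio_f_score(y_true, y_pred))  # 0.5
```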
lengths = np.asarray(lengths) end = np.cumsum(lengths) start = end - lengths bounds = np.vstack([start, end]).T errors = sum(1. for i, j in bounds if np.any(y_true[i:j] != y_pred[i:j])) return 1 - errors / len(lengths)
def whole_sequence_accuracy(y_true, y_pred, lengths)
Average accuracy measured on whole sequences. Returns the fraction of sequences in y_true that occur in y_pred without a single error.
3.261131
3.462526
0.941836
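A short example for the whole-sequence accuracy above, assuming the function is importable; two made-up sequences of lengths 2 and 3.

```python
import numpy as np

y_true = np.array(["A", "B", "A", "A", "B"])
y_pred = np.array(["A", "B", "B", "A", "B"])

# Only the first sequence (length 2) is reproduced without error,
# so the whole-sequence accuracy is 1/2.
print(whole_sequence_accuracy(y_true, y_pred, lengths=[2, 3]))  # 0.5
```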
fh = FeatureHasher(n_features=n_features, input_type="string") labels = [] lengths = [] with _open(f) as f: raw_X = _conll_sequences(f, features, labels, lengths, split) X = fh.transform(raw_X) return X, np.asarray(labels), np.asarray(lengths, dtype=np.int32)
def load_conll(f, features, n_features=(2 ** 16), split=False)
Load CoNLL file, extract features on the tokens and vectorize them. The CoNLL file format is a line-oriented text format that describes sequences in a space-separated format, separating the sequences with blank lines. Typically, the last space-separated part is a label. Since the space-separated parts are usually tokens (and maybe things like part-of-speech tags) rather than feature vectors, a function must be supplied that does the actual feature extraction. This function has access to the entire sequence, so that it can extract context features. A ``sklearn.feature_extraction.FeatureHasher`` (the "hashing trick") is used to map symbolic input feature names to columns, so this function does not remember the actual input feature names. Parameters ---------- f : {string, file-like} Input file. features : callable Feature extraction function. Must take a list of tokens l that represent a single sequence and an index i into this list, and must return an iterator over strings that represent the features of l[i]. n_features : integer, optional Number of columns in the output. split : boolean, default=False Whether to split lines on whitespace beyond what is needed to parse out the labels. This is useful for CoNLL files that have extra columns containing information like part of speech tags. Returns ------- X : scipy.sparse matrix, shape (n_samples, n_features) Samples (feature vectors), as a single sparse matrix. y : np.ndarray, dtype np.string, shape n_samples Per-sample labels. lengths : np.ndarray, dtype np.int32, shape n_sequences Lengths of sequences within (X, y). The sum of these is equal to n_samples.
3.685288
4.016164
0.917614
X = atleast2d_or_csr(X) scores = safe_sparse_dot(X, self.coef_.T) if hasattr(self, "coef_trans_"): n_classes = len(self.classes_) coef_t = self.coef_trans_.T.reshape(-1, self.coef_trans_.shape[-1]) trans_scores = safe_sparse_dot(X, coef_t.T) trans_scores = trans_scores.reshape(-1, n_classes, n_classes) else: trans_scores = None decode = self._get_decoder() if lengths is None: y = decode(scores, trans_scores, self.intercept_trans_, self.intercept_init_, self.intercept_final_) else: start, end = validate_lengths(X.shape[0], lengths) y = [decode(scores[start[i]:end[i]], trans_scores, self.intercept_trans_, self.intercept_init_, self.intercept_final_) for i in six.moves.xrange(len(lengths))] y = np.hstack(y) return self.classes_[y]
def predict(self, X, lengths=None)
Predict labels/tags for samples X. Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) Feature matrix. lengths : array-like of integer, shape (n_sequences,), optional Lengths of sequences in X. If not given, X is assumed to be a single sequence of length n_samples. Returns ------- y : array, shape (n_samples,) Labels per sample in X.
2.594239
2.792237
0.92909
return accuracy_score(y, self.predict(X, lengths))
def score(self, X, y, lengths=None)
Returns the mean accuracy on the given test data and labels. Parameters ---------- X : array-like, shape = (n_samples, n_features) Test samples. y : array-like, shape = (n_samples,) True labels for X. lengths : array-like of integer, shape (n_sequences,), optional Lengths of sequences in X. If not given, X is assumed to be a single sequence of length n_samples. Returns ------- score : float Mean accuracy of self.predict(X, lengths) wrt. y.
4.527335
6.955513
0.650899
if sp.issparse(X): raise TypeError('A sparse matrix was passed, but dense data ' 'is required. Use X.toarray() to convert to dense.') X_2d = np.asarray(np.atleast_2d(X), dtype=dtype, order=order) _assert_all_finite(X_2d) if X is X_2d and copy: X_2d = safe_copy(X_2d) return X_2d
def array2d(X, dtype=None, order=None, copy=False)
Returns at least 2-d array with data from X
2.676298
2.699408
0.991439
return _atleast2d_or_sparse(X, dtype, order, copy, sp.csr_matrix, "tocsr", sp.isspmatrix_csr)
def atleast2d_or_csr(X, dtype=None, order=None, copy=False)
Like numpy.atleast_2d, but converts sparse matrices to CSR format. Also converts np.matrix to np.ndarray.
4.980998
6.199447
0.803458
if lengths is None: lengths = [n_samples] lengths = np.asarray(lengths, dtype=np.int32) if lengths.sum() > n_samples: msg = "More than {0:d} samples in lengths array {1!s}" raise ValueError(msg.format(n_samples, lengths)) end = np.cumsum(lengths) start = end - lengths return start, end
def validate_lengths(n_samples, lengths)
Validate lengths array against n_samples. Parameters ---------- n_samples : integer Total number of samples. lengths : array-like of integers, shape (n_sequences,), optional Lengths of individual sequences in the input. Returns ------- start : array of integers, shape (n_sequences,) Start indices of sequences. end : array of integers, shape (n_sequences,) One-past-the-end indices of sequences.
2.536014
2.772979
0.914545
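A quick sketch of what `validate_lengths` returns, assuming it is importable; the numbers are arbitrary.

```python
# Two sequences of lengths 2 and 3 inside a 5-sample dataset.
start, end = validate_lengths(n_samples=5, lengths=[2, 3])
print(start)  # [0 2]
print(end)    # [2 5]
```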
indices = np.empty(len(y), dtype=np.int32) for i in six.moves.xrange(len(y) - 1): indices[i] = y[i] * n_classes + y[i + 1] indptr = np.arange(len(y) + 1) indptr[-1] = indptr[-2] return csr_matrix((np.ones(len(y), dtype=dtype), indices, indptr), shape=(len(y), n_classes ** 2))
def make_trans_matrix(y, n_classes, dtype=np.float64)
Make a sparse transition matrix for y. Takes a label sequence y and returns an indicator matrix with n_classes² columns of the label transitions in y: M[i, j, k] means y[i-1] == j and y[i] == k. The first row will be empty.
2.31154
2.18734
1.056782
connector = TCPConnector() connector._resolve_host = partial(self._old_resolver_mock, connector) new_is_ssl = ClientRequest.is_ssl ClientRequest.is_ssl = self._old_is_ssl try: original_request = request.clone(scheme="https" if request.headers["AResponsesIsSSL"] else "http") headers = {k: v for k, v in request.headers.items() if k != "AResponsesIsSSL"} async with ClientSession(connector=connector) as session: async with getattr(session, request.method.lower())(original_request.url, headers=headers, data=(await request.read())) as r: headers = {k: v for k, v in r.headers.items() if k.lower() == "content-type"} text = await r.text() response = self.Response(text=text, status=r.status, headers=headers) return response finally: ClientRequest.is_ssl = new_is_ssl
async def passthrough(self, request)
Make non-mocked network request
3.502263
3.314948
1.056506
# kazoe hand if han >= 13 and not is_yakuman: # Hands over 26+ han don't count as double yakuman if config.options.kazoe_limit == HandConfig.KAZOE_LIMITED: han = 13 # Hands over 13+ is a sanbaiman elif config.options.kazoe_limit == HandConfig.KAZOE_SANBAIMAN: han = 12 if han >= 5: if han >= 78: rounded = 48000 elif han >= 65: rounded = 40000 elif han >= 52: rounded = 32000 elif han >= 39: rounded = 24000 # double yakuman elif han >= 26: rounded = 16000 # yakuman elif han >= 13: rounded = 8000 # sanbaiman elif han >= 11: rounded = 6000 # baiman elif han >= 8: rounded = 4000 # haneman elif han >= 6: rounded = 3000 else: rounded = 2000 double_rounded = rounded * 2 four_rounded = double_rounded * 2 six_rounded = double_rounded * 3 else: base_points = fu * pow(2, 2 + han) rounded = (base_points + 99) // 100 * 100 double_rounded = (2 * base_points + 99) // 100 * 100 four_rounded = (4 * base_points + 99) // 100 * 100 six_rounded = (6 * base_points + 99) // 100 * 100 is_kiriage = False if config.options.kiriage: if han == 4 and fu == 30: is_kiriage = True if han == 3 and fu == 60: is_kiriage = True # mangan if rounded > 2000 or is_kiriage: rounded = 2000 double_rounded = rounded * 2 four_rounded = double_rounded * 2 six_rounded = double_rounded * 3 if config.is_tsumo: return {'main': double_rounded, 'additional': config.is_dealer and double_rounded or rounded} else: return {'main': config.is_dealer and six_rounded or four_rounded, 'additional': 0}
def calculate_scores(self, han, fu, config, is_yakuman=False)
Calculate the cost of a hand with the given han and fu :param han: int :param fu: int :param config: HandConfig object :param is_yakuman: boolean :return: a dictionary with main and additional cost. For ron the additional cost is always 0; for tsumo the main cost is the dealer's payment and additional is each other player's payment, e.g. {'main': 1000, 'additional': 0}
2.696469
2.60085
1.036765
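A standalone sketch of the base-point arithmetic used above for a non-limit hand (3 han, 30 fu); this only reproduces the formula, it does not call the scores calculator or build a HandConfig.

```python
han, fu = 3, 30
base_points = fu * pow(2, 2 + han)                     # 30 * 2**5 = 960
rounded = (base_points + 99) // 100 * 100              # 1000
double_rounded = (2 * base_points + 99) // 100 * 100   # 2000
four_rounded = (4 * base_points + 99) // 100 * 100     # 3900
print(base_points, rounded, double_rounded, four_rounded)
```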
# we will modify them later, so we need to use a copy tiles_34 = copy.deepcopy(tiles_34) self._init(tiles_34) count_of_tiles = sum(tiles_34) if count_of_tiles > 14: return -2 # With open hand we need to remove open sets from hand and replace them with isolated pon sets # it will allow to calculate count of shanten correctly if open_sets_34: isolated_tiles = find_isolated_tile_indices(tiles_34) for meld in open_sets_34: if not isolated_tiles: break isolated_tile = isolated_tiles.pop() tiles_34[meld[0]] -= 1 tiles_34[meld[1]] -= 1 tiles_34[meld[2]] -= 1 tiles_34[isolated_tile] = 3 if not open_sets_34: self.min_shanten = self._scan_chiitoitsu_and_kokushi(chiitoitsu, kokushi) self._remove_character_tiles(count_of_tiles) init_mentsu = math.floor((14 - count_of_tiles) / 3) self._scan(init_mentsu) return self.min_shanten
def calculate_shanten(self, tiles_34, open_sets_34=None, chiitoitsu=True, kokushi=True)
Return the number of tiles away from tempai (the shanten count) :param tiles_34: 34 tiles format array :param open_sets_34: array of array of 34 tiles format :param chiitoitsu: bool :param kokushi: bool :return: int
3.873463
4.028078
0.961616
win_tile //= 4 open_sets = [x.tiles_34 for x in melds if x.opened] chi_sets = [x for x in hand if (is_chi(x) and win_tile in x and x not in open_sets)] pon_sets = [x for x in hand if is_pon(x)] closed_pon_sets = [] for item in pon_sets: if item in open_sets: continue # if we do the ron on syanpon wait our pon will be consider as open # and it is not 789999 set if win_tile in item and not is_tsumo and not len(chi_sets): continue closed_pon_sets.append(item) return len(closed_pon_sets) == 3
def is_condition_met(self, hand, win_tile, melds, is_tsumo)
Three closed pon sets; the other sets need not be closed :param hand: list of hand's sets :param win_tile: 136 tiles format :param melds: list of Meld objects :param is_tsumo: bool :return: true|false
5.856681
5.393304
1.085917
pon_sets = [x for x in hand if is_pon(x)] if len(pon_sets) != 4: return False count_wind_sets = 0 winds = [EAST, SOUTH, WEST, NORTH] for item in pon_sets: if is_pon(item) and item[0] in winds: count_wind_sets += 1 return count_wind_sets == 4
def is_condition_met(self, hand, *args)
The hand contains four sets of winds :param hand: list of hand's sets :return: boolean
3.599446
3.305766
1.088839
if not aka_enabled: return False if tile in [FIVE_RED_MAN, FIVE_RED_PIN, FIVE_RED_SOU]: return True return False
def is_aka_dora(tile, aka_enabled)
:param tile: int 136 tiles format :param aka_enabled: depends on table rules :return: boolean
4.522049
4.572447
0.988978
tile_index = tile // 4 dora_count = 0 for dora in dora_indicators: dora //= 4 # sou, pin, man if tile_index < EAST: # with indicator 9, dora will be 1 if dora == 8: dora = -1 elif dora == 17: dora = 8 elif dora == 26: dora = 17 if tile_index == dora + 1: dora_count += 1 else: if dora < EAST: continue dora -= 9 * 3 tile_index_temp = tile_index - 9 * 3 # dora indicator is north if dora == 3: dora = -1 # dora indicator is hatsu if dora == 6: dora = 3 if tile_index_temp == dora + 1: dora_count += 1 return dora_count
def plus_dora(tile, dora_indicators)
:param tile: int 136 tiles format :param dora_indicators: array of 136 tiles format :return: int count of dora
3.575207
3.586679
0.996802
isolated_indices = [] for x in range(0, CHUN + 1): # for honor tiles we don't need to check nearby tiles if is_honor(x) and hand_34[x] == 0: isolated_indices.append(x) else: simplified = simplify(x) # 1 suit tile if simplified == 0: if hand_34[x] == 0 and hand_34[x + 1] == 0: isolated_indices.append(x) # 9 suit tile elif simplified == 8: if hand_34[x] == 0 and hand_34[x - 1] == 0: isolated_indices.append(x) # 2-8 tiles tiles else: if hand_34[x] == 0 and hand_34[x - 1] == 0 and hand_34[x + 1] == 0: isolated_indices.append(x) return isolated_indices
def find_isolated_tile_indices(hand_34)
Tiles that don't have -1, 0 and +1 neighbors :param hand_34: array of tiles in 34 tile format :return: array of isolated tiles indices
2.458812
2.527533
0.972811
hand_34 = copy.copy(hand_34) # we don't need to count target tile in the hand hand_34[tile_34] -= 1 if hand_34[tile_34] < 0: hand_34[tile_34] = 0 indices = [] if is_honor(tile_34): return hand_34[tile_34] == 0 else: simplified = simplify(tile_34) # 1 suit tile if simplified == 0: indices = [tile_34, tile_34 + 1, tile_34 + 2] # 2 suit tile elif simplified == 1: indices = [tile_34 - 1, tile_34, tile_34 + 1, tile_34 + 2] # 8 suit tile elif simplified == 7: indices = [tile_34 - 2, tile_34 - 1, tile_34, tile_34 + 1] # 9 suit tile elif simplified == 8: indices = [tile_34 - 2, tile_34 - 1, tile_34] # 3-7 tiles tiles else: indices = [tile_34 - 2, tile_34 - 1, tile_34, tile_34 + 1, tile_34 + 2] return all([hand_34[x] == 0 for x in indices])
def is_tile_strictly_isolated(hand_34, tile_34)
Tile is strictly isolated if it doesn't have -2, -1, 0, +1, +2 neighbors :param hand_34: array of tiles in 34 tile format :param tile_34: int :return: bool
2.088203
2.105665
0.991707
suits = [ {'count': 0, 'name': 'sou', 'function': is_sou}, {'count': 0, 'name': 'man', 'function': is_man}, {'count': 0, 'name': 'pin', 'function': is_pin}, {'count': 0, 'name': 'honor', 'function': is_honor} ] for x in range(0, 34): tile = tiles_34[x] if not tile: continue for item in suits: if item['function'](x): item['count'] += tile return suits
def count_tiles_by_suits(tiles_34)
Separate tiles by suits and count them :param tiles_34: array of tiles to count :return: dict
2.351076
2.517127
0.934032
indices = reduce(lambda z, y: z + y, hand) return all(x in HONOR_INDICES for x in indices)
def is_condition_met(self, hand, *args)
Hand composed entirely of honour tiles. :param hand: list of hand's sets :return: boolean
10.786852
8.309272
1.298171
if not melds: melds = [] closed_hand_tiles_34 = tiles_34[:] # small optimization, we can't have a pair in open part of the hand, # so we don't need to try find pairs in open sets open_tile_indices = melds and reduce(lambda x, y: x + y, [x.tiles_34 for x in melds]) or [] for open_item in open_tile_indices: closed_hand_tiles_34[open_item] -= 1 pair_indices = self.find_pairs(closed_hand_tiles_34) # let's try to find all possible hand options hands = [] for pair_index in pair_indices: local_tiles_34 = tiles_34[:] # we don't need to combine already open sets for open_item in open_tile_indices: local_tiles_34[open_item] -= 1 local_tiles_34[pair_index] -= 2 # 0 - 8 man tiles man = self.find_valid_combinations(local_tiles_34, 0, 8) # 9 - 17 pin tiles pin = self.find_valid_combinations(local_tiles_34, 9, 17) # 18 - 26 sou tiles sou = self.find_valid_combinations(local_tiles_34, 18, 26) honor = [] for x in HONOR_INDICES: if local_tiles_34[x] == 3: honor.append([x] * 3) if honor: honor = [honor] arrays = [[[pair_index] * 2]] if sou: arrays.append(sou) if man: arrays.append(man) if pin: arrays.append(pin) if honor: arrays.append(honor) for meld in melds: arrays.append([meld.tiles_34]) # let's find all possible hand from our valid sets for s in itertools.product(*arrays): hand = [] for item in list(s): if isinstance(item[0], list): for x in item: hand.append(x) else: hand.append(item) hand = sorted(hand, key=lambda a: a[0]) if len(hand) == 5: hands.append(hand) # small optimization, let's remove hand duplicates unique_hands = [] for hand in hands: hand = sorted(hand, key=lambda x: (x[0], x[1])) if hand not in unique_hands: unique_hands.append(hand) hands = unique_hands if len(pair_indices) == 7: hand = [] for index in pair_indices: hand.append([index] * 2) hands.append(hand) return hands
def divide_hand(self, tiles_34, melds=None)
Return a list of possible hands. :param tiles_34: :param melds: list of Meld objects :return:
2.548018
2.569809
0.99152
pair_indices = [] for x in range(first_index, second_index + 1): # ignore pon of honor tiles, because it can't be a part of pair if x in HONOR_INDICES and tiles_34[x] != 2: continue if tiles_34[x] >= 2: pair_indices.append(x) return pair_indices
def find_pairs(self, tiles_34, first_index=0, second_index=33)
Find all possible pairs in the hand and return their indices :return: array of pair indices
3.888299
3.871389
1.004368
indices = [] for x in range(first_index, second_index + 1): if tiles_34[x] > 0: indices.extend([x] * tiles_34[x]) if not indices: return [] all_possible_combinations = list(itertools.permutations(indices, 3)) def is_valid_combination(possible_set): if is_chi(possible_set): return True if is_pon(possible_set): return True return False valid_combinations = [] for combination in all_possible_combinations: if is_valid_combination(combination): valid_combinations.append(list(combination)) if not valid_combinations: return [] count_of_needed_combinations = int(len(indices) / 3) # simple case, we have count of sets == count of tiles if count_of_needed_combinations == len(valid_combinations) and \ reduce(lambda z, y: z + y, valid_combinations) == indices: return [valid_combinations] # filter and remove not possible pon sets for item in valid_combinations: if is_pon(item): count_of_sets = 1 count_of_tiles = 0 while count_of_sets > count_of_tiles: count_of_tiles = len([x for x in indices if x == item[0]]) / 3 count_of_sets = len([x for x in valid_combinations if x[0] == item[0] and x[1] == item[1] and x[2] == item[2]]) if count_of_sets > count_of_tiles: valid_combinations.remove(item) # filter and remove not possible chi sets for item in valid_combinations: if is_chi(item): count_of_sets = 5 # TODO calculate real count of possible sets count_of_possible_sets = 4 while count_of_sets > count_of_possible_sets: count_of_sets = len([x for x in valid_combinations if x[0] == item[0] and x[1] == item[1] and x[2] == item[2]]) if count_of_sets > count_of_possible_sets: valid_combinations.remove(item) # lit of chi\pon sets for not completed hand if hand_not_completed: return [valid_combinations] # hard case - we can build a lot of sets from our tiles # for example we have 123456 tiles and we can build sets: # [1, 2, 3] [4, 5, 6] [2, 3, 4] [3, 4, 5] # and only two of them valid in the same time [1, 2, 3] [4, 5, 6] possible_combinations = set(itertools.permutations( range(0, len(valid_combinations)), count_of_needed_combinations )) combinations_results = [] for combination in possible_combinations: result = [] for item in combination: result += valid_combinations[item] result = sorted(result) if result == indices: results = [] for item in combination: results.append(valid_combinations[item]) results = sorted(results, key=lambda z: z[0]) if results not in combinations_results: combinations_results.append(results) return combinations_results
def find_valid_combinations(self, tiles_34, first_index, second_index, hand_not_completed=False)
Find and return all valid set combinations in the given suit :param tiles_34: :param first_index: :param second_index: :param hand_not_completed: in that mode we can return just possible chi/pon sets :return: list of valid combinations
2.39956
2.353692
1.019488
tiles = sorted(tiles) man = [t for t in tiles if t < 36] pin = [t for t in tiles if 36 <= t < 72] pin = [t - 36 for t in pin] sou = [t for t in tiles if 72 <= t < 108] sou = [t - 72 for t in sou] honors = [t for t in tiles if t >= 108] honors = [t - 108 for t in honors] sou = sou and ''.join([str((i // 4) + 1) for i in sou]) + 's' or '' pin = pin and ''.join([str((i // 4) + 1) for i in pin]) + 'p' or '' man = man and ''.join([str((i // 4) + 1) for i in man]) + 'm' or '' honors = honors and ''.join([str((i // 4) + 1) for i in honors]) + 'z' or '' return man + pin + sou + honors
def to_one_line_string(tiles)
Convert a 136 tiles array to a one-line string. Example output: 123s123p123m33z
1.931294
1.845759
1.046341
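A minimal usage sketch for `to_one_line_string`, assuming it is importable; output order is man, pin, sou, honors as in the code above.

```python
# Tiles 0, 36, 72 and 108 are the first man, pin, sou and honor tiles
# in the 136-tile numbering.
print(to_one_line_string([0, 36, 72, 108]))  # '1m1p1s1z'
```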
temp = [] results = [] for x in range(0, 34): if tiles[x]: temp_value = [x * 4] * tiles[x] for tile in temp_value: if tile in results: count_of_tiles = len([x for x in temp if x == tile]) new_tile = tile + count_of_tiles results.append(new_tile) temp.append(tile) else: results.append(tile) temp.append(tile) return results
def to_136_array(tiles)
Convert 34 array to the 136 tiles array
3.604491
3.247018
1.110093
def _split_string(string, offset, red=None): data = [] temp = [] if not string: return [] for i in string: if i == 'r' and has_aka_dora: temp.append(red) data.append(red) else: tile = offset + (int(i) - 1) * 4 if tile == red and has_aka_dora: # prevent non reds to become red tile += 1 if tile in data: count_of_tiles = len([x for x in temp if x == tile]) new_tile = tile + count_of_tiles data.append(new_tile) temp.append(tile) else: data.append(tile) temp.append(tile) return data results = _split_string(man, 0, FIVE_RED_MAN) results += _split_string(pin, 36, FIVE_RED_PIN) results += _split_string(sou, 72, FIVE_RED_SOU) results += _split_string(honors, 108) return results
def string_to_136_array(sou=None, pin=None, man=None, honors=None, has_aka_dora=False)
Method to convert the one-line string tile format to the 136 array. You can pass 'r' instead of '5' for it to become a red five of that suit; has_aka_dora has to be True for 'r' to be treated as red (this preserves older usage without red fives). We need it to increase the readability of our tests
3.322347
3.085334
1.076819
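A small usage sketch for `string_to_136_array`, assuming it is importable along with the red-five constants it references.

```python
# '123' in man maps to the first copy of each of 1m, 2m, 3m.
print(string_to_136_array(man="123"))  # [0, 4, 8]

# With has_aka_dora=True, 'r' yields the red five of that suit
# (a one-element list containing FIVE_RED_SOU here).
print(string_to_136_array(sou="r", has_aka_dora=True))
```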
results = TilesConverter.string_to_136_array(sou, pin, man, honors) results = TilesConverter.to_34_array(results) return results
def string_to_34_array(sou=None, pin=None, man=None, honors=None)
Method to convert the one-line string tile format to the 34 array. We need it to increase the readability of our tests
3.738542
3.115901
1.199827
if tile34 is None or tile34 > 33: return None tile = tile34 * 4 possible_tiles = [tile] + [tile + i for i in range(1, 4)] found_tile = None for possible_tile in possible_tiles: if possible_tile in tiles: found_tile = possible_tile break return found_tile
def find_34_tile_in_136_array(tile34, tiles)
Our shanten calculator operates on the 34-tile format; after the calculation we need to find the computed 34-format tile among the player's 136-format tiles. For example, tile 0 from the 34 array can be present in the 136 array as 0, 1, 2 or 3
2.656889
2.856431
0.930143
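A short example for `find_34_tile_in_136_array`, assuming it is importable; the tile lists are made up.

```python
# 34-format tile 0 corresponds to 136-format tiles 0-3; the first copy
# present in the player's tiles is returned, otherwise None.
print(find_34_tile_in_136_array(0, [2, 45, 67]))  # 2
print(find_34_tile_in_136_array(0, [45, 67]))     # None
```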
def value(self): return self._sign[1] * self.S0 * norm.cdf( self._sign[1] * self.d1, 0.0, 1.0 ) - self._sign[1] * self.K * np.exp(-self.r * self.T) * norm.cdf( self._sign[1] * self.d2, 0.0, 1.0 )
Compute option value according to BSM model.
null
null
null
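A standalone sketch of the Black-Scholes call value this method encodes, computed directly with scipy rather than through the class; the parameter values are arbitrary.

```python
import numpy as np
from scipy.stats import norm

S0, K, T, r, sigma = 100.0, 100.0, 1.0, 0.02, 0.2
d1 = (np.log(S0 / K) + (r + 0.5 * sigma ** 2) * T) / (sigma * np.sqrt(T))
d2 = d1 - sigma * np.sqrt(T)

# Call value: S0*N(d1) - K*exp(-r*T)*N(d2); a put flips the signs,
# which is what the `_sign` factor in the method above handles.
call = S0 * norm.cdf(d1) - K * np.exp(-r * T) * norm.cdf(d2)
print(round(call, 2))  # ~8.92
```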
def implied_vol(self, value, precision=1.0e-5, iters=100): vol = np.sqrt(2.0 * np.pi / self.T) * (value / self.S0) for _ in itertools.repeat(None, iters): # Faster than range opt = BSM( S0=self.S0, K=self.K, T=self.T, r=self.r, sigma=vol, kind=self.kind, ) diff = value - opt.value() if abs(diff) < precision: return vol vol = vol + diff / opt.vega() return vol
Get implied vol at the specified price using an iterative approach. There is no closed-form inverse of BSM-value as a function of sigma, so start at an anchoring volatility level from Brenner & Subrahmanyam (1988) and work iteratively from there. Resources --------- Brenner & Subrahmanyam, A Simple Formula to Compute the Implied Standard Deviation, 1988.
null
null
null
def add_option(self, K=None, price=None, St=None, kind="call", pos="long"): kinds = { "call": Call, "Call": Call, "c": Call, "C": Call, "put": Put, "Put": Put, "p": Put, "P": Put, } St = self.St if St is None else St option = kinds[kind](St=St, K=K, price=price, pos=pos) self.options.append(option)
Add an option to the object's `options` container.
null
null
null
def summary(self, St=None): St = self.St if St is None else St if self.options: payoffs = [op.payoff(St=St) for op in self.options] profits = [op.profit(St=St) for op in self.options] strikes = [op.K for op in self.options] prices = [op.price for op in self.options] exprs = [St] * len(self.options) kinds = [op.kind for op in self.options] poss = [op.pos for op in self.options] res = OrderedDict( [ ("kind", kinds), ("position", poss), ("strike", strikes), ("price", prices), ("St", exprs), ("payoff", payoffs), ("profit", profits), ] ) return DataFrame(res) else: return None
Tabular summary of strategy composition, broken out by option. Returns ------- pd.DataFrame Columns: kind, position, strike, price, St, payoff, profit.
null
null
null
def grid(self, start=None, stop=None, St=None, **kwargs): lb = 0.75 rb = 1.25 if not any((start, stop, St)) and self.St is None: St = np.mean([op.K for op in self.options], axis=0) start = St * lb stop = St * rb elif not any((start, stop)): St = self.St if St is None else St start = np.max(St) * lb stop = np.max(St) * rb St = np.linspace(start, stop, **kwargs) payoffs = self.payoff(St=St) profits = self.profit(St=St) return St, payoffs, profits
Grid-like representation of payoff & profit structure. Returns ------- tuple Tuple of `St` (price at expiry), `payoffs`, `profits`.
null
null
null
def _rolling_lstsq(x, y): if x.ndim == 2: # Treat everything as 3d and avoid AxisError on .swapaxes(1, 2) below # This means an original input of: # array([0., 1., 2., 3., 4., 5., 6.]) # becomes: # array([[[0.], # [1.], # [2.], # [3.]], # # [[1.], # [2.], # ... x = x[:, :, None] elif x.ndim <= 1: raise np.AxisError("x should have ndim >= 2") return np.squeeze( np.matmul( np.linalg.inv(np.matmul(x.swapaxes(1, 2), x)), np.matmul(x.swapaxes(1, 2), np.atleast_3d(y)), ) )
Finds solution for the rolling case. Matrix formulation.
null
null
null
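A small sketch of the batched (per-window) least-squares product the helper computes; the two stacked windows are made up, and both follow y = 1 + 2x exactly.

```python
import numpy as np

# Two windows, each with 4 observations and 2 regressors (constant, x).
x = np.array([[[1., 0.], [1., 1.], [1., 2.], [1., 3.]],
              [[1., 1.], [1., 2.], [1., 3.], [1., 4.]]])
y = np.array([[1., 3., 5., 7.],
              [3., 5., 7., 9.]])

# Same matrix product as the function body: (X'X)^-1 X'y per window.
beta = np.squeeze(
    np.matmul(np.linalg.inv(np.matmul(x.swapaxes(1, 2), x)),
              np.matmul(x.swapaxes(1, 2), np.atleast_3d(y)))
)
print(beta)  # approx [[1. 2.] [1. 2.]]: intercept 1, slope 2 in each window
```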
def _confirm_constant(a): a = np.asanyarray(a) return np.isclose(a, 1.0).all(axis=0).any()
Confirm `a` has a column vector of 1s.
null
null
null
def _check_constant_params( a, has_const=False, use_const=True, rtol=1e-05, atol=1e-08 ): if all((has_const, use_const)): if not _confirm_constant(a): raise ValueError( "Data does not contain a constant; specify" " has_const=False" ) k = a.shape[-1] - 1 elif not any((has_const, use_const)): if _confirm_constant(a): raise ValueError( "Data already contains a constant; specify" " has_const=True" ) k = a.shape[-1] elif not has_const and use_const: # Also run a quick check to confirm that `a` is *not* ~N(0,1). # In this case, constant should be zero. (exclude it entirely) c1 = np.allclose(a.mean(axis=0), b=0.0, rtol=rtol, atol=atol) c2 = np.allclose(a.std(axis=0), b=1.0, rtol=rtol, atol=atol) if c1 and c2: # TODO: maybe we want to just warn here? raise ValueError( "Data appears to be ~N(0,1). Specify" " use_constant=False." ) # `has_constant` does checking on its own and raises VE if True try: a = add_constant(a, has_constant="raise") except ValueError as e: raise ValueError( "X data already contains a constant; please specify" " has_const=True" ) from e k = a.shape[-1] - 1 else: raise ValueError("`use_const` == False implies has_const is False.") return k, a
Helper func to interaction between has_const and use_const params. has_const use_const outcome --------- --------- ------- True True Confirm that a has constant; return a False False Confirm that a doesn't have constant; return a False True Confirm that a doesn't have constant; add constant True False ValueError
null
null
null
def condition_number(self): # Mimic x = np.matrix(self.x) (deprecated) x = np.atleast_2d(self.x) ev = np.linalg.eig(x.T @ x)[0] return np.sqrt(ev.max() / ev.min())
Condition number of x; ratio of largest to smallest eigenvalue.
null
null
null
def fstat_sig(self): return 1.0 - scs.f.cdf(self.fstat, self.df_reg, self.df_err)
p-value of the F-statistic.
null
null
null
def _pvalues_all(self): return 2.0 * (1.0 - scs.t.cdf(np.abs(self._tstat_all), self.df_err))
Two-tailed p values for t-stats of all parameters.
null
null
null
def rsq_adj(self): n = self.n k = self.k return 1.0 - ((1.0 - self.rsq) * (n - 1.0) / (n - k - 1.0))
Adjusted R-squared.
null
null
null
def _se_all(self): x = np.atleast_2d(self.x) err = np.atleast_1d(self.ms_err) se = np.sqrt(np.diagonal(np.linalg.inv(x.T @ x)) * err[:, None]) return np.squeeze(se)
Standard errors (SE) for all parameters, including the intercept.
null
null
null
def ss_tot(self): return np.sum(np.square(self.y - self.ybar), axis=0)
Total sum of squares.
null
null
null
def ss_reg(self): return np.sum(np.square(self.predicted - self.ybar), axis=0)
Sum of squares of the regression.
null
null
null
def std_err(self): return np.sqrt(np.sum(np.square(self.resids), axis=0) / self.df_err)
Standard error of the estimate (SEE). A scalar. For standard errors of parameters, see _se_all, se_alpha, and se_beta.
null
null
null
def _std_err(self): return np.sqrt(np.sum(np.square(self._resids), axis=1) / self._df_err)
Standard error of the estimate (SEE). A scalar. For standard errors of parameters, see _se_all, se_alpha, and se_beta.
null
null
null
def _predicted(self): return np.squeeze( np.matmul(self.xwins, np.expand_dims(self.solution, axis=-1)) )
The predicted values of y ('yhat').
null
null
null
def _ss_tot(self): return np.sum( np.square(self.ywins - np.expand_dims(self._ybar, axis=-1)), axis=1 )
Total sum of squares.
null
null
null
def _ss_reg(self): return np.sum( np.square(self._predicted - np.expand_dims(self._ybar, axis=1)), axis=1, )
Sum of squares of the regression.
null
null
null
def _rsq_adj(self): n = self.n k = self.k return 1.0 - ((1.0 - self._rsq) * (n - 1.0) / (n - k - 1.0))
Adjusted R-squared.
null
null
null
def _fstat_sig(self): return 1.0 - scs.f.cdf(self._fstat, self._df_reg, self._df_err)
p-value of the F-statistic.
null
null
null
def _se_all(self): err = np.expand_dims(self._ms_err, axis=1) t1 = np.diagonal( np.linalg.inv(np.matmul(self.xwins.swapaxes(1, 2), self.xwins)), axis1=1, axis2=2, ) return np.squeeze(np.sqrt(t1 * err))
Standard errors (SE) for all parameters, including the intercept.
null
null
null
def _condition_number(self): ev = np.linalg.eig(np.matmul(self.xwins.swapaxes(1, 2), self.xwins))[0] return np.sqrt(ev.max(axis=1) / ev.min(axis=1))
Condition number of x; ratio of largest to smallest eigenvalue.
null
null
null
def activeshare(fund, idx, in_format="num"): if not (fund.index.is_unique and idx.index.is_unique): raise ValueError("Inputs must have unique indices.") if isinstance(fund, pd.DataFrame): cols = fund.columns fund = fund * NUMTODEC[in_format] idx = idx * NUMTODEC[in_format] union = fund.index.union(idx.index) fund = fund.reindex(union, fill_value=0.0).values idx = idx.reindex(union, fill_value=0.0).values if fund.ndim == 1: # Resulting active share will be a scalar diff = fund - idx else: diff = fund - idx[:, None] act_sh = np.sum(np.abs(diff) * 0.5, axis=0) if isinstance(act_sh, np.ndarray): act_sh = pd.Series(act_sh, index=cols) return act_sh
Compute the active share of a fund versus an index. Formula is 0.5 * sum(abs(w_fund - w_idx)). Parameters ---------- fund: {pd.Series, pd.DataFrame} The fund's holdings, with tickers as the Index and weights as values. If a DataFrame, each column is a ticker/portfolio. idx: pd.Series The benchmark portfolio, with tickers as the Index and weights as values. in_format: {'num', 'dec'} Decimal notation of the inputs. "num" means 0.5% is denoted 0.5; "dec" means 0.5% is denoted 0.005. Returns ------- act_sh : pd.Series or pd.DataFrame The dimension will be one-lower than that of `fund`. If `fund` is a Series, the result is a scalar value. If `fund` is a DataFrame, the result is a Series, with the columns of `fund` as the resulting Index. .. _Cremers & Petajisto, 'How Active Is Your Fund Manager?', 2009
null
null
null
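A small worked example of the active-share calculation above, assuming `activeshare` is importable and that the module-level NUMTODEC rescales 'num' inputs by 1/100 as the docstring implies; the holdings are made up.

```python
import pandas as pd

fund = pd.Series({"AAPL": 60.0, "MSFT": 40.0})
idx = pd.Series({"AAPL": 50.0, "MSFT": 30.0, "GOOG": 20.0})

# 0.5 * (|0.60 - 0.50| + |0.40 - 0.30| + |0.00 - 0.20|) = 0.20
print(activeshare(fund, idx))  # ~0.2, i.e. 20% active share
```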
def amortize(rate, nper, pv, freq="M"): freq = utils.get_anlz_factor(freq) rate = rate / freq nper = nper * freq periods = np.arange(1, nper + 1, dtype=int) principal = np.ppmt(rate, periods, nper, pv) interest = np.ipmt(rate, periods, nper, pv) pmt = np.pmt(rate, nper, pv) def balance(pv, rate, nper, pmt): dfac = (1 + rate) ** nper return pv * dfac - pmt * (dfac - 1) / rate res = pd.DataFrame( { "beg_bal": balance(pv, rate, periods - 1, -pmt), "prin": principal, "interest": interest, "end_bal": balance(pv, rate, periods, -pmt), }, index=periods, )[["beg_bal", "prin", "interest", "end_bal"]] return res
Construct an amortization schedule for a fixed-rate loan. Rate -> annualized input Example ------- # a 6.75% $200,000 loan, 30-year tenor, payments due monthly # view the 5 final months print(amortize(rate=.0675, nper=30, pv=200000).round(2).tail()) beg_bal prin interest end_bal 356 6377.95 -1261.32 -35.88 5116.63 357 5116.63 -1268.42 -28.78 3848.22 358 3848.22 -1275.55 -21.65 2572.67 359 2572.67 -1282.72 -14.47 1289.94 360 1289.94 -1289.94 -7.26 -0.00
null
null
null
def corr_heatmap( x, mask_half=True, cmap="RdYlGn_r", vmin=-1, vmax=1, linewidths=0.5, square=True, figsize=(10, 10), **kwargs ): if mask_half: mask = np.zeros_like(x.corr().values) mask[np.triu_indices_from(mask)] = True else: mask = None with sns.axes_style("white"): return sns.heatmap( x.corr(), cmap=cmap, vmin=vmin, vmax=vmax, linewidths=linewidths, square=square, mask=mask, **kwargs )
Wrapper around seaborn.heatmap for visualizing correlation matrix. Parameters ---------- x : DataFrame Underlying data (not a correlation matrix) mask_half : bool, default True If True, mask (whiteout) the upper right triangle of the matrix All other parameters passed to seaborn.heatmap: https://seaborn.pydata.org/generated/seaborn.heatmap.html Example ------- # Generate some correlated data >>> import numpy as np >>> import pandas as pd >>> k = 10 >>> size = 400 >>> mu = np.random.randint(0, 10, k).astype(float) >>> r = np.random.ranf(k ** 2).reshape((k, k)) * 5 >>> df = pd.DataFrame(np.random.multivariate_normal(mu, r, size=size)) >>> corr_heatmap(df, figsize=(6, 6))
null
null
null
def ewm_params(param, param_value): if param not in ["alpha", "com", "span", "halflife"]: raise NameError("`param` must be one of {alpha, com, span, halflife}") def input_alpha(a): com = 1.0 / a - 1.0 span = 2.0 / a - 1.0 halflife = np.log(0.5) / np.log(1.0 - a) return {"com": com, "span": span, "halflife": halflife} def output_alpha(param, p): eqs = { "com": 1.0 / (1.0 + p), "span": 2.0 / (p + 1.0), "halflife": 1.0 - np.exp(np.log(0.5) / p), } return eqs[param] if param == "alpha": dct = input_alpha(a=param_value) alpha = param_value else: alpha = output_alpha(param=param, p=param_value) dct = input_alpha(a=alpha) dct.update({"alpha": alpha}) return dct
Corresponding parameter values for exponentially weighted functions. Parameters ---------- param : {'alpha', 'com', 'span', 'halflife'} param_value : float or int The parameter value. Returns ------- result : dict Layout/index of corresponding parameters.
null
null
null
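A quick sketch of `ewm_params`, assuming it is importable; the alpha value is arbitrary.

```python
# alpha = 0.1 corresponds to com = 9, span = 19 and
# halflife = log(0.5) / log(0.9), roughly 6.58 periods.
print(ewm_params("alpha", 0.1))
# {'com': 9.0, 'span': 19.0, 'halflife': 6.578..., 'alpha': 0.1}
```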
def ewm_weights(i, com=None, span=None, halflife=None, alpha=None): if not any((com, span, halflife, alpha)): raise ValueError("specify one of `com`, `span`, `halflife`, `alpha`") params = [com, span, halflife, alpha] pos = next(i for (i, x) in enumerate(params) if x) param_value = next(item for item in params if item is not None) lookup = dict(enumerate(["com", "span", "halflife", "alpha"])) param = lookup[pos] alpha = ewm_params(param=param, param_value=param_value)["alpha"] res = (1.0 - alpha) ** np.arange(i)[::-1] res /= res.sum() return res
Exponential weights as a function of position `i`. Mimics pandas' methodology with adjust=True: http://pandas.pydata.org/pandas-docs/stable/computation.html#exponentially-weighted-windows
null
null
null
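A short example for `ewm_weights`, assuming it is importable.

```python
# Three weights with alpha = 0.5: (1 - 0.5) ** [2, 1, 0] = [0.25, 0.5, 1.0],
# then normalized to sum to one.
print(ewm_weights(3, alpha=0.5))
# [0.14285714 0.28571429 0.57142857]
```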
def ewm_bootstrap( a, size=None, com=None, span=None, halflife=None, alpha=None ): if not any((com, span, halflife, alpha)): raise ValueError("Specify one of `com`, `span`, `halflife`, `alpha`.") p = ewm_weights( i=len(a), com=com, span=span, halflife=halflife, alpha=alpha ) res = np.random.choice(a=a, size=size, p=p) return res
Bootstrap a new distribution through exponential weighting. Parameters ---------- a : 1-D array-like Array from which to generate random sample of elements size : int or tuple of ints, default None Output shape. If None, a single value is returned com : float, default None Center of mass; alpha = 1 / (1 + com), for com ≥ 0 span : float, default None Span parameter; a = 2 / (span + 1), for span ≥ 1 halflife : float, default None Halflife parameter; alpha = 1 − exp(log(0.5) / halflife), for halflife > 0 alpha : float, default None Smoothing factor Example ------- >>> import pandas as pd >>> np.random.seed(123) # Our bootstrapped histogram should approximate these freqs >>> ewm_weights(10, alpha=.10) [ 0.05948221 0.06609135 0.07343483 0.08159426 0.09066029 0.10073365 0.11192628 0.12436253 0.13818059 0.15353399] >>> res = ewm_bootstrap(np.arange(10), size=int(1e6), alpha=.10) >>> res = pd.Series(res).value_counts() >>> (res / res.sum()).head() 9 0.15323 8 0.13834 7 0.12424 6 0.11189 5 0.10113 dtype: float64
null
null
null
def variance_inflation_factor(regressors, hasconst=False): if not hasconst: regressors = add_constant(regressors, prepend=False) k = regressors.shape[1] def vif_sub(x, regressors): x_i = regressors.iloc[:, x] mask = np.arange(k) != x x_not_i = regressors.iloc[:, mask] rsq = linear_model.OLS(x_i, x_not_i, missing="drop").fit().rsquared_adj vif = 1.0 / (1.0 - rsq) return vif vifs = pd.Series(np.arange(k), index=regressors.columns) vifs = vifs.apply(vif_sub, args=(regressors,)) # Find the constant column (probably called 'const', but not necessarily # and drop it. `is_nonzero_const` borrowed from statsmodels.add_constant is_nonzero_const = np.ptp(regressors.values, axis=0) == 0 is_nonzero_const &= np.all(regressors != 0.0, axis=0) vifs.drop(vifs.index[is_nonzero_const], inplace=True) return vifs
Calculate variance inflation factor (VIF) for each of the `regressors`. A wrapper/modification of statsmodels: statsmodels.stats.outliers_influence.variance_inflation_factor One recommendation is that if VIF is greater than 5, then the explanatory variable `x` is highly collinear with the other explanatory variables, and the parameter estimates will have large standard errors because of this. [source: StatsModels] Parameters ---------- regressors: DataFrame DataFrame containing the entire set of regressors hasconst : bool, default False If False, a column vector will be added to `regressors` for use in OLS Example ------- # Generate some data from datetime import date from pandas_datareader.data import DataReader as dr syms = {'TWEXBMTH' : 'usd', 'T10Y2YM' : 'term_spread', 'PCOPPUSDM' : 'copper' } start = date(2000, 1, 1) data = (dr(syms.keys(), 'fred', start) .pct_change() .dropna()) data = data.rename(columns = syms) print(variance_inflation_factor(data)) usd 1.31609 term_spread 1.03793 copper 1.37055 dtype: float64
null
null
null
def fit(self): # Defaults/anchors best_sse = np.inf best_param = (0.0, 1.0) best_dist = scs.norm # Compute the histogram of `x`. density=True gives a probability # density function at each bin, normalized such that the integral over # the range is 1.0 hist, bin_edges = np.histogram(self.x, bins=self.bins, density=True) # The results of np.histogram will have len(bin_edges) = len(hist) + 1 # Find the midpoint at each bin to reduce the size of bin_edges by 1 bin_edges = (bin_edges + np.roll(bin_edges, -1))[:-1] / 2.0 with warnings.catch_warnings(): warnings.filterwarnings("ignore") sses = [] params = [] for dist in self.distributions: dist = getattr(scs, dist) try: # The generic rv_continuous.fit() returns `mle_tuple`: # 'MLEs for any shape parameters (if applicable), # followed by those for location and scale.' param = *shape, loc, scale = dist.fit(self.x) pdf = dist.pdf(bin_edges, loc=loc, scale=scale, *shape) sse = np.sum(np.power(hist - pdf, 2.0)) sses.append(sse) params.append(param) if best_sse > sse > 0.0: best_dist = dist best_param = param best_sse = sse best_pdf = pdf except (NotImplementedError, AttributeError): sses.append(np.nan) params.append(np.nan) self.best_dist = best_dist self.best_param = best_param self.best_sse = best_sse self.best_pdf = best_pdf self.sses = sses self.params = params self.hist = hist self.bin_edges = bin_edges return self
Fit each distribution to `data` and calculate an SSE. WARNING: significant runtime. (~1min)
null
null
null
def best(self): return pd.Series( { "name": self.best_dist.name, "params": self.best_param, "sse": self.best_sse, } )
The resulting best-fit distribution, its parameters, and SSE.
null
null
null
def all(self, by="name", ascending=True): res = pd.DataFrame( { "name": self.distributions, "params": self.params, "sse": self.sses, } )[["name", "sse", "params"]] res.sort_values(by=by, ascending=ascending, inplace=True) return res
All tested distributions, their parameters, and SSEs.
null
null
null
def plot(self): plt.plot(self.bin_edges, self.hist, self.bin_edges, self.best_pdf)
Plot the empirical histogram versus best-fit distribution's PDF.
null
null
null
def fit(self): self.n_samples, self.n_features = self.ms.shape self.u, self.s, self.vt = np.linalg.svd(self.ms, full_matrices=False) self.v = self.vt.T # sklearn's implementation is to guarantee that the left and right # singular vectors (U and V) are always the same, by imposing the # that the largest coefficient of U in absolute value is positive # This implementation uses u_based_decision=False rather than the # default True to flip that logic and ensure the resulting # components and loadings have high positive coefficients self.u, self.vt = svd_flip( self.u, self.v, u_based_decision=self.u_based_decision ) self.v = self.vt.T # Drop eigenvalues with value > threshold # *keep* is number of components retained self.eigenvalues = self.s ** 2 / self.n_samples self.keep = np.count_nonzero(self.eigenvalues > self.threshold) self.inertia = (self.eigenvalues / self.eigenvalues.sum())[: self.keep] self.cumulative_inertia = self.inertia.cumsum()[: self.keep] self.eigenvalues = self.eigenvalues[: self.keep] return self
Fit the model by computing full SVD on m. SVD factors the matrix m as u * np.diag(s) * v, where u and v are unitary and s is a 1-d array of m's singular values. Note that the SVD is commonly written as a = U S V.H, and the v returned by this function is V.H (the Hermitian transpose). Therefore, we denote V.H as vt, and back into the actual v, denoted just v. The decomposition uses np.linalg.svd with full_matrices=False, so for m with shape (M, N), then the shape of: - u is (M, K) - v is (K, N), where K = min(M, N) Inertia is the percentage of explained variance. Returns ------- self, to enable method chaining
null
null
null
def eigen_table(self): idx = ["Eigenvalue", "Variability (%)", "Cumulative (%)"] table = pd.DataFrame( np.array( [self.eigenvalues, self.inertia, self.cumulative_inertia] ), columns=["F%s" % i for i in range(1, self.keep + 1)], index=idx, ) return table
Eigenvalues, expl. variance, and cumulative expl. variance.
null
null
null
def loadings(self): loadings = self.v[:, : self.keep] * np.sqrt(self.eigenvalues) cols = ["PC%s" % i for i in range(1, self.keep + 1)] loadings = pd.DataFrame( loadings, columns=cols, index=self.feature_names ) return loadings
Loadings = eigenvectors times sqrt(eigenvalues).
null
null
null
def optimize(self): def te(weights, r, proxies): if isinstance(weights, list): weights = np.array(weights) proxy = np.sum(proxies * weights, axis=1) te = np.std(proxy - r) # not anlzd... return te ew = utils.equal_weights(n=self.n, sumto=self.sumto) bnds = tuple((0, 1) for x in range(self.n)) cons = {"type": "eq", "fun": lambda x: np.sum(x) - self.sumto} xs = [] funs = [] for i, j in zip(self._r, self._proxies): opt = sco.minimize( te, x0=ew, args=(i, j), method="SLSQP", bounds=bnds, constraints=cons, ) x, fun = opt["x"], opt["fun"] xs.append(x) funs.append(fun) self._xs = np.array(xs) self._funs = np.array(funs) return self
Analogous to `sklearn`'s fit. Returns `self` to enable chaining.
null
null
null
def opt_weights(self): return pd.DataFrame(self._xs, index=self.newidx, columns=self.cols)
Optimal weights (period-end).
null
null
null
def replicate(self): return np.sum( self.proxies[self.window :] * self._xs[:-1], axis=1 ).reindex(self.r.index)
Forward-month returns of the replicating portfolio.
null
null
null
if isinstance(obj, pd.Series): return obj elif isinstance(obj, pd.DataFrame) and obj.shape[-1] == 1: return obj.squeeze() else: if raise_: raise ValueError("Input cannot be squeezed.") return obj
def _try_to_squeeze(obj, raise_=False)
Attempt to squeeze to 1d Series. Parameters ---------- obj : {pd.Series, pd.DataFrame} raise_ : bool, default False
2.876479
2.727552
1.054601
if self.index.is_all_dates: # TODO: Could be more granular here, # for cases with < day frequency. td = self.index[-1] - self.index[0] n = td.total_seconds() / SECS_PER_CAL_YEAR else: # We don't have a datetime-like Index, so assume # periods/dates are consecutive and simply count them. # We do, however, need an explicit frequency. freq = freq if freq is not None else self.freq if freq is None: raise FrequencyError( "Must specify a `freq` when a" " datetime-like index is not used." ) n = len(self) / utils.get_anlz_factor(freq) return nanprod(self.ret_rels()) ** (1.0 / n) - 1.0
def anlzd_ret(self, freq=None)
Annualized (geometric) return. Parameters ---------- freq : str or None, default None A frequency string used to create an annualization factor. If None, `self.freq` will be used. If that is also None, a frequency will be inferred. If none can be inferred, an exception is raised. It may be any frequency string or anchored offset string recognized by Pandas, such as 'D', '5D', 'Q', 'Q-DEC', or 'BQS-APR'. Returns ------- float
7.507804
7.105745
1.056582
if freq is None: freq = self._try_get_freq() if freq is None: raise FrequencyError(msg) return nanstd(self, ddof=ddof) * freq ** 0.5
def anlzd_stdev(self, ddof=0, freq=None, **kwargs)
Annualized standard deviation with `ddof` degrees of freedom. Parameters ---------- ddof : int, default 0 Degrees of freedom, passed to pd.Series.std(). freq : str or None, default None A frequency string used to create an annualization factor. If None, `self.freq` will be used. If that is also None, a frequency will be inferred. If none can be inferred, an exception is raised. It may be any frequency string or anchored offset string recognized by Pandas, such as 'D', '5D', 'Q', 'Q-DEC', or 'BQS-APR'. **kwargs Passed to pd.Series.std(). TODO: freq Returns ------- float
5.176009
5.791708
0.893693
diff = self.excess_ret(benchmark) return np.count_nonzero(diff > 0.0) / diff.count()
def batting_avg(self, benchmark)
Percentage of periods when `self` outperformed `benchmark`. Parameters ---------- benchmark : {pd.Series, TSeries, 1d np.ndarray} The benchmark security to which `self` is compared. Returns ------- float
9.027723
9.065283
0.995857
beta = self.beta(benchmark=benchmark, **kwargs) return adj_factor * beta + (1 - adj_factor)
def beta_adj(self, benchmark, adj_factor=2 / 3, **kwargs)
Adjusted beta. Beta that is adjusted to reflect the tendency of beta to be mean reverting. [Source: CFA Institute] Formula: adj_factor * raw_beta + (1 - adj_factor) Parameters ---------- benchmark : {pd.Series, TSeries, pd.DataFrame, np.ndarray} The benchmark securitie(s) to which `self` is compared. Returns ------- float or np.ndarray If `benchmark` is 1d, returns a scalar. If `benchmark` is 2d, returns a 1d ndarray. Reference --------- .. _Blume, Marshall. "Betas and Their Regression Tendencies." http://www.stat.ucla.edu/~nchristo/statistics417/blume_betas.pdf
3.532933
5.62323
0.628275
if isinstance(compare_op, (tuple, list)): op1, op2 = compare_op else: op1, op2 = compare_op, compare_op uc = self.up_capture( benchmark=benchmark, threshold=threshold, compare_op=op1 ) dc = self.down_capture( benchmark=benchmark, threshold=threshold, compare_op=op2 ) return uc / dc
def capture_ratio(self, benchmark, threshold=0.0, compare_op=("ge", "lt"))
Capture ratio--ratio of upside to downside capture. Upside capture ratio divided by the downside capture ratio. Parameters ---------- benchmark : {pd.Series, TSeries, 1d np.ndarray} The benchmark security to which `self` is compared. threshold : float, default 0. The threshold at which the comparison should be done. `self` and `benchmark` are "filtered" to periods where `benchmark` is greater than/less than `threshold`. compare_op : {tuple, str, list}, default ('ge', 'lt') Comparison operator used to compare to `threshold`. If a sequence, the two elements are passed to `self.up_capture()` and `self.down_capture()`, respectively. If `str`, indicates the comparison operator used in both method calls. Returns ------- float
3.409187
2.596943
1.312769
slf, bm = self.downmarket_filter( benchmark=benchmark, threshold=threshold, compare_op=compare_op, include_benchmark=True, ) return slf.geomean() / bm.geomean()
def down_capture(self, benchmark, threshold=0.0, compare_op="lt")
Downside capture ratio.

Measures the performance of `self` relative to `benchmark`,
conditioned on periods where `benchmark` is lt or le to
`threshold`.

Downside capture ratios are calculated by taking the fund's
monthly return during the periods of negative benchmark
performance and dividing it by the benchmark return.
[Source: CFA Institute]

Parameters
----------
benchmark : {pd.Series, TSeries, 1d np.ndarray}
    The benchmark security to which `self` is compared.
threshold : float, default 0.
    The threshold at which the comparison should be done.
    `self` and `benchmark` are "filtered" to periods where
    `benchmark` is lt/le `threshold`.
compare_op : {'lt', 'le'}
    Comparison operator used to compare to `threshold`.
    'lt' is less-than; 'le' is less-than-or-equal.

Returns
-------
float

Note
----
This metric uses geometric, not arithmetic, mean return.
6.963277
5.941056
1.172061
return self._mkt_filter(
    benchmark=benchmark,
    threshold=threshold,
    compare_op=compare_op,
    include_benchmark=include_benchmark,
)
def downmarket_filter(self, benchmark, threshold=0.0, compare_op="lt", include_benchmark=False)
Drop elementwise samples where `benchmark` > `threshold`.

Filters `self` (and optionally, `benchmark`) to periods where
`benchmark` < `threshold`.  (Or <= `threshold`.)

Parameters
----------
benchmark : {pd.Series, TSeries, 1d np.ndarray}
    The benchmark security to which `self` is compared.
threshold : float, default 0.0
    The threshold at which the comparison should be done.
    `self` and `benchmark` are "filtered" to periods where
    `benchmark` is lt/le `threshold`.
compare_op : {'lt', 'le'}
    Comparison operator used to compare to `threshold`.
    'lt' is less-than; 'le' is less-than-or-equal.
include_benchmark : bool, default False
    If True, return tuple of (`self`, `benchmark`) both filtered.
    If False, return only `self` filtered.

Returns
-------
TSeries or tuple of TSeries
    TSeries if `include_benchmark=False`, otherwise, tuple.
2.459722
3.311522
0.742777
end = self.drawdown_idx().idxmin()
if return_date:
    return end.date()
return end
def drawdown_end(self, return_date=False)
The date of the drawdown trough.

Date at which the drawdown was most negative.

Parameters
----------
return_date : bool, default False
    If True, return a `datetime.date` object.
    If False, return a Pandas Timestamp object.

Returns
-------
datetime.date or pandas._libs.tslib.Timestamp
7.264731
8.47022
0.857679
ri = self.ret_idx()
return ri / np.maximum(ri.cummax(), 1.0) - 1.0
def drawdown_idx(self)
Drawdown index; TSeries of drawdown from running HWM.

Returns
-------
TSeries
9.786596
8.152183
1.200488
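A standalone pandas sketch of the same computation: grow a return index, track its running high-water mark (floored at the 1.0 starting value, as above), and express the index relative to that mark. Data are illustrative.

import numpy as np
import pandas as pd

rets = pd.Series([0.02, -0.05, 0.01, 0.03, -0.02])
ret_idx = (1.0 + rets).cumprod()                 # return index
hwm = np.maximum(ret_idx.cummax(), 1.0)          # running high-water mark
print((ret_idx / hwm - 1.0).round(4).tolist())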
td = self.drawdown_end() - self.drawdown_start()
if return_int:
    return td.days
return td
def drawdown_length(self, return_int=False)
Length of drawdown in days.

This is the duration from peak to trough.

Parameters
----------
return_int : bool, default False
    If True, return the number of days as an int.
    If False, return a Pandas Timedelta object.

Returns
-------
int or pandas._libs.tslib.Timedelta
4.199407
4.805116
0.873945
td = self.recov_date() - self.drawdown_end()
if return_int:
    return td.days
return td
def drawdown_recov(self, return_int=False)
Length of drawdown recovery in days.

This is the duration from trough to recovery date.

Parameters
----------
return_int : bool, default False
    If True, return the number of days as an int.
    If False, return a Pandas Timedelta object.

Returns
-------
int or pandas._libs.tslib.Timedelta
6.718613
6.951442
0.966506
# Thank you @cᴏʟᴅsᴘᴇᴇᴅ
# https://stackoverflow.com/a/47892766/7954504
dd = self.drawdown_idx()
# Mask is True from the trough date onward.
mask = nancumsum(dd == nanmin(dd)).astype(bool)
start = dd.mask(mask)[::-1].idxmax()
if return_date:
    return start.date()
return start
def drawdown_start(self, return_date=False)
The date of the peak at which the most severe drawdown began.

Parameters
----------
return_date : bool, default False
    If True, return a `datetime.date` object.
    If False, return a Pandas Timestamp object.

Returns
-------
datetime.date or pandas._libs.tslib.Timestamp
9.307024
9.717454
0.957764
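A sketch of the peak-finding trick used above, on an illustrative drawdown index: mask everything from the trough onward, then walk backwards to the last date at which the drawdown was still at its maximum of zero.

import pandas as pd

dd = pd.Series([0.0, 0.0, -0.02, -0.06, -0.03, 0.0],
               index=pd.date_range("2020-01-31", periods=6, freq="M"))
mask = (dd == dd.min()).cumsum().astype(bool)   # True from the trough onward
start = dd.mask(mask)[::-1].idxmax()            # last zero before the trough
print(start.date())                             # 2020-02-29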
# TODO: plot these (compared) in docs.
if isinstance(method, (int, float)):
    method = ["caer", "cger", "ecr", "ecrr"][method]
method = method.lower()
if method == "caer":
    er = self.excess_ret(benchmark=benchmark, method="arithmetic")
    return er.drawdown_idx()
elif method == "cger":
    er = self.excess_ret(benchmark=benchmark, method="geometric")
    return er.drawdown_idx()
elif method == "ecr":
    er = self.ret_idx() - benchmark.ret_idx() + 1
    if er.isnull().any():
        return er / er.cummax() - 1.0
    else:
        return er / np.maximum.accumulate(er) - 1.0
elif method == "ecrr":
    # Credit to: SO @piRSquared
    # https://stackoverflow.com/a/36848867/7954504
    p = self.ret_idx().values
    b = benchmark.ret_idx().values
    er = p - b
    if np.isnan(er).any():
        # The slower route, but NaN-friendly.  (`er` is an ndarray
        # here, so use np.isnan rather than .isnull().)
        cam = self.expanding(min_periods=1).apply(lambda x: x.argmax())
        # Cast the float positions returned by `apply` to ints
        # so they can be used for fancy indexing below.
        cam = cam.astype(int)
    else:
        cam = utils.cumargmax(er)
    p0 = p[cam]
    b0 = b[cam]
    return (p * b0 - b * p0) / (p0 * b0)
else:
    raise ValueError(
        "`method` must be one of"
        " ('caer', 'cger', 'ecr', 'ecrr'),"
        " case-insensitive, or"
        " an integer mapping to these methods"
        " (0 through 3)."
    )
def excess_drawdown_idx(self, benchmark, method="caer")
Excess drawdown index; TSeries of excess drawdowns.

There are several ways of computing this metric.  For highly
volatile returns, the `method` specified will have a
non-negligible effect on the result.

Parameters
----------
benchmark : {pd.Series, TSeries, 1d np.ndarray}
    The benchmark security to which `self` is compared.
method : {'caer' (0), 'cger' (1), 'ecr' (2), 'ecrr' (3)}
    Indicates the methodology used.  An integer alias (0 through
    3) maps to the corresponding string in the order shown.
4.305018
3.881899
1.108998
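A standalone sketch of the 'ecr' variant above: difference the two cumulative return indexes, offset by 1 so the series starts near 1.0, and take its drawdown from the running maximum. Data are illustrative, and the NaN-friendly `cummax` route is used.

import pandas as pd

port = pd.Series([0.02, -0.01, 0.03, -0.04, 0.01])
bench = pd.Series([0.01, 0.00, 0.02, -0.02, 0.02])
er = (1 + port).cumprod() - (1 + bench).cumprod() + 1
print((er / er.cummax() - 1.0).round(4).tolist())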
if method.startswith("arith"): return self - _try_to_squeeze(benchmark) elif method.startswith("geo"): # Geometric excess return, # (1 + `self`) / (1 + `benchmark`) - 1. return ( self.ret_rels() / _try_to_squeeze(benchmark).ret_rels() - 1.0 )
def excess_ret(self, benchmark, method="arithmetic")
Excess return.

Parameters
----------
benchmark : {pd.Series, TSeries, 1d np.ndarray}
    The benchmark security to which `self` is compared.
method : {{'arith', 'arithmetic'}, {'geo', 'geometric'}}
    The methodology used.  An arithmetic excess return is a
    straightforward subtraction.  A geometric excess return is
    the ratio of return-relatives of `self` to `benchmark`,
    minus one.

Also known as: active return.

Reference
---------
.. _Essex River Analytics - A Case for Arithmetic Attribution
   http://www.northinfo.com/documents/563.pdf
.. _Bacon, Carl. Excess Returns - Arithmetic or Geometric?
   https://www.cfapubs.org/doi/full/10.2469/dig.v33.n1.1235
7.761766
5.046643
1.538006
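Arithmetic vs. geometric excess return on a single illustrative period, mirroring the two `method` branches above:

port, bench = 0.10, 0.06
arith = port - bench                        # 0.04
geo = (1.0 + port) / (1.0 + bench) - 1.0    # ~0.0377
print(round(arith, 4), round(geo, 4))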
gt = self > 0
lt = self < 0
return (nansum(gt) / nansum(lt)) * (self[gt].mean() / self[lt].mean())
def gain_to_loss_ratio(self)
Gain-to-loss ratio, ratio of positive to negative returns.

Formula:  (n pos. / n neg.) *
          (avg. up-month return / avg. down-month return)

[Source: CFA Institute]

Returns
-------
float
6.010458
6.42087
0.936082
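A standalone numpy sketch of the formula on illustrative returns. Note that, as coded above, the average loss in the denominator is negative, so the raw ratio comes out negative; some practitioners report its absolute value.

import numpy as np

rets = np.array([0.03, -0.02, 0.01, 0.04, -0.01, 0.02])
gains, losses = rets[rets > 0], rets[rets < 0]
ratio = (gains.size / losses.size) * (gains.mean() / losses.mean())
print(round(ratio, 4))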
diff = self.excess_ret(benchmark).anlzd_ret()
return diff / self.tracking_error(benchmark, ddof=ddof)
def info_ratio(self, benchmark, ddof=0)
Information ratio--return per unit of active risk.

Parameters
----------
benchmark : {pd.Series, TSeries, 1d np.ndarray}
    The benchmark security to which `self` is compared.
ddof : int, default 0
    Degrees of freedom, passed to pd.Series.std().

Returns
-------
float
14.183802
15.794175
0.89804
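A standalone sketch of an information ratio, assuming monthly data (annualization factor 12) and geometric annualization of the active-return series; `tracking_error` itself is defined elsewhere in the class, so an annualized standard deviation of active returns stands in for it here. Data are illustrative.

import pandas as pd

port = pd.Series([0.021, -0.008, 0.015, 0.012, -0.004, 0.018])
bench = pd.Series([0.018, -0.010, 0.012, 0.010, -0.006, 0.015])
active = port - bench
anlzd_active = (1.0 + active).prod() ** (12 / len(active)) - 1.0  # geometric, annualized
tracking_err = active.std(ddof=0) * 12 ** 0.5                     # annualized active-return stdev
print(round(anlzd_active / tracking_err, 4))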
rf = self._validate_rf(rf)
scaling = benchmark.anlzd_stdev(ddof) / self.anlzd_stdev(ddof)
diff = self.anlzd_ret() - rf
return rf + diff * scaling
def msquared(self, benchmark, rf=0.02, ddof=0)
M-squared, return scaled by relative total risk.

A measure of what a portfolio would have returned if it had
taken on the same *total* risk as the market index.
[Source: CFA Institute]

Parameters
----------
benchmark : {pd.Series, TSeries, 1d np.ndarray}
    The benchmark security to which `self` is compared.
rf : {float, TSeries, pd.Series}, default 0.02
    If float, this represents a *compounded annualized*
    risk-free rate; 2.0% is the default.
    If a TSeries or pd.Series, this represents a time series of
    periodic returns to a risk-free security.

    To download a risk-free rate return series using 3-month US
    T-bill yields, see `pyfinance.datasets.load_rf`.
ddof : int, default 0
    Degrees of freedom, passed to pd.Series.std().

Returns
-------
float
7.690842
10.128151
0.759353
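A worked example of the formula in `msquared`, with illustrative annualized inputs: the excess return over `rf` is rescaled by the ratio of benchmark volatility to portfolio volatility.

rf = 0.02                        # annualized risk-free rate
port_ret, port_vol = 0.10, 0.18  # illustrative annualized return / stdev
bench_vol = 0.12
m2 = rf + (port_ret - rf) * (bench_vol / port_vol)
print(round(m2, 4))              # 0.0733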
return np.count_nonzero(self[self < threshold]) / self.count()
def pct_negative(self, threshold=0.0)
Pct. of periods in which `self` is less than `threshold`.

Parameters
----------
threshold : {float, TSeries, pd.Series}, default 0.

Returns
-------
float
8.926801
9.916154
0.900228
return np.count_nonzero(self[self > threshold]) / self.count()
def pct_positive(self, threshold=0.0)
Pct. of periods in which `self` is greater than `threshold`.

Parameters
----------
threshold : {float, TSeries, pd.Series}, default 0.

Returns
-------
float
8.891481
10.109255
0.879539
dd = self.drawdown_idx()
# False beginning on trough date and all later dates.
mask = nancumprod(dd != nanmin(dd)).astype(bool)
res = dd.mask(mask) == 0
# If `res` is all False (recovery has not occurred),
# .idxmax() will return `res.index[0]`.
if not res.any():
    recov = pd.NaT
else:
    recov = res.idxmax()
if return_date:
    return recov.date()
return recov
def recov_date(self, return_date=False)
Drawdown recovery date.

Date at which `self` recovered to the previous high-water mark.

Parameters
----------
return_date : bool, default False
    If True, return a `datetime.date` object.
    If False, return a Pandas Timestamp object.

Returns
-------
{datetime.date, pandas._libs.tslib.Timestamp, pd.NaT}
    Returns NaT if recovery has not occurred.
10.048148
8.445644
1.189743
return self.ret_rels().resample(freq, **kwargs).prod() - 1.0
def rollup(self, freq, **kwargs)
Downsample `self` through geometric linking.

Parameters
----------
freq : {'D', 'W', 'M', 'Q', 'A'}
    The frequency of the result.
**kwargs
    Passed to `self.resample()`.

Returns
-------
TSeries

Example
-------
# Derive quarterly returns from monthly returns.
>>> import numpy as np
>>> import pandas as pd
>>> from pyfinance import TSeries
>>> np.random.seed(444)
>>> ts = TSeries(np.random.randn(12) / 100 + 0.002,
...              index=pd.date_range('2016', periods=12, freq='M'))
>>> ts.rollup('Q')
2016-03-31    0.0274
2016-06-30   -0.0032
2016-09-30   -0.0028
2016-12-31    0.0127
Freq: Q-DEC, dtype: float64
24.899424
30.544909
0.815174
if freq is None:
    freq = self._try_get_freq()
    if freq is None:
        raise FrequencyError(msg)
n = self.count() - ddof
# Square root of the mean squared downside deviation
# (matches the formula documented above).
ss = (nansum(np.minimum(self - threshold, 0.0) ** 2) / n) ** 0.5
return ss * freq ** 0.5
def semi_stdev(self, threshold=0.0, ddof=0, freq=None)
Semi-standard deviation; stdev of downside returns.

It is designed to address the fact that plain standard
deviation penalizes "upside volatility."

Formula:  sqrt( sum( min(self - thresh, 0)**2 ) / (n - ddof) )

Also known as: downside deviation.

Parameters
----------
threshold : {float, TSeries, pd.Series}, default 0.
    While zero is the default, it is also customary to use a
    "minimum acceptable return" (MAR) or a risk-free rate.
    Note: this is assumed to be a *periodic*, not necessarily
    annualized, return.
ddof : int, default 0
    Degrees of freedom, passed to pd.Series.std().
freq : str or None, default None
    A frequency string used to create an annualization factor.
    If None, `self.freq` will be used.  If that is also None, a
    frequency will be inferred.  If none can be inferred, an
    exception is raised.

    It may be any frequency string or anchored offset string
    recognized by Pandas, such as 'D', '5D', 'Q', 'Q-DEC', or
    'BQS-APR'.

Returns
-------
float
4.918073
4.880328
1.007734
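A standalone numpy sketch of the documented formula, assuming monthly returns (annualization factor 12), a zero threshold, and `ddof=0`; the return values are illustrative.

import numpy as np

rets = np.array([0.02, -0.03, 0.01, -0.01, 0.04, -0.02])
threshold, ddof, periods_per_year = 0.0, 0, 12
downside = np.minimum(rets - threshold, 0.0)
semi = (np.sum(downside ** 2) / (rets.size - ddof)) ** 0.5
print(round(semi * periods_per_year ** 0.5, 4))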
rf = self._validate_rf(rf)
stdev = self.anlzd_stdev(ddof=ddof)
return (self.anlzd_ret() - rf) / stdev
def sharpe_ratio(self, rf=0.02, ddof=0)
Return over `rf` per unit of total risk.

The average return in excess of the risk-free rate divided by
the standard deviation of return; a measure of the average
excess return earned per unit of standard deviation of return.
[Source: CFA Institute]

Parameters
----------
rf : {float, TSeries, pd.Series}, default 0.02
    If float, this represents a *compounded annualized*
    risk-free rate; 2.0% is the default.
    If a TSeries or pd.Series, this represents a time series of
    periodic returns to a risk-free security.

    To download a risk-free rate return series using 3-month US
    T-bill yields, see `pyfinance.datasets.load_rf`.
ddof : int, default 0
    Degrees of freedom, passed to pd.Series.std().

Returns
-------
float
6.097524
7.912508
0.770618
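A worked example of the Sharpe ratio formula with illustrative annualized inputs:

rf = 0.02
anlzd_ret, anlzd_stdev = 0.09, 0.15              # illustrative
print(round((anlzd_ret - rf) / anlzd_stdev, 4))  # 0.4667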