prompt stringlengths 19–1.03M | completion stringlengths 4–2.12k | api stringlengths 8–90 |
---|---|---|
"""
data_collection_10_Network_Centrality.py
This code calculates centrality metrics...
1 - load in graphs - Largest weakly Connected Components!!
2 - applying HITS algorithm
3 - re-calculating centrality within my sub-graph only
(4 - draw network graphs with hubs centrality metrics --> see next .py doc)
@author: lizakarmannaya
"""
import networkx as nx
import pandas as pd
import numpy as np
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
import fnmatch
import os
import glob
from scipy.stats import skew, kurtosis, mode
#### 1 - load in graphs - Largest weakly Connected Components!! ####
os.chdir(os.path.expanduser("~"))
L = nx.read_pajek('study2_largest_wcc_LEFT_directed.net')
R = nx.read_pajek('study2_largest_wcc_RIGHT_directed.net')
#this imports them as multigraph types --> convert to DiGraph
L = nx.DiGraph(L)
R = nx.DiGraph(R)
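#optional sanity check (sketch, not required for the analysis below):
#nx.read_pajek returns multigraphs, so confirm the conversion produced simple directed graphs
for g in (L, R):
    assert g.is_directed() and not g.is_multigraph()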
########################################
#### 2 - applying HITS algorithm #####
########################################
#for LEFT
hits_L_hubs, hits_L_authorities = nx.hits(L)
plt.hist(hits_L_hubs.values(), 40, log=False, facecolor='red', alpha=0.5)
plt.savefig('RESULTS/hist_LEFT_hubs_with_elites') #have to execute together with line above to avoid saving blank canvas
plt.show()
#deleting elites from graph
my_elites = pd.read_csv('my_elites.csv', index_col=0)
my_elites['twitter_name_index'] = my_elites['twitter_name']
my_elites = my_elites.set_index('twitter_name_index') #using twitter_name = screen_name as index for later
my_elites.head()
elites_ids = my_elites['user_id'] #pandas series
len(elites_ids) #420
##now delete these elites from page_rank_L - LEFT:
#need to create a list of strings first
to_delete = []
for item in elites_ids:
key = str(item)
to_delete.append(key)
len(to_delete) #420
### LEFT ####
to_delete_LEFT = set(L.nodes()).intersection(to_delete)
len(to_delete_LEFT) #29
hits_L_hubs_noelites = hits_L_hubs ## NB this is an alias, not a copy - deleting keys below also modifies hits_L_hubs
for item in to_delete_LEFT:
del hits_L_hubs_noelites[item]
len(hits_L_hubs_noelites) #822752 - without elites
L.number_of_nodes() #822781 - with elites
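#sketch (not used above): a non-destructive alternative that returns a new dict of scores
#instead of mutating the HITS result in place - the original hubs dict is left untouched
def drop_nodes(scores, nodes_to_drop):
    """Return a copy of `scores` without the given node ids."""
    drop = set(nodes_to_drop)
    return {node: score for node, score in scores.items() if node not in drop}
#e.g. hubs_without_elites = drop_nodes(hits_L_hubs, to_delete_LEFT)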
##NB re-run these 3 sections below
plt.hist(hits_L_hubs_noelites.values(), 40, log=False, facecolor='red', alpha=0.5)
plt.savefig('RESULTS/hist_LEFT_hubs_noelites') #have to execute together with line above to avoid saving blank canvas
plt.hist(hits_L_hubs_noelites.values(), 40, log=True, facecolor='red', alpha=0.5)
plt.savefig('RESULTS/hist_LEFT_hubs_noelites_logscale') #have to execute together with line above to avoid saving blank canvas
LEFT_hubs = pd.DataFrame.from_dict(data=hits_L_hubs_noelites, orient='index', columns=['hubs'])
LEFT_hubs.to_csv('hubs_scores/LEFT_hubs_noelites.csv')
#repeat for RIGHT
hits_R_hubs, hits_R_authorities = nx.hits(R)
#example hits_L_authorities['703690879'] #0
plt.hist(hits_R_hubs.values(), 40, log=False, facecolor='blue', alpha=0.5)
plt.savefig('RESULTS/hist_RIGHT_hubs_with_elites') #have to execute together with line above to avoid saving blank canvas
#deleting elites from graph
to_delete_RIGHT = set(R.nodes()).intersection(to_delete)
len(to_delete_RIGHT) #35
hits_R_hubs_noelites = hits_R_hubs ### NB this is an alias, not a copy - deleting keys below also modifies hits_R_hubs
for item in to_delete_RIGHT:
del hits_R_hubs_noelites[item]
len(hits_R_hubs_noelites) #1542221 - without elites
#len(hits_R_hubs) #1542221 - original dict is also modified
R.number_of_nodes() #1542256 - with elites
#NB re-run these 3 sections below
plt.hist(hits_R_hubs_noelites.values(), 40, log=False, facecolor='blue', alpha=0.5)
plt.savefig('RESULTS/hist_RIGHT_hubs_noelites') #have to execute together with line above to avoid saving blank canvas
plt.hist(hits_R_hubs_noelites.values(), 40, log=True, facecolor='blue', alpha=0.5)
plt.savefig('RESULTS/hist_RIGHT_hubs_noelites_logscale') #have to execute together with line above to avoid saving blank canvas
RIGHT_hubs = pd.DataFrame.from_dict(data=hits_R_hubs_noelites, orient='index', columns=['hubs'])
RIGHT_hubs.to_csv('hubs_scores/RIGHT_hubs_noelites.csv')
RIGHT_hubs
#### calculating skew and kurtosis for entire sample's hubs centrality
## NB re-run these?
L_hubs = list(hits_L_hubs.values()) #currently this is without the elites, as they were taken out above
len(L_hubs) #822752
skew(L_hubs) #-0.1830900326354742
kurtosis(L_hubs) #-1.8363738717470777
np.mean(L_hubs)
mode(L_hubs)
np.median(L_hubs)
np.std(L_hubs)
R_hubs = list(hits_R_hubs.values()) #currently this is without the elites, as they were taken out above
len(R_hubs) #1542221
skew(R_hubs) #-0.6376712808927192
kurtosis(R_hubs) #-1.16105655692604
np.mean(R_hubs)
mode(R_hubs)
np.median(R_hubs)
np.std(R_hubs)
entire_hubs = L_hubs+R_hubs
len(entire_hubs) #2,364,973
skew(entire_hubs) #0.7903545150997883
kurtosis(entire_hubs) #-0.3640943243229504
np.mean(entire_hubs)
mode(entire_hubs)
np.median(entire_hubs)
np.std(entire_hubs)
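#sketch of a small helper bundling the repeated distribution statistics above into one call;
#`values` is any iterable of hub scores (mode is left out here and computed separately above)
def describe_distribution(values):
    arr = np.asarray(list(values))
    return {'n': arr.size, 'skew': skew(arr), 'kurtosis': kurtosis(arr),
            'mean': np.mean(arr), 'median': np.median(arr), 'std': np.std(arr)}
#e.g. describe_distribution(entire_hubs)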
#### save hubs & authorities values into results df ####
df = | pd.read_csv('RESULTS_df_multiverse_4.csv', index_col=0) | pandas.read_csv |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2019/5/27 9:55 AM
# @Author : R
# @File : TMDB_Predict_Finally.py
# @Software: PyCharm
# coding: utf-8
# # Kaggle for TMDB
# In[1]:
import numpy as np
import pandas as pd
import warnings
from tqdm import tqdm
from datetime import datetime
from sklearn.preprocessing import LabelEncoder
from collections import Counter
warnings.filterwarnings('ignore')
# get_ipython().run_line_magic('matplotlib', 'inline')
# Data description
# id: unique identifier of each movie
# belongs_to_collection: JSON with the collection's tmdb id, movie name, poster URL and backdrop URL
# budget: movie budget; a value of 0 means unknown
# genres: list of the movie's genres, JSON with id and name
# homepage: URL of the movie's official homepage
# imdb_id: unique id of the movie in the IMDB database
# original_language: original language of the movie, a two-character string
# original_title: original title of the movie, may differ from the name in belongs_to_collection
# overview: plot summary
# popularity: popularity of the movie, a float
# poster_path: URL of the movie poster
# production_companies: JSON with id and name of the production companies
# production_countries: JSON with the two-letter code and full name of the production countries
# release_date: release date of the movie
# runtime: movie runtime
# spoken_languages: language versions of the movie, JSON
# status: whether the movie has been released
# tagline: movie tagline
# title: English title of the movie
# keywords: movie keywords, JSON
# cast: JSON list of the cast, including id, name, gender, etc.
# crew: information on the crew, including director, writer, etc.
# revenue: total revenue, the value to predict
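# Illustrative example (the concrete values are made up, not taken from the data): a raw
# JSON-like cell such as genres is stored as a string of Python literals and is parsed
# into a list of dicts later on (see get_dictionary below), e.g.
import ast
_example_genres_cell = "[{'id': 35, 'name': 'Comedy'}, {'id': 18, 'name': 'Drama'}]"
_example_genres = [d['name'] for d in ast.literal_eval(_example_genres_cell)]  # ['Comedy', 'Drama']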
# # EDA
# EDA has already been done
# Feature engineering and prediction
# Two additional datasets are used:
# 1. TMDB Competition Additional Features: adds three new features - popularity2, rating and totalVotes
# 2. TMDB Competition Additional Training Data: 2000 extra training rows, which do not include all attributes of the given training set
# In[52]:
# Feature Engineering & Prediction
# Preprocessing function: converts non-numeric attributes into numeric ones
def prepare(df):
global json_cols
global train_dict
df[['release_month', 'release_day', 'release_year']] = df['release_date'].str.split('/', expand=True).replace(
np.nan, 0).astype(int)
df['release_year'] = df['release_year']
df.loc[(df['release_year'] <= 19) & (df['release_year'] < 100), "release_year"] += 2000
df.loc[(df['release_year'] > 19) & (df['release_year'] < 100), "release_year"] += 1900
# Extract day-of-week and quarter from the release date
releaseDate = pd.to_datetime(df['release_date'])
df['release_dayofweek'] = releaseDate.dt.dayofweek
df['release_quarter'] = releaseDate.dt.quarter
# Fill missing rating and totalVotes with the mean of movies from the same year and original language
# (the original chained assignment `df[df.rating.isna()]['rating'] = ...` never modified df)
rating_na = df.groupby(["release_year", "original_language"])['rating'].transform('mean')
df['rating'] = df['rating'].fillna(rating_na)
vote_count_na = df.groupby(["release_year", "original_language"])['totalVotes'].transform('mean')
df['totalVotes'] = df['totalVotes'].fillna(vote_count_na)
# df['rating'] = df['rating'].fillna(1.5)
# df['totalVotes'] = df['totalVotes'].fillna(6)
# Build a new feature: weightedRating
df['weightedRating'] = (df['rating'] * df['totalVotes'] + 6.367 * 1000) / (df['totalVotes'] + 1000)
# Budgets from different eras are not directly comparable, so apply a simple 'inflation' adjustment of 1.8% per year
df['originalBudget'] = df['budget']
df['inflationBudget'] = df['budget'] + df['budget'] * 1.8 / 100 * (
2018 - df['release_year']) # Inflation simple formula
df['budget'] = np.log1p(df['budget'])
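# Worked example of the inflation formula above (illustrative numbers only):
# a film released in 2000 with a budget of 100 gets
#   inflationBudget = 100 + 100 * 1.8/100 * (2018 - 2000) = 132.4
# i.e. simple (non-compounded) inflation of 1.8% per year up to 2018,
# applied before the log1p transform of the raw budget.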
# Count the gender composition of crew and cast
df['genders_0_crew'] = df['crew'].apply(lambda x: sum([1 for i in x if i['gender'] == 0]))
df['genders_1_crew'] = df['crew'].apply(lambda x: sum([1 for i in x if i['gender'] == 1]))
df['genders_2_crew'] = df['crew'].apply(lambda x: sum([1 for i in x if i['gender'] == 2]))
df['genders_0_cast'] = df['cast'].apply(lambda x: sum([1 for i in x if i['gender'] == 0]))
df['genders_1_cast'] = df['cast'].apply(lambda x: sum([1 for i in x if i['gender'] == 1]))
df['genders_2_cast'] = df['cast'].apply(lambda x: sum([1 for i in x if i['gender'] == 2]))
# Statistics on belongs_to_collection, Keywords and cast
df['_collection_name'] = df['belongs_to_collection'].apply(lambda x: x[0]['name'] if x != {} else 0)
le = LabelEncoder()
le.fit(list(df['_collection_name'].fillna('')))
df['_collection_name'] = le.transform(df['_collection_name'].fillna('').astype(str))
df['_num_Keywords'] = df['Keywords'].apply(lambda x: len(x) if x != {} else 0)
df['_num_cast'] = df['cast'].apply(lambda x: len(x) if x != {} else 0)
df['_num_crew'] = df['crew'].apply(lambda x: len(x) if x != {} else 0)
df['_popularity_mean_year'] = df['popularity'] / df.groupby("release_year")["popularity"].transform('mean')
df['_budget_runtime_ratio'] = df['budget'] / df['runtime']
df['_budget_popularity_ratio'] = df['budget'] / df['popularity']
df['_budget_year_ratio'] = df['budget'] / (df['release_year'] * df['release_year'])
df['_releaseYear_popularity_ratio'] = df['release_year'] / df['popularity']
df['_releaseYear_popularity_ratio2'] = df['popularity'] / df['release_year']
df['_popularity_totalVotes_ratio'] = df['totalVotes'] / df['popularity']
df['_rating_popularity_ratio'] = df['rating'] / df['popularity']
df['_rating_totalVotes_ratio'] = df['totalVotes'] / df['rating']
df['_totalVotes_releaseYear_ratio'] = df['totalVotes'] / df['release_year']
df['_budget_rating_ratio'] = df['budget'] / df['rating']
df['_runtime_rating_ratio'] = df['runtime'] / df['rating']
df['_budget_totalVotes_ratio'] = df['budget'] / df['totalVotes']
# Flag whether the movie has a homepage
df['has_homepage'] = 1
df.loc[pd.isnull(df['homepage']), "has_homepage"] = 0
# Flag whether belongs_to_collection is missing
df['isbelongs_to_collectionNA'] = 0
df.loc[pd.isnull(df['belongs_to_collection']), "isbelongs_to_collectionNA"] = 1
# Flag whether tagline is missing
df['isTaglineNA'] = 0
df.loc[pd.isnull(df['tagline']), "isTaglineNA"] = 1
# Flag whether original_language is English
df['isOriginalLanguageEng'] = 0
df.loc[df['original_language'] == "en", "isOriginalLanguageEng"] = 1
# Flag whether the original title differs from the English title
df['isTitleDifferent'] = 1
df.loc[df['original_title'] == df['title'], "isTitleDifferent"] = 0
# Flag whether the movie has been released
df['isMovieReleased'] = 1
df.loc[df['status'] != "Released", "isMovieReleased"] = 0
# Flag whether the movie has an overview
df['isOverviewNA'] = 0
df.loc[pd.isnull(df['overview']), 'isOverviewNA'] = 1
# Extract the collection id
df['collection_id'] = df['belongs_to_collection'].apply(lambda x: np.nan if len(x) == 0 else x[0]['id'])
# Length statistics on original_title
df['original_title_letter_count'] = df['original_title'].str.len()
df['original_title_word_count'] = df['original_title'].str.split().str.len()
# Word counts / lengths for title, overview and tagline
df['title_word_count'] = df['title'].str.split().str.len()
df['overview_word_count'] = df['overview'].str.split().str.len()
df['tagline_word_count'] = df['tagline'].str.split().str.len()
df['len_title'] = df['title'].fillna('').apply(lambda x: len(str(x)))
# Counts for production_companies, production_countries, cast, crew and spoken_languages
df['production_countries_count'] = df['production_countries'].apply(lambda x: len(x))
df['production_companies_count'] = df['production_companies'].apply(lambda x: len(x))
df['cast_count'] = df['cast'].apply(lambda x: len(x))
df['crew_count'] = df['crew'].apply(lambda x: len(x))
df['spoken_languages_count'] = df['spoken_languages'].apply(lambda x: len(x))
df['genres_count'] = df['genres'].apply(lambda x: len(x))
# Per-year (and per-rating) group means; transform keeps the result aligned with df's rows
# (assigning the raw groupby aggregate, as before, aligns on the index and produces NaNs)
df['meanruntimeByYear'] = df.groupby("release_year")["runtime"].transform('mean')
df['meanPopularityByYear'] = df.groupby("release_year")["popularity"].transform('mean')
df['meanBudgetByYear'] = df.groupby("release_year")["budget"].transform('mean')
df['meantotalVotesByYear'] = df.groupby("release_year")["totalVotes"].transform('mean')
df['meanTotalVotesByRating'] = df.groupby("rating")["totalVotes"].transform('mean')
df['medianBudgetByYear'] = df.groupby("release_year")["budget"].transform('median')
####################################################################################
df['_popularity_theatrical_ratio'] = df['theatrical'] / df['popularity']
df['_budget_theatrical_ratio'] = df['budget'] / df['theatrical']
# runtime
df['runtime_cat_min_60'] = df['runtime'].apply(lambda x: 1 if (x <= 60) else 0)
df['runtime_cat_61_80'] = df['runtime'].apply(lambda x: 1 if (x > 60) & (x <= 80) else 0)
df['runtime_cat_81_100'] = df['runtime'].apply(lambda x: 1 if (x > 80) & (x <= 100) else 0)
df['runtime_cat_101_120'] = df['runtime'].apply(lambda x: 1 if (x > 100) & (x <= 120) else 0)
df['runtime_cat_121_140'] = df['runtime'].apply(lambda x: 1 if (x > 120) & (x <= 140) else 0)
df['runtime_cat_141_170'] = df['runtime'].apply(lambda x: 1 if (x > 140) & (x <= 170) else 0)
df['runtime_cat_171_max'] = df['runtime'].apply(lambda x: 1 if (x > 170) else 0)
lang = df['original_language']
df_more_17_samples = [x[0] for x in Counter(pd.DataFrame(lang).stack()).most_common(17)]
for col in df_more_17_samples:
df[col] = df['original_language'].apply(lambda x: 1 if x == col else 0)
for col in range(1, 12):
df['month' + str(col)] = df['release_month'].apply(lambda x: 1 if x == col else 0)
# feature engineering: release date per quarter, one-hot encoding
for col in range(1, 4):
df['quarter' + str(col)] = df['release_quarter'].apply(lambda x: 1 if x == col else 0)
for col in range(1, 7):
df['dayofweek' + str(col)] = df['release_dayofweek'].apply(lambda x: 1 if x == col else 0)
# Newly added attributes
df['is_release_day_of_1'] = 0
df.loc[df['release_day'] == 1, 'is_release_day_of_1'] = 1
df['is_release_day_of_15'] = 0
df.loc[df['release_day'] == 15, 'is_release_day_of_15'] = 1
# More new attributes
# df['popularity2'] = np.log1p(df['popularity2'])
# df['popularity'] = np.log1p(df['popularity'])
# for col in range(1, 32):
# df['release_day' + str(col)] = df['release_day'].apply(lambda x: 1 if x == col else 0)
df['is_release_day_of_31'] = 0
df.loc[df['release_day'] == 31, 'is_release_day_of_31'] = 1
# popularity
df['popularity_cat_25'] = df['popularity'].apply(lambda x: 1 if (x <= 25) else 0)
df['popularity_cat_26_50'] = df['popularity'].apply(lambda x: 1 if (x > 25) & (x <= 50) else 0)
df['popularity_cat_51_100'] = df['popularity'].apply(lambda x: 1 if (x > 50) & (x <= 100) else 0)
df['popularity_cat_101_150'] = df['popularity'].apply(lambda x: 1 if (x > 100) & (x <= 150) else 0)
df['popularity_cat_151_200'] = df['popularity'].apply(lambda x: 1 if (x > 150) & (x <= 200) else 0)
df['popularity_cat_201_max'] = df['popularity'].apply(lambda x: 1 if (x >= 200) else 0)
df['_runtime_totalVotes_ratio'] = df['runtime'] / df['totalVotes']
df['_runtime_popularity_ratio'] = df['runtime'] / df['popularity']
#
df['_rating_theatrical_ratio'] = df['theatrical'] / df['rating']
df['_totalVotes_theatrical_ratio'] = df['theatrical'] / df['totalVotes']
df['_budget_mean_year'] = df['budget'] / df.groupby("release_year")["budget"].transform('mean')
df['_runtime_mean_year'] = df['runtime'] / df.groupby("release_year")["runtime"].transform('mean')
df['_rating_mean_year'] = df['rating'] / df.groupby("release_year")["rating"].transform('mean')
df['_totalVotes_mean_year'] = df['totalVotes'] / df.groupby("release_year")["totalVotes"].transform('mean')
###############################################################
# For JSON attributes that hold multiple values, apply a one-hot-style encoding
for col in ['genres', 'production_countries', 'spoken_languages', 'production_companies','Keywords']:
df[col] = df[col].map(lambda x: sorted(
list(set([n if n in train_dict[col] else col + '_etc' for n in [d['name'] for d in x]])))).map(
lambda x: ','.join(map(str, x)))
temp = df[col].str.get_dummies(sep=',')
df = pd.concat([df, temp], axis=1, sort=False)
# Drop non-numeric attributes and attributes from which no useful information has been extracted yet
df.drop(['genres_etc'], axis=1, inplace=True)
df = df.drop(['id', 'revenue', 'belongs_to_collection', 'genres', 'homepage', 'imdb_id', 'overview', 'runtime'
, 'poster_path', 'production_companies', 'production_countries', 'release_date', 'spoken_languages'
, 'status', 'title', 'Keywords', 'cast', 'crew', 'original_language', 'original_title', 'tagline',
'collection_id'
], axis=1)
# Fill missing values
df.fillna(value=0.0, inplace=True)
return df
# Manually fix some records in train, covering budget and revenue
# Records whose budget is far smaller than revenue were reviewed and corrected
# Principle: fill in the true value where it can be looked up, otherwise use the mean of
# comparable movies released in the same year (see the imputation sketch below)
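# Sketch of the fallback described above (not applied here, where values are fixed by hand):
# impute an implausibly small budget with the mean budget of same-year releases.
# The frame and column names follow train.csv as loaded below; the threshold is an assumption.
def impute_budget_by_year(df, min_budget=1000):
    out = df.copy()
    year = pd.to_datetime(out['release_date'], errors='coerce').dt.year
    year_mean = out.groupby(year)['budget'].transform('mean')
    out.loc[out['budget'] < min_budget, 'budget'] = year_mean
    return out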
train = pd.read_csv('train.csv')
train.loc[train['id'] == 16, 'revenue'] = 192864 # Skinning
train.loc[train['id'] == 90, 'budget'] = 30000000 # Sommersby
train.loc[train['id'] == 118, 'budget'] = 60000000 # Wild Hogs
train.loc[train['id'] == 149, 'budget'] = 18000000 # Beethoven
train.loc[train['id'] == 313, 'revenue'] = 12000000 # The Cookout
train.loc[train['id'] == 451, 'revenue'] = 12000000 # Chasing Liberty
train.loc[train['id'] == 464, 'budget'] = 20000000 # Parenthood
train.loc[train['id'] == 470, 'budget'] = 13000000 # The Karate Kid, Part II
train.loc[train['id'] == 513, 'budget'] = 930000 # From Prada to Nada
train.loc[train['id'] == 797, 'budget'] = 8000000 # Welcome to Dongmakgol
train.loc[train['id'] == 819, 'budget'] = 90000000 # Alvin and the Chipmunks: The Road Chip
train.loc[train['id'] == 850, 'budget'] = 90000000 # Modern Times
train.loc[train['id'] == 1007, 'budget'] = 2 # Zyzzyx Road
train.loc[train['id'] == 1112, 'budget'] = 7500000 # An Officer and a Gentleman
train.loc[train['id'] == 1131, 'budget'] = 4300000 # Smokey and the Bandit
train.loc[train['id'] == 1359, 'budget'] = 10000000 # Stir Crazy
train.loc[train['id'] == 1542, 'budget'] = 1 # All at Once
train.loc[train['id'] == 1570, 'budget'] = 15800000 # Crocodile Dundee II
train.loc[train['id'] == 1571, 'budget'] = 4000000 # Lady and the Tramp
train.loc[train['id'] == 1714, 'budget'] = 46000000 # The Recruit
train.loc[train['id'] == 1721, 'budget'] = 17500000 # Cocoon
train.loc[train['id'] == 1865, 'revenue'] = 25000000 # Scooby-Doo 2: Monsters Unleashed
train.loc[train['id'] == 1885, 'budget'] = 12 # In the Cut
train.loc[train['id'] == 2091, 'budget'] = 10 # Deadfall
train.loc[train['id'] == 2268, 'budget'] = 17500000 # Madea Goes to Jail budget
train.loc[train['id'] == 2491, 'budget'] = 6 # Never Talk to Strangers
train.loc[train['id'] == 2602, 'budget'] = 31000000 # Mr. Holland's Opus
train.loc[train['id'] == 2612, 'budget'] = 15000000 # Field of Dreams
train.loc[train['id'] == 2696, 'budget'] = 10000000 # Nurse 3-D
train.loc[train['id'] == 2801, 'budget'] = 10000000 # Fracture
train.loc[train['id'] == 335, 'budget'] = 2
train.loc[train['id'] == 348, 'budget'] = 12
train.loc[train['id'] == 470, 'budget'] = 13000000
train.loc[train['id'] == 513, 'budget'] = 1100000
train.loc[train['id'] == 640, 'budget'] = 6
train.loc[train['id'] == 696, 'budget'] = 1
train.loc[train['id'] == 797, 'budget'] = 8000000
train.loc[train['id'] == 850, 'budget'] = 1500000
train.loc[train['id'] == 1199, 'budget'] = 5
train.loc[train['id'] == 1282, 'budget'] = 9 # Death at a Funeral
train.loc[train['id'] == 1347, 'budget'] = 1
train.loc[train['id'] == 1755, 'budget'] = 2
train.loc[train['id'] == 1801, 'budget'] = 5
train.loc[train['id'] == 1918, 'budget'] = 592
train.loc[train['id'] == 2033, 'budget'] = 4
train.loc[train['id'] == 2118, 'budget'] = 344
train.loc[train['id'] == 2252, 'budget'] = 130
train.loc[train['id'] == 2256, 'budget'] = 1
train.loc[train['id'] == 2696, 'budget'] = 10000000
# Clean anomalies in test
test = pd.read_csv('test.csv')
# Clean Data
test.loc[test['id'] == 6733, 'budget'] = 5000000
test.loc[test['id'] == 3889, 'budget'] = 15000000
test.loc[test['id'] == 6683, 'budget'] = 50000000
test.loc[test['id'] == 5704, 'budget'] = 4300000
test.loc[test['id'] == 6109, 'budget'] = 281756
test.loc[test['id'] == 7242, 'budget'] = 10000000
test.loc[test['id'] == 7021, 'budget'] = 17540562 # Two Is a Family
test.loc[test['id'] == 5591, 'budget'] = 4000000 # The Orphanage
test.loc[test['id'] == 4282, 'budget'] = 20000000 # Big Top Pee-wee
test.loc[test['id'] == 3033, 'budget'] = 250
test.loc[test['id'] == 3051, 'budget'] = 50
test.loc[test['id'] == 3084, 'budget'] = 337
test.loc[test['id'] == 3224, 'budget'] = 4
test.loc[test['id'] == 3594, 'budget'] = 25
test.loc[test['id'] == 3619, 'budget'] = 500
test.loc[test['id'] == 3831, 'budget'] = 3
test.loc[test['id'] == 3935, 'budget'] = 500
test.loc[test['id'] == 4049, 'budget'] = 995946
test.loc[test['id'] == 4424, 'budget'] = 3
test.loc[test['id'] == 4460, 'budget'] = 8
test.loc[test['id'] == 4555, 'budget'] = 1200000
test.loc[test['id'] == 4624, 'budget'] = 30
test.loc[test['id'] == 4645, 'budget'] = 500
test.loc[test['id'] == 4709, 'budget'] = 450
test.loc[test['id'] == 4839, 'budget'] = 7
test.loc[test['id'] == 3125, 'budget'] = 25
test.loc[test['id'] == 3142, 'budget'] = 1
test.loc[test['id'] == 3201, 'budget'] = 450
test.loc[test['id'] == 3222, 'budget'] = 6
test.loc[test['id'] == 3545, 'budget'] = 38
test.loc[test['id'] == 3670, 'budget'] = 18
test.loc[test['id'] == 3792, 'budget'] = 19
test.loc[test['id'] == 3881, 'budget'] = 7
test.loc[test['id'] == 3969, 'budget'] = 400
test.loc[test['id'] == 4196, 'budget'] = 6
test.loc[test['id'] == 4221, 'budget'] = 11
test.loc[test['id'] == 4222, 'budget'] = 500
test.loc[test['id'] == 4285, 'budget'] = 11
test.loc[test['id'] == 4319, 'budget'] = 1
test.loc[test['id'] == 4639, 'budget'] = 10
test.loc[test['id'] == 4719, 'budget'] = 45
test.loc[test['id'] == 4822, 'budget'] = 22
test.loc[test['id'] == 4829, 'budget'] = 20
test.loc[test['id'] == 4969, 'budget'] = 20
test.loc[test['id'] == 5021, 'budget'] = 40
test.loc[test['id'] == 5035, 'budget'] = 1
test.loc[test['id'] == 5063, 'budget'] = 14
test.loc[test['id'] == 5119, 'budget'] = 2
test.loc[test['id'] == 5214, 'budget'] = 30
test.loc[test['id'] == 5221, 'budget'] = 50
test.loc[test['id'] == 4903, 'budget'] = 15
test.loc[test['id'] == 4983, 'budget'] = 3
test.loc[test['id'] == 5102, 'budget'] = 28
test.loc[test['id'] == 5217, 'budget'] = 75
test.loc[test['id'] == 5224, 'budget'] = 3
test.loc[test['id'] == 5469, 'budget'] = 20
test.loc[test['id'] == 5840, 'budget'] = 1
test.loc[test['id'] == 5960, 'budget'] = 30
test.loc[test['id'] == 6506, 'budget'] = 11
test.loc[test['id'] == 6553, 'budget'] = 280
test.loc[test['id'] == 6561, 'budget'] = 7
test.loc[test['id'] == 6582, 'budget'] = 218
test.loc[test['id'] == 6638, 'budget'] = 5
test.loc[test['id'] == 6749, 'budget'] = 8
test.loc[test['id'] == 6759, 'budget'] = 50
test.loc[test['id'] == 6856, 'budget'] = 10
test.loc[test['id'] == 6858, 'budget'] = 100
test.loc[test['id'] == 6876, 'budget'] = 250
test.loc[test['id'] == 6972, 'budget'] = 1
test.loc[test['id'] == 7079, 'budget'] = 8000000
test.loc[test['id'] == 7150, 'budget'] = 118
test.loc[test['id'] == 6506, 'budget'] = 118
test.loc[test['id'] == 7225, 'budget'] = 6
test.loc[test['id'] == 7231, 'budget'] = 85
test.loc[test['id'] == 5222, 'budget'] = 5
test.loc[test['id'] == 5322, 'budget'] = 90
test.loc[test['id'] == 5350, 'budget'] = 70
test.loc[test['id'] == 5378, 'budget'] = 10
test.loc[test['id'] == 5545, 'budget'] = 80
test.loc[test['id'] == 5810, 'budget'] = 8
test.loc[test['id'] == 5926, 'budget'] = 300
test.loc[test['id'] == 5927, 'budget'] = 4
test.loc[test['id'] == 5986, 'budget'] = 1
test.loc[test['id'] == 6053, 'budget'] = 20
test.loc[test['id'] == 6104, 'budget'] = 1
test.loc[test['id'] == 6130, 'budget'] = 30
test.loc[test['id'] == 6301, 'budget'] = 150
test.loc[test['id'] == 6276, 'budget'] = 100
test.loc[test['id'] == 6473, 'budget'] = 100
test.loc[test['id'] == 6842, 'budget'] = 30
release_dates = pd.read_csv('release_dates_per_country.csv')
release_dates['id'] = range(1,7399)
release_dates.drop(['original_title','title'],axis = 1,inplace = True)
release_dates.index = release_dates['id']
train = pd.merge(train, release_dates, how='left', on=['id'])
test = pd.merge(test, release_dates, how='left', on=['id'])
test['revenue'] = np.nan
# Merge in the additional features downloaded from TMDB
train = pd.merge(train, pd.read_csv('TrainAdditionalFeatures.csv'),
how='left', on=['imdb_id'])
test = pd.merge(test, pd.read_csv('TestAdditionalFeatures.csv'),
how='left', on=['imdb_id'])
# Add the extra training set of 2000 rows
additionalTrainData = pd.read_csv('additionalTrainData.csv')
additionalTrainData['release_date'] = additionalTrainData['release_date'].astype('str')
additionalTrainData['release_date'] = additionalTrainData['release_date'].str.replace('-', '/')
train = pd.concat([train, additionalTrainData])
print('train.columns:', train.columns)
print('train.shape:', train.shape)
# Based on the EDA, smooth revenue with a log transform
train['revenue'] = np.log1p(train['revenue'])
y = train['revenue'].values
# Columns stored as JSON-like strings
json_cols = ['genres', 'production_companies', 'production_countries',
'spoken_languages', 'Keywords', 'cast', 'crew']
# Convert the JSON-like string attributes into dicts
def get_dictionary(s):
try:
d = eval(s)
except:
d = {}
return d
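# Safer variant (sketch, not used below): eval() on arbitrary strings is risky;
# ast.literal_eval only accepts Python literals and raises on anything else.
import ast
def get_dictionary_safe(s):
    try:
        return ast.literal_eval(s)
    except (ValueError, SyntaxError):
        return {}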
for col in tqdm(json_cols + ['belongs_to_collection']):
train[col] = train[col].apply(lambda x: get_dictionary(x))
test[col] = test[col].apply(lambda x: get_dictionary(x))
# Count how often each category appears in the JSON-like attributes
def get_json_dict(df):
global json_cols
result = dict()
for e_col in json_cols:
d = dict()
rows = df[e_col].values
for row in rows:
if row is None: continue
for i in row:
if i['name'] not in d:
d[i['name']] = 0
d[i['name']] += 1
result[e_col] = d
return result
train_dict = get_json_dict(train)
test_dict = get_json_dict(test)
# For the JSON-like columns, remove inconsistent and rare categories:
# first drop categories that do not appear in both train and test,
# then drop categories that occur fewer than 10 times
for col in json_cols:
remove = []
train_id = set(list(train_dict[col].keys()))
test_id = set(list(test_dict[col].keys()))
remove += list(train_id - test_id) + list(test_id - train_id)
for i in train_id.union(test_id) - set(remove):
if train_dict[col][i] < 10 or i == '':
remove += [i]
for i in remove:
if i in train_dict[col]:
del train_dict[col][i]
if i in test_dict[col]:
del test_dict[col][i]
# Preprocess the data
all_data = prepare(pd.concat([train, test]).reset_index(drop=True))
train = all_data.loc[:train.shape[0] - 1, :]
test = all_data.loc[train.shape[0]:, :]
print(train.columns)
train.head()
# In[53]:
# Model selection: XGBoost
from sklearn.model_selection import KFold
import xgboost as xgb
# Settings: random seed 2019, 10-fold cross-validation
random_seed = 2019
k = 10
fold = list(KFold(k, shuffle=True, random_state=random_seed).split(train))
np.random.seed(random_seed)
# XGBoost model
def xgb_model(trn_x, trn_y, val_x, val_y, test, verbose):
params = {'objective': 'reg:linear',
'eta': 0.01,
'max_depth': 5,
'subsample': 0.8,
'colsample_bytree': 0.7,
'eval_metric': 'rmse',
'seed': random_seed,
'silent': True,
'n_estimators':10000,
'gamma':1.45,
'colsample_bylevel':0.6,
}
record = dict()
model = xgb.train(params
, xgb.DMatrix(trn_x, trn_y)
, 100000
, [(xgb.DMatrix(trn_x, trn_y), 'train'), (xgb.DMatrix(val_x, val_y), 'valid')]
, verbose_eval=verbose
, early_stopping_rounds=500
, callbacks=[xgb.callback.record_evaluation(record)])
best_idx = np.argmin(np.array(record['valid']['rmse']))
val_pred = model.predict(xgb.DMatrix(val_x), ntree_limit=model.best_ntree_limit)
test_pred = model.predict(xgb.DMatrix(test), ntree_limit=model.best_ntree_limit)
return {'val': val_pred, 'test': test_pred, 'error': record['valid']['rmse'][best_idx],
'importance': [i for k, i in model.get_score().items()]}
# In[54]:
# training using LightGBM
import lightgbm as lgb
def lgb_model(trn_x, trn_y, val_x, val_y, test, verbose):
params = {'objective': 'regression',
'num_leaves': 30,
'min_data_in_leaf': 10,
'max_depth': 5,
'learning_rate': 0.01,
# 'min_child_samples':100,
'feature_fraction': 0.9,
"bagging_freq": 1,
"bagging_fraction": 0.9,
'lambda_l1': 0.2,
"bagging_seed": random_seed,
"metric": 'rmse',
'subsample':.8,
'colsample_bytree':.9,
"random_state": random_seed,
'n_estimators':10000,
'min_child_samples': 100,
'boosting': 'gbdt',
'importance_type': 'gain',
'use_best_model': True,
"verbosity": -1}
record = dict()
model = lgb.train(params
, lgb.Dataset(trn_x, trn_y)
, num_boost_round=100000
, valid_sets=[lgb.Dataset(val_x, val_y)]
, verbose_eval=verbose
, early_stopping_rounds=500
, callbacks=[lgb.record_evaluation(record)]
)
best_idx = np.argmin(np.array(record['valid_0']['rmse']))
val_pred = model.predict(val_x, num_iteration=model.best_iteration)
test_pred = model.predict(test, num_iteration=model.best_iteration)
return {'val': val_pred, 'test': test_pred, 'error': record['valid_0']['rmse'][best_idx],
'importance': model.feature_importance('gain')}
# In[55]:
# training with catboost
from catboost import CatBoostRegressor
def cat_model(trn_x, trn_y, val_x, val_y, test, verbose):
model = CatBoostRegressor(iterations=10000,
learning_rate=0.01,
depth=5,
eval_metric='RMSE',
colsample_bylevel=0.8,
random_seed=random_seed,
bagging_temperature=0.2,
metric_period=None,
early_stopping_rounds=200
)
model.fit(trn_x, trn_y,
eval_set=(val_x, val_y),
use_best_model=True,
verbose=False)
val_pred = model.predict(val_x)
test_pred = model.predict(test)
return {'val': val_pred,
'test': test_pred,
'error': model.get_best_score()['validation_0']['RMSE']}
# In[56]:
# use 3 models to train
result_dict = dict()
val_pred = np.zeros(train.shape[0])
test_pred = np.zeros(test.shape[0])
final_err = 0
verbose = False
for i, (trn, val) in enumerate(fold):
print(i + 1, "fold. RMSE")
trn_x = train.loc[trn, :]
trn_y = y[trn]
val_x = train.loc[val, :]
val_y = y[val]
fold_val_pred = []
fold_test_pred = []
fold_err = []
# """ xgboost
start = datetime.now()
result = xgb_model(trn_x, trn_y, val_x, val_y, test, verbose)
fold_val_pred.append(result['val'] * 0.2)
fold_test_pred.append(result['test'] * 0.2)
fold_err.append(result['error'])
print("xgb model.", "{0:.5f}".format(result['error']), '(' + str(int((datetime.now() - start).seconds / 60)) + 'm)')
# """
# """ lightgbm
start = datetime.now()
result = lgb_model(trn_x, trn_y, val_x, val_y, test, verbose)
fold_val_pred.append(result['val'] * 0.4)
fold_test_pred.append(result['test'] * 0.4)
fold_err.append(result['error'])
print("lgb model.", "{0:.5f}".format(result['error']), '(' + str(int((datetime.now() - start).seconds / 60)) + 'm)')
# """
# """ catboost model
start = datetime.now()
result = cat_model(trn_x, trn_y, val_x, val_y, test, verbose)
fold_val_pred.append(result['val'] * 0.4)
fold_test_pred.append(result['test'] * 0.4)
fold_err.append(result['error'])
print("cat model.", "{0:.5f}".format(result['error']), '(' + str(int((datetime.now() - start).seconds / 60)) + 'm)')
# """
# mix result of multiple models
val_pred[val] += np.mean(np.array(fold_val_pred), axis=0)
# print(fold_test_pred)
# print(fold_test_pred.shape)
# print(fold_test_pred.columns)
test_pred += np.mean(np.array(fold_test_pred), axis=0) / k
final_err += (sum(fold_err) / len(fold_err)) / k
print("---------------------------")
print("avg err.", "{0:.5f}".format(sum(fold_err) / len(fold_err)))
print("blend err.", "{0:.5f}".format(np.sqrt(np.mean((np.mean(np.array(fold_val_pred), axis=0) - val_y) ** 2))))
print('')
print("fianl avg err.", final_err)
print("fianl blend err.", np.sqrt(np.mean((val_pred - y) ** 2)))
# In[60]:
sub = | pd.read_csv('sample_submission.csv') | pandas.read_csv |
# pylint: disable-msg=E1101,W0612
from datetime import datetime, time, timedelta
import sys
import os
import unittest
import nose
import numpy as np
randn = np.random.randn
from pandas import (Index, Series, TimeSeries, DataFrame,
isnull, date_range, Timestamp, DatetimeIndex,
Int64Index, to_datetime, bdate_range)
import pandas.core.datetools as datetools
import pandas.tseries.offsets as offsets
import pandas.tseries.frequencies as fmod
import pandas as pd
from pandas.util.testing import assert_series_equal, assert_almost_equal
import pandas.util.testing as tm
from pandas.tslib import NaT, iNaT
import pandas.lib as lib
import pandas.tslib as tslib
import pandas.index as _index
from pandas.compat import(
range, long, StringIO, lrange, lmap, map, zip, cPickle as pickle, product
)
from pandas import read_pickle
import pandas.core.datetools as dt
from numpy.random import rand
from numpy.testing import assert_array_equal
from pandas.util.testing import assert_frame_equal
import pandas.compat as compat
from pandas.core.datetools import BDay
import pandas.core.common as com
from pandas import concat
from numpy.testing.decorators import slow
def _skip_if_no_pytz():
try:
import pytz
except ImportError:
raise nose.SkipTest("pytz not installed")
# unfortunately, too much has changed to handle these legacy pickles
# class TestLegacySupport(unittest.TestCase):
class LegacySupport(object):
_multiprocess_can_split_ = True
@classmethod
def setUpClass(cls):
if compat.PY3:
raise nose.SkipTest("not compatible with Python >= 3")
pth, _ = os.path.split(os.path.abspath(__file__))
filepath = os.path.join(pth, 'data', 'frame.pickle')
with open(filepath, 'rb') as f:
cls.frame = pickle.load(f)
filepath = os.path.join(pth, 'data', 'series.pickle')
with open(filepath, 'rb') as f:
cls.series = pickle.load(f)
def test_pass_offset_warn(self):
buf = StringIO()
sys.stderr = buf
| DatetimeIndex(start='1/1/2000', periods=10, offset='H') | pandas.DatetimeIndex |
"""
LCM: Linear time Closed item set Miner
as described in `http://lig-membres.imag.fr/termier/HLCM/hlcm.pdf`
url: https://github.com/scikit-mine/scikit-mine/blob/master/skmine/preprocessing/lcm.py
"""
# Author: <NAME> <<EMAIL>>
# License: BSD 3 clause
from collections import defaultdict
import numpy as np
import pandas as pd
from joblib import Parallel, delayed
from sortedcontainers import SortedDict
from .utils import _check_min_supp
from .utils import filter_maximal
from .bitmaps import Bitmap
class LCM:
"""
Linear time Closed item set Miner.
LCM can be used as a preprocessing step, yielding some patterns
that will be later submitted to a custom acceptance criterion.
It can also be used to simply discover the set of closed itemsets from
a transactional dataset.
Parameters
----------
min_supp: int or float, default=0.2
The minimum support for itemsets to be rendered in the output
Either an int representing the absolute support, or a float for relative support
Default to 0.2 (20%)
n_jobs : int, default=1
The number of jobs to use for the computation. Each single item is attributed a job
to discover potential itemsets, considering this item as a root in the search space.
Processes are preferred over threads.
References
----------
.. [1]
<NAME>, <NAME>, <NAME>
"LCM ver. 2: Efficient mining algorithms for frequent/closed/maximal itemsets", 2004
.. [2] Alexandre Termier
"Pattern mining rock: more, faster, better"
Examples
--------
from skmine.preprocessing import LCM
from skmine.datasets.fimi import fetch_chess
chess = fetch_chess()
lcm = LCM(min_supp=2000)
patterns = lcm.fit_discover(chess) # doctest: +SKIP
patterns.head() # doctest: +SKIP
itemset support
0 (58) 3195
1 (11, 58) 2128
2 (15, 58) 2025
3 (17, 58) 2499
4 (21, 58) 2224
patterns[patterns.itemset.map(len) > 3] # doctest: +SKIP
"""
def __init__(self, *, min_supp=0.2, n_jobs=1, verbose=False):
_check_min_supp(min_supp)
self.min_supp = min_supp # provided by user
self._min_supp = _check_min_supp(self.min_supp)
self.item_to_tids = None
self.n_transactions = 0
self.ctr = 0
self.n_jobs = n_jobs
self.verbose = verbose
def _fit(self, D):
self.n_transactions = 0 # reset for safety
item_to_tids = defaultdict(Bitmap)
for transaction in D:
for item in transaction:
item_to_tids[item].add(self.n_transactions)
self.n_transactions += 1
print(D)
print(item_to_tids)
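# Illustrative example of the index built above: for D = [[1, 2], [2, 3]] the
# mapping is item_to_tids = {1: Bitmap([0]), 2: Bitmap([0, 1]), 3: Bitmap([1])},
# i.e. each item points to the set of transaction ids in which it occurs.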
if isinstance(self.min_supp, float):
# make support absolute if needed
self._min_supp = self.min_supp * self.n_transactions
low_supp_items = [k for k, v in item_to_tids.items() if len(v) < self._min_supp]
for item in low_supp_items:
del item_to_tids[item]
self.item_to_tids = SortedDict(item_to_tids)
return self
def fit_discover(self, D, return_tids=False):
"""fit LCM on the transactional database, and return the set of
closed itemsets in this database, with respect to the minimum support
Different from ``fit_transform``, see the `Returns` section below.
Parameters
----------
D : pd.Series or Iterable
The input transactional database
Where every entry contain singular items
Items must be both hashable and comparable
return_tids: bool
Either to return transaction ids along with itemset.
Default to False, will return supports instead
Returns
-------
pd.DataFrame
DataFrame with the following columns
========== =================================
itemset a `tuple` of co-occurring items
support frequency of this itemset
========== =================================
if `return_tids=True` then
========== =================================
itemset a `tuple` of co-occurring items
tids a bitmap tracking positions
========== =================================
Example
-------
from skmine.preprocessing import LCM
D = [[1, 2, 3, 4, 5, 6], [2, 3, 5], [2, 5]]
LCM(min_supp=2).fit_discover(D)
itemset support
0 (2, 5) 3
1 (2, 3, 5) 2
LCM(min_supp=2).fit_discover(D, return_tids=True) # doctest: +SKIP
itemset tids
0 (2, 5) [0, 1, 2]
1 (2, 3, 5) [0, 1]
"""
self._fit(D)
empty_df = | pd.DataFrame(columns=['itemset', 'tids']) | pandas.DataFrame |
#!/usr/bin/env python3
"""
process output from simulations with cluster structures with no lags
"""
# import relevant modules and functions
import pickle
import pandas as pd
folder = 'simulation_study/simulations_cluster_results/'
theta_1_2_s = [1040, 1045, 1050, 1055, 1060]
res_table = {}
for j in range(len(theta_1_2_s)):
source_file = open(folder+"simulation_cluster_part0" + str(j) + "_results.pkl", "rb")
res_table_ = pickle.load(source_file)
source_file.close()
res_table[theta_1_2_s[j]] = res_table_[theta_1_2_s[j]]
logit_p_s = [-i for i in range(60, 140, 10)]
lambdas_s = [0, 0.2, 0.4, 0.6, 0.8]
graph_type = ['rchain', 'lattice']
theta_1_2_s = [1040, 1045, 1050, 1055, 1060]
# print for plot in R...
pd.Series(graph_type).to_csv(folder+'simulation_clust_graph_type.csv')
pd.Series(theta_1_2_s).to_csv(folder+'simulation_clust_thetas.csv')
| pd.Series(logit_p_s) | pandas.Series |
# -*- coding: utf-8 -*-
import numpy as np
import pytest
from numpy.random import RandomState
from numpy import nan
from datetime import datetime
from itertools import permutations
from pandas import (Series, Categorical, CategoricalIndex,
Timestamp, DatetimeIndex, Index, IntervalIndex)
import pandas as pd
from pandas import compat
from pandas._libs import (groupby as libgroupby, algos as libalgos,
hashtable as ht)
from pandas._libs.hashtable import unique_label_indices
from pandas.compat import lrange, range
import pandas.core.algorithms as algos
import pandas.core.common as com
import pandas.util.testing as tm
import pandas.util._test_decorators as td
from pandas.core.dtypes.dtypes import CategoricalDtype as CDT
from pandas.compat.numpy import np_array_datetime64_compat
from pandas.util.testing import assert_almost_equal
class TestMatch(object):
def test_ints(self):
values = np.array([0, 2, 1])
to_match = np.array([0, 1, 2, 2, 0, 1, 3, 0])
result = algos.match(to_match, values)
expected = np.array([0, 2, 1, 1, 0, 2, -1, 0], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
result = Series(algos.match(to_match, values, np.nan))
expected = Series(np.array([0, 2, 1, 1, 0, 2, np.nan, 0]))
tm.assert_series_equal(result, expected)
s = Series(np.arange(5), dtype=np.float32)
result = algos.match(s, [2, 4])
expected = np.array([-1, -1, 0, -1, 1], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
result = Series(algos.match(s, [2, 4], np.nan))
expected = Series(np.array([np.nan, np.nan, 0, np.nan, 1]))
tm.assert_series_equal(result, expected)
def test_strings(self):
values = ['foo', 'bar', 'baz']
to_match = ['bar', 'foo', 'qux', 'foo', 'bar', 'baz', 'qux']
result = algos.match(to_match, values)
expected = np.array([1, 0, -1, 0, 1, 2, -1], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
result = Series(algos.match(to_match, values, np.nan))
expected = Series(np.array([1, 0, np.nan, 0, 1, 2, np.nan]))
tm.assert_series_equal(result, expected)
class TestFactorize(object):
def test_basic(self):
labels, uniques = algos.factorize(['a', 'b', 'b', 'a', 'a', 'c', 'c',
'c'])
tm.assert_numpy_array_equal(
uniques, np.array(['a', 'b', 'c'], dtype=object))
labels, uniques = algos.factorize(['a', 'b', 'b', 'a',
'a', 'c', 'c', 'c'], sort=True)
exp = np.array([0, 1, 1, 0, 0, 2, 2, 2], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array(['a', 'b', 'c'], dtype=object)
tm.assert_numpy_array_equal(uniques, exp)
labels, uniques = algos.factorize(list(reversed(range(5))))
exp = np.array([0, 1, 2, 3, 4], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([4, 3, 2, 1, 0], dtype=np.int64)
tm.assert_numpy_array_equal(uniques, exp)
labels, uniques = algos.factorize(list(reversed(range(5))), sort=True)
exp = np.array([4, 3, 2, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([0, 1, 2, 3, 4], dtype=np.int64)
tm.assert_numpy_array_equal(uniques, exp)
labels, uniques = algos.factorize(list(reversed(np.arange(5.))))
exp = np.array([0, 1, 2, 3, 4], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([4., 3., 2., 1., 0.], dtype=np.float64)
tm.assert_numpy_array_equal(uniques, exp)
labels, uniques = algos.factorize(list(reversed(np.arange(5.))),
sort=True)
exp = np.array([4, 3, 2, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([0., 1., 2., 3., 4.], dtype=np.float64)
tm.assert_numpy_array_equal(uniques, exp)
def test_mixed(self):
# doc example reshaping.rst
x = Series(['A', 'A', np.nan, 'B', 3.14, np.inf])
labels, uniques = algos.factorize(x)
exp = np.array([0, 0, -1, 1, 2, 3], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = Index(['A', 'B', 3.14, np.inf])
tm.assert_index_equal(uniques, exp)
labels, uniques = algos.factorize(x, sort=True)
exp = np.array([2, 2, -1, 3, 0, 1], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = Index([3.14, np.inf, 'A', 'B'])
tm.assert_index_equal(uniques, exp)
def test_datelike(self):
# M8
v1 = Timestamp('20130101 09:00:00.00004')
v2 = Timestamp('20130101')
x = Series([v1, v1, v1, v2, v2, v1])
labels, uniques = algos.factorize(x)
exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = DatetimeIndex([v1, v2])
tm.assert_index_equal(uniques, exp)
labels, uniques = algos.factorize(x, sort=True)
exp = np.array([1, 1, 1, 0, 0, 1], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = DatetimeIndex([v2, v1])
tm.assert_index_equal(uniques, exp)
# period
v1 = pd.Period('201302', freq='M')
v2 = pd.Period('201303', freq='M')
x = Series([v1, v1, v1, v2, v2, v1])
# periods are not 'sorted' as they are converted back into an index
labels, uniques = algos.factorize(x)
exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(uniques, pd.PeriodIndex([v1, v2]))
labels, uniques = algos.factorize(x, sort=True)
exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(uniques, pd.PeriodIndex([v1, v2]))
# GH 5986
v1 = pd.to_timedelta('1 day 1 min')
v2 = pd.to_timedelta('1 day')
x = Series([v1, v2, v1, v1, v2, v2, v1])
labels, uniques = algos.factorize(x)
exp = np.array([0, 1, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(uniques, pd.to_timedelta([v1, v2]))
labels, uniques = algos.factorize(x, sort=True)
exp = np.array([1, 0, 1, 1, 0, 0, 1], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(uniques, pd.to_timedelta([v2, v1]))
def test_factorize_nan(self):
# nan should map to na_sentinel, not reverse_indexer[na_sentinel]
# rizer.factorize should not raise an exception if na_sentinel indexes
# outside of reverse_indexer
key = np.array([1, 2, 1, np.nan], dtype='O')
rizer = ht.Factorizer(len(key))
for na_sentinel in (-1, 20):
ids = rizer.factorize(key, sort=True, na_sentinel=na_sentinel)
expected = np.array([0, 1, 0, na_sentinel], dtype='int32')
assert len(set(key)) == len(set(expected))
tm.assert_numpy_array_equal(pd.isna(key),
expected == na_sentinel)
# nan still maps to na_sentinel when sort=False
key = np.array([0, np.nan, 1], dtype='O')
na_sentinel = -1
# TODO(wesm): unused?
ids = rizer.factorize(key, sort=False, na_sentinel=na_sentinel) # noqa
expected = np.array([2, -1, 0], dtype='int32')
assert len(set(key)) == len(set(expected))
tm.assert_numpy_array_equal(pd.isna(key), expected == na_sentinel)
@pytest.mark.parametrize("data,expected_label,expected_level", [
(
[(1, 1), (1, 2), (0, 0), (1, 2), 'nonsense'],
[0, 1, 2, 1, 3],
[(1, 1), (1, 2), (0, 0), 'nonsense']
),
(
[(1, 1), (1, 2), (0, 0), (1, 2), (1, 2, 3)],
[0, 1, 2, 1, 3],
[(1, 1), (1, 2), (0, 0), (1, 2, 3)]
),
(
[(1, 1), (1, 2), (0, 0), (1, 2)],
[0, 1, 2, 1],
[(1, 1), (1, 2), (0, 0)]
)
])
def test_factorize_tuple_list(self, data, expected_label, expected_level):
# GH9454
result = pd.factorize(data)
tm.assert_numpy_array_equal(result[0],
np.array(expected_label, dtype=np.intp))
expected_level_array = com._asarray_tuplesafe(expected_level,
dtype=object)
tm.assert_numpy_array_equal(result[1], expected_level_array)
def test_complex_sorting(self):
# gh 12666 - check no segfault
# Test not valid numpy versions older than 1.11
if pd._np_version_under1p11:
pytest.skip("Test valid only for numpy 1.11+")
x17 = np.array([complex(i) for i in range(17)], dtype=object)
pytest.raises(TypeError, algos.factorize, x17[::-1], sort=True)
def test_uint64_factorize(self):
data = np.array([2**63, 1, 2**63], dtype=np.uint64)
exp_labels = np.array([0, 1, 0], dtype=np.intp)
exp_uniques = np.array([2**63, 1], dtype=np.uint64)
labels, uniques = algos.factorize(data)
tm.assert_numpy_array_equal(labels, exp_labels)
tm.assert_numpy_array_equal(uniques, exp_uniques)
data = np.array([2**63, -1, 2**63], dtype=object)
exp_labels = np.array([0, 1, 0], dtype=np.intp)
exp_uniques = np.array([2**63, -1], dtype=object)
labels, uniques = algos.factorize(data)
tm.assert_numpy_array_equal(labels, exp_labels)
tm.assert_numpy_array_equal(uniques, exp_uniques)
def test_deprecate_order(self):
# gh 19727 - check warning is raised for deprecated keyword, order.
# Test not valid once order keyword is removed.
data = np.array([2**63, 1, 2**63], dtype=np.uint64)
with tm.assert_produces_warning(expected_warning=FutureWarning):
algos.factorize(data, order=True)
with tm.assert_produces_warning(False):
algos.factorize(data)
@pytest.mark.parametrize('data', [
np.array([0, 1, 0], dtype='u8'),
np.array([-2**63, 1, -2**63], dtype='i8'),
np.array(['__nan__', 'foo', '__nan__'], dtype='object'),
])
def test_parametrized_factorize_na_value_default(self, data):
# arrays that include the NA default for that type, but isn't used.
l, u = algos.factorize(data)
expected_uniques = data[[0, 1]]
expected_labels = np.array([0, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(l, expected_labels)
tm.assert_numpy_array_equal(u, expected_uniques)
@pytest.mark.parametrize('data, na_value', [
(np.array([0, 1, 0, 2], dtype='u8'), 0),
(np.array([1, 0, 1, 2], dtype='u8'), 1),
(np.array([-2**63, 1, -2**63, 0], dtype='i8'), -2**63),
(np.array([1, -2**63, 1, 0], dtype='i8'), 1),
(np.array(['a', '', 'a', 'b'], dtype=object), 'a'),
(np.array([(), ('a', 1), (), ('a', 2)], dtype=object), ()),
(np.array([('a', 1), (), ('a', 1), ('a', 2)], dtype=object),
('a', 1)),
])
def test_parametrized_factorize_na_value(self, data, na_value):
l, u = algos._factorize_array(data, na_value=na_value)
expected_uniques = data[[1, 3]]
expected_labels = np.array([-1, 0, -1, 1], dtype=np.intp)
tm.assert_numpy_array_equal(l, expected_labels)
tm.assert_numpy_array_equal(u, expected_uniques)
class TestUnique(object):
def test_ints(self):
arr = np.random.randint(0, 100, size=50)
result = algos.unique(arr)
assert isinstance(result, np.ndarray)
def test_objects(self):
arr = np.random.randint(0, 100, size=50).astype('O')
result = algos.unique(arr)
assert isinstance(result, np.ndarray)
def test_object_refcount_bug(self):
lst = ['A', 'B', 'C', 'D', 'E']
for i in range(1000):
len(algos.unique(lst))
def test_on_index_object(self):
mindex = pd.MultiIndex.from_arrays([np.arange(5).repeat(5), np.tile(
np.arange(5), 5)])
expected = mindex.values
expected.sort()
mindex = mindex.repeat(2)
result = pd.unique(mindex)
result.sort()
tm.assert_almost_equal(result, expected)
def test_datetime64_dtype_array_returned(self):
# GH 9431
expected = np_array_datetime64_compat(
['2015-01-03T00:00:00.000000000+0000',
'2015-01-01T00:00:00.000000000+0000'],
dtype='M8[ns]')
dt_index = pd.to_datetime(['2015-01-03T00:00:00.000000000+0000',
'2015-01-01T00:00:00.000000000+0000',
'2015-01-01T00:00:00.000000000+0000'])
result = algos.unique(dt_index)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
s = Series(dt_index)
result = algos.unique(s)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
arr = s.values
result = algos.unique(arr)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
def test_timedelta64_dtype_array_returned(self):
# GH 9431
expected = np.array([31200, 45678, 10000], dtype='m8[ns]')
td_index = pd.to_timedelta([31200, 45678, 31200, 10000, 45678])
result = algos.unique(td_index)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
s = Series(td_index)
result = algos.unique(s)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
arr = s.values
result = algos.unique(arr)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
def test_uint64_overflow(self):
s = Series([1, 2, 2**63, 2**63], dtype=np.uint64)
exp = np.array([1, 2, 2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(algos.unique(s), exp)
def test_nan_in_object_array(self):
l = ['a', np.nan, 'c', 'c']
result = pd.unique(l)
expected = np.array(['a', np.nan, 'c'], dtype=object)
tm.assert_numpy_array_equal(result, expected)
def test_categorical(self):
# we are expecting to return in the order
# of appearance
expected = Categorical(list('bac'), categories=list('bac'))
# we are expecting to return in the order
# of the categories
expected_o = Categorical(
list('bac'), categories=list('abc'), ordered=True)
# GH 15939
c = Categorical(list('baabc'))
result = c.unique()
tm.assert_categorical_equal(result, expected)
result = algos.unique(c)
tm.assert_categorical_equal(result, expected)
c = Categorical(list('baabc'), ordered=True)
result = c.unique()
tm.assert_categorical_equal(result, expected_o)
result = algos.unique(c)
tm.assert_categorical_equal(result, expected_o)
# Series of categorical dtype
s = Series(Categorical(list('baabc')), name='foo')
result = s.unique()
tm.assert_categorical_equal(result, expected)
result = pd.unique(s)
tm.assert_categorical_equal(result, expected)
# CI -> return CI
ci = CategoricalIndex(Categorical(list('baabc'),
categories=list('bac')))
expected = CategoricalIndex(expected)
result = ci.unique()
tm.assert_index_equal(result, expected)
result = pd.unique(ci)
tm.assert_index_equal(result, expected)
def test_datetime64tz_aware(self):
# GH 15939
result = Series(
Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')])).unique()
expected = np.array([Timestamp('2016-01-01 00:00:00-0500',
tz='US/Eastern')], dtype=object)
tm.assert_numpy_array_equal(result, expected)
result = Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')]).unique()
expected = DatetimeIndex(['2016-01-01 00:00:00'],
dtype='datetime64[ns, US/Eastern]', freq=None)
tm.assert_index_equal(result, expected)
result = pd.unique(
Series(Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')])))
expected = np.array([Timestamp('2016-01-01 00:00:00-0500',
tz='US/Eastern')], dtype=object)
tm.assert_numpy_array_equal(result, expected)
result = pd.unique(Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')]))
expected = DatetimeIndex(['2016-01-01 00:00:00'],
dtype='datetime64[ns, US/Eastern]', freq=None)
tm.assert_index_equal(result, expected)
def test_order_of_appearance(self):
# 9346
# light testing of guarantee of order of appearance
# these also are the doc-examples
result = pd.unique(Series([2, 1, 3, 3]))
tm.assert_numpy_array_equal(result,
np.array([2, 1, 3], dtype='int64'))
result = pd.unique(Series([2] + [1] * 5))
tm.assert_numpy_array_equal(result,
np.array([2, 1], dtype='int64'))
result = pd.unique(Series([Timestamp('20160101'),
Timestamp('20160101')]))
expected = np.array(['2016-01-01T00:00:00.000000000'],
dtype='datetime64[ns]')
tm.assert_numpy_array_equal(result, expected)
result = pd.unique(Index(
[ | Timestamp('20160101', tz='US/Eastern') | pandas.Timestamp |
import os
import sys
import pytest
from shapely.geometry import Polygon, GeometryCollection
from pandas import DataFrame, Timestamp
from pandas.testing import assert_frame_equal
from tests.fixtures import *
from tests.test_core_components_route import self_looping_route, route
from tests.test_core_components_service import service
from genet.inputs_handler import matsim_reader, gtfs_reader
from genet.inputs_handler import read
from genet.schedule_elements import Schedule, Service, Route, Stop, read_vehicle_types
from genet.utils import plot, spatial
from genet.validate import schedule_validation
from genet.exceptions import ServiceIndexError, RouteIndexError, StopIndexError, UndefinedCoordinateSystemError, \
ConflictingStopData, InconsistentVehicleModeError
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
pt2matsim_schedule_file = os.path.abspath(
os.path.join(os.path.dirname(__file__), "test_data", "matsim", "schedule.xml"))
pt2matsim_vehicles_file = os.path.abspath(
os.path.join(os.path.dirname(__file__), "test_data", "matsim", "vehicles.xml"))
@pytest.fixture()
def schedule():
route_1 = Route(route_short_name='name',
mode='bus', id='1',
stops=[Stop(id='1', x=4, y=2, epsg='epsg:27700'), Stop(id='2', x=1, y=2, epsg='epsg:27700'),
Stop(id='3', x=3, y=3, epsg='epsg:27700'), Stop(id='4', x=7, y=5, epsg='epsg:27700')],
trips={'trip_id': ['1', '2'],
'trip_departure_time': ['13:00:00', '13:30:00'],
'vehicle_id': ['veh_1_bus', 'veh_2_bus']},
arrival_offsets=['00:00:00', '00:03:00', '00:07:00', '00:13:00'],
departure_offsets=['00:00:00', '00:05:00', '00:09:00', '00:15:00'])
route_2 = Route(route_short_name='name_2',
mode='bus', id='2',
stops=[Stop(id='5', x=4, y=2, epsg='epsg:27700'), Stop(id='6', x=1, y=2, epsg='epsg:27700'),
Stop(id='7', x=3, y=3, epsg='epsg:27700'), Stop(id='8', x=7, y=5, epsg='epsg:27700')],
trips={'trip_id': ['1', '2'],
'trip_departure_time': ['11:00:00', '13:00:00'],
'vehicle_id': ['veh_3_bus', 'veh_4_bus']},
arrival_offsets=['00:00:00', '00:03:00', '00:07:00', '00:13:00'],
departure_offsets=['00:00:00', '00:05:00', '00:09:00', '00:15:00'])
service = Service(id='service', routes=[route_1, route_2])
return Schedule(epsg='epsg:27700', services=[service])
@pytest.fixture()
def strongly_connected_schedule():
route_1 = Route(route_short_name='name',
mode='bus',
stops=[Stop(id='1', x=4, y=2, epsg='epsg:27700', name='Stop_1'),
Stop(id='2', x=1, y=2, epsg='epsg:27700', name='Stop_2'),
Stop(id='3', x=3, y=3, epsg='epsg:27700', name='Stop_3'),
Stop(id='4', x=7, y=5, epsg='epsg:27700', name='Stop_4'),
Stop(id='1', x=4, y=2, epsg='epsg:27700', name='Stop_1')],
trips={'trip_id': ['1', '2'], 'trip_departure_time': ['11:00:00', '13:00:00'],
'vehicle_id': ['veh_1_bus', 'veh_2_bus']},
arrival_offsets=['1', '2'], departure_offsets=['1', '2'],
id='1')
route_2 = Route(route_short_name='name_2',
mode='bus',
stops=[Stop(id='5', x=4, y=2, epsg='epsg:27700', name='Stop_5'),
Stop(id='2', x=1, y=2, epsg='epsg:27700', name='Stop_2'),
Stop(id='7', x=3, y=3, epsg='epsg:27700', name='Stop_7'),
Stop(id='8', x=7, y=5, epsg='epsg:27700', name='Stop_8'),
Stop(id='5', x=4, y=2, epsg='epsg:27700', name='Stop_5')],
trips={'trip_id': ['1', '2'], 'trip_departure_time': ['11:00:00', '13:00:00'],
'vehicle_id': ['veh_3_bus', 'veh_4_bus']},
arrival_offsets=['1', '2', '3', '4', '5'],
departure_offsets=['1', '2', '3', '4', '5'],
id='2')
service = Service(id='service', routes=[route_1, route_2])
return Schedule(epsg='epsg:27700', services=[service])
def test_initiating_schedule(schedule):
s = schedule
assert_semantically_equal(dict(s._graph.nodes(data=True)), {
'5': {'services': {'service'}, 'routes': {'2'}, 'id': '5', 'x': 4.0, 'y': 2.0, 'epsg': 'epsg:27700', 'name': '',
'lat': 49.76682779861249, 'lon': -7.557106577683727, 's2_id': 5205973754090531959,
'additional_attributes': set()},
'6': {'services': {'service'}, 'routes': {'2'}, 'id': '6', 'x': 1.0, 'y': 2.0, 'epsg': 'epsg:27700', 'name': '',
'lat': 49.766825803756994, 'lon': -7.557148039524952, 's2_id': 5205973754090365183,
'additional_attributes': set()},
'7': {'services': {'service'}, 'routes': {'2'}, 'id': '7', 'x': 3.0, 'y': 3.0, 'epsg': 'epsg:27700', 'name': '',
'lat': 49.76683608549253, 'lon': -7.557121424907424, 's2_id': 5205973754090203369,
'additional_attributes': set()},
'8': {'services': {'service'}, 'routes': {'2'}, 'id': '8', 'x': 7.0, 'y': 5.0, 'epsg': 'epsg:27700', 'name': '',
'lat': 49.766856648946295, 'lon': -7.5570681956375, 's2_id': 5205973754097123809,
'additional_attributes': set()},
'1': {'services': {'service'}, 'routes': {'1'}, 'id': '1', 'x': 4.0, 'y': 2.0, 'epsg': 'epsg:27700', 'name': '',
'lat': 49.76682779861249, 'lon': -7.557106577683727, 's2_id': 5205973754090531959,
'additional_attributes': set()},
'4': {'services': {'service'}, 'routes': {'1'}, 'id': '4', 'x': 7.0, 'y': 5.0, 'epsg': 'epsg:27700', 'name': '',
'lat': 49.766856648946295, 'lon': -7.5570681956375, 's2_id': 5205973754097123809,
'additional_attributes': set()},
'2': {'services': {'service'}, 'routes': {'1'}, 'id': '2', 'x': 1.0, 'y': 2.0, 'epsg': 'epsg:27700', 'name': '',
'lat': 49.766825803756994, 'lon': -7.557148039524952, 's2_id': 5205973754090365183,
'additional_attributes': set()},
'3': {'services': {'service'}, 'routes': {'1'}, 'id': '3', 'x': 3.0, 'y': 3.0, 'epsg': 'epsg:27700', 'name': '',
'lat': 49.76683608549253, 'lon': -7.557121424907424, 's2_id': 5205973754090203369,
'additional_attributes': set()}})
assert_semantically_equal(s._graph.edges(data=True)._adjdict,
{'5': {'6': {'services': {'service'}, 'routes': {'2'}}},
'6': {'7': {'services': {'service'}, 'routes': {'2'}}},
'7': {'8': {'services': {'service'}, 'routes': {'2'}}}, '8': {}, '4': {},
'1': {'2': {'services': {'service'}, 'routes': {'1'}}},
'3': {'4': {'services': {'service'}, 'routes': {'1'}}},
'2': {'3': {'services': {'service'}, 'routes': {'1'}}}})
log = s._graph.graph.pop('change_log')
assert log.empty
assert_semantically_equal(s._graph.graph,
{'name': 'Schedule graph',
'routes': {'2': {'route_short_name': 'name_2', 'mode': 'bus',
'trips': {'trip_id': ['1', '2'],
'trip_departure_time': ['11:00:00', '13:00:00'],
'vehicle_id': ['veh_3_bus', 'veh_4_bus']},
'arrival_offsets': ['00:00:00', '00:03:00',
'00:07:00', '00:13:00'],
'departure_offsets': ['00:00:00', '00:05:00',
'00:09:00', '00:15:00'],
'route_long_name': '', 'id': '2', 'route': [],
'await_departure': [],
'ordered_stops': ['5', '6', '7', '8']},
'1': {'route_short_name': 'name', 'mode': 'bus',
'trips': {'trip_id': ['1', '2'],
'trip_departure_time': ['13:00:00', '13:30:00'],
'vehicle_id': ['veh_1_bus', 'veh_2_bus']},
'arrival_offsets': ['00:00:00', '00:03:00',
'00:07:00', '00:13:00'],
'departure_offsets': ['00:00:00', '00:05:00',
'00:09:00', '00:15:00'],
'route_long_name': '', 'id': '1', 'route': [],
'await_departure': [],
'ordered_stops': ['1', '2', '3', '4']}},
'services': {'service': {'id': 'service', 'name': 'name'}},
'route_to_service_map': {'1': 'service', '2': 'service'},
'service_to_route_map': {'service': ['1', '2']},
'crs': {'init': 'epsg:27700'}})
def test_initiating_schedule_with_non_uniquely_indexed_objects():
route_1 = Route(route_short_name='name',
mode='bus', id='',
stops=[Stop(id='1', x=4, y=2, epsg='epsg:27700'), Stop(id='2', x=1, y=2, epsg='epsg:27700'),
Stop(id='3', x=3, y=3, epsg='epsg:27700'), Stop(id='4', x=7, y=5, epsg='epsg:27700')],
trips={'trip_id': ['1', '2'],
'trip_departure_time': ['13:00:00', '13:30:00'],
'vehicle_id': ['veh_1_bus', 'veh_2_bus']},
arrival_offsets=['00:00:00', '00:03:00', '00:07:00', '00:13:00'],
departure_offsets=['00:00:00', '00:05:00', '00:09:00', '00:15:00'])
route_2 = Route(route_short_name='name_2',
mode='bus', id='',
stops=[Stop(id='5', x=4, y=2, epsg='epsg:27700'), Stop(id='6', x=1, y=2, epsg='epsg:27700'),
Stop(id='7', x=3, y=3, epsg='epsg:27700'), Stop(id='8', x=7, y=5, epsg='epsg:27700')],
trips={'trip_id': ['1', '2'],
'trip_departure_time': ['11:00:00', '13:00:00'],
'vehicle_id': ['veh_2_bus', 'veh_3_bus']},
arrival_offsets=['00:00:00', '00:03:00', '00:07:00', '00:13:00'],
departure_offsets=['00:00:00', '00:05:00', '00:09:00', '00:15:00'])
service1 = Service(id='service', routes=[route_1, route_2])
service2 = Service(id='service', routes=[route_1, route_2])
s = Schedule(epsg='epsg:27700', services=[service1, service2])
assert s.number_of_routes() == 4
assert len(s) == 2
def test__getitem__returns_a_service(test_service):
services = [test_service]
schedule = Schedule(services=services, epsg='epsg:4326')
assert schedule['service'] == services[0]
def test_accessing_route(schedule):
assert schedule.route('1') == Route(route_short_name='name',
mode='bus', id='1',
stops=[Stop(id='1', x=4, y=2, epsg='epsg:27700'),
Stop(id='2', x=1, y=2, epsg='epsg:27700'),
Stop(id='3', x=3, y=3, epsg='epsg:27700'),
Stop(id='4', x=7, y=5, epsg='epsg:27700')],
trips={'trip_id': ['1', '2'],
'trip_departure_time': ['1', '2'],
'vehicle_id': ['veh_1_bus', 'veh_2_bus']},
arrival_offsets=['00:00:00', '00:03:00', '00:07:00', '00:13:00'],
departure_offsets=['00:00:00', '00:05:00', '00:09:00', '00:15:00'])
def test__repr__shows_number_of_services(mocker):
mocker.patch.object(Schedule, '__len__', return_value=0)
schedule = Schedule('epsg:27700')
s = schedule.__repr__()
assert 'instance at' in s
assert 'services' in s
Schedule.__len__.assert_called()
def test__str__shows_info():
schedule = Schedule('epsg:27700')
assert 'Number of services' in schedule.__str__()
assert 'Number of routes' in schedule.__str__()
def test__len__returns_the_number_of_services(test_service):
services = [test_service]
schedule = Schedule(services=services, epsg='epsg:4326')
assert len(schedule) == 1
def test_print_shows_info(mocker):
mocker.patch.object(Schedule, 'info')
schedule = Schedule('epsg:27700')
schedule.print()
Schedule.info.assert_called_once()
def test_info_shows_number_of_services_and_routes(mocker):
mocker.patch.object(Schedule, '__len__', return_value=0)
mocker.patch.object(Schedule, 'number_of_routes')
schedule = Schedule('epsg:27700')
schedule.print()
Schedule.__len__.assert_called()
Schedule.number_of_routes.assert_called_once()
def test_plot_delegates_to_util_plot_plot_graph_routes(mocker, schedule):
mocker.patch.object(plot, 'plot_graph')
schedule.plot()
plot.plot_graph.assert_called_once()
def test_reproject_changes_projection_for_all_stops_in_route():
correct_x_y = {'x': -0.14967658860132668, 'y': 51.52393050617373}
schedule = Schedule(
'epsg:27700',
[Service(id='10314', routes=[
Route(
route_short_name='12',
mode='bus',
stops=[Stop(id='26997928P', x='528464.1342843144', y='182179.7435136598', epsg='epsg:27700'),
Stop(id='26997928P.link:1', x='528464.1342843144', y='182179.7435136598', epsg='epsg:27700')],
route=['1'],
trips={'trip_id': ['VJ00938baa194cee94700312812d208fe79f3297ee_04:40:00'],
'trip_departure_time': ['04:40:00'],
'vehicle_id': ['veh_1_bus']},
arrival_offsets=['00:00:00', '00:02:00'],
departure_offsets=['00:00:00', '00:02:00']
)
])])
schedule.reproject('epsg:4326')
_stops = list(schedule.stops())
stops = dict(zip([stop.id for stop in _stops], _stops))
assert_semantically_equal({'x': stops['26997928P'].x, 'y': stops['26997928P'].y}, correct_x_y)
assert_semantically_equal({'x': stops['26997928P.link:1'].x, 'y': stops['26997928P.link:1'].y}, correct_x_y)
def test_adding_merges_separable_schedules(route):
schedule = Schedule(epsg='epsg:4326', services=[Service(id='1', routes=[route])])
before_graph_nodes = schedule.reference_nodes()
before_graph_edges = schedule.reference_edges()
a = Stop(id='10', x=40, y=20, epsg='epsg:27700', linkRefId='1')
b = Stop(id='20', x=10, y=20, epsg='epsg:27700', linkRefId='2')
c = Stop(id='30', x=30, y=30, epsg='epsg:27700', linkRefId='3')
d = Stop(id='40', x=70, y=50, epsg='epsg:27700', linkRefId='4')
schedule_to_be_added = Schedule(epsg='epsg:4326', services=[Service(id='2', routes=[
Route(
route_short_name='name',
mode='bus',
stops=[a, b, c, d],
trips={'trip_id': ['1', '2'],
'trip_departure_time': ['04:40:00', '05:40:00'],
'vehicle_id': ['veh_1_bus', 'veh_2_bus']},
arrival_offsets=['00:00:00', '00:03:00', '00:07:00', '00:13:00'],
departure_offsets=['00:00:00', '00:05:00', '00:09:00', '00:15:00'],
route=['1', '2', '3', '4'], id='2')
])])
tba_graph_nodes = schedule_to_be_added.reference_nodes()
tba_graph_edges = schedule_to_be_added.reference_edges()
schedule.add(schedule_to_be_added)
assert '1' in list(schedule.service_ids())
assert '2' in list(schedule.service_ids())
assert '1' in list(schedule.route_ids())
assert '2' in list(schedule.route_ids())
assert schedule.epsg == 'epsg:4326'
assert schedule.epsg == schedule_to_be_added.epsg
assert set(schedule._graph.nodes()) == set(before_graph_nodes) | set(tba_graph_nodes)
assert set(schedule._graph.edges()) == set(before_graph_edges) | set(tba_graph_edges)
def test_adding_throws_error_when_schedules_not_separable(test_service):
schedule = Schedule(epsg='epsg:4326', services=[test_service])
assert 'service' in schedule
schedule_to_be_added = Schedule(epsg='epsg:4326', services=[test_service])
with pytest.raises(NotImplementedError) as e:
schedule.add(schedule_to_be_added)
assert 'This method only supports adding non overlapping services' in str(e.value)
def test_adding_calls_on_reproject_when_schedules_dont_have_matching_epsg(test_service, different_test_service, mocker):
mocker.patch.object(Schedule, 'reproject')
schedule = Schedule(services=[test_service], epsg='epsg:27700')
assert schedule.has_service('service')
schedule_to_be_added = Schedule(services=[different_test_service], epsg='epsg:4326')
schedule.add(schedule_to_be_added)
schedule_to_be_added.reproject.assert_called_once_with('epsg:27700')
def test_service_ids_returns_keys_of_the_services_dict(test_service):
services = [test_service]
schedule = Schedule(services=services, epsg='epsg:4326')
assert set(schedule.service_ids()) == {'service'}
def test_routes_returns_service_ids_with_unique_routes(route, similar_non_exact_test_route):
services = [Service(id='1', routes=[route]), Service(id='2', routes=[similar_non_exact_test_route])]
schedule = Schedule(services=services, epsg='epsg:4326')
routes = list(schedule.routes())
assert route in routes
assert similar_non_exact_test_route in routes
assert len(routes) == 2
def test_number_of_routes_counts_routes(test_service, different_test_service):
schedule = Schedule(services=[test_service, different_test_service], epsg='epsg:4362')
assert schedule.number_of_routes() == 3
def test_service_attribute_data_under_key(schedule):
df = schedule.service_attribute_data(keys='name').sort_index()
assert_frame_equal(df, DataFrame(
{'name': {'service': 'name'}}
))
def test_service_attribute_data_under_keys(schedule):
df = schedule.service_attribute_data(keys=['name', 'id']).sort_index()
assert_frame_equal(df, DataFrame(
{'name': {'service': 'name'}, 'id': {'service': 'service'}}
))
def test_route_attribute_data_under_key(schedule):
df = schedule.route_attribute_data(keys='route_short_name').sort_index()
assert_frame_equal(df, DataFrame(
{'route_short_name': {'1': 'name', '2': 'name_2'}}
))
def test_route_attribute_data_under_keys(schedule):
df = schedule.route_attribute_data(keys=['route_short_name', 'mode']).sort_index()
assert_frame_equal(df, DataFrame(
{'route_short_name': {'1': 'name', '2': 'name_2'}, 'mode': {'1': 'bus', '2': 'bus'}}
))
def test_stop_attribute_data_under_key(schedule):
df = schedule.stop_attribute_data(keys='x').sort_index()
assert_frame_equal(df, DataFrame(
{'x': {'1': 4.0, '2': 1.0, '3': 3.0, '4': 7.0, '5': 4.0, '6': 1.0, '7': 3.0, '8': 7.0}}))
def test_stop_attribute_data_under_keys(schedule):
df = schedule.stop_attribute_data(keys=['x', 'y']).sort_index()
assert_frame_equal(df, DataFrame(
{'x': {'1': 4.0, '2': 1.0, '3': 3.0, '4': 7.0, '5': 4.0, '6': 1.0, '7': 3.0, '8': 7.0},
'y': {'1': 2.0, '2': 2.0, '3': 3.0, '4': 5.0, '5': 2.0, '6': 2.0, '7': 3.0, '8': 5.0}}))
def test_extracting_services_on_condition(schedule):
ids = schedule.extract_service_ids_on_attributes(conditions={'name': 'name'})
assert ids == ['service']
def test_extracting_routes_on_condition(schedule):
ids = schedule.extract_route_ids_on_attributes(conditions=[{'mode': 'bus'}, {'route_short_name': 'name_2'}],
how=all)
assert ids == ['2']
def test_extracting_stops_on_condition(schedule):
ids = schedule.extract_stop_ids_on_attributes(conditions=[{'x': (0, 4)}, {'y': (0, 2)}], how=all)
assert set(ids) == {'5', '6', '1', '2'}
def test_getting_services_on_modal_condition(schedule):
service_ids = schedule.services_on_modal_condition(modes='bus')
assert service_ids == ['service']
def test_getting_routes_on_modal_condition(schedule):
route_ids = schedule.routes_on_modal_condition(modes='bus')
assert set(route_ids) == {'1', '2'}
def test_getting_stops_on_modal_condition(schedule):
stop_ids = schedule.stops_on_modal_condition(modes='bus')
assert set(stop_ids) == {'5', '6', '7', '8', '3', '1', '4', '2'}
test_geojson = os.path.abspath(
os.path.join(os.path.dirname(__file__), "test_data", "test_geojson.geojson"))
def test_getting_stops_on_spatial_condition_with_geojson(schedule, mocker):
mocker.patch.object(spatial, 'read_geojson_to_shapely',
return_value=GeometryCollection(
[Polygon([(-7.6, 49.7), (-7.4, 49.7), (-7.4, 49.8), (-7.6, 49.8), (-7.6, 49.7)])]))
stops = schedule.stops_on_spatial_condition(test_geojson)
assert set(stops) == {'5', '6', '7', '8', '2', '4', '3', '1'}
def test_getting_stops_on_spatial_condition_with_shapely_polygon(schedule):
p = Polygon([(-7.6, 49.7), (-7.4, 49.7), (-7.4, 49.8), (-7.6, 49.8), (-7.6, 49.7)])
stops = schedule.stops_on_spatial_condition(p)
assert set(stops) == {'5', '6', '7', '8', '2', '4', '3', '1'}
def test_getting_stops_on_spatial_condition_with_s2_hex_region(schedule):
s2_region = '4837,4839,483f5,4844,4849'
stops = schedule.stops_on_spatial_condition(s2_region)
assert set(stops) == {'5', '6', '7', '8', '2', '4', '3', '1'}
def test_getting_routes_intersecting_spatial_region(schedule):
p = Polygon([(-7.6, 49.7), (-7.4, 49.7), (-7.4, 49.8), (-7.6, 49.8), (-7.6, 49.7)])
routes = schedule.routes_on_spatial_condition(p)
assert set(routes) == {'1', '2'}
def test_getting_routes_contained_spatial_region(schedule):
p = Polygon([(-7.6, 49.7), (-7.4, 49.7), (-7.4, 49.8), (-7.6, 49.8), (-7.6, 49.7)])
routes = schedule.routes_on_spatial_condition(p, how='within')
assert set(routes) == {'1', '2'}
def test_getting_services_intersecting_spatial_region(schedule):
p = Polygon([(-7.6, 49.7), (-7.4, 49.7), (-7.4, 49.8), (-7.6, 49.8), (-7.6, 49.7)])
routes = schedule.services_on_spatial_condition(p)
assert set(routes) == {'service'}
def test_getting_services_contained_spatial_region(schedule):
p = Polygon([(-7.6, 49.7), (-7.4, 49.7), (-7.4, 49.8), (-7.6, 49.8), (-7.6, 49.7)])
routes = schedule.services_on_spatial_condition(p, how='within')
assert set(routes) == {'service'}
def test_applying_attributes_to_service(schedule):
assert schedule._graph.graph['services']['service']['name'] == 'name'
assert schedule['service'].name == 'name'
schedule.apply_attributes_to_services({'service': {'name': 'new_name'}})
assert schedule._graph.graph['services']['service']['name'] == 'new_name'
assert schedule['service'].name == 'new_name'
def test_applying_attributes_changing_id_to_service_throws_error(schedule):
assert 'service' in schedule._graph.graph['services']
assert schedule._graph.graph['services']['service']['id'] == 'service'
assert schedule['service'].id == 'service'
with pytest.raises(NotImplementedError) as e:
schedule.apply_attributes_to_services({'service': {'id': 'new_id'}})
assert 'Changing id can only be done via the `reindex` method' in str(e.value)
def test_applying_attributes_to_route(schedule):
assert schedule._graph.graph['routes']['1']['route_short_name'] == 'name'
assert schedule.route('1').route_short_name == 'name'
schedule.apply_attributes_to_routes({'1': {'route_short_name': 'new_name'}})
assert schedule._graph.graph['routes']['1']['route_short_name'] == 'new_name'
assert schedule.route('1').route_short_name == 'new_name'
def test_applying_mode_attributes_to_route_results_in_correct_mode_methods(schedule):
assert schedule.route('1').mode == 'bus'
assert schedule.modes() == {'bus'}
assert schedule.mode_graph_map() == {
'bus': {('3', '4'), ('2', '3'), ('1', '2'), ('6', '7'), ('5', '6'), ('7', '8')}}
schedule.apply_attributes_to_routes({'1': {'mode': 'new_bus'}})
assert schedule.route('1').mode == 'new_bus'
assert schedule.modes() == {'bus', 'new_bus'}
assert schedule['service'].modes() == {'bus', 'new_bus'}
assert schedule.mode_graph_map() == {'bus': {('7', '8'), ('6', '7'), ('5', '6')},
'new_bus': {('3', '4'), ('1', '2'), ('2', '3')}}
assert schedule['service'].mode_graph_map() == {'bus': {('6', '7'), ('7', '8'), ('5', '6')},
'new_bus': {('3', '4'), ('2', '3'), ('1', '2')}}
def test_applying_attributes_changing_id_to_route_throws_error(schedule):
assert '1' in schedule._graph.graph['routes']
assert schedule._graph.graph['routes']['1']['id'] == '1'
assert schedule.route('1').id == '1'
with pytest.raises(NotImplementedError) as e:
schedule.apply_attributes_to_routes({'1': {'id': 'new_id'}})
assert 'Changing id can only be done via the `reindex` method' in str(e.value)
def test_applying_attributes_to_stop(schedule):
assert schedule._graph.nodes['5']['name'] == ''
assert schedule.stop('5').name == ''
schedule.apply_attributes_to_stops({'5': {'name': 'new_name'}})
assert schedule._graph.nodes['5']['name'] == 'new_name'
assert schedule.stop('5').name == 'new_name'
def test_applying_attributes_changing_id_to_stop_throws_error(schedule):
assert '5' in schedule._graph.nodes
assert schedule._graph.nodes['5']['id'] == '5'
assert schedule.stop('5').id == '5'
with pytest.raises(NotImplementedError) as e:
schedule.apply_attributes_to_routes({'5': {'id': 'new_id'}})
assert 'Changing id can only be done via the `reindex` method' in str(e.value)
def change_name(attrib):
return 'new_name'
def test_applying_function_to_services(schedule):
schedule.apply_function_to_services(function=change_name, location='name')
assert schedule._graph.graph['services']['service']['name'] == 'new_name'
assert schedule['service'].name == 'new_name'
def test_applying_function_to_routes(schedule):
schedule.apply_function_to_routes(function=change_name, location='route_short_name')
for route in schedule.routes():
assert schedule._graph.graph['routes'][route.id]['route_short_name'] == 'new_name'
assert route.route_short_name == 'new_name'
def test_applying_function_to_stops(schedule):
schedule.apply_function_to_stops(function=change_name, location='name')
for stop in schedule.stops():
assert stop.name == 'new_name'
assert schedule._graph.nodes[stop.id]['name'] == 'new_name'
def test_adding_service(schedule, service):
service.reindex('different_service')
service.route('1').reindex('different_service_1')
service.route('2').reindex('different_service_2')
schedule.add_service(service)
assert set(schedule.route_ids()) == {'1', '2', 'different_service_1', 'different_service_2'}
assert set(schedule.service_ids()) == {'service', 'different_service'}
assert_semantically_equal(schedule._graph.graph['route_to_service_map'],
{'1': 'service', '2': 'service',
'different_service_1': 'different_service', 'different_service_2': 'different_service'})
assert_semantically_equal(schedule._graph.graph['service_to_route_map'],
{'service': ['1', '2'],
'different_service': ['different_service_1', 'different_service_2']})
def test_adding_service_with_clashing_route_ids(schedule, service):
service.reindex('different_service')
schedule.add_service(service)
assert set(schedule.route_ids()) == {'1', '2', 'different_service_1', 'different_service_2'}
assert set(schedule.service_ids()) == {'service', 'different_service'}
assert_semantically_equal(schedule._graph.graph['route_to_service_map'],
{'1': 'service', '2': 'service',
'different_service_1': 'different_service', 'different_service_2': 'different_service'})
assert_semantically_equal(schedule._graph.graph['service_to_route_map'],
{'service': ['1', '2'],
'different_service': ['different_service_1', 'different_service_2']})
def test_adding_service_with_clashing_id_throws_error(schedule, service):
with pytest.raises(ServiceIndexError) as e:
schedule.add_service(service)
assert 'already exists' in str(e.value)
def test_adding_service_with_clashing_stops_data_does_not_overwrite_existing_stops(schedule):
expected_stops_data = {
'5': {'services': {'service', 'some_id'}, 'routes': {'2', '3'}, 'id': '5', 'x': 4.0, 'y': 2.0,
'epsg': 'epsg:27700',
'name': '',
'lat': 49.76682779861249, 'lon': -7.557106577683727, 's2_id': 5205973754090531959,
'additional_attributes': set()},
'1': {'services': {'service', 'some_id'}, 'routes': {'1', '3'}, 'id': '1', 'x': 4.0, 'y': 2.0,
'epsg': 'epsg:27700',
'name': '',
'lat': 49.76682779861249, 'lon': -7.557106577683727, 's2_id': 5205973754090531959,
'additional_attributes': set()},
'2': {'services': {'service', 'some_id'}, 'routes': {'1', '3'}, 'id': '2', 'x': 1.0, 'y': 2.0,
'epsg': 'epsg:27700',
'name': '',
'lat': 49.766825803756994, 'lon': -7.557148039524952, 's2_id': 5205973754090365183,
'additional_attributes': set()}}
r = Route(
id='3',
route_short_name='name',
mode='bus',
trips={},
arrival_offsets=[],
departure_offsets=[],
stops=[Stop(id='1', x=1, y=2, epsg='epsg:27700'),
Stop(id='2', x=0, y=1, epsg='epsg:27700'),
Stop(id='5', x=0, y=2, epsg='epsg:27700')]
)
assert r.ordered_stops == ['1', '2', '5']
s = Service(id='some_id', routes=[r])
schedule.add_service(s, force=True)
assert_semantically_equal(dict(s.graph().nodes(data=True)), expected_stops_data)
assert_semantically_equal(s.graph()['1']['2'], {'routes': {'1', '3'}, 'services': {'some_id', 'service'}})
assert_semantically_equal(s.graph()['2']['5'], {'routes': {'3'}, 'services': {'some_id'}})
def test_adding_service_with_clashing_stops_data_without_force_flag_throws_error(schedule):
r = Route(
id='3',
route_short_name='name',
mode='bus',
trips={},
arrival_offsets=[],
departure_offsets=[],
stops=[Stop(id='1', x=1, y=2, epsg='epsg:27700'),
Stop(id='2', x=0, y=1, epsg='epsg:27700'),
Stop(id='5', x=0, y=2, epsg='epsg:27700')]
)
with pytest.raises(ConflictingStopData) as e:
schedule.add_service(Service(id='some_id', routes=[r]))
assert 'The following stops would inherit data' in str(e.value)
def test_removing_service(schedule):
schedule.remove_service('service')
assert not set(schedule.route_ids())
assert not set(schedule.service_ids())
assert not schedule._graph.graph['route_to_service_map']
assert not schedule._graph.graph['service_to_route_map']
def test_adding_route(schedule, route):
route.reindex('new_id')
schedule.add_route('service', route)
assert set(schedule.route_ids()) == {'1', '2', 'new_id'}
assert set(schedule.service_ids()) == {'service'}
assert_semantically_equal(schedule._graph.graph['route_to_service_map'],
{'1': 'service', '2': 'service', 'new_id': 'service'})
assert_semantically_equal(schedule._graph.graph['service_to_route_map'],
{'service': ['1', '2', 'new_id']})
def test_adding_route_with_clashing_id(schedule, route):
schedule.add_route('service', route)
assert set(schedule.route_ids()) == {'1', '2', 'service_3'}
assert set(schedule.service_ids()) == {'service'}
assert_semantically_equal(schedule._graph.graph['route_to_service_map'],
{'1': 'service', '2': 'service', 'service_3': 'service'})
assert_semantically_equal(schedule._graph.graph['service_to_route_map'],
{'service': ['1', '2', 'service_3']})
def test_adding_route_to_non_existing_service_throws_error(schedule, route):
with pytest.raises(ServiceIndexError) as e:
schedule.add_route('service_that_doesnt_exist', route)
assert 'does not exist' in str(e.value)
def test_creating_a_route_to_add_using_id_references_to_existing_stops_inherits_schedule_stops_data(schedule):
expected_stops_data = {
'5': {'services': {'service'}, 'routes': {'2', '3'}, 'id': '5', 'x': 4.0, 'y': 2.0, 'epsg': 'epsg:27700',
'name': '',
'lat': 49.76682779861249, 'lon': -7.557106577683727, 's2_id': 5205973754090531959,
'additional_attributes': set()},
'1': {'services': {'service'}, 'routes': {'1', '3'}, 'id': '1', 'x': 4.0, 'y': 2.0, 'epsg': 'epsg:27700',
'name': '',
'lat': 49.76682779861249, 'lon': -7.557106577683727, 's2_id': 5205973754090531959,
'additional_attributes': set()},
'2': {'services': {'service'}, 'routes': {'1', '3'}, 'id': '2', 'x': 1.0, 'y': 2.0, 'epsg': 'epsg:27700',
'name': '',
'lat': 49.766825803756994, 'lon': -7.557148039524952, 's2_id': 5205973754090365183,
'additional_attributes': set()}}
r = Route(
id='3',
route_short_name='name',
mode='bus',
trips={},
arrival_offsets=[],
departure_offsets=[],
stops=['1', '2', '5']
)
assert r.ordered_stops == ['1', '2', '5']
assert_semantically_equal(dict(r._graph.nodes(data=True)),
{'1': {'routes': {'3'}}, '2': {'routes': {'3'}}, '5': {'routes': {'3'}}})
assert_semantically_equal(r._graph.edges(data=True)._adjdict,
{'1': {'2': {'routes': {'3'}}}, '2': {'5': {'routes': {'3'}}}, '5': {}})
schedule.add_route('service', r)
assert_semantically_equal(dict(r.graph().nodes(data=True)), expected_stops_data)
assert_semantically_equal(r.graph()['1']['2'], {'routes': {'1', '3'}, 'services': {'service'}})
assert_semantically_equal(r.graph()['2']['5'], {'routes': {'3'}, 'services': {'service'}})
def test_creating_a_route_to_add_giving_existing_schedule_stops(schedule):
expected_stops_data = {
'5': {'services': {'service'}, 'routes': {'2', '3'}, 'id': '5', 'x': 4.0, 'y': 2.0, 'epsg': 'epsg:27700',
'name': '',
'lat': 49.76682779861249, 'lon': -7.557106577683727, 's2_id': 5205973754090531959,
'additional_attributes': set()},
'1': {'services': {'service'}, 'routes': {'1', '3'}, 'id': '1', 'x': 4.0, 'y': 2.0, 'epsg': 'epsg:27700',
'name': '',
'lat': 49.76682779861249, 'lon': -7.557106577683727, 's2_id': 5205973754090531959,
'additional_attributes': set()},
'2': {'services': {'service'}, 'routes': {'1', '3'}, 'id': '2', 'x': 1.0, 'y': 2.0, 'epsg': 'epsg:27700',
'name': '',
'lat': 49.766825803756994, 'lon': -7.557148039524952, 's2_id': 5205973754090365183,
'additional_attributes': set()}}
r = Route(
id='3',
route_short_name='name',
mode='bus',
trips={},
arrival_offsets=[],
departure_offsets=[],
stops=[schedule.stop('1'), schedule.stop('2'), schedule.stop('5')]
)
assert r.ordered_stops == ['1', '2', '5']
assert_semantically_equal(dict(r._graph.nodes(data=True)),
{'1': {'routes': {'3'}, 'id': '1', 'x': 4.0, 'y': 2.0, 'epsg': 'epsg:27700', 'name': '',
'lat': 49.76682779861249, 'lon': -7.557106577683727, 's2_id': 5205973754090531959,
'additional_attributes': set()},
'2': {'routes': {'3'}, 'id': '2', 'x': 1.0, 'y': 2.0, 'epsg': 'epsg:27700', 'name': '',
'lat': 49.766825803756994, 'lon': -7.557148039524952, 's2_id': 5205973754090365183,
'additional_attributes': set()},
'5': {'routes': {'3'}, 'id': '5', 'x': 4.0, 'y': 2.0, 'epsg': 'epsg:27700', 'name': '',
'lat': 49.76682779861249, 'lon': -7.557106577683727, 's2_id': 5205973754090531959,
'additional_attributes': set()}})
assert_semantically_equal(r._graph.edges(data=True)._adjdict,
{'1': {'2': {'routes': {'3'}}}, '2': {'5': {'routes': {'3'}}}, '5': {}})
schedule.add_route('service', r)
assert_semantically_equal(dict(r.graph().nodes(data=True)), expected_stops_data)
assert_semantically_equal(r.graph()['1']['2'], {'routes': {'1', '3'}, 'services': {'service'}})
assert_semantically_equal(r.graph()['2']['5'], {'routes': {'3'}, 'services': {'service'}})
def test_adding_route_with_clashing_stops_data_does_not_overwrite_existing_stops(schedule):
expected_stops_data = {
'5': {'services': {'service'}, 'routes': {'2', '3'}, 'id': '5', 'x': 4.0, 'y': 2.0, 'epsg': 'epsg:27700',
'name': '',
'lat': 49.76682779861249, 'lon': -7.557106577683727, 's2_id': 5205973754090531959,
'additional_attributes': set()},
'1': {'services': {'service'}, 'routes': {'1', '3'}, 'id': '1', 'x': 4.0, 'y': 2.0, 'epsg': 'epsg:27700',
'name': '',
'lat': 49.76682779861249, 'lon': -7.557106577683727, 's2_id': 5205973754090531959,
'additional_attributes': set()},
'2': {'services': {'service'}, 'routes': {'1', '3'}, 'id': '2', 'x': 1.0, 'y': 2.0, 'epsg': 'epsg:27700',
'name': '',
'lat': 49.766825803756994, 'lon': -7.557148039524952, 's2_id': 5205973754090365183,
'additional_attributes': set()}}
r = Route(
id='3',
route_short_name='name',
mode='bus',
trips={},
arrival_offsets=[],
departure_offsets=[],
stops=[Stop(id='1', x=1, y=2, epsg='epsg:27700'),
Stop(id='2', x=0, y=1, epsg='epsg:27700'),
Stop(id='5', x=0, y=2, epsg='epsg:27700')]
)
assert r.ordered_stops == ['1', '2', '5']
schedule.add_route('service', r, force=True)
assert_semantically_equal(dict(r.graph().nodes(data=True)), expected_stops_data)
assert_semantically_equal(r.graph()['1']['2'], {'routes': {'1', '3'}, 'services': {'service'}})
assert_semantically_equal(r.graph()['2']['5'], {'routes': {'3'}, 'services': {'service'}})
def test_adding_route_with_clashing_stops_data_only_flags_those_that_are_actually_different(schedule):
r = Route(
id='3',
route_short_name='name',
mode='bus',
trips={},
arrival_offsets=[],
departure_offsets=[],
stops=[Stop(id='1', x=1, y=2, epsg='epsg:27700'),
Stop(id='2', x=0, y=1, epsg='epsg:27700'),
Stop(id='5', x=4, y=2, epsg='epsg:27700', name='')]
)
assert r.ordered_stops == ['1', '2', '5']
with pytest.raises(ConflictingStopData) as e:
schedule.add_route('service', r)
assert "The following stops would inherit data currently stored under those Stop IDs in the Schedule: " \
"['1', '2']" in str(e.value)
def test_adding_route_with_clashing_stops_data_without_force_flag_throws_error(schedule):
r = Route(
id='3',
route_short_name='name',
mode='bus',
trips={},
arrival_offsets=[],
departure_offsets=[],
stops=[Stop(id='1', x=1, y=2, epsg='epsg:27700'),
Stop(id='2', x=0, y=1, epsg='epsg:27700'),
Stop(id='5', x=0, y=2, epsg='epsg:27700')]
)
with pytest.raises(ConflictingStopData) as e:
schedule.add_route('service', r)
assert 'The following stops would inherit data' in str(e.value)
def test_extracting_epsg_from_an_intermediate_route_gives_none():
# intermediate meaning not belonging to a schedule yet but referring to stops in a schedule
r = Route(
route_short_name='name',
mode='bus',
trips={},
arrival_offsets=[],
departure_offsets=[],
stops=['S1', 'S2', 'S3']
)
assert r.epsg is None
def test_removing_route(schedule):
schedule.remove_route('2')
assert set(schedule.route_ids()) == {'1'}
assert set(schedule.service_ids()) == {'service'}
assert_semantically_equal(schedule._graph.graph['route_to_service_map'],
{'1': 'service'})
assert_semantically_equal(schedule._graph.graph['service_to_route_map'],
{'service': ['1']})
def test_removing_route_updates_services_on_nodes_and_edges(schedule):
schedule.remove_route('2')
assert_semantically_equal(dict(schedule.graph().nodes(data=True)),
{'5': {'services': set(), 'routes': set(), 'id': '5', 'x': 4.0, 'y': 2.0, 'epsg': 'epsg:27700',
'name': '', 'lat': 49.76682779861249, 'lon': -7.557106577683727,
's2_id': 5205973754090531959, 'additional_attributes': set()},
'6': {'services': set(), 'routes': set(), 'id': '6', 'x': 1.0, 'y': 2.0, 'epsg': 'epsg:27700',
'name': '', 'lat': 49.766825803756994, 'lon': -7.557148039524952,
's2_id': 5205973754090365183, 'additional_attributes': set()},
'7': {'services': set(), 'routes': set(), 'id': '7', 'x': 3.0, 'y': 3.0, 'epsg': 'epsg:27700',
'name': '', 'lat': 49.76683608549253, 'lon': -7.557121424907424,
's2_id': 5205973754090203369, 'additional_attributes': set()},
'8': {'services': set(), 'routes': set(), 'id': '8', 'x': 7.0, 'y': 5.0, 'epsg': 'epsg:27700',
'name': '', 'lat': 49.766856648946295, 'lon': -7.5570681956375,
's2_id': 5205973754097123809, 'additional_attributes': set()},
'3': {'services': {'service'}, 'routes': {'1'}, 'id': '3', 'x': 3.0, 'y': 3.0,
'epsg': 'epsg:27700', 'name': '', 'lat': 49.76683608549253,
'lon': -7.557121424907424, 's2_id': 5205973754090203369,
'additional_attributes': set()},
'1': {'services': {'service'}, 'routes': {'1'}, 'id': '1', 'x': 4.0, 'y': 2.0,
'epsg': 'epsg:27700', 'name': '', 'lat': 49.76682779861249,
'lon': -7.557106577683727, 's2_id': 5205973754090531959,
'additional_attributes': set()},
'2': {'services': {'service'}, 'routes': {'1'}, 'id': '2', 'x': 1.0, 'y': 2.0,
'epsg': 'epsg:27700', 'name': '', 'lat': 49.766825803756994,
'lon': -7.557148039524952, 's2_id': 5205973754090365183,
'additional_attributes': set()},
'4': {'services': {'service'}, 'routes': {'1'}, 'id': '4', 'x': 7.0, 'y': 5.0,
'epsg': 'epsg:27700', 'name': '', 'lat': 49.766856648946295,
'lon': -7.5570681956375, 's2_id': 5205973754097123809,
'additional_attributes': set()}})
assert_semantically_equal(schedule.graph().edges(data=True)._adjdict,
{'5': {'6': {'services': set(), 'routes': set()}},
'6': {'7': {'services': set(), 'routes': set()}},
'7': {'8': {'services': set(), 'routes': set()}}, '8': {},
'1': {'2': {'services': {'service'}, 'routes': {'1'}}},
'3': {'4': {'services': {'service'}, 'routes': {'1'}}},
'2': {'3': {'services': {'service'}, 'routes': {'1'}}}, '4': {}})
def test_removing_stop(schedule):
schedule.remove_stop('5')
assert {stop.id for stop in schedule.stops()} == {'1', '3', '4', '7', '8', '6', '2'}
def test_removing_unused_stops(schedule):
schedule.remove_route('1')
schedule.remove_unsused_stops()
assert {stop.id for stop in schedule.stops()} == {'6', '8', '5', '7'}
def test_iter_stops_returns_stops_objects(test_service, different_test_service):
schedule = Schedule(services=[test_service, different_test_service], epsg='epsg:4326')
assert set([stop.id for stop in schedule.stops()]) == {'0', '1', '2', '3', '4'}
assert all([isinstance(stop, Stop) for stop in schedule.stops()])
def test_read_matsim_schedule_returns_expected_schedule():
schedule = read.read_matsim_schedule(
path_to_schedule=pt2matsim_schedule_file,
epsg='epsg:27700')
correct_services = Service(id='10314', routes=[
Route(
route_short_name='12', id='VJbd8660f05fe6f744e58a66ae12bd66acbca88b98',
mode='bus',
stops=[Stop(id='26997928P', x='528464.1342843144', y='182179.7435136598', epsg='epsg:27700'),
Stop(id='26997928P.link:1', x='528464.1342843144', y='182179.7435136598', epsg='epsg:27700')],
route=['1'],
trips={'trip_id': ['VJ00938baa194cee94700312812d208fe79f3297ee_04:40:00'],
'trip_departure_time': ['04:40:00'],
'vehicle_id': ['veh_0_bus']},
arrival_offsets=['00:00:00', '00:02:00'],
departure_offsets=['00:00:00', '00:02:00']
)
])
for val in schedule.services():
assert val == correct_services
assert_semantically_equal(schedule.stop_to_service_ids_map(),
{'26997928P.link:1': {'10314'}, '26997928P': {'10314'}})
assert_semantically_equal(schedule.stop_to_route_ids_map(),
{'26997928P': {'VJbd8660f05fe6f744e58a66ae12bd66acbca88b98'},
'26997928P.link:1': {'VJbd8660f05fe6f744e58a66ae12bd66acbca88b98'}})
assert_semantically_equal(schedule.route('VJbd8660f05fe6f744e58a66ae12bd66acbca88b98').trips,
{'trip_id': ['VJ00938baa194cee94700312812d208fe79f3297ee_04:40:00'],
'trip_departure_time': ['04:40:00'], 'vehicle_id': ['veh_0_bus']})
assert_semantically_equal(
dict(schedule.graph().nodes(data=True)),
{'26997928P': {'services': {'10314'}, 'routes': {'VJbd8660f05fe6f744e58a66ae12bd66acbca88b98'},
'id': '26997928P', 'x': 528464.1342843144, 'y': 182179.7435136598, 'epsg': 'epsg:27700',
                     'name': 'Brunswick Place (Stop P)', 'lat': 51.52393050617373, 'lon': -0.14967658860132668,
's2_id': 5221390302759871369, 'additional_attributes': {'name', 'isBlocking'},
'isBlocking': 'false'},
'26997928P.link:1': {'services': {'10314'}, 'routes': {'VJbd8660f05fe6f744e58a66ae12bd66acbca88b98'},
'id': '26997928P.link:1', 'x': 528464.1342843144, 'y': 182179.7435136598,
'epsg': 'epsg:27700', 'name': 'Brunswick Place (Stop P)', 'lat': 51.52393050617373,
'lon': -0.14967658860132668, 's2_id': 5221390302759871369,
'additional_attributes': {'name', 'linkRefId', 'isBlocking'}, 'linkRefId': '1',
'isBlocking': 'false'}}
)
def test_reading_vehicles_with_a_schedule():
schedule = read.read_matsim_schedule(
path_to_schedule=pt2matsim_schedule_file,
path_to_vehicles=pt2matsim_vehicles_file,
epsg='epsg:27700')
assert_semantically_equal(schedule.vehicles, {'veh_0_bus': {'type': 'bus'}})
assert_semantically_equal(schedule.vehicle_types['bus'], {
'capacity': {'seats': {'persons': '71'}, 'standingRoom': {'persons': '1'}},
'length': {'meter': '18.0'},
'width': {'meter': '2.5'},
'accessTime': {'secondsPerPerson': '0.5'},
'egressTime': {'secondsPerPerson': '0.5'},
'doorOperation': {'mode': 'serial'},
'passengerCarEquivalents': {'pce': '2.8'}})
def test_reading_vehicles_after_reading_schedule():
schedule = read.read_matsim_schedule(
path_to_schedule=pt2matsim_schedule_file,
path_to_vehicles=pt2matsim_vehicles_file,
epsg='epsg:27700')
assert_semantically_equal(schedule.vehicles, {'veh_0_bus': {'type': 'bus'}})
assert_semantically_equal(schedule.vehicle_types['bus'], {
'capacity': {'seats': {'persons': '71'}, 'standingRoom': {'persons': '1'}},
'length': {'meter': '18.0'},
'width': {'meter': '2.5'},
'accessTime': {'secondsPerPerson': '0.5'},
'egressTime': {'secondsPerPerson': '0.5'},
'doorOperation': {'mode': 'serial'},
'passengerCarEquivalents': {'pce': '2.8'}})
def test_is_strongly_connected_with_strongly_connected_schedule(strongly_connected_schedule):
assert strongly_connected_schedule.is_strongly_connected()
def test_is_strongly_connected_with_not_strongly_connected_schedule(schedule):
assert not schedule.is_strongly_connected()
def test_has_self_loops_with_self_has_self_looping_schedule(self_looping_route):
s = Schedule('epsg:27700', [Service(id='service', routes=[self_looping_route])])
assert s.has_self_loops()
def test_has_self_loops_returns_self_looping_stops(self_looping_route):
s = Schedule('epsg:27700', [Service(id='service', routes=[self_looping_route])])
loop_nodes = s.has_self_loops()
assert loop_nodes == ['1']
def test_has_self_loops_with_non_looping_routes(schedule):
assert not schedule.has_self_loops()
def test_validity_of_services(self_looping_route, route):
s = Schedule('epsg:27700', [Service(id='1', routes=[self_looping_route]),
Service(id='2', routes=[route])])
assert not s['1'].is_valid_service()
assert s['2'].is_valid_service()
assert set(s.validity_of_services()) == {False, True}
def test_has_valid_services(schedule):
assert not schedule.has_valid_services()
def test_has_valid_services_with_only_valid_services(service):
s = Schedule('epsg:27700', [service])
assert s.has_valid_services()
def test_invalid_services_shows_invalid_services(service):
for route_id in service.route_ids():
service._graph.graph['routes'][route_id]['route'] = ['1']
s = Schedule('epsg:27700', [service])
assert s.invalid_services() == [service]
def test_is_valid_with_valid_schedule(service):
s = Schedule('epsg:27700', [service])
assert s.is_valid_schedule()
def test_generate_validation_report_delegates_to_method_in_schedule_operations(mocker, schedule):
mocker.patch.object(schedule_validation, 'generate_validation_report')
schedule.generate_validation_report()
schedule_validation.generate_validation_report.assert_called_once()
def test_build_graph_builds_correct_graph(strongly_connected_schedule):
g = strongly_connected_schedule.graph()
assert_semantically_equal(dict(g.nodes(data=True)),
{'5': {'services': {'service'}, 'routes': {'2'}, 'id': '5', 'x': 4.0, 'y': 2.0,
'epsg': 'epsg:27700', 'lat': 49.76682779861249, 'lon': -7.557106577683727,
's2_id': 5205973754090531959, 'additional_attributes': set(), 'name': 'Stop_5'},
'2': {'services': {'service'}, 'routes': {'1', '2'}, 'id': '2', 'x': 1.0, 'y': 2.0,
'epsg': 'epsg:27700', 'lat': 49.766825803756994, 'lon': -7.557148039524952,
's2_id': 5205973754090365183, 'additional_attributes': set(), 'name': 'Stop_2'},
'7': {'services': {'service'}, 'routes': {'2'}, 'id': '7', 'x': 3.0, 'y': 3.0,
'epsg': 'epsg:27700', 'lat': 49.76683608549253, 'lon': -7.557121424907424,
's2_id': 5205973754090203369, 'additional_attributes': set(), 'name': 'Stop_7'},
'8': {'services': {'service'}, 'routes': {'2'}, 'id': '8', 'x': 7.0, 'y': 5.0,
'epsg': 'epsg:27700', 'lat': 49.766856648946295, 'lon': -7.5570681956375,
's2_id': 5205973754097123809, 'additional_attributes': set(), 'name': 'Stop_8'},
'3': {'services': {'service'}, 'routes': {'1'}, 'id': '3', 'x': 3.0, 'y': 3.0,
'epsg': 'epsg:27700', 'lat': 49.76683608549253, 'lon': -7.557121424907424,
's2_id': 5205973754090203369, 'additional_attributes': set(), 'name': 'Stop_3'},
'1': {'services': {'service'}, 'routes': {'1'}, 'id': '1', 'x': 4.0, 'y': 2.0,
'epsg': 'epsg:27700', 'lat': 49.76682779861249, 'lon': -7.557106577683727,
's2_id': 5205973754090531959, 'additional_attributes': set(), 'name': 'Stop_1'},
'4': {'services': {'service'}, 'routes': {'1'}, 'id': '4', 'x': 7.0, 'y': 5.0,
'epsg': 'epsg:27700', 'lat': 49.766856648946295, 'lon': -7.5570681956375,
's2_id': 5205973754097123809, 'additional_attributes': set(), 'name': 'Stop_4'}})
assert_semantically_equal(g.edges(data=True)._adjdict,
{'5': {'2': {'services': {'service'}, 'routes': {'2'}}},
'2': {'7': {'services': {'service'}, 'routes': {'2'}},
'3': {'services': {'service'}, 'routes': {'1'}}},
'7': {'8': {'services': {'service'}, 'routes': {'2'}}},
'8': {'5': {'services': {'service'}, 'routes': {'2'}}},
'4': {'1': {'services': {'service'}, 'routes': {'1'}}},
'1': {'2': {'services': {'service'}, 'routes': {'1'}}},
'3': {'4': {'services': {'service'}, 'routes': {'1'}}}})
def test_building_trips_dataframe(schedule):
df = schedule.route_trips_with_stops_to_dataframe()
correct_df = DataFrame({'departure_time': {0: Timestamp('1970-01-01 13:00:00'), 1: Timestamp('1970-01-01 13:05:00'),
2: Timestamp('1970-01-01 13:09:00'), 3: Timestamp('1970-01-01 13:30:00'),
4: Timestamp('1970-01-01 13:35:00'), 5: Timestamp('1970-01-01 13:39:00'),
6: Timestamp('1970-01-01 11:00:00'), 7: Timestamp('1970-01-01 11:05:00'),
8: Timestamp('1970-01-01 11:09:00'), 9: Timestamp('1970-01-01 13:00:00'),
10: Timestamp('1970-01-01 13:05:00'),
11: Timestamp('1970-01-01 13:09:00')},
'arrival_time': {0: Timestamp('1970-01-01 13:03:00'), 1: Timestamp('1970-01-01 13:07:00'),
2: Timestamp('1970-01-01 13:13:00'), 3: Timestamp('1970-01-01 13:33:00'),
4: Timestamp('1970-01-01 13:37:00'), 5: Timestamp('1970-01-01 13:43:00'),
6: Timestamp('1970-01-01 11:03:00'), 7: Timestamp('1970-01-01 11:07:00'),
8: Timestamp('1970-01-01 11:13:00'), 9: Timestamp('1970-01-01 13:03:00'),
10: Timestamp('1970-01-01 13:07:00'),
                                             11: Timestamp('1970-01-01 13:13:00')},
# -*- coding: utf-8 -*-
'''Questionnaire data analysis toolkit
Created on Tue Nov 8 20:05:36 2016
@author: JSong
1. Implements and wraps many algorithms commonly used on Wenjuanxing (问卷星) survey exports
2. Uses the report package to export results directly to PPTX
Supported features:
1. Encode raw data exported from Wenjuanxing, Wenjuanwang (问卷网) and similar survey platforms
2. Wrap descriptive statistics and cross-tabulation functions
3. Generate a complete report together with the underlying data
'''
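# A typical workflow with this module might look like the following sketch
# (paths are hypothetical; see the individual functions for details):
#   data, code = wenjuanxing('.\\data')   # encode a raw Wenjuanxing export
#   save_code(code, 'code.xlsx')          # persist the question metadata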
import os
import re
import sys
import math
import time
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from .. import report as rpt
from .. import associate
__all__=['read_code',
'save_code',
'spec_rcode',
'dataText_to_code',
'dataCode_to_text',
'var_combine',
'wenjuanwang',
'wenjuanxing',
'load_data',
'read_data',
'save_data',
'data_merge',
'clean_ftime',
'data_auto_code',
'qdata_flatten',
'sample_size_cal',
'confidence_interval',
'gof_test',
'chi2_test',
'fisher_exact',
'anova',
'mca',
'cluster',
'scatter',
'sankey',
'qtable',
'association_rules',
'contingency',
'cross_chart',
'summary_chart',
'onekey_gen',
'scorpion']
#=================================================================
#
#
#                          【Questionnaire data processing】
#
#
#==================================================================
def read_code(filename):
    '''Read a code (question metadata) file and return it as a dict.
    1. Supports the JSON format
    2. Supports the xlsx layout defined by this package
    see also save_code
    '''
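    # Hypothetical usage, assuming 'code.xlsx' was written by save_code():
    #   code = read_code('code.xlsx')
    #   code['Q1']['qtype']   # e.g. '单选题'
    #   code['Q1']['qlist']   # data columns belonging to Q1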
file_type=os.path.splitext(filename)[1][1:]
if file_type == 'json':
import json
        # json.load expects a file object rather than a path
        with open(filename, 'r', encoding='utf-8') as f:
            code=json.load(f)
return code
d=pd.read_excel(filename,header=None)
    d=d[d.any(axis=1)]  # drop empty rows
d.fillna('NULL',inplace=True)
    d=d.values  # DataFrame.as_matrix() was removed in newer pandas versions
code={}
for i in range(len(d)):
tmp=d[i,0].strip()
if tmp == 'key':
            # a 'key' row marks the start of a new question block
code[d[i,1]]={}
key=d[i,1]
elif tmp in ['qlist','code_order']:
            # fields whose value is a list
ind=np.argwhere(d[i+1:,0]!='NULL')
if len(ind)>0:
j=i+1+ind[0][0]
else:
j=len(d)
tmp2=list(d[i:j,1])
            # normalise strings in the list: strip leading/trailing whitespace
for i in range(len(tmp2)):
if isinstance(tmp2[i],str):
tmp2[i]=tmp2[i].strip()
code[key][tmp]=tmp2
elif tmp in ['code','code_r']:
            # fields whose value is a dict
ind=np.argwhere(d[i+1:,0]!='NULL')
if len(ind)>0:
j=i+1+ind[0][0]
else:
j=len(d)
tmp1=list(d[i:j,1])
tmp2=list(d[i:j,2])
for i in range(len(tmp2)):
if isinstance(tmp2[i],str):
tmp2[i]=tmp2[i].strip()
#tmp2=[s.strip() for s in tmp2 if isinstance(s,str) else s]
code[key][tmp]=dict(zip(tmp1,tmp2))
        # other list-valued fields
elif (tmp!='NULL') and (d[i,2]=='NULL') and ((i==len(d)-1) or (d[i+1,0]=='NULL')):
ind=np.argwhere(d[i+1:,0]!='NULL')
if len(ind)>0:
j=i+1+ind[0][0]
else:
j=len(d)
if i==len(d)-1:
code[key][tmp]=d[i,1]
else:
tmp2=list(d[i:j,1])
for i in range(len(tmp2)):
if isinstance(tmp2[i],str):
tmp2[i]=tmp2[i].strip()
code[key][tmp]=tmp2
        # other dict-valued fields
elif (tmp!='NULL') and (d[i,2]!='NULL') and ((i==len(d)-1) or (d[i+1,0]=='NULL')):
ind=np.argwhere(d[i+1:,0]!='NULL')
if len(ind)>0:
j=i+1+ind[0][0]
else:
j=len(d)
tmp1=list(d[i:j,1])
tmp2=list(d[i:j,2])
for i in range(len(tmp2)):
if isinstance(tmp2[i],str):
tmp2[i]=tmp2[i].strip()
#tmp2=[s.strip() for s in tmp2 if isinstance(s,str) else s]
code[key][tmp]=dict(zip(tmp1,tmp2))
elif tmp == 'NULL':
continue
else:
code[key][tmp]=d[i,1]
return code
def save_code(code,filename='code.xlsx'):
    '''Write a code dict to disk.
    1. JSON output, detected automatically from the file extension
    2. Excel (xlsx) output
    see also read_code
    '''
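    # Hypothetical usage, round-tripping the metadata produced by wenjuanxing()/wenjuanwang():
    #   save_code(code, 'code.xlsx')
    #   code2 = read_code('code.xlsx')   # code2 should match code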
save_type=os.path.splitext(filename)[1][1:]
if save_type == 'json':
code=pd.DataFrame(code)
code.to_json(filename,force_ascii=False)
return
tmp=pd.DataFrame(columns=['name','value1','value2'])
i=0
if all(['Q' in c[0] for c in code.keys()]):
key_qlist=sorted(code,key=lambda c:int(re.findall('\d+',c)[0]))
else:
key_qlist=code.keys()
for key in key_qlist:
code0=code[key]
tmp.loc[i]=['key',key,'']
i+=1
#print(key)
for key0 in code0:
tmp2=code0[key0]
if (type(tmp2) == list) and tmp2:
tmp.loc[i]=[key0,tmp2[0],'']
i+=1
for ll in tmp2[1:]:
tmp.loc[i]=['',ll,'']
i+=1
elif (type(tmp2) == dict) and tmp2:
try:
tmp2_key=sorted(tmp2,key=lambda c:float(re.findall('[\d\.]+','%s'%c)[-1]))
except:
tmp2_key=list(tmp2.keys())
j=0
for key1 in tmp2_key:
if j==0:
tmp.loc[i]=[key0,key1,tmp2[key1]]
else:
tmp.loc[i]=['',key1,tmp2[key1]]
i+=1
j+=1
else:
if tmp2:
tmp.loc[i]=[key0,tmp2,'']
i+=1
if sys.version>'3':
tmp.to_excel(filename,index=False,header=False)
else:
tmp.to_csv(filename,index=False,header=False,encoding='utf-8')
'''Questionnaire data import and encoding
Each question is encoded under a key such as Q1, Q2, ...
Qn.content: question text
Qn.qtype: question type, e.g. 单选题 (single choice), 多选题 (multiple choice), 填空题 (open text), 排序题 (ranking), 矩阵单选题 (matrix single choice)
Qn.qlist: list of data columns belonging to the question, e.g. one column per option for multiple-choice questions
Qn.code: dict, encoding of the question options
Qn.code_r: encoding of the rows (matrix questions only)
Qn.code_order: display order of the categories, used when generating the PPT report [usually added later]
Qn.name: special question type, e.g. city question, NPS
Qn.weight: dict, weight of each option
'''
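# A minimal illustration (hypothetical values) of the structure described above:
# code = {
#     'Q1': {'content': '性别', 'qtype': '单选题', 'qlist': ['Q1'],
#            'code': {1: '男', 2: '女'}, 'name': '性别'},
#     'Q2': {'content': '您用过哪些产品', 'qtype': '多选题',
#            'qlist': ['Q2_A1', 'Q2_A2'],
#            'code': {'Q2_A1': '产品A', 'Q2_A2': '产品B'}},
# }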
def dataText_to_code(df,sep,qqlist=None):
    '''Encode answers stored as delimited text into numeric columns.
    sep ';' or '┋' marks a multiple-choice question, '-->' or '→' a ranking question.
    '''
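    # Hypothetical usage on a raw text column of multiple-choice answers:
    #   df, code = dataText_to_code(df, sep='┋', qqlist=['Q2'])
    #   # 'Q2' is replaced by dummy columns Q2_A1, Q2_A2, ... and code['Q2'] describes them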
if sep in [';','┋']:
qtype='多选题'
elif sep in ['-->','→']:
qtype='排序题'
if not qqlist:
qqlist=df.columns
    # split the delimited answers and build one column per option
code={}
for qq in qqlist:
tmp=df[qq].map(lambda x : x.split(sep) if isinstance(x,str) else [])
item_list=sorted(set(tmp.sum()))
if qtype == '多选题':
tmp=tmp.map(lambda x: [int(t in x) for t in item_list])
code_tmp={'code':{},'qtype':u'多选题','qlist':[],'content':qq}
elif qtype == '排序题':
tmp=tmp.map(lambda x:[x.index(t)+1 if t in x else np.nan for t in item_list])
code_tmp={'code':{},'qtype':u'排序题','qlist':[],'content':qq}
for i,t in enumerate(item_list):
column_name='{}_A{:.0f}'.format(qq,i+1)
df[column_name]=tmp.map(lambda x:x[i])
code_tmp['code'][column_name]=item_list[i]
code_tmp['qlist']=code_tmp['qlist']+[column_name]
code[qq]=code_tmp
df.drop(qq,axis=1,inplace=True)
return df,code
def dataCode_to_text(df,code=None):
    '''Convert encoded (numeric) answers back into delimited text.
    '''
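    # Hypothetical usage (roughly the inverse of dataText_to_code for one question):
    #   text = dataCode_to_text(data[code['Q2']['qlist']], code=code['Q2']['code'])
    #   # returns a DataFrame with a single 'text' column such as '产品A┋产品B'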
if df.max().max()>1:
sep='→'
else:
sep='┋'
if code:
        df=df.rename(columns=code)  # map column names to option labels
qlist=list(df.columns)
df['text']=np.nan
if sep in ['┋']:
for i in df.index:
w=df.loc[i,:]==1
df.loc[i,'text']=sep.join(list(w.index[w]))
elif sep in ['→']:
for i in df.index:
w=df.loc[i,:]
w=w[w>=1].sort_values()
df.loc[i,'text']=sep.join(list(w.index))
df.drop(qlist,axis=1,inplace=True)
return df
def var_combine(data,code,qq1,qq2,sep=',',qnum_new=None,qname_new=None):
    '''Combine two single-choice variables into a single new variable.
    Example:
        Q1: gender, Q2: age
        the combined variable then has categories such as:
        1. 男_16~19岁
        2. 男_20~40岁
        3. 女_16~19岁
        4. 女_20~40岁
    '''
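    # Hypothetical usage, assuming Q1 and Q2 are both single-choice questions:
    #   data, code = var_combine(data, code, 'Q1', 'Q2', sep='_')
    #   # a new question (default id 'Q1_2') is appended to both data and code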
if qnum_new is None:
if 'Q'==qq2[0]:
qnum_new=qq1+'_'+qq2[1:]
else:
qnum_new=qq1+'_'+qq2
if qname_new is None:
qname_new=code[qq1]['content']+'_'+code[qq2]['content']
if code[qq1]['qtype']!='单选题' or code[qq2]['qtype']!='单选题':
        print('Only two single-choice questions can be combined, please check.')
raise
d1=data[code[qq1]['qlist'][0]]
d2=data[code[qq2]['qlist'][0]]
    sm=max(code[qq1]['code'].keys())  # number of levels, used as the radix of the combined code
    sn=max(code[qq2]['code'].keys())  # number of levels, used as the radix of the combined code
if isinstance(sm,str) or isinstance(sn,str):
        print('The selected variables do not meet the requirements of this function.')
raise
data[qnum_new]=(d1-1)*sn+d2
code[qnum_new]={'qtype':'单选题','qlist':[qnum_new],'content':qname_new}
code_tmp={}
for c1 in code[qq1]['code']:
for c2 in code[qq2]['code']:
cc=(c1-1)*sn+c2
value='{}{}{}'.format(code[qq1]['code'][c1],sep,code[qq2]['code'][c2])
code_tmp[cc]=value
code[qnum_new]['code']=code_tmp
    print('Variables combined; the new question id is: {}'.format(qnum_new))
return data,code
def wenjuanwang(filepath='.\\data',encoding='gbk'):
    '''Import and encode Wenjuanwang (问卷网) survey data.
    Input:
    filepath:
        either a list: [0] path to the "by text" data, [1] path to the "by code" data, [2] path to the code file,
        or a folder path, in which case the relevant files are searched for automatically
    Output:
    (data,code):
        data is the "by code" data with questions renamed to Q1, Q2, ...
        code is the question metadata, which can be exported to JSON or Excel with save_code()
    '''
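    # Hypothetical usage, assuming the three Wenjuanwang export files live in .\data:
    #   data, code = wenjuanwang('.\\data')
    #   save_code(code, 'code.xlsx')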
if isinstance(filepath,list):
filename1=filepath[0]
filename2=filepath[1]
filename3=filepath[2]
elif os.path.isdir(filepath):
filename1=os.path.join(filepath,'All_Data_Readable.csv')
filename2=os.path.join(filepath,'All_Data_Original.csv')
filename3=os.path.join(filepath,'code.csv')
else:
        print('cannot detect the filepath!')
d1=pd.read_csv(filename1,encoding=encoding)
d1.drop([u'答题时长'],axis=1,inplace=True)
d2=pd.read_csv(filename2,encoding=encoding)
d3=pd.read_csv(filename3,encoding=encoding,header=None,na_filter=False)
    d3=d3.values  # DataFrame.as_matrix() was removed in newer pandas versions
    # walk through code.csv to build a rough encoding; qlist and code_r (matrix questions) are filled in later
code={}
for i in range(len(d3)):
if d3[i,0]:
key=d3[i,0]
code[key]={}
code[key]['content']=d3[i,1]
code[key]['qtype']=d3[i,2]
code[key]['code']={}
code[key]['qlist']=[]
elif d3[i,2]:
tmp=d3[i,1]
if code[key]['qtype'] in [u'多选题',u'排序题']:
tmp=key+'_A'+'%s'%(tmp)
code[key]['code'][tmp]='%s'%(d3[i,2])
code[key]['qlist'].append(tmp)
elif code[key]['qtype'] in [u'单选题']:
try:
tmp=int(tmp)
except:
tmp='%s'%(tmp)
code[key]['code'][tmp]='%s'%(d3[i,2])
code[key]['qlist']=[key]
elif code[key]['qtype'] in [u'填空题']:
code[key]['qlist']=[key]
else:
try:
tmp=int(tmp)
except:
tmp='%s'%(tmp)
code[key]['code'][tmp]='%s'%(d3[i,2])
    # fill in code_r and qlist for matrix single-choice questions
qnames_Readable=list(d1.columns)
qnames=list(d2.columns)
for key in code.keys():
qlist=[]
for name in qnames:
if re.match(key+'_',name) or key==name:
qlist.append(name)
if ('qlist' not in code[key]) or (not code[key]['qlist']):
code[key]['qlist']=qlist
if code[key]['qtype'] in [u'矩阵单选题']:
tmp=[qnames_Readable[qnames.index(q)] for q in code[key]['qlist']]
code_r=[re.findall('_([^_]*?)$',t)[0] for t in tmp]
code[key]['code_r']=dict(zip(code[key]['qlist'],code_r))
    # parse the start/finish timestamps and add the completion time in seconds
d2['start']=pd.to_datetime(d2['start'])
d2['finish']=pd.to_datetime(d2['finish'])
tmp=d2['finish']-d2['start']
tmp=tmp.astype(str).map(lambda x:60*int(re.findall(':(\d+):',x)[0])+int(re.findall(':(\d+)\.',x)[0]))
ind=np.where(d2.columns=='finish')[0][0]
d2.insert(int(ind)+1,u'答题时长(秒)',tmp)
return (d2,code)
def wenjuanxing(filepath='.\\data',headlen=6):
    '''Import and encode Wenjuanxing (问卷星) survey data.
    Input:
    filepath:
        either a list: filepath[0] (e.g. 23_22_0.xls) is the "by text" export, filepath[1] (e.g. 23_22_2.xls) the "by code" export,
        or a folder path, in which case files matching \d+_\d+_0.xls and \d+_\d+_2.xls are searched for automatically
    headlen: number of metadata columns at the start of a Wenjuanxing export
    Output:
    (data,code):
        data is the "by code" data with questions renamed to Q1, Q2, ...
        code is the question metadata, which can be exported to JSON or Excel with save_code()
    '''
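    # Hypothetical usage, assuming the two Wenjuanxing exports live in .\data:
    #   data, code = wenjuanxing('.\\data')
    #   code['Q1']['qtype']   # question types are inferred by comparing the two exports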
#filepath='.\\data'
    #headlen=6  # number of columns before the first real question (serial number, submission time, etc.)
if isinstance(filepath,list):
filename1=filepath[0]
filename2=filepath[1]
elif os.path.isdir(filepath):
filelist=os.listdir(filepath)
n1=n2=0
for f in filelist:
s1=re.findall('\d+_\d+_0.xls',f)
s2=re.findall('\d+_\d+_2.xls',f)
if s1:
filename1=s1[0]
n1+=1
if s2:
filename2=s2[0]
n2+=1
if n1+n2==0:
            print(u'Could not find the Wenjuanxing "by code" and "by text" exports in the folder; please check the directory or the working directory.')
return
elif n1+n2>2:
            print(u'More than one set of Wenjuanxing data found; please check.')
return
filename1=os.path.join(filepath,filename1)
filename2=os.path.join(filepath,filename2)
else:
        print('cannot detect the filepath!')
d1=pd.read_excel(filename1)
d2=pd.read_excel(filename2)
d2.replace({-2:np.nan,-3:np.nan},inplace=True)
#d1.replace({u'(跳过)':np.nan},inplace=True)
code={}
'''
    First pass over the "by text" data: detect question numbers and question types
'''
for name in d1.columns[headlen:]:
tmp=re.findall(u'^(\d{1,3})[、::]',name)
        # detect multiple-choice and ranking questions
if tmp:
new_name='Q'+tmp[0]
current_name='Q'+tmp[0]
code[new_name]={}
content=re.findall(u'\d{1,3}[、::](.*)',name)
code[new_name]['content']=content[0]
d1.rename(columns={name:new_name},inplace=True)
code[new_name]['qlist']=[]
code[new_name]['code']={}
code[new_name]['qtype']=''
code[new_name]['name']=''
qcontent=str(list(d1[new_name]))
            # single- and multiple-choice options may carry open-ended sub-answers wrapped in 〖〗; detect them
if ('〖' in qcontent) and ('〗' in qcontent):
code[new_name]['qlist_open']=[]
if '┋' in qcontent:
code[new_name]['qtype']=u'多选题'
elif '→' in qcontent:
code[new_name]['qtype']=u'排序题'
        # detect matrix single-choice questions
else:
tmp2=re.findall(u'^第(\d{1,3})题\(.*?\)',name)
if tmp2:
new_name='Q'+tmp2[0]
else:
pass
if new_name not in code.keys():
j=1
current_name=new_name
new_name=new_name+'_R%s'%j
code[current_name]={}
code[current_name]['content']=current_name+'(问卷星数据中未找到题目具体内容)'
code[current_name]['qlist']=[]
code[current_name]['code']={}
code[current_name]['code_r']={}
code[current_name]['qtype']=u'矩阵单选题'
code[current_name]['name']=''
#code[current_name]['sample_len']=0
d1.rename(columns={name:new_name},inplace=True)
else:
j+=1
new_name=new_name+'_R%s'%j
d1.rename(columns={name:new_name},inplace=True)
#raise Exception(u"can not dection the NO. of question.")
#print('can not dection the NO. of question')
#print(name)
#pass
    # Second pass over the "by code" data: complete the encoding
d2qlist=d2.columns[6:].tolist()
for name in d2qlist:
        tmp1=re.findall(u'^(\d{1,3})[、::]',name)  # single-choice and open-text questions
        tmp2=re.findall(u'^第(.*?)题',name)  # multiple-choice, ranking and matrix single-choice questions
if tmp1:
            current_name='Q'+tmp1[0]  # id of the current question
d2.rename(columns={name:current_name},inplace=True)
code[current_name]['qlist'].append(current_name)
#code[current_name]['sample_len']=d2[current_name].count()
ind=d2[current_name].copy()
ind=ind.notnull()
c1=d1.loc[ind,current_name].unique()
c2=d2.loc[ind,current_name].unique()
#print('========= %s========'%current_name)
if (c2.dtype == object) or ((list(c1)==list(c2)) and len(c2)>=min(15,len(d2[ind]))) or (len(c2)>50):
code[current_name]['qtype']=u'填空题'
else:
code[current_name]['qtype']=u'单选题'
#code[current_name]['code']=dict(zip(c2,c1))
if 'qlist_open' in code[current_name].keys():
tmp=d1[current_name].map(lambda x: re.findall('〖(.*?)〗',x)[0] if re.findall('〖(.*?)〗',x) else '')
ind_open=np.argwhere(d2.columns.values==current_name).tolist()[0][0]
d2.insert(ind_open+1,current_name+'_open',tmp)
d1[current_name]=d1[current_name].map(lambda x: re.sub('〖.*?〗','',x))
#c1=d1.loc[ind,current_name].map(lambda x: re.sub('〖.*?〗','',x)).unique()
code[current_name]['qlist_open']=[current_name+'_open']
#c2_tmp=d2.loc[ind,current_name].map(lambda x: int(x) if (('%s'%x!='nan') and not(isinstance(x,str)) and (int(x)==x)) else x)
code[current_name]['code']=dict(zip(d2.loc[ind,current_name],d1.loc[ind,current_name]))
#code[current_name]['code']=dict(zip(c2,c1))
elif tmp2:
name0='Q'+tmp2[0]
            # first option of a new question
if name0 != current_name:
                j=1  # option counter within the question
current_name=name0
c2=list(d2[name].unique())
if code[current_name]['qtype'] == u'矩阵单选题':
name1='Q'+tmp2[0]+'_R%s'%j
c1=list(d1[name1].unique())
code[current_name]['code']=dict(zip(c2,c1))
#print(dict(zip(c2,c1)))
else:
name1='Q'+tmp2[0]+'_A%s'%j
#code[current_name]['sample_len']=d2[name].notnull().sum()
else:
                j+=1  # option counter within the question
c2=list(d2[name].unique())
if code[current_name]['qtype'] == u'矩阵单选题':
name1='Q'+tmp2[0]+'_R%s'%j
c1=list(d1[name1].unique())
old_dict=code[current_name]['code'].copy()
new_dict=dict(zip(c2,c1))
old_dict.update(new_dict)
code[current_name]['code']=old_dict.copy()
else:
name1='Q'+tmp2[0]+'_A%s'%j
code[current_name]['qlist'].append(name1)
d2.rename(columns={name:name1},inplace=True)
tmp3=re.findall(u'第.*?题\((.*)\)',name)[0]
if code[current_name]['qtype'] == u'矩阵单选题':
code[current_name]['code_r'][name1]=tmp3
else:
code[current_name]['code'][name1]=tmp3
            # detect open-ended sub-answers
if (code[current_name]['qtype'] == u'多选题'):
openq=tmp3+'〖.*?〗'
openq=re.sub('\)','\)',openq)
openq=re.sub('\(','\(',openq)
openq=re.compile(openq)
qcontent=str(list(d1[current_name]))
if re.findall(openq,qcontent):
tmp=d1[current_name].map(lambda x: re.findall(openq,x)[0] if re.findall(openq,x) else '')
ind=np.argwhere(d2.columns.values==name1).tolist()[0][0]
d2.insert(ind+1,name1+'_open',tmp)
code[current_name]['qlist_open'].append(name1+'_open')
        # remove nan keys from the code dict
keys=list(code[current_name]['code'].keys())
for key in keys:
if '%s'%key == 'nan':
del code[current_name]['code'][key]
    # post-process special questions and fix their option order, e.g. age, income
for k in code.keys():
content=code[k]['content']
qtype=code[k]['qtype']
if ('code' in code[k]) and (code[k]['code']!={}):
tmp1=code[k]['code'].keys()
tmp2=code[k]['code'].values()
            # check whether the options form an ordered variable
            tmp3=[len(re.findall('\d+','%s'%v))>0 for v in tmp2]  # options containing digits
            tmp4=[len(re.findall('-|~','%s'%v))>0 for v in tmp2]  # options containing "-" or "~"
if (np.array(tmp3).sum()>=len(tmp2)-2) or (np.array(tmp4).sum()>=len(tmp2)*0.8-(1e-17)):
try:
tmp_key=sorted(code[k]['code'],key=lambda c:float(re.findall('[\d\.]+','%s'%c)[-1]))
except:
tmp_key=list(tmp1)
code_order=[code[k]['code'][v] for v in tmp_key]
code[k]['code_order']=code_order
# 识别矩阵量表题
if qtype=='矩阵单选题':
tmp3=[int(re.findall('\d+','%s'%v)[0]) for v in tmp2 if re.findall('\d+','%s'%v)]
if (set(tmp3)<=set([0,1,2,3,4,5,6,7,8,9,10])) and (len(tmp3)==len(tmp2)):
code[k]['weight']=dict(zip(tmp1,tmp3))
continue
# 识别特殊题型
if ('性别' in content) and ('男' in tmp2) and ('女' in tmp2):
code[k]['name']='性别'
if ('gender' in content.lower()) and ('Male' in tmp2) and ('Female' in tmp2):
code[k]['name']='性别'
if (('年龄' in content) or ('age' in content.lower())) and (np.array(tmp3).sum()>=len(tmp2)-1):
code[k]['name']='年龄'
if ('满意度' in content) and ('整体' in content):
tmp3=[int(re.findall('\d+','%s'%v)[0]) for v in tmp2 if re.findall('\d+','%s'%v)]
if set(tmp3)<=set([0,1,2,3,4,5,6,7,8,9,10]):
code[k]['name']='满意度'
if len(tmp3)==len(tmp2):
code[k]['weight']=dict(zip(tmp1,tmp3))
if ('意愿' in content) and ('推荐' in content):
tmp3=[int(re.findall('\d+','%s'%v)[0]) for v in tmp2 if re.findall('\d+','%s'%v)]
if set(tmp3)<=set([0,1,2,3,4,5,6,7,8,9,10]):
code[k]['name']='NPS'
if len(tmp3)==len(tmp2):
weight=pd.Series(dict(zip(tmp1,tmp3)))
weight=weight.replace(dict(zip([0,1,2,3,4,5,6,7,8,9,10],[-100,-100,-100,-100,-100,-100,-100,0,0,100,100])))
code[k]['weight']=weight.to_dict()
try:
d2[u'所用时间']=d2[u'所用时间'].map(lambda s: int(s[:-1]))
except:
pass
return (d2,code)
def load_data(method='filedialog',**kwargs):
'''导入问卷数据
# 暂时只支持已编码的和问卷星数据
1、支持路径搜寻
2、支持自由选择文件
method:
-filedialog: 打开文件窗口选择
-pathsearch:自带搜索路径,需提供filepath
'''
if method=='filedialog':
import tkinter as tk
from tkinter.filedialog import askopenfilenames
tk.Tk().withdraw();
#print(u'请选择编码所需要的数据文件(支持问卷星和已编码好的数据)')
if 'initialdir' in kwargs:
initialdir=kwargs['initialdir']
elif os.path.isdir('.\\data'):
initialdir = ".\\data"
else:
initialdir = "."
title =u"请选择编码所需要的数据文件(支持问卷星和已编码好的数据)"
filetypes = (("Excel files","*.xls;*.xlsx"),("CSV files","*.csv"),("all files","*.*"))
filenames=[]
while len(filenames)<1:
filenames=askopenfilenames(initialdir=initialdir,title=title,filetypes=filetypes)
if len(filenames)<1:
print('请至少选择一个文件.')
filenames=list(filenames)
elif method == 'pathsearch':
if 'filepath' in kwargs:
filepath=kwargs['filepath']
else :
filepath='.\\data\\'
if os.path.isdir(filepath):
filenames=os.listdir(filepath)
filenames=[os.path.join(filepath,s) for s in filenames]
else:
print('搜索路径错误')
            raise FileNotFoundError(filepath)
info=[]
for filename in filenames:
filename_nopath=os.path.split(filename)[1]
data=read_data(filename)
# 第一列包含的字段
field_c1=set(data.iloc[:,0].dropna().unique())
field_r1=set(data.columns)
# 列名是否包含Q
hqlen=[len(re.findall('^[qQ]\d+',c))>0 for c in field_r1]
hqrate=hqlen.count(True)/len(field_r1) if len(field_r1)>0 else 0
rowlens,collens=data.shape
# 数据中整数/浮点数的占比
rate_real=data.applymap(lambda x:isinstance(x,(int,float))).sum().sum()/rowlens/collens
tmp={'filename':filename_nopath,'filenametype':'','rowlens':rowlens,'collens':collens,\
'field_c1':field_c1,'field_r1':field_r1,'type':'','rate_real':rate_real}
if len(re.findall('^data.*\.xls',filename_nopath))>0:
tmp['filenametype']='data'
elif len(re.findall('^code.*\.xls',filename_nopath))>0:
tmp['filenametype']='code'
elif len(re.findall('\d+_\d+_\d.xls',filename_nopath))>0:
tmp['filenametype']='wenjuanxing'
if tmp['filenametype']=='code' or set(['key','code','qlist','qtype']) < field_c1:
tmp['type']='code'
if tmp['filenametype']=='wenjuanxing' or len(set(['序号','提交答卷时间','所用时间','来自IP','来源','来源详情','总分'])&field_r1)>=5:
tmp['type']='wenjuanxing'
if tmp['filenametype']=='data' or hqrate>=0.5:
tmp['type']='data'
info.append(tmp)
questype=[k['type'] for k in info]
# 这里有一个优先级存在,优先使用已编码好的数据,其次是问卷星数据
if questype.count('data')*questype.count('code')==1:
data=read_data(filenames[questype.index('data')])
code=read_code(filenames[questype.index('code')])
elif questype.count('wenjuanxing')>=2:
filenames=[(f,info[i]['rate_real']) for i,f in enumerate(filenames) if questype[i]=='wenjuanxing']
tmp=[]
for f,rate_real in filenames:
t2=0 if rate_real<0.5 else 2
d=pd.read_excel(f)
d=d.iloc[:,0]
tmp.append((t2,d))
#print('添加{}'.format(t2))
tmp_equal=0
for t,d0 in tmp[:-1]:
if len(d)==len(d0) and all(d==d0):
tmp_equal+=1
tmp[-1]=(t2+int(t/10)*10,tmp[-1][1])
max_quesnum=max([int(t/10) for t,d in tmp])
if tmp_equal==0:
tmp[-1]=(tmp[-1][0]+max_quesnum*10+10,tmp[-1][1])
#print('修改为{}'.format(tmp[-1][0]))
# 重新整理所有的问卷数据
questype=[t for t,d in tmp]
filenames=[f for f,r in filenames]
quesnums=max([int(t/10) for t in questype])#可能存在的数据组数
filename_wjx=[]
for i in range(1,quesnums+1):
if questype.count(i*10)==1 and questype.count(i*10+2)==1:
filename_wjx.append([filenames[questype.index(i*10)],filenames[questype.index(i*10+2)]])
if len(filename_wjx)==1:
data,code=wenjuanxing(filename_wjx[0])
elif len(filename_wjx)>1:
print('脚本识别出多组问卷星数据,请选择需要编码的数据:')
for i,f in enumerate(filename_wjx):
print('{}: {}'.format(i+1,'/'.join([os.path.split(f[0])[1],os.path.split(f[1])[1]])))
ii=input('您选择的数据是(数据前的编码,如:1):')
ii=re.sub('\s','',ii)
if ii.isnumeric():
data,code=wenjuanxing(filename_wjx[int(ii)-1])
else:
                print('您输入的编码不正确, 无法继续.')
                raise ValueError('invalid input: %s'%ii)
else:
print('没有找到任何问卷数据..')
            raise FileNotFoundError('没有找到任何问卷数据')
else:
print('没有找到任何数据')
        raise FileNotFoundError('没有找到任何数据')
return data,code
def spec_rcode(data,code):
city={'北京':0,'上海':0,'广州':0,'深圳':0,'成都':1,'杭州':1,'武汉':1,'天津':1,'南京':1,'重庆':1,'西安':1,'长沙':1,'青岛':1,'沈阳':1,'大连':1,'厦门':1,'苏州':1,'宁波':1,'无锡':1,\
'福州':2,'合肥':2,'郑州':2,'哈尔滨':2,'佛山':2,'济南':2,'东莞':2,'昆明':2,'太原':2,'南昌':2,'南宁':2,'温州':2,'石家庄':2,'长春':2,'泉州':2,'贵阳':2,'常州':2,'珠海':2,'金华':2,\
'烟台':2,'海口':2,'惠州':2,'乌鲁木齐':2,'徐州':2,'嘉兴':2,'潍坊':2,'洛阳':2,'南通':2,'扬州':2,'汕头':2,'兰州':3,'桂林':3,'三亚':3,'呼和浩特':3,'绍兴':3,'泰州':3,'银川':3,'中山':3,\
'保定':3,'西宁':3,'芜湖':3,'赣州':3,'绵阳':3,'漳州':3,'莆田':3,'威海':3,'邯郸':3,'临沂':3,'唐山':3,'台州':3,'宜昌':3,'湖州':3,'包头':3,'济宁':3,'盐城':3,'鞍山':3,'廊坊':3,'衡阳':3,\
'秦皇岛':3,'吉林':3,'大庆':3,'淮安':3,'丽江':3,'揭阳':3,'荆州':3,'连云港':3,'张家口':3,'遵义':3,'上饶':3,'龙岩':3,'衢州':3,'赤峰':3,'湛江':3,'运城':3,'鄂尔多斯':3,'岳阳':3,'安阳':3,\
'株洲':3,'镇江':3,'淄博':3,'郴州':3,'南平':3,'齐齐哈尔':3,'常德':3,'柳州':3,'咸阳':3,'南充':3,'泸州':3,'蚌埠':3,'邢台':3,'舟山':3,'宝鸡':3,'德阳':3,'抚顺':3,'宜宾':3,'宜春':3,'怀化':3,\
'榆林':3,'梅州':3,'呼伦贝尔':3,'临汾':4,'南阳':4,'新乡':4,'肇庆':4,'丹东':4,'德州':4,'菏泽':4,'九江':4,'江门市':4,'黄山':4,'渭南':4,'营口':4,'娄底':4,'永州市':4,'邵阳':4,'清远':4,\
'大同':4,'枣庄':4,'北海':4,'丽水':4,'孝感':4,'沧州':4,'马鞍山':4,'聊城':4,'三明':4,'开封':4,'锦州':4,'汉中':4,'商丘':4,'泰安':4,'通辽':4,'牡丹江':4,'曲靖':4,'东营':4,'韶关':4,'拉萨':4,\
'襄阳':4,'湘潭':4,'盘锦':4,'驻马店':4,'酒泉':4,'安庆':4,'宁德':4,'四平':4,'晋中':4,'滁州':4,'衡水':4,'佳木斯':4,'茂名':4,'十堰':4,'宿迁':4,'潮州':4,'承德':4,'葫芦岛':4,'黄冈':4,'本溪':4,\
'绥化':4,'萍乡':4,'许昌':4,'日照':4,'铁岭':4,'大理州':4,'淮南':4,'延边州':4,'咸宁':4,'信阳':4,'吕梁':4,'辽阳':4,'朝阳':4,'恩施州':4,'达州市':4,'益阳市':4,'平顶山':4,'六安':4,'延安':4,\
'梧州':4,'白山':4,'阜阳':4,'铜陵市':4,'河源':4,'玉溪市':4,'黄石':4,'通化':4,'百色':4,'乐山市':4,'抚州市':4,'钦州':4,'阳江':4,'池州市':4,'广元':4,'滨州':5,'阳泉':5,'周口市':5,'遂宁':5,\
'吉安':5,'长治':5,'铜仁':5,'鹤岗':5,'攀枝花':5,'昭通':5,'云浮':5,'伊犁州':5,'焦作':5,'凉山州':5,'黔西南州':5,'广安':5,'新余':5,'锡林郭勒':5,'宣城':5,'兴安盟':5,'红河州':5,'眉山':5,\
'巴彦淖尔':5,'双鸭山市':5,'景德镇市':5,'鸡西':5,'三门峡':5,'宿州':5,'汕尾':5,'阜新':5,'张掖':5,'玉林':5,'乌兰察布':5,'鹰潭':5,'黑河':5,'伊春':5,'贵港市':5,'漯河':5,'晋城':5,'克拉玛依':5,\
'随州':5,'保山':5,'濮阳':5,'文山州':5,'嘉峪关':5,'六盘水':5,'乌海':5,'自贡':5,'松原':5,'内江':5,'黔东南州':5,'鹤壁':5,'德宏州':5,'安顺':5,'资阳':5,'鄂州':5,'忻州':5,'荆门':5,'淮北':5,\
'毕节':5,'巴音郭楞':5,'防城港':5,'天水':5,'黔南州':5,'阿坝州':5,'石嘴山':5,'安康':5,'亳州市':5,'昌吉州':5,'普洱':5,'楚雄州':5,'白城':5,'贺州':5,'哈密':5,'来宾':5,'庆阳':5,'河池':5,\
           '张家界':5,'雅安':5,'辽源':5,'湘西州':5,'朔州':5,'临沧':5,'白银':5,'塔城地区':5,'莱芜':5,'迪庆州':5,'喀什地区':5,'甘孜州':5,'阿克苏':5,'武威':5,'巴中':5,'平凉':5,'商洛':5,'七台河':5,'金昌':5,\
'中卫':5,'阿勒泰':5,'铜川':5,'海西州':5,'吴忠':5,'固原':5,'吐鲁番':5,'阿拉善盟':5,'博尔塔拉州':5,'定西':5,'西双版纳':5,'陇南':5,'大兴安岭':5,'崇左':5,'日喀则':5,'临夏州':5,'林芝':5,\
'海东':5,'怒江州':5,'和田地区':5,'昌都':5,'儋州':5,'甘南州':5,'山南':5,'海南州':5,'海北州':5,'玉树州':5,'阿里地区':5,'那曲地区':5,'黄南州':5,'克孜勒苏州':5,'果洛州':5,'三沙':5}
code_keys=list(code.keys())
for qq in code_keys:
qlist=code[qq]['qlist']
#qtype=code[qq]['qtype']
content=code[qq]['content']
ind=list(data.columns).index(qlist[-1])
data1=data[qlist]
'''
识别问卷星中的城市题
'''
tf1=u'城市' in content
tf2=data1[data1.notnull()].applymap(lambda x:'-' in '%s'%x).all().all()
tf3=(qq+'a' not in data.columns) and (qq+'b' not in data.columns)
if tf1 and tf2 and tf3:
# 省份和城市
tmp1=data[qq].map(lambda x:x.split('-')[0])
tmp2=data[qq].map(lambda x:x.split('-')[1])
tmp2[tmp1==u'上海']=u'上海'
tmp2[tmp1==u'北京']=u'北京'
tmp2[tmp1==u'天津']=u'天津'
tmp2[tmp1==u'重庆']=u'重庆'
tmp2[tmp1==u'香港']=u'香港'
tmp2[tmp1==u'澳门']=u'澳门'
data.insert(ind+1,qq+'a',tmp1)
data.insert(ind+2,qq+'b',tmp2)
code[qq+'a']={'content':'省份','qtype':'填空题','qlist':[qq+'a']}
code[qq+'b']={'content':'城市','qtype':'填空题','qlist':[qq+'b']}
tmp3=data[qq+'b'].map(lambda x: city[x] if x in city.keys() else x)
tmp3=tmp3.map(lambda x: 6 if isinstance(x,str) else x)
data.insert(ind+3,qq+'c',tmp3)
code[qq+'c']={'content':'城市分级','qtype':'单选题','qlist':[qq+'c'],\
'code':{0:'北上广深',1:'新一线',2:'二线',3:'三线',4:'四线',5:'五线',6:'五线以下'}}
return data,code
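# Hedged usage sketch (not from the original source): spec_rcode is meant to run
# right after wenjuanxing(), so that any "省份-城市" style question gains derived
# columns Qxa/Qxb/Qxc (province, city, city tier).
#   data, code = spec_rcode(data, code)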
def levenshtein(s, t):
''''' From Wikipedia article; Iterative with two matrix rows. '''
if s == t: return 0
elif len(s) == 0: return len(t)
elif len(t) == 0: return len(s)
v0 = [None] * (len(t) + 1)
v1 = [None] * (len(t) + 1)
for i in range(len(v0)):
v0[i] = i
for i in range(len(s)):
v1[0] = i + 1
for j in range(len(t)):
cost = 0 if s[i] == t[j] else 1
v1[j + 1] = min(v1[j] + 1, v0[j + 1] + 1, v0[j] + cost)
for j in range(len(v0)):
v0[j] = v1[j]
return v1[len(t)]
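# Hedged sanity check (textbook values, not from the original source):
#   levenshtein('kitten', 'sitting')   # -> 3
#   levenshtein('flaw', 'lawn')        # -> 2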
def code_similar(code1,code2):
'''
题目内容相似度用最小编辑距离来度量
选项相似度分为几种
1、完全相同:1
2、单选题:暂时只考虑序号和值都相等的,且共同变量超过一半:2
2、多选题/排序题:不考虑序号,共同变量超过一半即可:3
3、矩阵单选题:code_r 暂时只考虑完全匹配
4、其他情况为0
'''
code_distance_min=pd.DataFrame(index=code1.keys(),columns=['qnum','similar_content','similar_code'])
for c1 in code1:
# 计算题目内容的相似度
disstance_str=pd.Series(index=code2.keys())
for c2 in code2:
if code1[c1]['qtype']==code2[c2]['qtype']:
disstance_str[c2]=levenshtein(code1[c1]['content'], code2[c2]['content'])
c2=disstance_str.idxmin()
if '%s'%c2 == 'nan':
continue
min_len=(len(code1[c1]['content'])+len(code2[c2]['content']))/2
similar_content=100-100*disstance_str[c2]/min_len if min_len>0 else 0
# 计算选项的相似度
qtype=code2[c2]['qtype']
if qtype == '单选题':
t1=code1[c1]['code']
t2=code2[c2]['code']
inner_key=list(set(t1.keys())&set(t2.keys()))
tmp=all([t1[c]==t2[c] for c in inner_key])
if t1==t2:
similar_code=1
elif len(inner_key)>=0.5*len(set(t1.keys())|set(t2.keys())) and tmp:
similar_code=2
else:
similar_code=0
elif qtype in ['多选题','排序题']:
t1=code1[c1]['code']
t2=code2[c2]['code']
t1=[t1[c] for c in code1[c1]['qlist']]
t2=[t2[c] for c in code2[c2]['qlist']]
inner_key=set(t1)&set(t2)
if t1==t2:
similar_code=1
elif len(set(t1)&set(t2))>=0.5*len(set(t1)|set(t2)):
similar_code=3
else:
similar_code=0
elif qtype in ['矩阵多选题']:
t1=code1[c1]['code_r']
t2=code2[c2]['code_r']
t1=[t1[c] for c in code1[c1]['qlist']]
t2=[t2[c] for c in code2[c2]['qlist']]
inner_key=set(t1)&set(t2)
if t1==t2:
similar_code=1
elif len(set(t1)&set(t2))>=0.5*len(set(t1)|set(t2)):
similar_code=3
else:
similar_code=0
elif qtype in ['填空题']:
similar_code=1
else:
similar_code=0
code_distance_min.loc[c1,'qnum']=c2
code_distance_min.loc[c1,'similar_content']=similar_content
code_distance_min.loc[c1,'similar_code']=similar_code
# 剔除qnum中重复的值
code_distance_min=code_distance_min.sort_values(['qnum','similar_content','similar_code'],ascending=[False,False,True])
code_distance_min.loc[code_distance_min.duplicated(['qnum']),:]=np.nan
code_distance_min=pd.DataFrame(code_distance_min,index=code1.keys())
return code_distance_min
def data_merge(ques1,ques2,qlist1=None,qlist2=None,name1='ques1',name2='ques2',\
mergeqnum='Q0',similar_threshold=70):
'''合并两份数据
ques1: 列表,[data1,code1]
ques2: 列表,[data2,code2]
'''
data1,code1=ques1
data2,code2=ques2
if (qlist1 is None) or (qlist2 is None):
qlist1=[]
qlist2=[]
qqlist1=[]
qqlist2=[]
code_distance_min=code_similar(code1,code2)
code1_key=sorted(code1,key=lambda x:int(re.findall('\d+',x)[0]))
for c1 in code1_key:
qtype1=code1[c1]['qtype']
#print('{}:{}'.format(c1,code1[c1]['content']))
rs_qq=code_distance_min.loc[c1,'qnum']
similar_content=code_distance_min.loc[c1,'similar_content']
similar_code=code_distance_min.loc[c1,'similar_code']
if (similar_content>=similar_threshold) and (similar_code in [1,2]):
#print('推荐合并第二份数据中的{}({}), 两个题目相似度为为{:.0f}%'.format(rs_qq,code2[rs_qq]['content'],similar))
print('将自动合并: {} 和 {}'.format(c1,rs_qq))
user_qq=rs_qq
qqlist1+=code1[c1]['qlist']
qqlist2+=code2[user_qq]['qlist']
qlist1.append(c1)
qlist2.append(rs_qq)
elif (similar_content>=similar_threshold) and (similar_code==3):
# 针对非单选题,此时要调整选项顺序
t1=code1[c1]['code_r'] if qtype1 =='矩阵单选题' else code1[c1]['code']
t1_qlist=code1[c1]['qlist']
t1_value=[t1[k] for k in t1_qlist]
t2=code2[rs_qq]['code_r'] if qtype1 =='矩阵单选题' else code2[rs_qq]['code']
t2_qlist=code2[rs_qq]['qlist']
t2_value=[t2[k] for k in t2_qlist]
# 保留相同的选项
t1_qlist_new=[q for q in t1_qlist if t1[q] in list(set(t1_value)&set(t2_value))]
t2_r=dict(zip([s[1] for s in t2.items()],[s[0] for s in t2.items()]))
t2_qlist_new=[t2_r[s] for s in [t1[q] for q in t1_qlist_new]]
code1[c1]['qlist']=t1_qlist_new
code1[c1]['code']={k:t1[k] for k in t1_qlist_new}
qqlist1+=t1_qlist_new
qqlist2+=t2_qlist_new
qlist1.append(c1)
qlist2.append(rs_qq)
print('将自动合并: {} 和 {} (只保留了相同的选项)'.format(c1,rs_qq))
elif similar_code in [1,2]:
print('-'*40)
print('为【 {}:{} 】自动匹配到: '.format(c1,code1[c1]['content']))
print(' 【 {}:{} 】,其相似度为{:.0f}%.'.format(rs_qq,code2[rs_qq]['content'],similar_content))
tmp=input('是否合并该组题目,请输入 yes/no (也可以输入第二份数据中其他您需要匹配的题目): ')
tmp=re.sub('\s','',tmp)
tmp=tmp.lower()
if tmp in ['yes','y']:
user_qq=rs_qq
elif tmp in ['no','n']:
user_qq=None
else:
tmp=re.sub('^q','Q',tmp)
if tmp not in code2:
user_qq=None
                    elif (tmp in code2) and (tmp!=rs_qq):
                        print('您输入的是{}:{}'.format(tmp,code2[tmp]['content']))
                        user_qq=tmp
                    else:
                        user_qq=rs_qq
if user_qq==rs_qq:
qqlist1+=code1[c1]['qlist']
qqlist2+=code2[user_qq]['qlist']
qlist1.append(c1)
qlist2.append(user_qq)
print('将自动合并: {} 和 {}'.format(c1,rs_qq))
elif user_qq is not None:
# 比对两道题目的code
if 'code' in code1[c1] and len(code1[c1]['code'])>0:
t1=code1[c1]['code_r'] if qtype1 =='矩阵单选题' else code1[c1]['code']
t2=code2[user_qq]['code_r'] if code2[user_qq]['qtype'] =='矩阵单选题' else code2[user_qq]['code']
if set(t1.values())==set(t2.values()):
qqlist1+=code1[c1]['qlist']
qqlist2+=code2[user_qq]['qlist']
qlist1.append(c1)
qlist2.append(user_qq)
print('将自动合并: {} 和 {}'.format(c1,user_qq))
else:
print('两个题目的选项不匹配,将自动跳过.')
else:
qqlist1+=[code1[c1]['qlist'][0]]
qqlist2+=[code2[user_qq]['qlist'][0]]
qlist1.append(c1)
qlist2.append(user_qq)
print('将自动合并: {} 和 {}'.format(c1,user_qq))
else:
print('将自动跳过: {}'.format(c1))
print('-'*40)
else:
print('将自动跳过: {}'.format(c1))
tmp=input('请问您需要的题目是否都已经合并? 请输入(yes / no): ')
tmp=re.sub('\s','',tmp)
tmp=tmp.lower()
if tmp in ['no','n']:
print('请确保接下来您要合并的题目类型和选项完全一样.')
while 1:
tmp=input('请输入您想合并的题目对,直接回车则终止输入(如: Q1,Q1 ): ')
tmp=re.sub('\s','',tmp)# 去掉空格
tmp=re.sub(',',',',tmp)# 修正可能错误的逗号
tmp=tmp.split(',')
tmp=[re.sub('^q','Q',qq) for qq in tmp]
if len(tmp)<2:
break
if tmp[0] in qlist1 or tmp[1] in qlist2:
print('该题已经被合并,请重新输入')
continue
if tmp[0] not in code1 or tmp[1] not in code2:
print('输入错误, 请重新输入')
continue
c1=tmp[0]
c2=tmp[1]
print('您输入的是:')
print('第一份数据中的【 {}:{} 】'.format(c1,code1[c1]['content']))
print('第二份数据中的【 {}:{} 】'.format(c2,code2[c2]['content']))
w=code_similar({c1:code1[c1]},{c2:code2[c2]})
similar_code=w.loc[c1,'similar_code']
if similar_code in [1,2] and len(code1[c1]['qlist'])==len(code2[c2]['qlist']):
qqlist1+=code1[c1]['qlist']
qqlist2+=code2[c2]['qlist']
qlist1.append(c1)
qlist2.append(c2)
print('将自动合并: {} 和 {}'.format(c1,c2))
else:
print('选项不匹配,请重新输入')
else:
qqlist1=[]
for qq in qlist1:
qqlist1=qqlist1+code1[qq]['qlist']
qqlist2=[]
for qq in qlist2:
qqlist2=qqlist2+code2[qq]['qlist']
# 将题号列表转化成data中的列名
if mergeqnum in qqlist1:
mergeqnum=mergeqnum+'merge'
data1=data1.loc[:,qqlist1]
data1.loc[:,mergeqnum]=1
data2=data2.loc[:,qqlist2]
data2.loc[:,mergeqnum]=2
if len(qqlist1)!=len(qqlist2):
print('两份数据选项不完全匹配,请检查....')
        raise ValueError('两份数据选项不完全匹配')
data2=data2.rename(columns=dict(zip(qqlist2,qqlist1)))
data12=data1.append(data2,ignore_index=True)
code12={}
for i,cc in enumerate(qlist1):
code12[cc]=code1[cc]
if 'code' in code1[cc] and 'code' in code2[qlist2[i]]:
code12[cc]['code'].update(code2[qlist2[i]]['code'])
code12[mergeqnum]={'content':u'来源','code':{1:name1,2:name2},'qtype':u'单选题','qlist':[mergeqnum]}
return data12,code12
## ===========================================================
#
#
# 数据清洗 #
#
#
## ==========================================================
def clean_ftime(ftime,cut_percent=0.25):
'''
ftime 是完成问卷的秒数
思路:
1、只考虑截断问卷完成时间较小的样本
2、找到完成时间变化的拐点,即需要截断的时间点
返回:r
建议截断<r的样本
'''
t_min=int(ftime.min())
t_cut=int(ftime.quantile(cut_percent))
x=np.array(range(t_min,t_cut))
y=np.array([len(ftime[ftime<=i]) for i in range(t_min,t_cut)])
z1 = np.polyfit(x, y, 4) # 拟合得到的函数
z2=np.polyder(z1,2) #求二阶导数
r=np.roots(np.polyder(z2,1))
r=int(r[0])
return r
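# Hedged usage sketch (assumes the wenjuanxing() output where u'所用时间' has been
# converted to seconds; names are illustrative):
#   r = clean_ftime(data[u'所用时间'], cut_percent=0.25)
#   data = data[data[u'所用时间'] >= r]   # drop suspiciously fast respondents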
## ===========================================================
#
#
# 数据分析和输出 #
#
#
## ==========================================================
def data_auto_code(data):
'''智能判断问卷数据
输入
data: 数据框,列名需要满足Qi或者Qi_
输出:
code: 自动编码
'''
data=pd.DataFrame(data)
columns=data.columns
columns=[c for c in columns if re.match('Q\d+',c)]
code={}
for cc in columns:
# 识别题目号
if '_' not in cc:
key=cc
else:
key=cc.split('_')[0]
# 新的题目则产生新的code
if key not in code:
code[key]={}
code[key]['qlist']=[]
code[key]['code']={}
code[key]['content']=key
code[key]['qtype']=''
# 处理各题目列表
if key == cc:
code[key]['qlist']=[key]
elif re.findall('^'+key+'_[a-zA-Z]{0,}\d+$',cc):
code[key]['qlist'].append(cc)
else:
if 'qlist_open' in code[key]:
code[key]['qlist_open'].append(cc)
else:
code[key]['qlist_open']=[cc]
for kk in code.keys():
dd=data[code[kk]['qlist']]
# 单选题和填空题
if len(dd.columns)==1:
tmp=dd[dd.notnull()].iloc[:,0].unique()
if dd.iloc[:,0].value_counts().mean() >=2:
code[kk]['qtype']=u'单选题'
code[kk]['code']=dict(zip(tmp,tmp))
else:
code[kk]['qtype']=u'填空题'
del code[kk]['code']
else:
            tmp=set(dd[dd.notnull()].values.flatten())
if set(tmp)==set([0,1]):
code[kk]['qtype']=u'多选题'
code[kk]['code']=dict(zip(code[kk]['qlist'],code[kk]['qlist']))
elif 'R' in code[kk]['qlist'][0]:
code[kk]['qtype']=u'矩阵单选题'
code[kk]['code_r']=dict(zip(code[kk]['qlist'],code[kk]['qlist']))
code[kk]['code']=dict(zip(list(tmp),list(tmp)))
else:
code[kk]['qtype']=u'排序题'
code[kk]['code']=dict(zip(code[kk]['qlist'],code[kk]['qlist']))
return code
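# Hedged usage sketch (column names are illustrative): rebuild a code dict from a
# frame that already follows the Qi / Qi_Ak naming convention. Single columns are
# typed as 单选题/填空题 from how often values repeat; 0/1 column groups become 多选题.
#   df = pd.DataFrame({'Q1': [1, 2, 2], 'Q2_A1': [0, 1, 1], 'Q2_A2': [1, 0, 1]})
#   code = data_auto_code(df)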
def save_data(data,filename=u'data.xlsx',code=None):
'''保存问卷数据到本地
根据filename后缀选择相应的格式保存
如果有code,则保存按文本数据
'''
savetype=os.path.splitext(filename)[1][1:]
data1=data.copy()
if code:
for qq in code.keys():
qtype=code[qq]['qtype']
qlist=code[qq]['qlist']
if qtype == u'单选题':
# 将序号换成文本,题号加上具体内容
data1[qlist[0]].replace(code[qq]['code'],inplace=True)
data1.rename(columns={qq:'{}({})'.format(qq,code[qq]['content'])},inplace=True)
elif qtype == u'矩阵单选题':
# 同单选题
                data1[code[qq]['qlist']]=data1[code[qq]['qlist']].replace(code[qq]['code']) # 切片上的inplace替换不会生效,改为重新赋值
tmp1=code[qq]['qlist']
tmp2=['{}({})'.format(q,code[qq]['code_r'][q]) for q in tmp1]
data1.rename(columns=dict(zip(tmp1,tmp2)),inplace=True)
elif qtype in [u'排序题']:
# 先变成一道题,插入表中,然后再把序号变成文本
tmp=data[qlist]
tmp=tmp.rename(columns=code[qq]['code'])
tmp=dataCode_to_text(tmp)
ind=list(data1.columns).index(qlist[0])
qqname='{}({})'.format(qq,code[qq]['content'])
data1.insert(ind,qqname,tmp)
tmp1=code[qq]['qlist']
tmp2=['{}_{}'.format(qq,code[qq]['code'][q]) for q in tmp1]
data1.rename(columns=dict(zip(tmp1,tmp2)),inplace=True)
elif qtype in [u'多选题']:
# 先变成一道题,插入表中,然后再把序号变成文本
tmp=data[qlist]
tmp=tmp.rename(columns=code[qq]['code'])
tmp=dataCode_to_text(tmp)
ind=list(data1.columns).index(qlist[0])
qqname='{}({})'.format(qq,code[qq]['content'])
data1.insert(ind,qqname,tmp)
for q in qlist:
data1[q].replace({0:'',1:code[qq]['code'][q]},inplace=True)
tmp2=['{}_{}'.format(qq,code[qq]['code'][q]) for q in qlist]
data1.rename(columns=dict(zip(qlist,tmp2)),inplace=True)
else:
data1.rename(columns={qq:'{}({})'.format(qq,code[qq]['content'])},inplace=True)
if (savetype == u'xlsx') or (savetype == u'xls'):
data1.to_excel(filename,index=False)
elif savetype == u'csv':
data1.to_csv(filename,index=False)
def read_data(filename):
savetype=os.path.splitext(filename)[1][1:]
if (savetype==u'xlsx') or (savetype==u'xls'):
data=pd.read_excel(filename)
elif savetype==u'csv':
data=pd.read_csv(filename)
else:
        print('cannot read file: unsupported file type!')
        data=None
return data
def sa_to_ma(data):
'''单选题数据转换成多选题数据
data是单选题数据, 要求非有效列别为nan
可以使用内置函数pd.get_dummies()代替
'''
if isinstance(data,pd.core.frame.DataFrame):
data=data[data.columns[0]]
#categorys=sorted(data[data.notnull()].unique())
categorys=data[data.notnull()].unique()
try:
categorys=sorted(categorys)
except:
pass
#print('sa_to_ma function::cannot sorted')
data_ma=pd.DataFrame(index=data.index,columns=categorys)
for c in categorys:
data_ma[c]=data.map(lambda x : int(x==c))
data_ma.loc[data.isnull(),:]=np.nan
return data_ma
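# Hedged equivalence sketch (illustrative): for a Series s of single-choice codes,
# sa_to_ma(s) matches pd.get_dummies(s) except that rows where s is NaN stay NaN
# instead of becoming all zeros.
#   s = pd.Series([1, 2, np.nan, 1])
#   sa_to_ma(s)          # columns [1.0, 2.0], third row is NaN
#   pd.get_dummies(s)    # columns [1.0, 2.0], third row is 0/0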
def to_dummpy(data,code,qqlist=None,qtype_new='多选题',ignore_open=True):
'''转化成哑变量
将数据中所有的单选题全部转化成哑变量,另外剔除掉开放题和填空题
返回一个很大的只有0和1的数据
'''
if qqlist is None:
qqlist=sorted(code,key=lambda x:int(re.findall('\d+',x)[0]))
bdata=pd.DataFrame()
bcode={}
for qq in qqlist:
qtype=code[qq]['qtype']
data0=data[code[qq]['qlist']]
if qtype=='单选题':
data0=data0.iloc[:,0]
categorys=data0[data0.notnull()].unique()
try:
categorys=sorted(categorys)
except :
pass
categorys=[t for t in categorys if t in code[qq]['code']]
cname=[code[qq]['code'][k] for k in categorys]
columns_name=['{}_A{}'.format(qq,i+1) for i in range(len(categorys))]
tmp=pd.DataFrame(index=data0.index,columns=columns_name)
for i,c in enumerate(categorys):
tmp[columns_name[i]]=data0.map(lambda x : int(x==c))
#tmp.loc[data0.isnull(),:]=0
code_tmp={'content':code[qq]['content'],'qtype':qtype_new}
code_tmp['code']=dict(zip(columns_name,cname))
code_tmp['qlist']=columns_name
bcode.update({qq:code_tmp})
bdata=pd.concat([bdata,tmp],axis=1)
elif qtype in ['多选题','排序题','矩阵单选题']:
bdata=pd.concat([bdata,data0],axis=1)
bcode.update({qq:code[qq]})
bdata=bdata.fillna(0)
try:
        bdata=bdata.astype(np.int64,errors='ignore')
except :
pass
return bdata,bcode
def qdata_flatten(data,code,quesid=None,userid_begin=None):
'''将问卷数据展平,字段如下
userid: 用户ID
quesid: 问卷ID
qnum: 题号
qname: 题目内容
qtype: 题目类型
samplelen:题目的样本数
itemnum: 选项序号
itemname: 选项内容
code: 用户的选择
codename: 用户选择的具体值
count: 计数
percent(%): 计数占比(百分比)
'''
if not userid_begin:
userid_begin=1000000
data.index=[userid_begin+i+1 for i in range(len(data))]
if '提交答卷时间' in data.columns:
begin_date=pd.to_datetime(data['提交答卷时间']).min().strftime('%Y-%m-%d')
end_date=pd.to_datetime(data['提交答卷时间']).max().strftime('%Y-%m-%d')
else:
begin_date=''
end_date=''
data,code=to_dummpy(data,code,qtype_new='单选题')
code_item={}
for qq in code:
if code[qq]['qtype']=='矩阵单选题':
code_item.update(code[qq]['code_r'])
else :
code_item.update(code[qq]['code'])
qdata=data.stack().reset_index()
qdata.columns=['userid','qn_an','code']
qdata['qnum']=qdata['qn_an'].map(lambda x:x.split('_')[0])
qdata['itemnum']=qdata['qn_an'].map(lambda x:'_'.join(x.split('_')[1:]))
if quesid:
qdata['quesid']=quesid
qdata=qdata[['userid','quesid','qnum','itemnum','code']]
else:
qdata=qdata[['userid','qnum','itemnum','code']]
# 获取描述统计信息:
samplelen=qdata.groupby(['userid','qnum'])['code'].sum().map(lambda x:int(x>0)).unstack().sum()
quesinfo=qdata.groupby(['qnum','itemnum','code'])['code'].count()
quesinfo.name='count'
quesinfo=quesinfo.reset_index()
quesinfo=quesinfo[quesinfo['code']!=0]
#quesinfo=qdata.groupby(['quesid','qnum','itemnum'])['code'].sum()
quesinfo['samplelen']=quesinfo['qnum'].replace(samplelen.to_dict())
quesinfo['percent(%)']=0
quesinfo.loc[quesinfo['samplelen']>0,'percent(%)']=100*quesinfo.loc[quesinfo['samplelen']>0,'count']/quesinfo.loc[quesinfo['samplelen']>0,'samplelen']
quesinfo['qname']=quesinfo['qnum'].map(lambda x: code[x]['content'])
quesinfo['qtype']=quesinfo['qnum'].map(lambda x: code[x]['qtype'])
quesinfo['itemname']=quesinfo['qnum']+quesinfo['itemnum'].map(lambda x:'_%s'%x)
quesinfo['itemname']=quesinfo['itemname'].replace(code_item)
#quesinfo['itemname']=quesinfo['qn_an'].map(lambda x: code[x.split('_')[0]]['code_r'][x] if \
#code[x.split('_')[0]]['qtype']=='矩阵单选题' else code[x.split('_')[0]]['code'][x])
# 各个选项的含义
quesinfo['codename']=''
quesinfo.loc[quesinfo['code']==0,'codename']='否'
quesinfo.loc[quesinfo['code']==1,'codename']='是'
quesinfo['tmp']=quesinfo['qnum']+quesinfo['code'].map(lambda x:'_%s'%int(x))
quesinfo['codename'].update(quesinfo.loc[(quesinfo['code']>0)&(quesinfo['qtype']=='矩阵单选题'),'tmp']\
.map(lambda x: code[x.split('_')[0]]['code'][int(x.split('_')[1])]))
quesinfo['codename'].update(quesinfo.loc[(quesinfo['code']>0)&(quesinfo['qtype']=='排序题'),'tmp'].map(lambda x: 'Top{}'.format(x.split('_')[1])))
quesinfo['begin_date']=begin_date
quesinfo['end_date']=end_date
if quesid:
quesinfo['quesid']=quesid
quesinfo=quesinfo[['quesid','begin_date','end_date','qnum','qname','qtype','samplelen','itemnum','itemname','code','codename','count','percent(%)']]
else:
quesinfo=quesinfo[['qnum','qname','qtype','samplelen','itemnum','itemname','code','codename','count','percent(%)']]
# 排序
quesinfo['qnum']=quesinfo['qnum'].astype('category')
quesinfo['qnum'].cat.set_categories(sorted(list(quesinfo['qnum'].unique()),key=lambda x:int(re.findall('\d+',x)[0])), inplace=True)
quesinfo['itemnum']=quesinfo['itemnum'].astype('category')
quesinfo['itemnum'].cat.set_categories(sorted(list(quesinfo['itemnum'].unique()),key=lambda x:int(re.findall('\d+',x)[0])), inplace=True)
quesinfo=quesinfo.sort_values(['qnum','itemnum','code'])
return qdata,quesinfo
def confidence_interval(p,n,alpha=0.05):
    import math
    import scipy.stats as stats
t=stats.norm.ppf(1-alpha/2)
ci=t*math.sqrt(p*(1-p)/n)
#a=p-stats.norm.ppf(1-alpha/2)*math.sqrt(p*(1-p)/n)
#b=p+stats.norm.ppf(1-alpha/2)*math.sqrt(p*(1-p)/n)
return ci
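# Hedged numeric sketch (normal-approximation margin of error, illustrative values):
#   confidence_interval(0.5, 400)   # ~= 1.96 * sqrt(0.25/400) ~= 0.049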
def sample_size_cal(interval,N,alpha=0.05):
'''调研样本量的计算
参考:https://www.surveysystem.com/sscalc.htm
sample_size_cal(interval,N,alpha=0.05)
输入:
interval: 误差范围,例如0.03
N: 总体的大小,一般1万以上就没啥差别啦
alpha:置信水平,默认95%
'''
import scipy.stats as stats
p=stats.norm.ppf(1-alpha/2)
if interval>1:
interval=interval/100
samplesize=p**2/4/interval**2
if N:
samplesize=samplesize*N/(samplesize+N)
samplesize=int(round(samplesize))
return samplesize
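# Hedged numeric sketch (should agree with the surveysystem.com calculator referenced
# above; values are illustrative): a +/-3% margin at 95% confidence over a population
# of 100000 needs roughly 1056 completed responses.
#   sample_size_cal(0.03, 100000)   # -> about 1056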
def gof_test(fo,fe=None,alpha=0.05):
'''拟合优度检验
输入:
fo:观察频数
fe:期望频数,缺省为平均数
返回:
1: 样本与总体有差异
0:样本与总体无差异
例子:
gof_test(np.array([0.3,0.4,0.3])*222)
'''
import scipy.stats as stats
fo=np.array(fo).flatten()
C=len(fo)
if not fe:
N=fo.sum()
fe=np.array([N/C]*C)
else:
fe=np.array(fe).flatten()
chi_value=(fo-fe)**2/fe
chi_value=chi_value.sum()
chi_value_fit=stats.chi2.ppf(q=1-alpha,df=C-1)
#CV=np.sqrt((fo-fe)**2/fe**2/(C-1))*100
if chi_value>chi_value_fit:
result=1
else:
result=0
return result
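# Hedged usage sketch reusing the docstring example: observed counts from a 30/40/30
# split of 222 answers, tested against a uniform expectation; 1 means the sample
# differs from the assumed distribution, 0 means it does not.
#   gof_test(np.array([0.3, 0.4, 0.3]) * 222)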
def chi2_test(fo,alpha=0.05):
import scipy.stats as stats
fo=pd.DataFrame(fo)
chiStats = stats.chi2_contingency(observed=fo)
#critical_value = stats.chi2.ppf(q=1-alpha,df=chiStats[2])
#observed_chi_val = chiStats[0]
# p<alpha 等价于 observed_chi_val>critical_value
chi2_data=(chiStats[1] <= alpha,chiStats[1])
return chi2_data
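# Hedged usage sketch (illustrative 2x2 table): returns (significant, p_value) from
# scipy's chi-square test of independence.
#   chi2_test([[30, 70], [60, 40]])   # -> (True, p) for this strongly skewed table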
def fisher_exact(fo,alpha=0.05):
'''fisher_exact 显著性检验函数
此处采用的是调用R的解决方案,需要安装包 pyper
python解决方案参见
https://mrnoutahi.com/2016/01/03/Fisher-exac-test-for-mxn-table/
但还有些问题,所以没用.
'''
import pyper as pr
r=pr.R(use_pandas=True,use_numpy=True)
r.assign('fo',fo)
r("b<-fisher.test(fo)")
pdata=r['b']
p_value=pdata['p.value']
if p_value<alpha:
result=1
else:
result=0
return (result,p_value)
def anova(data,formula):
'''方差分析
输入
--data: DataFrame格式,包含数值型变量和分类型变量
    --formula: 变量之间的关系, 如: 数值型变量~C(分类型变量1)[+C(分类型变量2)][+C(分类型变量1):C(分类型变量2)]
返回[方差分析表]
[总体的方差来源于组内方差和组间方差,通过比较组间方差和组内方差的比来推断两者的差异]
--df:自由度
--sum_sq:误差平方和
--mean_sq:误差平方和/对应的自由度
--F:mean_sq之比
--PR(>F):p值,比如<0.05则代表有显著性差异
'''
import statsmodels.api as sm
from statsmodels.formula.api import ols
cw_lm=ols(formula, data=data).fit() #Specify C for Categorical
r=sm.stats.anova_lm(cw_lm)
return r
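# Hedged usage sketch (column names are illustrative): one-way ANOVA of a numeric
# score against a categorical group label, via statsmodels as imported inside anova().
#   df = pd.DataFrame({'score': [1.0, 2.0, 1.5, 3.0, 3.5, 2.8], 'group': list('AAABBB')})
#   anova(df, 'score ~ C(group)')   # rows C(group) and Residual, with F and PR(>F)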
def mca(X,N=2):
'''对应分析函数,暂时支持双因素
X:观察频数表
N:返回的维数,默认2维
可以通过scatter函数绘制:
fig=scatter([pr,pc])
fig.savefig('mca.png')
'''
from scipy.linalg import diagsvd
S = X.sum().sum()
Z = X / S # correspondence matrix
r = Z.sum(axis=1)
c = Z.sum()
D_r = np.diag(1/np.sqrt(r))
Z_c = Z - np.outer(r, c) # standardized residuals matrix
D_c = np.diag(1/np.sqrt(c))
# another option, not pursued here, is sklearn.decomposition.TruncatedSVD
P,s,Q = np.linalg.svd(np.dot(np.dot(D_r, Z_c),D_c))
#S=diagsvd(s[:2],P.shape[0],2)
pr=np.dot(np.dot(D_r,P),diagsvd(s[:N],P.shape[0],N))
pc=np.dot(np.dot(D_c,Q.T),diagsvd(s[:N],Q.shape[0],N))
inertia=np.cumsum(s**2)/np.sum(s**2)
inertia=inertia.tolist()
if isinstance(X,pd.DataFrame):
pr=pd.DataFrame(pr,index=X.index,columns=list('XYZUVW')[:N])
pc=pd.DataFrame(pc,index=X.columns,columns=list('XYZUVW')[:N])
return pr,pc,inertia
'''
w=pd.ExcelWriter(u'mca_.xlsx')
pr.to_excel(w,startrow=0,index_label=True)
pc.to_excel(w,startrow=len(pr)+2,index_label=True)
w.save()
'''
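# Hedged usage sketch (illustrative contingency table): rows and columns are mapped
# into the same low-dimensional space, as suggested by the scatter() hint above.
#   X = pd.DataFrame([[20, 10, 5], [5, 15, 20], [10, 10, 10]],
#                    index=['brandA', 'brandB', 'brandC'], columns=['young', 'mid', 'old'])
#   pr, pc, inertia = mca(X, N=2)
#   fig = scatter([pr, pc])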
def cluster(data,code,cluster_qq,n_clusters='auto',max_clusters=7):
'''对态度题进行聚类
'''
from sklearn.cluster import KMeans
#from sklearn.decomposition import PCA
from sklearn import metrics
#import prince
qq_max=sorted(code,key=lambda x:int(re.findall('\d+',x)[0]))[-1]
new_cluster='Q{}'.format(int(re.findall('\d+',qq_max)[0])+1)
#new_cluster='Q32'
qlist=code[cluster_qq]['qlist']
X=data[qlist]
# 去除所有态度题选择的分数都一样的用户(含仅有两个不同)
std_t=min(1.41/np.sqrt(len(qlist)),0.40) if len(qlist)>=8 else 0.10
X=X[X.T.std()>std_t]
index_bk=X.index#备份,方便还原
X.fillna(0,inplace=True)
X1=X.T
X1=(X1-X1.mean())/X1.std()
    X1=X1.T.values
if n_clusters == 'auto':
#聚类个数的选取和评估
silhouette_score=[]# 轮廊系数
SSE_score=[]
klist=np.arange(2,15)
for k in klist:
est = KMeans(k) # 4 clusters
est.fit(X1)
tmp=np.sum((X1-est.cluster_centers_[est.labels_])**2)
SSE_score.append(tmp)
tmp=metrics.silhouette_score(X1, est.labels_)
silhouette_score.append(tmp)
'''
fig = plt.figure(1)
ax = fig.add_subplot(111)
fig = plt.figure(2)
ax.plot(klist,np.array(silhouette_score))
ax = fig.add_subplot(111)
ax.plot(klist,np.array(SSE_score))
'''
# 找轮廊系数的拐点
ss=np.array(silhouette_score)
t1=[False]+list(ss[1:]>ss[:-1])
t2=list(ss[:-1]>ss[1:])+[False]
k_log=[t1[i]&t2[i] for i in range(len(t1))]
if True in k_log:
k=k_log.index(True)
else:
k=1
k=k if k<=max_clusters-2 else max_clusters-2 # 限制最多分7类
k_best=klist[k]
else:
k_best=n_clusters
est = KMeans(k_best) # 4 clusters
est.fit(X1)
# 系数计算
SSE=np.sqrt(np.sum((X1-est.cluster_centers_[est.labels_])**2)/len(X1))
silhouette_score=metrics.silhouette_score(X1, est.labels_)
print('有效样本数:{},特征数:{},最佳分类个数:{} 类'.format(len(X1),len(qlist),k_best))
print('SSE(样本到所在类的质心的距离)为:{:.2f},轮廊系数为: {:.2f}'.format(SSE,silhouette_score))
# 绘制降维图
'''
X_PCA = PCA(2).fit_transform(X1)
kwargs = dict(cmap = plt.cm.get_cmap('rainbow', 10),
edgecolor='none', alpha=0.6)
labels=pd.Series(est.labels_)
plt.figure()
plt.scatter(X_PCA[:, 0], X_PCA[:, 1], c=labels, **kwargs)
'''
'''
# 三维立体图
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.scatter(X_PCA[:, 0], X_PCA[:, 1],X_PCA[:, 2], c=labels, **kwargs)
'''
# 导出到原数据
parameters={'methods':'kmeans','inertia':est.inertia_,'SSE':SSE,'silhouette':silhouette_score,\
'n_clusters':k_best,'n_features':len(qlist),'n_samples':len(X1),'qnum':new_cluster,\
'data':X1,'labels':est.labels_}
data[new_cluster]=pd.Series(est.labels_,index=index_bk)
code[new_cluster]={'content':'态度题聚类结果','qtype':'单选题','qlist':[new_cluster],
'code':dict(zip(range(k_best),['cluster{}'.format(i+1) for i in range(k_best)]))}
print('结果已经存进数据, 题号为:{}'.format(new_cluster))
return data,code,parameters
'''
# 对应分析
t=data.groupby([new_cluster])[code[cluster_qq]['qlist']].mean()
t.columns=['R{}'.format(i+1) for i in range(len(code[cluster_qq]['qlist']))]
t=t.rename(index=code[new_cluster]['code'])
ca=prince.CA(t)
ca.plot_rows_columns(show_row_labels=True,show_column_labels=True)
'''
def scatter(data,legend=False,title=None,font_ch=None,find_path=None):
'''
绘制带数据标签的散点图
'''
import matplotlib.font_manager as fm
if font_ch is None:
fontlist=['calibri.ttf','simfang.ttf','simkai.ttf','simhei.ttf','simsun.ttc','msyh.ttf','msyh.ttc']
myfont=''
if not find_path:
find_paths=['C:\\Windows\\Fonts','']
# fontlist 越靠后越优先,findpath越靠后越优先
for find_path in find_paths:
for f in fontlist:
if os.path.exists(os.path.join(find_path,f)):
myfont=os.path.join(find_path,f)
if len(myfont)==0:
print('没有找到合适的中文字体绘图,请检查.')
myfont=None
else:
myfont = fm.FontProperties(fname=myfont)
else:
myfont=fm.FontProperties(fname=font_ch)
fig, ax = plt.subplots()
#ax.grid('on')
ax.xaxis.set_ticks_position('none')
ax.yaxis.set_ticks_position('none')
ax.axhline(y=0, linestyle='-', linewidth=1.2, alpha=0.6)
ax.axvline(x=0, linestyle='-', linewidth=1.2, alpha=0.6)
color=['blue','red','green','dark']
if not isinstance(data,list):
data=[data]
for i,dd in enumerate(data):
ax.scatter(dd.iloc[:,0], dd.iloc[:,1], c=color[i], s=50,
label=dd.columns[1])
for _, row in dd.iterrows():
ax.annotate(row.name, (row.iloc[0], row.iloc[1]), color=color[i],fontproperties=myfont,fontsize=10)
ax.axis('equal')
if legend:
ax.legend(loc='best')
if title:
ax.set_title(title,fontproperties=myfont)
return fig
def sankey(df,filename=None):
'''SanKey图绘制
df的列是左节点,行是右节点
注:暂时没找到好的Python方法,所以只生成R语言所需数据
返回links 和 nodes
# R code 参考
library(networkD3)
dd=read.csv('price_links.csv')
links<-data.frame(source=dd$from,target=dd$to,value=dd$value)
nodes=read.csv('price_nodes.csv',encoding = 'UTF-8')
nodes<-nodes['name']
Energy=c(links=links,nodes=nodes)
sankeyNetwork(Links = links, Nodes = nodes, Source = "source",
Target = "target", Value = "value", NodeID = "name",
units = "TWh",fontSize = 20,fontFamily='微软雅黑',nodeWidth=20)
'''
nodes=['Total']
nodes=nodes+list(df.columns)+list(df.index)
nodes=pd.DataFrame(nodes)
nodes['id']=range(len(nodes))
nodes.columns=['name','id']
R,C=df.shape
    t1=pd.DataFrame(df.values,columns=range(1,C+1),index=range(C+1,R+C+1))
t1.index.name='to'
t1.columns.name='from'
links=t1.unstack().reset_index(name='value')
links0=pd.DataFrame({'from':[0]*C,'to':range(1,C+1),'value':list(df.sum())})
links=links0.append(links)
if filename:
links.to_csv(filename+'_links.csv',index=False,encoding='utf-8')
nodes.to_csv(filename+'_nodes.csv',index=False,encoding='utf-8')
return (links,nodes)
def table(data,code,total=True):
'''
单个题目描述统计
code是data的编码,列数大于1
返回字典格式数据:
'fop':百分比, 对于单选题和为1,多选题分母为样本数
'fo': 观察频数表,其中添加了合计项
'fw': 加权频数表,可实现平均值、T2B等功能,仅当code中存在关键词'weight'时才有
'''
# 单选题
qtype=code['qtype']
index=code['qlist']
data=pd.DataFrame(data)
sample_len=data[code['qlist']].notnull().T.any().sum()
result={}
if qtype == u'单选题':
fo=data.iloc[:,0].value_counts()
if 'weight' in code:
w=pd.Series(code['weight'])
fo1=fo[w.index][fo[w.index].notnull()]
fw=(fo1*w).sum()/fo1.sum()
result['fw']=fw
fo.sort_values(ascending=False,inplace=True)
fop=fo.copy()
fop=fop/fop.sum()*1.0
fop[u'合计']=fop.sum()
fo[u'合计']=fo.sum()
if 'code' in code:
fop.rename(index=code['code'],inplace=True)
fo.rename(index=code['code'],inplace=True)
fop.name=u'占比'
fo.name=u'频数'
fop=pd.DataFrame(fop)
fo=pd.DataFrame(fo)
result['fo']=fo
result['fop']=fop
elif qtype == u'多选题':
fo=data.sum()
fo.sort_values(ascending=False,inplace=True)
fo[u'合计']=fo.sum()
if 'code' in code:
fo.rename(index=code['code'],inplace=True)
fop=fo.copy()
fop=fop/sample_len
fop.name=u'占比'
fo.name=u'频数'
fop=pd.DataFrame(fop)
fo=pd.DataFrame(fo)
result['fop']=fop
result['fo']=fo
elif qtype == u'矩阵单选题':
fo=pd.DataFrame(columns=code['qlist'],index=sorted(code['code']))
for i in fo.columns:
fo.loc[:,i]=data[i].value_counts()
if 'weight' not in code:
code['weight']=dict(zip(code['code'].keys(),code['code'].keys()))
fw=pd.DataFrame(columns=[u'加权'],index=code['qlist'])
w=pd.Series(code['weight'])
for c in fo.columns:
t=fo[c]
t=t[w.index][t[w.index].notnull()]
if t.sum()>1e-17:
fw.loc[c,u'加权']=(t*w).sum()/t.sum()
else:
fw.loc[c,u'加权']=0
fw.rename(index=code['code_r'],inplace=True)
result['fw']=fw
result['weight']=','.join(['{}:{}'.format(code['code'][c],code['weight'][c]) for c in code['code']])
fo.rename(columns=code['code_r'],index=code['code'],inplace=True)
fop=fo.copy()
fop=fop/sample_len
result['fop']=fop
result['fo']=fo
elif qtype == u'排序题':
#提供综合统计和TOP1值统计
# 其中综合的算法是当成单选题,给每个TOP分配和为1的权重
#topn=max([len(data[q][data[q].notnull()].unique()) for q in index])
#topn=len(index)
topn=data[index].fillna(0).max().max()
topn=int(topn)
qsort=dict(zip([i+1 for i in range(topn)],[(topn-i)*2.0/(topn+1)/topn for i in range(topn)]))
top1=data.applymap(lambda x:int(x==1))
data_weight=data.replace(qsort)
t1=pd.DataFrame()
t1['TOP1']=top1.sum()
t1[u'综合']=data_weight.sum()
t1.sort_values(by=u'综合',ascending=False,inplace=True)
t1.rename(index=code['code'],inplace=True)
t=t1.copy()
t=t/sample_len
result['fop']=t
result['fo']=t1
# 新增topn矩阵
t_topn=pd.DataFrame()
for i in range(topn):
t_topn['TOP%d'%(i+1)]=data.applymap(lambda x:int(x==i+1)).sum()
t_topn.sort_values(by=u'TOP1',ascending=False,inplace=True)
if 'code' in code:
t_topn.rename(index=code['code'],inplace=True)
result['TOPN_fo']=t_topn#频数
result['TOPN']=t_topn/sample_len
result['weight']='+'.join(['TOP{}*{:.2f}'.format(i+1,(topn-i)*2.0/(topn+1)/topn) for i in range(topn)])
else:
result['fop']=None
result['fo']=None
if (not total) and not(result['fo'] is None) and (u'合计' in result['fo'].index):
result['fo'].drop([u'合计'],axis=0,inplace=True)
result['fop'].drop([u'合计'],axis=0,inplace=True)
if not(result['fo'] is None) and ('code_order' in code):
code_order=[q for q in code['code_order'] if q in result['fo'].index]
if u'合计' in result['fo'].index:
code_order=code_order+[u'合计']
result['fo']=pd.DataFrame(result['fo'],index=code_order)
result['fop']=pd.DataFrame(result['fop'],index=code_order)
return result
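# Hedged usage sketch (question code is illustrative): frequencies for one
# single-choice question, reusing the per-question dict stored in `code`.
#   q_code = {'qtype': u'单选题', 'qlist': ['Q1'], 'code': {1: u'男', 2: u'女'}}
#   t = table(data[['Q1']], q_code)
#   t['fo'], t['fop']   # counts and shares, each with a u'合计' row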
def crosstab(data_index,data_column,code_index=None,code_column=None,qtype=None,total=True):
'''适用于问卷数据的交叉统计
输入参数:
data_index: 因变量,放在行中
data_column:自变量,放在列中
code_index: dict格式,指定data_index的编码等信息
code_column: dict格式,指定data_column的编码等信息
qtype: 给定两个数据的题目类型,若为字符串则给定data_index,若为列表,则给定两个的
返回字典格式数据
'fop':默认的百分比表,行是data_index,列是data_column
'fo':原始频数表,且添加了总体项
'fw': 加权平均值
简要说明:
因为要处理各类题型,这里将单选题处理为多选题
fo:观察频数表
nij是同时选择了Ri和Cj的频数
总体的频数是选择了Ri的频数,与所在行的总和无关
行变量\列变量 C1 |C2 | C3| C4|总体
R1| n11|n12|n13|n14|n1:
         R2| n21|n22|n23|n24|n2:
R3| n31|n32|n33|n34|n3:
fop: 观察百分比表(列变量)
这里比较难处理,data_column各个类别的样本量和总体的样本量不一样,各类别的样本量为同时
选择了行变量和列类别的频数。而总体的样本量为选择了行变量的频数
fw: 加权平均值
如果data_index的编码code含有weight字段,则我们会输出分组的加权平均值
'''
# 将Series转为DataFrame格式
data_index=pd.DataFrame(data_index)
    data_column=pd.DataFrame(data_column)
import itertools
import operator
from os.path import dirname, join
import numpy as np
import pandas as pd
import pytest
from pandas.core import ops
from pandas.tests.extension import base
from pandas.tests.extension.conftest import ( # noqa: F401
as_array,
as_frame,
as_series,
fillna_method,
groupby_apply_op,
use_numpy,
)
from pint.errors import DimensionalityError
from pint.testsuite import QuantityTestCase, helpers
import pint_pandas as ppi
from pint_pandas import PintArray
ureg = ppi.PintType.ureg
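# Hedged note (not part of the upstream suite): a PintArray wraps a pint Quantity so
# pandas columns keep their units, e.g. PintArray([1.0, 2.0], dtype="pint[m]") or
# PintArray.from_1darray_quantity(np.arange(3.0) * ureg.m).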
@pytest.fixture(params=[True, False])
def box_in_series(request):
"""Whether to box the data in a Series"""
return request.param
@pytest.fixture
def dtype():
return ppi.PintType("pint[meter]")
@pytest.fixture
def data():
return ppi.PintArray.from_1darray_quantity(
np.arange(start=1.0, stop=101.0) * ureg.nm
)
@pytest.fixture
def data_missing():
return ppi.PintArray.from_1darray_quantity([np.nan, 1] * ureg.meter)
@pytest.fixture
def data_for_twos():
x = [
2.0,
] * 100
return ppi.PintArray.from_1darray_quantity(x * ureg.meter)
@pytest.fixture(params=["data", "data_missing"])
def all_data(request, data, data_missing):
if request.param == "data":
return data
elif request.param == "data_missing":
return data_missing
@pytest.fixture
def data_repeated(data):
"""Return different versions of data for count times"""
# no idea what I'm meant to put here, try just copying from https://github.com/pandas-dev/pandas/blob/master/pandas/tests/extension/integer/test_integer.py
def gen(count):
for _ in range(count):
yield data
yield gen
@pytest.fixture(params=[None, lambda x: x])
def sort_by_key(request):
"""
Simple fixture for testing keys in sorting methods.
Tests None (no key) and the identity key.
"""
return request.param
@pytest.fixture
def data_for_sorting():
return ppi.PintArray.from_1darray_quantity([0.3, 10, -50] * ureg.centimeter)
# should probably get more sophisticated and do something like
# [1 * ureg.meter, 3 * ureg.meter, 10 * ureg.centimeter]
@pytest.fixture
def data_missing_for_sorting():
return ppi.PintArray.from_1darray_quantity([4, np.nan, -5] * ureg.centimeter)
# should probably get more sophisticated and do something like
# [4 * ureg.meter, np.nan, 10 * ureg.centimeter]
@pytest.fixture
def na_cmp():
"""Binary operator for comparing NA values."""
return lambda x, y: bool(np.isnan(x.magnitude)) & bool(np.isnan(y.magnitude))
@pytest.fixture
def na_value():
return ppi.PintType("meter").na_value
@pytest.fixture
def data_for_grouping():
# should probably get more sophisticated here and use units on all these
# quantities
a = 1.0
b = 2.0 ** 32 + 1
c = 2.0 ** 32 + 10
return ppi.PintArray.from_1darray_quantity(
[b, b, np.nan, np.nan, a, a, b, c] * ureg.m
)
# === missing from pandas extension docs about what has to be included in tests ===
# copied from pandas/pandas/conftest.py
_all_arithmetic_operators = [
"__add__",
"__radd__",
"__sub__",
"__rsub__",
"__mul__",
"__rmul__",
"__floordiv__",
"__rfloordiv__",
"__truediv__",
"__rtruediv__",
"__pow__",
"__rpow__",
"__mod__",
"__rmod__",
]
@pytest.fixture(params=_all_arithmetic_operators)
def all_arithmetic_operators(request):
"""
Fixture for dunder names for common arithmetic operations
"""
return request.param
@pytest.fixture(params=["__eq__", "__ne__", "__le__", "__lt__", "__ge__", "__gt__"])
def all_compare_operators(request):
"""
Fixture for dunder names for common compare operations
* >=
* >
* ==
* !=
* <
* <=
"""
return request.param
# commented functions aren't implemented
_all_numeric_reductions = [
"sum",
"max",
"min",
"mean",
# "prod",
# "std",
# "var",
"median",
# "kurt",
# "skew",
]
@pytest.fixture(params=_all_numeric_reductions)
def all_numeric_reductions(request):
"""
Fixture for numeric reduction names.
"""
return request.param
_all_boolean_reductions = ["all", "any"]
@pytest.fixture(params=_all_boolean_reductions)
def all_boolean_reductions(request):
"""
Fixture for boolean reduction names.
"""
return request.param
# =================================================================
class TestCasting(base.BaseCastingTests):
pass
class TestConstructors(base.BaseConstructorsTests):
@pytest.mark.xfail(run=True, reason="__iter__ / __len__ issue")
def test_series_constructor_no_data_with_index(self, dtype, na_value):
result = pd.Series(index=[1, 2, 3], dtype=dtype)
expected = pd.Series([na_value] * 3, index=[1, 2, 3], dtype=dtype)
self.assert_series_equal(result, expected)
# GH 33559 - empty index
result = pd.Series(index=[], dtype=dtype)
expected = pd.Series([], index=pd.Index([], dtype="object"), dtype=dtype)
self.assert_series_equal(result, expected)
@pytest.mark.xfail(run=True, reason="__iter__ / __len__ issue")
def test_series_constructor_scalar_na_with_index(self, dtype, na_value):
result = pd.Series(na_value, index=[1, 2, 3], dtype=dtype)
expected = pd.Series([na_value] * 3, index=[1, 2, 3], dtype=dtype)
self.assert_series_equal(result, expected)
@pytest.mark.xfail(run=True, reason="__iter__ / __len__ issue")
def test_series_constructor_scalar_with_index(self, data, dtype):
scalar = data[0]
result = pd.Series(scalar, index=[1, 2, 3], dtype=dtype)
expected = pd.Series([scalar] * 3, index=[1, 2, 3], dtype=dtype)
self.assert_series_equal(result, expected)
result = pd.Series(scalar, index=["foo"], dtype=dtype)
expected = pd.Series([scalar], index=["foo"], dtype=dtype)
self.assert_series_equal(result, expected)
class TestDtype(base.BaseDtypeTests):
pass
class TestGetitem(base.BaseGetitemTests):
def test_getitem_mask_raises(self, data):
mask = np.array([True, False])
msg = f"Boolean index has wrong length: 2 instead of {len(data)}"
with pytest.raises(IndexError, match=msg):
data[mask]
mask = pd.array(mask, dtype="boolean")
with pytest.raises(IndexError, match=msg):
data[mask]
class TestGroupby(base.BaseGroupbyTests):
@pytest.mark.xfail(run=True, reason="__iter__ / __len__ issue")
def test_groupby_apply_identity(self, data_for_grouping):
df = pd.DataFrame({"A": [1, 1, 2, 2, 3, 3, 1, 4], "B": data_for_grouping})
result = df.groupby("A").B.apply(lambda x: x.array)
expected = pd.Series(
[
df.B.iloc[[0, 1, 6]].array,
df.B.iloc[[2, 3]].array,
df.B.iloc[[4, 5]].array,
df.B.iloc[[7]].array,
],
index=pd.Index([1, 2, 3, 4], name="A"),
name="B",
)
self.assert_series_equal(result, expected)
class TestInterface(base.BaseInterfaceTests):
pass
class TestMethods(base.BaseMethodsTests):
@pytest.mark.filterwarnings("ignore::pint.UnitStrippedWarning")
# See test_setitem_mask_broadcast note
@pytest.mark.parametrize("dropna", [True, False])
def test_value_counts(self, all_data, dropna):
all_data = all_data[:10]
if dropna:
other = all_data[~all_data.isna()]
else:
other = all_data
result = pd.Series(all_data).value_counts(dropna=dropna).sort_index()
expected = pd.Series(other).value_counts(dropna=dropna).sort_index()
self.assert_series_equal(result, expected)
@pytest.mark.filterwarnings("ignore::pint.UnitStrippedWarning")
# See test_setitem_mask_broadcast note
@pytest.mark.parametrize("box", [pd.Series, lambda x: x])
@pytest.mark.parametrize("method", [lambda x: x.unique(), pd.unique])
def test_unique(self, data, box, method):
duplicated = box(data._from_sequence([data[0], data[0]]))
result = method(duplicated)
assert len(result) == 1
assert isinstance(result, type(data))
assert result[0] == duplicated[0]
@pytest.mark.xfail(run=True, reason="__iter__ / __len__ issue")
def test_fillna_copy_frame(self, data_missing):
arr = data_missing.take([1, 1])
df = pd.DataFrame({"A": arr})
filled_val = df.iloc[0, 0]
result = df.fillna(filled_val)
assert df.A.values is not result.A.values
@pytest.mark.xfail(run=True, reason="__iter__ / __len__ issue")
def test_fillna_copy_series(self, data_missing):
arr = data_missing.take([1, 1])
ser = pd.Series(arr)
filled_val = ser[0]
result = ser.fillna(filled_val)
assert ser._values is not result._values
assert ser._values is arr
@pytest.mark.xfail(run=True, reason="__iter__ / __len__ issue")
def test_searchsorted(self, data_for_sorting, as_series): # noqa: F811
b, c, a = data_for_sorting
arr = type(data_for_sorting)._from_sequence([a, b, c])
if as_series:
arr = pd.Series(arr)
assert arr.searchsorted(a) == 0
assert arr.searchsorted(a, side="right") == 1
assert arr.searchsorted(b) == 1
assert arr.searchsorted(b, side="right") == 2
assert arr.searchsorted(c) == 2
assert arr.searchsorted(c, side="right") == 3
result = arr.searchsorted(arr.take([0, 2]))
expected = np.array([0, 2], dtype=np.intp)
self.assert_numpy_array_equal(result, expected)
# sorter
sorter = np.array([1, 2, 0])
assert data_for_sorting.searchsorted(a, sorter=sorter) == 0
@pytest.mark.xfail(run=True, reason="__iter__ / __len__ issue")
def test_where_series(self, data, na_value, as_frame): # noqa: F811
assert data[0] != data[1]
cls = type(data)
a, b = data[:2]
ser = pd.Series(cls._from_sequence([a, a, b, b], dtype=data.dtype))
cond = np.array([True, True, False, False])
if as_frame:
ser = ser.to_frame(name="a")
cond = cond.reshape(-1, 1)
result = ser.where(cond)
expected = pd.Series(
cls._from_sequence([a, a, na_value, na_value], dtype=data.dtype)
)
if as_frame:
expected = expected.to_frame(name="a")
self.assert_equal(result, expected)
# array other
cond = np.array([True, False, True, True])
other = cls._from_sequence([a, b, a, b], dtype=data.dtype)
if as_frame:
other = pd.DataFrame({"a": other})
cond = pd.DataFrame({"a": cond})
result = ser.where(cond, other)
expected = pd.Series(cls._from_sequence([a, b, b, b], dtype=data.dtype))
if as_frame:
expected = expected.to_frame(name="a")
self.assert_equal(result, expected)
@pytest.mark.parametrize("ascending", [True, False])
def test_sort_values(self, data_for_sorting, ascending, sort_by_key):
ser = pd.Series(data_for_sorting)
result = ser.sort_values(ascending=ascending, key=sort_by_key)
expected = ser.iloc[[2, 0, 1]]
if not ascending:
expected = expected[::-1]
self.assert_series_equal(result, expected)
@pytest.mark.parametrize("ascending", [True, False])
def test_sort_values_missing(
self, data_missing_for_sorting, ascending, sort_by_key
):
ser = pd.Series(data_missing_for_sorting)
result = ser.sort_values(ascending=ascending, key=sort_by_key)
if ascending:
expected = ser.iloc[[2, 0, 1]]
else:
expected = ser.iloc[[0, 2, 1]]
self.assert_series_equal(result, expected)
class TestArithmeticOps(base.BaseArithmeticOpsTests):
def check_opname(self, s, op_name, other, exc=None):
op = self.get_op_from_name(op_name)
self._check_op(s, op, other, exc)
def _check_op(self, s, op, other, exc=None):
if exc is None:
result = op(s, other)
expected = s.combine(other, op)
self.assert_series_equal(result, expected)
else:
with pytest.raises(exc):
op(s, other)
def _check_divmod_op(self, s, op, other, exc=None):
# divmod has multiple return values, so check separately
if exc is None:
result_div, result_mod = op(s, other)
if op is divmod:
expected_div, expected_mod = s // other, s % other
else:
expected_div, expected_mod = other // s, other % s
self.assert_series_equal(result_div, expected_div)
self.assert_series_equal(result_mod, expected_mod)
else:
with pytest.raises(exc):
divmod(s, other)
def _get_exception(self, data, op_name):
if op_name in ["__pow__", "__rpow__"]:
return op_name, DimensionalityError
else:
return op_name, None
def test_arith_series_with_scalar(self, data, all_arithmetic_operators):
# series & scalar
op_name, exc = self._get_exception(data, all_arithmetic_operators)
s = pd.Series(data)
self.check_opname(s, op_name, s.iloc[0], exc=exc)
@pytest.mark.xfail(run=True, reason="__iter__ / __len__ issue")
def test_arith_frame_with_scalar(self, data, all_arithmetic_operators):
# frame & scalar
op_name, exc = self._get_exception(data, all_arithmetic_operators)
df = pd.DataFrame({"A": data})
self.check_opname(df, op_name, data[0], exc=exc)
@pytest.mark.xfail(run=True, reason="s.combine does not accept arrays")
def test_arith_series_with_array(self, data, all_arithmetic_operators):
# ndarray & other series
op_name, exc = self._get_exception(data, all_arithmetic_operators)
s = pd.Series(data)
self.check_opname(s, op_name, data, exc=exc)
# parameterise this to try divisor not equal to 1
def test_divmod(self, data):
s = pd.Series(data)
self._check_divmod_op(s, divmod, 1 * ureg.Mm)
self._check_divmod_op(1 * ureg.Mm, ops.rdivmod, s)
@pytest.mark.xfail(run=True, reason="Test is deleted in pd 1.3, pd GH #39386")
def test_error(self, data, all_arithmetic_operators):
# invalid ops
op = all_arithmetic_operators
s = pd.Series(data)
ops = getattr(s, op)
opa = getattr(data, op)
# invalid scalars
# TODO: work out how to make this more specific/test for the two
# different possible errors here
with pytest.raises(Exception):
ops("foo")
# TODO: work out how to make this more specific/test for the two
# different possible errors here
with pytest.raises(Exception):
ops(pd.Timestamp("20180101"))
# invalid array-likes
# TODO: work out how to make this more specific/test for the two
# different possible errors here
#
# This won't always raise exception, eg for foo % 3 m
if "mod" not in op:
with pytest.raises(Exception):
ops(pd.Series("foo", index=s.index))
# 2d
with pytest.raises(KeyError):
opa(pd.DataFrame({"A": s}))
with pytest.raises(ValueError):
opa(np.arange(len(s)).reshape(-1, len(s)))
@pytest.mark.parametrize("box", [pd.Series, pd.DataFrame])
def test_direct_arith_with_ndframe_returns_not_implemented(self, data, box):
# EAs should return NotImplemented for ops with Series/DataFrame
# Pandas takes care of unboxing the series and calling the EA's op.
other = pd.Series(data)
if box is pd.DataFrame:
other = other.to_frame()
if hasattr(data, "__add__"):
result = data.__add__(other)
assert result is NotImplemented
else:
raise pytest.skip(f"{type(data).__name__} does not implement add")
class TestComparisonOps(base.BaseComparisonOpsTests):
def _compare_other(self, s, data, op_name, other):
op = self.get_op_from_name(op_name)
result = op(s, other)
expected = op(s.values.quantity, other)
assert (result == expected).all()
def test_compare_scalar(self, data, all_compare_operators):
op_name = all_compare_operators
s = pd.Series(data)
other = data[0]
self._compare_other(s, data, op_name, other)
def test_compare_array(self, data, all_compare_operators):
# nb this compares an quantity containing array
# eg Q_([1,2],"m")
op_name = all_compare_operators
s = pd.Series(data)
other = data
self._compare_other(s, data, op_name, other)
@pytest.mark.parametrize("box", [pd.Series, pd.DataFrame])
def test_direct_arith_with_ndframe_returns_not_implemented(self, data, box):
# EAs should return NotImplemented for ops with Series/DataFrame
# Pandas takes care of unboxing the series and calling the EA's op.
other = pd.Series(data)
if box is pd.DataFrame:
other = other.to_frame()
if hasattr(data, "__eq__"):
result = data.__eq__(other)
assert result is NotImplemented
else:
raise pytest.skip(f"{type(data).__name__} does not implement __eq__")
if hasattr(data, "__ne__"):
result = data.__ne__(other)
assert result is NotImplemented
else:
raise pytest.skip(f"{type(data).__name__} does not implement __ne__")
class TestOpsUtil(base.BaseOpsUtil):
pass
class TestParsing(base.BaseParsingTests):
pass
class TestPrinting(base.BasePrintingTests):
pass
class TestMissing(base.BaseMissingTests):
@pytest.mark.xfail(run=True, reason="__iter__ / __len__ issue")
def test_fillna_scalar(self, data_missing):
valid = data_missing[1]
result = data_missing.fillna(valid)
expected = data_missing.fillna(valid)
self.assert_extension_array_equal(result, expected)
@pytest.mark.xfail(run=True, reason="__iter__ / __len__ issue")
def test_fillna_series(self, data_missing):
fill_value = data_missing[1]
ser = pd.Series(data_missing)
result = ser.fillna(fill_value)
expected = pd.Series(
data_missing._from_sequence(
[fill_value, fill_value], dtype=data_missing.dtype
)
)
self.assert_series_equal(result, expected)
# Fill with a series
result = ser.fillna(expected)
self.assert_series_equal(result, expected)
# Fill with a series not affecting the missing values
result = ser.fillna(ser)
self.assert_series_equal(result, ser)
@pytest.mark.xfail(run=True, reason="__iter__ / __len__ issue")
def test_fillna_frame(self, data_missing):
fill_value = data_missing[1]
result = pd.DataFrame({"A": data_missing, "B": [1, 2]}).fillna(fill_value)
expected = pd.DataFrame(
{
"A": data_missing._from_sequence(
[fill_value, fill_value], dtype=data_missing.dtype
),
"B": [1, 2],
}
)
self.assert_series_equal(result, expected)
class TestNumericReduce(base.BaseNumericReduceTests):
def check_reduce(self, s, op_name, skipna):
result = getattr(s, op_name)(skipna=skipna)
expected_m = getattr(pd.Series(s.values.quantity._magnitude), op_name)(
skipna=skipna
)
expected_u = s.values.quantity.units
expected = ureg.Quantity(expected_m, expected_u)
assert result == expected
class TestBooleanReduce(base.BaseBooleanReduceTests):
def check_reduce(self, s, op_name, skipna):
result = getattr(s, op_name)(skipna=skipna)
expected = getattr(pd.Series(s.values.quantity._magnitude), op_name)(
skipna=skipna
)
assert result == expected
class TestReshaping(base.BaseReshapingTests):
@pytest.mark.xfail(run=True, reason="__iter__ / __len__ issue")
@pytest.mark.parametrize("obj", ["series", "frame"])
def test_unstack(self, data, index, obj):
data = data[: len(index)]
if obj == "series":
ser = pd.Series(data, index=index)
else:
ser = pd.DataFrame({"A": data, "B": data}, index=index)
n = index.nlevels
levels = list(range(n))
# [0, 1, 2]
# [(0,), (1,), (2,), (0, 1), (0, 2), (1, 0), (1, 2), (2, 0), (2, 1)]
combinations = itertools.chain.from_iterable(
itertools.permutations(levels, i) for i in range(1, n)
)
for level in combinations:
result = ser.unstack(level=level)
assert all(
isinstance(result[col].array, type(data)) for col in result.columns
)
if obj == "series":
# We should get the same result with to_frame+unstack+droplevel
df = ser.to_frame()
alt = df.unstack(level=level).droplevel(0, axis=1)
self.assert_frame_equal(result, alt)
expected = ser.astype(object).unstack(level=level)
result = result.astype(object)
self.assert_frame_equal(result, expected)
class TestSetitem(base.BaseSetitemTests):
@pytest.mark.parametrize("setter", ["loc", None])
@pytest.mark.filterwarnings("ignore::pint.UnitStrippedWarning")
# Pandas performs a hasattr(__array__), which triggers the warning
# Debugging it does not pass through a PintArray, so
# I think this needs changing in pint quantity
# eg s[[True]*len(s)]=Q_(1,"m")
@pytest.mark.xfail(run=True, reason="__iter__ / __len__ issue")
def test_setitem_mask_broadcast(self, data, setter):
ser = pd.Series(data)
mask = np.zeros(len(data), dtype=bool)
mask[:2] = True
if setter: # loc
target = getattr(ser, setter)
else: # __setitem__
target = ser
operator.setitem(target, mask, data[10])
assert ser[0] == data[10]
assert ser[1] == data[10]
@pytest.mark.xfail(run=True, reason="__iter__ / __len__ issue")
def test_setitem_sequence_broadcasts(self, data, box_in_series):
if box_in_series:
data = pd.Series(data)
data[[0, 1]] = data[2]
assert data[0] == data[2]
assert data[1] == data[2]
@pytest.mark.xfail(run=True, reason="__iter__ / __len__ issue")
@pytest.mark.parametrize(
"idx",
[[0, 1, 2], pd.array([0, 1, 2], dtype="Int64"), np.array([0, 1, 2])],
ids=["list", "integer-array", "numpy-array"],
)
def test_setitem_integer_array(self, data, idx, box_in_series):
arr = data[:5].copy()
expected = data.take([0, 0, 0, 3, 4])
if box_in_series:
arr = pd.Series(arr)
expected = pd.Series(expected)
arr[idx] = arr[0]
self.assert_equal(arr, expected)
@pytest.mark.xfail(run=True, reason="__iter__ / __len__ issue")
def test_setitem_slice(self, data, box_in_series):
arr = data[:5].copy()
expected = data.take([0, 0, 0, 3, 4])
if box_in_series:
arr = pd.Series(arr)
expected = pd.Series(expected)
arr[:3] = data[0]
self.assert_equal(arr, expected)
@pytest.mark.xfail(run=True, reason="__iter__ / __len__ issue")
def test_setitem_loc_iloc_slice(self, data):
arr = data[:5].copy()
s = pd.Series(arr, index=["a", "b", "c", "d", "e"])
expected = pd.Series(data.take([0, 0, 0, 3, 4]), index=s.index)
result = s.copy()
result.iloc[:3] = data[0]
self.assert_equal(result, expected)
result = s.copy()
result.loc[:"c"] = data[0]
self.assert_equal(result, expected)
class TestOffsetUnits(object):
@pytest.mark.xfail(run=True, reason="TODO untested issue that was fixed")
def test_offset_concat(self):
q_a = ureg.Quantity(np.arange(5), ureg.Unit("degC"))
q_b = ureg.Quantity(np.arange(6), ureg.Unit("degC"))
a = pd.Series(PintArray(q_a))
b = pd.Series(PintArray(q_b))
result = pd.concat([a, b], axis=1)
expected = pd.Series(PintArray(np.concatenate([q_b, q_b]), dtype="pint[degC]"))
self.assert_equal(result, expected)
# would be ideal to just test all of this by running the example notebook
# but this isn't a discussion we've had yet
class TestUserInterface(object):
def test_get_underlying_data(self, data):
ser = pd.Series(data)
# this first test creates an array of bool (which is desired, eg for indexing)
assert all(ser.values == data)
assert ser.values[23] == data[23]
def test_arithmetic(self, data):
ser = pd.Series(data)
ser2 = ser + ser
assert all(ser2.values == 2 * data)
def test_initialisation(self, data):
# fails with plain array
# works with PintArray
df = pd.DataFrame(
{
"length": pd.Series([2.0, 3.0], dtype="pint[m]"),
"width": PintArray([2.0, 3.0], dtype="pint[m]"),
"distance": PintArray([2.0, 3.0], dtype="m"),
"height": PintArray([2.0, 3.0], dtype=ureg.m),
"depth": PintArray.from_1darray_quantity(
ureg.Quantity([2.0, 3.0], ureg.m)
),
}
)
for col in df.columns:
assert all(df[col] == df.length)
def test_df_operations(self):
# simply a copy of what's in the notebook
df = pd.DataFrame(
{
"torque": | pd.Series([1.0, 2.0, 2.0, 3.0], dtype="pint[lbf ft]") | pandas.Series |
# encoding: utf-8
import sys
import os
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
from util import get_trained_model, get_dataloader
pd.set_option('display.max_colwidth',1000)
def predict_single_model(model, dataloader):
model.cuda()
model.eval()
pred = torch.FloatTensor().cuda()
with torch.no_grad():
for i, (inp) in enumerate(dataloader):
input_var = torch.autograd.Variable(inp.cuda())
output = model(input_var)
pred = torch.cat((pred, output.data), 0)
return pred.cpu().data.numpy()
def ensemble(predictions, ratio):
prediction_ensemble = np.zeros(shape = predictions[0].shape, dtype = float)
for i in range(0,len(ratio)):
prediction_ensemble += predictions[i]*ratio[i]
return prediction_ensemble
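# Illustrative sketch (not part of the original script): a toy check of the weighted
# averaging performed by `ensemble`. The prediction arrays and weights below are made up.
def _demo_ensemble():
    preds = [np.array([[0.2, 0.8]]), np.array([[0.6, 0.4]])]
    return ensemble(preds, ratio=[0.5, 0.5])  # -> array([[0.4, 0.6]])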
def predict_file(prediction_np, input_file, output_file):
"""
arguments:
prediction_np: (numpy) predictions from the model
input_file: (csv/txt) image path list
output_file: (csv/txt) file to which the predicted labels are written
!!modify to meet your needs!!
u_one_features = ['Atelectasis', 'Edema']
u_zero_features = ['Cardiomegaly', 'Consolidation', 'Pleural Effusion']
"""
def get_final_csv(df):
result = pd.DataFrame(columns=['Path','Study','Atelectasis','Cardiomegaly','Pleural Effusion','Consolidation','Edema'])
for Study_name in set(df['Study']):
tmp = df[df['Study'].isin([Study_name])]
tmp_result = tmp[0:1].copy()
tmp_result['Atelectasis'] = tmp['Atelectasis'].max()
tmp_result['Edema'] = tmp['Edema'].max()
tmp_result['Cardiomegaly'] = tmp['Cardiomegaly'].mean()
tmp_result['Pleural Effusion'] = tmp['Pleural Effusion'].mean()
tmp_result['Consolidation'] = tmp['Consolidation'].mean()
result = pd.concat([result, tmp_result], axis=0)
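# Standalone sketch (illustrative, not from the original script) of the per-study aggregation
# policy used in get_final_csv above: max-pooling for the u_one features, mean-pooling for the
# u_zero features. The column subset and values here are made up.
def _demo_study_aggregation():
    df = pd.DataFrame({
        "Study": ["s1", "s1"],
        "Atelectasis": [0.2, 0.9],   # max-pooled -> 0.9
        "Cardiomegaly": [0.4, 0.6],  # mean-pooled -> 0.5
    })
    return df.groupby("Study").agg({"Atelectasis": "max", "Cardiomegaly": "mean"})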
import logging
import math
import random
import sys
import numpy
import pandas
import openbiolink.graphProperties as graphProp
from openbiolink import globalConfig
from openbiolink import globalConfig as glob
from openbiolink import utils
from openbiolink.graph_creation.metadata_edge import edgeMetadata as meta
from openbiolink.train_test_set_creation.sampler import NegativeSampler
from openbiolink.train_test_set_creation.trainTestSetWriter import TrainTestSetWriter
random.seed(glob.RANDOM_STATE)
numpy.random.seed(glob.RANDOM_STATE)
class TrainTestSetCreation():
"""
Manager class for handling the creation of train test splits given a graph
Attributes
----------
all_nodes : pandas.DataFrame
DataFrame with all nodes, columns = globalConfig.COL_NAMES_NODES
all_tp : pandas.DataFrame
DataFrame with edges from the positive graph, i.e. all positive examples
columns = globalConfig.COL_NAMES_EDGES + globalConfig.VALUE_COL_NAME
tp_edgeTypes : [str]
list of all edge types present in the positive examples
all_tn : pandas.DataFrame
DataFrame with edges from the negative graph, i.e. all negative examples
columns = globalConfig.COL_NAMES_EDGES + globalConfig.VALUE_COL_NAME
tn_edgeTypes : [str]
list of all edge types present in the negative examples
meta_edges_dic : {str: (str, str, str)}
dictionary for all possible h,r,t combinations, mapped to their types. The key consists of
%s_%s_%s'%(node1Type,edgeType,node2Type) the value (node1Type, edgeType, node2Type)
tmo_nodes : pandas.DataFrame
DataFrame with all nodes present in the t-1 graph, columns = globalConfig.COL_NAMES_NODES
tmo_all_tp : pandas.DataFrame
DataFrame with edges from the positive t-1 graph, i.e. all positive t-1 examples
columns = globalConfig.COL_NAMES_EDGES + globalConfig.VALUE_COL_NAME
tmo_tp_edgeTypes : [str]
list of all edge types present in the positive t-1 examples
tmo_all_tn : pandas.DataFrame
DataFrame with edges from the negative t-1 graph, i.e. all negative t-1 examples
columns = globalConfig.COL_NAMES_EDGES + globalConfig.VALUE_COL_NAME
tmo_tn_edgeTypes : [str]
list of all edge types present in the negative t-1 examples
"""
def __init__(self,
graph_path,
tn_graph_path,
all_nodes_path,
sep='\t',
#meta_edge_triples=None, #nicetohave (1) split for subsample of edges, define own meta edges
t_minus_one_graph_path=None,
t_minus_one_tn_graph_path=None,
t_minus_one_nodes_path=None):
self.writer = TrainTestSetWriter()
with open(all_nodes_path) as file:
self.all_nodes = pandas.read_csv(file, sep=sep, names=globalConfig.COL_NAMES_NODES)
self.all_nodes = self.all_nodes.sort_values(by=globalConfig.COL_NAMES_NODES).reset_index(drop=True)
with open(graph_path) as file:
self.all_tp = pandas.read_csv(file, sep=sep, names=globalConfig.COL_NAMES_EDGES)
self.all_tp[globalConfig.VALUE_COL_NAME] = 1
self.all_tp = self.all_tp.sort_values(by=globalConfig.COL_NAMES_EDGES).reset_index(drop=True)
self.tp_edgeTypes = list(self.all_tp[globalConfig.EDGE_TYPE_COL_NAME].unique())
with open(tn_graph_path) as file:
self.all_tn = pandas.read_csv(file, sep=sep, names=globalConfig.COL_NAMES_EDGES)
self.all_tn[globalConfig.VALUE_COL_NAME] = 0
self.all_tn = self.all_tn.sort_values(by=globalConfig.COL_NAMES_EDGES).reset_index(drop=True)
self.tn_edgeTypes = list(self.all_tn[globalConfig.EDGE_TYPE_COL_NAME].unique())
self.meta_edges_dic = {}
for metaEdge in utils.get_leaf_subclasses(meta.EdgeMetadata):
edgeType = str(metaEdge.EDGE_INMETA_CLASS.EDGE_TYPE)
node1Type = str(metaEdge.EDGE_INMETA_CLASS.NODE1_TYPE)
node2Type = str(metaEdge.EDGE_INMETA_CLASS.NODE2_TYPE)
if edgeType in self.tp_edgeTypes:
self.meta_edges_dic['%s_%s_%s'%(node1Type,edgeType,node2Type)] = (node1Type, edgeType, node2Type)
#nicetohave (2) check for transitive onto edges
#transitiv_IS_A_edges = utils.check_for_transitive_edges(self.all_tp[self.all_tp[ttsConst.EDGE_TYPE_COL_NAME] == 'IS_A'])
#transitiv_PART_OF_edges = utils.check_for_transitive_edges(self.all_tp[self.all_tp[ttsConst.EDGE_TYPE_COL_NAME] == 'PART_OF'])
#if transitiv_IS_A_edges:
# print('WARNING: transient edges in IS_A: ({a},b,c) for a IS_A b and a IS_A c', transitiv_IS_A_edges)
#if transitiv_PART_OF_edges:
# print('WARNING: transient edges in PART_OF: ({a},b,c) for a PART_OF b and a PART_OF c',
# transitiv_PART_OF_edges)
#for time slices
if not (bool(t_minus_one_graph_path) == bool(t_minus_one_tn_graph_path) == (bool(t_minus_one_nodes_path))):
logging.error('either all three or none of these variables must be provided')
sys.exit()
if t_minus_one_nodes_path and t_minus_one_graph_path and t_minus_one_tn_graph_path:
with open(t_minus_one_nodes_path) as file:
self.tmo_nodes = pandas.read_csv(file, sep=sep, names=globalConfig.COL_NAMES_NODES)
self.tmo_nodes = self.tmo_nodes.sort_values(by=globalConfig.COL_NAMES_NODES).reset_index(drop=True)
with open(t_minus_one_graph_path) as file:
self.tmo_all_tp = pandas.read_csv(file, sep=sep, names=globalConfig.COL_NAMES_EDGES)
self.tmo_all_tp[globalConfig.VALUE_COL_NAME] = 1
self.tmo_all_tp = self.tmo_all_tp.sort_values(by=globalConfig.COL_NAMES_EDGES).reset_index(drop=True)
self.tmo_tp_edgeTypes = list(self.all_tp[globalConfig.EDGE_TYPE_COL_NAME].unique())
with open(t_minus_one_tn_graph_path) as file:
self.tmo_all_tn = pandas.read_csv(file, sep=sep, names=globalConfig.COL_NAMES_EDGES)
self.tmo_all_tn[globalConfig.VALUE_COL_NAME] = 0
self.tmo_all_tn = self.tmo_all_tn.sort_values(by=globalConfig.COL_NAMES_EDGES).reset_index(drop=True)
self.tmo_tn_edgeTypes = list(self.all_tp[globalConfig.EDGE_TYPE_COL_NAME].unique())
def random_edge_split(self, test_frac=None, val=None, crossval=None):
if not val:
val = 0.2
if not test_frac:
test_frac = 0.2
# create positive and negative examples
positive_samples = self.all_tp.copy()
negative_sampler = NegativeSampler(self.meta_edges_dic, self.tn_edgeTypes, self.all_tn.copy(), self.all_nodes)
negative_samples = negative_sampler.generate_random_neg_samples(positive_samples)
all_samples = (positive_samples.append(negative_samples, ignore_index=True)).reset_index(drop=True)
all_samples = utils.remove_inconsistent_edges(all_samples).reset_index(drop=True)
# generate, train-, test-, validation-sets
test_set = all_samples.sample(frac=test_frac, random_state=glob.RANDOM_STATE)
train_val_set = all_samples.drop(list(test_set.index.values))
test_set = utils.remove_parent_duplicates_and_reverses(remain_set=test_set, remove_set=train_val_set)
nodes_in_train_val_set = train_val_set[globalConfig.NODE1_ID_COL_NAME].tolist() \
+ train_val_set[globalConfig.NODE2_ID_COL_NAME].tolist()
new_test_nodes = self.get_additional_nodes(old_nodes_list=nodes_in_train_val_set,
new_nodes_list=self.all_nodes[globalConfig.ID_NODE_COL_NAME].tolist())
if new_test_nodes:
logging.info('The test set contains nodes that are not present in the training set. These edges will be dropped.') #nicetohave (6): option to keep edges with new nodes
test_set = self.remove_edges_with_nodes(test_set, new_test_nodes)
nodes_in_test_set = test_set[globalConfig.NODE1_ID_COL_NAME].tolist() \
+ test_set[globalConfig.NODE2_ID_COL_NAME].tolist()
if graphProp.DIRECTED:
train_val_set = utils.remove_reverse_edges(remain_set=train_val_set, remove_set=test_set)
if crossval:
train_val_set_tuples = self.create_cross_val(train_val_set, val)
new_val_nodes = None
for i, train_val_set_tuple in enumerate(train_val_set_tuples):
train_set, val_set = train_val_set_tuple
new_val_nodes = self.get_additional_nodes(old_nodes_list=train_set[globalConfig.NODE1_ID_COL_NAME].tolist()
+ train_set[globalConfig.NODE2_ID_COL_NAME].tolist(),
new_nodes_list=nodes_in_train_val_set)
if new_val_nodes: #nicetohave (6)
logging.info('Validation set %d contains nodes that are not present in the training set. These edges will be dropped.' %i)
val_set = self.remove_edges_with_nodes(val_set, new_val_nodes)
train_val_set_tuples[i]=(train_set, val_set)
else:
train_val_set_tuples = [(train_val_set, pandas.DataFrame())]
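# Illustrative usage sketch (not from the original module; the file paths below are hypothetical):
def _example_random_edge_split():
    tts = TrainTestSetCreation(graph_path="graph_tp.tsv",
                               tn_graph_path="graph_tn.tsv",
                               all_nodes_path="nodes.tsv")
    return tts.random_edge_split(test_frac=0.2, val=0.2, crossval=False)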
from cmath import nan
import os
import sys
import subprocess
from io import StringIO
import pandas as pd
import argparse
import re
from config import *
import moneyforward as mf
import requests
import time
# Parse arguments
parser = argparse.ArgumentParser(description='Retrieves expense information of PayPay card')
parser.add_argument('-m', '--month', required=True, help="Month (in YYYYMM format) or 'latest'")
parser.add_argument('-d', '--delete-old-file', action='store_true', help="Delete old files for month after importing to MoneyForward (2 latest files will be keeped)")
parser.add_argument('-s', '--slack', action='store_true', help="Enable notification to Slack (optional)")
parser.add_argument('-c', '--add-category', action='store_true', help="Add category to expense record based on store name, using pre-defined CSV (/app/category.csv)")
args = parser.parse_args()
asset_name = "PayPayカード" # MoneyForwardでの登録名
category_preset_path = '/app/category.csv'
import pathlib
def get_latest_month():
data_dir = pathlib.Path('/data')
file_list = data_dir.glob('paypay_2*.tsv')
file_list_str = [str(p.resolve()) for p in file_list]
latest_file_name = sorted(file_list_str, reverse=True)[0]
latest_month = re.findall('/data/paypay_(\d{6})_.*.tsv', latest_file_name)[0]
return latest_month
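# Minimal check (illustrative) of the filename convention assumed by get_latest_month;
# the path below is made up.
def _demo_month_extraction():
    name = "/data/paypay_202401_000000.tsv"
    return re.findall('/data/paypay_(\d{6})_.*.tsv', name)[0]  # -> "202401"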
def load_data(target_month):
data_dir = pathlib.Path('/data')
file_list = data_dir.glob('paypay_{}*.tsv'.format(target_month))
file_list_str = [str(p.resolve()) for p in file_list]
file_list_str = sorted(file_list_str, reverse=True)
if len(file_list_str) == 1:
data = file_list_str[0]
print("Loading {}".format(data))
df = pd.read_csv(data, sep="\t")
from lxml import etree
import numpy as np
import pandas as pd
import re
from sklearn.model_selection import train_test_split
import Bio
from Bio import SeqIO
from pathlib import Path
import glob
#console
from tqdm import tqdm as tqdm
import re
import os
import itertools
#jupyter
#from tqdm import tqdm_notebook as tqdm
#not supported in current tqdm version
#from tqdm.autonotebook import tqdm
#import logging
#logging.getLogger('proteomics_utils').addHandler(logging.NullHandler())
#logger=logging.getLogger('proteomics_utils')
#for cd-hit
import subprocess
from sklearn.metrics import f1_score
import hashlib #for mhcii datasets
from utils.dataset_utils import split_clusters_single,pick_all_members_from_clusters
#######################################################################################################
#Parsing all sorts of protein data
#######################################################################################################
def parse_uniprot_xml(filename,max_entries=0,parse_features=[]):
'''parse uniprot xml file, which contains the full uniprot information (e.g. ftp://ftp.uniprot.org/pub/databases/uniprot/current_release/knowledgebase/complete/uniprot_sprot.xml.gz)
using custom low-level https://www.ibm.com/developerworks/xml/library/x-hiperfparse/
c.f. for full format https://www.uniprot.org/docs/uniprot.xsd
parse_features: a list of strings specifying the kind of features to be parsed such as "modified residue" for phosphorylation sites etc. (see https://www.uniprot.org/help/mod_res)
(see the xsd file for all possible entries)
'''
context = etree.iterparse(str(filename), events=["end"], tag="{http://uniprot.org/uniprot}entry")
context = iter(context)
rows =[]
for _, elem in tqdm(context):
parse_func_uniprot(elem,rows,parse_features=parse_features)
elem.clear()
while elem.getprevious() is not None:
del elem.getparent()[0]
if(max_entries > 0 and len(rows)==max_entries):
break
df=pd.DataFrame(rows).set_index("ID")
df['name'] = df.name.astype(str)
df['dataset'] = df.dataset.astype('category')
df['organism'] = df.organism.astype('category')
df['sequence'] = df.sequence.astype(str)
return df
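# Illustrative usage (the file path is hypothetical; Swiss-Prot XML dumps are large, so
# max_entries keeps the example small):
def _example_parse_sprot():
    return parse_uniprot_xml("uniprot_sprot.xml", max_entries=100,
                             parse_features=["modified residue"])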
def parse_func_uniprot(elem, rows, parse_features=[]):
'''extracting a single record from uniprot xml'''
seqs = elem.findall("{http://uniprot.org/uniprot}sequence")
sequence=""
#print(seqs)
for s in seqs:
sequence=s.text
#print("sequence",sequence)
if sequence =="" or str(sequence)=="None":
continue
else:
break
#Sequence & fragment
sequence=""
fragment_map = {"single":1, "multiple":2}
fragment = 0
seqs = elem.findall("{http://uniprot.org/uniprot}sequence")
for s in seqs:
if 'fragment' in s.attrib:
fragment = fragment_map[s.attrib["fragment"]]
sequence=s.text
if sequence != "":
break
#print("sequence:",sequence)
#print("fragment:",fragment)
#dataset
dataset=elem.attrib["dataset"]
#accession
accession = ""
accessions = elem.findall("{http://uniprot.org/uniprot}accession")
for a in accessions:
accession=a.text
if accession !="":#primary accession! https://www.uniprot.org/help/accession_numbers!!!
break
#print("accession",accession)
#protein existence (PE in plain text)
proteinexistence_map = {"evidence at protein level":5,"evidence at transcript level":4,"inferred from homology":3,"predicted":2,"uncertain":1}
proteinexistence = -1
accessions = elem.findall("{http://uniprot.org/uniprot}proteinExistence")
for a in accessions:
proteinexistence=proteinexistence_map[a.attrib["type"]]
break
#print("protein existence",proteinexistence)
#name
name = ""
names = elem.findall("{http://uniprot.org/uniprot}name")
for n in names:
name=n.text
break
#print("name",name)
#organism
organism = ""
organisms = elem.findall("{http://uniprot.org/uniprot}organism")
for s in organisms:
s1=s.findall("{http://uniprot.org/uniprot}name")
for s2 in s1:
if(s2.attrib["type"]=='scientific'):
organism=s2.text
break
if organism !="":
break
#print("organism",organism)
#dbReference: PMP,GO,Pfam, EC
ids = elem.findall("{http://uniprot.org/uniprot}dbReference")
pfams = []
gos =[]
ecs = []
pdbs =[]
for i in ids:
#print(i.attrib["id"],i.attrib["type"])
#cf. http://geneontology.org/external2go/uniprotkb_kw2go for Uniprot Keyword<->GO mapping
#http://geneontology.org/ontology/go-basic.obo for List of go terms
#https://www.uniprot.org/help/keywords_vs_go keywords vs. go
if(i.attrib["type"]=="GO"):
tmp1 = i.attrib["id"]
for i2 in i:
if i2.attrib["type"]=="evidence":
tmp2= i2.attrib["value"]
gos.append([int(tmp1[3:]),int(tmp2[4:])]) #first value is go code, second eco evidence ID (see mapping below)
elif(i.attrib["type"]=="Pfam"):
pfams.append(i.attrib["id"])
elif(i.attrib["type"]=="EC"):
ecs.append(i.attrib["id"])
elif(i.attrib["type"]=="PDB"):
pdbs.append(i.attrib["id"])
#print("PMP: ", pmp)
#print("GOs:",gos)
#print("Pfams:",pfam)
#print("ECs:",ecs)
#print("PDBs:",pdbs)
#keyword
keywords = elem.findall("{http://uniprot.org/uniprot}keyword")
keywords_lst = []
#print(keywords)
for k in keywords:
keywords_lst.append(int(k.attrib["id"][-4:]))#remove the KW-
#print("keywords: ",keywords_lst)
#comments = elem.findall("{http://uniprot.org/uniprot}comment")
#comments_lst=[]
##print(comments)
#for c in comments:
# if(c.attrib["type"]=="function"):
# for c1 in c:
# comments_lst.append(c1.text)
#print("function: ",comments_lst)
#ptm etc
if len(parse_features)>0:
ptms=[]
features = elem.findall("{http://uniprot.org/uniprot}feature")
for f in features:
if(f.attrib["type"] in parse_features):#only add features of the requested type
locs=[]
for l in f[0]:
locs.append(int(l.attrib["position"]))
ptms.append([f.attrib["type"],f.attrib["description"] if 'description' in f.attrib else "NaN",locs, f.attrib['evidence'] if 'evidence' in f.attrib else "NaN"])
#print(ptms)
data_dict={"ID": accession, "name": name, "dataset":dataset, "proteinexistence":proteinexistence, "fragment":fragment, "organism":organism, "ecs": ecs, "pdbs": pdbs, "pfams" : pfams, "keywords": keywords_lst, "gos": gos, "sequence": sequence}
if len(parse_features)>0:
data_dict["features"]=ptms
#print("all children:")
#for c in elem:
# print(c)
# print(c.tag)
# print(c.attrib)
rows.append(data_dict)
def parse_uniprot_seqio(filename,max_entries=0):
'''parse uniprot xml file using the SeqIO parser (smaller functionality e.g. does not extract evidence codes for GO)'''
sprot = SeqIO.parse(filename, "uniprot-xml")
rows = []
for p in tqdm(sprot):
accession = str(p.name)
name = str(p.id)
dataset = str(p.annotations['dataset'])
organism = str(p.annotations['organism'])
ecs, pdbs, pfams, gos = [],[],[],[]
for ref in p.dbxrefs:
k = ref.split(':')
if k[0] == 'GO':
gos.append(':'.join(k[1:]))
elif k[0] == 'Pfam':
pfams.append(k[1])
elif k[0] == 'EC':
ecs.append(k[1])
elif k[0] == 'PDB':
pdbs.append(k[1:])
if 'keywords' in p.annotations.keys():
keywords = p.annotations['keywords']
else:
keywords = []
sequence = str(p.seq)
row = {
'ID': accession,
'name':name,
'dataset':dataset,
'organism':organism,
'ecs':ecs,
'pdbs':pdbs,
'pfams':pfams,
'keywords':keywords,
'gos':gos,
'sequence':sequence}
rows.append(row)
if(max_entries>0 and len(rows)==max_entries):
break
df=pd.DataFrame(rows).set_index("ID")
df['name'] = df.name.astype(str)
df['dataset'] = df.dataset.astype('category')
df['organism'] = df.organism.astype('category')
df['sequence'] = df.sequence.astype(str)
return df
def filter_human_proteome(df_sprot):
'''extracts human proteome from swissprot proteines in DataFrame with column organism '''
is_Human = np.char.find(df_sprot.organism.values.astype(str), "Human") !=-1
is_human = np.char.find(df_sprot.organism.values.astype(str), "human") !=-1
is_sapiens = np.char.find(df_sprot.organism.values.astype(str), "sapiens") !=-1
is_Sapiens = np.char.find(df_sprot.organism.values.astype(str), "Sapiens") !=-1
return df_sprot[is_Human|is_human|is_sapiens|is_Sapiens]
def filter_aas(df, exclude_aas=["B","J","X","Z"]):
'''excludes sequences containing exclude_aas: B = D or N, J = I or L, X = unknown, Z = E or Q'''
return df[~df.sequence.apply(lambda x: any([e in x for e in exclude_aas]))]
######################################################################################################
def explode_clusters_df(df_cluster):
'''aux. function to convert cluster dataframe from one row per cluster to one row per ID'''
df=df_cluster.reset_index(level=0)
rows = []
if('repr_accession' in df.columns):#include representative if it exists
_ = df.apply(lambda row: [rows.append([nn,row['entry_id'], row['repr_accession']==nn ]) for nn in row.members], axis=1)
df_exploded = pd.DataFrame(rows, columns=['ID',"cluster_ID","representative"]).set_index(['ID'])
else:
_ = df.apply(lambda row: [rows.append([nn,row['entry_id']]) for nn in row.members], axis=1)
df_exploded = pd.DataFrame(rows, columns=['ID',"cluster_ID"]).set_index(['ID'])
return df_exploded
def parse_uniref(filename,max_entries=0,parse_sequence=False, df_selection=None, exploded=True):
'''parse uniref (clustered sequences) xml ftp://ftp.ebi.ac.uk/pub/databases/uniprot/uniref/uniref50/uniref50.xml.gz unzipped 100GB file
using custom low-level parser https://www.ibm.com/developerworks/xml/library/x-hiperfparse/
max_entries: only return first max_entries entries (0=all)
parse_sequence: additionally return the representative sequence
df_selection: only include entries with accessions that are present in df_selection.index (None keeps all records)
exploded: return one row per ID instead of one row per cluster
c.f. for full format ftp://ftp.ebi.ac.uk/pub/databases/uniprot/uniref/uniref50/README
'''
#issue with long texts https://stackoverflow.com/questions/30577796/etree-incomplete-child-text
#wait for end rather than start tag
context = etree.iterparse(str(filename), events=["end"], tag="{http://uniprot.org/uniref}entry")
context = iter(context)
rows =[]
for _, elem in tqdm(context):
parse_func_uniref(elem,rows,parse_sequence=parse_sequence, df_selection=df_selection)
elem.clear()
while elem.getprevious() is not None:
del elem.getparent()[0]
if(max_entries > 0 and len(rows)==max_entries):
break
df=pd.DataFrame(rows).set_index("entry_id")
df["num_members"]=df.members.apply(len)
if(exploded):
return explode_clusters_df(df)
return df
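# Illustrative usage (hypothetical path): restrict the parsed clusters to accessions that
# appear in an already-loaded Swiss-Prot dataframe.
def _example_parse_uniref(df_sprot):
    return parse_uniref("uniref50.xml", max_entries=1000, parse_sequence=False,
                        df_selection=df_sprot, exploded=True)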
def parse_func_uniref(elem, rows, parse_sequence=False, df_selection=None):
'''extract a single uniref entry'''
#entry ID
entry_id = elem.attrib["id"]
#print("cluster id",entry_id)
#name
name = ""
names = elem.findall("{http://uniprot.org/uniref}name")
for n in names:
name=n.text[9:]
break
#print("cluster name",name)
members=[]
#representative member
repr_accession = ""
repr_sequence =""
repr = elem.findall("{http://uniprot.org/uniref}representativeMember")
for r in repr:
s1=r.findall("{http://uniprot.org/uniref}dbReference")
for s2 in s1:
for s3 in s2:
if s3.attrib["type"]=="UniProtKB accession":
if(repr_accession == ""):
repr_accession = s3.attrib["value"]#pick primary accession
members.append(s3.attrib["value"])
if parse_sequence is True:
s1=r.findall("{http://uniprot.org/uniref}sequence")
for s2 in s1:
repr_sequence = s2.text
if repr_sequence !="":
break
#print("representative member accession:",repr_accession)
#print("representative member sequence:",repr_sequence)
#all members
repr = elem.findall("{http://uniprot.org/uniref}member")
for r in repr:
s1=r.findall("{http://uniprot.org/uniref}dbReference")
for s2 in s1:
for s3 in s2:
if s3.attrib["type"]=="UniProtKB accession":
members.append(s3.attrib["value"]) #add primary and secondary accessions
#print("members", members)
if(not(df_selection is None)): #apply selection filter
members = [y for y in members if y in df_selection.index]
#print("all children")
#for c in elem:
# print(c)
# print(c.tag)
# print(c.attrib)
if(len(members)>0):
data_dict={"entry_id": entry_id, "name": name, "repr_accession":repr_accession, "members":members}
if parse_sequence is True:
data_dict["repr_sequence"]=repr_sequence
rows.append(data_dict)
###########################################################################################################################
#proteins and peptides from fasta
###########################################################################################################################
def parse_uniprot_fasta(fasta_path, max_entries=0):
'''parse uniprot from fasta file (which contains less information than the corresponding xml but is also much smaller e.g. ftp://ftp.uniprot.org/pub/databases/uniprot/current_release/knowledgebase/complete/uniprot_sprot.fasta)'''
rows=[]
dataset_dict={"sp":"Swiss-Prot","tr":"TrEMBL"}
for seq_record in tqdm(SeqIO.parse(fasta_path, "fasta")):
sid=seq_record.id.split("|")
accession = sid[1]
dataset = dataset_dict[sid[0]]
name = sid[2]
description = seq_record.description
sequence=str(seq_record.seq)
#print(description)
m = re.search('PE=\d', description)
pe=int(m.group(0).split("=")[1])
m = re.search('OS=.* (?=OX=)', description)
organism=m.group(0).split("=")[1].strip()
data_dict={"ID": accession, "name": name, "dataset":dataset, "proteinexistence":pe, "organism":organism, "sequence": sequence}
rows.append(data_dict)
if(max_entries > 0 and len(rows)==max_entries):
break
df=pd.DataFrame(rows).set_index("ID")
df['name'] = df.name.astype(str)
df['dataset'] = df.dataset.astype('category')
df['organism'] = df.organism.astype('category')
df['sequence'] = df.sequence.astype(str)
return df
def proteins_from_fasta(fasta_path):
'''load proteins (as seqrecords) from fasta (just redirects)'''
return seqrecords_from_fasta(fasta_path)
def seqrecords_from_fasta(fasta_path):
'''load seqrecords from fasta file'''
seqrecords = list(SeqIO.parse(fasta_path, "fasta"))
return seqrecords
def seqrecords_to_sequences(seqrecords):
'''converts biopythons seqrecords into a plain list of sequences'''
return [str(p.seq) for p in seqrecords]
def sequences_to_fasta(sequences, fasta_path, sequence_id_prefix="s"):
'''save plain list of sequences to fasta'''
with open(fasta_path, "w") as output_handle:
for i,s in tqdm(enumerate(sequences)):
record = Bio.SeqRecord.SeqRecord(Bio.Seq.Seq(s), id=sequence_id_prefix+str(i), description="")
SeqIO.write(record, output_handle, "fasta")
def df_to_fasta(df, fasta_path):
'''Save column "sequence" from pandas DataFrame to fasta file using the index of the DataFrame as ID. Preserves original IDs in contrast to the function sequences_to_fasta()'''
with open(fasta_path, "w") as output_handle:
for row in df.iterrows():
record = Bio.SeqRecord.SeqRecord(Bio.Seq.Seq(row[1]["sequence"]), id=str(row[0]), description="")
SeqIO.write(record, output_handle, "fasta")
def sequences_to_df(sequences, sequence_id_prefix="s"):
data = {'ID': [(sequence_id_prefix+str(i) if sequence_id_prefix!="" else i) for i in range(len(sequences))], 'sequence': sequences}
df=pd.DataFrame.from_dict(data)
return df.set_index("ID")
def fasta_to_df(fasta_path):
seqs=SeqIO.parse(fasta_path, "fasta")
res=[]
for s in seqs:
res.append({"ID":s.id,"sequence":str(s.seq)})
return pd.DataFrame(res)
def peptides_from_proteins(protein_seqrecords, miss_cleavage=2,min_length=5,max_length=300):
'''extract peptides from proteins seqrecords by trypsin digestion
min_length: only return peptides of length min_length or greater (0 for all)
max_length: only return peptides of length max_length or smaller (0 for all)
'''
peptides = []
for seq in tqdm(protein_seqrecords):
peps = trypsin_digest(str(seq.seq), miss_cleavage)
peptides.extend(peps)
tmp=list(set(peptides))
if(min_length>0 and max_length>0):
tmp=[t for t in tmp if (len(t)>=min_length and len(t)<=max_length)]
elif(min_length==0 and max_length>0):
tmp=[t for t in tmp if len(t)<=max_length]
elif(min_length>0 and max_length==0):
tmp=[t for t in tmp if len(t)>=min_length]
print("Extracted",len(tmp),"unique peptides.")
return tmp
def trypsin_digest(proseq, miss_cleavage):
'''trypsin digestion of a protein sequence string
TRYPSIN from https://github.com/yafeng/trypsin/blob/master/trypsin.py'''
peptides=[]
cut_sites=[0]
for i in range(0,len(proseq)-1):
if proseq[i]=='K' and proseq[i+1]!='P':
cut_sites.append(i+1)
elif proseq[i]=='R' and proseq[i+1]!='P':
cut_sites.append(i+1)
if cut_sites[-1]!=len(proseq):
cut_sites.append(len(proseq))
if len(cut_sites)>2:
if miss_cleavage==0:
for j in range(0,len(cut_sites)-1):
peptides.append(proseq[cut_sites[j]:cut_sites[j+1]])
elif miss_cleavage==1:
for j in range(0,len(cut_sites)-2):
peptides.append(proseq[cut_sites[j]:cut_sites[j+1]])
peptides.append(proseq[cut_sites[j]:cut_sites[j+2]])
peptides.append(proseq[cut_sites[-2]:cut_sites[-1]])
elif miss_cleavage==2:
for j in range(0,len(cut_sites)-3):
peptides.append(proseq[cut_sites[j]:cut_sites[j+1]])
peptides.append(proseq[cut_sites[j]:cut_sites[j+2]])
peptides.append(proseq[cut_sites[j]:cut_sites[j+3]])
peptides.append(proseq[cut_sites[-3]:cut_sites[-2]])
peptides.append(proseq[cut_sites[-3]:cut_sites[-1]])
peptides.append(proseq[cut_sites[-2]:cut_sites[-1]])
else: #there is no trypsin site in the protein sequence
peptides.append(proseq)
return list(set(peptides))
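# Quick sanity check of the cleavage rules above (illustrative): K followed by P is not
# cleaved, R followed by G is.
def _demo_trypsin_digest():
    return trypsin_digest("AKPLRGDK", miss_cleavage=0)  # -> ['AKPLR', 'GDK'] (set order may vary)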
###########################################################################
# Processing CD-HIT clusters
###########################################################################
def clusters_df_from_sequence_df(df,threshold=[1.0,0.9,0.5],alignment_coverage=[0.0,0.9,0.8],memory=16000, threads=8, exploded=True, verbose=False):
'''create clusters df from sequence df (using cd hit)
df: dataframe with sequence information
threshold: similarity threshold for clustering (pass a list for hierarchical clustering e.g [1.0, 0.9, 0.5])
alignment_coverage: required minimum coverage of the longer sequence (to mimic uniref https://www.uniprot.org/help/uniref)
memory: limit available memory
threads: limit number of threads
exploded: return exploded view of the dataframe (one row for every member vs. one row for every cluster)
uses CD-HIT for clustering
https://github.com/weizhongli/cdhit/wiki/3.-User's-Guide
copy cd-hit into ~/bin
TODO: extend to psi-cd-hit for thresholds smaller than 0.4
'''
if verbose:
print("Exporting original dataframe as fasta...")
fasta_file = "cdhit.fasta"
df_original_index = list(df.index) #reindex the dataframe since cdhit can only handle 19 letters
df = df.reset_index(drop=True)
df_to_fasta(df, fasta_file)
if(not(isinstance(threshold, list))):
threshold=[threshold]
alignment_coverage=[alignment_coverage]
assert(len(threshold)==len(alignment_coverage))
fasta_files=[]
for i,thr in enumerate(threshold):
if(thr< 0.4):#use psi-cd-hit here
print("thresholds lower than 0.4 require psi-cd-hit.pl require psi-cd-hit.pl (building on BLAST) which is currently not supported")
return pd.DataFrame()
elif(thr<0.5):
wl = 2
elif(thr<0.6):
wl = 3
elif(thr<0.7):
wl = 4
else:
wl = 5
aL = alignment_coverage[i]
#cd-hit -i nr -o nr80 -c 0.8 -n 5
#cd-hit -i nr80 -o nr60 -c 0.6 -n 4
#psi-cd-hit.pl -i nr60 -o nr30 -c 0.3
if verbose:
print("Clustering using cd-hit at threshold", thr, "using wordlength", wl, "and alignment coverage", aL, "...")
fasta_file_new= "cdhit"+str(int(thr*100))+".fasta"
command = "cd-hit -i "+fasta_file+" -o "+fasta_file_new+" -c "+str(thr)+" -n "+str(wl)+" -aL "+str(aL)+" -M "+str(memory)+" -T "+str(threads)
if(verbose):
print(command)
process= subprocess.Popen(command.split(), stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
output, error = process.communicate()
if(verbose):
print(output)
if(error !=""):
print(error)
fasta_files.append(fasta_file)
if(i==len(threshold)-1):
fasta_files.append(fasta_file_new)
fasta_file= fasta_file_new
#join results from all clustering steps
if verbose:
print("Joining results from different clustering steps...")
for i,f in enumerate(reversed(fasta_files[1:])):
if verbose:
print("Processing",f,"...")
if(i==0):
df_clusters = parse_cdhit_clstr(f+".clstr",exploded=False)
else:
df_clusters2 = parse_cdhit_clstr(f+".clstr",exploded=False)
for id,row in df_clusters.iterrows():
members = row['members']
new_members = [list(df_clusters2[df_clusters2.repr_accession==y].members)[0] for y in members]
new_members = [item for sublist in new_members for item in sublist] #flattened
row['members']=new_members
df_clusters["members"]=df_clusters["members"].apply(lambda x:[df_original_index[int(y)] for y in x])
df_clusters["repr_accession"]=df_clusters["repr_accession"].apply(lambda x:df_original_index[int(x)])
if(exploded):
return explode_clusters_df(df_clusters)
return df_clusters
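# Illustrative usage (requires the cd-hit binary on the PATH and a dataframe with a
# 'sequence' column): hierarchical clustering at 100% / 90% / 50% identity, mimicking UniRef.
def _example_clustering(df_sequences):
    return clusters_df_from_sequence_df(df_sequences, threshold=[1.0, 0.9, 0.5],
                                        alignment_coverage=[0.0, 0.9, 0.8], verbose=True)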
def parse_cdhit_clstr(filename, exploded=True):
'''Aux. Function (used by clusters_df_from_sequence_df) to parse CD-HITs clstr output file in a similar way as the uniref data
for the format see https://github.com/weizhongli/cdhit/wiki/3.-User's-Guide#CDHIT
exploded: single row for every ID instead of single for every cluster
'''
def save_cluster(rows,members,representative):
if(len(members)>0):
rows.append({"entry_id":filename[:-6]+"_"+representative, "members":members, "repr_accession":representative})
rows=[]
with open(filename, 'r') as f:
members=[]
representative=""
for l in tqdm(f):
if(l[0]==">"):
save_cluster(rows,members,representative)
members=[]
representative=""
else:
member=(l.split(">")[1]).split("...")[0]
members.append(member)
if "*" in l:
representative = member
save_cluster(rows,members,representative)
df=pd.DataFrame(rows).set_index("entry_id")
if(exploded):
return explode_clusters_df(df)
return df
###########################################################################
# MHC DATA
###########################################################################
######### Helper functions ##########
def _label_binder(data, threshold=500, measurement_column="meas"):
# Drop ambiguous entries: measurements above the threshold marked '<' or below the threshold marked '>'
to_drop = (( (data['inequality']=='<')&(data[measurement_column]>threshold))|((data['inequality']=='>')&(data[measurement_column]<threshold))).mean()
if to_drop > 0:
print('Dropping {} % because of ambiguous inequality'.format(to_drop))
data = data[~(( (data['inequality']=='<')&(data[measurement_column]>threshold))|((data['inequality']=='>')&(data[measurement_column]<threshold)))]
# Labeling
data['label'] = (1* data[measurement_column]<=threshold).astype(int)
return data
def _transform_ic50(data, how="log",max_ic50=50000.0, inequality_offset=True, label_column="meas"):
"""Transform ic50 measurements
how: "log" logarithmic transform, inequality "=" mapped to [0,1], inequality ">" mapped to [2,3], inequality "<" mapped to [4,5]
"norm"
"cap"
"""
x = data[label_column]
if how=="cap":
x = np.minimum(x, 50000)
elif how=="norm":
x = np.minimum(x, 50000)
x = (x - x.mean()) / x.std()
elif how=="log":
# log transform
x = 1 - (np.log(x)/np.log(max_ic50))
x = np.minimum(1.0, np.maximum(0.0,x))
if(inequality_offset):
# add offsets for loss
offsets = pd.Series(data['inequality']).map({'=': 0, '>': 2, '<': 4,}).values
x += offsets
return x
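# Worked example of the log transform (illustrative): an IC50 of 500 nM with max_ic50=50000
# maps to 1 - log(500)/log(50000) ~ 0.426; a '>' inequality shifts the target by +2.
def _demo_transform_ic50():
    df = pd.DataFrame({"meas": [500.0, 500.0], "inequality": ["=", ">"]})
    return _transform_ic50(df, how="log")  # ~ [0.426, 2.426]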
def _string_index(data):
# Add prefix letter "a" to the numerical index (such that it is clearly a string in order to avoid later errors).
data["ID"] = data.index
data["ID"] = data["ID"].apply(lambda x: "a"+ str(x))
data = data.set_index(["ID"])
return data
def _format_alleles(x):
if x[:3]=='HLA':
return x[:5]+'-'+x[6:8]+x[9:]
if x[:4]=='Mamu':
return x[:6]+'-'+x[7:]
else:
return x
def _get_allele_ranking(data_dir='.'):
'''
Allele ranking should be the same across different datasets (noMS, withMS) to avoid confusion.
Thus, the ranking is based on the larger withMS dataset
'''
data_path = Path(data_dir)
curated_withMS_path = data_path/'data_curated.20180219'/'curated_training_data.with_mass_spec.csv'
df = pd.read_csv(curated_withMS_path)
# Drop duplicates
df = df.drop_duplicates(["allele", "peptide","measurement_value"])
lens = df['peptide'].apply(len)
df = df[(lens>7) & (lens<16)]
# Keep only alleles with min 25 peptides like MHC flurry
peptides_per_allele = df.groupby('allele').size()
alleles_select = peptides_per_allele[peptides_per_allele>24].index
df = df[df['allele'].isin(alleles_select)]
mhc_rank = df.groupby('allele').size().sort_values(ascending=False).reset_index()['allele']
return mhc_rank
def netmhpan_4_0_special_allele_map(allele):
minus_idx = allele.find("-")
pre, post = allele[:minus_idx], allele[minus_idx+1:]
if pre=="Mamu":
special_map = {"A01": "A1*00101",
"A02": "A1*00201",
"A07": "A1*00701",
"A11": "A1*01101",
"A2201": "A1*02201",
"A2601": "A1*02601",
'A20102': "A2*00102", # "A2*0102"
"A70103": "A7*00103", # "A7*0103"
"B01": "B*00101",
"B03": "B*00301",
"B04": "B*00401",
"B08": "B*00801",
"B17": "B*01701",
"B52": "B*05201",
"B1001": "B*01001",
'B3901': "B*03901", #?
'B6601': "B*06601", #?
'B8301': "B*08301", #?
'B8701': "B*08701", #?
}
if post in special_map.keys():
post = special_map[post]
elif pre=="BoLA":
#source: select allele menu on http://www.cbs.dtu.dk/services/NetMHCpan-4.0/
special_map = {
"D18.4": "1:02301",
"T2a": "2:01201",
"AW10": "3:00101",
"JSP.1": "3:00201",
"HD6": "6:01301",
"T2b": "6:04101"
}
if post in special_map.keys():
post = special_map[post]
return pre + "-" + post
def prepare_pseudo_mhc_sequences(mhc_class, data_dir='.'):
"""
The pseudo sequences are provided with the NetMHCpan4.1/NetMHCIIpan4.0 data.
"""
data_path = Path(data_dir)
if mhc_class=="II":
pseudo_seq_file = "NetMHCIIpan_train/pseudosequence.2016.all.X.dat"
else:
pseudo_seq_file = "NetMHCpan_4_1_train/MHC_pseudo.dat"
pseudo_mhc = []
with open(data_path/pseudo_seq_file, "r") as f:
for line in f:
allele, seq = line.split()
pseudo_mhc.append((allele,seq))
pseudo_mhc = pd.DataFrame(pseudo_mhc, columns=("allele", "sequence1"))
pseudo_mhc = pseudo_mhc[~pseudo_mhc["allele"].duplicated()]
return pseudo_mhc
########## Generate DataFrame ##########
def generate_mhc_kim(cv_type=None, mhc_select=0, regression=False, transform_ic50=None, to_csv=False, filename=None, data_dir='.', keep_all_alleles=False):
'''
cv_type: string, strategy for 5-fold cross validation, options:
- None: No cv-strategy, cv column is filled with 'TBD'
- sr: removal of similar peptides separately in the binder/non-binder sets, using a similarity threshold of 80%; similarity found with a 'Hobohm 1'-like algorithm
- gs: grouping similar peptides in the same cv-partition
- rnd: random partitioning
transform_ic50: string, ignored if not regression
- None: use raw ic50 measurements as labels
- cap: cap ic50 meas at 50000
- norm: cap ic50 meas at 50000 and normalize
- log: take log_50000 and cap at 50000
mhc_select: int between 0 and 50, choose allele by frequency rank in Binding Data 2009
'''
# Binding Data 2009. Used by Kim et al for Cross Validation. Used by MHCnugget for training.
bd09_file = 'bdata.2009.mhci.public.1.txt'
# Similar peptides removed
bd09_cv_sr_file = 'bdata.2009.mhci.public.1.cv_sr.txt'
# Random partitioning
bd09_cv_rnd_file = 'bdata.2009.mhci.public.1.cv_rnd.txt'
# Similar peptides grouped
bd09_cv_gs_file = 'bdata.2009.mhci.public.1.cv_gs.txt'
# 'blind' used by Kim et al to estimate true predictive accuracy. Used by MHCnugget for testing.
# Generated by subtracting BD2009 from BD 2013 and removing similar peptides with respect to BD2009
# (similar = at least 80% similarity and same length)
bdblind_file = 'bdata.2013.mhci.public.blind.1.txt'
data_dir = Path(data_dir)/"benchmark_mhci_reliability/binding"
# Read in data with specified cv type
if cv_type=='sr':
bd09 = pd.read_csv(data_dir/'bd2009.1'/bd09_cv_sr_file, sep='\t')
elif cv_type=='gs':
bd09 = pd.read_csv(data_dir/'bd2009.1'/bd09_cv_gs_file, sep='\t')
elif cv_type=='rnd':
bd09 = pd.read_csv(data_dir/'bd2009.1'/bd09_cv_rnd_file, sep='\t')
else:
bd09 = pd.read_csv(data_dir/'bd2009.1'/bd09_file, sep='\t')
# Read in blind data
bdblind = pd.read_csv(data_dir/'blind.1'/bdblind_file, sep='\t')
# alleles are spelled differently in bdblind and bd2009, change spelling in bdblind
bdblind['mhc'] = bdblind['mhc'].apply(_format_alleles)
# Confirm there is no overlap
print('{} entries from the blind data set are in the 2009 data set'.format(bdblind[['sequence', 'mhc']].isin(bd09[['sequence', 'mhc']]).all(axis=1).sum()))
if regression:
# For now: use only quantitative measurements, later tuple (label, inequality as int)
#print('Using quantitative {} % percent of the data.'.format((bd09['inequality']=='=').mean()))
#bd09 = bd09[bd09['inequality']=='=']
#bd09.rename(columns={'meas':'label'}, inplace=True)
#bdblind = bdblind[bdblind['inequality']=='=']
#bdblind.rename(columns={'meas':'label'}, inplace=True)
# Convert ic50 measurements to range [0,1]
if transform_ic50 is not None:
bd09['label'] = _transform_ic50(bd09, how=transform_ic50)
bdblind['label'] = _transform_ic50(bdblind, how=transform_ic50)
else:
# Labeling for binder/NonBinder
bd09 = _label_binder(bd09)[['mhc', 'sequence', 'label', 'cv']]
#bdblind = _label_binder(bdblind)[['mhc', 'sequence', 'label', 'cv']]
bdblind = bdblind.rename(columns={"meas":"label"})
if not keep_all_alleles:
# in bd09 (train set) keep only entries with mhc also occuring in bdblind (test set)
bd09 = bd09[bd09['mhc'].isin(bdblind['mhc'])]
# Combine
bdblind['cv'] = 'blind'
bd = pd.concat([bd09, bdblind], ignore_index=True)
if not(regression):
# Test if there is at least one binder in bd09 AND bdblind
min_one_binder = pd.concat([(bd09.groupby('mhc')['label'].sum() > 0), (bdblind.groupby('mhc')['label'].sum() > 0)], axis=1).all(axis=1)
print('For {} alleles there is not at least one binder in bd 2009 AND bd blind. These will be dismissed.'.format((~min_one_binder).sum()))
alleles = bd['mhc'].unique()
allesles_to_keep = alleles[min_one_binder]
# Dismiss alleles without at least one binder
bd = bd[bd['mhc'].isin(allesles_to_keep)]
# Make allele ranking based on binding data 2009
mhc_rank = bd[bd['cv']!='blind'].groupby('mhc').size().sort_values(ascending=False).reset_index()['mhc']
# Select allele
if mhc_select is not None:
print('Selecting allele {}'.format(mhc_rank.loc[mhc_select]))
bd = bd[bd['mhc']==mhc_rank.loc[mhc_select]][['sequence', 'label', 'cv']]
# Turn indices into strings
bd = _string_index(bd)
if to_csv and filename is not None:
bd.to_csv(filename)
return bd
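# Illustrative call (assumes the Kim et al. benchmark files are unpacked under data_dir):
def _example_generate_kim():
    return generate_mhc_kim(cv_type="gs", mhc_select=0, regression=True,
                            transform_ic50="log", data_dir=".")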
def generate_mhc_flurry(ms='noMS', mhc_select=0, regression=False, transform_ic50=None, binder_threshold=500, filter_length=True, label_binary=False, random_seed=42,data_dir='.'):
'''
Load the MHC I data curated and uploaded to https://data.mendeley.com/datasets/8pz43nvvxh/1 by MHCFlurry
Used by them for training and model selection
ms: string, specifies if mass spectroscopy data should be included, options:
- noMS: MHCFlurry no MS dataset
- withMS: MHCFlurry with MS dataset
mhc_select: int between 0 and 150 (noMS)/ 188 (withMS), choose allele by frequency rank
filter_length: boolean, MHCFlurry selected peptides of length 8-15 (their model only deals with these lengths)
'''
data_path = Path(data_dir)
curated_noMS_path = data_path/'data_curated.20180219'/'curated_training_data.no_mass_spec.csv'
curated_withMS_path = data_path/'data_curated.20180219'/'curated_training_data.with_mass_spec.csv'
if ms=='noMS':
df = pd.read_csv(curated_noMS_path)
elif ms=='withMS':
df = pd.read_csv(curated_withMS_path)
import streamlit as st
import plotly_express as px
import pandas as pd
from plotnine import *
from plotly.tools import mpl_to_plotly as ggplotly
import numpy as np
import math
import scipy.stats as ss
from scipy.stats import *
def app():
# add a select widget to the side bar
st.sidebar.subheader("Discrete Probaility")
prob_choice = st.sidebar.radio("",["Discrete Probability","Binomial Probability","Geometric Probability","Poisson Probability"])
st.markdown('Discrete Probability')
if prob_choice == "Discrete Probability":
top = st.columns((1,1,2))
bottom = st.columns((1,1))
with top[0]:
#st.subheader("Discrete Probaility")
gs_URL = st.session_state.gs_URL
googleSheetId = gs_URL.split("spreadsheets/d/")[1].split("/edit")[0]
worksheetName = st.text_input("Sheet Name:","Discrete")
URL = f'https://docs.google.com/spreadsheets/d/{googleSheetId}/gviz/tq?tqx=out:csv&sheet={worksheetName}'
if st.button('Refresh'):
df = pd.read_csv(URL)
df = df.dropna(axis=1, how="all")
df = pd.read_csv(URL)
df = df.dropna(axis=1, how="all")
with bottom[0]:
st.dataframe(df)
global numeric_columns
global non_numeric_columns
try:
numeric_columns = list(df.select_dtypes(['float', 'int']).columns)
non_numeric_columns = list(df.select_dtypes(['object']).columns)
except Exception as e:
print(e)
st.write("Please upload file to the application.")
with top[1]:
x_axis = st.selectbox('X-Axis', options=numeric_columns, index=0)
prob = st.selectbox('Probabilities', options=numeric_columns, index = 1)
cat = 0
if len(non_numeric_columns) >= 1:
cat = 1
#cv = st.selectbox("Group", options=list(df[non_numeric_columns[0]].unique()))
if cat == 0:
x = df[x_axis]
p_x = df[prob]
m = sum(x*p_x)
sd = math.sqrt(sum((x-m)**2*p_x))
data = pd.DataFrame({"Mean":m,"Std Dev":sd},index = [0])
with top[2]:
dph = ggplot(df) + geom_bar(aes(x=df[df.columns[0]],weight=df[df.columns[1]]),color="darkblue", fill="lightblue")
st.pyplot(ggplot.draw(dph))
with bottom[1]:
st.write(data)
if cat != 0:
with bottom[1]:
data = pd.DataFrame(columns=['Type', 'Mean', 'Standard Deviation'])
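# Quick numeric check of the discrete mean / standard-deviation formulas used above
# (illustrative values): X = {1: 0.2, 2: 0.5, 3: 0.3} gives mean 2.1 and sd 0.7.
def _demo_discrete_stats():
    x = pd.Series([1, 2, 3])
    p_x = pd.Series([0.2, 0.5, 0.3])
    m = sum(x * p_x)                          # 2.1
    sd = math.sqrt(sum((x - m) ** 2 * p_x))   # 0.7
    return m, sd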
#py_time_series_basic.py
import datetime
import pandas as pd
index=pd.date_range(start='1/1/2017', periods=5, freq='d')
index=pd.date_range(start='Jan 1 2017', periods=5, freq='d')
index=pd.date_range(start=datetime.datetime(2017,1,1), \
periods=5, freq='d')
index=pd.date_range(start='1 Jan 2017', end='5 Jan 2017')
print(index)
print()
data=[10, 20, 30, 40]
index=pd.date_range(start='1 Jan 2017', end='4 Jan 2017')
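# The original snippet stops here; a natural next step (illustrative, not from the source)
# is to combine the values and the index into a Series:
ts=pd.Series(data, index=index)
print(ts)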
# -*- coding: utf-8 -*-
import warnings
from datetime import datetime, timedelta
import pytest
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from pandas.errors import PerformanceWarning
from pandas import (Timestamp, Timedelta, Series,
DatetimeIndex, TimedeltaIndex,
date_range)
@pytest.fixture(params=[None, 'UTC', 'Asia/Tokyo',
'US/Eastern', 'dateutil/Asia/Singapore',
'dateutil/US/Pacific'])
def tz(request):
return request.param
@pytest.fixture(params=[pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)],
ids=str)
def delta(request):
# Several ways of representing two hours
return request.param
@pytest.fixture(
params=[
datetime(2011, 1, 1),
DatetimeIndex(['2011-01-01', '2011-01-02']),
DatetimeIndex(['2011-01-01', '2011-01-02']).tz_localize('US/Eastern'),
np.datetime64('2011-01-01'),
Timestamp('2011-01-01')],
ids=lambda x: type(x).__name__)
def addend(request):
return request.param
class TestDatetimeIndexArithmetic(object):
def test_dti_add_timestamp_raises(self):
idx = DatetimeIndex(['2011-01-01', '2011-01-02'])
msg = "cannot add DatetimeIndex and Timestamp"
with tm.assert_raises_regex(TypeError, msg):
idx + Timestamp('2011-01-01')
def test_dti_radd_timestamp_raises(self):
idx = DatetimeIndex(['2011-01-01', '2011-01-02'])
msg = "cannot add DatetimeIndex and Timestamp"
with tm.assert_raises_regex(TypeError, msg):
Timestamp('2011-01-01') + idx
# -------------------------------------------------------------
# Binary operations DatetimeIndex and int
def test_dti_add_int(self, tz, one):
# Variants of `one` for #19012
rng = pd.date_range('2000-01-01 09:00', freq='H',
periods=10, tz=tz)
result = rng + one
expected = pd.date_range('2000-01-01 10:00', freq='H',
periods=10, tz=tz)
tm.assert_index_equal(result, expected)
def test_dti_iadd_int(self, tz, one):
rng = pd.date_range('2000-01-01 09:00', freq='H',
periods=10, tz=tz)
expected = pd.date_range('2000-01-01 10:00', freq='H',
periods=10, tz=tz)
rng += one
tm.assert_index_equal(rng, expected)
def test_dti_sub_int(self, tz, one):
rng = pd.date_range('2000-01-01 09:00', freq='H',
periods=10, tz=tz)
result = rng - one
expected = pd.date_range('2000-01-01 08:00', freq='H',
periods=10, tz=tz)
tm.assert_index_equal(result, expected)
def test_dti_isub_int(self, tz, one):
rng = pd.date_range('2000-01-01 09:00', freq='H',
periods=10, tz=tz)
expected = pd.date_range('2000-01-01 08:00', freq='H',
periods=10, tz=tz)
rng -= one
tm.assert_index_equal(rng, expected)
# -------------------------------------------------------------
# Binary operations DatetimeIndex and timedelta-like
def test_dti_add_timedeltalike(self, tz, delta):
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
result = rng + delta
expected = pd.date_range('2000-01-01 02:00',
'2000-02-01 02:00', tz=tz)
tm.assert_index_equal(result, expected)
def test_dti_iadd_timedeltalike(self, tz, delta):
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
expected = pd.date_range('2000-01-01 02:00',
'2000-02-01 02:00', tz=tz)
rng += delta
tm.assert_index_equal(rng, expected)
def test_dti_sub_timedeltalike(self, tz, delta):
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
expected = pd.date_range('1999-12-31 22:00',
'2000-01-31 22:00', tz=tz)
result = rng - delta
tm.assert_index_equal(result, expected)
def test_dti_isub_timedeltalike(self, tz, delta):
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
expected = pd.date_range('1999-12-31 22:00',
'2000-01-31 22:00', tz=tz)
rng -= delta
tm.assert_index_equal(rng, expected)
# -------------------------------------------------------------
# Binary operations DatetimeIndex and TimedeltaIndex/array
def test_dti_add_tdi(self, tz):
# GH 17558
dti = DatetimeIndex([Timestamp('2017-01-01', tz=tz)] * 10)
tdi = pd.timedelta_range('0 days', periods=10)
expected = pd.date_range('2017-01-01', periods=10, tz=tz)
# add with TimedeltaIndex
result = dti + tdi
tm.assert_index_equal(result, expected)
result = tdi + dti
tm.assert_index_equal(result, expected)
# add with timedelta64 array
result = dti + tdi.values
tm.assert_index_equal(result, expected)
result = tdi.values + dti
tm.assert_index_equal(result, expected)
def test_dti_iadd_tdi(self, tz):
# GH 17558
dti = DatetimeIndex([Timestamp('2017-01-01', tz=tz)] * 10)
tdi = pd.timedelta_range('0 days', periods=10)
expected = pd.date_range('2017-01-01', periods=10, tz=tz)
# iadd with TimedeltaIndex
result = DatetimeIndex([Timestamp('2017-01-01', tz=tz)] * 10)
result += tdi
tm.assert_index_equal(result, expected)
result = pd.timedelta_range('0 days', periods=10)
result += dti
tm.assert_index_equal(result, expected)
# iadd with timedelta64 array
result = DatetimeIndex([Timestamp('2017-01-01', tz=tz)] * 10)
result += tdi.values
tm.assert_index_equal(result, expected)
result = pd.timedelta_range('0 days', periods=10)
result += dti
tm.assert_index_equal(result, expected)
def test_dti_sub_tdi(self, tz):
# GH 17558
dti = DatetimeIndex([Timestamp('2017-01-01', tz=tz)] * 10)
tdi = pd.timedelta_range('0 days', periods=10)
expected = pd.date_range('2017-01-01', periods=10, tz=tz, freq='-1D')
# sub with TimedeltaIndex
result = dti - tdi
tm.assert_index_equal(result, expected)
msg = 'cannot subtract TimedeltaIndex and DatetimeIndex'
with tm.assert_raises_regex(TypeError, msg):
tdi - dti
# sub with timedelta64 array
result = dti - tdi.values
tm.assert_index_equal(result, expected)
msg = 'cannot perform __neg__ with this index type:'
with tm.assert_raises_regex(TypeError, msg):
tdi.values - dti
def test_dti_isub_tdi(self, tz):
# GH 17558
dti = DatetimeIndex([Timestamp('2017-01-01', tz=tz)] * 10)
tdi = pd.timedelta_range('0 days', periods=10)
expected = pd.date_range('2017-01-01', periods=10, tz=tz, freq='-1D')
# isub with TimedeltaIndex
result = DatetimeIndex([Timestamp('2017-01-01', tz=tz)] * 10)
result -= tdi
tm.assert_index_equal(result, expected)
msg = 'cannot subtract TimedeltaIndex and DatetimeIndex'
with tm.assert_raises_regex(TypeError, msg):
tdi -= dti
# isub with timedelta64 array
result = DatetimeIndex([Timestamp('2017-01-01', tz=tz)] * 10)
result -= tdi.values
tm.assert_index_equal(result, expected)
msg = '|'.join(['cannot perform __neg__ with this index type:',
'ufunc subtract cannot use operands with types'])
with tm.assert_raises_regex(TypeError, msg):
tdi.values -= dti
# -------------------------------------------------------------
# Binary Operations DatetimeIndex and datetime-like
# TODO: A couple other tests belong in this section. Move them in
# A PR where there isn't already a giant diff.
def test_add_datetimelike_and_dti(self, addend):
# GH#9631
dti = DatetimeIndex(['2011-01-01', '2011-01-02'])
msg = 'cannot add DatetimeIndex and {0}'.format(
type(addend).__name__)
with tm.assert_raises_regex(TypeError, msg):
dti + addend
with tm.assert_raises_regex(TypeError, msg):
addend + dti
def test_add_datetimelike_and_dti_tz(self, addend):
# GH#9631
dti_tz = DatetimeIndex(['2011-01-01',
'2011-01-02']).tz_localize('US/Eastern')
msg = 'cannot add DatetimeIndex and {0}'.format(
type(addend).__name__)
with tm.assert_raises_regex(TypeError, msg):
dti_tz + addend
with tm.assert_raises_regex(TypeError, msg):
addend + dti_tz
# -------------------------------------------------------------
def test_sub_dti_dti(self):
# previously performed setop (deprecated in 0.16.0), now changed to
# return subtraction -> TimeDeltaIndex (GH ...)
dti = date_range('20130101', periods=3)
dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
dti_tz2 = date_range('20130101', periods=3).tz_localize('UTC')
expected = TimedeltaIndex([0, 0, 0])
result = dti - dti
tm.assert_index_equal(result, expected)
result = dti_tz - dti_tz
tm.assert_index_equal(result, expected)
with pytest.raises(TypeError):
dti_tz - dti
with pytest.raises(TypeError):
dti - dti_tz
with pytest.raises(TypeError):
dti_tz - dti_tz2
# isub
dti -= dti
tm.assert_index_equal(dti, expected)
# different length raises ValueError
dti1 = date_range('20130101', periods=3)
dti2 = date_range('20130101', periods=4)
with pytest.raises(ValueError):
dti1 - dti2
# NaN propagation
dti1 = DatetimeIndex(['2012-01-01', np.nan, '2012-01-03'])
dti2 = DatetimeIndex(['2012-01-02', '2012-01-03', np.nan])
expected = TimedeltaIndex(['1 days', np.nan, np.nan])
result = dti2 - dti1
tm.assert_index_equal(result, expected)
def test_sub_period(self):
# GH 13078
# not supported, check TypeError
p = pd.Period('2011-01-01', freq='D')
for freq in [None, 'D']:
idx = pd.DatetimeIndex(['2011-01-01', '2011-01-02'], freq=freq)
with pytest.raises(TypeError):
idx - p
with pytest.raises(TypeError):
p - idx
def test_ufunc_coercions(self):
idx = date_range('2011-01-01', periods=3, freq='2D', name='x')
delta = np.timedelta64(1, 'D')
for result in [idx + delta, np.add(idx, delta)]:
assert isinstance(result, DatetimeIndex)
exp = date_range('2011-01-02', periods=3, freq='2D', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == '2D'
for result in [idx - delta, np.subtract(idx, delta)]:
assert isinstance(result, DatetimeIndex)
exp = date_range('2010-12-31', periods=3, freq='2D', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == '2D'
delta = np.array([np.timedelta64(1, 'D'), np.timedelta64(2, 'D'),
np.timedelta64(3, 'D')])
for result in [idx + delta, np.add(idx, delta)]:
assert isinstance(result, DatetimeIndex)
exp = DatetimeIndex(['2011-01-02', '2011-01-05', '2011-01-08'],
freq='3D', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == '3D'
for result in [idx - delta, np.subtract(idx, delta)]:
assert isinstance(result, DatetimeIndex)
exp = DatetimeIndex(['2010-12-31', '2011-01-01', '2011-01-02'],
freq='D', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == 'D'
def test_datetimeindex_sub_timestamp_overflow(self):
dtimax = pd.to_datetime(['now', pd.Timestamp.max])
dtimin = pd.to_datetime(['now', pd.Timestamp.min])
tsneg = Timestamp('1950-01-01')
ts_neg_variants = [tsneg,
tsneg.to_pydatetime(),
tsneg.to_datetime64().astype('datetime64[ns]'),
tsneg.to_datetime64().astype('datetime64[D]')]
tspos = Timestamp('1980-01-01')
ts_pos_variants = [tspos,
tspos.to_pydatetime(),
tspos.to_datetime64().astype('datetime64[ns]'),
tspos.to_datetime64().astype('datetime64[D]')]
for variant in ts_neg_variants:
with pytest.raises(OverflowError):
dtimax - variant
expected = pd.Timestamp.max.value - tspos.value
for variant in ts_pos_variants:
res = dtimax - variant
assert res[1].value == expected
expected = pd.Timestamp.min.value - tsneg.value
for variant in ts_neg_variants:
res = dtimin - variant
assert res[1].value == expected
for variant in ts_pos_variants:
with pytest.raises(OverflowError):
dtimin - variant
@pytest.mark.parametrize('box', [np.array, pd.Index])
def test_dti_add_offset_array(self, tz, box):
# GH#18849
dti = pd.date_range('2017-01-01', periods=2, tz=tz)
other = box([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)])
with tm.assert_produces_warning(PerformanceWarning):
res = dti + other
expected = DatetimeIndex([dti[n] + other[n] for n in range(len(dti))],
name=dti.name, freq='infer')
tm.assert_index_equal(res, expected)
with tm.assert_produces_warning(PerformanceWarning):
res2 = other + dti
tm.assert_index_equal(res2, expected)
@pytest.mark.parametrize('box', [np.array, pd.Index])
def test_dti_sub_offset_array(self, tz, box):
# GH#18824
dti = pd.date_range('2017-01-01', periods=2, tz=tz)
other = box([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)])
with tm.assert_produces_warning(PerformanceWarning):
res = dti - other
expected = DatetimeIndex([dti[n] - other[n] for n in range(len(dti))],
name=dti.name, freq='infer')
tm.assert_index_equal(res, expected)
@pytest.mark.parametrize('names', [(None, None, None),
('foo', 'bar', None),
('foo', 'foo', 'foo')])
def test_dti_with_offset_series(self, tz, names):
# GH#18849
dti = pd.date_range('2017-01-01', periods=2, tz=tz, name=names[0])
other = Series([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)],
name=names[1])
expected_add = Series([dti[n] + other[n] for n in range(len(dti))],
name=names[2])
with tm.assert_produces_warning(PerformanceWarning):
res = dti + other
tm.assert_series_equal(res, expected_add)
with tm.assert_produces_warning(PerformanceWarning):
res2 = other + dti
tm.assert_series_equal(res2, expected_add)
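# Standalone illustration (not part of the test class above): the core behaviour
# exercised by test_sub_dti_dti, sketched with plain pandas calls. Assumes a
# pandas version where DatetimeIndex - DatetimeIndex returns a TimedeltaIndex.
def _demo_dti_subtraction():
    import pandas as pd
    dti1 = pd.date_range('2013-01-01', periods=3)
    dti2 = pd.date_range('2013-01-02', periods=3)
    diff = dti2 - dti1  # elementwise difference -> TimedeltaIndex(['1 days', '1 days', '1 days'])
    assert isinstance(diff, pd.TimedeltaIndex)
    return diff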
# -*- coding: utf-8 -*-
# @Time : 3/3/21 12:25 PM
# @Author : Jingnan
# @Email : <EMAIL>
import csv
import glob
import os
import pandas as pd
# import streamlit as st
from tqdm import tqdm
def search_files(data_dir, cell, prefix):
data_files = sorted(glob.glob(os.path.join(data_dir, "*" + str(cell) + "_" + prefix)))
light_intensity_ls = [os.path.basename(data_file).split("_")[0] for data_file in data_files]
intensity_dict = {}
for intensity, data_file in zip(light_intensity_ls, data_files):
intensity_dict[intensity] = data_file
intensity_int_dict = {}
for intensity in light_intensity_ls:
if "dark" in intensity:
continue
if "-" in intensity:
raise Exception(f"intensity文件包含‘-’,请删除此文件: {intensity}")
# try:
# if use_second_test:
# intensity_int_dict[float(intensity.split("-")[0])] = intensity_dict[intensity]
#
# intensity_dict.pop(intensity.split("-")[0])
# else:
# intensity_int_dict[float(intensity.split("-")[0])] = intensity_dict[intensity]
#
# intensity_dict.pop(intensity)
# except KeyError:
# pass
else:
try:
intensity_int_dict[float(intensity)] = intensity_dict[intensity]
except KeyError:
pass
data_files = []
intesity_int_ls = []
for intensity, data_file in intensity_int_dict.items():
intesity_int_ls.append(intensity)
data_files.append(data_file)
try:
light_intensity_ls, data_files = zip(*sorted(zip(intesity_int_ls, data_files)))
except ValueError:
light_intensity_ls, data_files = [], []
return light_intensity_ls, data_files
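# Illustrative use of search_files (the directory and file names here are
# hypothetical): it returns the light intensities sorted ascending together
# with the matching data files, skipping "dark" measurements and entries whose
# intensity prefix is not numeric.
def _demo_search_files():
    intensities, files = search_files(data_dir="raw/device01", cell=1, prefix="data.txt")
    for intensity, path in zip(intensities, files):
        print(intensity, os.path.basename(path))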
def reorganize_data(device_ids, father_dir, targt_dir):
for device_id in device_ids:
fpath = os.path.join(father_dir, device_id)
out_file = os.path.join(targt_dir, device_id+ "_data.xlsx")
data_ls = []
cells = [1]
for idx, cell in enumerate(cells):
light_intensity_ls, data_files = search_files(data_dir=fpath, cell=cell, prefix="data.txt")
if len(light_intensity_ls)==0:
data_np = pd.DataFrame()
import numpy as np
import pandas as pd
import akshare as ak
import matplotlib.pyplot as plt
import matplotlib
import itertools
matplotlib.rc("font", family="PingFang HK")
def get_fund_categories(open_fund=False):
fund_em_fund_name_df = ak.fund_em_fund_name()
if open_fund:
fund_em_open_fund_daily_df = ak.fund_em_open_fund_daily()
df = pd.merge(fund_em_open_fund_daily_df, fund_em_fund_name_df, on="基金代码")
fund_categories = np.unique(df["基金类型"].values)
else:
fund_categories = np.unique(fund_em_fund_name_df["基金类型"].values)
return fund_categories
def get_category_all_funds():
Total_df = current_open_fund_mergered()
JJcate = np.unique(Total_df["基金大类"].values)
code_cate_dict = {}
for cate in JJcate:
cate_df = Total_df[
(Total_df["基金大类"] == cate)
& (Total_df["申购状态"] == "开放申购")
& (Total_df["赎回状态"] == "开放赎回")
& (Total_df["日增长率"] != "")
]
code_cate_dict.update({cate: cate_df["基金代码"].values})
return code_cate_dict
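# Hedged usage sketch: both helpers above hit live akshare endpoints, so the
# exact categories and fund codes returned will vary from day to day.
def _demo_fund_overview():
    categories = get_fund_categories(open_fund=True)
    code_map = get_category_all_funds()
    print(categories)
    print({cate: len(codes) for cate, codes in code_map.items()})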
def get_fund_net_worth(fund_code, start_date, end_date, fund_category):
"""
:param fund_code: string, input a fund code
:param start_date: string, input a date format 'yyyy-mm-dd'
:param end_date: string, input a date format 'yyyy-mm-dd'
:param fund_category: string, input either ['open', 'money', 'financial', 'etf']
:return: dataframe, sliced dataframe between start_date and end_date
"""
start_date = pd.to_datetime(start_date, format="%Y/%m/%d")
end_date = pd.to_datetime(end_date, format="%Y/%m/%d")
if fund_category == "open":
df = ak.fund_em_open_fund_info(fund=fund_code)
elif fund_category == "money":
df = ak.fund_em_money_fund_info(fund=fund_code)
df["净值日期"] = pd.to_datetime(df["净值日期"], format="%Y/%m/%d")
elif fund_category == "financial":
df = ak.fund_em_financial_fund_info(fund=fund_code)
df["净值日期"] = pd.to_da | tetime(df["净值日期"], format="%Y/%m/%d") | pandas.to_datetime |
"""The main pipeline to identify sidelobes in VLASS using a SOM.
Requirements:
- The VLASS Component catalogue
- ALL image cutouts (VLASS only) for the specified sample
Provided in this repo:
- SOM_B3_h10_w10_vlass.bin: The trained SOM binary file.
- neuron_info.csv: Contains the sidelobe probabilities for each neuron.
The process is conducted as follows:
1. Preprocess the data
- Creates an Image binary
2. Map the Image binary onto the SOM
- Creates the MAP and TRANSFORM binaries
3. Update the Component catalogue using the results from the Mapping.
- Outputs the final Component catalogue plus two other catalogues
that are useful for debugging.
**Note: Step (2) *must* be run on a GPU, unless you are fine with
waiting over a month (probably) for it to complete!
"""
import os, sys
import argparse
import subprocess
from tqdm import tqdm
from multiprocessing import Pool, cpu_count
import numpy as np
import pandas as pd
from astropy.io import fits
from astropy.wcs import WCS
from astropy.table import Table
from astropy import units as u
from astropy.coordinates import SkyCoord, search_around_sky
from reproject import reproject_interp
import pyink as pu
def filename(objname, survey="DECaLS-DR8", format="fits"):
"""Convert a Component_name into a filename. Another method can
be used as long as it is consistently applied."""
# Take Julian coords of name to eliminate white space - eliminate prefix
name = objname.split(" ")[1]
fname = f"{name}_{survey}.{format}"
return fname
def load_catalogue(catalog, flag_data=False, flag_SNR=False, pandas=False, **kwargs):
"""Load the Component catalogue in as either an astropy.Table (default)
or a pandas.DataFrame (if pandas=True).
Optionally apply common flags.
"""
fmt = "fits" if catalog.endswith("fits") else "csv"
rcat = Table.read(catalog, format=fmt)
if flag_data:
rcat = rcat[rcat["S_Code"] != "E"]
rcat = rcat[rcat["Duplicate_flag"] < 2]
if flag_SNR:
rcat = rcat[rcat["Peak_flux"] >= 5 * rcat["Isl_rms"]]
rcat["SNR"] = rcat["Total_flux"] / rcat["Isl_rms"]
if pandas:
rcat = rcat.to_pandas()
if fmt == "fits":
for col in rcat.columns[rcat.dtypes == object]:
rcat[col] = rcat[col].str.decode("ascii")
return rcat
def load_fits(filename, ext=0):
hdulist = fits.open(filename)
d = hdulist[ext]
return d
def load_radio_fits(filename, ext=0):
"""Load the data from a single extension of a fits file."""
hdu = load_fits(filename, ext=ext)
wcs = WCS(hdu.header).celestial
hdu.data = np.squeeze(hdu.data)
hdu.header = wcs.to_header()
return hdu
def recenter_regrid(hdu, ra, dec, img_size, pix_size=0.6, reproj_hdr=None):
"""Update the header info for an image such that it is defined relative
to its central pixel. Additionally, reproject the image and regrid the
pixels onto a new scale so that all images have the same pixel size.
"""
# Recentering the reference pixel
if reproj_hdr is None:
reproj_hdr = hdu.header.copy()
reproj_hdr["CRVAL1"] = ra
reproj_hdr["CRVAL2"] = dec
reproj_hdr["CRPIX1"] = img_size[0] // 2 + 0.5
reproj_hdr["CRPIX2"] = img_size[1] // 2 + 0.5
reproj_hdr["CDELT1"] = np.sign(reproj_hdr["CDELT1"]) * pix_size / 3600
reproj_hdr["CDELT2"] = np.sign(reproj_hdr["CDELT2"]) * pix_size / 3600
reproj_wcs = WCS(reproj_hdr).celestial
reproj_data, reproj_footprint = reproject_interp(
hdu, reproj_wcs, shape_out=img_size
)
return reproj_data
def scale_data(data, log=False, minsnr=None):
"""Scale the data so that the SOM behaves appropriately."""
img = np.zeros_like(data)
noise = pu.rms_estimate(data[data != 0], mode="mad", clip_rounds=2)
# data - np.median(remove_zeros)
if minsnr is not None:
mask = data >= minsnr * noise
else:
mask = np.ones_like(data, dtype=bool)
data = data[mask]
if log:
data = np.log10(data)
img[mask] = pu.minmax(data)
return img.astype(np.float32)
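# Self-contained sketch of scale_data on synthetic pixels instead of a real
# VLASS cutout (values and threshold are arbitrary, purely illustrative).
def _demo_scale_data():
    rng = np.random.default_rng(0)
    fake_img = rng.normal(0.0, 1.0, size=(150, 150)).astype(np.float32)
    fake_img[60:90, 60:90] += 50.0  # a bright "source" well above the noise
    scaled = scale_data(fake_img, log=False, minsnr=2)
    # pixels below 2x the estimated rms are zeroed; the rest are min-max scaled to [0, 1]
    assert 0.0 <= scaled.min() and scaled.max() <= 1.0
    return scaled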
def radio_preprocess(idx, sample, path="images", img_size=(150, 150), **kwargs):
"""Preprocess a VLASS image.
"""
try:
radio_comp = sample.iloc[idx]
radio_file = radio_comp["filename"]
radio_file = os.path.join(path, radio_file)
radio_hdu = load_radio_fits(radio_file)
radio_data = radio_hdu.data
if radio_data.shape != img_size:
radio_data = recenter_regrid(
radio_hdu,
radio_comp["RA"],
radio_comp["DEC"],
img_size=img_size,
pix_size=0.6,
)
return idx, scale_data(radio_data, **kwargs)
except Exception as e:
print(f"Failed on index {idx}: {e}")
return None
def run_prepro(sample, outfile, shape=(150, 150), threads=None, **kwargs):
"""Preprocess all VLASS images, creating an image binary.
Note that the parallization is not working properly, and actually
results in a slow-down if the sample size is too large.
"""
with pu.ImageWriter(outfile, 0, shape, clobber=True) as pk_img:
if threads is None:
threads = cpu_count()
pool = Pool(processes=threads)
results = [
pool.apply_async(radio_preprocess, args=(idx, sample), kwds=kwargs)
for idx in sample.index
]
for res in tqdm(results):
out = res.get()
if out is not None:
pk_img.add(out[1], attributes=out[0])
def run_prepro_seq(sample, outfile, shape=(150, 150), **kwargs):
"""Sequential preprocessing for all VLASS images.
"""
with pu.ImageWriter(outfile, 0, shape, clobber=True) as pk_img:
for idx in tqdm(sample.index):
out = radio_preprocess(idx, sample, img_size=shape, **kwargs)
if out is not None:
pk_img.add(out[1], attributes=out[0])
def map_imbin(
imbin_file,
som_file,
map_file,
trans_file,
som_width,
som_height,
numthreads=4,
cpu=False,
nrot=360,
log=True,
):
"""Map an image binary onto a SOM using Pink.
"""
commands = [
"Pink",
"--map",
imbin_file,
map_file,
som_file,
"--numthreads",
f"{numthreads}",
"--som-width",
f"{som_width}",
"--som-height",
f"{som_height}",
"--store-rot-flip",
trans_file,
"--euclidean-distance-shape",
"circular",
"-n",
str(nrot),
]
if cpu:
commands += ["--cuda-off"]
if log:
map_logfile = map_file.replace(".bin", ".log")
with open(map_logfile, "w") as log:
subprocess.run(commands, stdout=log)
else:
subprocess.run(commands)
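# For reference, the subprocess call assembled above is roughly equivalent to
# running PINK by hand as (file names below are placeholders):
#
#   Pink --map IMG_catalogue.bin MAP_catalogue.bin SOM_B3_h10_w10_vlass.bin \
#        --numthreads 4 --som-width 10 --som-height 10 \
#        --store-rot-flip TRANSFORM_catalogue.bin \
#        --euclidean-distance-shape circular -n 360
#
# with "--cuda-off" appended when cpu=True, and stdout redirected to a .log
# file when log=True.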
def fill_duplicates(cat, cols):
"""Since duplicates are excluded in the catalogue (to save time),
they must be filled in to create a complete catalogue.
Due to nuances in the flagging routine, this must be done iteratively.
"""
# Fill in `cols` for duplicates by searching for matches in the rest
# of the duplicate components.
# Need to apply this multiple times because of the duplicate flagging algorithm.
missing_comps = cat[(cat.Duplicate_flag >= 1) & np.isnan(cat[cols[0]])]
not_missing_comps = cat[(cat.Duplicate_flag >= 1) & ~np.isnan(cat[cols[0]])]
missing_coords = SkyCoord(
missing_comps["RA"].values, missing_comps["DEC"].values, unit=u.deg
)
not_missing_coords = SkyCoord(
not_missing_comps["RA"].values, not_missing_comps["DEC"].values, unit=u.deg
)
idx1, idx2, sep, dist = search_around_sky(
missing_coords, not_missing_coords, seplimit=2 * u.arcsec
)
# When multiple matches are found, choose the one with the highest SNR
idx1u, idx1c = np.unique(idx1, return_counts=True)
idx2u = [
idx2[idx1 == i1][0]
if i1c == 1
else idx2[idx1 == i1][not_missing_comps.iloc[idx2[idx1 == i1]]["SNR"].argmax()]
for i1, i1c in zip(idx1u, idx1c)
]
for col in cols:
cat.loc[missing_comps.iloc[idx1].index, col] = (
not_missing_comps[col].iloc[idx2].values
)
def fill_all_duplicates(cat, cols):
nan_count = 0
while np.sum(np.isnan(cat[cols[0]])) != nan_count:
nan_count = np.sum(np.isnan(cat[cols[0]]))
fill_duplicates(cat, cols)
def parse_args():
"""
Parse input arguments
"""
parser = argparse.ArgumentParser(
description="Add sidelobe info to VLASS component catalogue."
)
parser.add_argument(
dest="catalogue", help="VLASS component catalogue", type=str,
)
parser.add_argument(
dest="outfile", help="Name for the updated component catalogue", type=str,
)
parser.add_argument(
"-p",
"--cutout_path",
dest="cutout_path",
help="Path to the directory containing the input fits images",
default="images",
type=str,
)
parser.add_argument(
"-s", "--som", dest="som_file", help="The SOM binary file", type=str,
)
parser.add_argument(
"-n",
"--neuron_table",
dest="neuron_table_file",
help="The table of properties for each SOM neuron",
type=str,
)
parser.add_argument(
"-t",
"--threads",
dest="threads",
help="Number of threads to use for multiprocessing",
default=cpu_count(),
type=int,
)
parser.add_argument(
"--cpu",
dest="cpu",
help="Run PINK in cpu mode instead of gpu mode",
default=False,
type=bool,
)
parser.add_argument(
"--overwrite",
dest="clobber",
help="Overwrite the Image and Map binaries",
default=False,
type=bool,
)
args = parser.parse_args()
return args
if __name__ == "__main__":
args = parse_args()
catalogue = args.catalogue
cutout_path = args.cutout_path
som_file = args.som_file
neuron_table_file = args.neuron_table_file
threads = args.threads
clobber = args.clobber
cat_name = ".".join(os.path.basename(catalogue).split(".")[:-1])
imbin_file = f"IMG_{cat_name}.bin"
sample = load_catalogue(catalogue, flag_data=False, flag_SNR=False, pandas=True)
sample["filename"] = sample["Component_name"].apply(filename, survey="VLASS")
# Subset on Duplicate_flag, then fill in those values later
sample = sample[sample["Duplicate_flag"] < 2].reset_index(drop=True)
if not os.path.exists(imbin_file) or clobber:
run_prepro_seq(
sample,
imbin_file,
shape=(150, 150),
path=cutout_path,
# threads=threads,
log=True,
minsnr=2,
)
else:
print(f"Image binary {imbin_file} already exists...skipping.")
# EXIT HERE if mapping is being conducted on a different machine
# sys.exit(1)
# Map the image binary through the SOM
som = pu.SOM(som_file)
som_width, som_height, ndim = som.som_shape
map_file = imbin_file.replace("IMG", "MAP")
trans_file = map_file.replace("MAP", "TRANSFORM")
if not os.path.exists(map_file) or not os.path.exists(trans_file) or clobber:
map_imbin(
imbin_file,
som_file,
map_file,
trans_file,
som_width,
som_height,
numthreads=cpu_count(),
cpu=args.cpu,
nrot=360,
log=True,
)
else:
print(
f"Mapping binary {map_file} and Transform file {trans_file} already exist...skipping."
)
# CONTINUE HERE if Mapping was done on a different machine.
# Update the component catalogue with the sidelobe probability
imgs = pu.ImageReader(imbin_file)
# Output a table of components that failed to preprocess
# This is usually due to missing files
failed = sample.iloc[list(set(sample.index).difference(imgs.records))].reset_index(
drop=True
)
Table.from_pandas(failed).write(f"{cat_name}_failed.fits", overwrite=True)
# Output a table of all preprocessed components.
# This matches the length of the IMG and MAP binaries, which makes
# it useful for inspecting the results.
sample = sample.iloc[imgs.records].reset_index(drop=True)
Table.from_pandas(sample).write(f"{cat_name}_preprocessed.fits", overwrite=True)
del imgs
# Determine the best-matching neurons and their Euclidean distances
somset = pu.SOMSet(som, map_file, trans_file)
sample["bmu"] = somset.mapping.bmu(return_tuples=True)
sample["Neuron_dist"] = somset.mapping.bmu_ed()
bmu = somset.mapping.bmu()
sample["Best_neuron_y"] = bmu[:, 0]
sample["Best_neuron_x"] = bmu[:, 1]
# Neuron table is one row per neuron. Reshape P_sidelobe into an array.
neuron_table = pd.read_csv(neuron_table_file)
import sys
import math
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from termcolor import colored
from os import listdir
from os.path import isfile, join
def load_daily_cases(d):
"""
Load all daily cases (CSV per date) from the containing directory
@param d path to the directory containing daily case CSV files
"""
def get_date(t):
[m,d,y] = t.split(".")[0].split("-")
return "-".join([y,m,d])
# List all daily case files
q = join(d, "csse_covid_19_data/csse_covid_19_daily_reports")
csv_list = [f for f in listdir(q) if isfile(join(q, f)) and f.endswith(".csv")]
daily = []
for tag in csv_list:
print(colored("Reading : ", "cyan"), tag)
df = pd.read_csv(join(q,tag), header="infer")
df["date"] = get_date(tag)
daily.append(df)
daily = pd.concat(daily)
import os
import pathlib
#import tarfile
import urllib.request
import pandas as pd
import spacy
import string
#import numpy as np
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.pipeline import Pipeline
from pickle import dump, load
import joblib
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
#Custom transformer using Python standard library (you could use spacy as well)
class predictors(TransformerMixin):
"""Class used to perform the first step of pipeline.
This consists in lower case all sentences.
"""
# This function will clean the text
def clean_text(self,text):
return text.strip().lower()
def transform(self, X, **transform_params):
return [self.clean_text(text) for text in X]
#return [text for text in X]
def fit(self, X, y=None, **fit_params):
return self
def get_params(self, deep=True):
return {}
class SentimentTrain(object):
""" Class used to train the sentiment analysis model
Attributes:
data_path (str): path where the text files can be found.
"""
def __init__(self,data_path):
self.data_path=os.path.join(pathlib.Path().absolute(), data_path)
def prepareData(self):
""" Method that read each txt file and joins them.
Returns:
DataFrame: Including the joined files with columns 'Message' and 'Target'
"""
df_yelp = pd.read_table(os.path.join(self.data_path,'yelp_labelled.txt'))
df_imdb = pd.read_table(os.path.join(self.data_path,'imdb_labelled.txt'))
df_amz = pd.read_table(os.path.join(self.data_path,'amazon_cells_labelled.txt'))
# Concatenate our Datasets
frames = [df_yelp,df_imdb,df_amz]
for column in frames:
column.columns = ["Message","Target"]
df_reviews = pd.concat(frames)
return df_reviews
def spacy_tokenizer(self,doc):
"""Function that serves as tokenizer in our pipeline
Loads the 'en_core_web_sm' model, tokenizes the string and performs preprocessing.
Preprocessing includes lemmatizing tokens as well as removing stop words and punctuations.
Args:
doc(str): sentence to tokenize.
Returns:
list: preprocessed tokens.
"""
punctuations = string.punctuation
nlp = spacy.load('en_core_web_sm')
stop_words = spacy.lang.en.stop_words.STOP_WORDS
tokens = nlp(doc)
# Lemmatizing each token and converting each token into lowercase
tokens = [word.lemma_.lower() for word in tokens if not word.is_space]
# Removing stop words and punctuations
tokens = [ word for word in tokens if word not in stop_words and word not in punctuations ]
# return preprocessed list of tokens
return tokens
def train(self):
"""Function that performs a pipeline execution.
This function creates a Pipeline instance. Splits the data into train/test and pass it through the pipeline.
It also saves the model as pickle file once training is over.
"""
df_reviews = self.prepareData()
tfvectorizer = TfidfVectorizer(tokenizer = self.spacy_tokenizer)
classifier_LG = LogisticRegression(verbose=True)
pipe2_LG = Pipeline([
('vectorizer', tfvectorizer),
('classifier', classifier_LG)], verbose=True)
# pipe2_LG = Pipeline([
# ("cleaner", predictors()),
# ('vectorizer', tfvectorizer),
# ('classifier', classifier_LG)], verbose=True)
X = df_reviews['Message']
ylabels = df_reviews['Target']
X_train, X_test, y_train, y_test = train_test_split(X, ylabels, test_size=0.3, random_state=42)
pipe2_LG.fit(X_train,y_train)
# Save the model
model_path = os.path.join(str(pathlib.Path().absolute()), "model")
model_file = model_path + "/logreg_tfidf.pkl"
if not os.path.isdir(model_path):
os.makedirs(model_path)
dump(pipe2_LG, open(model_file, 'wb'))
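# Minimal training sketch (assumes the three labelled-sentence .txt files --
# yelp_labelled.txt, imdb_labelled.txt, amazon_cells_labelled.txt -- sit in a
# local "Data" directory, as prepareData() expects):
def _demo_train_sentiment():
    trainer = SentimentTrain("Data")
    trainer.train()  # fits TF-IDF + LogisticRegression and pickles it to model/logreg_tfidf.pkl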
class PredictSentiment(object):
""" Class to load the model and build the tokens DataFrame
This class will load the model using the pickle file. So it can be used by the predict method.
"""
def __init__(self):
#model_path = os.path.join(str(pathlib.Path().absolute()), "model")
#model_file = model_path + "/forest_reg.pkl"
#self.model = load(open(model_file, 'rb'))
self.model = joblib.load("model/logreg_tfidf.pkl")
def buildDF(self, sentence):
"""Generate DataFrame with tokens and coefficients.
Args:
sentence(str): sentence to tokenize.
Returns:
DataFrame: containing tokens used for prediction and corresponding coeficients,
"""
tokens = SentimentTrain("Data").spacy_tokenizer(sentence[0])
arr=[]
for token in tokens:
idx = self.model.steps[1][1].vocabulary_.get(token)
coef = self.model.steps[2][1].coef_[0][idx]
arr.append({'TOKEN':token, 'Coef':coef})
return pd.DataFrame(arr)
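# Illustrative prediction flow (assumes model/logreg_tfidf.pkl already exists,
# e.g. produced by SentimentTrain.train(); the review text is invented):
def _demo_predict_sentiment():
    ps = PredictSentiment()
    sentence = ["The food was terrible and the staff were rude"]
    label = ps.model.predict(sentence)        # e.g. array([0]) for a negative review
    proba = ps.model.predict_proba(sentence)  # class probabilities from the pipeline
    return label, proba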
# Source: AdvDSI-Lab2-Exercise1-Solutions.ipynb
# Author: <NAME>
def score_model(X, y, set_name=None, model=None):
"""Print regular performance statistics for the provided data
Parameters
----------
y_preds : Numpy Array
Predicted target
y_actuals : Numpy Array
Actual target
set_name : str
Name of the set to be printed
Returns
-------
"""
import pandas as pd
import numpy as np
from sklearn.metrics import accuracy_score
from sklearn.metrics import mean_squared_error as mse
from sklearn.metrics import mean_absolute_error as mae
from sklearn.metrics import roc_auc_score
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import f1_score
y_preds = model.predict(X)
y_predict_proba = model.predict_proba(X)[:, 1]
perf_accuracy = accuracy_score(y, y_preds)
perf_mse = mse(y, y_preds, squared=False)
perf_mae = mae(y, y_preds)
perf_precision = precision_score(y, y_preds)
perf_recall = recall_score(y, y_preds)
perf_F1 = f1_score(y, y_preds)
perf_AUC = roc_auc_score(y, y_predict_proba)
# print(f'ROC-AUC {set_name}: { roc_auc_score(y_preds, model.predict_proba(y_preds)[:, 1])}')
model_scores = []
model_provided = model
model_scores.append([set_name, perf_accuracy, perf_mse, perf_mae, perf_precision, perf_recall, perf_F1,perf_AUC])
df_model_scores = pd.DataFrame (model_scores, columns = ['Set Name','ACC','MSE','MAE','PREC','RECALL','F1','AUC'])
return df_model_scores
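# Hedged usage sketch: scoring a freshly fitted binary classifier on toy data.
def _demo_score_model():
    from sklearn.datasets import make_classification
    from sklearn.linear_model import LogisticRegression
    from sklearn.model_selection import train_test_split
    X, y = make_classification(n_samples=200, random_state=42)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)
    clf = LogisticRegression(max_iter=1000).fit(X_train, y_train)
    return score_model(X_test, y_test, set_name="Toy test", model=clf)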
# New NULL Model
def score_null_model(y_train, y_base, set_name = None):
"""Print regular performance statistics for the provided data
Parameters
----------
y_train : Numpy Array
Predicted target
y_base : Numpy Array
Actual target
set_name : str
Name of the set to be printed
Returns
-------
"""
import pandas as pd
import numpy as np
from sklearn.metrics import accuracy_score
from sklearn.metrics import mean_squared_error as mse
from sklearn.metrics import mean_absolute_error as mae
from sklearn.metrics import roc_auc_score
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import f1_score
perf_accuracy = accuracy_score(y_base, y_train)
perf_mse = mse(y_base, y_train, squared=False)
perf_mae = mae(y_base, y_train)
perf_precision = precision_score(y_base, y_train)
perf_recall = recall_score(y_base, y_train)
perf_F1 = f1_score(y_base, y_train)
perf_AUC = None #roc_auc_score(y_base, y_predict_proba)
# print(f'ROC-AUC {set_name}: { roc_auc_score(y_preds, model.predict_proba(y_preds)[:, 1])}')
model_scores = []
model_scores.append([set_name, perf_accuracy, perf_mse, perf_mae, perf_precision, perf_recall, perf_F1,perf_AUC])
df_model_scores = pd.DataFrame (model_scores, columns = ['Set Name','ACC','MSE','MAE','PREC','RECALL','F1','AUC'])
return df_model_scores
def score_models(X_train = None, y_train = None, X_val = None, y_val = None, y_base = None, includeBase = False, model = None):
"""Score Models and return results as a dataframe
Parameters
----------
X_train : Numpy Array
X_train data
y_train : Numpy Array
Train target
X_val : Numpy Array
X_val data
y_val : Numpy Array
Val target
includeBase: Boolean
Calculate and display baseline
model: model
Model passed into function
Returns
-------
"""
import pandas as pd
import numpy as np
df_model_scores = pd.DataFrame()
if includeBase == True:
df_model_scores_base = score_null_model(y_train = y_train, y_base = y_base, set_name='Base')
df_model_scores = pd.concat([df_model_scores,df_model_scores_base],ignore_index = True, axis=0)
if X_train.size > 0:
df_model_scores_train = score_model(X_train, y_train, set_name='Train', model=model)
df_model_scores = pd.concat([df_model_scores,df_model_scores_train],ignore_index = True, axis=0)
if X_val.size > 0:
df_model_scores_val = score_model(X_val, y_val, set_name='Validate', model=model)
df_model_scores = pd.concat([df_model_scores,df_model_scores_val],ignore_index = True, axis=0)
display(df_model_scores)
return
def score_models2(X_train = None, y_train = None, X_val = None, y_val = None, X_test = None, y_test = None, y_base = None, includeBase = False, model = None):
"""Score Models and return results as a dataframe
Parameters
----------
X_train : Numpy Array
X_train data
y_train : Numpy Array
Train target
X_val : Numpy Array
X_val data
y_val : Numpy Array
Val target
X_test : Numpy Array
X_test data
y_test : Numpy Array
Test target
includeBase: Boolean
Calculate and display baseline
model: model
Model passed into function
Returns
-------
"""
import pandas as pd
import numpy as np
df_model_scores = pd.DataFrame()
if includeBase == True:
df_model_scores_base = score_null_model(y_train = y_train, y_base = y_base, set_name='Base')
df_model_scores = pd.concat([df_model_scores,df_model_scores_base],ignore_index = True, axis=0)
if X_train.size > 0:
df_model_scores_train = score_model(X_train, y_train, set_name='Train', model=model)
df_model_scores = pd.concat([df_model_scores,df_model_scores_train],ignore_index = True, axis=0)
if X_val.size > 0:
df_model_scores_val = score_model(X_val, y_val, set_name='Validate', model=model)
df_model_scores = pd.concat([df_model_scores,df_model_scores_val],ignore_index = True, axis=0)
if X_test.size > 0:
df_model_scores_test = score_model(X_test, y_test, set_name='Test', model=model)
df_model_scores = pd.concat([df_model_scores, df_model_scores_test], ignore_index=True, axis=0)
# -*- coding: utf-8 -*-
"""Reads and prepares triples/statements for INDRA.
Run with:
python -m src.stonkgs.data.indra_extraction
"""
import json
import logging
import os
from typing import Any, Dict, List, Tuple
import networkx as nx
import pandas as pd
import pybel
from pybel.constants import (
ANNOTATIONS,
EVIDENCE,
RELATION,
CITATION,
INCREASES,
DIRECTLY_INCREASES,
DECREASES,
DIRECTLY_DECREASES,
REGULATES,
BINDS,
CORRELATION,
NO_CORRELATION,
NEGATIVE_CORRELATION,
POSITIVE_CORRELATION,
ASSOCIATION,
PART_OF,
)
from pybel.dsl import (
CentralDogma,
ComplexAbundance,
Abundance,
CompositeAbundance,
MicroRna,
BaseConcept,
ListAbundance,
Reaction,
)
from tqdm import tqdm
from stonkgs.constants import (
INDRA_RAW_JSON,
MISC_DIR,
PRETRAINING_DIR,
SPECIES_DIR,
CELL_LINE_DIR,
LOCATION_DIR,
DISEASE_DIR,
RELATION_TYPE_DIR,
)
logger = logging.getLogger(__name__)
DIRECT_RELATIONS = {
DIRECTLY_INCREASES,
DIRECTLY_DECREASES,
BINDS,
}
INDIRECT_RELATIONS = {
REGULATES,
CORRELATION,
DECREASES,
INCREASES,
NO_CORRELATION,
NEGATIVE_CORRELATION,
POSITIVE_CORRELATION,
ASSOCIATION,
PART_OF,
}
UP_RELATIONS = {INCREASES, POSITIVE_CORRELATION, DIRECTLY_INCREASES}
DOWN_RELATIONS = {DECREASES, NEGATIVE_CORRELATION, DIRECTLY_DECREASES}
def binarize_triple_direction(
graph: pybel.BELGraph, triples_per_class: int = 25000
) -> Tuple[Dict[str, Any], List]:
"""Binarize triples depending on the type of direction.
Extract the fine-tuning data for the interaction type (direct vs. indirect) and polarity (up vs. down) tasks.
"""
triples = []
edges_to_removes = []
counter_dir_inc = 0
counter_dir_dec = 0
counter_inc = 0
counter_dec = 0
summary = {"context": "(in)direct relations and polarity"}
# Iterate through the graph and infer a subgraph
for u, v, k, data in graph.edges(keys=True, data=True):
if EVIDENCE not in data or not data[EVIDENCE] or data[EVIDENCE] == "No evidence text.":
# logger.warning(f'not evidence found in {data}')
continue
# Both nodes in the triple are required to be a protein/gene (complexes and other stuff are skipped)
if not isinstance(u, CentralDogma) and not isinstance(v, CentralDogma):
continue
if data[RELATION] in UP_RELATIONS:
polarity_label = "up"
elif data[RELATION] in DOWN_RELATIONS:
polarity_label = "down"
else:
continue
if data[RELATION] in {INCREASES, DECREASES}:
interaction_label = "indirect_interaction"
elif data[RELATION] in {DIRECTLY_INCREASES, DIRECTLY_DECREASES}:
interaction_label = "direct_interaction"
else:
continue
"""Check if limit has been reached"""
if data[RELATION] == DIRECTLY_DECREASES and counter_dir_dec >= triples_per_class:
continue
elif data[RELATION] == INCREASES and counter_inc >= triples_per_class:
continue
elif data[RELATION] == DECREASES and counter_dec >= triples_per_class:
continue
elif data[RELATION] == DIRECTLY_INCREASES and counter_dir_inc >= triples_per_class:
continue
# Add particular triple to the fine tuning set
if data[RELATION] == INCREASES:
counter_inc += 1
elif data[RELATION] == DIRECTLY_INCREASES:
counter_dir_inc += 1
elif data[RELATION] == DIRECTLY_DECREASES:
counter_dir_dec += 1
elif data[RELATION] == DECREASES:
counter_dec += 1
else:
continue
triples.append(
{
"source": u,
"relation": data[RELATION],
"target": v,
"evidence": data[EVIDENCE],
"pmid": data[CITATION],
"polarity": polarity_label,
"interaction": interaction_label,
}
)
edges_to_removes.append((u, v, k))
df = pd.DataFrame(triples)
logger.info(f"Number of binarized triples for fine-tuning: {df.shape[0]}")
summary["number_of_triples"] = df.shape[0]
summary["number_of_labels"] = "4 or 2 depending on the task"
summary["labels"] = "NA"
df.to_csv(os.path.join(RELATION_TYPE_DIR, f"relation_type.tsv"), sep="\t", index=False)
return summary, edges_to_removes
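# Sketch of how the binarised triples are typically consumed downstream: the
# returned edges are dropped from the BEL graph so that fine-tuning evidence
# does not leak into the pre-training corpus (the graph loading itself is
# assumed to happen elsewhere and is not shown here).
def _demo_binarize(graph: pybel.BELGraph) -> dict:
    summary, removed_edges = binarize_triple_direction(graph, triples_per_class=25000)
    graph.remove_edges_from(removed_edges)
    return summary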
def create_polarity_annotations(graph: pybel.BELGraph) -> Dict[str, Any]:
"""Group triples depending on the type of polarity."""
triples = []
summary = {"context": "polarity"}
# Iterate through the graph and infer a subgraph
for u, v, data in graph.edges(data=True):
if EVIDENCE not in data or not data[EVIDENCE] or data[EVIDENCE] == "No evidence text.":
logger.warning(f"not evidence found in {data}")
continue
# todo: check this we will focus only on molecular interactions
if not any(
isinstance(u, class_to_check)
for class_to_check in (
CentralDogma,
ComplexAbundance,
Abundance,
CompositeAbundance,
MicroRna,
)
):
continue
if not any(
isinstance(v, class_to_check)
for class_to_check in (
CentralDogma,
ComplexAbundance,
Abundance,
CompositeAbundance,
MicroRna,
)
):
continue
class_label = "indirect" if data[RELATION] in INDIRECT_RELATIONS else "direct"
triples.append(
{
"source": u,
"relation": data[RELATION],
"target": v,
"evidence": data[EVIDENCE],
"pmid": data[CITATION],
"class": class_label,
}
)
df = pd.DataFrame(triples)
summary["number_of_triples"] = df.shape[0]
summary["number_of_labels"] = df["class"].unique().size
summary["labels"] = df["class"].value_counts().to_dict()
df.to_csv(os.path.join(RELATION_TYPE_DIR, f"relation_type.tsv"), sep="\t", index=False)
return summary
def create_context_type_specific_subgraph(
graph: pybel.BELGraph,
context_annotations: List[str],
) -> Tuple[List, pybel.BELGraph]:
"""Create a subgraph based on context annotations and also return edges that should be removed later on."""
subgraph = graph.child()
subgraph.name = f"INDRA graph contextualized for {context_annotations}"
edges_to_remove = []
# Iterate through the graph and infer a subgraph with edges that contain the annotation of interest
for u, v, k, data in graph.edges(data=True, keys=True):
if ANNOTATIONS in data and any(
annotation in data[ANNOTATIONS] for annotation in context_annotations
):
subgraph.add_edge(u, v, k, **data)
# Triples to be removed
edges_to_remove.append((u, v, k))
# number_of_edges_before = graph.number_of_edges()
# graph.remove_edges_from(edges_to_remove)
# number_of_edges_after_removing_annotations = graph.number_of_edges()
# logger.info(
# f'Original graph was reduced from {number_of_edges_before} to {number_of_edges_after_removing_annotations}
# edges'
# )
string = f'Number of nodes/edges in the inferred subgraph "{context_annotations}": \
{subgraph.number_of_nodes()} {subgraph.number_of_edges()}'
logger.info(string)
return edges_to_remove, subgraph
def dump_edgelist(
graph: pybel.BELGraph,
annotations: List[str],
name: str,
output_dir: str,
) -> Dict[str, Any]:
"""Dump tsv file for ml purposes."""
triples = []
summary = {
"context": name,
}
# Iterate through the graph and infer a subgraph with edges that contain the annotation of interest
for u, v, data in graph.edges(data=True):
# If the data entry has no text evidence or the following filler text, don't add it
if not data[EVIDENCE] or data[EVIDENCE] == "No evidence text.":
continue
# Multiple annotations
for annotation in data[ANNOTATIONS]:
if annotation not in annotations:
continue
# Skip multiple classes in the triple for the same annotation
if len(data[ANNOTATIONS][annotation]) > 1:
logger.warning(f"triple has more than one label -> {data[ANNOTATIONS][annotation]}")
continue
for label_annotation in data[ANNOTATIONS][annotation]:
triples.append(
{
"source": u,
"relation": data[RELATION],
"target": v,
"evidence": data[EVIDENCE],
"pmid": data[CITATION],
"class": label_annotation,
},
)
if not triples:
return {
"context": name,
"number_of_triples": "0",
"number_of_labels": "0",
"labels": "0",
}
df = pd.DataFrame(triples)
"""
exceltools - providing more user-friendly access to the pywin32 library
=============================================================================
exceltools is a Python module acting as a friendlier interface to the pywin32 library which
in itself is an API to the Windows COM client API.
exceltools does not provide the full functionality of pywin32, it only seeks to simplify some commonly used code.
exceltools is intended to work alongside pandas and numpy and aids in creating and
populating spreadsheets programmatically.
"""
import os
import re
import sys
import shutil
import warnings
import datetime as dt
from time import sleep
from pathlib import Path
# External Dependencies
import pythoncom
import numpy as np
import pandas as pd
import pandas.api.types as types
from win32com import client
from win32com.client import constants as c
# Local modules
import exceltools.errors as err
from exceltools.range import Range
from exceltools.column import Column
from exceltools.cell import CellReference
from exceltools.utils import col2num, num2col, excel_date, rgb2hex
class ExcelSpreadSheet:
"""
A class built to simplify and streamline working with the win32client library.
Example usage involves opening an existing workbook and saving a new copy without changing the original.
New workbooks can also be created and originals can be overwritten.
Example Usage:
excel = ExcelSpreadSheet()
excel.open("C:/Users/generic_user/Documents/master_file.xlsx")
excel.write_dataframe(data, sheet="Sheet 1", start_col=1, start_row=2, headers=True)
excel.write_cell("SomeString", sheet=1, row=1, col="A")
excel.save_xlsx("C:/Users/generic_user/Documents/new_file.xlsx")
excel.close(save_changes=False)
"""
def __init__(self):
global client
try:
self.excel = client.gencache.EnsureDispatch("Excel.Application")
except Exception:
# Remove cache and try again.
module_list = [m.__name__ for m in sys.modules.values()]
for module in module_list:
if re.match(r"win32com\.gen_py\..+", module):
del sys.modules[module]
shutil.rmtree(os.path.join(os.environ.get("LOCALAPPDATA"), "Temp", "gen_py"))
from win32com import client
self.excel = client.gencache.EnsureDispatch("Excel.Application")
self.wb = None
self._wb_open = 0
self.active_sheet = None
self.sheet_names = []
self.null_arg = pythoncom.Empty
self._wb_open = 0
self.format_args = {
"Condition": {
"logic": "logic_dict[logic]",
"value": "value",
"value2": "value2"
},
"Format": {
"interior_colour": "Interior.Color = self.rgb2hex(kwargs['interior_colour'])",
"number_format": "NumberFormat = kwargs['number_format']",
"bold": "Font.Bold = kwargs['bold']",
"font_colour": "Font.Color = self.rgb2hex(kwargs['font_colour'])",
"font_size": "Font.Size = kwargs['font_size']",
"font_name": "Font.Name = kwargs['font_name']",
"orientation": "Orientation = kwargs['orientation']",
"underline": "Font.Underline = kwargs['underline']",
"merge": "MergeCells = kwargs['merge']",
"wrap_text": "WrapText = kwargs['wrap_text']",
"h_align": "HorizontalAlignment = kwargs['h_align']",
"v_align": "VerticalAlignment = kwargs['v_align']",
"border_left": {
"line_style": "Borders(c.xlEdgeLeft).LineStyle = kwargs['border_left']['line_style']",
"weight": "Borders(c.xlEdgeLeft).Weight = kwargs['border_left']['weight']",
"colour": "Borders(c.xlEdgeLeft).Color = self.rgb2hex(kwargs['border_left']['colour'])",
},
"border_right": {
"line_style": "Borders(c.xlEdgeRight).LineStyle = kwargs['border_right']['line_style']",
"weight": "Borders(c.xlEdgeRight).Weight = kwargs['border_right']['weight']",
"colour": "Borders(c.xlEdgeRight).Color = self.rgb2hex(kwargs['border_right']['colour'])",
},
"border_top": {
"line_style": "Borders(c.xlEdgeTop).LineStyle = kwargs['border_top']['line_style']",
"weight": "Borders(c.xlEdgeTop).Weight = kwargs['border_top']['weight']",
"colour": "Borders(c.xlEdgeTop).Color = self.rgb2hex(kwargs['border_top']['colour'])",
},
"border_bot": {
"line_style": "Borders(c.xlEdgeBottom).LineStyle = kwargs['border_bot']['line_style']",
"weight": "Borders(c.xlEdgeBottom).Weight = kwargs['border_bot']['weight']",
"colour": "Borders(c.xlEdgeBottom).Color = self.rgb2hex(kwargs['border_bot']['colour'])",
},
"border_inside_h": {
"line_style": "Borders(c.xlInsideHorizontal).LineStyle = kwargs['border_inside_h']['line_style']",
"weight": "Borders(c.xlInsideHorizontal).Weight = kwargs['border_inside_h']['weight']",
"colour": "Borders(c.xlInsideHorizontal).Color = self.rgb2hex(kwargs['border_inside_h']['colour'])",
},
"border_inside_v": {
"line_style": "Borders(c.xlInsideVertical).LineStyle = kwargs['border_inside_v']['line_style']",
"weight": "Borders(c.xlInsideVertical).Weight = kwargs['border_inside_v']['weight']",
"colour": "Borders(c.xlInsideVertical).Color = self.rgb2hex(kwargs['border_inside_v']['colour'])",
}
}
}
@staticmethod
def col2num(col_str: str) -> int:
"""
Convert an Excel column string to an integer -> "A" == 1, "AA" == 27 e.t.c.
"""
return col2num(col_str)
@staticmethod
def num2col(col_int: int) -> str:
"""
Convert an Excel column index to a string -> 1 == "A", 27 == "AA", etc.
"""
return num2col(col_int)
@staticmethod
def rgb2hex(rgb: list | tuple) -> int:
"""
Excel expects a hex value in order to fill cells
This function allows you to supply standard RGB values to be converted to hex.
"""
return rgb2hex(rgb)
@staticmethod
def excel_date(date: pd.Series | dt.datetime | dt.date) -> float:
"""
Convert a datetime.datetime or pandas.Series object into an Excel date float
"""
return excel_date(date)
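    # Example (illustrative): Excel stores dates as serial-day floats counted
    # from an 1899-12-30 epoch, so under the usual Windows 1900 date system
    #   ExcelSpreadSheet.excel_date(dt.datetime(2023, 1, 1))
    # is expected to return 44927.0.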
def _validate_workbook(self):
"""
Ensure the current workbook is open and valid
"""
if self._wb_open == 0:
raise err.NoWorkbookError()
def _validate_worksheet(self, sheet):
"""
Make sure the sheet supplied is valid for the current open workbook
"""
if isinstance(sheet, str):
if sheet not in self.sheet_names:
raise err.InvalidSheetError(f"A sheet with the name {sheet} does not exist")
elif isinstance(sheet, int):
if len(self.sheet_names) < sheet:
raise err.InvalidSheetError(f"Invalid Sheet Index. Sheet index {sheet} is out of bounds.")
def _cleanse_data(self, data):
"""
Excel will print np.Nan as 65535.
This function aims to cleanse any representations of NULL so that they print as expected to Excel.
At this stage we also attempt to convert datetimes to a numeric value used by Excel.
"""
if isinstance(data, pd.DataFrame):
for column in data:
_dtype = data[column].dtype
if types.is_numeric_dtype(_dtype):
data.loc[:, column] = data[column].fillna(0)
if types.is_string_dtype(_dtype):
data.loc[:, column] = data[column].fillna("")
if types.is_datetime64_any_dtype(_dtype):
data.loc[:, column] = self.excel_date(data[column])
elif isinstance(data, (pd.Series, list)):
_dtype = pd.Series(data)
"""
Objects used to store and manage metabolomics data
Objects
-------
- DataContainer: Stores metabolomics data.
Exceptions
----------
- BatchInformationError
- RunOrderError
- ClassNameError
- EmptyDataContainerError
Usage
-----
DataContainers can be created in two different ways other than using the
constructor:
- Using the functions in the fileio module to read data processed with a third
party software (XCMS, MZMine2, etc...)
- Performing Feature correspondence algorithm on features detected from raw
data (not implemented yet...)
"""
from . import utils
from . import validation
from . import fileio
from ._names import *
import numpy as np
import pandas as pd
from sklearn.decomposition import PCA
from typing import List, Optional, Iterable, Union, BinaryIO, TextIO
import bokeh.plotting
import pickle
from bokeh.palettes import Category10
from bokeh.models import ColumnDataSource
from bokeh.transform import factor_cmap
from bokeh.models import LabelSet
import seaborn as sns
class DataContainer(object):
"""
Container object that stores processed metabolomics data.
The data is separated in three attributes: data_matrix, sample_metadata and
feature_metadata. Each one is a pandas DataFrame. DataContainers can be
created, apart from using the constructor, importing data in common formats
(such as: XCMS, MZMine2, Progenesis, etc..) static methods.
Attributes
----------
data_matrix : DataFrame.
feature values for each sample. Data is organized in a "tidy" way:
each row is an observation, each column is a feature. dtype must
be float and all values should be non negative, but NANs are fine.
sample_metadata : DataFrame.
Metadata associated to each sample (eg: sample class). Has the same
index as the data_matrix. `class` (standing for sample class) is a
required column. Analytical batch and run order information can be
included under the `batch` and `order` columns. Both must be integer
numbers, and the run order must be unique for each sample. If the
run order is specified in a per-batch fashion, the values will be
converted to a unique value.
feature_metadata : DataFrame.
Metadata associated to each feature (eg: mass to charge ratio (mz),
retention time (rt), etc...). The index is equal to the `data_matrix`
column. "mz" and "rt" are required columns.
mapping : dictionary of sample types to a list of sample classes.
Maps sample types to sample classes. valid samples types are `qc`,
`blank`, `sample` or `suitability`. values are list of sample classes.
Mapping is used by Processor objects to define a default behaviour. For
example, when using a BlankCorrector, the blank contribution to each
feature is estimated using the sample classes that are values of the
`blank` sample type.
metrics : methods to compute common feature metrics.
plot : methods to plot features.
preprocess : methods to perform common preprocessing tasks.
id
batch
order
Methods
-------
remove(remove, axis) : Remove samples/features from the DataContainer.
reset(reset_mapping=True) : Reset the DataContainer, ie: recover removed
samples/features, transformed values.
is_valid_class_name(value) : checks if a class is present in the
DataContainer
diagnose() : creates a dictionary with information about the status of the
DataContainer. Used by Processor objects as a validity check.
select_features(mz, rt, mz_tol=0.01, rt_tol=5) : Search features within
a m/z and rt tolerance.
set_default_order() : Assigns a default run order of the samples assuming
that the data matrix is sorted by run order already.
sort(field, axis) : sort features/samples using metadata information.
save(filename) : save the DataContainer as a pickle.
See Also
--------
from_progenesis
from_pickle
MetricMethods
PlotMethods
PreprocessMethods
"""
def __init__(self, data_matrix: pd.DataFrame,
feature_metadata: pd.DataFrame,
sample_metadata: pd.DataFrame,
mapping: Optional[dict] = None,
plot_mode: str = "bokeh"):
"""
See help(DataContainer) for more details
Parameters
----------
data_matrix : pandas.DataFrame.
Feature values for each measured sample. Each row is a sample and
each column is a feature.
sample_metadata : pandas.DataFrame.
Metadata for each sample. class is a required column.
feature_metadata : pandas.DataFrame.
DataFrame with features names as indices. mz and rt are required
columns.
mapping : dict or None
if dict, set each sample class to sample type.
plot_mode : {"seaborn", "bokeh"}
The package used to generate plots with the plot methods
"""
validation.validate_data_container(data_matrix, feature_metadata,
sample_metadata)
# check and convert order and batch information
try:
order = sample_metadata.pop(_sample_order)
try:
batch = sample_metadata.pop(_sample_batch)
except KeyError:
batch = pd.Series(data=np.ones_like(order.values),
index=order.index)
order = _convert_to_interbatch_order(order, batch)
sample_metadata[_sample_order] = order
sample_metadata[_sample_batch] = batch
except KeyError:
pass
# values are copied to prevent that modifications on the original
# objects affect the DataContainer attributes
self.data_matrix = data_matrix.copy()
self.feature_metadata = feature_metadata.copy()
self.sample_metadata = sample_metadata.copy()
self._sample_mask = data_matrix.index.copy()
self._feature_mask = data_matrix.columns.copy()
self.mapping = mapping
self.id = data_matrix.index
self.plot = None
# copy back up data for resetting
self._original_data_matrix = self.data_matrix.copy()
self._original_sample_metadata = self.sample_metadata.copy()
self._original_feature_metadata = self.feature_metadata.copy()
# adding methods
self.metrics = MetricMethods(self)
self.preprocess = PreprocessMethods(self)
self.set_plot_mode(plot_mode)
@property
def data_matrix(self) -> pd.DataFrame:
return self._data_matrix.loc[self._sample_mask, self._feature_mask]
@data_matrix.setter
def data_matrix(self, value: pd.DataFrame):
self._data_matrix = value
@property
def feature_metadata(self) -> pd.DataFrame:
return self._feature_metadata.loc[self._feature_mask, :]
@feature_metadata.setter
def feature_metadata(self, value: pd.DataFrame):
self._feature_metadata = value
@property
def sample_metadata(self) -> pd.DataFrame:
return self._sample_metadata.loc[self._sample_mask, :]
@sample_metadata.setter
def sample_metadata(self, value: pd.DataFrame):
self._sample_metadata = value
@property
def mapping(self):
return self._mapping
@mapping.setter
def mapping(self, mapping: dict):
self._mapping = _make_empty_mapping()
if mapping is not None:
valid_samples = self.classes.unique()
_validate_mapping(mapping, valid_samples)
self._mapping.update(mapping)
@property
def id(self) -> pd.Series:
"""pd.Series[str] : name id of each sample."""
return self._sample_metadata.loc[self._sample_mask, _sample_id]
@id.setter
def id(self, value: pd.Series):
self._sample_metadata.loc[self._sample_mask, _sample_id] = value
@property
def classes(self) -> pd.Series:
"""pd.Series[str] : class of each sample."""
return self._sample_metadata.loc[self._sample_mask, _sample_class]
@classes.setter
def classes(self, value: pd.Series):
self._sample_metadata.loc[self._sample_mask, _sample_class] = value
@property
def batch(self) -> pd.Series:
"""pd.Series[int]. Analytical batch number"""
try:
return self._sample_metadata.loc[self._sample_mask, _sample_batch]
except KeyError:
raise BatchInformationError("No batch information available.")
@batch.setter
def batch(self, value: pd.Series):
self._sample_metadata.loc[self._sample_mask,
_sample_batch] = value.astype(int)
@property
def order(self) -> pd.Series:
"""
pd.Series[int] : Run order in which samples were analyzed. It must be
an unique integer for each sample.
"""
try:
return self._sample_metadata.loc[self._sample_mask, _sample_order]
except KeyError:
raise RunOrderError("No run order information available")
@order.setter
def order(self, value: pd.Series):
if utils.is_unique(value):
self._sample_metadata.loc[self._sample_mask,
_sample_order] = value.astype(int)
else:
msg = "order values must be unique"
raise ValueError(msg)
@property
def dilution(self) -> pd.Series:
try:
return self._sample_metadata.loc[self._sample_mask,
_sample_dilution]
except KeyError:
msg = "No dilution information available."
raise DilutionInformationError(msg)
@dilution.setter
def dilution(self, value):
self._sample_metadata.loc[self._sample_mask, _sample_dilution] = value
def is_valid_class_name(self, test_class: Union[str, List[str]]) -> bool:
"""
Check if at least one sample class is`class_name`.
Parameters
----------
test_class : str or list[str]
classes to search in the DataContainer.
Returns
-------
is_valid : bool
"""
valid_classes = self.classes.unique()
if isinstance(test_class, str):
return test_class in valid_classes
else:
for c in test_class:
if not (c in valid_classes):
return False
return True
def remove(self, remove: Iterable[str], axis: str):
"""
Remove selected features / samples
Parameters
----------
remove : Iterable[str]
List of sample/feature names to remove.
axis : {"features", "samples"}
"""
if not self._is_valid(remove, axis):
msg = "Some samples/features aren't in the DataContainer"
raise ValueError(msg)
if axis == "features":
self._feature_mask = self._feature_mask.difference(remove)
elif axis == "samples":
self._sample_mask = self._sample_mask.difference(remove)
def _is_valid(self, index: Iterable[str], axis: str) -> bool:
"""
Check if all samples/features are present in the DataContainer.
Parameters
----------
index: list[str]
List of feature/sample names to check.
axis: {"samples", "features"}
"""
ind = pd.Index(index)
if axis == "features":
return ind.isin(self.data_matrix.columns).all()
elif axis == "samples":
return ind.isin(self.data_matrix.index).all()
else:
msg = "axis must be `features` or `samples`."
raise ValueError(msg)
def diagnose(self) -> dict:
"""
Check if DataContainer has information to perform several correction
types
Returns
-------
diagnostic : dict
Each value is a bool indicating the status. `empty` is True if the
size in at least one dimension of the data matrix is zero; "missing"
is True if there are NANs in the data matrix; "order" is True
if there is run order information for the samples; "batch" is True
if there is batch number information associated to the samples.
"""
diagnostic = dict()
diagnostic["empty"] = self.data_matrix.empty
diagnostic["missing"] = self.data_matrix.isna().any().any()
diagnostic[_qc_sample_type] = bool(self.mapping[_qc_sample_type])
diagnostic[_blank_sample_type] = bool(self.mapping[_blank_sample_type])
diagnostic[_study_sample_type] = bool(self.mapping[_study_sample_type])
diagnostic[_dilution_qc_type] = bool(self.mapping[_dilution_qc_type])
try:
diagnostic[_sample_order] = self.order.any()
except RunOrderError:
diagnostic[_sample_order] = False
try:
diagnostic[_sample_batch] = self.batch.any()
except BatchInformationError:
diagnostic[_sample_batch] = False
return diagnostic
def reset(self, reset_mapping: bool = True):
"""
Reloads the original data matrix.
Parameters
----------
reset_mapping: bool
If True, clears sample classes from the mapping.
"""
self._sample_mask = self._original_data_matrix.index
self._feature_mask = self._original_data_matrix.columns
self.data_matrix = self._original_data_matrix
self.sample_metadata = self._original_sample_metadata
self.feature_metadata = self._original_feature_metadata
if reset_mapping:
self.mapping = None
def select_features(self, mzq: float, rtq: float, mz_tol: float = 0.01,
rt_tol: float = 5) -> pd.Index:
"""
Find feature names within the defined mass-to-charge and retention time
tolerance.
Parameters
----------
mzq: positive number
Mass-to-charge value to search
rtq: positive number
Retention time value to search
mz_tol: positive number
Mass-to-charge tolerance used in the search.
rt_tol: positive number
Retention time tolerance used in the search.
Returns
-------
Index
"""
mz_match = (self.feature_metadata["mz"] - mzq).abs() < mz_tol
rt_match = (self.feature_metadata["rt"] - rtq).abs() < rt_tol
mz_match_ft = mz_match[mz_match].index
rt_match_ft = rt_match[rt_match].index
result = mz_match_ft.intersection(rt_match_ft)
return result
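    # Illustrative call (feature names, masses and tolerances are made up):
    #   matches = data.select_features(mzq=205.0972, rtq=124.0, mz_tol=0.005, rt_tol=10)
    # returns a pandas Index of the features whose m/z and rt both fall inside
    # the requested tolerance windows.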
def set_default_order(self):
"""
Set the order of the samples, assuming that the data is already sorted.
"""
order_data = np.arange(1, self.sample_metadata.shape[0] + 1)
ind = self.data_matrix.index
order = pd.Series(data=order_data, index=ind, dtype=int)
batch = pd.Series(data=1, index=ind, dtype=int)
self.order = order
self.batch = batch
def sort(self, field: str, axis: str):
"""
Sort samples/features in place using metadata values.
Parameters
----------
field: str
field to sort by. Must be a column of `sample_metadata` or
`feature_metadata`.
axis: {"samples", "features"}
"""
if axis == "samples":
tmp = self._sample_metadata.sort_values(field).index
self._sample_mask = tmp.intersection(self._sample_mask)
# self.sample_metadata = self.sample_metadata.loc[sorted_index, :]
# self.data_matrix = self.data_matrix.loc[sorted_index, :]
elif axis == "features":
tmp = self.feature_metadata.sort_values(field).index
self._feature_mask = tmp.intersection(self._feature_mask)
# self.feature_metadata = self.feature_metadata.loc[sorted_index, :]
# self.data_matrix = self.data_matrix.loc[:, sorted_index]
else:
msg = "axis must be `samples` or `features`"
raise ValueError(msg)
def save(self, filename: str) -> None:
"""
Save DataContainer into a pickle
Parameters
----------
filename: str
name used to save the file.
"""
with open(filename, "wb") as fin:
pickle.dump(self, fin)
def set_plot_mode(self, mode: str):
"""
Set the library used to generate plots.
Parameters
----------
mode: {"bokeh", "seaborn"}
"""
if mode == "bokeh":
self.plot = BokehPlotMethods(self)
elif mode == "seaborn":
self.plot = SeabornPlotMethods(self)
else:
msg = "plot mode must be `seaborn` or `bokeh`"
raise ValueError(msg)
def add_order_from_csv(self, path: Union[str, TextIO],
interbatch_order: bool = True) -> None:
"""
adds sample order and sample batch using information from a csv file.
A column with the name `sample` with the same values as the index of
the DataContainer sample_metadata must be provided.
order information is taken from a column with name `order` and the same
is done with batch information. order data must be positive integers
and each batch must have unique values. Each batch must be identified
with a positive integer.
Parameters
----------
path : str
path to the file with order data. Data format is inferred from the
file extension.
interbatch_order : bool
If True converts the order value to a unique value for the whole
DataContainer. This makes plotting the data as a function of order
easier.
"""
df = pd.read_csv(path, index_col="sample")
# Copyright (c) 2018-2021, NVIDIA CORPORATION.
import operator
import string
import numpy as np
import pandas as pd
import pytest
import cudf
from cudf.core._compat import PANDAS_GE_110
from cudf.testing._utils import (
NUMERIC_TYPES,
assert_eq,
assert_exceptions_equal,
)
@pytest.fixture
def pd_str_cat():
categories = list("abc")
codes = [0, 0, 1, 0, 1, 2, 0, 1, 1, 2]
return pd.Categorical.from_codes(codes, categories=categories)
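# The fixture above decodes to ["a", "a", "b", "a", "b", "c", "a", "b", "b", "c"]
# with unordered categories ["a", "b", "c"]; the inplace-parametrized tests below
# mutate copies of it.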
def test_categorical_basic():
cat = pd.Categorical(["a", "a", "b", "c", "a"], categories=["a", "b", "c"])
cudf_cat = cudf.Index(cat)
pdsr = pd.Series(cat, index=["p", "q", "r", "s", "t"])
sr = cudf.Series(cat, index=["p", "q", "r", "s", "t"])
assert_eq(pdsr.cat.codes, sr.cat.codes, check_dtype=False)
# Test attributes
assert_eq(pdsr.cat.categories, sr.cat.categories)
assert pdsr.cat.ordered == sr.cat.ordered
np.testing.assert_array_equal(
pdsr.cat.codes.values, sr.cat.codes.to_array()
)
string = str(sr)
expect_str = """
p a
q a
r b
s c
t a
"""
assert all(x == y for x, y in zip(string.split(), expect_str.split()))
assert_eq(cat.codes, cudf_cat.codes.to_array())
def test_categorical_integer():
if not PANDAS_GE_110:
pytest.xfail(reason="pandas >=1.1 required")
cat = pd.Categorical(["a", "_", "_", "c", "a"], categories=["a", "b", "c"])
pdsr = pd.Series(cat)
sr = cudf.Series(cat)
np.testing.assert_array_equal(
cat.codes, sr.cat.codes.astype(cat.codes.dtype).fillna(-1).to_array()
)
assert sr.null_count == 2
np.testing.assert_array_equal(
pdsr.cat.codes.values,
sr.cat.codes.astype(pdsr.cat.codes.dtype).fillna(-1).to_array(),
)
string = str(sr)
expect_str = """
0 a
1 <NA>
2 <NA>
3 c
4 a
dtype: category
Categories (3, object): ['a', 'b', 'c']
"""
assert string.split() == expect_str.split()
def test_categorical_compare_unordered():
cat = pd.Categorical(["a", "a", "b", "c", "a"], categories=["a", "b", "c"])
pdsr = pd.Series(cat)
sr = cudf.Series(cat)
# test equal
out = sr == sr
assert out.dtype == np.bool_
assert type(out[0]) == np.bool_
assert np.all(out.to_array())
assert np.all(pdsr == pdsr)
# test inequality
out = sr != sr
assert not np.any(out.to_array())
assert not np.any(pdsr != pdsr)
assert not pdsr.cat.ordered
assert not sr.cat.ordered
# test using ordered operators
assert_exceptions_equal(
lfunc=operator.lt,
rfunc=operator.lt,
lfunc_args_and_kwargs=([pdsr, pdsr],),
rfunc_args_and_kwargs=([sr, sr],),
)
def test_categorical_compare_ordered():
cat1 = pd.Categorical(
["a", "a", "b", "c", "a"], categories=["a", "b", "c"], ordered=True
)
pdsr1 = pd.Series(cat1)
sr1 = cudf.Series(cat1)
cat2 = pd.Categorical(
["a", "b", "a", "c", "b"], categories=["a", "b", "c"], ordered=True
)
pdsr2 = pd.Series(cat2)
sr2 = cudf.Series(cat2)
# test equal
out = sr1 == sr1
assert out.dtype == np.bool_
assert type(out[0]) == np.bool_
assert np.all(out.to_array())
assert np.all(pdsr1 == pdsr1)
# test inequality
out = sr1 != sr1
assert not np.any(out.to_array())
assert not np.any(pdsr1 != pdsr1)
assert pdsr1.cat.ordered
assert sr1.cat.ordered
# test using ordered operators
np.testing.assert_array_equal(pdsr1 < pdsr2, (sr1 < sr2).to_array())
np.testing.assert_array_equal(pdsr1 > pdsr2, (sr1 > sr2).to_array())
def test_categorical_binary_add():
cat = pd.Categorical(["a", "a", "b", "c", "a"], categories=["a", "b", "c"])
pdsr = pd.Series(cat)
sr = cudf.Series(cat)
assert_exceptions_equal(
lfunc=operator.add,
rfunc=operator.add,
lfunc_args_and_kwargs=([pdsr, pdsr],),
rfunc_args_and_kwargs=([sr, sr],),
expected_error_message="Series of dtype `category` cannot perform "
"the operation: add",
)
def test_categorical_unary_ceil():
cat = pd.Categorical(["a", "a", "b", "c", "a"], categories=["a", "b", "c"])
pdsr = pd.Series(cat)
sr = cudf.Series(cat)
assert_exceptions_equal(
lfunc=getattr,
rfunc=sr.ceil,
lfunc_args_and_kwargs=([pdsr, "ceil"],),
check_exception_type=False,
expected_error_message="Series of dtype `category` cannot "
"perform the operation: ceil",
)
def test_categorical_element_indexing():
"""
Element indexing into a cat column must give the underlying object,
not the numerical index.
"""
cat = pd.Categorical(["a", "a", "b", "c", "a"], categories=["a", "b", "c"])
pdsr = pd.Series(cat)
sr = cudf.Series(cat)
assert_eq(pdsr, sr)
assert_eq(pdsr.cat.codes, sr.cat.codes, check_dtype=False)
def test_categorical_masking():
"""
Test the common operation of getting all rows that match a certain
category.
"""
cat = pd.Categorical(["a", "a", "b", "c", "a"], categories=["a", "b", "c"])
pdsr = pd.Series(cat)
sr = cudf.Series(cat)
# check scalar comparison
expect_matches = pdsr == "a"
got_matches = sr == "a"
np.testing.assert_array_equal(
expect_matches.values, got_matches.to_array()
)
# mask series
expect_masked = pdsr[expect_matches]
got_masked = sr[got_matches]
assert len(expect_masked) == len(got_masked)
assert len(expect_masked) == got_masked.valid_count
assert_eq(got_masked, expect_masked)
def test_df_cat_set_index():
df = cudf.DataFrame()
df["a"] = pd.Categorical(list("aababcabbc"), categories=list("abc"))
df["b"] = np.arange(len(df))
got = df.set_index("a")
pddf = df.to_pandas(nullable_pd_dtype=False)
expect = pddf.set_index("a")
assert_eq(got, expect)
def test_df_cat_sort_index():
df = cudf.DataFrame()
df["a"] = pd.Categorical(list("aababcabbc"), categories=list("abc"))
df["b"] = np.arange(len(df))
got = df.set_index("a").sort_index()
expect = df.to_pandas(nullable_pd_dtype=False).set_index("a").sort_index()
assert_eq(got, expect)
def test_cat_series_binop_error():
df = cudf.DataFrame()
df["a"] = pd.Categorical(list("aababcabbc"), categories=list("abc"))
df["b"] = np.arange(len(df))
dfa = df["a"]
dfb = df["b"]
# lhs is a categorical
assert_exceptions_equal(
lfunc=operator.add,
rfunc=operator.add,
lfunc_args_and_kwargs=([dfa, dfb],),
rfunc_args_and_kwargs=([dfa, dfb],),
check_exception_type=False,
expected_error_message="Series of dtype `category` cannot "
"perform the operation: add",
)
# if lhs is a numerical
assert_exceptions_equal(
lfunc=operator.add,
rfunc=operator.add,
lfunc_args_and_kwargs=([dfb, dfa],),
rfunc_args_and_kwargs=([dfb, dfa],),
check_exception_type=False,
expected_error_message="'add' operator not supported",
)
@pytest.mark.parametrize("num_elements", [10, 100, 1000])
def test_categorical_unique(num_elements):
# create categorical series
np.random.seed(12)
pd_cat = pd.Categorical(
pd.Series(
np.random.choice(
list(string.ascii_letters + string.digits), num_elements
),
dtype="category",
)
)
# gdf
gdf = cudf.DataFrame()
gdf["a"] = cudf.Series.from_categorical(pd_cat)
gdf_unique_sorted = np.sort(gdf["a"].unique().to_pandas())
# pandas
pdf = pd.DataFrame()
pdf["a"] = pd_cat
pdf_unique_sorted = np.sort(pdf["a"].unique())
# verify
np.testing.assert_array_equal(pdf_unique_sorted, gdf_unique_sorted)
@pytest.mark.parametrize("nelem", [20, 50, 100])
def test_categorical_unique_count(nelem):
# create categorical series
np.random.seed(12)
pd_cat = pd.Categorical(
pd.Series(
np.random.choice(
list(string.ascii_letters + string.digits), nelem
),
dtype="category",
)
)
# gdf
gdf = cudf.DataFrame()
gdf["a"] = cudf.Series.from_categorical(pd_cat)
gdf_unique_count = gdf["a"].nunique()
# pandas
pdf = pd.DataFrame()
pdf["a"] = pd_cat
pdf_unique = pdf["a"].unique()
# verify
assert gdf_unique_count == len(pdf_unique)
def test_categorical_empty():
cat = pd.Categorical([])
pdsr = pd.Series(cat)
sr = cudf.Series(cat)
np.testing.assert_array_equal(cat.codes, sr.cat.codes.to_array())
# Test attributes
assert_eq(pdsr.cat.categories, sr.cat.categories)
assert pdsr.cat.ordered == sr.cat.ordered
np.testing.assert_array_equal(
pdsr.cat.codes.values, sr.cat.codes.to_array()
)
def test_categorical_set_categories():
cat = pd.Categorical(["a", "a", "b", "c", "a"], categories=["a", "b", "c"])
psr = pd.Series(cat)
sr = cudf.Series.from_categorical(cat)
# adding category
expect = psr.cat.set_categories(["a", "b", "c", "d"])
got = sr.cat.set_categories(["a", "b", "c", "d"])
assert_eq(expect, got)
# removing category
expect = psr.cat.set_categories(["a", "b"])
got = sr.cat.set_categories(["a", "b"])
assert_eq(expect, got)
def test_categorical_set_categories_preserves_order():
series = pd.Series([1, 0, 0, 0, 2]).astype("category")
# reassigning categories should preserve element ordering
assert_eq(
series.cat.set_categories([1, 2]),
cudf.Series(series).cat.set_categories([1, 2]),
)
@pytest.mark.parametrize("inplace", [True, False])
def test_categorical_as_ordered(pd_str_cat, inplace):
pd_sr = pd.Series(pd_str_cat.copy().set_ordered(False))
cd_sr = cudf.Series(pd_str_cat.copy().set_ordered(False))
assert cd_sr.cat.ordered is False
assert cd_sr.cat.ordered == pd_sr.cat.ordered
pd_sr_1 = pd_sr.cat.as_ordered(inplace=inplace)
cd_sr_1 = cd_sr.cat.as_ordered(inplace=inplace)
pd_sr_1 = pd_sr if pd_sr_1 is None else pd_sr_1
cd_sr_1 = cd_sr if cd_sr_1 is None else cd_sr_1
assert cd_sr_1.cat.ordered is True
assert cd_sr_1.cat.ordered == pd_sr_1.cat.ordered
assert str(cd_sr_1) == str(pd_sr_1)
@pytest.mark.parametrize("inplace", [True, False])
def test_categorical_as_unordered(pd_str_cat, inplace):
pd_sr = pd.Series(pd_str_cat.copy().set_ordered(True))
cd_sr = cudf.Series(pd_str_cat.copy().set_ordered(True))
assert cd_sr.cat.ordered is True
assert cd_sr.cat.ordered == pd_sr.cat.ordered
pd_sr_1 = pd_sr.cat.as_unordered(inplace=inplace)
cd_sr_1 = cd_sr.cat.as_unordered(inplace=inplace)
pd_sr_1 = pd_sr if pd_sr_1 is None else pd_sr_1
cd_sr_1 = cd_sr if cd_sr_1 is None else cd_sr_1
assert cd_sr_1.cat.ordered is False
assert cd_sr_1.cat.ordered == pd_sr_1.cat.ordered
assert str(cd_sr_1) == str(pd_sr_1)
@pytest.mark.parametrize("from_ordered", [True, False])
@pytest.mark.parametrize("to_ordered", [True, False])
@pytest.mark.parametrize("inplace", [True, False])
def test_categorical_reorder_categories(
pd_str_cat, from_ordered, to_ordered, inplace
):
pd_sr = pd.Series(pd_str_cat.copy().set_ordered(from_ordered))
cd_sr = cudf.Series(pd_str_cat.copy().set_ordered(from_ordered))
assert_eq(pd_sr, cd_sr)
assert str(pd_sr) == str(cd_sr)
kwargs = dict(ordered=to_ordered, inplace=inplace)
pd_sr_1 = pd_sr.cat.reorder_categories(list("cba"), **kwargs)
cd_sr_1 = cd_sr.cat.reorder_categories(list("cba"), **kwargs)
pd_sr_1 = pd_sr if pd_sr_1 is None else pd_sr_1
cd_sr_1 = cd_sr if cd_sr_1 is None else cd_sr_1
assert_eq(pd_sr_1, cd_sr_1)
assert str(cd_sr_1) == str(pd_sr_1)
@pytest.mark.parametrize("inplace", [True, False])
def test_categorical_add_categories(pd_str_cat, inplace):
pd_sr = pd.Series(pd_str_cat.copy())
cd_sr = cudf.Series(pd_str_cat.copy())
assert_eq(pd_sr, cd_sr)
assert str(pd_sr) == str(cd_sr)
pd_sr_1 = pd_sr.cat.add_categories(["d"], inplace=inplace)
cd_sr_1 = cd_sr.cat.add_categories(["d"], inplace=inplace)
pd_sr_1 = pd_sr if pd_sr_1 is None else pd_sr_1
cd_sr_1 = cd_sr if cd_sr_1 is None else cd_sr_1
assert "d" in pd_sr_1.cat.categories.to_list()
assert "d" in cd_sr_1.cat.categories.to_pandas().to_list()
assert_eq(pd_sr_1, cd_sr_1)
@pytest.mark.parametrize("inplace", [True, False])
def test_categorical_remove_categories(pd_str_cat, inplace):
pd_sr = pd.Series(pd_str_cat.copy())
cd_sr = cudf.Series(pd_str_cat.copy())
assert_eq(pd_sr, cd_sr)
assert str(pd_sr) == str(cd_sr)
pd_sr_1 = pd_sr.cat.remove_categories(["a"], inplace=inplace)
cd_sr_1 = cd_sr.cat.remove_categories(["a"], inplace=inplace)
pd_sr_1 = pd_sr if pd_sr_1 is None else pd_sr_1
cd_sr_1 = cd_sr if cd_sr_1 is None else cd_sr_1
assert "a" not in pd_sr_1.cat.categories.to_list()
assert "a" not in cd_sr_1.cat.categories.to_pandas().to_list()
assert_eq(pd_sr_1, cd_sr_1)
# test using ordered operators
assert_exceptions_equal(
lfunc=cd_sr.to_pandas().cat.remove_categories,
rfunc=cd_sr.cat.remove_categories,
lfunc_args_and_kwargs=([["a", "d"]], {"inplace": inplace}),
rfunc_args_and_kwargs=([["a", "d"]], {"inplace": inplace}),
expected_error_message="removals must all be in old categories",
)
def test_categorical_dataframe_slice_copy():
pdf = pd.DataFrame({"g": pd.Series(["a", "b", "z"], dtype="category")})
gdf = cudf.from_pandas(pdf)
exp = pdf[1:].copy()
gdf = gdf[1:].copy()
assert_eq(exp, gdf)
@pytest.mark.parametrize(
"data",
[
pd.Series([1, 2, 3, 89]),
pd.Series([1, 2, 3, 89, 3, 1, 89], dtype="category"),
pd.Series(["1", "2", "3", "4", "5"], dtype="category"),
pd.Series(["1.0", "2.5", "3.001", "9"], dtype="category"),
pd.Series(["1", "2", "3", None, "4", "5"], dtype="category"),
pd.Series(["1.0", "2.5", "3.001", None, "9"], dtype="category"),
pd.Series(["a", "b", "c", "c", "b", "a", "b", "b"]),
pd.Series(["aa", "b", "c", "c", "bb", "bb", "a", "b", "b"]),
pd.Series([1, 2, 3, 89, None, np.nan, np.NaN], dtype="float64"),
pd.Series([1, 2, 3, 89], dtype="float64"),
pd.Series([1, 2.5, 3.001, 89], dtype="float64"),
pd.Series([None, None, None]),
pd.Series([], dtype="float64"),
],
)
@pytest.mark.parametrize(
"cat_type",
[
pd.CategoricalDtype(categories=["aa", "bb", "cc"]),
pd.CategoricalDtype(categories=[2, 4, 10, 100]),
pd.CategoricalDtype(categories=["aa", "bb", "c"]),
pd.CategoricalDtype(categories=["a", "bb", "c"]),
pd.CategoricalDtype(categories=["a", "b", "c"]),
pd.CategoricalDtype(categories=["1", "2", "3", "4"]),
pd.CategoricalDtype(categories=["1.0", "2.5", "3.001", "9"]),
pd.CategoricalDtype(categories=[]),
],
)
def test_categorical_typecast(data, cat_type):
pd_data = data.copy()
gd_data = cudf.from_pandas(data)
assert_eq(pd_data.astype(cat_type), gd_data.astype(cat_type))
@pytest.mark.parametrize(
"data",
[
pd.Series([1, 2, 3, 89]),
pd.Series(["a", "b", "c", "c", "b", "a", "b", "b"]),
pd.Series(["aa", "b", "c", "c", "bb", "bb", "a", "b", "b"]),
pd.Series([1, 2, 3, 89, None, np.nan, np.NaN], dtype="float64"),
pd.Series([1, 2, 3, 89], dtype="float64"),
pd.Series([1, 2.5, 3.001, 89], dtype="float64"),
pd.Series([None, None, None]),
pd.Series([], dtype="float64"),
],
)
@pytest.mark.parametrize(
"new_categories",
[
["aa", "bb", "cc"],
[2, 4, 10, 100],
["aa", "bb", "c"],
["a", "bb", "c"],
["a", "b", "c"],
[],
pd.Series(["a", "b", "c"]),
pd.Series(["a", "b", "c"], dtype="category"),
pd.Series([-100, 10, 11, 0, 1, 2], dtype="category"),
],
)
def test_categorical_set_categories_categoricals(data, new_categories):
pd_data = data.copy().astype("category")
gd_data = cudf.from_pandas(pd_data)
assert_eq(
pd_data.cat.set_categories(new_categories=new_categories),
gd_data.cat.set_categories(new_categories=new_categories),
)
assert_eq(
pd_data.cat.set_categories(
new_categories=pd.Series(new_categories, dtype="category")
),
gd_data.cat.set_categories(
new_categories=cudf.Series(new_categories, dtype="category")
),
)
@pytest.mark.parametrize(
"data",
[
[1, 2, 3, 4],
["a", "1", "2", "1", "a"],
pd.Series(["a", "1", "22", "1", "aa"]),
pd.Series(["a", "1", "22", "1", "aa"], dtype="category"),
pd.Series([1, 2, 3, -4], dtype="int64"),
pd.Series([1, 2, 3, 4], dtype="uint64"),
pd.Series([1, 2.3, 3, 4], dtype="float"),
[None, 1, None, 2, None],
[],
],
)
@pytest.mark.parametrize(
"dtype",
[
pd.CategoricalDtype(categories=["aa", "bb", "cc"]),
pd.CategoricalDtype(categories=[2, 4, 10, 100]),
pd.CategoricalDtype(categories=["aa", "bb", "c"]),
pd.CategoricalDtype(categories=["a", "bb", "c"]),
pd.CategoricalDtype(categories=["a", "b", "c"]),
pd.CategoricalDtype(categories=["22", "b", "c"]),
pd.CategoricalDtype(categories=[]),
],
)
def test_categorical_creation(data, dtype):
expected = pd.Series(data, dtype=dtype)
got = cudf.Series(data, dtype=dtype)
assert_eq(expected, got)
got = cudf.Series(data, dtype=cudf.from_pandas(dtype))
assert_eq(expected, got)
expected = pd.Series(data, dtype="category")
got = cudf.Series(data, dtype="category")
assert_eq(expected, got)
@pytest.mark.parametrize(
"categories",
[
[],
[1, 2, 3],
pd.Series(["a", "c", "b"], dtype="category"),
pd.Series([1, 2, 3, 4, -100], dtype="category"),
],
)
@pytest.mark.parametrize("ordered", [True, False])
def test_categorical_dtype(categories, ordered):
expected = pd.CategoricalDtype(categories=categories, ordered=ordered)
got = cudf.CategoricalDtype(categories=categories, ordered=ordered)
assert_eq(expected, got)
@pytest.mark.parametrize(
("data", "expected"),
[
(cudf.Series([1]), np.uint8),
(cudf.Series([1, None]), np.uint8),
(cudf.Series(np.arange(np.iinfo(np.int8).max)), np.uint8),
(
cudf.Series(np.append(np.arange(np.iinfo(np.int8).max), [None])),
np.uint8,
),
(cudf.Series(np.arange(np.iinfo(np.int16).max)), np.uint16),
(
cudf.Series(np.append(np.arange(np.iinfo(np.int16).max), [None])),
np.uint16,
),
(cudf.Series(np.arange(np.iinfo(np.uint8).max)), np.uint8),
(
cudf.Series(np.append(np.arange(np.iinfo(np.uint8).max), [None])),
np.uint8,
),
(cudf.Series(np.arange(np.iinfo(np.uint16).max)), np.uint16),
(
cudf.Series(np.append(np.arange(np.iinfo(np.uint16).max), [None])),
np.uint16,
),
],
)
def test_astype_dtype(data, expected):
got = data.astype("category").cat.codes.dtype
np.testing.assert_equal(got, expected)
@pytest.mark.parametrize(
"data,add",
[
([1, 2, 3], [100, 11, 12]),
([1, 2, 3], [0.01, 9.7, 15.0]),
([0.0, 6.7, 10.0], [100, 11, 12]),
([0.0, 6.7, 10.0], [0.01, 9.7, 15.0]),
(["a", "bd", "ef"], ["asdfsdf", "bddf", "eff"]),
([1, 2, 3], []),
([0.0, 6.7, 10.0], []),
(["a", "bd", "ef"], []),
],
)
def test_add_categories(data, add):
pds = pd.Series(data, dtype="category")
gds = cudf.Series(data, dtype="category")
expected = pds.cat.add_categories(add)
actual = gds.cat.add_categories(add)
assert_eq(
expected.cat.codes, actual.cat.codes.astype(expected.cat.codes.dtype)
)
# Need to type-cast pandas object to str due to mixed-type
# support in "object"
assert_eq(
expected.cat.categories.astype("str")
if (expected.cat.categories.dtype == "object")
else expected.cat.categories,
actual.cat.categories,
)
@pytest.mark.parametrize(
"data,add",
[
([1, 2, 3], [1, 3, 11]),
([0.0, 6.7, 10.0], [1, 2, 0.0]),
(["a", "bd", "ef"], ["a", "bd", "a"]),
],
)
def test_add_categories_error(data, add):
pds = pd.Series(data, dtype="category")
gds = cudf.Series(data, dtype="category")
assert_exceptions_equal(
pds.cat.add_categories,
gds.cat.add_categories,
([add],),
([add],),
compare_error_message=False,
)
def test_add_categories_mixed_error():
gds = cudf.Series(["a", "bd", "ef"], dtype="category")
with pytest.raises(TypeError):
gds.cat.add_categories([1, 2, 3])
gds = cudf.Series([1, 2, 3], dtype="category")
with pytest.raises(TypeError):
gds.cat.add_categories(["a", "bd", "ef"])
@pytest.mark.parametrize(
"data",
[
[1, 2, 3, 4],
["a", "1", "2", "1", "a"],
pd.Series(["a", "1", "22", "1", "aa"]),
pd.Series(["a", "1", "22", "1", "aa"], dtype="category"),
pd.Series([1, 2, 3, 4], dtype="int64"),
pd.Series([1, 2.3, 3, 4], dtype="float"),
[None, 1, None, 2, None],
["a"],
],
)
@pytest.mark.parametrize(
"cat_dtype",
[
pd.CategoricalDtype(categories=["aa", "bb", "cc"]),
pd.CategoricalDtype(categories=[2, 4, 10, 100]),
pd.CategoricalDtype(categories=["aa", "bb", "c"]),
pd.CategoricalDtype(categories=["a", "bb", "c"]),
pd.CategoricalDtype(categories=["a", "b", "c"]),
pd.CategoricalDtype(categories=["22", "b", "c"]),
pd.CategoricalDtype(categories=["a"]),
],
)
def test_categorical_assignment(data, cat_dtype):
pd_df = pd.DataFrame()
pd_df["a"] = np.ones(len(data))
cd_df = cudf.from_pandas(pd_df)
pd_cat_series = pd.Series(data, dtype=cat_dtype)
# assign categorical series
pd_df.assign(cat_col=pd_cat_series)
cd_df.assign(cat_col=pd_cat_series)
assert_eq(pd_df, cd_df)
# assign categorical array
# needed for dask_cudf support for including file name
# as a categorical column
# see issue: https://github.com/rapidsai/cudf/issues/2269
pd_df = pd.DataFrame()
pd_df["a"] = np.ones(len(data))
cd_df = cudf.from_pandas(pd_df)
pd_categorical = pd.Categorical(data, dtype=cat_dtype)
from functools import reduce
import re
import numpy as np
import pandas as pd
from avaml import _NONE
from avaml.aggregatedata.__init__ import DatasetMissingLabel
from avaml.score.overlap import calc_overlap
__author__ = 'arwi'
VECTOR_WETNESS_LOOSE = {
_NONE: (0, 0),
"new-loose": (0, 1),
"wet-loose": (1, 1),
"new-slab": (0, 0.4),
"drift-slab": (0, 0.2),
"pwl-slab": (0, 0),
"wet-slab": (1, 0),
"glide": (0.8, 0),
}
VECTOR_FREQ = {
"dsize": {
_NONE: 0,
'0': 0,
'1': 0.2,
'2': 0.4,
'3': 0.6,
'4': 0.8,
'5': 1,
},
"dist": {
_NONE: 0,
'0': 0,
'1': 0.25,
'2': 0.5,
'3': 0.75,
'4': 1,
},
"trig": {
_NONE: 0,
'0': 0,
'10': 1 / 3,
'21': 2 / 3,
'22': 1,
},
"prob": {
_NONE: 0,
'0': 0,
'2': 1 / 3,
'3': 2 / 3,
'5': 1,
},
}
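# The four frequency components above are multiplied into a single scalar in
# Score.to_vec() below, e.g. a hypothetical problem with dsize='3', dist='2',
# trig='21' and prob='3' gives freq = 0.6 * 0.5 * (2/3) * (2/3) ~= 0.13.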
class Score:
def __init__(self, labeled_data):
def to_vec(df):
level_2 = ["wet", "loose", "freq", "lev_max", "lev_min", "lev_fill", "aspect"]
columns = pd.MultiIndex.from_product([["global"], ["danger_level", "emergency_warning"]]).append(
pd.MultiIndex.from_product([[f"problem_{n}" for n in range(1, 4)], level_2])
)
vectors = pd.DataFrame(index=df.index, columns=columns)
vectors[("global", "danger_level")] = df[("CLASS", _NONE, "danger_level")].astype(np.int) / 5
vectors[("global", "emergency_warning")] = (
df[("CLASS", _NONE, "emergency_warning")] == "Naturlig utløste skred"
).astype(np.int)
for idx, row in df.iterrows():
for prob_n in [f"problem_{n}" for n in range(1, 4)]:
problem = row["CLASS", _NONE, prob_n]
if problem == _NONE:
vectors.loc[idx, prob_n] = [0, 0, 0, 0, 0, 2, "00000000"]
else:
p_class = row["CLASS", problem]
p_real = row["REAL", problem]
wet = VECTOR_WETNESS_LOOSE[problem][0]
loose = VECTOR_WETNESS_LOOSE[problem][1]
freq = reduce(lambda x, y: x * VECTOR_FREQ[y][p_class[y]], VECTOR_FREQ.keys(), 1)
lev_max = float(p_real["lev_max"]) if p_real["lev_max"] else 0.0
lev_min = float(p_real["lev_min"]) if p_real["lev_min"] else 0.0
lev_fill = int(p_class["lev_fill"]) if p_class["lev_fill"] else 0
aspect = row["MULTI", problem, "aspect"]
vectors.loc[idx, prob_n] = [wet, loose, freq, lev_max, lev_min, lev_fill, aspect]
return vectors
if labeled_data.label is None or labeled_data.pred is None:
raise DatasetMissingLabel()
self.label_vectors = to_vec(labeled_data.label)
self.pred_vectors = to_vec(labeled_data.pred)
def calc(self):
weights = np.array([0.20535988, 0.0949475, 1.])
diff_cols = [not re.match(r"^(lev_)|(aspect)", col) for col in self.label_vectors.columns.get_level_values(1)]
diff = self.pred_vectors.loc[:, diff_cols] - self.label_vectors.loc[:, diff_cols]
p_score_cols = pd.MultiIndex.from_tuples([("global", "problem_score")])
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from Networks import Trainer
import Preprocessing
def show_graph(data):
indexes = pd.MultiIndex.from_product([['Min', 'Max', 'Mean'], ['Inflow', 'Outflow']], names=['Measure', 'Type'])
test = np.hstack([np.min(data, axis=(2, 3)), np.max(data, axis=(2, 3)), np.mean(data, axis=(2, 3))])
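# np.hstack above yields six columns (min/max/mean x inflow/outflow), matching
# the MultiIndex defined earlier; axes (2, 3) are assumed to be the spatial grid.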
data_summary = pd.DataFrame(test, columns=indexes)
import LS
import time as imp_t
from tqdm import tqdm
import pandas as pd
import numpy as np
def run_LS(sector, targetsfilepath, saveresultspath):
'''
~ runs Lomb-Scargle analysis on TESS light curves for 2-minute cadence targets and
writes values and final stat table to file~
REQUIRES: time, tqdm, pandas, numpy, LS
Args:
sector -(int) sector of observation for saving
targetsfilepath -(str) full path and file name of the MIT TESS page sector target list
saveresultspath -(str) full path to location for saved arrays
Returns:
DataFrame of tic, Rvar, highest 3 power amplitudes with corresponding rotation periods
'''
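# Example call (sector number and paths are placeholders):
#     run_LS(14, '/path/to/all_targets_S014_v1.csv', '/path/to/LS_results/')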
sector_table = pd.read_csv(targetsfilepath, skiprows=5)
import numpy as np
import pandas as pd
import hydrostats.data as hd
import hydrostats.analyze as ha
import hydrostats.visual as hv
import HydroErr as he
import matplotlib.pyplot as plt
import os
from netCDF4 import Dataset
# Put all the directories (different states and resolutions) and corresponding NetCDF files into lists.
list_of_files = []
list_of_dir = []
streamflow_dict = {}
list_streams = []
for i in os.listdir('/home/chrisedwards/Documents/rapid_output/mult_res_output'):
for j in os.listdir(os.path.join('/home/chrisedwards/Documents/rapid_output/mult_res_output', i)):
list_of_files.append(os.path.join('/home/chrisedwards/Documents/rapid_output/mult_res_output', i, j,
'Qout_erai_t511_24hr_19800101to20141231.nc'))
list_of_dir.append(os.path.join('/home/chrisedwards/Documents/rapid_output/mult_res_output', i, j))
list_of_dir.sort()
list_of_files.sort()
list_of_states=['az', 'id', 'mo', 'ny', 'or', 'col',
'az', 'id', 'mo', 'ny', 'or', 'col',
'az', 'id', 'mo', 'ny', 'or', 'col']
list_of_states.sort()
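# The three lists built above are assumed to line up by sort order:
# 6 states x 3 resolutions = 18 entries in each.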
# Loop through the lists to create the csv for each stream, in each resolution.
for file, direc, state in zip(list_of_files, list_of_dir, list_of_states):
# Call the NetCDF file.
nc = Dataset(file)
nc.variables.keys()
nc.dimensions.keys()
# Define variables from the NetCDF file.
riv = nc.variables['rivid'][:].tolist()
lat = nc.variables['lat'][:]
lon = nc.variables['lon'][:]
Q = nc.variables['Qout'][:]
sQ = nc.variables['sQout'][:]
time = nc.variables['time'][:].tolist()
# Convert time from 'seconds since 1970' to the actual date.
dates = pd.to_datetime(time, unit='s', origin='unix')
temp_dictionary = {}
counter = 0
for n in riv:
stream_id = state + '-{}'.format(n)
temp_dictionary[stream_id] = pd.DataFrame(data=Q[:, counter], index=dates, columns=[stream_id])
'''
Created on May 16, 2018
@author: cef
significant scripts for calculating damage within the ABMRI framework
for secondary data loader scripts, see fdmg.datos.py
'''
#===============================================================================
# IMPORT STANDARD MODS -------------------------------------------------------
#===============================================================================
import logging, os, time, re, math, copy, gc, weakref, random, sys
import pandas as pd
import numpy as np
import scipy.integrate
#===============================================================================
# shortcuts
#===============================================================================
from collections import OrderedDict
from hlpr.exceptions import Error
from weakref import WeakValueDictionary as wdict
from weakref import proxy
from model.sofda.hp.basic import OrderedSet
from model.sofda.hp.pd import view
idx = pd.IndexSlice
#===============================================================================
# IMPORT CUSTOM MODS ---------------------------------------------------------
#===============================================================================
#import hp.plot
import model.sofda.hp.basic as hp_basic
import model.sofda.hp.pd as hp_pd
import model.sofda.hp.oop as hp_oop
import model.sofda.hp.sim as hp_sim
import model.sofda.hp.data as hp_data
import model.sofda.hp.dyno as hp_dyno
import model.sofda.hp.sel as hp_sel
import model.sofda.fdmg.datos_fdmg as datos
#import matplotlib.pyplot as plt
#import matplotlib
#import matplotlib.animation as animation #load the animation module (with the new search path)
#===============================================================================
# custom shortcuts
#===============================================================================
from model.sofda.fdmg.house import House
#from model.sofda.fdmg.dfunc import Dfunc
from model.sofda.fdmg.dmgfeat import Dmg_feat
# logger setup -----------------------------------------------------------------------
mod_logger = logging.getLogger(__name__)
mod_logger.debug('initilized')
#===============================================================================
#module level defaults ------------------------------------------------------
#===============================================================================
#datapars_cols = [u'dataname', u'desc', u'datafile_tailpath', u'datatplate_tailpath', u'trim_row'] #headers in the data tab
datafile_types_list = ['.csv', '.xls']
class Fdmg( #flood damage model
hp_sel.Sel_controller, #no init
hp_dyno.Dyno_wrap, #add some empty containers
#hp.plot.Plot_o, #build the label
hp_sim.Sim_model, #Sim_wrap: attach the reset_d. Sim_model: inherit attributes
hp_oop.Trunk_o, #no init
#Parent_cmplx: attach empty kids_sd
#Parent: set some defaults
hp_oop.Child):
"""
#===========================================================================
# INPUTS
#===========================================================================
pars_path ==> pars_file.xls
main external parameter spreadsheet.
See description in file for each column
dataset parameters
tab = 'data'. expected columns: datapars_cols
session parameters
tab = 'gen'. expected rows: sessionpars_rows
"""
#===========================================================================
# program parameters
#===========================================================================
name = 'fdmg'
#list of attribute names to try and inherit from the session
try_inherit_anl = set(['ca_ltail', 'ca_rtail', 'mind', \
'dbg_fld_cnt', 'legacy_binv_f', 'gis_area_max', \
'fprob_mult', 'flood_tbl_nm', 'gpwr_aep', 'dmg_rat_f',\
'joist_space', 'G_anchor_ht', 'bsmt_opn_ht_code','bsmt_egrd_code', \
'damp_func_code', 'cont_val_scale', 'hse_skip_depth', \
'area_egrd00', 'area_egrd01', 'area_egrd02',
'fhr_nm', 'write_fdmg_sum', 'dfeat_xclud_price',
'write_fdmg_sum_fly',
])
fld_aep_spcl = 100 #special flood to try and include in db runs
bsmt_egrd = 'wet' #default value for bsmt_egrd
legacy_binv_f = True #flag to indicate that the binv is in legacy format (use indices rather than column labels)
gis_area_max = 3500
acode_sec_d = dict() #available acodes with dfunc data loaded (to check against binv request) {acode:asector}
'consider allowing the user control of these'
gis_area_min = 5
gis_area_max = 5000
write_fdmg_sum_fly = False
write_dmg_fly_first = True #start off to signifiy first run
#===========================================================================
# debuggers
#===========================================================================
write_beg_hist = True #whether to write the beg history or not
beg_hist_df = None
#===========================================================================
# user provided values
#===========================================================================
#legacy pars
floor_ht = 0.0
mind = '' #column to match between data sets and name the house objects
#EAD calc
ca_ltail ='flat'
ca_rtail =2 #aep at which zero value is assumed. 'none' uses lowest aep in flood set
#Floodo controllers
gpwr_aep = 100 #default max aep where gridpower_f = TRUE (when the power shuts off)
dbg_fld_cnt = '0' #for slicing the number of floods we want to evaluate
#area exposure
area_egrd00 = None
area_egrd01 = None
area_egrd02 = None
#Dfunc controllers
place_codes = None
dmg_types = None
flood_tbl_nm = None #name of the flood table to use
#timeline deltas
'just keeping this on the fdmg for simplicity; no need for flood-level heterogeneity'
wsl_delta = 0.0
fprob_mult = 1.0 #needs to be a float for type matching
dmg_rat_f = False
#Fdmg.House pars
joist_space = 0.3
G_anchor_ht = 0.6
bsmt_egrd_code = 'plpm'
damp_func_code = 'seep'
bsmt_opn_ht_code = '*min(2.0)'
hse_skip_depth = -4 #depth to skip house damage calc
fhr_nm = ''
cont_val_scale = .25
write_fdmg_sum = True
dfeat_xclud_price = 0.0
#===========================================================================
# calculation parameters
#===========================================================================
res_fancy = None
gpwr_f = True #placeholder for __init__ calcs
fld_aep_l = None
dmg_dx_base = None #results frame for writing
plotr_d = None #dictionary of EAD plot workers
dfeats_d = dict() #{tag:dfeats}. see raise_all_dfeats()
fld_pwr_cnt = 0
seq = 0
#damage results/stats
dmgs_df = None
dmgs_df_wtail = None #damage summaries with damages for the tail logic included
ead_tot = 0
dmg_tot = 0
#===========================================================================
# calculation data holders
#===========================================================================
dmg_dx = None #container for full run results
bdry_cnt = 0
bwet_cnt = 0
bdamp_cnt = 0
def __init__(self,*vars, **kwargs):
logger = mod_logger.getChild('Fdmg')
#=======================================================================
# initilize cascade
#=======================================================================
super(Fdmg, self).__init__(*vars, **kwargs) #initilzie teh baseclass
#=======================================================================
# object updates
#=======================================================================
self.reset_d.update({'ead_tot':0, 'dmgs_df':None, 'dmg_dx':None,\
'wsl_delta':0}) #update the rest attributes
#=======================================================================
# defaults
#=======================================================================
if not self.session._write_data:
self.write_fdmg_sum = False
if not self.dbg_fld_cnt == 'all':
self.dbg_fld_cnt = int(float(self.dbg_fld_cnt))
#=======================================================================
# pre checks
#=======================================================================
if self.db_f:
#model assignment
if not self.model.__repr__() == self.__repr__():
raise IOError
#check we have all the datos we want
dname_exp = np.array(('rfda_curve', 'binv','dfeat_tbl', 'fhr_tbl'))
boolar = np.invert(np.isin(dname_exp, self.session.pars_df_d['datos']))
if np.any(boolar):
"""allowing this?"""
logger.warning('missing %i expected datos: %s'%(boolar.sum(), dname_exp[boolar]))
#=======================================================================
#setup functions
#=======================================================================
#par cleaners/ special loaders
logger.debug("load_hse_geo() \n")
self.load_hse_geo()
logger.info('load and clean dfunc data \n')
self.load_pars_dfunc(self.session.pars_df_d['dfunc']) #load the data functions to damage type table
logger.debug('\n')
self.setup_dmg_dx_cols()
logger.debug('load_submodels() \n')
self.load_submodels()
logger.debug('init_dyno() \n')
self.init_dyno()
#outputting setup
if self.write_fdmg_sum_fly:
self.fly_res_fpath = os.path.join(self.session.outpath, '%s fdmg_res_fly.csv'%self.session.tag)
logger.info('Fdmg model initialized as \'%s\' \n'%(self.name))
return
#===========================================================================
# def xxxcheck_pars(self): #check your data pars
# #pull the datas frame
# df_raw = self.session.pars_df_d['datos']
#
# #=======================================================================
# # check mandatory data objects
# #=======================================================================
# if not 'binv' in df_raw['name'].tolist():
# raise Error('missing \'binv\'!')
#
# #=======================================================================
# # check optional data objects
# #=======================================================================
# fdmg_tab_nl = ['rfda_curve', 'binv','dfeat_tbl', 'fhr_tbl']
# boolidx = df_raw['name'].isin(fdmg_tab_nl)
#
# if not np.all(boolidx):
# raise IOError #passed some unexpected data names
#
# return
#===========================================================================
def load_submodels(self):
logger = self.logger.getChild('load_submodels')
self.state = 'load'
#=======================================================================
# data objects
#=======================================================================
'this is the main loader that builds all the children as specified on the data tab'
logger.info('loading dat objects from \'fdmg\' tab')
logger.debug('\n \n')
#build datos from teh data tab
'todo: hard code these class types (rather than reading from the control file)'
self.fdmgo_d = self.raise_children_df(self.session.pars_df_d['datos'], #df to raise on
kid_class = None) #should raise according to df entry
self.session.prof(state='load.fdmg.datos')
'WARNING: fdmgo_d is not set until after ALL the children on this tab are raised'
#attach special children
self.binv = self.fdmgo_d['binv']
"""NO! this wont hold resetting updates
self.binv_df = self.binv.childmeta_df"""
#=======================================================================
# flood tables
#=======================================================================
self.ftblos_d = self.raise_children_df(self.session.pars_df_d['flood_tbls'], #df to raise on
kid_class = datos.Flood_tbl) #should raise according to df entry
#make sure the one we are loking for is in there
if not self.session.flood_tbl_nm in list(self.ftblos_d.keys()):
raise Error('requested flood table name \'%s\' not found in loaded sets'%self.session.flood_tbl_nm)
'initial call which only updates the binv_df'
self.set_area_prot_lvl()
if 'fhr_tbl' in list(self.fdmgo_d.keys()):
self.set_fhr()
#=======================================================================
# dfeats
#======================================================================
if self.session.load_dfeats_first_f & self.session.wdfeats_f:
logger.debug('raise_all_dfeats() \n')
self.dfeats_d = self.fdmgo_d['dfeat_tbl'].raise_all_dfeats()
#=======================================================================
# raise houses
#=======================================================================
#check we have all the acodes
self.check_acodes()
logger.info('raising houses')
logger.debug('\n')
self.binv.raise_houses()
self.session.prof(state='load.fdmg.houses')
'calling this here so all of the other datos are raised'
#self.rfda_curve = self.fdmgo_d['rfda_curve']
"""No! we need to get this in before the binv.reset_d['childmeta_df'] is set
self.set_area_prot_lvl() #apply the area protectino from teh named flood table"""
logger.info('loading floods')
logger.debug('\n \n')
self.load_floods()
self.session.prof(state='load.fdmg.floods')
logger.debug("finished with %i kids\n"%len(self.kids_d))
return
def setup_dmg_dx_cols(self): #get the columns to use for fdmg results
"""
This is set up to generate a unique, ordered set of column names with this logic:
take the damage types,
add the mandatory fields,
then add any user-provided fields.
"""
logger = self.logger.getChild('setup_dmg_dx_cols')
#=======================================================================
#build the basic list of column headers
#=======================================================================
#damage types at the head
col_os = OrderedSet(self.dmg_types) #put the damage types at the head
#basic add ons
_ = col_os.update(['total', 'hse_depth', 'wsl', 'bsmt_egrd', 'anchor_el'])
#=======================================================================
# special logic
#=======================================================================
if self.dmg_rat_f:
for dmg_type in self.dmg_types:
_ = col_os.add('%s_rat'%dmg_type)
if not self.wsl_delta==0:
col_os.add('wsl_raw')
"""This doesnt handle runs where we start with a delta of zero and then add some later
for these, you need to expplicitly call 'wsl_raw' in the dmg_xtra_cols_fat"""
#ground water damage
if 'dmg_gw' in self.session.outpars_d['Flood']:
col_os.add('gw_f')
#add the dem if necessary
if 'gw_f' in col_os:
col_os.add('dem_el')
#=======================================================================
# set pars based on user provided
#=======================================================================
#s = self.session.outpars_d[self.__class__.__name__]
#extra columns for damage results frame
if self.db_f or self.session.write_fdmg_fancy:
logger.debug('including extra columns in outputs')
#clean the extra cols
'todo: move this to a helper'
if hasattr(self.session, 'xtra_cols'):
try:
dc_l = eval(self.session.xtra_cols) #convert to a list
except:
logger.error('failed to convert \'xtra_cols\' to a list. check formatting')
raise IOError
else:
dc_l = ['wsl_raw', 'gis_area', 'acode_s', 'B_f_height', 'BS_ints','gw_f']
if not isinstance(dc_l, list): raise IOError
col_os.update(dc_l) #add these
self.dmg_df_cols = col_os
logger.debug('set dmg_df_cols as: %s'%self.dmg_df_cols)
return
def load_pars_dfunc(self,
df_raw=None): #build a df from the dfunc tab
"""
20190512: upgraded to handle nores and mres types
"""
#=======================================================================
# defaults
#=======================================================================
logger = self.logger.getChild('load_pars_dfunc')
#list of columns to expect
exp_colns = np.array(['acode','asector','place_code','dmg_code','dfunc_type','anchor_ht_code'])
if df_raw is None:
df_raw = self.session.pars_df_d['dfunc']
logger.debug('from df %s: \n %s'%(str(df_raw.shape), df_raw))
#=======================================================================
# clean
#=======================================================================
df1 = df_raw.dropna(axis='columns', how='all').dropna(axis='index', how='all') #drop rows with all na
df1 = df1.drop(columns=['note', 'rank'], errors='ignore') #drop some columns we dont need
#=======================================================================
# checking
#=======================================================================
#expected columns
boolar = np.invert(np.isin(exp_colns, df1.columns))
if np.any(boolar):
raise Error('missing %i expected columns\n %s'%(boolar.sum(), exp_colns[boolar]))
#rfda garage logic
boolidx = np.logical_and(df1['place_code'] == 'G', df1['dfunc_type'] == 'rfda')
if np.any(boolidx):
raise Error('got dfunc_type = rfda for a garage curve (no such thing)')
#=======================================================================
# calculated columns
#=======================================================================
df2 = df1.copy()
df2['dmg_type'] = df2['place_code'] + df2['dmg_code']
"""as acode whill change, we want to keep the name static
df2['name'] = df2['acode'] + df2['dmg_type']"""
df2['name'] = df2['dmg_type']
#=======================================================================
# data loading
#=======================================================================
if 'tailpath' in df2.columns:
boolidx = ~pd.isnull(df2['tailpath']) #get dfuncs with data requests
self.load_raw_dfunc(df2[boolidx])
df2 = df2.drop(['headpath', 'tailpath'], axis = 1, errors='ignore') #drop these columns
#=======================================================================
# get special lists
#=======================================================================
#find total for exclusion
boolidx = np.invert((df2['place_code']=='total').astype(bool))
"""Im not using the total dfunc any more..."""
if not np.all(boolidx):
raise Error('i think this has been disabled')
self.dmg_types = tuple(df2.loc[boolidx,'dmg_type'].dropna().unique().tolist())
self.dmg_codes = tuple(df2.loc[boolidx, 'dmg_code'].dropna().unique().tolist())
self.place_codes = tuple(df2.loc[boolidx,'place_code'].dropna().unique().tolist())
#=======================================================================
# #handle nulls
#=======================================================================
df3 = df2.copy()
for coln in ['dmg_type', 'name']:
df3.loc[:,coln] = df3[coln].replace(to_replace=np.nan, value='none')
#=======================================================================
# set this
#=======================================================================
self.session.pars_df_d['dfunc'] = df3
logger.debug('dfunc_df with %s'%str(df3.shape))
#=======================================================================
# get slice for houses
#=======================================================================
self.dfunc_mstr_df = df3[boolidx] #get this trim
return
"""
view(df3)
"""
def load_hse_geo(self): #special loader for hse_geo dxcol (from tab hse_geo)
logger = self.logger.getChild('load_hse_geo')
#=======================================================================
# load and clean the pars
#=======================================================================
df_raw = hp_pd.load_xls_df(self.session.parspath,
sheetname = 'hse_geo', header = [0,1], logger = logger)
df = df_raw.dropna(how='all', axis = 'index') #drop any rows with all nulls
self.session.pars_df_d['hse_geo'] = df
#=======================================================================
# build a blank starter for each house to fill
#=======================================================================
omdex = df.columns #get the original mdex
'probably a cleaner way of doing this'
lvl0_values = omdex.get_level_values(0).unique().tolist()
lvl1_values = omdex.get_level_values(1).unique().tolist()
lvl1_values.append('t')
newcols = pd.MultiIndex.from_product([lvl0_values, lvl1_values],
names=['place_code','finish_code'])
"""id prefer to use a shortend type (Float32)
but this makes all the type checking very difficult"""
geo_dxcol = pd.DataFrame(index = df.index, columns = newcols, dtype='Float32') #make the frame
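# geo_dxcol is indexed by ['area', 'height', 'per', 'inta'] (checked under db_f
# below), with one (place_code, finish_code) column per combination plus the
# added 't' finish code; e.g. a hypothetical ('B', 'f') column. Actual codes
# come from the hse_geo tab.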
self.geo_dxcol_blank = geo_dxcol
if self.db_f:
if np.any(pd.isnull(df)):
raise Error('got %i nulls in the hse_geo tab'%df.isna().sum().sum())
l = geo_dxcol.index.tolist()
if not l == ['area', 'height', 'per', 'inta']:
raise IOError
return
def load_raw_dfunc(self, meta_df_raw): #load raw data for dfuncs
logger = self.logger.getChild('load_raw_dfunc')
logger.debug('with df \'%s\''%(str(meta_df_raw.shape)))
d = dict() #empty container
meta_df = meta_df_raw.copy()
#=======================================================================
# loop through each row and load the data
#=======================================================================
for indx, row in meta_df.iterrows():
inpath = os.path.join(row['headpath'], row['tailpath'])
df = hp_pd.load_smart_df(inpath,
index_col =None,
logger = logger)
d[row['name']] = df.dropna(how = 'all', axis = 'index') #store this into the dictionary
logger.info('finished loading raw dcurve data on %i dcurves: %s'%(len(d), list(d.keys())))
self.dfunc_raw_d = d
return
def load_floods(self):
#=======================================================================
# defaults
#=======================================================================
logger = self.logger.getChild('load_floods')
logger.debug('setting floods df \n')
self.set_floods_df()
df = self.floods_df
logger.debug('raising floods \n')
d = self.raise_children_df(df, #build flood children
kid_class = Flood,
dup_sibs_f= True,
container = OrderedDict) #pass attributes from one tot eh next
#=======================================================================
# ordered by aep
#=======================================================================
fld_aep_od = OrderedDict()
for childname, childo in d.items():
if hasattr(childo, 'ari'):
fld_aep_od[childo.ari] = childo
else: raise IOError
logger.info('raised and bundled %i floods by aep'%len(fld_aep_od))
self.fld_aep_od = fld_aep_od
return
def set_floods_df(self): #build the flood meta data
logger = self.logger.getChild('set_floods_df')
df_raw = self.session.pars_df_d['floods']
df1 = df_raw.sort_values('ari').reset_index(drop=True)
df1['ari'] = df1['ari'].astype(np.int)
#=======================================================================
# slice for debug set
#=======================================================================
if self.db_f & (not self.dbg_fld_cnt == 'all'):
"""this would be much better with explicit typesetting"""
#check that we even have enough to do the slicing
if len(df1) < 2:
logger.error('too few floods for debug slicing. pass dbg_fld_cnt == all')
raise IOError
df2 = pd.DataFrame(columns = df1.columns) #make blank starter frame
dbg_fld_cnt = int(float(self.dbg_fld_cnt))
logger.info('db_f=TRUE. selecting %i (of %i) floods'%(dbg_fld_cnt, len(df1)))
#===================================================================
# try to pull out and add the 100yr
#===================================================================
try:
boolidx = df1.loc[:,'ari'] == self.fld_aep_spcl
if not boolidx.sum() == 1:
logger.debug('failed to locate 1 flood')
raise IOError
df2 = df2.append(df1[boolidx]) #add this row to the end
df1 = df1[~boolidx] #slice out this row
dbg_fld_cnt = max(0, dbg_fld_cnt - 1) #reduce the loop count by 1
dbg_fld_cnt = min(dbg_fld_cnt, len(df1)) #double check in case we are given a very short set
logger.debug('added the %s year flood to the list with dbg_fld_cnt %i'%(self.fld_aep_spcl, dbg_fld_cnt))
except:
logger.debug('failed to extract the special %i flood'%self.fld_aep_spcl)
df2 = df1.copy()
#===================================================================
# build list of extreme (low/high) floods
#===================================================================
evn_cnt = 0
odd_cnt = 0
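# e.g. dbg_fld_cnt = 4 keeps the two smallest and the two largest aeps
# (even counts pull from the front, odd counts from the back), in addition
# to the special flood handled above.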
for cnt in range(0, dbg_fld_cnt, 1):
if cnt % 2 == 0: #evens. pull from front
idxr = evn_cnt
evn_cnt += 1
else: #odds. pull from end
idxr = len(df1) - odd_cnt - 1
odd_cnt += 1
logger.debug('pulling flood with indexer %i'%(idxr))
ser = df1.iloc[idxr, :] #make thsi slice
df2 = df2.append(ser) #append this to the end
#clean up
df = df2.drop_duplicates().sort_values('ari').reset_index(drop=True)
logger.debug('built extremes flood df with %i aeps: %s'%(len(df), df.loc[:,'ari'].values.tolist()))
if not len(df) == int(self.dbg_fld_cnt):
raise IOError
else:
df = df1.copy()
if not len(df) > 0: raise IOError
self.floods_df = df
return
def set_area_prot_lvl(self): #assign the area_prot_lvl to the binv based on your tab
#logger = self.logger.getChild('set_area_prot_lvl')
"""
TODO: Consider moving this onto the binv and making the binv dynamic...
Calls:
handles for flood_tbl_nm
"""
logger = self.logger.getChild('set_area_prot_lvl')
logger.debug('assigning \'area_prot_lvl\' for \'%s\''%self.flood_tbl_nm)
#=======================================================================
# get data
#=======================================================================
ftbl_o = self.ftblos_d[self.flood_tbl_nm] #get the activated flood table object
ftbl_o.apply_on_binv('aprot_df', 'area_prot_lvl')
return True
def set_fhr(self): #assign the fhz bfe and zone from the fhr_tbl data
logger = self.logger.getChild('set_fhr')
logger.debug('assigning for \'fhz\' and \'bfe\'')
#get the data for this fhr set
fhr_tbl_o = self.fdmgo_d['fhr_tbl']
try:
df = fhr_tbl_o.d[self.fhr_nm]
except:
if not self.fhr_nm in list(fhr_tbl_o.d.keys()):
logger.error('could not find selected fhr_nm \'%s\' in the loaded rule sets: \n %s'
%(self.fhr_nm, list(fhr_tbl_o.d.keys())))
raise IOError
#=======================================================================
# loop through each series and apply
#=======================================================================
"""
not the most generic way of handling this...
todo:
add generic method to the binv
can take ser or df
updates the childmeta_df if before init
updates the children if after init
"""
for hse_attn in ['fhz', 'bfe']:
ser = df[hse_attn]
if not self.session.state == 'init':
#=======================================================================
# tell teh binv to update its houses
#=======================================================================
self.binv.set_all_hse_atts(hse_attn, ser = ser)
else:
logger.debug('set column \'%s\' onto the binv_df'%hse_attn)
self.binv.childmeta_df.loc[:,hse_attn] = ser #set this column in the binv_df
"""I dont like this
fhr_tbl_o.apply_on_binv('fhz_df', 'fhz', coln = self.fhr_nm)
fhr_tbl_o.apply_on_binv('bfe_df', 'bfe', coln = self.fhr_nm)"""
return True
def get_all_aeps_classic(self): #get the list of flood aeps from the classic flood table format
'kept this special syntax reader separate in case we want to change the format of the flood tables'
flood_pars_df = self.session.pars_df_d['floods'] #load the data from the flood table
fld_aep_l = flood_pars_df.loc[:, 'ari'].values #extract the aep (ari) values as an array
return fld_aep_l
def run(self, **kwargs): #placeholder for simulation runs
logger = self.logger.getChild('run')
logger.debug('on run_cnt %i'%self.run_cnt)
self.run_cnt += 1
self.state='run'
#=======================================================================
# prechecks
#=======================================================================
if self.db_f:
if not isinstance(self.outpath, str):
raise IOError
logger.info('\n fdmgfdmgfdmgfdmgfdmgfdmgfdmgfdmgfdmgfdmgfdmgfdmgfdmgfdmgfdmgfdmgfdmgfdmgfdmgfdmgfdmgfdmgfdmgfdmgfdmgfdmgfdmgfdmgfdmgfdmg')
logger.info('for run_cnt %i'%self.run_cnt)
self.calc_fld_set(**kwargs)
return
def setup_res_dxcol(self, #setup the results frame
fld_aep_l = None,
#dmg_type_list = 'all',
bid_l = None):
#=======================================================================
# defaults
#=======================================================================
if bid_l is None: bid_l = self.binv.bid_l
if fld_aep_l is None: fld_aep_l = list(self.fld_aep_od.keys()) #just get all the keys from the dictionary
#if dmg_type_list=='all': dmg_type_list = self.dmg_types
#=======================================================================
# setup the dxind for writing
#=======================================================================
lvl0_values = fld_aep_l
lvl1_values = self.dmg_df_cols #include extra reporting columns
#fold these into a mdex (each flood_aep has all dmg_types)
columns = pd.MultiIndex.from_product([lvl0_values, lvl1_values],
names=['flood_aep','hse_atts'])
dmg_dx = pd.DataFrame(index = bid_l, columns = columns).sort_index() #make the frame
self.dmg_dx_base = dmg_dx.copy()
if self.db_f:
logger = self.logger.getChild('setup_res_dxcol')
if self.write_beg_hist:
fld_aep_l.sort()
columns = pd.MultiIndex.from_product([fld_aep_l, ['egrd', 'cond']],
names=['flood_aep','egrd'])
self.beg_hist_df = pd.DataFrame(index=bid_l, columns = columns)
logger.info('recording bsmt_egrd history with %s'%str(self.beg_hist_df.shape))
else:
self.beg_hist_df = None
"""
dmg_dx.columns
"""
return
def calc_fld_set(self, #calc flood damage for the flood set
fld_aep_l = None, #list of flood aeps to calculate
#dmg_type_list = 'all', #list of damage types to calculate
bid_l = None, #list of building names to calculate
wsl_delta = None, #delta value to add to all wsl
wtf = None, #optional flag to control writing of dmg_dx (otherwise session.write_fdmg_set_dx is used)
**run_fld): #kwargs to send to run_fld
'we could separate the object creation and the damage calculation'
"""
#=======================================================================
# INPUTS
#=======================================================================
fld_aep_l: list of floods to calc
this can be a custom list built by the user
extracted from the flood table (see session.get_ftbl_aeps)
loaded from the legacy rfda pars (session.rfda_pars.fld_aep_l)
bid_l: list of ids (matching the mind variable set under Fdmg)
#=======================================================================
# OUTPUTS
#=======================================================================
dmg_dx: dxcol of flood damage across all dmg_types and floods
mdex
lvl0: flood aep
lvl1: dmg_type + extra cols
I wanted to have this flexible, so the dfunc could pass up extra headers
couldn't get it to work; instead used a global list and a check.
New headers must be added to the global list and Dfunc.
index
bldg_id
#=======================================================================
# TODO:
#=======================================================================
setup to calc across binvs as well
"""
#=======================================================================
# defaults
#=======================================================================
start = time.time()
logger = self.logger.getChild('calc_fld_set')
if wtf is None: wtf = self.session.write_fdmg_set_dx
if wsl_delta is None: wsl_delta= self.wsl_delta
#=======================================================================
# setup and load the results frame
#=======================================================================
#check to see that all of these conditions pass
if not np.all([bid_l is None, fld_aep_l is None]):
logger.debug('non default run. rebuild the dmg_dx_base')
#non default run. rebuild the frame
self.setup_res_dxcol( fld_aep_l = fld_aep_l,
#dmg_type_list = dmg_type_list,
bid_l = bid_l)
elif self.dmg_dx_base is None: #probably the first run
if not self.run_cnt == 1: raise IOError
logger.debug('self.dmg_dx_base is None. rebuilding')
self.setup_res_dxcol(fld_aep_l = fld_aep_l,
#dmg_type_list = dmg_type_list,
bid_l = bid_l) #set it up with the defaults
dmg_dx = self.dmg_dx_base.copy() #just start with a copy of the base
#=======================================================================
# finish defaults
#=======================================================================
'these are all mostly for reporting'
if fld_aep_l is None: fld_aep_l = list(self.fld_aep_od.keys()) #just get all the keys from the dictionary
""" leaving these as empty kwargs and letting floods handle
if bid_l == None: bid_l = binv_dato.bid_l
if dmg_type_list=='all': dmg_type_list = self.dmg_types """
"""
lvl0_values = dmg_dx.columns.get_level_values(0).unique().tolist()
lvl1_values = dmg_dx.columns.get_level_values(1).unique().tolist()"""
logger.info('calc flood damage (%i) floods w/ wsl_delta = %.2f'%(len(fld_aep_l), wsl_delta))
logger.debug('ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff \n')
#=======================================================================
# loop and calc each flood
#=======================================================================
fcnt = 0
first = True
for flood_aep in fld_aep_l: #loop through and build each flood
#self.session.prof(state='%s.fdmg.calc_fld_set.%i'%(self.get_id(), fcnt)) #memory profiling
self.state = flood_aep
'useful for keeping track of what the model is doing'
#get the flood
flood_dato = self.fld_aep_od[flood_aep] #pull this from the dictionary
logger.debug('getting dmg_df for %s'%flood_dato.name)
#===================================================================
# run sequence
#===================================================================
#get damage for these depths
dmg_df = flood_dato.run_fld(**run_fld) #add the damage df to this slice
if dmg_df is None: continue #skip this one
#===================================================================
# wrap up
#===================================================================
dmg_dx[flood_aep] = dmg_df #store into the frame
fcnt += 1
logger.debug('for flood_aep \'%s\' on fcnt %i got dmg_df %s \n'%(flood_aep, fcnt, str(dmg_df.shape)))
#===================================================================
# checking
#===================================================================
if self.db_f:
#check that the floods are increasing
if first:
first = False
last_aep = None
else:
if not flood_aep > last_aep:
raise IOError
last_aep = flood_aep
#=======================================================================
# wrap up
#=======================================================================
self.state = 'na'
if wtf:
filetail = '%s %s %s %s res_fld'%(self.session.tag, self.simu_o.name, self.tstep_o.name, self.name)
filepath = os.path.join(self.outpath, filetail)
hp_pd.write_to_file(filepath, dmg_dx, overwrite=True, index=True) #send for writing
self.dmg_dx = dmg_dx
stop = time.time()
logger.info('in %.4f secs calcd damage on %i of %i floods'%(stop - start, fcnt, len(fld_aep_l)))
return
def get_results(self): #called by Timestep.run_dt()
self.state='wrap'
logger = self.logger.getChild('get_results')
#=======================================================================
# optionals
#=======================================================================
s = self.session.outpars_d[self.__class__.__name__]
if (self.session.write_fdmg_fancy) or (self.session.write_fdmg_sum):
logger.debug("calc_summaries \n")
dmgs_df = self.calc_summaries()
self.dmgs_df = dmgs_df.copy()
else: dmgs_df = None
if ('ead_tot' in s) or ('dmg_df' in s):
logger.debug('\n')
self.calc_annulized(dmgs_df = dmgs_df, plot_f = False)
'this will also run calc_summaries if it has not happened yet'
if 'dmg_tot' in s:
#get a cross section of the 'total' column across all flood_aeps and sum for all entries
self.dmg_tot = self.dmg_dx.xs('total', axis=1, level=1).sum().sum()
if ('bwet_cnt' in s) or ('bdamp_cnt' in s) or ('bdry_cnt' in s):
logger.debug('get_fld_begrd_cnt')
self.get_fld_begrd_cnt()
if 'fld_pwr_cnt' in s:
logger.debug('calc_fld_pwr_cnt \n')
cnt = 0
for aep, obj in self.fld_aep_od.items():
if obj.gpwr_f: cnt +=1
self.fld_pwr_cnt = cnt
self.binv.calc_binv_stats()
if self.session.write_fdmg_fancy:
self.write_res_fancy()
if self.write_fdmg_sum_fly: #write the results after each run
self.write_dmg_fly()
#update the bdmg_dx
if not self.session.bdmg_dx is None:
#add the timestep
bdmg_dx = pd.concat([self.dmg_dx],
keys=[self.tstep_o.name],
names=['tstep'],
axis=1,verify_integrity=True,copy=False)
bdmg_dx.index.name = self.mind
"""trying this as a column so we can append
#add the sim
bdmg_dx = pd.concat([bdmg_dx],
keys=[self.simu_o.name],
names=['simu'],
axis=1,verify_integrity=True,copy=False)"""
#join to the big
if len(self.session.bdmg_dx) == 0:
self.session.bdmg_dx = bdmg_dx.copy()
else:
self.session.bdmg_dx = self.session.bdmg_dx.join(bdmg_dx)
"""
view(self.session.bdmg_dx.join(bdmg_dx))
view(bdmg_dx)
view(self.session.bdmg_dx)
"""
#=======================================================================
# checks
#=======================================================================
if self.db_f:
self.check_dmg_dx()
logger.debug('finished \n')
def calc_summaries(self, #annualize the damages
fsts_l = ['gpwr_f', 'dmg_sw', 'dmg_gw'], #list of additional flood attributes to report in the summary
dmg_dx=None,
plot=False, #flag to execute plot_dmgs() at the end. better to do this explicitly with an outputr
wtf=None):
"""
basically dropping dimensions on the outputs and adding annualized damages
#=======================================================================
# OUTPUTS
#=======================================================================
DROP BINV DIMENSION
dmgs_df: df with
columns: raw damage types, and annualized damage types
index: each flood
entries: total damage for binv
DROP FLOODS DIMENSION
aad_sum_ser
DROP ALL DIMENSIONS
ead_tot
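example layout (illustrative values only):
ari total prob total_a gpwr_f ...
100 1.2e6 0.010 1.2e4 True
500 3.4e6 0.002 6.8e3 False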
"""
#=======================================================================
# defaults
#=======================================================================
logger = self.logger.getChild('calc_summaries')
if dmg_dx is None: dmg_dx = self.dmg_dx.copy()
if plot is None: plot = self.session._write_figs
if wtf is None: wtf = self.write_fdmg_sum
#=======================================================================
# #setup frame
#=======================================================================
#get the columns
dmg_types = list(self.dmg_types) + ['total']
#=======================================================================
# #build the annualized damage type names
#=======================================================================
admg_types = []
for entry in dmg_types: admg_types.append(entry+'_a')
cols = dmg_types + ['prob', 'prob_raw'] + admg_types + fsts_l
dmgs_df = pd.DataFrame(columns = cols)
dmgs_df['ari'] = dmg_dx.columns.get_level_values(0).unique()
dmgs_df = dmgs_df.sort_values('ari').reset_index(drop=True)
#=======================================================================
# loop through and fill out the data
#=======================================================================
for index, row in dmgs_df.iterrows(): #loop through and fill out
dmg_df = dmg_dx[row['ari']] #get the fdmg for this aep
#sum all the damage types
for dmg_type in dmg_types:
row[dmg_type] = dmg_df[dmg_type].sum() #sum them all up
#calc the probability
row['prob_raw'] = 1/float(row['ari']) #inverse of aep
row['prob'] = row['prob_raw'] * self.fprob_mult #apply the multiplier
#calculate the annualized damages
for admg_type in admg_types:
dmg_type = admg_type[:-2] #drop the '_a' suffix
row[admg_type] = row[dmg_type] * row['prob']
#===================================================================
# get stats from the floodo
#===================================================================
floodo = self.fld_aep_od[row['ari']]
for attn in fsts_l:
row[attn] = getattr(floodo, attn)
#===================================================================
# #add this row back into the frame
#===================================================================
dmgs_df.loc[index,:] = row
#=======================================================================
# get series totals
#=======================================================================
dmgs_df = dmgs_df.sort_values('prob').reset_index(drop=True)
#=======================================================================
# closeout
#=======================================================================
logger.debug('annualized %i damage types for %i floods'%(len(dmg_types), len(dmgs_df)))
if wtf:
filetail = '%s dmg_sumry'%(self.session.state)
filepath = os.path.join(self.outpath, filetail)
hp_pd.write_to_file(filepath, dmgs_df, overwrite=True, index=False) #send for writing
logger.debug('set data with %s and cols: %s'%(str(dmgs_df.shape), dmgs_df.columns.tolist()))
if plot:
self.plot_dmgs(wtf=wtf)
#=======================================================================
# post check
#=======================================================================
if self.db_f:
#check for sort logic
if not dmgs_df.loc[:,'prob'].is_monotonic:
raise IOError
if not dmgs_df['total'].iloc[::-1].is_monotonic: #flip the order
logger.warning('bigger floods are not causing more damage')
'some of the flood tables seem bad...'
#raise IOError
#all probabilities should be larger than zero
if not np.all(dmgs_df.loc[:,'prob'] > 0):
raise IOError
return dmgs_df
def calc_annulized(self, dmgs_df = None,
ltail = None, rtail = None, plot_f=None,
dx = 0.001): #get the area under the damage curve
"""
#=======================================================================
# INPUTS
#=======================================================================
ltail: left tail treatment code (low prob high damage)
flat: extend the max damage to the zero probability event
'none': don't extend the tail
rtail: right tail treatment (high prob low damage)
'none': don't extend
<number>: extend to zero damage at the passed return period (years)
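worked example (illustrative numbers only):
with probs x = [0.01, 0.1, 0.5] and totals y = [1000, 200, 0]
trapz(y, x) = 0.5*(1000+200)*(0.1-0.01) + 0.5*(200+0)*(0.5-0.1) = 54 + 40 = 94 $/yr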
"""
#=======================================================================
# defaults
#=======================================================================
logger = self.logger.getChild('calc_annulized')
if ltail is None: ltail = self.ca_ltail
if rtail is None: rtail = self.ca_rtail
'plotter ignores passed kwargs here'
if plot_f is None: plot_f= self.session._write_figs
#=======================================================================
# get data
#=======================================================================
if dmgs_df is None:
dmgs_df = self.calc_summaries()
#df_raw = self.data.loc[:,('total', 'prob', 'ari')].copy().reset_index(drop=True)
'only slicing columns for testing'
df = dmgs_df.copy().reset_index(drop=True)
#=======================================================================
# shortcuts
#=======================================================================
if len(df) <2 :
logger.warning('not enough floods to calculate EAD')
self.ead_tot = 0
self.dmgs_df_wtail = df
return
if df['total'].sum() < 1:
logger.warning('calculated zero damages!')
self.ead_tot = 0
self.dmgs_df_wtail = df
return
logger.debug("with ltail = \'%s\', rtail = \'%s\' and df %s"%(ltail, rtail, str(df.shape)))
#=======================================================================
# left tail treatment
#=======================================================================
if ltail == 'flat':
#zero probability
'assume 1000yr flood is the max damage'
max_dmg = df['total'].max()*1.0001
df.loc[-1, 'prob'] = 0
df.loc[-1, 'ari'] = 999999
df.loc[-1, 'total'] = max_dmg
logger.debug('ltail == flat. duplicated damage %.2f at prob 0'%max_dmg)
elif ltail == 'none':
pass
else: raise IOError
'todo: add option for value multiplier'
#=======================================================================
# right tail
#=======================================================================
if rtail == 'none':
pass
elif hp_basic.isnum(rtail):
rtail_yr = float(rtail)
rtail_p = 1.0 / rtail_yr
max_p = df['prob'].max()
#floor check
if rtail_p < max_p:
logger.error('rtail_p (%.2f) < max_p (%.2f)'%(rtail_p, max_p))
raise IOError
#same
elif rtail_p == max_p:
logger.debug("rtail_p == max_p. no changes made")
else:
logger.debug("adding zero damage for aep = %.1f"%rtail_yr)
#zero damage
'assume no damage occurs at the passed rtail_yr'
loc = len(df)
df.loc[loc, 'prob'] = rtail_p
df.loc[loc, 'ari'] = 1.0/rtail_p
df.loc[loc, 'total'] = 0
"""
hp_pd.view_web_df(self.data)
"""
else: raise IOError
#=======================================================================
# clean up
#=======================================================================
df = df.sort_index() #resort the index
if self.db_f:
'these should still hold'
if not df.loc[:,'prob'].is_monotonic:
raise IOError
"""see above
if not df['total'].iloc[::-1].is_monotonic:
raise IOError"""
x, y = df['prob'].values.tolist(), df['total'].values.tolist()
#=======================================================================
# find area under curve
#=======================================================================
try:
#ead_tot = scipy.integrate.simps(y, x, dx = dx, even = 'avg')
'this was giving some weird results'
ead_tot = scipy.integrate.trapz(y, x, dx = dx)
except:
raise Error('scipy.integrate.trapz failed')
logger.info('found ead_tot = %.2f $/yr from %i points with tail_codes: \'%s\' and \'%s\''
%(ead_tot, len(y), ltail, rtail))
self.ead_tot = ead_tot
#=======================================================================
# checks
#=======================================================================
if self.db_f:
if pd.isnull(ead_tot):
raise IOError
if not isinstance(ead_tot, float):
raise IOError
if ead_tot <=0:
"""
view(df)
"""
raise Error('got negative damage! %.2f'%ead_tot)
#=======================================================================
# update data with tails
#=======================================================================
self.dmgs_df_wtail = df.sort_index().reset_index(drop=True)
#=======================================================================
# generate plot
#=======================================================================
if plot_f:
self.plot_dmgs(right_nm = None, xaxis = 'prob', logx = False)
return
def get_fld_begrd_cnt(self): #tabulate the bsmt_egrd counts from each flood
logger = self.logger.getChild('get_fld_begrd_cnt')
#=======================================================================
# data setup
#=======================================================================
dmg_dx = self.dmg_dx.copy()
#lvl1_values = dmg_dx.columns.get_level_values(0).unique().tolist()
#get all the basement egrd types
df1 = dmg_dx.loc[:,idx[:, 'bsmt_egrd']] #get a slice by level 2 values
#get occurances by value
d = hp_pd.sum_occurances(df1, logger=logger)
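#e.g. d might look like {'dry': 812, 'damp': 120, 'wet': 45} (illustrative counts only)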
#=======================================================================
# loop and calc
#=======================================================================
logger.debug('looping through %i bsmt_egrds: %s'%(len(d), list(d.keys())))
for bsmt_egrd, cnt in d.items():
attn = 'b'+bsmt_egrd +'_cnt'
logger.debug('for \'%s\' got %i'%(attn, cnt))
setattr(self, attn, cnt)
logger.debug('finished \n')
def check_dmg_dx(self): #check logical consistency of the damage results
logger = self.logger.getChild('check_dmg_dx')
#=======================================================================
# data setup
#=======================================================================
dmg_dx = self.dmg_dx.copy()
mdex = dmg_dx.columns
aep_l = mdex.get_level_values(0).astype(int).unique().values.tolist()
aep_l.sort()
#=======================================================================
# check that each flood increases in damage
#=======================================================================
total = None
aep_last = None
for aep in aep_l:
#get this slice
df = dmg_dx[aep]
if total is None:
boolcol = np.isin(df.columns, ['MS', 'MC', 'BS', 'BC', 'GS']) #identify damage columns
total = df.loc[:,boolcol].sum().sum()
if not aep == min(aep_l):
raise IOError
else:
newtot = df.loc[:,boolcol].sum().sum()
if not newtot >= total:
logger.warning('aep %s tot %.2f < aep %s %.2f'%(aep, newtot, aep_last, total))
#raise IOError
#print 'new tot %.2f > oldtot %.2f'%(newtot, total)
total = newtot
aep_last = aep
return
def check_acodes(self, #check you have curves for all the acodes
ac_sec_d = None, #dict of loaded acodes {acode: asector}
ac_req_l = None, #list of requested acodes
dfunc_df = None, #control file page for the dfunc parameters
):
#=======================================================================
# defaults
#=======================================================================
log = self.logger.getChild('check_acodes')
if ac_sec_d is None: ac_sec_d = self.acode_sec_d
if ac_req_l is None: ac_req_l = self.binv.acode_l #pull from the binv
if dfunc_df is None: dfunc_df = self.session.pars_df_d['dfunc']
log.debug('checking acodes requested by binv against %i available'%len(ac_sec_d))
"""
for k, v in ac_sec_d.items():
print(k, v)
"""
#=======================================================================
# conversions
#=======================================================================
ava_ar = np.array(list(ac_sec_d.keys())) #convert availables to an array
req_ar = np.array(ac_req_l)
#get the pars set
pars_ar_raw = dfunc_df['acode'].dropna().unique()
pars_ar = pars_ar_raw[pars_ar_raw!='none'] #drop the nones
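#e.g. (illustrative only) ava_ar = ['AA1', 'AB2'], req_ar = ['AA1'], pars_ar = ['AA1', 'AB2']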
#=======================================================================
# check we loaded everything we requested in the pars
#=======================================================================
boolar = np.invert(np.isin(pars_ar, ava_ar))
if np.any(boolar):
raise Error('%i acodes requested by the pars were not loaded: \n %s'
%(boolar.sum(), pars_ar[boolar]))
#=======================================================================
# check the binv doesn't have anything we don't have pars for
#=======================================================================
boolar = np.invert(np.isin(req_ar, pars_ar))
if np.any(boolar):
raise Error('%i binv acodes not found on the \'dfunc\' tab: \n %s'
%(boolar.sum(), req_ar[boolar]))
return
def wrap_up(self):
#=======================================================================
# update asset containers
#=======================================================================
"""
#building inventory
'should be flagged for updating during House.notify()'
if self.binv.upd_kid_f:
self.binv.update()"""
"""dont think we need this here any more.. only on udev.
keeping it just to be safe"""
self.last_tstep = copy.copy(self.time)
self.state='close'
def write_res_fancy(self, #for saving results in xls per tab. called as a special outputr
dmg_dx=None,
include_ins = False,
include_raw = False,
include_begh = True):
"""
#=======================================================================
# INPUTS
#=======================================================================
include_ins: whether to add inputs as tabs.
I've left this separate from the 'copy_inputs' flag as it is not a true file copy of the inputs
"""
#=======================================================================
# defaults
#=======================================================================
logger = self.logger.getChild('write_res_fancy')
if dmg_dx is None: dmg_dx = self.dmg_dx
if dmg_dx is None:
logger.warning('got no dmg_dx. skipping')
return
#=======================================================================
# setup
#=======================================================================
od = OrderedDict()
#=======================================================================
# add the parameters
#=======================================================================
#get the blank frame
df = pd.DataFrame(columns = ['par','value'] )
df['par'] = list(self.try_inherit_anl)
for indx, row in df.iterrows():
df.iloc[indx, 1] = getattr(self, row['par']) #set this value
od['pars'] = df
#=======================================================================
# try and add damage summary
#=======================================================================
if not self.dmgs_df is None:
od['dmg summary'] = self.dmgs_df
#=======================================================================
# #get the dmg_dx decomposed
#=======================================================================
od.update(hp_pd.dxcol_to_df_set(dmg_dx, logger=self.logger))
#=======================================================================
# #add dmg_dx as a raw tab
#=======================================================================
if include_raw:
od['raw_res'] = dmg_dx
#=======================================================================
# add inputs
#=======================================================================
if include_ins:
for dataname, dato in self.kids_d.items():
if hasattr(dato, 'data') and hp_pd.isdf(dato.data): #short-circuit so .data is only touched when present
od[dataname] = dato.data
#=======================================================================
# add debuggers
#=======================================================================
if include_begh:
if not self.beg_hist_df is None:
od['beg_hist'] = self.beg_hist_df
#=======================================================================
# #write to excel
#=======================================================================
filetail = '%s %s %s %s fancy_res'%(self.session.tag, self.simu_o.name, self.tstep_o.name, self.name)
filepath = os.path.join(self.outpath, filetail)
hp_pd.write_dfset_excel(od, filepath, engine='xlsxwriter', logger=self.logger)
return
def write_dmg_fly(self): #write damage results after each run
logger = self.logger.getChild('write_dmg_fly')
dxcol = self.dmg_dx #results
#=======================================================================
# build the results summary series
#=======================================================================
#get all the flood aeps
lvl0vals = dxcol.columns.get_level_values(0).unique().astype(int).tolist()
#blank holder
res_ser = pd.Series(index = lvl0vals)
#loop and calc sums for each flood
for aep in lvl0vals:
res_ser[aep] = dxcol.loc[:,(aep,'total')].sum()
#add extras
if not self.ead_tot is None:
res_ser['ead_tot'] = self.ead_tot
res_ser['dt'] = self.tstep_o.year
res_ser['sim'] = self.simu_o.ind
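#e.g. (illustrative only) res_ser ~ {10: 2.1e6, 100: 5.3e6, 'ead_tot': 8.4e4, 'dt': 2020, 'sim': 1}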
lindex = '%s.%s'%(self.simu_o.name, self.tstep_o.name)
hp_pd.write_fly_df(self.fly_res_fpath,res_ser, lindex = lindex,
first = self.write_dmg_fly_first, tag = 'fdmg totals',
db_f = self.db_f, logger=logger) #write results on the fly
self.write_dmg_fly_first = False
return
def get_plot_kids(self): #raise kids for plotting the damage summaries
logger = self.logger.getChild('get_plot_kids')
#=======================================================================
# get slice of aad_fmt_df matching the aad cols
#=======================================================================
aad_fmt_df = self.session.pars_df_d['dmg_sumry_plot'] #pull the formatter pars from the tab
dmgs_df = self.dmgs_df
self.data = dmgs_df
boolidx = aad_fmt_df.loc[:,'name'].isin(dmgs_df.columns) #get just those formatters with data in the aad
aad_fmt_df_slice = aad_fmt_df[boolidx] #get this slice
"""
hp_pd.view_web_df(self.data)
hp_pd.view_web_df(df)
hp_pd.view_web_df(aad_fmt_df_slice)
aad_fmt_df_slice.columns
"""
#=======================================================================
# formatter kids setup
#=======================================================================
"""need to run this every time so the data is updated
TODO: allow some updating here so we don't have to rebuild each time
if self.plotter_kids_dict is None:"""
self.plotr_d = self.raise_children_df(aad_fmt_df_slice, kid_class = hp_data.Data_o)
logger.debug('finished \n')
#===============================================================================
# def plot_dmgs(self, wtf=None, right_nm = None, xaxis = 'ari', logx = True,
# ylims = None, #tuple of min/max values for the y-axis
# ): #plot curve of aad
# """
# see tab 'aad_fmt' to control what is plotted and formatting
# """
# #=======================================================================
# # defaults
# #=======================================================================
# logger = self.logger.getChild('plot_dmgs')
# if wtf == None: wtf = self.session._write_figs
#
# #=======================================================================
# # prechecks
# #=======================================================================
# if self.db_f:
# if self.dmgs_df is None:
# raise IOError
#
#
# #=======================================================================
# # setup
# #=======================================================================
# if not ylims is None:
# try:
# ylims = eval(ylims)
# except:
# pass
#
# #get the plot workers
# if self.plotr_d is None:
# self.get_plot_kids()
#
# kids_d = self.plotr_d
#
# title = '%s-%s-%s EAD-ARI plot on %i objs'%(self.session.tag, self.simu_o.name, self.name, len(self.binv.childmeta_df))
# logger.debug('with \'%s\''%title)
#
# if not self.tstep_o is None:
# title = title + ' for %s'%self.tstep_o.name
#
# #=======================================================================
# # update plotters
# #=======================================================================
# logger.debug('updating plotters with my data')
#
# #get data
# data_og = self.data.copy() #store this for later
#
# if self.dmgs_df_wtail is None:
# df = self.dmgs_df.copy()
# else:
# df = self.dmgs_df_wtail.copy()
#
# df = df.sort_values(xaxis, ascending=True)
#
# #reformat data
# df.set_index(xaxis, inplace = True)
#
# #re set
# self.data = df
#
# #tell kids to refresh their data from here
# for gid, obj in kids_d.items(): obj.data = obj.loadr_vir()
#
# self.data = data_og #reset the data
#
# #=======================================================================
# # get annotation
# #=======================================================================
# val_str = '$' + "{:,.2f}".format(self.ead_tot/1e6)
# #val_str = "{:,.2f}".format(self.ead_tot)
# """
# txt = 'total aad: $%s \n tail kwargs: \'%s\' and \'%s\' \n'%(val_str, self.ca_ltail, self.ca_rtail) +\
# 'binv.cnt = %i, floods.cnt = %i \n'%(self.binv.cnt, len(self.fld_aep_od))"""
#
#
# txt = 'total EAD = %s'%val_str
#
#
# #=======================================================================
# #plot the workers
# #=======================================================================
# #twinx
# if not right_nm is None:
# logger.debug('twinning axis with name \'%s\''%right_nm)
# title = title + '_twin'
# # sort children into left/right buckets by name to plot on each axis
# right_pdb_d, left_pdb_d = self.sort_buckets(kids_d, right_nm)
#
# if self.db_f:
# if len (right_pdb_d) <1: raise IOError
#
# #=======================================================================
# # #send for plotting
# #=======================================================================
# 'this plots both bundles by their data indexes'
# ax1, ax2 = self.plot_twinx(left_pdb_d, right_pdb_d,
# logx=logx, xlab = xaxis, title=title, annot = txt,
# wtf=False)
# 'cant figure out why teh annot is plotting twice'
#
# ax2.set_ylim(0, 1) #prob limits
# legon = False
# else:
# logger.debug('single axis')
#
# try:
# del kids_d['prob']
# except:
# pass
#
# pdb = self.get_pdb_dict(list(kids_d.values()))
#
# ax1 = self.plot_bundles(pdb,
# logx=logx, xlab = 'ARI', ylab = 'damage ($ 10^6)', title=title, annot = txt,
# wtf=False)
#
# legon=True
#
# #hatch
# #=======================================================================
# # post formatting
# #=======================================================================
# #set axis limits
# if xaxis == 'ari': ax1.set_xlim(1, 1000) #aep limits
# elif xaxis == 'prob': ax1.set_xlim(0, .6)
#
# if not ylims is None:
# ax1.set_ylim(ylims[0], ylims[1])
#
#
# #ax1.set_ylim(0, ax1.get_ylim()[1]) #$ limits
#
#
# #=======================================================================
# # format y axis labels
# #=======================================================================
# old_tick_l = ax1.get_yticks() #get teh old labels
#
# # build the new ticks
# l = []
#
# for value in old_tick_l:
# new_v = '$' + "{:,.0f}".format(value/1e6)
# l.append(new_v)
#
# #apply the new labels
# ax1.set_yticklabels(l)
#
# """
# #add thousands comma
# ax1.get_yaxis().set_major_formatter(
# #matplotlib.ticker.FuncFormatter(lambda x, p: '$' + "{:,.2f}".format(x/1e6)))
#
# matplotlib.ticker.FuncFormatter(lambda x, p: format(int(x), ',')))"""
#
# if xaxis == 'ari':
# ax1.get_xaxis().set_major_formatter(
# matplotlib.ticker.FuncFormatter(lambda x, p: format(int(x), ',')))
#
#
# if wtf:
# fig = ax1.figure
# savepath_raw = os.path.join(self.outpath,title)
# flag = hp.plot.save_fig(self, fig, savepath_raw=savepath_raw, dpi = self.dpi, legon=legon)
# if not flag: raise IOError
#
#
# #plt.close()
# return
#===============================================================================
class Flood(
hp_dyno.Dyno_wrap,
hp_sim.Sim_o,
hp_oop.Parent, #flood object worker
hp_oop.Child):
#===========================================================================
# program pars
#===========================================================================
gpwr_f = False #grid power flag placeholder
#===========================================================================
# user defined pars
#===========================================================================
ari = None
#loaded from flood table
#area exposure grade. controls the area's depth decision algorithm based on the performance of macro structures (e.g. dykes).
area_egrd00 = ''
area_egrd01 = ''
area_egrd02 = ''
area_egrd00_code = None
area_egrd01_code = None
area_egrd02_code = None
#===========================================================================
# calculated pars
#===========================================================================
hdep_avg = 0 #average house depth
#damage properties
total = 0
BS = 0
BC = 0
MS = 0
MC = 0
dmg_gw = 0
dmg_sw = 0
dmg_df_blank =None
wsl_avg = 0
#===========================================================================
# data containers
#===========================================================================
hdmg_cnt = 0
dmg_df = None
dmg_res_df = None
#bsmt_egrd counters. see get_begrd_cnt()
bdry_cnt = 0
bwet_cnt = 0
bdamp_cnt = 0
def __init__(self, parent, *vars, **kwargs):
logger = mod_logger.getChild('Flood')
logger.debug('start _init_')
#=======================================================================
# #attach custom vars
#=======================================================================
self.inherit_parent_ans=set(['mind', 'dmg_types'])
#=======================================================================
# initialize cascade
#=======================================================================
super(Flood, self).__init__(parent, *vars, **kwargs) #initialize the baseclass
#=======================================================================
# common setup
#=======================================================================
if self.sib_cnt == 0:
#update the resets
pass
#=======================================================================
# unique setup
#=======================================================================
""" handled by the outputr
self.reset_d.update({'hdmg_cnt':0})"""
self.ari = int(self.ari)
self.dmg_res_df = pd.DataFrame() #set as an empty frame for output handling
#=======================================================================
# setup functions
#=======================================================================
self.set_gpwr_f()
logger.debug('set_dmg_df_blank()')
self.set_dmg_df_blank()
logger.debug('get your water levels from the selected wsl table \n')
self.set_wsl_frm_tbl()
logger.debug('set_area_egrd()')
self.set_area_egrd()
logger.debug('get_info_from_binv()')
df = self.get_info_from_binv() #initial run to set blank frame
self.set_wsl_from_egrd(df)
""" moved into set_wsl_frm_tbl()
logger.debug('\n')
self.setup_dmg_df()"""
self.init_dyno()
self.logger.debug('__init___ finished \n')
def set_dmg_df_blank(self):
logger = self.logger.getChild('set_dmg_df_blank')
binv_df = self.model.binv.childmeta_df
colns = OrderedSet(self.model.dmg_df_cols.tolist() + ['wsl', 'area_prot_lvl'])
'wsl should be redundant'
#get boolean
self.binvboolcol = binv_df.columns.isin(colns) #store this for get_info_from_binv()
#get the blank frame
self.dmg_df_blank = pd.DataFrame(columns = colns, index = binv_df.index) #get the blank frame
'this still needs the wsl levels attached based on your area exposure grade'
logger.debug('set dmg_df_blank with %s'%(str(self.dmg_df_blank.shape)))
return
def set_gpwr_f(self): #set your power flag
if self.is_frozen('gpwr_f'): return True#shortcut for frozen
logger = self.logger.getChild('set_gpwr_f')
#=======================================================================
# get based on aep
#=======================================================================
min_aep = int(self.model.gpwr_aep)
if self.ari < min_aep: gpwr_f = True
else: gpwr_f = False
logger.debug('for min_aep = %i, set gpwr_f = %s'%(min_aep, gpwr_f))
#update handler
self.handle_upd('gpwr_f', gpwr_f, proxy(self), call_func = 'set_gpwr_f')
return True
def set_wsl_frm_tbl(self, #build the raw wsl data from the passed flood table
flood_tbl_nm = None, #name of flood table to pull raw data from
#bid_l=None,
):
"""
here we get the raw values
these are later modified by the area_egrd with self.set_wsl_from_egrd()
#=======================================================================
# INPUTS
#=======================================================================
flood_tbl_df_raw: raw df of the classic flood table
columns: count, aep, aep, aep, aep....
real_columns: bldg_id, bid, depth, depth, depth, etc...
index: unique arbitrary
wsl_ser: series of wsl for this flood on each bldg_id
#=======================================================================
# calls
#=======================================================================
dynp handles Fdmg.flood_tbl_nm
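example (assumed layout):
wsl_d = {'wet': df, 'damp': df, 'dry': df}
each df: index = bldg_id, columns = flood aris (e.g. 10, 50, 100), values = wsl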
"""
#=======================================================================
# defaults
#=======================================================================
logger = self.logger.getChild('set_wsl_frm_tbl')
if flood_tbl_nm is None: flood_tbl_nm = self.model.flood_tbl_nm
#=======================================================================
# get data
#=======================================================================
#pull the raw flood tables
ftbl_o = self.model.ftblos_d[flood_tbl_nm]
wsl_d = ftbl_o.wsl_d
df = pd.DataFrame(index = list(wsl_d.values())[0].index) #blank frame from the first entry
#=======================================================================
# loop and apply for each flood type
#=======================================================================
for ftype, df1 in wsl_d.items():
#=======================================================================
# data checks
#=======================================================================
if self.db_f:
if not ftype in ['wet', 'dry', 'damp']:
raise IOError
df_raw =df1.copy()
if not self.ari in df_raw.columns:
logger.error('the flood provided on the \'floods\' tab (\'%s\') does not have a match in the flood table: \n %s'%
(self.ari, self.model.ftblos_d[flood_tbl_nm].filepath))
raise IOError
#=======================================================================
# slice for this flood
#=======================================================================
boolcol = df1.columns == self.ari #slice for this aep
#get the series for this
wsl_ser = df1.loc[:, boolcol].iloc[:,0].astype(float)
#wsl_ser = wsl_ser.rename(ftype) #rename with the aep
'binv slicing moved to Flood_tbl.clean_data()'
#=======================================================================
# checks
#=======================================================================
if self.db_f:
if len(wsl_ser) <1:
raise IOError
""" allowing
#check for nuls
if np.any(pd.isnull(wsl_ser2)):
raise IOError"""
#=======================================================================
# wrap up report and attach
#=======================================================================
df[ftype] = wsl_ser
logger.debug('from \'%s\' for \'%s\' got wsl_ser %s for aep: %i'
%(flood_tbl_nm, ftype, str(wsl_ser.shape), self.ari))
self.wsl_df = df #set this
'not using dynps'
if self.session.state == 'init':
self.reset_d['wsl_df'] = df.copy()
return True
def set_area_egrd(self): #pull your area exposure grade from somewhere
"""
#=======================================================================
# calls
#=======================================================================
self.__init__()
dynp handles: Fdmg.flood_tbl_nm (just in case we are pulling from there
"""
#=======================================================================
# dependency check
#=======================================================================
if not self.session.state=='init':
dep_l = [([self.model], ['set_area_prot_lvl'])]
if self.deps_is_dated(dep_l, method = 'reque', caller = 'set_area_egrd'):
return False
logger = self.logger.getChild('set_area_egrd')
#=======================================================================
# steal egrd from elsewhere table if asked
#=======================================================================
for cnt in range(0,3,1): #loop through each one
attn = 'area_egrd%02d'%cnt
area_egrd_code = getattr(self, attn + '_code')
if area_egrd_code in ['dry', 'damp', 'wet']:
area_egrd = area_egrd_code
#===================================================================
# pull from the flood table
#===================================================================
elif area_egrd_code == '*ftbl':
ftbl_o = self.model.ftblos_d[self.model.flood_tbl_nm] #get the flood tabl object
area_egrd = getattr(ftbl_o, attn) #get from the table
#===================================================================
# pull from the model
#===================================================================
elif area_egrd_code == '*model':
area_egrd = getattr(self.model, attn) #get from the model
else:
logger.error('for \'%s\' got unrecognized area_egrd_code: \'%s\''%(attn, area_egrd_code))
raise IOError
#===================================================================
# set these
#===================================================================
self.handle_upd(attn, area_egrd, weakref.proxy(self), call_func = 'set_area_egrd')
'this should trigger generating a new wsl set on the blank_dmg_df'
logger.debug('set \'%s\' from \'%s\' as \'%s\''
%(attn, area_egrd_code,area_egrd))
if self.db_f:
if not area_egrd in ['dry', 'damp', 'wet']:
raise IOError
return True
def set_wsl_from_egrd(self, #calculate the wsl based on the area_egrd
df = None):
"""
This is a partial results retrieval for non damage function results
TODO:
consider checking for dependency on House.area_prot_lvl
#=======================================================================
# calls
#=======================================================================
self.__init__
dynp handles for:
Flood.area_egrd##
Fdmg.flood_tbl_nm
if area_egrd_code == *model, this loop isn't really necessary
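example (assumed grades):
area_prot_lvl 0 -> area_egrd00 (e.g. 'wet'), 1 -> area_egrd01 (e.g. 'damp'), 2 -> area_egrd02 (e.g. 'dry')
each house gets the wsl_df column matching its grade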
"""
#=======================================================================
# check dependencies and frozen
#=======================================================================
if not self.session.state=='init':
dep_l = [([self], ['set_area_egrd', 'set_wsl_frm_tbl'])]
if self.deps_is_dated(dep_l, method = 'reque', caller = 'set_wsl_from_egrd'):
return False
#=======================================================================
# defaults
#=======================================================================
logger = self.logger.getChild('set_wsl_from_egrd')
#if wsl_delta is None: wsl_delta = self.model.wsl_delta
#=======================================================================
# get data
#=======================================================================
if df is None: df = self.get_info_from_binv()
'need to have updated area_prot_lvls'
#=======================================================================
# precheck
#=======================================================================
if self.db_f:
if not isinstance(df, pd.DataFrame): raise IOError
if not len(df) > 0: raise IOError
#=======================================================================
# add the wsl for each area_egrd
#=======================================================================
for prot_lvl in range(0,3,1): #loop through each one
#get your grade for this prot_lvl
attn = 'area_egrd%02d'%prot_lvl
area_egrd = getattr(self, attn)
#identify the houses for this protection level
boolidx = df.loc[:,'area_prot_lvl'] == prot_lvl
if boolidx.sum() == 0: continue
#give them the wsl corresponding to this grade
df.loc[boolidx, 'wsl'] = self.wsl_df.loc[boolidx,area_egrd]
#set a tag for the area_egrd
if 'area_egrd' in df.columns:
df.loc[boolidx, 'area_egrd'] = area_egrd
logger.debug('for prot_lvl %i, set %i wsl from \'%s\''%(prot_lvl, boolidx.sum(), area_egrd))
#=======================================================================
# set this
#=======================================================================
self.dmg_df_blank = df
#=======================================================================
# post check
#=======================================================================
logger.debug('set dmg_df_blank with %s'%str(df.shape))
if self.session.state=='init':
self.reset_d['dmg_df_blank'] = df.copy()
if self.db_f:
if np.any(pd.isnull(df['wsl'])):
raise IOError
from datetime import timedelta
import pytest
from pandas import PeriodIndex, Series, Timedelta, date_range, period_range, to_datetime
import pandas._testing as tm
class TestToTimestamp:
def test_to_timestamp(self):
index = period_range(freq="A", start="1/1/2001", end="12/1/2009")
series = Series(1, index=index, name="foo")
exp_index = date_range("1/1/2001", end="12/31/2009", freq="A-DEC")
result = series.to_timestamp(how="end")
exp_index = exp_index + Timedelta(1, "D") - | Timedelta(1, "ns") | pandas.Timedelta |
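# assumed completion of the truncated test body: standard index/name checks
tm.assert_index_equal(result.index, exp_index)
assert result.name == "foo"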
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import pandas
from pandas.api.types import is_scalar
from pandas.compat import to_str, string_types, numpy as numpy_compat, cPickle as pkl
import pandas.core.common as com
from pandas.core.dtypes.common import (
_get_dtype_from_object,
is_list_like,
is_numeric_dtype,
is_datetime_or_timedelta_dtype,
is_dtype_equal,
is_object_dtype,
is_integer_dtype,
)
from pandas.core.index import _ensure_index_from_sequences
from pandas.core.indexing import check_bool_indexer, convert_to_index_sliceable
from pandas.util._validators import validate_bool_kwarg
import itertools
import functools
import numpy as np
import re
import sys
import warnings
from modin.error_message import ErrorMessage
from .utils import from_pandas, to_pandas, _inherit_docstrings
from .iterator import PartitionIterator
from .series import SeriesView
@_inherit_docstrings(
pandas.DataFrame, excluded=[pandas.DataFrame, pandas.DataFrame.__init__]
)
class DataFrame(object):
def __init__(
self,
data=None,
index=None,
columns=None,
dtype=None,
copy=False,
query_compiler=None,
):
"""Distributed DataFrame object backed by Pandas dataframes.
Args:
data (numpy ndarray (structured or homogeneous) or dict):
Dict can contain Series, arrays, constants, or list-like
objects.
index (pandas.Index, list, ObjectID): The row index for this
DataFrame.
columns (pandas.Index): The column names for this DataFrame, in
pandas Index object.
dtype: Data type to force. Only a single dtype is allowed.
If None, infer
copy (boolean): Copy data from inputs.
Only affects DataFrame / 2d ndarray input.
query_compiler: A query compiler object to manage distributed computation.
"""
if isinstance(data, DataFrame):
self._query_compiler = data._query_compiler
return
# Check type of data and use appropriate constructor
if data is not None or query_compiler is None:
pandas_df = pandas.DataFrame(
data=data, index=index, columns=columns, dtype=dtype, copy=copy
)
self._query_compiler = from_pandas(pandas_df)._query_compiler
else:
self._query_compiler = query_compiler
def __str__(self):
return repr(self)
def _build_repr_df(self, num_rows, num_cols):
# Add one here so that pandas automatically adds the dots
# It turns out to be faster to extract 2 extra rows and columns than to
# build the dots ourselves.
num_rows_for_head = num_rows // 2 + 1
num_cols_for_front = num_cols // 2 + 1
if len(self.index) <= num_rows:
head = self._query_compiler
tail = None
else:
head = self._query_compiler.head(num_rows_for_head)
tail = self._query_compiler.tail(num_rows_for_head)
if len(self.columns) <= num_cols:
head_front = head.to_pandas()
# Creating these empty to make the concat logic simpler
head_back = pandas.DataFrame()
tail_back = pandas.DataFrame()
if tail is not None:
tail_front = tail.to_pandas()
else:
tail_front = pandas.DataFrame()
else:
head_front = head.front(num_cols_for_front).to_pandas()
head_back = head.back(num_cols_for_front).to_pandas()
if tail is not None:
tail_front = tail.front(num_cols_for_front).to_pandas()
tail_back = tail.back(num_cols_for_front).to_pandas()
else:
tail_front = tail_back = pandas.DataFrame()
head_for_repr = pandas.concat([head_front, head_back], axis=1)
tail_for_repr = pandas.concat([tail_front, tail_back], axis=1)
return pandas.concat([head_for_repr, tail_for_repr])
def __repr__(self):
# In the future, we can have this be configurable, just like Pandas.
num_rows = 60
num_cols = 30
result = repr(self._build_repr_df(num_rows, num_cols))
if len(self.index) > num_rows or len(self.columns) > num_cols:
# The split here is so that we don't repr pandas row lengths.
return result.rsplit("\n\n", 1)[0] + "\n\n[{0} rows x {1} columns]".format(
len(self.index), len(self.columns)
)
else:
return result
def _repr_html_(self):
"""repr function for rendering in Jupyter Notebooks like Pandas
Dataframes.
Returns:
The HTML representation of a Dataframe.
"""
# In the future, we can have this be configurable, just like Pandas.
num_rows = 60
num_cols = 20
# We use pandas _repr_html_ to get a string of the HTML representation
# of the dataframe.
result = self._build_repr_df(num_rows, num_cols)._repr_html_()
if len(self.index) > num_rows or len(self.columns) > num_cols:
# We split so that we insert our correct dataframe dimensions.
return result.split("<p>")[
0
] + "<p>{0} rows x {1} columns</p>\n</div>".format(
len(self.index), len(self.columns)
)
else:
return result
def _get_index(self):
"""Get the index for this DataFrame.
Returns:
The union of all indexes across the partitions.
"""
return self._query_compiler.index
def _get_columns(self):
"""Get the columns for this DataFrame.
Returns:
The union of all indexes across the partitions.
"""
return self._query_compiler.columns
def _set_index(self, new_index):
"""Set the index for this DataFrame.
Args:
new_index: The new index to set.
"""
self._query_compiler.index = new_index
def _set_columns(self, new_columns):
"""Set the columns for this DataFrame.
Args:
new_columns: The new columns to set.
"""
self._query_compiler.columns = new_columns
index = property(_get_index, _set_index)
columns = property(_get_columns, _set_columns)
def _validate_eval_query(self, expr, **kwargs):
"""Helper function to check the arguments to eval() and query()
Args:
expr: The expression to evaluate. This string cannot contain any
Python statements, only Python expressions.
"""
if isinstance(expr, str) and expr == "":
raise ValueError("expr cannot be an empty string")
if isinstance(expr, str) and "@" in expr:
ErrorMessage.not_implemented("Local variables not yet supported in eval.")
if isinstance(expr, str) and "not" in expr:
if "parser" in kwargs and kwargs["parser"] == "python":
ErrorMessage.not_implemented("'Not' nodes are not implemented.")
@property
def size(self):
"""Get the number of elements in the DataFrame.
Returns:
The number of elements in the DataFrame.
"""
return len(self.index) * len(self.columns)
@property
def ndim(self):
"""Get the number of dimensions for this DataFrame.
Returns:
The number of dimensions for this DataFrame.
"""
# DataFrames have an invariant that requires they be 2 dimensions.
return 2
@property
def ftypes(self):
"""Get the ftypes for this DataFrame.
Returns:
The ftypes for this DataFrame.
"""
# The ftypes are common across all partitions.
# The first partition will be enough.
dtypes = self.dtypes.copy()
ftypes = ["{0}:dense".format(str(dtype)) for dtype in dtypes.values]
result = pandas.Series(ftypes, index=self.columns)
return result
@property
def dtypes(self):
"""Get the dtypes for this DataFrame.
Returns:
The dtypes for this DataFrame.
"""
return self._query_compiler.dtypes
@property
def empty(self):
"""Determines if the DataFrame is empty.
Returns:
True if the DataFrame is empty.
False otherwise.
"""
return len(self.columns) == 0 or len(self.index) == 0
@property
def values(self):
"""Create a numpy array with the values from this DataFrame.
Returns:
The numpy representation of this DataFrame.
"""
return to_pandas(self).values
@property
def axes(self):
"""Get the axes for the DataFrame.
Returns:
The axes for the DataFrame.
"""
return [self.index, self.columns]
@property
def shape(self):
"""Get the size of each of the dimensions in the DataFrame.
Returns:
A tuple with the size of each dimension as they appear in axes().
"""
return len(self.index), len(self.columns)
def _update_inplace(self, new_query_compiler):
"""Updates the current DataFrame inplace.
Args:
new_query_compiler: The new QueryCompiler to use to manage the data
"""
old_query_compiler = self._query_compiler
self._query_compiler = new_query_compiler
old_query_compiler.free()
def add_prefix(self, prefix):
"""Add a prefix to each of the column names.
Returns:
A new DataFrame containing the new column names.
"""
return DataFrame(query_compiler=self._query_compiler.add_prefix(prefix))
def add_suffix(self, suffix):
"""Add a suffix to each of the column names.
Returns:
A new DataFrame containing the new column names.
"""
return DataFrame(query_compiler=self._query_compiler.add_suffix(suffix))
def applymap(self, func):
"""Apply a function to a DataFrame elementwise.
Args:
func (callable): The function to apply.
"""
if not callable(func):
raise ValueError("'{0}' object is not callable".format(type(func)))
ErrorMessage.non_verified_udf()
return DataFrame(query_compiler=self._query_compiler.applymap(func))
def copy(self, deep=True):
"""Creates a shallow copy of the DataFrame.
Returns:
A new DataFrame pointing to the same partitions as this one.
"""
return DataFrame(query_compiler=self._query_compiler.copy())
def groupby(
self,
by=None,
axis=0,
level=None,
as_index=True,
sort=True,
group_keys=True,
squeeze=False,
**kwargs
):
"""Apply a groupby to this DataFrame. See _groupby() remote task.
Args:
by: The value to groupby.
axis: The axis to groupby.
level: The level of the groupby.
as_index: Whether or not to store result as index.
sort: Whether or not to sort the result by the index.
group_keys: Whether or not to group the keys.
squeeze: Whether or not to squeeze.
Returns:
A new DataFrame resulting from the groupby.
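Example (assumed column label):
df.groupby("col_a").sum() groups on a column label;
df.groupby(lambda i: i % 2) groups on a callable applied to the index.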
"""
axis = pandas.DataFrame()._get_axis_number(axis)
idx_name = ""
if callable(by):
by = by(self.index)
elif isinstance(by, string_types):
idx_name = by
by = self.__getitem__(by).values.tolist()
elif is_list_like(by):
if isinstance(by, pandas.Series):
by = by.values.tolist()
mismatch = (
len(by) != len(self) if axis == 0 else len(by) != len(self.columns)
)
if all(obj in self for obj in by) and mismatch:
# In the future, we will need to add logic to handle this, but for now
# we default to pandas in this case.
pass
elif mismatch:
raise KeyError(next(x for x in by if x not in self))
from .groupby import DataFrameGroupBy
return DataFrameGroupBy(
self,
by,
axis,
level,
as_index,
sort,
group_keys,
squeeze,
idx_name,
**kwargs
)
def sum(
self,
axis=None,
skipna=True,
level=None,
numeric_only=None,
min_count=0,
**kwargs
):
"""Perform a sum across the DataFrame.
Args:
axis (int): The axis to sum on.
skipna (bool): True to skip NA values, false otherwise.
Returns:
The sum of the DataFrame.
"""
axis = pandas.DataFrame()._get_axis_number(axis) if axis is not None else 0
self._validate_dtypes_sum_prod_mean(axis, numeric_only, ignore_axis=False)
return self._query_compiler.sum(
axis=axis,
skipna=skipna,
level=level,
numeric_only=numeric_only,
min_count=min_count,
**kwargs
)
def abs(self):
"""Apply an absolute value function to all numeric columns.
Returns:
A new DataFrame with the applied absolute value.
"""
self._validate_dtypes(numeric_only=True)
return DataFrame(query_compiler=self._query_compiler.abs())
def isin(self, values):
"""Fill a DataFrame with booleans for cells contained in values.
Args:
values (iterable, DataFrame, Series, or dict): The values to find.
Returns:
A new DataFrame with booleans representing whether or not a cell
is in values.
True: cell is contained in values.
False: otherwise
"""
return DataFrame(query_compiler=self._query_compiler.isin(values=values))
def isna(self):
"""Fill a DataFrame with booleans for cells containing NA.
Returns:
A new DataFrame with booleans representing whether or not a cell
is NA.
True: cell contains NA.
False: otherwise.
"""
return DataFrame(query_compiler=self._query_compiler.isna())
def isnull(self):
"""Fill a DataFrame with booleans for cells containing a null value.
Returns:
A new DataFrame with booleans representing whether or not a cell
is null.
True: cell contains null.
False: otherwise.
"""
return DataFrame(query_compiler=self._query_compiler.isnull())
def keys(self):
"""Get the info axis for the DataFrame.
Returns:
A pandas Index for this DataFrame.
"""
return self.columns
def transpose(self, *args, **kwargs):
"""Transpose columns and rows for the DataFrame.
Returns:
A new DataFrame transposed from this DataFrame.
"""
return DataFrame(query_compiler=self._query_compiler.transpose(*args, **kwargs))
T = property(transpose)
def dropna(self, axis=0, how="any", thresh=None, subset=None, inplace=False):
"""Create a new DataFrame from the removed NA values from this one.
Args:
axis (int, tuple, or list): The axis to apply the drop.
how (str): How to drop the NA values.
'all': drop the label if all values are NA.
'any': drop the label if any values are NA.
thresh (int): The minimum number of non-NA values required to keep a label.
subset ([label]): Labels to consider from other axis.
inplace (bool): Change this DataFrame or return a new DataFrame.
True: Modify the data for this DataFrame, return None.
False: Create a new DataFrame and return it.
Returns:
If inplace is set to True, returns None, otherwise returns a new
DataFrame with the dropna applied.
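Example (assumed frame):
df.dropna(axis=0, how="all") drops rows that are entirely NA;
df.dropna(axis=1, thresh=3) keeps only columns with at least 3 non-NA values.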
"""
inplace = validate_bool_kwarg(inplace, "inplace")
if is_list_like(axis):
axis = [pandas.DataFrame()._get_axis_number(ax) for ax in axis]
result = self
for ax in axis:
result = result.dropna(axis=ax, how=how, thresh=thresh, subset=subset)
return self._create_dataframe_from_compiler(result._query_compiler, inplace)
axis = pandas.DataFrame()._get_axis_number(axis)
if how is not None and how not in ["any", "all"]:
raise ValueError("invalid how option: %s" % how)
if how is None and thresh is None:
raise TypeError("must specify how or thresh")
if subset is not None:
if axis == 1:
indices = self.index.get_indexer_for(subset)
check = indices == -1
if check.any():
raise KeyError(list(np.compress(check, subset)))
else:
indices = self.columns.get_indexer_for(subset)
check = indices == -1
if check.any():
raise KeyError(list(np.compress(check, subset)))
new_query_compiler = self._query_compiler.dropna(
axis=axis, how=how, thresh=thresh, subset=subset
)
return self._create_dataframe_from_compiler(new_query_compiler, inplace)
def add(self, other, axis="columns", level=None, fill_value=None):
"""Add this DataFrame to another or a scalar/list.
Args:
other: What to add to this DataFrame.
axis: The axis to apply addition over. Only applicable to Series
or list 'other'.
level: A level in the multilevel axis to add over.
fill_value: The value to fill NaN.
Returns:
A new DataFrame with the applied addition.
"""
axis = pandas.DataFrame()._get_axis_number(axis)
if level is not None:
if isinstance(other, DataFrame):
other = other._query_compiler.to_pandas()
return self._default_to_pandas(
pandas.DataFrame.add,
other,
axis=axis,
level=level,
fill_value=fill_value,
)
other = self._validate_other(other, axis, numeric_or_object_only=True)
new_query_compiler = self._query_compiler.add(
other=other, axis=axis, level=level, fill_value=fill_value
)
return self._create_dataframe_from_compiler(new_query_compiler)
def agg(self, func, axis=0, *args, **kwargs):
return self.aggregate(func, axis, *args, **kwargs)
def aggregate(self, func, axis=0, *args, **kwargs):
axis = pandas.DataFrame()._get_axis_number(axis)
result = None
if axis == 0:
try:
result = self._aggregate(func, axis=axis, *args, **kwargs)
except TypeError:
pass
if result is None:
kwargs.pop("is_transform", None)
return self.apply(func, axis=axis, args=args, **kwargs)
return result
def _aggregate(self, arg, *args, **kwargs):
_axis = kwargs.pop("_axis", None)
if _axis is None:
_axis = getattr(self, "axis", 0)
kwargs.pop("_level", None)
if isinstance(arg, string_types):
return self._string_function(arg, *args, **kwargs)
# Dictionaries have complex behavior because they can be renamed here.
elif isinstance(arg, dict):
return self._default_to_pandas(pandas.DataFrame.agg, arg, *args, **kwargs)
elif is_list_like(arg) or callable(arg):
return self.apply(arg, axis=_axis, args=args, **kwargs)
else:
# TODO Make pandas error
raise ValueError("type {} is not callable".format(type(arg)))
def _string_function(self, func, *args, **kwargs):
assert isinstance(func, string_types)
f = getattr(self, func, None)
if f is not None:
if callable(f):
return f(*args, **kwargs)
assert len(args) == 0
assert (
len([kwarg for kwarg in kwargs if kwarg not in ["axis", "_level"]]) == 0
)
return f
f = getattr(np, func, None)
if f is not None:
return self._default_to_pandas(pandas.DataFrame.agg, func, *args, **kwargs)
raise ValueError("{} is an unknown string function".format(func))
def align(
self,
other,
join="outer",
axis=None,
level=None,
copy=True,
fill_value=None,
method=None,
limit=None,
fill_axis=0,
broadcast_axis=None,
):
if isinstance(other, DataFrame):
other = other._query_compiler.to_pandas()
return self._default_to_pandas(
pandas.DataFrame.align,
other,
join=join,
axis=axis,
level=level,
copy=copy,
fill_value=fill_value,
method=method,
limit=limit,
fill_axis=fill_axis,
broadcast_axis=broadcast_axis,
)
def all(self, axis=0, bool_only=None, skipna=None, level=None, **kwargs):
"""Return whether all elements are True over requested axis
Note:
If axis=None or axis=0, this call applies df.all(axis=1)
to the transpose of df.
"""
if axis is not None:
axis = pandas.DataFrame()._get_axis_number(axis)
else:
if bool_only:
raise ValueError("Axis must be 0 or 1 (got {})".format(axis))
return self._query_compiler.all(
axis=axis, bool_only=bool_only, skipna=skipna, level=level, **kwargs
)
def any(self, axis=0, bool_only=None, skipna=None, level=None, **kwargs):
"""Return whether any elements are True over requested axis
Note:
If axis=None or axis=0, this call applies on the column partitions,
otherwise operates on row partitions
"""
if axis is not None:
axis = pandas.DataFrame()._get_axis_number(axis)
else:
if bool_only:
raise ValueError("Axis must be 0 or 1 (got {})".format(axis))
return self._query_compiler.any(
axis=axis, bool_only=bool_only, skipna=skipna, level=level, **kwargs
)
def append(self, other, ignore_index=False, verify_integrity=False, sort=None):
"""Append another DataFrame/list/Series to this one.
Args:
other: The object to append to this.
ignore_index: Ignore the index on appending.
verify_integrity: Verify the integrity of the index on completion.
Returns:
A new DataFrame containing the concatenated values.
"""
if isinstance(other, (pandas.Series, dict)):
if isinstance(other, dict):
other = pandas.Series(other)
if other.name is None and not ignore_index:
raise TypeError(
"Can only append a Series if ignore_index=True"
" or if the Series has a name"
)
if other.name is None:
index = None
else:
# other must have the same index name as self, otherwise
# index name will be reset
index = pandas.Index([other.name], name=self.index.name)
# Create a Modin DataFrame from this Series for ease of development
other = DataFrame(pandas.DataFrame(other).T, index=index)._query_compiler
elif isinstance(other, list):
if not isinstance(other[0], DataFrame):
other = pandas.DataFrame(other)
if (self.columns.get_indexer(other.columns) >= 0).all():
other = DataFrame(other.loc[:, self.columns])._query_compiler
else:
other = DataFrame(other)._query_compiler
else:
other = [obj._query_compiler for obj in other]
else:
other = other._query_compiler
# If ignore_index is False, by definition the Index will be correct.
# We also do this first to ensure that we don't waste compute/memory.
if verify_integrity and not ignore_index:
appended_index = self.index.append(other.index)
is_valid = next((False for idx in appended_index.duplicated() if idx), True)
if not is_valid:
raise ValueError(
"Indexes have overlapping values: {}".format(
appended_index[appended_index.duplicated()]
)
)
query_compiler = self._query_compiler.concat(
0, other, ignore_index=ignore_index, sort=sort
)
return DataFrame(query_compiler=query_compiler)
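    # Illustrative append usage (a sketch with hypothetical frames):
    #   a = DataFrame({"x": [1, 2]})
    #   b = DataFrame({"x": [3, 4]})
    #   a.append(b, ignore_index=True)      # four rows, re-indexed 0..3
    #   a.append(b, verify_integrity=True)  # raises ValueError: indexes 0, 1 overlap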
def apply(
self, func, axis=0, broadcast=False, raw=False, reduce=None, args=(), **kwds
):
"""Apply a function along input axis of DataFrame.
Args:
func: The function to apply
axis: The axis over which to apply the func.
broadcast: Whether or not to broadcast.
raw: Whether or not to convert to a Series.
reduce: Whether or not to try to apply reduction procedures.
Returns:
Series or DataFrame, depending on func.
"""
axis = pandas.DataFrame()._get_axis_number(axis)
ErrorMessage.non_verified_udf()
if isinstance(func, string_types):
if axis == 1:
kwds["axis"] = axis
return getattr(self, func)(*args, **kwds)
elif isinstance(func, dict):
if axis == 1:
raise TypeError(
"(\"'dict' object is not callable\", "
"'occurred at index {0}'".format(self.index[0])
)
if len(self.columns) != len(set(self.columns)):
warnings.warn(
"duplicate column names not supported with apply().",
FutureWarning,
stacklevel=2,
)
elif is_list_like(func):
if axis == 1:
raise TypeError(
"(\"'list' object is not callable\", "
"'occurred at index {0}'".format(self.index[0])
)
elif not callable(func):
return
query_compiler = self._query_compiler.apply(func, axis, *args, **kwds)
if isinstance(query_compiler, pandas.Series):
return query_compiler
return DataFrame(query_compiler=query_compiler)
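    # Illustrative apply usage (a sketch with a hypothetical frame; np is numpy):
    #   df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
    #   df.apply(np.sum)           # column-wise: a -> 6, b -> 15
    #   df.apply(np.sum, axis=1)   # row-wise: 5, 7, 9
    #   df.apply("sum")            # a string name dispatches to the DataFrame method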
def as_blocks(self, copy=True):
return self._default_to_pandas(pandas.DataFrame.as_blocks, copy=copy)
def as_matrix(self, columns=None):
"""Convert the frame to its Numpy-array representation.
Args:
columns: If None, return all columns, otherwise,
returns specified columns.
Returns:
values: ndarray
"""
# TODO this is very inefficient, also see __array__
return to_pandas(self).as_matrix(columns)
def asfreq(self, freq, method=None, how=None, normalize=False, fill_value=None):
return self._default_to_pandas(
pandas.DataFrame.asfreq,
freq,
method=method,
how=how,
normalize=normalize,
fill_value=fill_value,
)
def asof(self, where, subset=None):
return self._default_to_pandas(pandas.DataFrame.asof, where, subset=subset)
def assign(self, **kwargs):
return self._default_to_pandas(pandas.DataFrame.assign, **kwargs)
def astype(self, dtype, copy=True, errors="raise", **kwargs):
col_dtypes = {}
if isinstance(dtype, dict):
if not set(dtype.keys()).issubset(set(self.columns)) and errors == "raise":
raise KeyError(
"Only a column name can be used for the key in"
"a dtype mappings argument."
)
col_dtypes = dtype
else:
for column in self.columns:
col_dtypes[column] = dtype
new_query_compiler = self._query_compiler.astype(col_dtypes, **kwargs)
return self._create_dataframe_from_compiler(new_query_compiler, not copy)
def at_time(self, time, asof=False):
return self._default_to_pandas(pandas.DataFrame.at_time, time, asof=asof)
def between_time(self, start_time, end_time, include_start=True, include_end=True):
return self._default_to_pandas(
pandas.DataFrame.between_time,
start_time,
end_time,
include_start=include_start,
include_end=include_end,
)
def bfill(self, axis=None, inplace=False, limit=None, downcast=None):
"""Synonym for DataFrame.fillna(method='bfill')"""
new_df = self.fillna(
method="bfill", axis=axis, limit=limit, downcast=downcast, inplace=inplace
)
if not inplace:
return new_df
def bool(self):
"""Return the bool of a single element PandasObject.
This must be a boolean scalar value, either True or False. Raise a
ValueError if the PandasObject does not have exactly 1 element, or that
element is not boolean
"""
shape = self.shape
if shape != (1,) and shape != (1, 1):
raise ValueError(
"""The PandasObject does not have exactly
1 element. Return the bool of a single
element PandasObject. The truth value is
ambiguous. Use a.empty, a.item(), a.any()
or a.all()."""
)
else:
return to_pandas(self).bool()
def boxplot(
self,
column=None,
by=None,
ax=None,
fontsize=None,
rot=0,
grid=True,
figsize=None,
layout=None,
return_type=None,
**kwargs
):
return to_pandas(self).boxplot(
column=column,
by=by,
ax=ax,
fontsize=fontsize,
rot=rot,
grid=grid,
figsize=figsize,
layout=layout,
return_type=return_type,
**kwargs
)
def clip(self, lower=None, upper=None, axis=None, inplace=False, *args, **kwargs):
# validate inputs
if axis is not None:
axis = pandas.DataFrame()._get_axis_number(axis)
self._validate_dtypes(numeric_only=True)
if is_list_like(lower) or is_list_like(upper):
if axis is None:
raise ValueError("Must specify axis = 0 or 1")
self._validate_other(lower, axis)
self._validate_other(upper, axis)
inplace = validate_bool_kwarg(inplace, "inplace")
axis = numpy_compat.function.validate_clip_with_axis(axis, args, kwargs)
# any np.nan bounds are treated as None
if lower is not None and np.any(np.isnan(lower)):
lower = None
if upper is not None and np.any(np.isnan(upper)):
upper = None
new_query_compiler = self._query_compiler.clip(
lower=lower, upper=upper, axis=axis, inplace=inplace, *args, **kwargs
)
return self._create_dataframe_from_compiler(new_query_compiler, inplace)
def clip_lower(self, threshold, axis=None, inplace=False):
return self.clip(lower=threshold, axis=axis, inplace=inplace)
def clip_upper(self, threshold, axis=None, inplace=False):
return self.clip(upper=threshold, axis=axis, inplace=inplace)
def combine(self, other, func, fill_value=None, overwrite=True):
if isinstance(other, DataFrame):
other = other._query_compiler.to_pandas()
return self._default_to_pandas(
pandas.DataFrame.combine,
other,
func,
fill_value=fill_value,
overwrite=overwrite,
)
def combine_first(self, other):
if isinstance(other, DataFrame):
other = other._query_compiler.to_pandas()
return self._default_to_pandas(pandas.DataFrame.combine_first, other=other)
def compound(self, axis=None, skipna=None, level=None):
return self._default_to_pandas(
pandas.DataFrame.compound, axis=axis, skipna=skipna, level=level
)
def consolidate(self, inplace=False):
return self._default_to_pandas(pandas.DataFrame.consolidate, inplace=inplace)
def convert_objects(
self,
convert_dates=True,
convert_numeric=False,
convert_timedeltas=True,
copy=True,
):
return self._default_to_pandas(
pandas.DataFrame.convert_objects,
convert_dates=convert_dates,
convert_numeric=convert_numeric,
convert_timedeltas=convert_timedeltas,
copy=copy,
)
def corr(self, method="pearson", min_periods=1):
return self._default_to_pandas(
pandas.DataFrame.corr, method=method, min_periods=min_periods
)
def corrwith(self, other, axis=0, drop=False):
if isinstance(other, DataFrame):
other = other._query_compiler.to_pandas()
return self._default_to_pandas(
pandas.DataFrame.corrwith, other, axis=axis, drop=drop
)
def count(self, axis=0, level=None, numeric_only=False):
"""Get the count of non-null objects in the DataFrame.
Arguments:
axis: 0 or 'index' for row-wise, 1 or 'columns' for column-wise.
level: If the axis is a MultiIndex (hierarchical), count along a
particular level, collapsing into a DataFrame.
numeric_only: Include only float, int, boolean data
Returns:
The count, in a Series (or DataFrame if level is specified).
"""
axis = pandas.DataFrame()._get_axis_number(axis) if axis is not None else 0
return self._query_compiler.count(
axis=axis, level=level, numeric_only=numeric_only
)
def cov(self, min_periods=None):
return self._default_to_pandas(pandas.DataFrame.cov, min_periods=min_periods)
def cummax(self, axis=None, skipna=True, *args, **kwargs):
"""Perform a cumulative maximum across the DataFrame.
Args:
axis (int): The axis to take maximum on.
skipna (bool): True to skip NA values, false otherwise.
Returns:
The cumulative maximum of the DataFrame.
"""
axis = pandas.DataFrame()._get_axis_number(axis) if axis is not None else 0
if axis:
self._validate_dtypes()
return DataFrame(
query_compiler=self._query_compiler.cummax(
axis=axis, skipna=skipna, **kwargs
)
)
def cummin(self, axis=None, skipna=True, *args, **kwargs):
"""Perform a cumulative minimum across the DataFrame.
Args:
axis (int): The axis to cummin on.
skipna (bool): True to skip NA values, false otherwise.
Returns:
The cumulative minimum of the DataFrame.
"""
axis = pandas.DataFrame()._get_axis_number(axis) if axis is not None else 0
if axis:
self._validate_dtypes()
return DataFrame(
query_compiler=self._query_compiler.cummin(
axis=axis, skipna=skipna, **kwargs
)
)
def cumprod(self, axis=None, skipna=True, *args, **kwargs):
"""Perform a cumulative product across the DataFrame.
Args:
axis (int): The axis to take product on.
skipna (bool): True to skip NA values, false otherwise.
Returns:
The cumulative product of the DataFrame.
"""
axis = pandas.DataFrame()._get_axis_number(axis) if axis is not None else 0
self._validate_dtypes(numeric_only=True)
return DataFrame(
query_compiler=self._query_compiler.cumprod(
axis=axis, skipna=skipna, **kwargs
)
)
def cumsum(self, axis=None, skipna=True, *args, **kwargs):
"""Perform a cumulative sum across the DataFrame.
Args:
axis (int): The axis to take sum on.
skipna (bool): True to skip NA values, false otherwise.
Returns:
The cumulative sum of the DataFrame.
"""
axis = pandas.DataFrame()._get_axis_number(axis) if axis is not None else 0
self._validate_dtypes(numeric_only=True)
return DataFrame(
query_compiler=self._query_compiler.cumsum(
axis=axis, skipna=skipna, **kwargs
)
)
def describe(self, percentiles=None, include=None, exclude=None):
"""
Generates descriptive statistics that summarize the central tendency,
dispersion and shape of a dataset's distribution, excluding NaN values.
Args:
percentiles (list-like of numbers, optional):
The percentiles to include in the output.
include: White-list of data types to include in results
exclude: Black-list of data types to exclude in results
Returns: Series/DataFrame of summary statistics
"""
if include is not None:
if not is_list_like(include):
include = [include]
include = [np.dtype(i) for i in include]
if exclude is not None:
            if not is_list_like(exclude):
exclude = [exclude]
exclude = [np.dtype(e) for e in exclude]
if percentiles is not None:
pandas.DataFrame()._check_percentile(percentiles)
return DataFrame(
query_compiler=self._query_compiler.describe(
percentiles=percentiles, include=include, exclude=exclude
)
)
def diff(self, periods=1, axis=0):
"""Finds the difference between elements on the axis requested
Args:
periods: Periods to shift for forming difference
axis: Take difference over rows or columns
Returns:
DataFrame with the diff applied
"""
axis = pandas.DataFrame()._get_axis_number(axis)
return DataFrame(
query_compiler=self._query_compiler.diff(periods=periods, axis=axis)
)
def div(self, other, axis="columns", level=None, fill_value=None):
"""Divides this DataFrame against another DataFrame/Series/scalar.
Args:
other: The object to use to apply the divide against this.
axis: The axis to divide over.
level: The Multilevel index level to apply divide over.
fill_value: The value to fill NaNs with.
Returns:
A new DataFrame with the Divide applied.
"""
if level is not None:
if isinstance(other, DataFrame):
other = other._query_compiler.to_pandas()
return self._default_to_pandas(
pandas.DataFrame.div,
other,
axis=axis,
level=level,
fill_value=fill_value,
)
other = self._validate_other(other, axis, numeric_only=True)
new_query_compiler = self._query_compiler.div(
other=other, axis=axis, level=level, fill_value=fill_value
)
return self._create_dataframe_from_compiler(new_query_compiler)
def divide(self, other, axis="columns", level=None, fill_value=None):
"""Synonym for div.
Args:
other: The object to use to apply the divide against this.
axis: The axis to divide over.
level: The Multilevel index level to apply divide over.
fill_value: The value to fill NaNs with.
Returns:
A new DataFrame with the Divide applied.
"""
return self.div(other, axis, level, fill_value)
def dot(self, other):
if isinstance(other, DataFrame):
other = other._query_compiler.to_pandas()
return self._default_to_pandas(pandas.DataFrame.dot, other)
def drop(
self,
labels=None,
axis=0,
index=None,
columns=None,
level=None,
inplace=False,
errors="raise",
):
"""Return new object with labels in requested axis removed.
Args:
labels: Index or column labels to drop.
axis: Whether to drop labels from the index (0 / 'index') or
columns (1 / 'columns').
index, columns: Alternative to specifying axis (labels, axis=1 is
equivalent to columns=labels).
level: For MultiIndex
inplace: If True, do operation inplace and return None.
errors: If 'ignore', suppress error and existing labels are
dropped.
Returns:
dropped : type of caller
"""
# TODO implement level
if level is not None:
return self._default_to_pandas(
pandas.DataFrame.drop,
labels=labels,
axis=axis,
index=index,
columns=columns,
level=level,
inplace=inplace,
errors=errors,
)
inplace = validate_bool_kwarg(inplace, "inplace")
if labels is not None:
if index is not None or columns is not None:
raise ValueError("Cannot specify both 'labels' and 'index'/'columns'")
axis = pandas.DataFrame()._get_axis_name(axis)
axes = {axis: labels}
elif index is not None or columns is not None:
axes, _ = pandas.DataFrame()._construct_axes_from_arguments(
(index, columns), {}
)
else:
raise ValueError(
"Need to specify at least one of 'labels', 'index' or 'columns'"
)
# TODO Clean up this error checking
if "index" not in axes:
axes["index"] = None
elif axes["index"] is not None:
if not is_list_like(axes["index"]):
axes["index"] = [axes["index"]]
if errors == "raise":
                non_existent = [obj for obj in axes["index"] if obj not in self.index]
                if len(non_existent):
                    raise ValueError(
                        "labels {} not contained in axis".format(non_existent)
                    )
else:
axes["index"] = [obj for obj in axes["index"] if obj in self.index]
# If the length is zero, we will just do nothing
if not len(axes["index"]):
axes["index"] = None
if "columns" not in axes:
axes["columns"] = None
elif axes["columns"] is not None:
if not is_list_like(axes["columns"]):
axes["columns"] = [axes["columns"]]
if errors == "raise":
                non_existent = [
                    obj for obj in axes["columns"] if obj not in self.columns
                ]
                if len(non_existent):
                    raise ValueError(
                        "labels {} not contained in axis".format(non_existent)
                    )
else:
axes["columns"] = [
obj for obj in axes["columns"] if obj in self.columns
]
# If the length is zero, we will just do nothing
if not len(axes["columns"]):
axes["columns"] = None
new_query_compiler = self._query_compiler.drop(
index=axes["index"], columns=axes["columns"]
)
return self._create_dataframe_from_compiler(new_query_compiler, inplace)
def drop_duplicates(self, subset=None, keep="first", inplace=False):
return self._default_to_pandas(
pandas.DataFrame.drop_duplicates, subset=subset, keep=keep, inplace=inplace
)
def duplicated(self, subset=None, keep="first"):
return self._default_to_pandas(
pandas.DataFrame.duplicated, subset=subset, keep=keep
)
def eq(self, other, axis="columns", level=None):
"""Checks element-wise that this is equal to other.
Args:
other: A DataFrame or Series or scalar to compare to.
axis: The axis to perform the eq over.
level: The Multilevel index level to apply eq over.
Returns:
A new DataFrame filled with Booleans.
"""
if level is not None:
if isinstance(other, DataFrame):
other = other._query_compiler.to_pandas()
return self._default_to_pandas(
pandas.DataFrame.eq, other, axis=axis, level=level
)
other = self._validate_other(other, axis)
new_query_compiler = self._query_compiler.eq(
other=other, axis=axis, level=level
)
return self._create_dataframe_from_compiler(new_query_compiler)
def equals(self, other):
"""
Checks if other DataFrame is elementwise equal to the current one
Returns:
Boolean: True if equal, otherwise False
"""
if isinstance(other, pandas.DataFrame):
# Copy into a Ray DataFrame to simplify logic below
other = DataFrame(other)
if not self.index.equals(other.index) or not self.columns.equals(other.columns):
return False
return all(self.eq(other).all())
def eval(self, expr, inplace=False, **kwargs):
"""Evaluate a Python expression as a string using various backends.
Args:
expr: The expression to evaluate. This string cannot contain any
Python statements, only Python expressions.
parser: The parser to use to construct the syntax tree from the
expression. The default of 'pandas' parses code slightly
different than standard Python. Alternatively, you can parse
an expression using the 'python' parser to retain strict
Python semantics. See the enhancing performance documentation
for more details.
engine: The engine used to evaluate the expression.
truediv: Whether to use true division, like in Python >= 3
local_dict: A dictionary of local variables, taken from locals()
by default.
global_dict: A dictionary of global variables, taken from
globals() by default.
resolvers: A list of objects implementing the __getitem__ special
method that you can use to inject an additional collection
of namespaces to use for variable lookup. For example, this is
used in the query() method to inject the index and columns
variables that refer to their respective DataFrame instance
attributes.
level: The number of prior stack frames to traverse and add to
the current scope. Most users will not need to change this
parameter.
target: This is the target object for assignment. It is used when
there is variable assignment in the expression. If so, then
target must support item assignment with string keys, and if a
copy is being returned, it must also support .copy().
inplace: If target is provided, and the expression mutates target,
whether to modify target inplace. Otherwise, return a copy of
target with the mutation.
Returns:
ndarray, numeric scalar, DataFrame, Series
"""
self._validate_eval_query(expr, **kwargs)
inplace = validate_bool_kwarg(inplace, "inplace")
new_query_compiler = self._query_compiler.eval(expr, **kwargs)
if isinstance(new_query_compiler, pandas.Series):
return new_query_compiler
else:
return self._create_dataframe_from_compiler(new_query_compiler, inplace)
def ewm(
self,
com=None,
span=None,
halflife=None,
alpha=None,
min_periods=0,
freq=None,
adjust=True,
ignore_na=False,
axis=0,
):
return self._default_to_pandas(
pandas.DataFrame.ewm,
com=com,
span=span,
halflife=halflife,
alpha=alpha,
min_periods=min_periods,
freq=freq,
adjust=adjust,
ignore_na=ignore_na,
axis=axis,
)
def expanding(self, min_periods=1, freq=None, center=False, axis=0):
return self._default_to_pandas(
pandas.DataFrame.expanding,
min_periods=min_periods,
freq=freq,
center=center,
axis=axis,
)
def ffill(self, axis=None, inplace=False, limit=None, downcast=None):
"""Synonym for DataFrame.fillna(method='ffill')
"""
new_df = self.fillna(
method="ffill", axis=axis, limit=limit, downcast=downcast, inplace=inplace
)
if not inplace:
return new_df
def fillna(
self,
value=None,
method=None,
axis=None,
inplace=False,
limit=None,
downcast=None,
**kwargs
):
"""Fill NA/NaN values using the specified method.
Args:
value: Value to use to fill holes. This value cannot be a list.
            method: Method to use for filling holes in reindexed Series.
                pad / ffill: propagate last valid observation forward to
                    next valid.
                backfill / bfill: use NEXT valid observation to fill gap.
axis: 0 or 'index', 1 or 'columns'.
inplace: If True, fill in place. Note: this will modify any other
views on this object.
limit: If method is specified, this is the maximum number of
consecutive NaN values to forward/backward fill. In other
words, if there is a gap with more than this number of
consecutive NaNs, it will only be partially filled. If method
is not specified, this is the maximum number of entries along
the entire axis where NaNs will be filled. Must be greater
than 0 if not None.
downcast: A dict of item->dtype of what to downcast if possible,
or the string 'infer' which will try to downcast to an
appropriate equal type.
Returns:
filled: DataFrame
"""
# TODO implement value passed as DataFrame
if isinstance(value, pandas.DataFrame) or isinstance(value, pandas.Series):
new_query_compiler = self._default_to_pandas(
pandas.DataFrame.fillna,
value=value,
method=method,
axis=axis,
inplace=False,
limit=limit,
downcast=downcast,
**kwargs
)._query_compiler
return self._create_dataframe_from_compiler(new_query_compiler, inplace)
inplace = validate_bool_kwarg(inplace, "inplace")
axis = pandas.DataFrame()._get_axis_number(axis) if axis is not None else 0
if isinstance(value, (list, tuple)):
raise TypeError(
'"value" parameter must be a scalar or dict, but '
'you passed a "{0}"'.format(type(value).__name__)
)
if value is None and method is None:
raise ValueError("must specify a fill method or value")
if value is not None and method is not None:
raise ValueError("cannot specify both a fill method and value")
if method is not None and method not in ["backfill", "bfill", "pad", "ffill"]:
expecting = "pad (ffill) or backfill (bfill)"
msg = "Invalid fill method. Expecting {expecting}. Got {method}".format(
expecting=expecting, method=method
)
raise ValueError(msg)
new_query_compiler = self._query_compiler.fillna(
value=value,
method=method,
axis=axis,
inplace=False,
limit=limit,
downcast=downcast,
**kwargs
)
return self._create_dataframe_from_compiler(new_query_compiler, inplace)
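    # Illustrative fillna usage (a sketch with a hypothetical frame):
    #   df = DataFrame({"a": [1.0, None, 3.0]})
    #   df.fillna(0)                        # replace every NaN with 0
    #   df.fillna(method="ffill")           # propagate the last valid value forward
    #   df.fillna(value=1, method="ffill")  # raises ValueError: cannot give both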
def filter(self, items=None, like=None, regex=None, axis=None):
"""Subset rows or columns based on their labels
Args:
items (list): list of labels to subset
like (string): retain labels where `arg in label == True`
regex (string): retain labels matching regex input
axis: axis to filter on
Returns:
A new DataFrame with the filter applied.
"""
nkw = com._count_not_none(items, like, regex)
if nkw > 1:
raise TypeError(
"Keyword arguments `items`, `like`, or `regex` "
"are mutually exclusive"
)
if nkw == 0:
raise TypeError("Must pass either `items`, `like`, or `regex`")
if axis is None:
axis = "columns" # This is the default info axis for dataframes
axis = pandas.DataFrame()._get_axis_number(axis)
labels = self.columns if axis else self.index
if items is not None:
bool_arr = labels.isin(items)
elif like is not None:
def f(x):
return like in to_str(x)
bool_arr = labels.map(f).tolist()
else:
def f(x):
return matcher.search(to_str(x)) is not None
matcher = re.compile(regex)
bool_arr = labels.map(f).tolist()
if not axis:
return self[bool_arr]
return self[self.columns[bool_arr]]
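    # Illustrative filter usage (a sketch; exactly one of items/like/regex is allowed):
    #   df = DataFrame({"alpha": [1], "beta": [2], "gamma": [3]})
    #   df.filter(items=["alpha"])   # keep only the listed columns
    #   df.filter(like="eta")        # keeps 'beta' ('eta' is a substring of the label)
    #   df.filter(regex="^a")        # keeps 'alpha'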
def first(self, offset):
return self._default_to_pandas(pandas.DataFrame.first, offset)
def first_valid_index(self):
"""Return index for first non-NA/null value.
Returns:
scalar: type of index
"""
return self._query_compiler.first_valid_index()
def floordiv(self, other, axis="columns", level=None, fill_value=None):
"""Divides this DataFrame against another DataFrame/Series/scalar.
Args:
other: The object to use to apply the divide against this.
axis: The axis to divide over.
level: The Multilevel index level to apply divide over.
fill_value: The value to fill NaNs with.
Returns:
A new DataFrame with the Divide applied.
"""
if level is not None:
if isinstance(other, DataFrame):
other = other._query_compiler.to_pandas()
return self._default_to_pandas(
pandas.DataFrame.floordiv,
other,
axis=axis,
level=level,
fill_value=fill_value,
)
other = self._validate_other(other, axis, numeric_only=True)
new_query_compiler = self._query_compiler.floordiv(
other=other, axis=axis, level=level, fill_value=fill_value
)
return self._create_dataframe_from_compiler(new_query_compiler)
@classmethod
def from_csv(
cls,
path,
header=0,
sep=", ",
index_col=0,
parse_dates=True,
encoding=None,
tupleize_cols=None,
infer_datetime_format=False,
):
from .io import read_csv
return read_csv(
path,
header=header,
sep=sep,
index_col=index_col,
parse_dates=parse_dates,
encoding=encoding,
tupleize_cols=tupleize_cols,
infer_datetime_format=infer_datetime_format,
)
@classmethod
def from_dict(cls, data, orient="columns", dtype=None):
ErrorMessage.default_to_pandas()
return from_pandas(pandas.DataFrame.from_dict(data, orient=orient, dtype=dtype))
@classmethod
def from_items(cls, items, columns=None, orient="columns"):
ErrorMessage.default_to_pandas()
return from_pandas(
            pandas.DataFrame.from_items(items, columns=columns, orient=orient)
        )
from math import log
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from collections import OrderedDict
from itertools import combinations_with_replacement, product
from typing import Sequence
from sklearn.metrics import pairwise_distances
import spatialdata
def reduce_matrix_size(matrix, types, dicts):
"""
    Merge the rows of a matrix that share the same cell type.
    Parameters
    ----------
    matrix: array
    types: array, the cell type of each row in `matrix`
    dicts: OrderedDict mapping each unique type to an (initially empty) list
    Returns
    -------
    list of arrays, the rows of `matrix` summed within each cell type
"""
for i, arr in enumerate(matrix):
dicts[types[i]].append(arr)
for k, v in dicts.items():
dicts[k] = np.asarray(v).sum(axis=0)
new_types = []
new_matx = []
for k, v in dicts.items():
new_types.append(k)
new_matx.append(v)
return new_matx
def type_adj_matrix(matrix, types):
"""return an N * N matrix, N is the number of unique types
Args:
matrix: array
types: array
Returns:
tuple, matrix and the unique types
"""
unitypes = np.unique(types)
storage = OrderedDict(zip(unitypes, [[] for _ in range(len(unitypes))]))
new_matrix = reduce_matrix_size(matrix, types, storage)
storage = OrderedDict(zip(unitypes, [[] for _ in range(len(unitypes))]))
type_matrix = reduce_matrix_size(np.asarray(new_matrix).T, types, storage)
return np.array(type_matrix), unitypes
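# Worked example for type_adj_matrix (hypothetical 3-cell neighbourhood):
# with matrix = [[0, 1, 1],
#                [1, 0, 0],
#                [1, 0, 0]]
# and types = ['A', 'B', 'B'], rows and then columns are summed per type,
# giving the 2 x 2 type-by-type matrix [[0, 2], [2, 0]] (order 'A', 'B')
# together with the unique types array(['A', 'B']).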
def pairs_counter(matrix, types, order=False):
"""count how many pairs of types in the matrix
Args:
matrix: array
types: array
order: bool, if True, (x1, x2) and (x2, x1) is not the same
Returns:
dict, the count of each pairs
"""
it = np.nditer(matrix, flags=["multi_index"])
if order:
combs = [i for i in product(types, repeat=2)]
storage = OrderedDict(zip(combs, [0 for _ in range(len(combs))]))
for x in it:
(i1, i2) = it.multi_index
storage[(types[i1], types[i2])] += x
else:
combs = [i for i in combinations_with_replacement(types, 2)]
storage = OrderedDict(zip(combs, [0 for _ in range(len(combs))]))
for x in it:
(i1, i2) = it.multi_index
if i1 <= i2:
storage[(types[i1], types[i2])] += x
else:
storage[(types[i2], types[i1])] += x
return storage
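# Worked example for pairs_counter, continuing the 2 x 2 matrix above:
# with matrix = [[0, 2], [2, 0]] and types = ['A', 'B'],
#   order=False -> {('A', 'A'): 0, ('A', 'B'): 4, ('B', 'B'): 0}
#   order=True  -> {('A', 'A'): 0, ('A', 'B'): 2, ('B', 'A'): 2, ('B', 'B'): 0}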
def interval_pairs(arr):
new_arr = []
for i, x in enumerate(arr):
if i < len(arr) - 1:
new_arr.append((x, arr[i + 1]))
return new_arr
def cell_ShannonEntropy(
obs: pd.DataFrame,
):
"""
    Calculate the Shannon entropy of the spatial label distribution within
    each cell type.
    Parameters
    ----------
    obs
        Annotation data matrix of M cells; expected to contain the columns
        'cell_types' and 'label'.
    Returns
    -------
    dict mapping each cell type to its Shannon entropy
"""
n_cell_types = set(obs['cell_types'])
cell_shannon = {}
for i in n_cell_types:
cell_type = obs.iloc[list(np.where(obs['cell_types'] == i)[0])]
length_cell_type = len(cell_type)
cell_type_dict = dict(cell_type['label'].value_counts())
shannon_ent = 0.0
for key in cell_type_dict:
prob = float(cell_type_dict[key]) / length_cell_type
shannon_ent -= prob * log(prob, 2)
cell_shannon[i] = shannon_ent
return cell_shannon
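# The per-type entropy above is the standard Shannon formula
#   H = -sum_k p_k * log2(p_k),
# where p_k is the fraction of cells of that type carrying spatial label k.
# For example, a cell type split evenly between two labels gets H = 1 bit,
# while a type confined to a single label gets H = 0.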
class spatial_entropy(object):
"""
calculate the spatial entropy by altieri entropy
"""
def __init__(self, cell_cor, types, cut=None, order=False, base=None):
if len(cell_cor) != len(types):
raise ValueError("length of cell and cell types should be the same")
if base is None:
base = np.e
self._cell_cor = cell_cor
self._types = types
self._order = order
self._base = base
self.adj_matrix = pairwise_distances(self._cell_cor)
if isinstance(cut, int):
self._break = interval_pairs(np.linspace(0, self.adj_matrix.max(), cut + 2))
elif isinstance(cut, Sequence):
self._break = interval_pairs(cut)
elif cut is None:
            self._break = interval_pairs(np.linspace(0, self.adj_matrix.max(), 3))  # if no cut is given, use the minimal split into three break points
else:
raise ValueError("'cut' must be an int or an array-like object")
self._wrap()
def _Z_W(self):
zw = []
for (p1, p2) in self._break:
            bool_matx = ((self.adj_matrix > p1) & (self.adj_matrix <= p2)).astype(int)  # boolean matrix marking point pairs whose distance falls within (p1, p2]
type_matx, utypes = type_adj_matrix(bool_matx, self._types)
pairs_counts = pairs_counter(type_matx, utypes, self._order)
zw.append(pairs_counts)
return zw
def _Z(self):
bool_matx = (self.adj_matrix >= 0).astype(int)
type_matx, utypes = type_adj_matrix(bool_matx, self._types)
z = pairs_counter(type_matx, utypes, self._order)
return z
def _W(self):
w = []
for (p1, p2) in self._break:
w.append(p2 - p1)
w = np.asarray(w)
w = w / w.sum()
return w
def _wrap(self):
zw = np.asarray(self._Z_W())
z = self._Z()
w = np.asarray(self._W())
pz = np.array(list(z.values()))
pz = pz / pz.sum()
H_Zwk = [] # H(Z|w_k)
PI_Zwk = [] # PI(Z|w_k)
for i in zw:
v_ = i.values()
v, pz_ = [], []
for ix, x in enumerate(v_):
if x != 0:
v.append(x)
pz_.append(pz[ix])
v = np.asarray(v)
pz_ = np.asarray(pz_)
v = v / v.sum()
H = v * np.log(1 / v) / np.log(self._base)
PI = v * np.log(v / pz_) / np.log(self._base)
H_Zwk.append(H.sum())
PI_Zwk.append(PI.sum())
self.residue = (w * np.asarray(H_Zwk)).sum()
self.mutual_info = (w * np.asarray(PI_Zwk)).sum()
self.entropy = self.mutual_info + self.residue
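# _wrap implements the Altieri-style decomposition of spatial entropy,
#   H(Z) = MI(Z, W) + H(Z | W),
# where Z is the distribution of co-occurring type pairs, W the set of distance
# classes defined by `cut`, MI(Z, W) the spatial mutual information and
# H(Z | W) the residual entropy. A minimal usage sketch with hypothetical data:
#   coords = np.random.rand(100, 2)
#   types = np.random.choice(['A', 'B', 'C'], 100)
#   se = spatial_entropy(coords, types, cut=5)
#   se.entropy, se.mutual_info, se.residue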
# sliding-window spatial entropy over a specified region
def swspatial_entropy(df, ux, uy, l, w, span, d='h', w_cut=10):
"""
Parameters
----------
    df: the DataFrame containing the coordinates of each cell ('x_coord',
        'y_coord') and its cluster id in the third column
    ux: upper-left x of the window, in coordinate space
    uy: upper-left y of the window, in coordinate space
    l: length of the window, in coordinate space
    w: width of the window, in coordinate space
    span: the step size of the sliding window
    d: 'h' for the horizontal direction or 'v' for the vertical direction
    w_cut: number of distance intervals passed to spatial_entropy
    Returns
    -------
    DataFrame with the window positions and the spatial entropy of each window
    """
if l <= 0 or w <= 0:
raise ValueError("length and width of the window should be greater than 0")
if d == 'h':
x_coord_max = df['x_coord'].max()
if ux + l <= x_coord_max:
site = []
swse = []
for index in range(ux, x_coord_max, span):
spot = df.loc[df['x_coord'] > index]
spot = spot.loc[spot['x_coord'] < (index + l)]
spot = spot.loc[spot['y_coord'] > uy]
spot = spot.loc[spot['y_coord'] < uy + w]
coord = np.array(spot[['x_coord', 'y_coord']])
clusters = list(spot.iloc[:, 2])
print(len(coord))
print(len(clusters))
se = spatial_entropy(coord, clusters, cut=w_cut)
                swse.append(se.entropy)
site.append(index)
if d == 'v':
y_coord_max = df['y_coord'].max()
if uy + w <= y_coord_max:
site = []
swse = []
for index in range(uy, y_coord_max, span):
spot = df.loc[df['x_coord'] > ux]
spot = spot.loc[spot['x_coord'] < ux + l]
spot = spot.loc[spot['y_coord'] > index]
spot = spot.loc[spot['y_coord'] < index + w]
coord = np.array(spot[['x_coord', 'y_coord']])
clusters = list(spot.iloc[:, 2])
se = spatial_entropy(coord, clusters, cut=w_cut)
                swse.append(se.entropy)
site.append(index)
    se_df = pd.DataFrame({'ul_of_windows': site, 'spatial_entropy': swse})
    return se_df
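# Minimal usage sketch for swspatial_entropy (hypothetical input: a DataFrame
# with integer 'x_coord' / 'y_coord' columns and a cluster id as the third
# column, since the window positions are generated with range()):
#   obs = pd.DataFrame({'x_coord': np.random.randint(0, 100, 500),
#                       'y_coord': np.random.randint(0, 100, 500),
#                       'cluster': np.random.choice(['A', 'B'], 500)})
#   windows = swspatial_entropy(obs, ux=0, uy=0, l=30, w=30, span=10, d='h')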
# pylint: disable-msg=W0612,E1101,W0141
import nose
from numpy.random import randn
import numpy as np
from pandas.core.index import Index, MultiIndex
from pandas import Panel, DataFrame, Series, notnull, isnull
from pandas.util.testing import (assert_almost_equal,
assert_series_equal,
assert_frame_equal,
assertRaisesRegexp)
import pandas.core.common as com
import pandas.util.testing as tm
from pandas.compat import (range, lrange, StringIO, lzip, u, cPickle,
product as cart_product, zip)
import pandas as pd
import pandas.index as _index
class TestMultiLevel(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
import warnings
warnings.filterwarnings(action='ignore', category=FutureWarning)
index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],
['one', 'two', 'three']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['first', 'second'])
self.frame = DataFrame(np.random.randn(10, 3), index=index,
columns=Index(['A', 'B', 'C'], name='exp'))
self.single_level = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux']],
labels=[[0, 1, 2, 3]],
names=['first'])
# create test series object
arrays = [['bar', 'bar', 'baz', 'baz', 'qux', 'qux', 'foo', 'foo'],
['one', 'two', 'one', 'two', 'one', 'two', 'one', 'two']]
tuples = lzip(*arrays)
index = MultiIndex.from_tuples(tuples)
s = Series(randn(8), index=index)
s[3] = np.NaN
self.series = s
tm.N = 100
self.tdf = tm.makeTimeDataFrame()
self.ymd = self.tdf.groupby([lambda x: x.year, lambda x: x.month,
lambda x: x.day]).sum()
# use Int64Index, to make sure things work
self.ymd.index.set_levels([lev.astype('i8')
for lev in self.ymd.index.levels],
inplace=True)
self.ymd.index.set_names(['year', 'month', 'day'],
inplace=True)
def test_append(self):
a, b = self.frame[:5], self.frame[5:]
result = a.append(b)
tm.assert_frame_equal(result, self.frame)
result = a['A'].append(b['A'])
tm.assert_series_equal(result, self.frame['A'])
def test_dataframe_constructor(self):
multi = DataFrame(np.random.randn(4, 4),
index=[np.array(['a', 'a', 'b', 'b']),
np.array(['x', 'y', 'x', 'y'])])
tm.assert_isinstance(multi.index, MultiIndex)
self.assertNotIsInstance(multi.columns, MultiIndex)
multi = DataFrame(np.random.randn(4, 4),
columns=[['a', 'a', 'b', 'b'],
['x', 'y', 'x', 'y']])
tm.assert_isinstance(multi.columns, MultiIndex)
def test_series_constructor(self):
multi = Series(1., index=[np.array(['a', 'a', 'b', 'b']),
np.array(['x', 'y', 'x', 'y'])])
tm.assert_isinstance(multi.index, MultiIndex)
multi = Series(1., index=[['a', 'a', 'b', 'b'],
['x', 'y', 'x', 'y']])
tm.assert_isinstance(multi.index, MultiIndex)
multi = Series(lrange(4), index=[['a', 'a', 'b', 'b'],
['x', 'y', 'x', 'y']])
tm.assert_isinstance(multi.index, MultiIndex)
def test_reindex_level(self):
# axis=0
month_sums = self.ymd.sum(level='month')
result = month_sums.reindex(self.ymd.index, level=1)
expected = self.ymd.groupby(level='month').transform(np.sum)
assert_frame_equal(result, expected)
# Series
result = month_sums['A'].reindex(self.ymd.index, level=1)
expected = self.ymd['A'].groupby(level='month').transform(np.sum)
assert_series_equal(result, expected)
# axis=1
month_sums = self.ymd.T.sum(axis=1, level='month')
result = month_sums.reindex(columns=self.ymd.index, level=1)
expected = self.ymd.groupby(level='month').transform(np.sum).T
assert_frame_equal(result, expected)
def test_binops_level(self):
def _check_op(opname):
op = getattr(DataFrame, opname)
month_sums = self.ymd.sum(level='month')
result = op(self.ymd, month_sums, level='month')
broadcasted = self.ymd.groupby(level='month').transform(np.sum)
expected = op(self.ymd, broadcasted)
assert_frame_equal(result, expected)
# Series
op = getattr(Series, opname)
result = op(self.ymd['A'], month_sums['A'], level='month')
broadcasted = self.ymd['A'].groupby(
level='month').transform(np.sum)
expected = op(self.ymd['A'], broadcasted)
assert_series_equal(result, expected)
_check_op('sub')
_check_op('add')
_check_op('mul')
_check_op('div')
def test_pickle(self):
def _test_roundtrip(frame):
pickled = cPickle.dumps(frame)
unpickled = cPickle.loads(pickled)
assert_frame_equal(frame, unpickled)
_test_roundtrip(self.frame)
_test_roundtrip(self.frame.T)
_test_roundtrip(self.ymd)
_test_roundtrip(self.ymd.T)
def test_reindex(self):
reindexed = self.frame.ix[[('foo', 'one'), ('bar', 'one')]]
expected = self.frame.ix[[0, 3]]
assert_frame_equal(reindexed, expected)
def test_reindex_preserve_levels(self):
new_index = self.ymd.index[::10]
chunk = self.ymd.reindex(new_index)
self.assertIs(chunk.index, new_index)
chunk = self.ymd.ix[new_index]
self.assertIs(chunk.index, new_index)
ymdT = self.ymd.T
chunk = ymdT.reindex(columns=new_index)
self.assertIs(chunk.columns, new_index)
chunk = ymdT.ix[:, new_index]
self.assertIs(chunk.columns, new_index)
def test_sort_index_preserve_levels(self):
result = self.frame.sort_index()
self.assertEquals(result.index.names, self.frame.index.names)
def test_repr_to_string(self):
repr(self.frame)
repr(self.ymd)
repr(self.frame.T)
repr(self.ymd.T)
buf = StringIO()
self.frame.to_string(buf=buf)
self.ymd.to_string(buf=buf)
self.frame.T.to_string(buf=buf)
self.ymd.T.to_string(buf=buf)
def test_repr_name_coincide(self):
index = MultiIndex.from_tuples([('a', 0, 'foo'), ('b', 1, 'bar')],
names=['a', 'b', 'c'])
df = DataFrame({'value': [0, 1]}, index=index)
lines = repr(df).split('\n')
self.assert_(lines[2].startswith('a 0 foo'))
def test_getitem_simple(self):
df = self.frame.T
col = df['foo', 'one']
assert_almost_equal(col.values, df.values[:, 0])
self.assertRaises(KeyError, df.__getitem__, ('foo', 'four'))
self.assertRaises(KeyError, df.__getitem__, 'foobar')
def test_series_getitem(self):
s = self.ymd['A']
result = s[2000, 3]
result2 = s.ix[2000, 3]
expected = s.reindex(s.index[42:65])
expected.index = expected.index.droplevel(0).droplevel(0)
assert_series_equal(result, expected)
result = s[2000, 3, 10]
expected = s[49]
self.assertEquals(result, expected)
# fancy
result = s.ix[[(2000, 3, 10), (2000, 3, 13)]]
expected = s.reindex(s.index[49:51])
assert_series_equal(result, expected)
# key error
self.assertRaises(KeyError, s.__getitem__, (2000, 3, 4))
def test_series_getitem_corner(self):
s = self.ymd['A']
# don't segfault, GH #495
# out of bounds access
self.assertRaises(IndexError, s.__getitem__, len(self.ymd))
# generator
result = s[(x > 0 for x in s)]
expected = s[s > 0]
assert_series_equal(result, expected)
def test_series_setitem(self):
s = self.ymd['A']
s[2000, 3] = np.nan
self.assert_(isnull(s.values[42:65]).all())
self.assert_(notnull(s.values[:42]).all())
self.assert_(notnull(s.values[65:]).all())
s[2000, 3, 10] = np.nan
self.assert_(isnull(s[49]))
def test_series_slice_partial(self):
pass
def test_frame_getitem_setitem_boolean(self):
df = self.frame.T.copy()
values = df.values
result = df[df > 0]
expected = df.where(df > 0)
assert_frame_equal(result, expected)
df[df > 0] = 5
values[values > 0] = 5
assert_almost_equal(df.values, values)
df[df == 5] = 0
values[values == 5] = 0
assert_almost_equal(df.values, values)
# a df that needs alignment first
df[df[:-1] < 0] = 2
np.putmask(values[:-1], values[:-1] < 0, 2)
assert_almost_equal(df.values, values)
with assertRaisesRegexp(TypeError, 'boolean values only'):
df[df * 0] = 2
def test_frame_getitem_setitem_slice(self):
# getitem
result = self.frame.ix[:4]
expected = self.frame[:4]
assert_frame_equal(result, expected)
# setitem
cp = self.frame.copy()
cp.ix[:4] = 0
self.assert_((cp.values[:4] == 0).all())
self.assert_((cp.values[4:] != 0).all())
def test_frame_getitem_setitem_multislice(self):
levels = [['t1', 't2'], ['a', 'b', 'c']]
labels = [[0, 0, 0, 1, 1], [0, 1, 2, 0, 1]]
midx = MultiIndex(labels=labels, levels=levels, names=[None, 'id'])
df = DataFrame({'value': [1, 2, 3, 7, 8]}, index=midx)
result = df.ix[:, 'value']
assert_series_equal(df['value'], result)
result = df.ix[1:3, 'value']
assert_series_equal(df['value'][1:3], result)
result = df.ix[:, :]
assert_frame_equal(df, result)
result = df
df.ix[:, 'value'] = 10
result['value'] = 10
assert_frame_equal(df, result)
df.ix[:, :] = 10
assert_frame_equal(df, result)
def test_frame_getitem_multicolumn_empty_level(self):
f = DataFrame({'a': ['1', '2', '3'],
'b': ['2', '3', '4']})
f.columns = [['level1 item1', 'level1 item2'],
['', 'level2 item2'],
['level3 item1', 'level3 item2']]
result = f['level1 item1']
expected = DataFrame([['1'], ['2'], ['3']], index=f.index,
columns=['level3 item1'])
assert_frame_equal(result, expected)
def test_frame_setitem_multi_column(self):
df = DataFrame(randn(10, 4), columns=[['a', 'a', 'b', 'b'],
[0, 1, 0, 1]])
cp = df.copy()
cp['a'] = cp['b']
assert_frame_equal(cp['a'], cp['b'])
# set with ndarray
cp = df.copy()
cp['a'] = cp['b'].values
assert_frame_equal(cp['a'], cp['b'])
#----------------------------------------
# #1803
columns = MultiIndex.from_tuples([('A', '1'), ('A', '2'), ('B', '1')])
df = DataFrame(index=[1, 3, 5], columns=columns)
# Works, but adds a column instead of updating the two existing ones
df['A'] = 0.0 # Doesn't work
self.assertTrue((df['A'].values == 0).all())
# it broadcasts
df['B', '1'] = [1, 2, 3]
df['A'] = df['B', '1']
assert_series_equal(df['A', '1'], df['B', '1'])
assert_series_equal(df['A', '2'], df['B', '1'])
def test_getitem_tuple_plus_slice(self):
# GH #671
df = DataFrame({'a': lrange(10),
'b': lrange(10),
'c': np.random.randn(10),
'd': np.random.randn(10)})
idf = df.set_index(['a', 'b'])
result = idf.ix[(0, 0), :]
expected = idf.ix[0, 0]
expected2 = idf.xs((0, 0))
assert_series_equal(result, expected)
assert_series_equal(result, expected2)
def test_getitem_setitem_tuple_plus_columns(self):
# GH #1013
df = self.ymd[:5]
result = df.ix[(2000, 1, 6), ['A', 'B', 'C']]
expected = df.ix[2000, 1, 6][['A', 'B', 'C']]
assert_series_equal(result, expected)
def test_getitem_multilevel_index_tuple_unsorted(self):
index_columns = list("abc")
df = DataFrame([[0, 1, 0, "x"], [0, 0, 1, "y"]],
columns=index_columns + ["data"])
df = df.set_index(index_columns)
query_index = df.index[:1]
rs = df.ix[query_index, "data"]
xp = Series(['x'], index=MultiIndex.from_tuples([(0, 1, 0)]))
assert_series_equal(rs, xp)
def test_xs(self):
xs = self.frame.xs(('bar', 'two'))
xs2 = self.frame.ix[('bar', 'two')]
assert_series_equal(xs, xs2)
assert_almost_equal(xs.values, self.frame.values[4])
# GH 6574
        # missing values in returned index should be preserved
acc = [
('a','abcde',1),
('b','bbcde',2),
('y','yzcde',25),
('z','xbcde',24),
('z',None,26),
('z','zbcde',25),
('z','ybcde',26),
]
df = DataFrame(acc, columns=['a1','a2','cnt']).set_index(['a1','a2'])
expected = DataFrame({ 'cnt' : [24,26,25,26] }, index=Index(['xbcde',np.nan,'zbcde','ybcde'],name='a2'))
result = df.xs('z',level='a1')
assert_frame_equal(result, expected)
def test_xs_partial(self):
result = self.frame.xs('foo')
result2 = self.frame.ix['foo']
expected = self.frame.T['foo'].T
assert_frame_equal(result, expected)
assert_frame_equal(result, result2)
result = self.ymd.xs((2000, 4))
expected = self.ymd.ix[2000, 4]
assert_frame_equal(result, expected)
# ex from #1796
index = MultiIndex(levels=[['foo', 'bar'], ['one', 'two'], [-1, 1]],
labels=[[0, 0, 0, 0, 1, 1, 1, 1],
[0, 0, 1, 1, 0, 0, 1, 1],
[0, 1, 0, 1, 0, 1, 0, 1]])
df = DataFrame(np.random.randn(8, 4), index=index,
columns=list('abcd'))
result = df.xs(['foo', 'one'])
expected = df.ix['foo', 'one']
assert_frame_equal(result, expected)
def test_xs_level(self):
result = self.frame.xs('two', level='second')
expected = self.frame[self.frame.index.get_level_values(1) == 'two']
expected.index = expected.index.droplevel(1)
assert_frame_equal(result, expected)
index = MultiIndex.from_tuples([('x', 'y', 'z'), ('a', 'b', 'c'),
('p', 'q', 'r')])
df = DataFrame(np.random.randn(3, 5), index=index)
result = df.xs('c', level=2)
expected = df[1:2]
expected.index = expected.index.droplevel(2)
assert_frame_equal(result, expected)
# this is a copy in 0.14
result = self.frame.xs('two', level='second')
# setting this will give a SettingWithCopyError
# as we are trying to write a view
def f(x):
x[:] = 10
self.assertRaises(com.SettingWithCopyError, f, result)
def test_xs_level_multiple(self):
from pandas import read_table
text = """ A B C D E
one two three four
a b 10.0032 5 -0.5109 -2.3358 -0.4645 0.05076 0.3640
a q 20 4 0.4473 1.4152 0.2834 1.00661 0.1744
x q 30 3 -0.6662 -0.5243 -0.3580 0.89145 2.5838"""
df = read_table(StringIO(text), sep='\s+', engine='python')
result = df.xs(('a', 4), level=['one', 'four'])
expected = df.xs('a').xs(4, level='four')
assert_frame_equal(result, expected)
# this is a copy in 0.14
result = df.xs(('a', 4), level=['one', 'four'])
# setting this will give a SettingWithCopyError
# as we are trying to write a view
def f(x):
x[:] = 10
self.assertRaises(com.SettingWithCopyError, f, result)
# GH2107
dates = lrange(20111201, 20111205)
ids = 'abcde'
idx = MultiIndex.from_tuples([x for x in cart_product(dates, ids)])
idx.names = ['date', 'secid']
df = DataFrame(np.random.randn(len(idx), 3), idx, ['X', 'Y', 'Z'])
rs = df.xs(20111201, level='date')
xp = df.ix[20111201, :]
assert_frame_equal(rs, xp)
def test_xs_level0(self):
from pandas import read_table
text = """ A B C D E
one two three four
a b 10.0032 5 -0.5109 -2.3358 -0.4645 0.05076 0.3640
a q 20 4 0.4473 1.4152 0.2834 1.00661 0.1744
x q 30 3 -0.6662 -0.5243 -0.3580 0.89145 2.5838"""
df = read_table(StringIO(text), sep='\s+', engine='python')
result = df.xs('a', level=0)
expected = df.xs('a')
self.assertEqual(len(result), 2)
assert_frame_equal(result, expected)
def test_xs_level_series(self):
s = self.frame['A']
result = s[:, 'two']
expected = self.frame.xs('two', level=1)['A']
assert_series_equal(result, expected)
s = self.ymd['A']
result = s[2000, 5]
expected = self.ymd.ix[2000, 5]['A']
assert_series_equal(result, expected)
# not implementing this for now
self.assertRaises(TypeError, s.__getitem__, (2000, slice(3, 4)))
# result = s[2000, 3:4]
# lv =s.index.get_level_values(1)
# expected = s[(lv == 3) | (lv == 4)]
# expected.index = expected.index.droplevel(0)
# assert_series_equal(result, expected)
# can do this though
def test_get_loc_single_level(self):
s = Series(np.random.randn(len(self.single_level)),
index=self.single_level)
for k in self.single_level.values:
s[k]
def test_getitem_toplevel(self):
df = self.frame.T
result = df['foo']
expected = df.reindex(columns=df.columns[:3])
expected.columns = expected.columns.droplevel(0)
assert_frame_equal(result, expected)
result = df['bar']
result2 = df.ix[:, 'bar']
expected = df.reindex(columns=df.columns[3:5])
expected.columns = expected.columns.droplevel(0)
assert_frame_equal(result, expected)
assert_frame_equal(result, result2)
def test_getitem_setitem_slice_integers(self):
index = MultiIndex(levels=[[0, 1, 2], [0, 2]],
labels=[[0, 0, 1, 1, 2, 2],
[0, 1, 0, 1, 0, 1]])
frame = DataFrame(np.random.randn(len(index), 4), index=index,
columns=['a', 'b', 'c', 'd'])
res = frame.ix[1:2]
exp = frame.reindex(frame.index[2:])
assert_frame_equal(res, exp)
frame.ix[1:2] = 7
self.assert_((frame.ix[1:2] == 7).values.all())
series = Series(np.random.randn(len(index)), index=index)
res = series.ix[1:2]
exp = series.reindex(series.index[2:])
assert_series_equal(res, exp)
series.ix[1:2] = 7
self.assert_((series.ix[1:2] == 7).values.all())
def test_getitem_int(self):
levels = [[0, 1], [0, 1, 2]]
labels = [[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]]
index = MultiIndex(levels=levels, labels=labels)
frame = DataFrame(np.random.randn(6, 2), index=index)
result = frame.ix[1]
expected = frame[-3:]
expected.index = expected.index.droplevel(0)
assert_frame_equal(result, expected)
# raises exception
self.assertRaises(KeyError, frame.ix.__getitem__, 3)
# however this will work
result = self.frame.ix[2]
expected = self.frame.xs(self.frame.index[2])
assert_series_equal(result, expected)
def test_getitem_partial(self):
ymd = self.ymd.T
result = ymd[2000, 2]
expected = ymd.reindex(columns=ymd.columns[ymd.columns.labels[1] == 1])
expected.columns = expected.columns.droplevel(0).droplevel(0)
assert_frame_equal(result, expected)
def test_getitem_slice_not_sorted(self):
df = self.frame.sortlevel(1).T
# buglet with int typechecking
result = df.ix[:, :np.int32(3)]
expected = df.reindex(columns=df.columns[:3])
assert_frame_equal(result, expected)
def test_setitem_change_dtype(self):
dft = self.frame.T
s = dft['foo', 'two']
dft['foo', 'two'] = s > s.median()
assert_series_equal(dft['foo', 'two'], s > s.median())
# tm.assert_isinstance(dft._data.blocks[1].items, MultiIndex)
reindexed = dft.reindex(columns=[('foo', 'two')])
assert_series_equal(reindexed['foo', 'two'], s > s.median())
def test_frame_setitem_ix(self):
self.frame.ix[('bar', 'two'), 'B'] = 5
self.assertEquals(self.frame.ix[('bar', 'two'), 'B'], 5)
# with integer labels
df = self.frame.copy()
df.columns = lrange(3)
df.ix[('bar', 'two'), 1] = 7
self.assertEquals(df.ix[('bar', 'two'), 1], 7)
def test_fancy_slice_partial(self):
result = self.frame.ix['bar':'baz']
expected = self.frame[3:7]
assert_frame_equal(result, expected)
result = self.ymd.ix[(2000, 2):(2000, 4)]
lev = self.ymd.index.labels[1]
expected = self.ymd[(lev >= 1) & (lev <= 3)]
assert_frame_equal(result, expected)
def test_getitem_partial_column_select(self):
idx = MultiIndex(labels=[[0, 0, 0], [0, 1, 1], [1, 0, 1]],
levels=[['a', 'b'], ['x', 'y'], ['p', 'q']])
df = DataFrame(np.random.rand(3, 2), index=idx)
result = df.ix[('a', 'y'), :]
expected = df.ix[('a', 'y')]
assert_frame_equal(result, expected)
result = df.ix[('a', 'y'), [1, 0]]
expected = df.ix[('a', 'y')][[1, 0]]
assert_frame_equal(result, expected)
self.assertRaises(KeyError, df.ix.__getitem__,
(('a', 'foo'), slice(None, None)))
def test_sortlevel(self):
df = self.frame.copy()
df.index = np.arange(len(df))
assertRaisesRegexp(TypeError, 'hierarchical index', df.sortlevel, 0)
# axis=1
# series
a_sorted = self.frame['A'].sortlevel(0)
with assertRaisesRegexp(TypeError, 'hierarchical index'):
self.frame.reset_index()['A'].sortlevel()
# preserve names
self.assertEquals(a_sorted.index.names, self.frame.index.names)
# inplace
rs = self.frame.copy()
rs.sortlevel(0, inplace=True)
assert_frame_equal(rs, self.frame.sortlevel(0))
def test_sortlevel_large_cardinality(self):
# #2684 (int64)
index = MultiIndex.from_arrays([np.arange(4000)]*3)
df = DataFrame(np.random.randn(4000), index=index, dtype = np.int64)
# it works!
result = df.sortlevel(0)
self.assertTrue(result.index.lexsort_depth == 3)
# #2684 (int32)
index = MultiIndex.from_arrays([np.arange(4000)]*3)
df = DataFrame(np.random.randn(4000), index=index, dtype = np.int32)
# it works!
result = df.sortlevel(0)
self.assert_((result.dtypes.values == df.dtypes.values).all() == True)
self.assertTrue(result.index.lexsort_depth == 3)
def test_delevel_infer_dtype(self):
tuples = [tuple for tuple in cart_product(['foo', 'bar'],
[10, 20], [1.0, 1.1])]
index = MultiIndex.from_tuples(tuples,
names=['prm0', 'prm1', 'prm2'])
df = DataFrame(np.random.randn(8, 3), columns=['A', 'B', 'C'],
index=index)
deleveled = df.reset_index()
self.assert_(com.is_integer_dtype(deleveled['prm1']))
self.assert_(com.is_float_dtype(deleveled['prm2']))
def test_reset_index_with_drop(self):
deleveled = self.ymd.reset_index(drop=True)
self.assertEquals(len(deleveled.columns), len(self.ymd.columns))
deleveled = self.series.reset_index()
tm.assert_isinstance(deleveled, DataFrame)
self.assertEqual(len(deleveled.columns),
len(self.series.index.levels) + 1)
deleveled = self.series.reset_index(drop=True)
tm.assert_isinstance(deleveled, Series)
def test_sortlevel_by_name(self):
self.frame.index.names = ['first', 'second']
result = self.frame.sortlevel(level='second')
expected = self.frame.sortlevel(level=1)
assert_frame_equal(result, expected)
def test_sortlevel_mixed(self):
sorted_before = self.frame.sortlevel(1)
df = self.frame.copy()
df['foo'] = 'bar'
sorted_after = df.sortlevel(1)
assert_frame_equal(sorted_before, sorted_after.drop(['foo'], axis=1))
dft = self.frame.T
sorted_before = dft.sortlevel(1, axis=1)
dft['foo', 'three'] = 'bar'
sorted_after = dft.sortlevel(1, axis=1)
assert_frame_equal(sorted_before.drop([('foo', 'three')], axis=1),
sorted_after.drop([('foo', 'three')], axis=1))
def test_count_level(self):
def _check_counts(frame, axis=0):
index = frame._get_axis(axis)
for i in range(index.nlevels):
result = frame.count(axis=axis, level=i)
expected = frame.groupby(axis=axis, level=i).count(axis=axis)
expected = expected.reindex_like(result).astype('i8')
assert_frame_equal(result, expected)
self.frame.ix[1, [1, 2]] = np.nan
self.frame.ix[7, [0, 1]] = np.nan
self.ymd.ix[1, [1, 2]] = np.nan
self.ymd.ix[7, [0, 1]] = np.nan
_check_counts(self.frame)
_check_counts(self.ymd)
_check_counts(self.frame.T, axis=1)
_check_counts(self.ymd.T, axis=1)
# can't call with level on regular DataFrame
df = tm.makeTimeDataFrame()
assertRaisesRegexp(TypeError, 'hierarchical', df.count, level=0)
self.frame['D'] = 'foo'
result = self.frame.count(level=0, numeric_only=True)
assert_almost_equal(result.columns, ['A', 'B', 'C'])
def test_count_level_series(self):
index = MultiIndex(levels=[['foo', 'bar', 'baz'],
['one', 'two', 'three', 'four']],
labels=[[0, 0, 0, 2, 2],
[2, 0, 1, 1, 2]])
s = Series(np.random.randn(len(index)), index=index)
result = s.count(level=0)
expected = s.groupby(level=0).count()
assert_series_equal(result.astype('f8'),
expected.reindex(result.index).fillna(0))
result = s.count(level=1)
expected = s.groupby(level=1).count()
assert_series_equal(result.astype('f8'),
expected.reindex(result.index).fillna(0))
def test_count_level_corner(self):
s = self.frame['A'][:0]
result = s.count(level=0)
expected = Series(0, index=s.index.levels[0])
assert_series_equal(result, expected)
df = self.frame[:0]
result = df.count(level=0)
expected = DataFrame({}, index=s.index.levels[0],
columns=df.columns).fillna(0).astype(np.int64)
assert_frame_equal(result, expected)
def test_unstack(self):
# just check that it works for now
unstacked = self.ymd.unstack()
unstacked2 = unstacked.unstack()
# test that ints work
unstacked = self.ymd.astype(int).unstack()
# test that int32 work
unstacked = self.ymd.astype(np.int32).unstack()
def test_unstack_multiple_no_empty_columns(self):
index = MultiIndex.from_tuples([(0, 'foo', 0), (0, 'bar', 0),
(1, 'baz', 1), (1, 'qux', 1)])
s = Series(np.random.randn(4), index=index)
unstacked = s.unstack([1, 2])
expected = unstacked.dropna(axis=1, how='all')
assert_frame_equal(unstacked, expected)
def test_stack(self):
# regular roundtrip
unstacked = self.ymd.unstack()
restacked = unstacked.stack()
assert_frame_equal(restacked, self.ymd)
unlexsorted = self.ymd.sortlevel(2)
unstacked = unlexsorted.unstack(2)
restacked = unstacked.stack()
assert_frame_equal(restacked.sortlevel(0), self.ymd)
unlexsorted = unlexsorted[::-1]
unstacked = unlexsorted.unstack(1)
restacked = unstacked.stack().swaplevel(1, 2)
assert_frame_equal(restacked.sortlevel(0), self.ymd)
unlexsorted = unlexsorted.swaplevel(0, 1)
unstacked = unlexsorted.unstack(0).swaplevel(0, 1, axis=1)
restacked = unstacked.stack(0).swaplevel(1, 2)
assert_frame_equal(restacked.sortlevel(0), self.ymd)
# columns unsorted
unstacked = self.ymd.unstack()
unstacked = unstacked.sort(axis=1, ascending=False)
restacked = unstacked.stack()
assert_frame_equal(restacked, self.ymd)
# more than 2 levels in the columns
unstacked = self.ymd.unstack(1).unstack(1)
result = unstacked.stack(1)
expected = self.ymd.unstack()
assert_frame_equal(result, expected)
result = unstacked.stack(2)
expected = self.ymd.unstack(1)
assert_frame_equal(result, expected)
result = unstacked.stack(0)
expected = self.ymd.stack().unstack(1).unstack(1)
assert_frame_equal(result, expected)
# not all levels present in each echelon
unstacked = self.ymd.unstack(2).ix[:, ::3]
stacked = unstacked.stack().stack()
ymd_stacked = self.ymd.stack()
assert_series_equal(stacked, ymd_stacked.reindex(stacked.index))
# stack with negative number
result = self.ymd.unstack(0).stack(-2)
expected = self.ymd.unstack(0).stack(0)
assert_frame_equal(result, expected)
def test_unstack_odd_failure(self):
data = """day,time,smoker,sum,len
Fri,Dinner,No,8.25,3.
Fri,Dinner,Yes,27.03,9
Fri,Lunch,No,3.0,1
Fri,Lunch,Yes,13.68,6
Sat,Dinner,No,139.63,45
Sat,Dinner,Yes,120.77,42
Sun,Dinner,No,180.57,57
Sun,Dinner,Yes,66.82,19
Thur,Dinner,No,3.0,1
Thur,Lunch,No,117.32,44
Thur,Lunch,Yes,51.51,17"""
df = pd.read_csv(StringIO(data)).set_index(['day', 'time', 'smoker'])
# it works, #2100
result = df.unstack(2)
recons = result.stack()
assert_frame_equal(recons, df)
def test_stack_mixed_dtype(self):
df = self.frame.T
df['foo', 'four'] = 'foo'
df = df.sortlevel(1, axis=1)
stacked = df.stack()
assert_series_equal(stacked['foo'], df['foo'].stack())
self.assertEqual(stacked['bar'].dtype, np.float_)
def test_unstack_bug(self):
df = DataFrame({'state': ['naive', 'naive', 'naive',
'activ', 'activ', 'activ'],
'exp': ['a', 'b', 'b', 'b', 'a', 'a'],
'barcode': [1, 2, 3, 4, 1, 3],
'v': ['hi', 'hi', 'bye', 'bye', 'bye', 'peace'],
'extra': np.arange(6.)})
result = df.groupby(['state', 'exp', 'barcode', 'v']).apply(len)
unstacked = result.unstack()
restacked = unstacked.stack()
assert_series_equal(restacked,
result.reindex(restacked.index).astype(float))
def test_stack_unstack_preserve_names(self):
unstacked = self.frame.unstack()
self.assertEquals(unstacked.index.name, 'first')
self.assertEquals(unstacked.columns.names, ['exp', 'second'])
restacked = unstacked.stack()
self.assertEquals(restacked.index.names, self.frame.index.names)
def test_unstack_level_name(self):
result = self.frame.unstack('second')
expected = self.frame.unstack(level=1)
assert_frame_equal(result, expected)
def test_stack_level_name(self):
unstacked = self.frame.unstack('second')
result = unstacked.stack('exp')
expected = self.frame.unstack().stack(0)
assert_frame_equal(result, expected)
result = self.frame.stack('exp')
expected = self.frame.stack()
assert_series_equal(result, expected)
def test_stack_unstack_multiple(self):
unstacked = self.ymd.unstack(['year', 'month'])
expected = self.ymd.unstack('year').unstack('month')
assert_frame_equal(unstacked, expected)
self.assertEquals(unstacked.columns.names,
expected.columns.names)
# series
s = self.ymd['A']
s_unstacked = s.unstack(['year', 'month'])
assert_frame_equal(s_unstacked, expected['A'])
restacked = unstacked.stack(['year', 'month'])
restacked = restacked.swaplevel(0, 1).swaplevel(1, 2)
restacked = restacked.sortlevel(0)
assert_frame_equal(restacked, self.ymd)
self.assertEquals(restacked.index.names, self.ymd.index.names)
# GH #451
unstacked = self.ymd.unstack([1, 2])
expected = self.ymd.unstack(1).unstack(1).dropna(axis=1, how='all')
assert_frame_equal(unstacked, expected)
unstacked = self.ymd.unstack([2, 1])
expected = self.ymd.unstack(2).unstack(1).dropna(axis=1, how='all')
assert_frame_equal(unstacked, expected.ix[:, unstacked.columns])
def test_unstack_period_series(self):
# GH 4342
idx1 = pd.PeriodIndex(['2013-01', '2013-01', '2013-02', '2013-02',
'2013-03', '2013-03'], freq='M', name='period')
idx2 = Index(['A', 'B'] * 3, name='str')
value = [1, 2, 3, 4, 5, 6]
idx = MultiIndex.from_arrays([idx1, idx2])
s = Series(value, index=idx)
result1 = s.unstack()
result2 = s.unstack(level=1)
result3 = s.unstack(level=0)
e_idx = pd.PeriodIndex(['2013-01', '2013-02', '2013-03'], freq='M', name='period')
expected = DataFrame({'A': [1, 3, 5], 'B': [2, 4, 6]}, index=e_idx,
columns=['A', 'B'])
expected.columns.name = 'str'
assert_frame_equal(result1, expected)
assert_frame_equal(result2, expected)
assert_frame_equal(result3, expected.T)
idx1 = pd.PeriodIndex(['2013-01', '2013-01', '2013-02', '2013-02',
'2013-03', '2013-03'], freq='M', name='period1')
idx2 = pd.PeriodIndex(['2013-12', '2013-11', '2013-10', '2013-09',
'2013-08', '2013-07'], freq='M', name='period2')
idx = pd.MultiIndex.from_arrays([idx1, idx2])
s = Series(value, index=idx)
result1 = s.unstack()
result2 = s.unstack(level=1)
result3 = s.unstack(level=0)
e_idx = pd.PeriodIndex(['2013-01', '2013-02', '2013-03'], freq='M', name='period1')
e_cols = pd.PeriodIndex(['2013-07', '2013-08', '2013-09', '2013-10',
'2013-11', '2013-12'], freq='M', name='period2')
expected = DataFrame([[np.nan, np.nan, np.nan, np.nan, 2, 1],
[np.nan, np.nan, 4, 3, np.nan, np.nan],
[6, 5, np.nan, np.nan, np.nan, np.nan]],
index=e_idx, columns=e_cols)
assert_frame_equal(result1, expected)
assert_frame_equal(result2, expected)
assert_frame_equal(result3, expected.T)
def test_unstack_period_frame(self):
# GH 4342
idx1 = pd.PeriodIndex(['2014-01', '2014-02', '2014-02', '2014-02', '2014-01', '2014-01'],
freq='M', name='period1')
idx2 = pd.PeriodIndex(['2013-12', '2013-12', '2014-02', '2013-10', '2013-10', '2014-02'],
freq='M', name='period2')
value = {'A': [1, 2, 3, 4, 5, 6], 'B': [6, 5, 4, 3, 2, 1]}
idx = pd.MultiIndex.from_arrays([idx1, idx2])
df = pd.DataFrame(value, index=idx)
result1 = df.unstack()
result2 = df.unstack(level=1)
result3 = df.unstack(level=0)
e_1 = pd.PeriodIndex(['2014-01', '2014-02'], freq='M', name='period1')
e_2 = pd.PeriodIndex(['2013-10', '2013-12', '2014-02', '2013-10',
'2013-12', '2014-02'], freq='M', name='period2')
e_cols = pd.MultiIndex.from_arrays(['A A A B B B'.split(), e_2])
expected = DataFrame([[5, 1, 6, 2, 6, 1], [4, 2, 3, 3, 5, 4]],
index=e_1, columns=e_cols)
assert_frame_equal(result1, expected)
assert_frame_equal(result2, expected)
e_1 = pd.PeriodIndex(['2014-01', '2014-02', '2014-01',
'2014-02'], freq='M', name='period1')
e_2 = pd.PeriodIndex(['2013-10', '2013-12', '2014-02'], freq='M', name='period2')
e_cols = pd.MultiIndex.from_arrays(['A A B B'.split(), e_1])
expected = DataFrame([[5, 4, 2, 3], [1, 2, 6, 5], [6, 3, 1, 4]],
index=e_2, columns=e_cols)
assert_frame_equal(result3, expected)
def test_stack_multiple_bug(self):
""" bug when some uniques are not present in the data #3170"""
id_col = ([1] * 3) + ([2] * 3)
name = (['a'] * 3) + (['b'] * 3)
date = pd.to_datetime(['2013-01-03', '2013-01-04', '2013-01-05'] * 2)
var1 = np.random.randint(0, 100, 6)
df = DataFrame(dict(ID=id_col, NAME=name, DATE=date, VAR1=var1))
multi = df.set_index(['DATE', 'ID'])
multi.columns.name = 'Params'
unst = multi.unstack('ID')
down = unst.resample('W-THU')
rs = down.stack('ID')
xp = unst.ix[:, ['VAR1']].resample('W-THU').stack('ID')
xp.columns.name = 'Params'
assert_frame_equal(rs, xp)
def test_stack_dropna(self):
# GH #3997
df = pd.DataFrame({'A': ['a1', 'a2'],
'B': ['b1', 'b2'],
'C': [1, 1]})
df = df.set_index(['A', 'B'])
stacked = df.unstack().stack(dropna=False)
self.assertTrue(len(stacked) > len(stacked.dropna()))
stacked = df.unstack().stack(dropna=True)
assert_frame_equal(stacked, stacked.dropna())
def test_unstack_multiple_hierarchical(self):
df = DataFrame(index=[[0, 0, 0, 0, 1, 1, 1, 1],
[0, 0, 1, 1, 0, 0, 1, 1],
[0, 1, 0, 1, 0, 1, 0, 1]],
columns=[[0, 0, 1, 1], [0, 1, 0, 1]])
df.index.names = ['a', 'b', 'c']
df.columns.names = ['d', 'e']
# it works!
df.unstack(['b', 'c'])
def test_groupby_transform(self):
s = self.frame['A']
grouper = s.index.get_level_values(0)
grouped = s.groupby(grouper)
applied = grouped.apply(lambda x: x * 2)
expected = grouped.transform(lambda x: x * 2)
assert_series_equal(applied.reindex(expected.index), expected)
def test_unstack_sparse_keyspace(self):
# memory problems with naive impl #2278
# Generate Long File & Test Pivot
NUM_ROWS = 1000
df = DataFrame({'A': np.random.randint(100, size=NUM_ROWS),
'B': np.random.randint(300, size=NUM_ROWS),
'C': np.random.randint(-7, 7, size=NUM_ROWS),
'D': np.random.randint(-19, 19, size=NUM_ROWS),
'E': np.random.randint(3000, size=NUM_ROWS),
'F': np.random.randn(NUM_ROWS)})
idf = df.set_index(['A', 'B', 'C', 'D', 'E'])
# it works! is sufficient
idf.unstack('E')
def test_unstack_unobserved_keys(self):
# related to #2278 refactoring
levels = [[0, 1], [0, 1, 2, 3]]
labels = [[0, 0, 1, 1], [0, 2, 0, 2]]
index = MultiIndex(levels, labels)
df = DataFrame(np.random.randn(4, 2), index=index)
result = df.unstack()
self.assertEquals(len(result.columns), 4)
recons = result.stack()
assert_frame_equal(recons, df)
def test_groupby_corner(self):
midx = MultiIndex(levels=[['foo'], ['bar'], ['baz']],
labels=[[0], [0], [0]], names=['one', 'two', 'three'])
df = DataFrame([np.random.rand(4)], columns=['a', 'b', 'c', 'd'],
index=midx)
# should work
df.groupby(level='three')
def test_groupby_level_no_obs(self):
# #1697
midx = MultiIndex.from_tuples([('f1', 's1'), ('f1', 's2'),
('f2', 's1'), ('f2', 's2'),
('f3', 's1'), ('f3', 's2')])
df = DataFrame(
[[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]], columns=midx)
df1 = df.select(lambda u: u[0] in ['f2', 'f3'], axis=1)
grouped = df1.groupby(axis=1, level=0)
result = grouped.sum()
self.assert_((result.columns == ['f2', 'f3']).all())
def test_join(self):
a = self.frame.ix[:5, ['A']]
b = self.frame.ix[2:, ['B', 'C']]
joined = a.join(b, how='outer').reindex(self.frame.index)
expected = self.frame.copy()
expected.values[np.isnan(joined.values)] = np.nan
self.assert_(not np.isnan(joined.values).all())
assert_frame_equal(joined, expected, check_names=False) # TODO what should join do with names ?
def test_swaplevel(self):
swapped = self.frame['A'].swaplevel(0, 1)
swapped2 = self.frame['A'].swaplevel('first', 'second')
self.assert_(not swapped.index.equals(self.frame.index))
assert_series_equal(swapped, swapped2)
back = swapped.swaplevel(0, 1)
back2 = swapped.swaplevel('second', 'first')
self.assert_(back.index.equals(self.frame.index))
assert_series_equal(back, back2)
ft = self.frame.T
swapped = ft.swaplevel('first', 'second', axis=1)
exp = self.frame.swaplevel('first', 'second').T
assert_frame_equal(swapped, exp)
def test_swaplevel_panel(self):
panel = Panel({'ItemA': self.frame,
'ItemB': self.frame * 2})
result = panel.swaplevel(0, 1, axis='major')
expected = panel.copy()
expected.major_axis = expected.major_axis.swaplevel(0, 1)
tm.assert_panel_equal(result, expected)
def test_reorder_levels(self):
result = self.ymd.reorder_levels(['month', 'day', 'year'])
expected = self.ymd.swaplevel(0, 1).swaplevel(1, 2)
assert_frame_equal(result, expected)
result = self.ymd['A'].reorder_levels(['month', 'day', 'year'])
expected = self.ymd['A'].swaplevel(0, 1).swaplevel(1, 2)
assert_series_equal(result, expected)
result = self.ymd.T.reorder_levels(['month', 'day', 'year'], axis=1)
expected = self.ymd.T.swaplevel(0, 1, axis=1).swaplevel(1, 2, axis=1)
assert_frame_equal(result, expected)
with assertRaisesRegexp(TypeError, 'hierarchical axis'):
self.ymd.reorder_levels([1, 2], axis=1)
with assertRaisesRegexp(IndexError, 'Too many levels'):
self.ymd.index.reorder_levels([1, 2, 3])
def test_insert_index(self):
df = self.ymd[:5].T
df[2000, 1, 10] = df[2000, 1, 7]
tm.assert_isinstance(df.columns, MultiIndex)
self.assert_((df[2000, 1, 10] == df[2000, 1, 7]).all())
def test_alignment(self):
x = Series(data=[1, 2, 3],
index=MultiIndex.from_tuples([("A", 1), ("A", 2), ("B", 3)]))
y = Series(data=[4, 5, 6],
index=MultiIndex.from_tuples([("Z", 1), ("Z", 2), ("B", 3)]))
res = x - y
exp_index = x.index.union(y.index)
exp = x.reindex(exp_index) - y.reindex(exp_index)
assert_series_equal(res, exp)
# hit non-monotonic code path
res = x[::-1] - y[::-1]
exp_index = x.index.union(y.index)
exp = x.reindex(exp_index) - y.reindex(exp_index)
assert_series_equal(res, exp)
def test_is_lexsorted(self):
levels = [[0, 1], [0, 1, 2]]
index = MultiIndex(levels=levels,
labels=[[0, 0, 0, 1, 1, 1],
[0, 1, 2, 0, 1, 2]])
self.assert_(index.is_lexsorted())
index = MultiIndex(levels=levels,
labels=[[0, 0, 0, 1, 1, 1],
[0, 1, 2, 0, 2, 1]])
self.assert_(not index.is_lexsorted())
index = MultiIndex(levels=levels,
labels=[[0, 0, 1, 0, 1, 1],
[0, 1, 0, 2, 2, 1]])
self.assert_(not index.is_lexsorted())
self.assertEqual(index.lexsort_depth, 0)
def test_frame_getitem_view(self):
df = self.frame.T.copy()
# this works because we are modifying the underlying array
# really a no-no
df['foo'].values[:] = 0
self.assert_((df['foo'].values == 0).all())
# but not if it's mixed-type
df['foo', 'four'] = 'foo'
df = df.sortlevel(0, axis=1)
# this will work, but will raise/warn as it's a chained assignment
def f():
df['foo']['one'] = 2
return df
self.assertRaises(com.SettingWithCopyError, f)
try:
df = f()
except:
pass
self.assert_((df['foo', 'one'] == 0).all())
def test_frame_getitem_not_sorted(self):
df = self.frame.T
df['foo', 'four'] = 'foo'
arrays = [np.array(x) for x in zip(*df.columns._tuple_index)]
result = df['foo']
result2 = df.ix[:, 'foo']
expected = df.reindex(columns=df.columns[arrays[0] == 'foo'])
expected.columns = expected.columns.droplevel(0)
assert_frame_equal(result, expected)
assert_frame_equal(result2, expected)
df = df.T
result = df.xs('foo')
result2 = df.ix['foo']
expected = df.reindex(df.index[arrays[0] == 'foo'])
expected.index = expected.index.droplevel(0)
assert_frame_equal(result, expected)
assert_frame_equal(result2, expected)
def test_series_getitem_not_sorted(self):
arrays = [['bar', 'bar', 'baz', 'baz', 'qux', 'qux', 'foo', 'foo'],
['one', 'two', 'one', 'two', 'one', 'two', 'one', 'two']]
tuples = lzip(*arrays)
index = MultiIndex.from_tuples(tuples)
s = Series(randn(8), index=index)
arrays = [np.array(x) for x in zip(*index._tuple_index)]
result = s['qux']
result2 = s.ix['qux']
expected = s[arrays[0] == 'qux']
expected.index = expected.index.droplevel(0)
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
def test_count(self):
frame = self.frame.copy()
frame.index.names = ['a', 'b']
result = frame.count(level='b')
expect = self.frame.count(level=1)
assert_frame_equal(result, expect, check_names=False)
result = frame.count(level='a')
expect = self.frame.count(level=0)
assert_frame_equal(result, expect, check_names=False)
series = self.series.copy()
series.index.names = ['a', 'b']
result = series.count(level='b')
expect = self.series.count(level=1)
assert_series_equal(result, expect)
result = series.count(level='a')
expect = self.series.count(level=0)
assert_series_equal(result, expect)
self.assertRaises(KeyError, series.count, 'x')
self.assertRaises(KeyError, frame.count, level='x')
AGG_FUNCTIONS = ['sum', 'prod', 'min', 'max', 'median', 'mean', 'skew',
'mad', 'std', 'var']
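# each reduction above is exercised per index level, with and without skipna,
# in the grouped-aggregation tests below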
def test_series_group_min_max(self):
for op, level, skipna in cart_product(self.AGG_FUNCTIONS,
lrange(2),
[False, True]):
grouped = self.series.groupby(level=level)
aggf = lambda x: getattr(x, op)(skipna=skipna)
# skipna=True
leftside = grouped.agg(aggf)
rightside = getattr(self.series, op)(level=level, skipna=skipna)
assert_series_equal(leftside, rightside)
def test_frame_group_ops(self):
self.frame.ix[1, [1, 2]] = np.nan
self.frame.ix[7, [0, 1]] = np.nan
for op, level, axis, skipna in cart_product(self.AGG_FUNCTIONS,
lrange(2), lrange(2),
[False, True]):
if axis == 0:
frame = self.frame
else:
frame = self.frame.T
grouped = frame.groupby(level=level, axis=axis)
pieces = []
def aggf(x):
pieces.append(x)
return getattr(x, op)(skipna=skipna, axis=axis)
leftside = grouped.agg(aggf)
rightside = getattr(frame, op)(level=level, axis=axis,
skipna=skipna)
# for good measure, groupby detail
level_index = frame._get_axis(axis).levels[level]
self.assert_(leftside._get_axis(axis).equals(level_index))
self.assert_(rightside._get_axis(axis).equals(level_index))
assert_frame_equal(leftside, rightside)
def test_stat_op_corner(self):
obj = Series([10.0], index=MultiIndex.from_tuples([(2, 3)]))
result = obj.sum(level=0)
expected = Series([10.0], index=[2])
assert_series_equal(result, expected)
def test_frame_any_all_group(self):
df = DataFrame(
{'data': [False, False, True, False, True, False, True]},
index=[
['one', 'one', 'two', 'one', 'two', 'two', 'two'],
[0, 1, 0, 2, 1, 2, 3]])
result = df.any(level=0)
ex = DataFrame({'data': [False, True]}, index=['one', 'two'])
assert_frame_equal(result, ex)
result = df.all(level=0)
ex = DataFrame({'data': [False, False]}, index=['one', 'two'])
assert_frame_equal(result, ex)
def test_std_var_pass_ddof(self):
index = MultiIndex.from_arrays([np.arange(5).repeat(10),
np.tile(np.arange(10), 5)])
df = DataFrame(np.random.randn(len(index), 5), index=index)
for meth in ['var', 'std']:
ddof = 4
alt = lambda x: getattr(x, meth)(ddof=ddof)
result = getattr(df[0], meth)(level=0, ddof=ddof)
expected = df[0].groupby(level=0).agg(alt)
assert_series_equal(result, expected)
result = getattr(df, meth)(level=0, ddof=ddof)
expected = df.groupby(level=0).agg(alt)
assert_frame_equal(result, expected)
def test_frame_series_agg_multiple_levels(self):
result = self.ymd.sum(level=['year', 'month'])
expected = self.ymd.groupby(level=['year', 'month']).sum()
assert_frame_equal(result, expected)
result = self.ymd['A'].sum(level=['year', 'month'])
expected = self.ymd['A'].groupby(level=['year', 'month']).sum()
assert_series_equal(result, expected)
def test_groupby_multilevel(self):
result = self.ymd.groupby(level=[0, 1]).mean()
k1 = self.ymd.index.get_level_values(0)
k2 = self.ymd.index.get_level_values(1)
expected = self.ymd.groupby([k1, k2]).mean()
assert_frame_equal(result, expected, check_names=False) # TODO groupby with level_values drops names
self.assertEquals(result.index.names, self.ymd.index.names[:2])
result2 = self.ymd.groupby(level=self.ymd.index.names[:2]).mean()
assert_frame_equal(result, result2)
def test_groupby_multilevel_with_transform(self):
pass
def test_multilevel_consolidate(self):
index = MultiIndex.from_tuples([('foo', 'one'), ('foo', 'two'),
('bar', 'one'), ('bar', 'two')])
df = DataFrame(np.random.randn(4, 4), index=index, columns=index)
df['Totals', ''] = df.sum(1)
df = df.consolidate()
def test_ix_preserve_names(self):
result = self.ymd.ix[2000]
result2 = self.ymd['A'].ix[2000]
self.assertEquals(result.index.names, self.ymd.index.names[1:])
self.assertEquals(result2.index.names, self.ymd.index.names[1:])
result = self.ymd.ix[2000, 2]
result2 = self.ymd['A'].ix[2000, 2]
self.assertEquals(result.index.name, self.ymd.index.names[2])
self.assertEquals(result2.index.name, self.ymd.index.names[2])
def test_partial_set(self):
# GH #397
df = self.ymd.copy()
exp = self.ymd.copy()
df.ix[2000, 4] = 0
exp.ix[2000, 4].values[:] = 0
assert_frame_equal(df, exp)
df['A'].ix[2000, 4] = 1
exp['A'].ix[2000, 4].values[:] = 1
assert_frame_equal(df, exp)
df.ix[2000] = 5
exp.ix[2000].values[:] = 5
assert_frame_equal(df, exp)
# this works...for now
df['A'].ix[14] = 5
self.assertEquals(df['A'][14], 5)
def test_unstack_preserve_types(self):
# GH #403
self.ymd['E'] = 'foo'
self.ymd['F'] = 2
unstacked = self.ymd.unstack('month')
self.assertEqual(unstacked['A', 1].dtype, np.float64)
self.assertEqual(unstacked['E', 1].dtype, np.object_)
self.assertEqual(unstacked['F', 1].dtype, np.float64)
def test_unstack_group_index_overflow(self):
labels = np.tile(np.arange(500), 2)
level = np.arange(500)
index = MultiIndex(levels=[level] * 8 + [[0, 1]],
labels=[labels] * 8 + [np.arange(2).repeat(500)])
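# 8 levels of 500 labels give 500**8 possible group keys, far more than an
# int64 can represent, which is the overflow condition this test exercises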
s = Series(np.arange(1000), index=index)
result = s.unstack()
self.assertEqual(result.shape, (500, 2))
# test roundtrip
stacked = result.stack()
assert_series_equal(s,
stacked.reindex(s.index))
# put it at beginning
index = MultiIndex(levels=[[0, 1]] + [level] * 8,
labels=[np.arange(2).repeat(500)] + [labels] * 8)
s = Series(np.arange(1000), index=index)
result = s.unstack(0)
self.assertEqual(result.shape, (500, 2))
# put it in middle
index = MultiIndex(levels=[level] * 4 + [[0, 1]] + [level] * 4,
labels=([labels] * 4 + [np.arange(2).repeat(500)]
+ [labels] * 4))
s = Series(np.arange(1000), index=index)
result = s.unstack(4)
self.assertEqual(result.shape, (500, 2))
def test_getitem_lowerdim_corner(self):
self.assertRaises(KeyError, self.frame.ix.__getitem__,
(('bar', 'three'), 'B'))
# in theory should be inserting in a sorted space????
self.frame.ix[('bar','three'),'B'] = 0
self.assertEqual(self.frame.sortlevel().ix[('bar','three'),'B'], 0)
#----------------------------------------------------------------------
# AMBIGUOUS CASES!
def test_partial_ix_missing(self):
raise nose.SkipTest("skipping for now")
result = self.ymd.ix[2000, 0]
expected = self.ymd.ix[2000]['A']
assert_series_equal(result, expected)
# need to put in some work here
# self.ymd.ix[2000, 0] = 0
# self.assert_((self.ymd.ix[2000]['A'] == 0).all())
# Pretty sure the second (and maybe even the first) is already wrong.
self.assertRaises(Exception, self.ymd.ix.__getitem__, (2000, 6))
self.assertRaises(Exception, self.ymd.ix.__getitem__, (2000, 6), 0)
#----------------------------------------------------------------------
def test_to_html(self):
self.ymd.columns.name = 'foo'
self.ymd.to_html()
self.ymd.T.to_html()
def test_level_with_tuples(self):
index = MultiIndex(levels=[[('foo', 'bar', 0), ('foo', 'baz', 0),
('foo', 'qux', 0)],
[0, 1]],
labels=[[0, 0, 1, 1, 2, 2], [0, 1, 0, 1, 0, 1]])
series = Series(np.random.randn(6), index=index)
frame = DataFrame(np.random.randn(6, 4), index=index)
result = series[('foo', 'bar', 0)]
result2 = series.ix[('foo', 'bar', 0)]
expected = series[:2]
expected.index = expected.index.droplevel(0)
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
self.assertRaises(KeyError, series.__getitem__, (('foo', 'bar', 0), 2))
result = frame.ix[('foo', 'bar', 0)]
result2 = frame.xs(('foo', 'bar', 0))
expected = frame[:2]
expected.index = expected.index.droplevel(0)
assert_frame_equal(result, expected)
assert_frame_equal(result2, expected)
index = MultiIndex(levels=[[('foo', 'bar'), ('foo', 'baz'),
('foo', 'qux')],
[0, 1]],
labels=[[0, 0, 1, 1, 2, 2], [0, 1, 0, 1, 0, 1]])
series = Series(np.random.randn(6), index=index)
frame = DataFrame(np.random.randn(6, 4), index=index)
result = series[('foo', 'bar')]
result2 = series.ix[('foo', 'bar')]
expected = series[:2]
expected.index = expected.index.droplevel(0)
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
result = frame.ix[('foo', 'bar')]
result2 = frame.xs(('foo', 'bar'))
expected = frame[:2]
expected.index = expected.index.droplevel(0)
assert_frame_equal(result, expected)
assert_frame_equal(result2, expected)
def test_int_series_slicing(self):
s = self.ymd['A']
result = s[5:]
expected = s.reindex(s.index[5:])
assert_series_equal(result, expected)
exp = self.ymd['A'].copy()
s[5:] = 0
exp.values[5:] = 0
self.assert_numpy_array_equal(s.values, exp.values)
result = self.ymd[5:]
expected = self.ymd.reindex(s.index[5:])
assert_frame_equal(result, expected)
def test_mixed_depth_get(self):
arrays = [['a', 'top', 'top', 'routine1', 'routine1', 'routine2'],
['', 'OD', 'OD', 'result1', 'result2', 'result1'],
['', 'wx', 'wy', '', '', '']]
tuples = sorted(zip(*arrays))
index = MultiIndex.from_tuples(tuples)
df = DataFrame(randn(4, 6), columns=index)
result = df['a']
expected = df['a', '', '']
assert_series_equal(result, expected)
self.assertEquals(result.name, 'a')
result = df['routine1', 'result1']
expected = df['routine1', 'result1', '']
assert_series_equal(result, expected)
self.assertEquals(result.name, ('routine1', 'result1'))
def test_mixed_depth_insert(self):
arrays = [['a', 'top', 'top', 'routine1', 'routine1', 'routine2'],
['', 'OD', 'OD', 'result1', 'result2', 'result1'],
['', 'wx', 'wy', '', '', '']]
tuples = sorted(zip(*arrays))
index = MultiIndex.from_tuples(tuples)
df = DataFrame(randn(4, 6), columns=index)
result = df.copy()
expected = df.copy()
result['b'] = [1, 2, 3, 4]
expected['b', '', ''] = [1, 2, 3, 4]
assert_frame_equal(result, expected)
def test_mixed_depth_drop(self):
arrays = [['a', 'top', 'top', 'routine1', 'routine1', 'routine2'],
['', 'OD', 'OD', 'result1', 'result2', 'result1'],
['', 'wx', 'wy', '', '', '']]
tuples = sorted(zip(*arrays))
index = MultiIndex.from_tuples(tuples)
df = DataFrame(randn(4, 6), columns=index)
result = df.drop('a', axis=1)
expected = df.drop([('a', '', '')], axis=1)
assert_frame_equal(expected, result)
result = df.drop(['top'], axis=1)
expected = df.drop([('top', 'OD', 'wx')], axis=1)
expected = expected.drop([('top', 'OD', 'wy')], axis=1)
assert_frame_equal(expected, result)
result = df.drop(('top', 'OD', 'wx'), axis=1)
expected = df.drop([('top', 'OD', 'wx')], axis=1)
assert_frame_equal(expected, result)
expected = df.drop([('top', 'OD', 'wy')], axis=1)
expected = df.drop('top', axis=1)
result = df.drop('result1', level=1, axis=1)
expected = df.drop([('routine1', 'result1', ''),
('routine2', 'result1', '')], axis=1)
assert_frame_equal(expected, result)
def test_drop_nonunique(self):
df = DataFrame([["x-a", "x", "a", 1.5], ["x-a", "x", "a", 1.2],
["z-c", "z", "c", 3.1], ["x-a", "x", "a", 4.1],
["x-b", "x", "b", 5.1], ["x-b", "x", "b", 4.1],
["x-b", "x", "b", 2.2],
["y-a", "y", "a", 1.2], ["z-b", "z", "b", 2.1]],
columns=["var1", "var2", "var3", "var4"])
grp_size = df.groupby("var1").size()
drop_idx = grp_size.ix[grp_size == 1]
idf = df.set_index(["var1", "var2", "var3"])
# it works! #2101
result = idf.drop(drop_idx.index, level=0).reset_index()
expected = df[-df.var1.isin(drop_idx.index)]
result.index = expected.index
assert_frame_equal(result, expected)
def test_mixed_depth_pop(self):
arrays = [['a', 'top', 'top', 'routine1', 'routine1', 'routine2'],
['', 'OD', 'OD', 'result1', 'result2', 'result1'],
['', 'wx', 'wy', '', '', '']]
tuples = sorted(zip(*arrays))
index = MultiIndex.from_tuples(tuples)
df = DataFrame(randn(4, 6), columns=index)
df1 = df.copy()
df2 = df.copy()
result = df1.pop('a')
expected = df2.pop(('a', '', ''))
assert_series_equal(expected, result)
assert_frame_equal(df1, df2)
self.assertEquals(result.name, 'a')
expected = df1['top']
df1 = df1.drop(['top'], axis=1)
result = df2.pop('top')
assert_frame_equal(expected, result)
assert_frame_equal(df1, df2)
def test_reindex_level_partial_selection(self):
result = self.frame.reindex(['foo', 'qux'], level=0)
expected = self.frame.ix[[0, 1, 2, 7, 8, 9]]
assert_frame_equal(result, expected)
result = self.frame.T.reindex_axis(['foo', 'qux'], axis=1, level=0)
assert_frame_equal(result, expected.T)
result = self.frame.ix[['foo', 'qux']]
assert_frame_equal(result, expected)
result = self.frame['A'].ix[['foo', 'qux']]
assert_series_equal(result, expected['A'])
result = self.frame.T.ix[:, ['foo', 'qux']]
| assert_frame_equal(result, expected.T) | pandas.util.testing.assert_frame_equal |
from __future__ import annotations
from datetime import timedelta
import operator
from sys import getsizeof
from typing import (
TYPE_CHECKING,
Any,
Callable,
Hashable,
List,
cast,
)
import warnings
import numpy as np
from pandas._libs import index as libindex
from pandas._libs.lib import no_default
from pandas._typing import Dtype
from pandas.compat.numpy import function as nv
from pandas.util._decorators import (
cache_readonly,
doc,
)
from pandas.util._exceptions import rewrite_exception
from pandas.core.dtypes.common import (
ensure_platform_int,
ensure_python_int,
is_float,
is_integer,
is_scalar,
is_signed_integer_dtype,
is_timedelta64_dtype,
)
from pandas.core.dtypes.generic import ABCTimedeltaIndex
from pandas.core import ops
import pandas.core.common as com
from pandas.core.construction import extract_array
import pandas.core.indexes.base as ibase
from pandas.core.indexes.base import maybe_extract_name
from pandas.core.indexes.numeric import (
Float64Index,
Int64Index,
NumericIndex,
)
from pandas.core.ops.common import unpack_zerodim_and_defer
if TYPE_CHECKING:
from pandas import Index
_empty_range = range(0)
class RangeIndex(NumericIndex):
"""
Immutable Index implementing a monotonic integer range.
RangeIndex is a memory-saving special case of Int64Index limited to
representing monotonic ranges. Using RangeIndex may in some instances
improve computing speed.
This is the default index type used
by DataFrame and Series when no explicit index is provided by the user.
Parameters
----------
start : int (default: 0), range, or other RangeIndex instance
If int and "stop" is not given, interpreted as "stop" instead.
stop : int (default: 0)
step : int (default: 1)
dtype : np.int64
Unused, accepted for homogeneity with other index types.
copy : bool, default False
Unused, accepted for homogeneity with other index types.
name : object, optional
Name to be stored in the index.
Attributes
----------
start
stop
step
Methods
-------
from_range
See Also
--------
Index : The base pandas Index type.
Int64Index : Index of int64 data.
"""
_typ = "rangeindex"
_engine_type = libindex.Int64Engine
_dtype_validation_metadata = (is_signed_integer_dtype, "signed integer")
_can_hold_na = False
_range: range
# --------------------------------------------------------------------
# Constructors
def __new__(
cls,
start=None,
stop=None,
step=None,
dtype: Dtype | None = None,
copy: bool = False,
name: Hashable = None,
) -> RangeIndex:
cls._validate_dtype(dtype)
name = maybe_extract_name(name, start, cls)
# RangeIndex
if isinstance(start, RangeIndex):
return start.copy(name=name)
elif isinstance(start, range):
return cls._simple_new(start, name=name)
# validate the arguments
if | com.all_none(start, stop, step) | pandas.core.common.all_none |
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import os
from copy import deepcopy
import numpy as np
import pandas as pd
from pandas import DataFrame, Series
import unittest
import nose
from numpy.testing import assert_almost_equal, assert_allclose
from numpy.testing.decorators import slow
from pandas.util.testing import (assert_series_equal, assert_frame_equal,
assert_almost_equal)
import trackpy as tp
from trackpy.try_numba import NUMBA_AVAILABLE
from trackpy.linking import PointND, link, Hash_table
# Catch attempts to set values on an inadvertent copy of a Pandas object.
tp.utils.make_pandas_strict()
path, _ = os.path.split(os.path.abspath(__file__))
path = os.path.join(path, 'data')
# Call lambda function for a fresh copy each time.
unit_steps = lambda: [[PointND(t, (x, 0))] for t, x in enumerate(range(5))]
np.random.seed(0)
random_x = np.random.randn(5).cumsum()
random_x -= random_x.min() # All x > 0
max_disp = np.diff(random_x).max()
random_walk_legacy = lambda: [[PointND(t, (x, 5))]
for t, x in enumerate(random_x)]
def hash_generator(dims, box_size):
return lambda: Hash_table(dims, box_size)
def _skip_if_no_numba():
if not NUMBA_AVAILABLE:
raise nose.SkipTest('numba not installed. Skipping.')
def random_walk(N):
return np.cumsum(np.random.randn(N))
def contracting_grid():
"""Two frames with a grid of 441 points.
In the second frame, the points contract, so that the outermost set
coincides with the second-outermost set in the previous frame.
This is a way to challenge (and/or stump) a subnet solver.
"""
pts0x, pts0y = np.mgrid[-10:11,-10:11]
pts0 = pd.DataFrame(dict(x=pts0x.flatten(), y=pts0y.flatten(),
frame=0))
pts1 = pts0.copy()
pts1.frame = 1
pts1.x = pts1.x * 0.9
pts1.y = pts1.y * 0.9
allpts = pd.concat([pts0, pts1], ignore_index=True)
allpts.x += 100 # Because BTree doesn't allow negative coordinates
allpts.y += 100
return allpts
class CommonTrackingTests(object):
do_diagnostics = False # Don't ask for diagnostic info from linker
def test_one_trivial_stepper(self):
# One 1D stepper
N = 5
f = DataFrame({'x': np.arange(N), 'y': np.ones(N), 'frame': np.arange(N)})
expected = f.copy()
expected['particle'] = np.zeros(N)
actual = self.link_df(f, 5)
assert_frame_equal(actual, expected)
actual_iter = self.link_df_iter(f, 5, hash_size=(10, 2))
assert_frame_equal(actual_iter, expected)
if self.do_diagnostics:
assert 'diag_search_range' in self.diag.columns
# Except for first frame, all particles should have been labeled
# with a search_range
assert not any(self.diag['diag_search_range'][
actual_iter.frame > 0].isnull())
def test_two_isolated_steppers(self):
N = 5
Y = 25
# Begin second feature one frame later than the first, so the particle labeling (0, 1) is
# established and not arbitrary.
a = DataFrame({'x': np.arange(N), 'y': np.ones(N), 'frame': np.arange(N)})
b = DataFrame({'x': np.arange(1, N), 'y': Y + np.ones(N - 1), 'frame': np.arange(1, N)})
f = pd.concat([a, b])
expected = f.copy().reset_index(drop=True)
expected['particle'] = np.concatenate([np.zeros(N), np.ones(N - 1)])
expected.sort(['particle', 'frame'], inplace=True)
actual = self.link_df(f, 5)
assert_frame_equal(actual, expected)
actual_iter = self.link_df_iter(f, 5, hash_size=(50, 50))
assert_frame_equal(actual_iter, expected)
# Sort rows by frame (normal use)
actual = self.link_df(f.sort('frame'), 5)
assert_frame_equal(actual, expected)
actual_iter = self.link_df_iter(f.sort('frame'), 5, hash_size=(50, 50))
assert_frame_equal(actual_iter, expected)
# Shuffle rows (crazy!)
np.random.seed(0)
f1 = f.reset_index(drop=True)
f1.reindex(np.random.permutation(f1.index))
actual = self.link_df(f1, 5)
assert_frame_equal(actual, expected)
actual_iter = self.link_df_iter(f1, 5, hash_size=(50, 50))
assert_frame_equal(actual_iter, expected)
def test_two_isolated_steppers_one_gapped(self):
N = 5
Y = 25
# Begin second feature one frame later than the first,
# so the particle labeling (0, 1) is established and not arbitrary.
a = DataFrame({'x': np.arange(N), 'y': np.ones(N),
'frame': np.arange(N)})
a = a.drop(3).reset_index(drop=True)
b = DataFrame({'x': np.arange(1, N), 'y': Y + np.ones(N - 1),
'frame': np.arange(1, N)})
f = pd.concat([a, b])
expected = f.copy()
expected['particle'] = np.concatenate([np.array([0, 0, 0, 2]), np.ones(N - 1)])
expected.sort(['particle', 'frame'], inplace=True)
expected.reset_index(drop=True, inplace=True)
actual = self.link_df(f, 5)
assert_frame_equal(actual, expected)
actual_iter = self.link_df_iter(f, 5, hash_size=(50, 50))
assert_frame_equal(actual_iter, expected)
# link_df_iter() tests not performed, because hash_size is
# not knowable from the first frame alone.
# Sort rows by frame (normal use)
actual = self.link_df(f.sort('frame'), 5)
| assert_frame_equal(actual, expected) | pandas.util.testing.assert_frame_equal |
# -*- coding: utf-8 -*-
# pylint: disable=E1101,E1103,W0232
import os
import sys
from datetime import datetime
from distutils.version import LooseVersion
import numpy as np
import pandas as pd
import pandas.compat as compat
import pandas.core.common as com
import pandas.util.testing as tm
from pandas import (Categorical, Index, Series, DataFrame, PeriodIndex,
Timestamp, CategoricalIndex)
from pandas.compat import range, lrange, u, PY3
from pandas.core.config import option_context
# GH 12066
# flake8: noqa
class TestCategorical(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
self.factor = Categorical.from_array(['a', 'b', 'b', 'a',
'a', 'c', 'c', 'c'],
ordered=True)
def test_getitem(self):
self.assertEqual(self.factor[0], 'a')
self.assertEqual(self.factor[-1], 'c')
subf = self.factor[[0, 1, 2]]
tm.assert_almost_equal(subf._codes, [0, 1, 1])
subf = self.factor[np.asarray(self.factor) == 'c']
tm.assert_almost_equal(subf._codes, [2, 2, 2])
def test_getitem_listlike(self):
# GH 9469
# properly coerce the input indexers
np.random.seed(1)
c = Categorical(np.random.randint(0, 5, size=150000).astype(np.int8))
result = c.codes[np.array([100000]).astype(np.int64)]
expected = c[np.array([100000]).astype(np.int64)].codes
self.assert_numpy_array_equal(result, expected)
def test_setitem(self):
# int/positional
c = self.factor.copy()
c[0] = 'b'
self.assertEqual(c[0], 'b')
c[-1] = 'a'
self.assertEqual(c[-1], 'a')
# boolean
c = self.factor.copy()
indexer = np.zeros(len(c), dtype='bool')
indexer[0] = True
indexer[-1] = True
c[indexer] = 'c'
expected = Categorical.from_array(['c', 'b', 'b', 'a',
'a', 'c', 'c', 'c'], ordered=True)
self.assert_categorical_equal(c, expected)
def test_setitem_listlike(self):
# GH 9469
# properly coerce the input indexers
np.random.seed(1)
c = Categorical(np.random.randint(0, 5, size=150000).astype(
np.int8)).add_categories([-1000])
indexer = np.array([100000]).astype(np.int64)
c[indexer] = -1000
# we are asserting the code result here
# which maps to the -1000 category
result = c.codes[np.array([100000]).astype(np.int64)]
self.assertEqual(result, np.array([5], dtype='int8'))
def test_constructor_unsortable(self):
# it works!
arr = np.array([1, 2, 3, datetime.now()], dtype='O')
factor = Categorical.from_array(arr, ordered=False)
self.assertFalse(factor.ordered)
if compat.PY3:
self.assertRaises(
TypeError, lambda: Categorical.from_array(arr, ordered=True))
else:
# this however will raise as it cannot be sorted (on PY3 or older
# numpies)
if LooseVersion(np.__version__) < "1.10":
self.assertRaises(
TypeError,
lambda: Categorical.from_array(arr, ordered=True))
else:
Categorical.from_array(arr, ordered=True)
def test_is_equal_dtype(self):
# test dtype comparisons between cats
c1 = Categorical(list('aabca'), categories=list('abc'), ordered=False)
c2 = Categorical(list('aabca'), categories=list('cab'), ordered=False)
c3 = Categorical(list('aabca'), categories=list('cab'), ordered=True)
self.assertTrue(c1.is_dtype_equal(c1))
self.assertTrue(c2.is_dtype_equal(c2))
self.assertTrue(c3.is_dtype_equal(c3))
self.assertFalse(c1.is_dtype_equal(c2))
self.assertFalse(c1.is_dtype_equal(c3))
self.assertFalse(c1.is_dtype_equal(Index(list('aabca'))))
self.assertFalse(c1.is_dtype_equal(c1.astype(object)))
self.assertTrue(c1.is_dtype_equal(CategoricalIndex(c1)))
self.assertFalse(c1.is_dtype_equal(
CategoricalIndex(c1, categories=list('cab'))))
self.assertFalse(c1.is_dtype_equal(CategoricalIndex(c1, ordered=True)))
def test_constructor(self):
exp_arr = np.array(["a", "b", "c", "a", "b", "c"])
c1 = Categorical(exp_arr)
self.assert_numpy_array_equal(c1.__array__(), exp_arr)
c2 = Categorical(exp_arr, categories=["a", "b", "c"])
self.assert_numpy_array_equal(c2.__array__(), exp_arr)
c2 = Categorical(exp_arr, categories=["c", "b", "a"])
self.assert_numpy_array_equal(c2.__array__(), exp_arr)
# categories must be unique
def f():
Categorical([1, 2], [1, 2, 2])
self.assertRaises(ValueError, f)
def f():
Categorical(["a", "b"], ["a", "b", "b"])
self.assertRaises(ValueError, f)
def f():
with tm.assert_produces_warning(FutureWarning):
Categorical([1, 2], [1, 2, np.nan, np.nan])
self.assertRaises(ValueError, f)
# The default should be unordered
c1 = Categorical(["a", "b", "c", "a"])
self.assertFalse(c1.ordered)
# Categorical as input
c1 = Categorical(["a", "b", "c", "a"])
c2 = Categorical(c1)
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
c2 = Categorical(c1)
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "c", "b"])
c2 = Categorical(c1)
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "c", "b"])
c2 = Categorical(c1, categories=["a", "b", "c"])
self.assert_numpy_array_equal(c1.__array__(), c2.__array__())
self.assert_numpy_array_equal(c2.categories, np.array(["a", "b", "c"]))
# Series of dtype category
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
c2 = Categorical(Series(c1))
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "c", "b"])
c2 = Categorical(Series(c1))
self.assertTrue(c1.equals(c2))
# Series
c1 = Categorical(["a", "b", "c", "a"])
c2 = Categorical(Series(["a", "b", "c", "a"]))
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
c2 = Categorical(
Series(["a", "b", "c", "a"]), categories=["a", "b", "c", "d"])
self.assertTrue(c1.equals(c2))
# This should result in integer categories, not float!
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
self.assertTrue(com.is_integer_dtype(cat.categories))
# https://github.com/pydata/pandas/issues/3678
cat = pd.Categorical([np.nan, 1, 2, 3])
self.assertTrue(com.is_integer_dtype(cat.categories))
# this should result in floats
cat = pd.Categorical([np.nan, 1, 2., 3])
self.assertTrue(com.is_float_dtype(cat.categories))
cat = pd.Categorical([np.nan, 1., 2., 3.])
self.assertTrue(com.is_float_dtype(cat.categories))
# Deprecating NaNs in categories (GH #10748)
# preserve int as far as possible by converting to object if NaN is in
# categories
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical([np.nan, 1, 2, 3],
categories=[np.nan, 1, 2, 3])
self.assertTrue(com.is_object_dtype(cat.categories))
# This doesn't work -> this would probably need some kind of "remember
# the original type" feature to try to cast the array interface result
# to...
# vals = np.asarray(cat[cat.notnull()])
# self.assertTrue(com.is_integer_dtype(vals))
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical([np.nan, "a", "b", "c"],
categories=[np.nan, "a", "b", "c"])
self.assertTrue(com.is_object_dtype(cat.categories))
# but don't do it for floats
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical([np.nan, 1., 2., 3.],
categories=[np.nan, 1., 2., 3.])
self.assertTrue(com.is_float_dtype(cat.categories))
# corner cases
cat = pd.Categorical([1])
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == 1)
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
cat = pd.Categorical(["a"])
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == "a")
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
# Scalars should be converted to lists
cat = pd.Categorical(1)
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == 1)
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
cat = pd.Categorical([1], categories=1)
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == 1)
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
# Catch old style constructor usage: two arrays, codes + categories
# We can only catch two cases:
# - when the first is an integer dtype and the second is not
# - when the resulting codes are all -1/NaN
with tm.assert_produces_warning(RuntimeWarning):
c_old = Categorical([0, 1, 2, 0, 1, 2],
categories=["a", "b", "c"]) # noqa
with tm.assert_produces_warning(RuntimeWarning):
c_old = Categorical([0, 1, 2, 0, 1, 2], # noqa
categories=[3, 4, 5])
# the next ones are from the old docs, but unfortunately these don't
# trigger :-(
with tm.assert_produces_warning(None):
c_old2 = Categorical([0, 1, 2, 0, 1, 2], [1, 2, 3]) # noqa
cat = Categorical([1, 2], categories=[1, 2, 3])
# this is a legitimate constructor
with tm.assert_produces_warning(None):
c = Categorical(np.array([], dtype='int64'), # noqa
categories=[3, 2, 1], ordered=True)
def test_constructor_with_index(self):
ci = CategoricalIndex(list('aabbca'), categories=list('cab'))
self.assertTrue(ci.values.equals(Categorical(ci)))
ci = CategoricalIndex(list('aabbca'), categories=list('cab'))
self.assertTrue(ci.values.equals(Categorical(
ci.astype(object), categories=ci.categories)))
def test_constructor_with_generator(self):
# This was raising an Error in isnull(single_val).any() because isnull
# returned a scalar for a generator
xrange = range
exp = Categorical([0, 1, 2])
cat = Categorical((x for x in [0, 1, 2]))
self.assertTrue(cat.equals(exp))
cat = Categorical(xrange(3))
self.assertTrue(cat.equals(exp))
# This uses xrange internally
from pandas.core.index import MultiIndex
MultiIndex.from_product([range(5), ['a', 'b', 'c']])
# check that categories accept generators and sequences
cat = pd.Categorical([0, 1, 2], categories=(x for x in [0, 1, 2]))
self.assertTrue(cat.equals(exp))
cat = pd.Categorical([0, 1, 2], categories=xrange(3))
self.assertTrue(cat.equals(exp))
def test_from_codes(self):
# too few categories
def f():
Categorical.from_codes([1, 2], [1, 2])
self.assertRaises(ValueError, f)
# no int codes
def f():
Categorical.from_codes(["a"], [1, 2])
self.assertRaises(ValueError, f)
# no unique categories
def f():
Categorical.from_codes([0, 1, 2], ["a", "a", "b"])
self.assertRaises(ValueError, f)
# too negative
def f():
Categorical.from_codes([-2, 1, 2], ["a", "b", "c"])
self.assertRaises(ValueError, f)
exp = Categorical(["a", "b", "c"], ordered=False)
res = Categorical.from_codes([0, 1, 2], ["a", "b", "c"])
self.assertTrue(exp.equals(res))
# Not available in earlier numpy versions
if hasattr(np.random, "choice"):
codes = np.random.choice([0, 1], 5, p=[0.9, 0.1])
pd.Categorical.from_codes(codes, categories=["train", "test"])
def test_comparisons(self):
result = self.factor[self.factor == 'a']
expected = self.factor[np.asarray(self.factor) == 'a']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor != 'a']
expected = self.factor[np.asarray(self.factor) != 'a']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor < 'c']
expected = self.factor[np.asarray(self.factor) < 'c']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor > 'a']
expected = self.factor[np.asarray(self.factor) > 'a']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor >= 'b']
expected = self.factor[np.asarray(self.factor) >= 'b']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor <= 'b']
expected = self.factor[np.asarray(self.factor) <= 'b']
self.assertTrue(result.equals(expected))
n = len(self.factor)
other = self.factor[np.random.permutation(n)]
result = self.factor == other
expected = np.asarray(self.factor) == np.asarray(other)
self.assert_numpy_array_equal(result, expected)
result = self.factor == 'd'
expected = np.repeat(False, len(self.factor))
self.assert_numpy_array_equal(result, expected)
# comparisons with categoricals
cat_rev = pd.Categorical(["a", "b", "c"], categories=["c", "b", "a"],
ordered=True)
cat_rev_base = pd.Categorical(
["b", "b", "b"], categories=["c", "b", "a"], ordered=True)
cat = pd.Categorical(["a", "b", "c"], ordered=True)
cat_base = pd.Categorical(["b", "b", "b"], categories=cat.categories,
ordered=True)
# comparisons need to take categories ordering into account
res_rev = cat_rev > cat_rev_base
exp_rev = np.array([True, False, False])
self.assert_numpy_array_equal(res_rev, exp_rev)
res_rev = cat_rev < cat_rev_base
exp_rev = np.array([False, False, True])
self.assert_numpy_array_equal(res_rev, exp_rev)
res = cat > cat_base
exp = np.array([False, False, True])
self.assert_numpy_array_equal(res, exp)
# Only categoricals with the same categories can be compared
def f():
cat > cat_rev
self.assertRaises(TypeError, f)
cat_rev_base2 = pd.Categorical(
["b", "b", "b"], categories=["c", "b", "a", "d"])
def f():
cat_rev > cat_rev_base2
self.assertRaises(TypeError, f)
# Only categoricals with the same ordering information can be compared
cat_unorderd = cat.set_ordered(False)
self.assertFalse((cat > cat).any())
def f():
cat > cat_unorderd
self.assertRaises(TypeError, f)
# comparison (in both directions) with Series will raise
s = Series(["b", "b", "b"])
self.assertRaises(TypeError, lambda: cat > s)
self.assertRaises(TypeError, lambda: cat_rev > s)
self.assertRaises(TypeError, lambda: s < cat)
self.assertRaises(TypeError, lambda: s < cat_rev)
# comparison with numpy.array will raise in both directions, but only on
# newer numpy versions
a = np.array(["b", "b", "b"])
self.assertRaises(TypeError, lambda: cat > a)
self.assertRaises(TypeError, lambda: cat_rev > a)
# The following works via '__array_priority__ = 1000'
# and only on numpy >= 1.7.1
if LooseVersion(np.__version__) > "1.7.1":
self.assertRaises(TypeError, lambda: a < cat)
self.assertRaises(TypeError, lambda: a < cat_rev)
# Make sure that unequal comparisons take the categories order into
# account
cat_rev = pd.Categorical(
list("abc"), categories=list("cba"), ordered=True)
exp = np.array([True, False, False])
res = cat_rev > "b"
self.assert_numpy_array_equal(res, exp)
def test_na_flags_int_categories(self):
# #1457
categories = lrange(10)
labels = np.random.randint(0, 10, 20)
labels[::5] = -1
cat = Categorical(labels, categories, fastpath=True)
repr(cat)
self.assert_numpy_array_equal(com.isnull(cat), labels == -1)
def test_categories_none(self):
factor = Categorical(['a', 'b', 'b', 'a',
'a', 'c', 'c', 'c'], ordered=True)
self.assertTrue(factor.equals(self.factor))
def test_describe(self):
# string type
desc = self.factor.describe()
expected = DataFrame({'counts': [3, 2, 3],
'freqs': [3 / 8., 2 / 8., 3 / 8.]},
index=pd.CategoricalIndex(['a', 'b', 'c'],
name='categories'))
tm.assert_frame_equal(desc, expected)
# check unused categories
cat = self.factor.copy()
cat.set_categories(["a", "b", "c", "d"], inplace=True)
desc = cat.describe()
expected = DataFrame({'counts': [3, 2, 3, 0],
'freqs': [3 / 8., 2 / 8., 3 / 8., 0]},
index=pd.CategoricalIndex(['a', 'b', 'c', 'd'],
name='categories'))
tm.assert_frame_equal(desc, expected)
# check an integer one
desc = Categorical([1, 2, 3, 1, 2, 3, 3, 2, 1, 1, 1]).describe()
expected = DataFrame({'counts': [5, 3, 3],
'freqs': [5 / 11., 3 / 11., 3 / 11.]},
index=pd.CategoricalIndex([1, 2, 3],
name='categories'))
tm.assert_frame_equal(desc, expected)
# https://github.com/pydata/pandas/issues/3678
# describe should work with NaN
cat = pd.Categorical([np.nan, 1, 2, 2])
desc = cat.describe()
expected = DataFrame({'counts': [1, 2, 1],
'freqs': [1 / 4., 2 / 4., 1 / 4.]},
index=pd.CategoricalIndex([1, 2, np.nan],
categories=[1, 2],
name='categories'))
tm.assert_frame_equal(desc, expected)
# NA as a category
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical(["a", "c", "c", np.nan],
categories=["b", "a", "c", np.nan])
result = cat.describe()
expected = DataFrame([[0, 0], [1, 0.25], [2, 0.5], [1, 0.25]],
columns=['counts', 'freqs'],
index=pd.CategoricalIndex(['b', 'a', 'c', np.nan],
name='categories'))
tm.assert_frame_equal(result, expected)
# NA as an unused category
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical(["a", "c", "c"],
categories=["b", "a", "c", np.nan])
result = cat.describe()
exp_idx = pd.CategoricalIndex(
['b', 'a', 'c', np.nan], name='categories')
expected = DataFrame([[0, 0], [1, 1 / 3.], [2, 2 / 3.], [0, 0]],
columns=['counts', 'freqs'], index=exp_idx)
tm.assert_frame_equal(result, expected)
def test_print(self):
expected = ["[a, b, b, a, a, c, c, c]",
"Categories (3, object): [a < b < c]"]
expected = "\n".join(expected)
actual = repr(self.factor)
self.assertEqual(actual, expected)
def test_big_print(self):
factor = Categorical([0, 1, 2, 0, 1, 2] * 100, ['a', 'b', 'c'],
name='cat', fastpath=True)
expected = ["[a, b, c, a, b, ..., b, c, a, b, c]", "Length: 600",
"Categories (3, object): [a, b, c]"]
expected = "\n".join(expected)
actual = repr(factor)
self.assertEqual(actual, expected)
def test_empty_print(self):
factor = Categorical([], ["a", "b", "c"])
expected = ("[], Categories (3, object): [a, b, c]")
# hack because array_repr changed in numpy > 1.6.x
actual = repr(factor)
self.assertEqual(actual, expected)
self.assertEqual(expected, actual)
factor = Categorical([], ["a", "b", "c"], ordered=True)
expected = ("[], Categories (3, object): [a < b < c]")
actual = repr(factor)
self.assertEqual(expected, actual)
factor = Categorical([], [])
expected = ("[], Categories (0, object): []")
self.assertEqual(expected, repr(factor))
def test_print_none_width(self):
# GH10087
a = pd.Series(pd.Categorical([1, 2, 3, 4]))
exp = u("0 1\n1 2\n2 3\n3 4\n" +
"dtype: category\nCategories (4, int64): [1, 2, 3, 4]")
with option_context("display.width", None):
self.assertEqual(exp, repr(a))
def test_unicode_print(self):
if PY3:
_rep = repr
else:
_rep = unicode # noqa
c = pd.Categorical(['aaaaa', 'bb', 'cccc'] * 20)
expected = u"""\
[aaaaa, bb, cccc, aaaaa, bb, ..., bb, cccc, aaaaa, bb, cccc]
Length: 60
Categories (3, object): [aaaaa, bb, cccc]"""
self.assertEqual(_rep(c), expected)
c = pd.Categorical([u'ああああ', u'いいいいい', u'ううううううう']
* 20)
expected = u"""\
[ああああ, いいいいい, ううううううう, ああああ, いいいいい, ..., いいいいい, ううううううう, ああああ, いいいいい, ううううううう]
Length: 60
Categories (3, object): [ああああ, いいいいい, ううううううう]""" # noqa
self.assertEqual(_rep(c), expected)
# unicode option should not affect Categorical, as it doesn't care
# about the repr width
with option_context('display.unicode.east_asian_width', True):
c = pd.Categorical([u'ああああ', u'いいいいい', u'ううううううう']
* 20)
expected = u"""[ああああ, いいいいい, ううううううう, ああああ, いいいいい, ..., いいいいい, ううううううう, ああああ, いいいいい, ううううううう]
Length: 60
Categories (3, object): [ああああ, いいいいい, ううううううう]""" # noqa
self.assertEqual(_rep(c), expected)
def test_periodindex(self):
idx1 = PeriodIndex(['2014-01', '2014-01', '2014-02', '2014-02',
'2014-03', '2014-03'], freq='M')
cat1 = Categorical.from_array(idx1)
str(cat1)
exp_arr = np.array([0, 0, 1, 1, 2, 2], dtype='int64')
exp_idx = PeriodIndex(['2014-01', '2014-02', '2014-03'], freq='M')
self.assert_numpy_array_equal(cat1._codes, exp_arr)
self.assertTrue(cat1.categories.equals(exp_idx))
idx2 = PeriodIndex(['2014-03', '2014-03', '2014-02', '2014-01',
'2014-03', '2014-01'], freq='M')
cat2 = Categorical.from_array(idx2, ordered=True)
str(cat2)
exp_arr = np.array([2, 2, 1, 0, 2, 0], dtype='int64')
exp_idx2 = PeriodIndex(['2014-01', '2014-02', '2014-03'], freq='M')
self.assert_numpy_array_equal(cat2._codes, exp_arr)
self.assertTrue(cat2.categories.equals(exp_idx2))
idx3 = PeriodIndex(['2013-12', '2013-11', '2013-10', '2013-09',
'2013-08', '2013-07', '2013-05'], freq='M')
cat3 = Categorical.from_array(idx3, ordered=True)
exp_arr = np.array([6, 5, 4, 3, 2, 1, 0], dtype='int64')
exp_idx = PeriodIndex(['2013-05', '2013-07', '2013-08', '2013-09',
'2013-10', '2013-11', '2013-12'], freq='M')
self.assert_numpy_array_equal(cat3._codes, exp_arr)
self.assertTrue(cat3.categories.equals(exp_idx))
def test_categories_assigments(self):
s = pd.Categorical(["a", "b", "c", "a"])
exp = np.array([1, 2, 3, 1])
s.categories = [1, 2, 3]
self.assert_numpy_array_equal(s.__array__(), exp)
self.assert_numpy_array_equal(s.categories, np.array([1, 2, 3]))
# lengthen
def f():
s.categories = [1, 2, 3, 4]
self.assertRaises(ValueError, f)
# shorten
def f():
s.categories = [1, 2]
self.assertRaises(ValueError, f)
def test_construction_with_ordered(self):
# GH 9347, 9190
cat = Categorical([0, 1, 2])
self.assertFalse(cat.ordered)
cat = Categorical([0, 1, 2], ordered=False)
self.assertFalse(cat.ordered)
cat = Categorical([0, 1, 2], ordered=True)
self.assertTrue(cat.ordered)
def test_ordered_api(self):
# GH 9347
cat1 = pd.Categorical(["a", "c", "b"], ordered=False)
self.assertTrue(cat1.categories.equals(Index(['a', 'b', 'c'])))
self.assertFalse(cat1.ordered)
cat2 = pd.Categorical(["a", "c", "b"], categories=['b', 'c', 'a'],
ordered=False)
self.assertTrue(cat2.categories.equals(Index(['b', 'c', 'a'])))
self.assertFalse(cat2.ordered)
cat3 = pd.Categorical(["a", "c", "b"], ordered=True)
self.assertTrue(cat3.categories.equals(Index(['a', 'b', 'c'])))
self.assertTrue(cat3.ordered)
cat4 = pd.Categorical(["a", "c", "b"], categories=['b', 'c', 'a'],
ordered=True)
self.assertTrue(cat4.categories.equals(Index(['b', 'c', 'a'])))
self.assertTrue(cat4.ordered)
def test_set_ordered(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
cat2 = cat.as_unordered()
self.assertFalse(cat2.ordered)
cat2 = cat.as_ordered()
self.assertTrue(cat2.ordered)
cat2.as_unordered(inplace=True)
self.assertFalse(cat2.ordered)
cat2.as_ordered(inplace=True)
self.assertTrue(cat2.ordered)
self.assertTrue(cat2.set_ordered(True).ordered)
self.assertFalse(cat2.set_ordered(False).ordered)
cat2.set_ordered(True, inplace=True)
self.assertTrue(cat2.ordered)
cat2.set_ordered(False, inplace=True)
self.assertFalse(cat2.ordered)
# deprecated in v0.16.0
with tm.assert_produces_warning(FutureWarning):
cat.ordered = False
self.assertFalse(cat.ordered)
with tm.assert_produces_warning(FutureWarning):
cat.ordered = True
self.assertTrue(cat.ordered)
def test_set_categories(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
exp_categories = np.array(["c", "b", "a"])
exp_values = np.array(["a", "b", "c", "a"])
res = cat.set_categories(["c", "b", "a"], inplace=True)
self.assert_numpy_array_equal(cat.categories, exp_categories)
self.assert_numpy_array_equal(cat.__array__(), exp_values)
self.assertIsNone(res)
res = cat.set_categories(["a", "b", "c"])
# cat must be the same as before
self.assert_numpy_array_equal(cat.categories, exp_categories)
self.assert_numpy_array_equal(cat.__array__(), exp_values)
# only res is changed
exp_categories_back = np.array(["a", "b", "c"])
self.assert_numpy_array_equal(res.categories, exp_categories_back)
self.assert_numpy_array_equal(res.__array__(), exp_values)
# not all "old" included in "new" -> all not included ones are now
# np.nan
cat = Categorical(["a", "b", "c", "a"], ordered=True)
res = cat.set_categories(["a"])
self.assert_numpy_array_equal(res.codes, np.array([0, -1, -1, 0]))
# still not all "old" in "new"
res = cat.set_categories(["a", "b", "d"])
self.assert_numpy_array_equal(res.codes, np.array([0, 1, -1, 0]))
self.assert_numpy_array_equal(res.categories,
np.array(["a", "b", "d"]))
# all "old" included in "new"
cat = cat.set_categories(["a", "b", "c", "d"])
exp_categories = np.array(["a", "b", "c", "d"])
self.assert_numpy_array_equal(cat.categories, exp_categories)
# internals...
c = Categorical([1, 2, 3, 4, 1], categories=[1, 2, 3, 4], ordered=True)
self.assert_numpy_array_equal(c._codes, np.array([0, 1, 2, 3, 0]))
self.assert_numpy_array_equal(c.categories, np.array([1, 2, 3, 4]))
self.assert_numpy_array_equal(c.get_values(),
np.array([1, 2, 3, 4, 1]))
        c = c.set_categories([4, 3, 2, 1])
        # all "pointers" to '4' must be changed from 3 to 0,...
        self.assert_numpy_array_equal(c._codes, np.array([3, 2, 1, 0, 3]))  # positions are changed
        self.assert_numpy_array_equal(c.categories, np.array([4, 3, 2, 1]))  # categories are now in new order
        self.assert_numpy_array_equal(c.get_values(), np.array([1, 2, 3, 4, 1]))  # output is the same
        # assertTrue ignores its second argument; use assertEqual to actually compare
        self.assertEqual(c.min(), 4)
        self.assertEqual(c.max(), 1)
# set_categories should set the ordering if specified
c2 = c.set_categories([4, 3, 2, 1], ordered=False)
self.assertFalse(c2.ordered)
self.assert_numpy_array_equal(c.get_values(), c2.get_values())
# set_categories should pass thru the ordering
c2 = c.set_ordered(False).set_categories([4, 3, 2, 1])
self.assertFalse(c2.ordered)
self.assert_numpy_array_equal(c.get_values(), c2.get_values())
def test_rename_categories(self):
cat = pd.Categorical(["a", "b", "c", "a"])
# inplace=False: the old one must not be changed
res = cat.rename_categories([1, 2, 3])
self.assert_numpy_array_equal(res.__array__(), np.array([1, 2, 3, 1]))
self.assert_numpy_array_equal(res.categories, np.array([1, 2, 3]))
self.assert_numpy_array_equal(cat.__array__(),
np.array(["a", "b", "c", "a"]))
self.assert_numpy_array_equal(cat.categories,
np.array(["a", "b", "c"]))
res = cat.rename_categories([1, 2, 3], inplace=True)
# and now inplace
self.assertIsNone(res)
self.assert_numpy_array_equal(cat.__array__(), np.array([1, 2, 3, 1]))
self.assert_numpy_array_equal(cat.categories, np.array([1, 2, 3]))
# lengthen
def f():
cat.rename_categories([1, 2, 3, 4])
self.assertRaises(ValueError, f)
# shorten
def f():
cat.rename_categories([1, 2])
self.assertRaises(ValueError, f)
def test_reorder_categories(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
old = cat.copy()
new = Categorical(["a", "b", "c", "a"], categories=["c", "b", "a"],
ordered=True)
# first inplace == False
res = cat.reorder_categories(["c", "b", "a"])
# cat must be the same as before
self.assert_categorical_equal(cat, old)
# only res is changed
self.assert_categorical_equal(res, new)
# inplace == True
res = cat.reorder_categories(["c", "b", "a"], inplace=True)
self.assertIsNone(res)
self.assert_categorical_equal(cat, new)
# not all "old" included in "new"
cat = Categorical(["a", "b", "c", "a"], ordered=True)
def f():
cat.reorder_categories(["a"])
self.assertRaises(ValueError, f)
# still not all "old" in "new"
def f():
cat.reorder_categories(["a", "b", "d"])
self.assertRaises(ValueError, f)
# all "old" included in "new", but too long
def f():
cat.reorder_categories(["a", "b", "c", "d"])
self.assertRaises(ValueError, f)
def test_add_categories(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
old = cat.copy()
new = Categorical(["a", "b", "c", "a"],
categories=["a", "b", "c", "d"], ordered=True)
# first inplace == False
res = cat.add_categories("d")
self.assert_categorical_equal(cat, old)
self.assert_categorical_equal(res, new)
res = cat.add_categories(["d"])
self.assert_categorical_equal(cat, old)
self.assert_categorical_equal(res, new)
# inplace == True
res = cat.add_categories("d", inplace=True)
self.assert_categorical_equal(cat, new)
self.assertIsNone(res)
# new is in old categories
def f():
cat.add_categories(["d"])
self.assertRaises(ValueError, f)
# GH 9927
cat = Categorical(list("abc"), ordered=True)
expected = Categorical(
list("abc"), categories=list("abcde"), ordered=True)
# test with Series, np.array, index, list
res = cat.add_categories(Series(["d", "e"]))
self.assert_categorical_equal(res, expected)
res = cat.add_categories(np.array(["d", "e"]))
self.assert_categorical_equal(res, expected)
res = cat.add_categories(Index(["d", "e"]))
self.assert_categorical_equal(res, expected)
res = cat.add_categories(["d", "e"])
self.assert_categorical_equal(res, expected)
def test_remove_categories(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
old = cat.copy()
new = Categorical(["a", "b", np.nan, "a"], categories=["a", "b"],
ordered=True)
# first inplace == False
res = cat.remove_categories("c")
self.assert_categorical_equal(cat, old)
self.assert_categorical_equal(res, new)
res = cat.remove_categories(["c"])
self.assert_categorical_equal(cat, old)
self.assert_categorical_equal(res, new)
# inplace == True
res = cat.remove_categories("c", inplace=True)
self.assert_categorical_equal(cat, new)
self.assertIsNone(res)
# removal is not in categories
def f():
cat.remove_categories(["c"])
self.assertRaises(ValueError, f)
def test_remove_unused_categories(self):
c = Categorical(["a", "b", "c", "d", "a"],
categories=["a", "b", "c", "d", "e"])
exp_categories_all = np.array(["a", "b", "c", "d", "e"])
exp_categories_dropped = np.array(["a", "b", "c", "d"])
self.assert_numpy_array_equal(c.categories, exp_categories_all)
res = c.remove_unused_categories()
self.assert_numpy_array_equal(res.categories, exp_categories_dropped)
self.assert_numpy_array_equal(c.categories, exp_categories_all)
res = c.remove_unused_categories(inplace=True)
self.assert_numpy_array_equal(c.categories, exp_categories_dropped)
self.assertIsNone(res)
# with NaN values (GH11599)
c = Categorical(["a", "b", "c", np.nan],
categories=["a", "b", "c", "d", "e"])
res = c.remove_unused_categories()
self.assert_numpy_array_equal(res.categories,
np.array(["a", "b", "c"]))
self.assert_numpy_array_equal(c.categories, exp_categories_all)
val = ['F', np.nan, 'D', 'B', 'D', 'F', np.nan]
cat = pd.Categorical(values=val, categories=list('ABCDEFG'))
out = cat.remove_unused_categories()
self.assert_numpy_array_equal(out.categories, ['B', 'D', 'F'])
self.assert_numpy_array_equal(out.codes, [2, -1, 1, 0, 1, 2, -1])
self.assertEqual(out.get_values().tolist(), val)
alpha = list('abcdefghijklmnopqrstuvwxyz')
val = np.random.choice(alpha[::2], 10000).astype('object')
val[np.random.choice(len(val), 100)] = np.nan
cat = pd.Categorical(values=val, categories=alpha)
out = cat.remove_unused_categories()
self.assertEqual(out.get_values().tolist(), val.tolist())
def test_nan_handling(self):
# Nans are represented as -1 in codes
c = Categorical(["a", "b", np.nan, "a"])
self.assert_numpy_array_equal(c.categories, np.array(["a", "b"]))
self.assert_numpy_array_equal(c._codes, np.array([0, 1, -1, 0]))
c[1] = np.nan
self.assert_numpy_array_equal(c.categories, np.array(["a", "b"]))
self.assert_numpy_array_equal(c._codes, np.array([0, -1, -1, 0]))
# If categories have nan included, the code should point to that
# instead
with tm.assert_produces_warning(FutureWarning):
c = Categorical(["a", "b", np.nan, "a"],
categories=["a", "b", np.nan])
self.assert_numpy_array_equal(c.categories,
np.array(["a", "b", np.nan],
dtype=np.object_))
self.assert_numpy_array_equal(c._codes, np.array([0, 1, 2, 0]))
c[1] = np.nan
self.assert_numpy_array_equal(c.categories,
np.array(["a", "b", np.nan],
dtype=np.object_))
self.assert_numpy_array_equal(c._codes, np.array([0, 2, 2, 0]))
# Changing categories should also make the replaced category np.nan
c = Categorical(["a", "b", "c", "a"])
with tm.assert_produces_warning(FutureWarning):
c.categories = ["a", "b", np.nan] # noqa
self.assert_numpy_array_equal(c.categories,
np.array(["a", "b", np.nan],
dtype=np.object_))
self.assert_numpy_array_equal(c._codes, np.array([0, 1, 2, 0]))
# Adding nan to categories should make assigned nan point to the
# category!
c = Categorical(["a", "b", np.nan, "a"])
self.assert_numpy_array_equal(c.categories, np.array(["a", "b"]))
self.assert_numpy_array_equal(c._codes, np.array([0, 1, -1, 0]))
with tm.assert_produces_warning(FutureWarning):
c.set_categories(["a", "b", np.nan], rename=True, inplace=True)
self.assert_numpy_array_equal(c.categories,
np.array(["a", "b", np.nan],
dtype=np.object_))
self.assert_numpy_array_equal(c._codes, np.array([0, 1, -1, 0]))
c[1] = np.nan
self.assert_numpy_array_equal(c.categories,
np.array(["a", "b", np.nan],
dtype=np.object_))
self.assert_numpy_array_equal(c._codes, np.array([0, 2, -1, 0]))
# Remove null categories (GH 10156)
cases = [
([1.0, 2.0, np.nan], [1.0, 2.0]),
(['a', 'b', None], ['a', 'b']),
([pd.Timestamp('2012-05-01'), pd.NaT],
[pd.Timestamp('2012-05-01')])
]
null_values = [np.nan, None, pd.NaT]
for with_null, without in cases:
with tm.assert_produces_warning(FutureWarning):
base = Categorical([], with_null)
expected = Categorical([], without)
for nullval in null_values:
result = base.remove_categories(nullval)
self.assert_categorical_equal(result, expected)
# Different null values are indistinguishable
for i, j in [(0, 1), (0, 2), (1, 2)]:
nulls = [null_values[i], null_values[j]]
def f():
with tm.assert_produces_warning(FutureWarning):
Categorical([], categories=nulls)
self.assertRaises(ValueError, f)
def test_isnull(self):
exp = np.array([False, False, True])
c = Categorical(["a", "b", np.nan])
res = c.isnull()
self.assert_numpy_array_equal(res, exp)
with tm.assert_produces_warning(FutureWarning):
c = Categorical(["a", "b", np.nan], categories=["a", "b", np.nan])
res = c.isnull()
self.assert_numpy_array_equal(res, exp)
# test both nan in categories and as -1
exp = np.array([True, False, True])
c = Categorical(["a", "b", np.nan])
with tm.assert_produces_warning(FutureWarning):
c.set_categories(["a", "b", np.nan], rename=True, inplace=True)
c[0] = np.nan
res = c.isnull()
self.assert_numpy_array_equal(res, exp)
def test_codes_immutable(self):
# Codes should be read only
c = Categorical(["a", "b", "c", "a", np.nan])
exp = np.array([0, 1, 2, 0, -1], dtype='int8')
self.assert_numpy_array_equal(c.codes, exp)
# Assignments to codes should raise
def f():
c.codes = np.array([0, 1, 2, 0, 1], dtype='int8')
self.assertRaises(ValueError, f)
# changes in the codes array should raise
# np 1.6.1 raises RuntimeError rather than ValueError
codes = c.codes
def f():
codes[4] = 1
self.assertRaises(ValueError, f)
# But even after getting the codes, the original array should still be
# writeable!
c[4] = "a"
exp = np.array([0, 1, 2, 0, 0], dtype='int8')
self.assert_numpy_array_equal(c.codes, exp)
c._codes[4] = 2
exp = np.array([0, 1, 2, 0, 2], dtype='int8')
self.assert_numpy_array_equal(c.codes, exp)
def test_min_max(self):
# unordered cats have no min/max
cat = Categorical(["a", "b", "c", "d"], ordered=False)
self.assertRaises(TypeError, lambda: cat.min())
self.assertRaises(TypeError, lambda: cat.max())
cat = Categorical(["a", "b", "c", "d"], ordered=True)
_min = cat.min()
_max = cat.max()
self.assertEqual(_min, "a")
self.assertEqual(_max, "d")
cat = Categorical(["a", "b", "c", "d"],
categories=['d', 'c', 'b', 'a'], ordered=True)
_min = cat.min()
_max = cat.max()
self.assertEqual(_min, "d")
self.assertEqual(_max, "a")
cat = Categorical([np.nan, "b", "c", np.nan],
categories=['d', 'c', 'b', 'a'], ordered=True)
_min = cat.min()
_max = cat.max()
self.assertTrue(np.isnan(_min))
self.assertEqual(_max, "b")
_min = cat.min(numeric_only=True)
self.assertEqual(_min, "c")
_max = cat.max(numeric_only=True)
self.assertEqual(_max, "b")
cat = Categorical([np.nan, 1, 2, np.nan], categories=[5, 4, 3, 2, 1],
ordered=True)
_min = cat.min()
_max = cat.max()
self.assertTrue(np.isnan(_min))
self.assertEqual(_max, 1)
_min = cat.min(numeric_only=True)
self.assertEqual(_min, 2)
_max = cat.max(numeric_only=True)
self.assertEqual(_max, 1)
def test_unique(self):
# categories are reordered based on value when ordered=False
cat = Categorical(["a", "b"])
exp = np.asarray(["a", "b"])
res = cat.unique()
self.assert_numpy_array_equal(res, exp)
cat = Categorical(["a", "b", "a", "a"], categories=["a", "b", "c"])
res = cat.unique()
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, Categorical(exp))
cat = Categorical(["c", "a", "b", "a", "a"],
categories=["a", "b", "c"])
exp = np.asarray(["c", "a", "b"])
res = cat.unique()
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, Categorical(
exp, categories=['c', 'a', 'b']))
# nan must be removed
cat = Categorical(["b", np.nan, "b", np.nan, "a"],
categories=["a", "b", "c"])
res = cat.unique()
exp = np.asarray(["b", np.nan, "a"], dtype=object)
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, Categorical(
["b", np.nan, "a"], categories=["b", "a"]))
def test_unique_ordered(self):
# keep categories order when ordered=True
cat = Categorical(['b', 'a', 'b'], categories=['a', 'b'], ordered=True)
res = cat.unique()
exp = np.asarray(['b', 'a'])
exp_cat = Categorical(exp, categories=['a', 'b'], ordered=True)
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, exp_cat)
cat = Categorical(['c', 'b', 'a', 'a'], categories=['a', 'b', 'c'],
ordered=True)
res = cat.unique()
exp = np.asarray(['c', 'b', 'a'])
exp_cat = Categorical(exp, categories=['a', 'b', 'c'], ordered=True)
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, exp_cat)
cat = Categorical(['b', 'a', 'a'], categories=['a', 'b', 'c'],
ordered=True)
res = cat.unique()
exp = np.asarray(['b', 'a'])
exp_cat = Categorical(exp, categories=['a', 'b'], ordered=True)
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, exp_cat)
cat = Categorical(['b', 'b', np.nan, 'a'], categories=['a', 'b', 'c'],
ordered=True)
res = cat.unique()
exp = np.asarray(['b', np.nan, 'a'], dtype=object)
exp_cat = Categorical(exp, categories=['a', 'b'], ordered=True)
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, exp_cat)
def test_mode(self):
s = Categorical([1, 1, 2, 4, 5, 5, 5], categories=[5, 4, 3, 2, 1],
ordered=True)
res = s.mode()
exp = Categorical([5], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
s = Categorical([1, 1, 1, 4, 5, 5, 5], categories=[5, 4, 3, 2, 1],
ordered=True)
res = s.mode()
exp = Categorical([5, 1], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
s = Categorical([1, 2, 3, 4, 5], categories=[5, 4, 3, 2, 1],
ordered=True)
res = s.mode()
exp = Categorical([], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
# NaN should not become the mode!
s = Categorical([np.nan, np.nan, np.nan, 4, 5],
categories=[5, 4, 3, 2, 1], ordered=True)
res = s.mode()
exp = Categorical([], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
s = Categorical([np.nan, np.nan, np.nan, 4, 5, 4],
categories=[5, 4, 3, 2, 1], ordered=True)
res = s.mode()
exp = Categorical([4], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
s = Categorical([np.nan, np.nan, 4, 5, 4], categories=[5, 4, 3, 2, 1],
ordered=True)
res = s.mode()
exp = Categorical([4], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
def test_sort(self):
# unordered cats are sortable
cat = Categorical(["a", "b", "b", "a"], ordered=False)
cat.sort_values()
cat.sort()
cat = Categorical(["a", "c", "b", "d"], ordered=True)
# sort_values
res = cat.sort_values()
exp = np.array(["a", "b", "c", "d"], dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp)
cat = Categorical(["a", "c", "b", "d"],
categories=["a", "b", "c", "d"], ordered=True)
res = cat.sort_values()
exp = np.array(["a", "b", "c", "d"], dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp)
res = cat.sort_values(ascending=False)
exp = np.array(["d", "c", "b", "a"], dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp)
# sort (inplace order)
cat1 = cat.copy()
cat1.sort()
exp = np.array(["a", "b", "c", "d"], dtype=object)
self.assert_numpy_array_equal(cat1.__array__(), exp)
def test_slicing_directly(self):
cat = Categorical(["a", "b", "c", "d", "a", "b", "c"])
sliced = cat[3]
tm.assert_equal(sliced, "d")
sliced = cat[3:5]
expected = Categorical(["d", "a"], categories=['a', 'b', 'c', 'd'])
self.assert_numpy_array_equal(sliced._codes, expected._codes)
tm.assert_index_equal(sliced.categories, expected.categories)
def test_set_item_nan(self):
cat = pd.Categorical([1, 2, 3])
exp = pd.Categorical([1, np.nan, 3], categories=[1, 2, 3])
cat[1] = np.nan
self.assertTrue(cat.equals(exp))
# if nan in categories, the proper code should be set!
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
with tm.assert_produces_warning(FutureWarning):
cat.set_categories([1, 2, 3, np.nan], rename=True, inplace=True)
cat[1] = np.nan
exp = np.array([0, 3, 2, -1])
self.assert_numpy_array_equal(cat.codes, exp)
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
with tm.assert_produces_warning(FutureWarning):
cat.set_categories([1, 2, 3, np.nan], rename=True, inplace=True)
cat[1:3] = np.nan
exp = np.array([0, 3, 3, -1])
self.assert_numpy_array_equal(cat.codes, exp)
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
with tm.assert_produces_warning(FutureWarning):
cat.set_categories([1, 2, 3, np.nan], rename=True, inplace=True)
cat[1:3] = [np.nan, 1]
exp = np.array([0, 3, 0, -1])
self.assert_numpy_array_equal(cat.codes, exp)
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
with tm.assert_produces_warning(FutureWarning):
cat.set_categories([1, 2, 3, np.nan], rename=True, inplace=True)
cat[1:3] = [np.nan, np.nan]
exp = np.array([0, 3, 3, -1])
self.assert_numpy_array_equal(cat.codes, exp)
cat = pd.Categorical([1, 2, np.nan, 3], categories=[1, 2, 3])
with tm.assert_produces_warning(FutureWarning):
cat.set_categories([1, 2, 3, np.nan], rename=True, inplace=True)
cat[pd.isnull(cat)] = np.nan
exp = np.array([0, 1, 3, 2])
self.assert_numpy_array_equal(cat.codes, exp)
def test_shift(self):
# GH 9416
cat = pd.Categorical(['a', 'b', 'c', 'd', 'a'])
# shift forward
sp1 = cat.shift(1)
xp1 = pd.Categorical([np.nan, 'a', 'b', 'c', 'd'])
self.assert_categorical_equal(sp1, xp1)
self.assert_categorical_equal(cat[:-1], sp1[1:])
# shift back
sn2 = cat.shift(-2)
xp2 = pd.Categorical(['c', 'd', 'a', np.nan, np.nan],
categories=['a', 'b', 'c', 'd'])
self.assert_categorical_equal(sn2, xp2)
self.assert_categorical_equal(cat[2:], sn2[:-2])
# shift by zero
self.assert_categorical_equal(cat, cat.shift(0))
def test_nbytes(self):
cat = pd.Categorical([1, 2, 3])
exp = cat._codes.nbytes + cat._categories.values.nbytes
self.assertEqual(cat.nbytes, exp)
def test_memory_usage(self):
cat = pd.Categorical([1, 2, 3])
self.assertEqual(cat.nbytes, cat.memory_usage())
self.assertEqual(cat.nbytes, cat.memory_usage(deep=True))
cat = pd.Categorical(['foo', 'foo', 'bar'])
self.assertEqual(cat.nbytes, cat.memory_usage())
self.assertTrue(cat.memory_usage(deep=True) > cat.nbytes)
# sys.getsizeof will call the .memory_usage with
# deep=True, and add on some GC overhead
diff = cat.memory_usage(deep=True) - sys.getsizeof(cat)
self.assertTrue(abs(diff) < 100)
def test_searchsorted(self):
# https://github.com/pydata/pandas/issues/8420
s1 = pd.Series(['apple', 'bread', 'bread', 'cheese', 'milk'])
s2 = pd.Series(['apple', 'bread', 'bread', 'cheese', 'milk', 'donuts'])
c1 = pd.Categorical(s1, ordered=True)
        c2 = pd.Categorical(s2, ordered=True)
# -*- coding: utf-8 -*-
"""Functions to sample sktime datasets.
Used in experiments to get deterministic resamples.
"""
import numpy as np
import pandas as pd
import sklearn.utils
def stratified_resample(X_train, y_train, X_test, y_test, random_state):
"""Stratified resample data without replacement using a random state.
Reproducable resampling. Combines train and test, resamples to get the same class
distribution, then returns new train and test.
Parameters
----------
X_train : pd.DataFrame
train data attributes in sktime pandas format.
y_train : np.array
train data class labels.
X_test : pd.DataFrame
test data attributes in sktime pandas format.
y_test : np.array
        test data class labels as np array.
random_state : int
        seed to enable reproducible resamples
Returns
-------
new train and test attributes and class labels.
"""
all_labels = np.concatenate((y_train, y_test), axis=None)
all_data = pd.concat([X_train, X_test])
random_state = sklearn.utils.check_random_state(random_state)
# count class occurrences
unique_train, counts_train = np.unique(y_train, return_counts=True)
unique_test, counts_test = np.unique(y_test, return_counts=True)
assert list(unique_train) == list(
unique_test
) # haven't built functionality to deal with classes that exist in
# test but not in train
# prepare outputs
X_train = pd.DataFrame()
y_train = np.array([])
    X_test = pd.DataFrame()
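    y_test = np.array([])
    # The rest of this function is not shown in this excerpt. A minimal sketch of
    # how the per-class loop could proceed follows; the shuffling and index-splitting
    # details are assumptions, not necessarily the original sktime implementation.
    for label_index in range(len(unique_train)):
        # all positions of this class across the combined data, shuffled reproducibly
        indices = np.where(all_labels == unique_train[label_index])[0]
        indices = random_state.permutation(indices)
        # keep the original per-class train/test counts
        train_indices = indices[:counts_train[label_index]]
        test_indices = indices[counts_train[label_index]:]
        X_train = pd.concat([X_train, all_data.iloc[train_indices]])
        y_train = np.concatenate([y_train, all_labels[train_indices]])
        X_test = pd.concat([X_test, all_data.iloc[test_indices]])
        y_test = np.concatenate([y_test, all_labels[test_indices]])
    return X_train, y_train, X_test, y_test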
#!/usr/bin/python3
'''
Background Reading to understand this tool ----
Paper: Portfolio Selection - <NAME> 1952 URL:
https://www.math.ust.hk/~maykwok/courses/ma362/07F/markowitz_JF.pdf
https://www.investopedia.com/terms/e/efficientfrontier.asp
https://en.wikipedia.org/wiki/Efficient_frontier
https://en.wikipedia.org/wiki/Markowitz_model
The idea is that there is an efficient set of portfolios, containing different securities
with different investment weights, for each amount of risk the investor is willing to endure.
This set of efficient portfolios can be calculated and discovered. This script helps us understand how!
Enjoy!
'''
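# Illustrative sketch (not part of the original script): for a weight vector w,
# mean daily returns mu and a daily covariance matrix cov, the two axes of the
# efficient frontier are usually computed as below. The numbers are made-up
# placeholders and the underscore-prefixed names are assumptions; only the
# formulas matter here.
import numpy as np
_w = np.array([0.5, 0.5])                        # e.g. 50% Ford, 50% Apple
_mu = np.array([0.0002, 0.0009])                 # placeholder mean daily returns
_cov = np.array([[0.00030, 0.00012],
                 [0.00012, 0.00045]])            # placeholder daily covariances
_port_return = np.sum(_w * _mu) * 250            # annualised expected return
_port_volatility = np.sqrt(_w @ (_cov * 250) @ _w)  # annualised risk (std. dev.)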
'''
Description:
------------
This Python script calculates Markowitz Efficient Frontier
API Used: Yahoo Finance
I am studying the efficiency (Markowitz-wise) of 500 portfolios containing only
stocks of Apple & Ford with many random weights for each portfolio
Duration: Data from 2010-1-1 till now
Requirements:
------------
Make sure that Pandas, pandas_datareader, numpy, matplotlib and xlrd are installed;
no need for anything else
Usage:
-----
python Calculate_Markowitz_Efficient_Frontier.py
Dr. <NAME>
Enjoy!
'''
'''
Some famous companies' stock tickers to work with
------------------------------------------------
Apple AAPL
Procter & Gamble Co PG
Microsoft Corporation MSFT
Exxon Mobil Corporation XOM
BP plc BP
AT&T Inc. T
Ford Motor Company F
General Electric Company GE
Alphabet Inc Class A (Google) GOOGL
'''
import numpy as np
import pandas as pd
from pandas_datareader import data as web
import matplotlib.pyplot as plt
# Ford is F and Apple is AAPL
Stock_tickers = ['F', 'AAPL']
ComparisionofEfficiency = pd.DataFrame()
# IMPORTS
import pandas as pd
# DATA
data = []
with open("Data - Day08.txt") as file:
for line in file:
data.append(line.strip().split(" "))
data = pd.DataFrame(data, columns=["register", "action", "action_value", "if_col", "condition_register", "condition", "condition_value"])
data["action_value"] = | pd.to_numeric(data["action_value"]) | pandas.to_numeric |
import matplotlib.pyplot as plt
from sklearn.metrics import mean_absolute_error
from keras import backend as K
import os
import numpy as np
import pandas as pd
import seaborn as sns
from sklearn.metrics import classification_report
import xgboost as xgb
CUR_DIR = os.path.abspath(os.curdir)
ROOT_DIR = os.path.dirname(CUR_DIR)
IMAGES_DIR = os.path.join(ROOT_DIR, "images")
DATA_DIR = os.path.join(ROOT_DIR, "data")
MODELS_DIR = os.path.join(ROOT_DIR, "models")
EVAL_DIR = os.path.join(ROOT_DIR, "evaluation")
MODEL_PERF_DIR = os.path.join(EVAL_DIR, "model_performance")
GRAPHS_DIR = os.path.join(EVAL_DIR, "graphs")
writepath = os.path.join(MODEL_PERF_DIR, "performance.csv")
plt.style.use('ggplot')
def plot_loss(history,model):
"""
The purpose of this function is to plot the validation and training loss function across epochs.
"""
plt.plot(history.history['mae'], label='training')
plt.plot(history.history['val_mae'], label='val')
plt.xlabel('epoch')
plt.ylabel('mae')
plt.title(f'Loss for {model.name}')
plt.legend(loc='upper right')
output_path = os.path.join(GRAPHS_DIR,f'Loss Plot {model.name}.png')
plt.savefig(output_path)
plt.show()
print(output_path)
def relu_advanced(x):
from keras import backend as K
"""The purpose of this function is the bound the output value of the network between 1 and 5 inclusively which matches the domain the stars get on the reviews."""
return (K.relu(x, max_value=5))
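# Illustrative sketch (assumption, not part of the original file): how the bounded
# activation above might be wired into a Keras regressor. The layer sizes and input
# dimension are made up; the lower bound of 1 is enforced later in
# performance_evaluation() via np.where.
def _example_model_with_bounded_output(input_dim=100):
    from keras.models import Sequential
    from keras.layers import Dense
    model = Sequential(name="example_bounded_regressor")
    model.add(Dense(64, activation="relu", input_shape=(input_dim,)))
    model.add(Dense(1, activation=relu_advanced))  # capped at 5 by K.relu(max_value=5)
    model.compile(optimizer="adam", loss="mae", metrics=["mae"])
    return model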
def transpose_df(df,reset_index,prefix):
if reset_index == False:
out_df = df.groupby('star',as_index=False)['prediction'].mean().T
elif reset_index == True:
out_df = pd.DataFrame(df.groupby('star')['prediction'].skew()).reset_index().T
new_header = out_df.iloc[0]
new_header = [f'{prefix}_{int(i)}_Star' for i in new_header]
new_header
out_df = out_df[1:] #take the data less the header row
out_df.columns = new_header
return out_df
def write_performance(model,mae,writepath,eval_df):
data = {
'model_name':model.name,
'mae':mae
}
grouped_eval_df = eval_df.groupby('star',as_index=False)['prediction'].mean()
avg_prefix = 'Average_Prediction_for'
skew_prefix = 'Prediction_Skewness_for'
avg_df = transpose_df(eval_df,False,avg_prefix)
skew_df = transpose_df(eval_df,True,skew_prefix)
for col in avg_df.columns:
data.update({col:avg_df[col][0]})
for col in skew_df.columns:
data.update({col:skew_df[col][0]})
out_df = pd.DataFrame(data,index=[0])
mode = 'a' if os.path.exists(writepath) else 'w'
header = False if os.path.exists(writepath) else True
out_df.to_csv(writepath, mode=mode, index=False, header=header)
# print message
print("Performance appended successfully.")
def plot_distributions(model,eval_df,field):
i=0
colors = ['black', 'midnightblue', 'darkgreen','mediumpurple','darkred']
if field == 'nb_of_words':
# bins = 20
max_val = 260
bin_field_name = f'binned_{field}'
eval_df = eval_df[eval_df.nb_of_words<=max_val]
bins = list(range(0,max_val,10))
labels = bins[:-1]
eval_df[bin_field_name] = pd.cut(eval_df[field], bins=bins, labels=labels)
eval_df.groupby(bin_field_name, as_index=False)['absolute_error'].mean()
b = sns.barplot(bin_field_name, 'absolute_error', data=eval_df, ci = False, color = colors[2])
plt.gcf().set_size_inches(17, 9)
b.axes.set_title(f"Mean Absolute Error by Review Length for model: {model.name}",fontsize=20)
b.set_xlabel(field, fontsize=17)
b.set_ylabel('Mean Absolute Error', fontsize=15)
b.tick_params(labelsize=14)
else:
bins = 5
fig, axes = plt.subplots(nrows=2, ncols=3, figsize=(15,10))
fig.delaxes(axes[1][2])
plt.text(x=0.5, y=0.94, s=f"Model Performance Distribution by Stars for model: {model.name}", fontsize=18, ha="center", transform=fig.transFigure)
plt.subplots_adjust(hspace=0.95)
for ax, (name, subdf) in zip(axes.flatten(), eval_df.groupby('star')):
subdf.hist(field, ax = ax, rwidth=0.9,color = colors[i],bins = bins)
i+=1
ax.set_title(name)
ax.set_xlabel(field)
ax.set_ylabel('Count')
# Generate histograms
plt.savefig(os.path.join(GRAPHS_DIR,f'{field.capitalize()}_Distribution_{model.name}.png'))
plt.show()
def example_scores(model,vect = None):
test1 = 'I like the top but it took long to deliver'
test2 = 'This app is trash'
test3 = 'The app is extremely slow, but I still like it'
test4 = 'I Do not Love this App'
test5 = 'Too many glitches'
test6 = 'Worthless app'
test7 = 'Do not download this app'
test8 = 'Horrible'
test9 = 'Could be better but is serviceable'
test10 = 'The servers are always down'
tests = [test1, test2, test3, test4, test5, test6, test7, test8, test9, test10]
for test in tests:
if hasattr(model,'xgboost'):
print(f'"{test}" receives a score of', model.predict(vect.transform([test])))
else:
print(f'"{test}" receives a score of', model.predict([test]).ravel())
def performance_evaluation(X_test, y_test, model, vect = None):
if hasattr(model,'xgboost'):
y_pred = np.around(model.predict(vect.transform(X_test)))
else:
y_pred = np.around(model.predict(X_test))
y_pred = np.where(y_pred < 1, 1, y_pred)
y_pred = np.where(y_pred > 5, 5, y_pred)
print(f'The prediction values range between {min(y_pred)} and {max(y_pred)}')
mae = mean_absolute_error(y_test, y_pred)
print(f'Mean Absolute Error: {mae}')
    eval_df = pd.merge(X_test, y_test, left_index=True, right_index=True)
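    # The remainder of this function is not in this excerpt. A plausible sketch,
    # assuming X_test is a Series of review texts and y_test is a Series named
    # "star" (both names are assumptions based on the helpers above); the exact
    # return value is also an assumption.
    eval_df["prediction"] = np.asarray(y_pred).ravel()
    eval_df["absolute_error"] = (eval_df["star"] - eval_df["prediction"]).abs()
    eval_df["nb_of_words"] = X_test.astype(str).str.split().str.len()
    return eval_df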
import numpy as np
from sklearn.cluster import MeanShift
from sklearn import preprocessing
import pandas as pd
'''
Pclass Passenger Class (1 = 1st; 2 = 2nd; 3 = 3rd)
survival Survival (0 = No; 1 = Yes)
name Name
sex Sex
age Age
sibsp Number of Siblings/Spouses Aboard
parch Number of Parents/Children Aboard
ticket Ticket Number
fare Passenger Fare (British pound)
cabin Cabin
embarked Port of Embarkation (C = Cherbourg; Q = Queenstown; S = Southampton)
boat Lifeboat
body Body Identification Number
home.dest Home/Destination
'''
df = pd.read_excel('../05. Clustering/titanic.xls')
original_df = pd.DataFrame.copy(df)
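# The original script continues beyond this excerpt; a minimal sketch of the usual
# next steps (the dropped columns, the encoding helper and the clustering call are
# assumptions):
df.drop(['body', 'name'], axis=1, inplace=True)
df.fillna(0, inplace=True)

def handle_non_numerical_data(df):
    # map every value of each non-numeric column to an integer code
    for column in df.columns:
        if df[column].dtype not in (np.int64, np.float64):
            codes = {val: i for i, val in enumerate(set(df[column].values.tolist()))}
            df[column] = df[column].map(codes)
    return df

df = handle_non_numerical_data(df)
X = preprocessing.scale(np.array(df.drop(['survived'], axis=1), dtype=float))
clf = MeanShift()
clf.fit(X)
original_df['cluster_group'] = clf.labels_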
from __future__ import print_function, division
import random
import sys
from matplotlib import rcParams
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import h5py
from keras.models import load_model
from keras.models import Sequential
from keras.layers import Dense, Conv1D, LSTM, Bidirectional, Dropout
from keras.utils import plot_model
from nilmtk.utils import find_nearest
from nilmtk.feature_detectors import cluster
from nilmtk.legacy.disaggregate import Disaggregator
from nilmtk.datastore import HDFDataStore
class RNNDisaggregator(Disaggregator):
    '''Attempt to create an RNN Disaggregator
Attributes
----------
model : keras Sequential model
mmax : the maximum value of the aggregate data
MIN_CHUNK_LENGTH : int
the minimum length of an acceptable chunk
'''
def __init__(self):
'''Initialize disaggregator
'''
self.MODEL_NAME = "LSTM"
self.mmax = None
self.MIN_CHUNK_LENGTH = 100
self.model = self._create_model()
def train(self, mains, meter, epochs=1, batch_size=128, **load_kwargs):
'''Train
Parameters
----------
mains : a nilmtk.ElecMeter object for the aggregate data
meter : a nilmtk.ElecMeter object for the meter data
epochs : number of epochs to train
batch_size : size of batch used for training
**load_kwargs : keyword arguments passed to `meter.power_series()`
'''
main_power_series = mains.power_series(**load_kwargs)
meter_power_series = meter.power_series(**load_kwargs)
# Train chunks
run = True
mainchunk = next(main_power_series)
meterchunk = next(meter_power_series)
if self.mmax == None:
self.mmax = mainchunk.max()
while(run):
mainchunk = self._normalize(mainchunk, self.mmax)
meterchunk = self._normalize(meterchunk, self.mmax)
self.train_on_chunk(mainchunk, meterchunk, epochs, batch_size)
try:
mainchunk = next(main_power_series)
meterchunk = next(meter_power_series)
except:
run = False
def train_on_chunk(self, mainchunk, meterchunk, epochs, batch_size):
'''Train using only one chunk
Parameters
----------
mainchunk : chunk of site meter
meterchunk : chunk of appliance
epochs : number of epochs for training
batch_size : size of batch used for training
'''
# Replace NaNs with 0s
mainchunk.fillna(0, inplace=True)
meterchunk.fillna(0, inplace=True)
ix = mainchunk.index.intersection(meterchunk.index)
mainchunk = np.array(mainchunk[ix])
meterchunk = np.array(meterchunk[ix])
mainchunk = np.reshape(mainchunk, (mainchunk.shape[0],1,1))
self.model.fit(mainchunk, meterchunk, epochs=epochs, batch_size=batch_size, shuffle=True)
def train_across_buildings(self, mainlist, meterlist, epochs=1, batch_size=128, **load_kwargs):
'''Train using data from multiple buildings
Parameters
----------
mainlist : a list of nilmtk.ElecMeter objects for the aggregate data of each building
meterlist : a list of nilmtk.ElecMeter objects for the meter data of each building
batch_size : size of batch used for training
epochs : number of epochs to train
**load_kwargs : keyword arguments passed to `meter.power_series()`
'''
assert len(mainlist) == len(meterlist), "Number of main and meter channels should be equal"
num_meters = len(mainlist)
mainps = [None] * num_meters
meterps = [None] * num_meters
mainchunks = [None] * num_meters
meterchunks = [None] * num_meters
# Get generators of timeseries
for i,m in enumerate(mainlist):
mainps[i] = m.power_series(**load_kwargs)
for i,m in enumerate(meterlist):
meterps[i] = m.power_series(**load_kwargs)
# Get a chunk of data
for i in range(num_meters):
mainchunks[i] = next(mainps[i])
meterchunks[i] = next(meterps[i])
if self.mmax == None:
self.mmax = max([m.max() for m in mainchunks])
run = True
while(run):
# Normalize and train
mainchunks = [self._normalize(m, self.mmax) for m in mainchunks]
meterchunks = [self._normalize(m, self.mmax) for m in meterchunks]
self.train_across_buildings_chunk(mainchunks, meterchunks, epochs, batch_size)
# If more chunks, repeat
try:
for i in range(num_meters):
mainchunks[i] = next(mainps[i])
meterchunks[i] = next(meterps[i])
except:
run = False
def train_across_buildings_chunk(self, mainchunks, meterchunks, epochs, batch_size):
'''Train using only one chunk of data. This chunk consists of data from
all buildings.
Parameters
----------
mainchunk : chunk of site meter
meterchunk : chunk of appliance
epochs : number of epochs for training
batch_size : size of batch used for training
'''
num_meters = len(mainchunks)
batch_size = int(batch_size/num_meters)
num_of_batches = [None] * num_meters
# Find common parts of timeseries
for i in range(num_meters):
mainchunks[i].fillna(0, inplace=True)
meterchunks[i].fillna(0, inplace=True)
ix = mainchunks[i].index.intersection(meterchunks[i].index)
m1 = mainchunks[i]
m2 = meterchunks[i]
mainchunks[i] = m1[ix]
meterchunks[i] = m2[ix]
num_of_batches[i] = int(len(ix)/batch_size) - 1
for e in range(epochs): # Iterate for every epoch
print(e)
batch_indexes = list(range(min(num_of_batches)))
random.shuffle(batch_indexes)
for bi, b in enumerate(batch_indexes): # Iterate for every batch
print("Batch {} of {}".format(bi,num_of_batches), end="\r")
sys.stdout.flush()
X_batch = np.empty((batch_size*num_meters, 1, 1))
Y_batch = np.empty((batch_size*num_meters, 1))
# Create a batch out of data from all buildings
for i in range(num_meters):
mainpart = mainchunks[i]
meterpart = meterchunks[i]
mainpart = mainpart[b*batch_size:(b+1)*batch_size]
meterpart = meterpart[b*batch_size:(b+1)*batch_size]
X = np.reshape(mainpart, (batch_size, 1, 1))
Y = np.reshape(meterpart, (batch_size, 1))
X_batch[i*batch_size:(i+1)*batch_size] = np.array(X)
Y_batch[i*batch_size:(i+1)*batch_size] = np.array(Y)
# Shuffle data
p = np.random.permutation(len(X_batch))
X_batch, Y_batch = X_batch[p], Y_batch[p]
# Train model
self.model.train_on_batch(X_batch, Y_batch)
print("\n")
def disaggregate(self, mains, output_datastore, meter_metadata, **load_kwargs):
'''Disaggregate mains according to the model learnt previously.
Parameters
----------
mains : a nilmtk.ElecMeter of aggregate data
meter_metadata: a nilmtk.ElecMeter of the observed meter used for storing the metadata
output_datastore : instance of nilmtk.DataStore subclass
For storing power predictions from disaggregation algorithm.
**load_kwargs : key word arguments
Passed to `mains.power_series(**kwargs)`
'''
load_kwargs = self._pre_disaggregation_checks(load_kwargs)
load_kwargs.setdefault('sample_period', 60)
load_kwargs.setdefault('sections', mains.good_sections())
timeframes = []
building_path = '/building{}'.format(mains.building())
mains_data_location = building_path + '/elec/meter1'
data_is_available = False
for chunk in mains.power_series(**load_kwargs):
if len(chunk) < self.MIN_CHUNK_LENGTH:
continue
print("New sensible chunk: {}".format(len(chunk)))
timeframes.append(chunk.timeframe)
measurement = chunk.name
chunk2 = self._normalize(chunk, self.mmax)
appliance_power = self.disaggregate_chunk(chunk2)
appliance_power[appliance_power < 0] = 0
appliance_power = self._denormalize(appliance_power, self.mmax)
# Append prediction to output
data_is_available = True
            cols = pd.MultiIndex.from_tuples([chunk.name])
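    # The class continues beyond this excerpt. The two helpers below are referenced
    # above but not shown; these are minimal sketches assuming simple max-scaling
    # by the aggregate maximum.
    def _normalize(self, chunk, mmax):
        return chunk / mmax

    def _denormalize(self, chunk, mmax):
        return chunk * mmax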
# -*- coding: utf-8 -*-
"""
Created on Wed Nov 24 10:26:06 2021
@author: snoone
"""
import glob
import os
import pandas as pd
pd.options.mode.chained_assignment = None # default='warn'
##make header from completed observations table
OUTDIR = "D:/Python_CDM_conversion/hourly/qff/cdm_out/header_table"
os.chdir("D:/Python_CDM_conversion/hourly/qff/cdm_out/observations_table")
col_list = ["observation_id", "report_id", "longitude", "latitude", "source_id","date_time"]
extension = 'psv'
#my_file = open("D:/Python_CDM_conversion/hourly/qff/ls1.txt", "r")
#all_filenames = my_file.readlines()
#print(all_filenames)
##use alist of file name sto run 5000 parallel
#with open("D:/Python_CDM_conversion/hourly/qff/test/ls.txt", "r") as f:
# all_filenames = f.read().splitlines()
all_filenames = [i for i in glob.glob('*.{}'.format(extension))]
##to start at beginning of files
for filename in all_filenames:
##to start at next file after last processed
#for filename in all_filenames[all_filenames.index('SWM00002338.qff'):] :
merged_df=pd.read_csv(filename, sep="|", usecols=col_list)
    ###produce header table using some of obs table column information
hdf = pd.DataFrame()
hdf['observation_id'] = merged_df['observation_id'].str[:11]
hdf["report_id"]=merged_df["report_id"]
hdf["application_area"]=""
hdf["observing_programme"]=""
hdf["report_type"]="0"
hdf["station_type"]="1"
hdf["platform_type"]=""
hdf["primary_station_id"]=merged_df["report_id"].str[:-19]
hdf["primary_station_id_scheme"]="13"
hdf["location_accuracy"]="0.1"
hdf["location_method"]=""
hdf["location_quality"]="3"
hdf["longitude"]=merged_df["longitude"]
hdf["latitude"]=merged_df["latitude"]
hdf["crs"]="0"
hdf["station_speed"]=""
hdf["station_course"]=""
hdf["station_heading"]=""
hdf["height_of_station_above_local_ground"]=""
hdf["height_of_station_above_sea_level_accuracy"]=""
hdf["sea_level_datum"]=""
hdf["report_meaning_of_timestamp"]="2"
hdf["report_timestamp"]=""
hdf["report_duration"]="0"
hdf["report_time_accuracy"]=""
hdf["report_time_quality"]=""
hdf["report_time_reference"]="0"
hdf["platform_subtype"]=""
hdf["profile_id"]=""
hdf["events_at_station"]=""
hdf["report_quality"]=""
hdf["duplicate_status"]="4"
hdf["duplicates"]=""
hdf["source_record_id"]=""
hdf ["processing_codes"]=""
hdf["source_id"]=merged_df["source_id"]
    hdf['record_timestamp'] = pd.to_datetime('now')
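    # The script presumably continues by writing the header table out; a minimal
    # sketch (the output file naming is an assumption):
    hdf = hdf.astype(str)
    hdf.to_csv(os.path.join(OUTDIR, "header_" + filename), index=False, sep="|")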
from .handler import function_handler
import yaml
import pytest
import pandas as pd
import numpy as np
from packaging import version
def transform_setup(function):
# read in file infos
with open("tests/test_yamls/test_transform.yml", "r") as stream:
file_infos = yaml.safe_load(stream)
if function == "decompress-content":
extract_infos = [file_info for file_info in file_infos if file_info['function'] == "decompress-content"]
transform_infos = []
for info in extract_infos:
# get file infos
input_file = info['input_file']
output_file = info['output_file']
# create input & output dfs
f = open(input_file, "rb")
binary_str = f.read()
input_df = pd.DataFrame({0: [binary_str]})
output_df = pd.read_csv(output_file)
transform_infos.append((input_df, output_df))
return transform_infos
if function == "transform-to-dataframe":
extract_infos = [file_info for file_info in file_infos if file_info['function'] == "transform-to-dataframe"]
transform_infos = []
for info in extract_infos:
# get file infos
input_file = info['input_file']
output_file = info['output_file']
str_type = info['str_type']
# create input & output dfs
input_df = pd.read_csv(input_file)
output_df = pd.read_csv(output_file)
transform_infos.append((input_df, output_df, str_type))
return transform_infos
if function == "split-dataframe-rows":
extract_infos = [file_info for file_info in file_infos if file_info['function'] == "split-dataframe-rows"]
transform_infos = []
for info in extract_infos:
# get file infos
input_file = info['input_file']
# create input & output dfs
input_df = pd.read_csv(input_file, header=None)
output_dfs = [pd.read_csv(output_df) for output_df in info['output_files']]
transform_infos.append((input_df, output_dfs))
return transform_infos
if function == "flatten-lists-to-dataframe":
extract_infos = [file_info for file_info in file_infos if file_info['function'] == "flatten-lists-to-dataframe"]
transform_infos = []
for info in extract_infos:
# get file infos
input_file = info['input_file']
output_file = info['output_file']
extract_field = info['extract_field']
preserve_origin_data = info['preserve_origin_data']
# create input & output dfs
input_df = pd.read_csv(input_file)
            output_df = pd.read_csv(output_file)
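            # The excerpt cuts off here; by analogy with the branches above, this
            # branch presumably finishes like this (tuple order is an assumption):
            transform_infos.append((input_df, output_df, extract_field, preserve_origin_data))
        return transform_infos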
import copy
import datetime
from datetime import datetime
import re
import numpy as np
import pandas as pd
from PIL import Image
import streamlit as st
from streamlit import markdown as md
from streamlit import caching
import gsheet
LOCAL = True
def is_unique(s):
a = s.to_numpy() # s.values (pandas<0.24)
return (a[0] == a).all()
def st_config():
"""Configure Streamlit view option and read in credential file if needed check if user and password are correct"""
st.set_page_config(layout="wide")
pw = st.sidebar.text_input("Enter password:")
if pw == "":#st.secrets["PASSWORD"]:
if LOCAL:
return ""
else:
return st.secrets["GSHEETS_KEY"]
else:
return None
@st.cache
def read_data(creds):
"""Read court tracking data in and drop duplicate case numbers"""
# try:
df = gsheet.read_data(gsheet.open_sheet(gsheet.init_sheets(creds),"01_Community_lawyer_test_out_final","Frontend"))
# df.drop_duplicates("Case Number",inplace=True) #Do we want to drop duplicates???
return df
# except Exception as e:
# return None
#def date_options(min_date,max_date,key):
# cols = st.beta_columns(2)
# key1 = key + "a"
# key2 = key + "b"
# start_date = cols[0].date_input("Start Date",min_value=min_date,max_value=max_date,value=min_date,key=key1)#,format="MM/DD/YY")
# end_date = cols[1].date_input("End Date",min_value=min_date,max_value=max_date,value=datetime.today().date(),key=key2)#,format="MM/DD/YY")
# return start_date,end_date
#UI start date end date/ eviction motion or both drop duplicates?:
def date_options(df):
min_date = df.court_date.min().date()
max_date = df.court_date.max().date()
start_date = st.sidebar.date_input("Start Date",min_value=min_date,max_value=max_date,value=min_date)#,format="MM/DD/YY")
end_date = st.sidebar.date_input("End Date",min_value=min_date,max_value=max_date,value=max_date)#,format="MM/DD/YY")
df = filter_dates(df,start_date,end_date)
st.sidebar.markdown(f"### {start_date} to {end_date} cases tracked: "+str(len(df)))
return df
def filter_dates(df,start_date,end_date):
return df.loc[(df["court_date"].apply(lambda x: x.date())>=start_date) & (df["court_date"].apply(lambda x: x.date())<=end_date)]
def motion_options(df):
motion = st.sidebar.radio("Motion Hearing, Eviction Trial, Both",["Both","Motion Hearing","Eviction Trial"])
return filter_motion(df,motion)
def filter_motion(df,motion):
if motion != "Both":
df = df.loc[df["motion_hearing"].eq(motion)]
st.sidebar.markdown(f"### {motion}s tracked: "+str(len(df)))
else:
st.sidebar.markdown(f"### Eviction Trials and Motion Hearings tracked: "+str(len(df)))
pass
return df
def agg_cases(df,col,i):
df_r = df.groupby([col,"Case Number"]).count().iloc[:,i]
df_r.name = "count"
df_r = pd.DataFrame(df_r)
df_a = pd.DataFrame(df_r.to_records())
df_r = df_r.groupby(level=0).sum()
df_r["cases"] = df_a.groupby(col)["Case Number"].agg(lambda x: ','.join(x))
return df_r
def agg_checklist(df_r):
df_r["result"]=df_r.index
df_b = pd.concat([pd.Series(row['count'], row['result'].split(', ')) for _,row in df_r.iterrows()]).reset_index().groupby("index").sum()
df_a = pd.concat([pd.Series(row['cases'], row['result'].split(', ')) for _,row in df_r.iterrows()]).reset_index().groupby("index").agg(lambda x: ", ".join(x))
df_r = df_b.merge(df_a,right_index=True,left_index=True)
return df_r
def clean_df(df):
    df = df.astype(str)  # maybe not the best way to fix this... we can't do sums now; bug with int vars showing up as two things on the bar graph
df["court_date"] = pd.to_datetime(df["court_date"])
return df
#Show more text on full screen dataframe
def render_page(df):
"""Function to render all of the pages elements except the api key login"""
#Clean Data
df = clean_df(df)
#All Data Stats
st.header("Court Tracking Data")
cols = st.beta_columns(4)
#Collapsible All Data
with st.beta_expander("All Data"):
st.dataframe(df)
st.markdown("### Total Cases Tracked: "+str(len(df)))
#Render and Evaluate UI options
df = date_options(df)
df = motion_options(df)
#Render Each column as a data frame and a Bar Graph
check_list = ["Technical Problems?","Plaintiff Representation","Tenant Representation","Fee Types","NTV Communicated By","Breach of Lease"]
for i,col in enumerate(df.columns):
try: #this fails on Case Number probably should fix it but ehh
df_r = agg_cases(df,col,i)
if col in check_list:
df_r = agg_checklist(df_r)
df_r.columns = ["Count","Cases"]
try: #Fails where no na's
count_na = str(df_r.loc[""]["Count"])
df_r = df_r.drop("")
except:
count_na = 0
if not df_r.empty:
col1, col2 = st.beta_columns(2)
col1.header(col)
#What sizes do we want here?
col1.dataframe(df_r)
col2.header(col)
col2.bar_chart(df_r)
else:
md(f"## {col} is empty")
md(f"### Total Unanswered: {count_na}")
except Exception as e:
pass
if __name__ == "__main__":
creds = st_config()
if creds is not None:
if LOCAL:
            df = pd.read_csv("../data/01_Community_lawyer_test_out_final - Backend.csv")
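        # The excerpt cuts off here; a plausible ending based on the helpers
        # defined above (read_data / render_page) -- the exact flow is an assumption:
        else:
            df = read_data(creds)
        if df is not None:
            render_page(df)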
import pandas as pd
from pathlib import Path
def load(filepath: Path):
df = pd.read_csv(filepath)
    df['Date'] = pd.to_datetime(df.Date, dayfirst=True)
import bs4 as bs
import urllib.request
import pandas as pd
import numpy as np
pd.set_option('display.max_columns', 500)
pd.set_option('display.max_rows', 10000)
pd.set_option('display.width', 1000)
source = urllib.request.urlopen('https://www.geonames.org/countries/').read()
soup = bs.BeautifulSoup(source, 'lxml')
table = soup.find('table', id='countries')
table_rows = table.find_all('tr')
rows = []
for tr in table_rows:
td = tr.find_all('td')
row = [i.text for i in td]
rows.append(row)
df = pd.DataFrame(rows, columns=['ISO2', 'ISO3', 'ISO3n', 'fips', 'country', 'capital', 'area', 'pop', 'continent'])
df = df.iloc[1:, :]
# keep columns to merge with datasets
merge_pop = df[['ISO2', 'ISO3', 'country']]
# Namibia cities
namibia = pd.read_csv('na.csv')
namibia = namibia.rename(columns={'city': 'asciiname', 'lat': 'latitude', 'lng': 'longitude',
'country': 'countries'})
namibia = namibia.drop(['iso2', 'admin_name', 'capital', 'population_proper'], axis=1)
namibia[['population', 'latitude', 'longitude']] = namibia[['population', 'latitude', 'longitude']].astype(str)
# read cities.csv
# error: https://stackoverflow.com/questions/18171739/unicodedecodeerror-when-reading-csv-file-in-pandas-with-python
cities = pd.read_csv('cities15000.csv', encoding='latin')
import logging
import numpy as np
import pandas as pd
from heapq import nlargest
from random import randint
from common.constant.df_from_csv import BAD_WORDS_DF, SENTIMENTAL_NON_ADJ_WORDS, KWDF, CCDF, SYDF, WORDS_DESPITE_HIMSELF
from common.constant.intent_type import Intent
from common.constant.string_constant import StringConstant
from common.word_format.df_utils import Nlp_util, Df_util
from common.word_format.word_formatter import WordFormatter
from core.nlp.response_generator.product.base.base_response_generator import BaseResponseGenerator
from core.nlp.response_generator.product.cct.reaction_generator import ReactionGenerator
class UnknownError(Exception):
pass
class RepeatResponseGenerator(BaseResponseGenerator):
def __call__(self):
try:
print("\ntext_kw_df\n{}".format(self.message.text_kw_df))
repeatable_sent_sidx = self.__select_repeatable_sent_sidx(self.message.text_df, self.message.intent_list)
if len(repeatable_sent_sidx) == 0:
result_list = ReactionGenerator.generate_listening()
self.response_data['regular'] = result_list
return self.response_data
sidx_to_repeat = self.__select_sidx_to_repeat(self.message.text_df, self.message.sentiment_score_df,
repeatable_sent_sidx)
print("\nSidx to repeat\n{}".format(sidx_to_repeat))
if sidx_to_repeat:
result_list = self.__generate_repeat(self.message.text_df, self.message.text_kw_df, sidx_to_repeat)
print("\nREPEAT RESULT\n{}".format(result_list))
if not result_list:
result_list = ReactionGenerator.generate_listening()
else:
result_list = ReactionGenerator.generate_listening()
self.response_data['regular'] = result_list
return self.response_data
except:
logging.exception('')
responses = ReactionGenerator.generate_listening()
self.response_data['regular'] = responses
return self.response_data
@classmethod
def __generate_repeat(cls, text_df, text_kw_df, sidx_to_repeat):
try:
repeat_list = []
for idx, sidx in enumerate(sidx_to_repeat):
target_df = text_df[text_df.sidx == sidx].copy().reset_index(drop=True)
fixed_df = cls.__replace_word_by_csv(target_df)
# TODO fix the convert_df_to_str() so that it would not need [1:] part.
repeat_text = WordFormatter.Df2Str(fixed_df)[1:]
# TODO here has to be same structure as message_type_filter since one sentence can have "want to" and despising word at the same time.
# TODO or
if len(target_df) == 1 and \
(target_df["pos"].iloc[0] in Nlp_util.pos_ADJECTIVEs
or target_df["word"].iloc[0] in SENTIMENTAL_NON_ADJ_WORDS.word.tolist()):
repeat_list += cls.create_special_repeat_for_only_one_adj_word_sent(target_df)
elif cls.__mean_no_friends(target_df):
repeat_list += cls.__create_response_for_no_friends()
elif cls.__has_what_to_do(target_df):
repeat_list += cls.__create_response_for_what_to_V(fixed_df)
elif cls.__is_despising_himself(target_df):
repeat_list += cls.__alter_repeat_euphemistic(repeat_text)
elif cls.__has_nobody_V(target_df):
repeat_list += cls.__alter_repeat_euphemistic(repeat_text)
elif cls.__does_user_feel_useless(target_df):
repeat_list += cls.__create_response_for_healing_useless()
elif cls.__has_say_plus_bad_word(target_df):
repeat_list += cls.__create_response_for_S_said_bad_word(fixed_df)
elif cls.__exists_want_to(target_df):
repeat_list += cls.__alter_repeat_for_want_to(fixed_df)
elif cls.__exists_make_S_feel_ADJ(target_df):
repeat_list += cls.__alter_repeat_for_make_S_feel_ADJ(target_df)
elif cls.__has_because(target_df):
repeat_list += cls.__alter_repeat_for_because_sent(fixed_df, repeat_text)
elif cls.__exists_third_person_BeVerb_pair(target_df):
repeat_list += cls.__alter_repeat_for_third_person_BeVerb_pair(repeat_text)
elif cls.__has_dont_think_SV_sent(target_df):
repeat_list += cls.__alter_repeat_for_dont_think_SV(fixed_df)
elif cls.__has_wish_S_V(target_df):
repeat_list += cls.__alter_repeat_for_wish(fixed_df)
elif cls.__has_need_NN(target_df):
repeat_list += cls.__alter_repeat_for_need_sent(fixed_df)
elif cls.__exists_keyword(text_kw_df):
is_last_sentence = idx == len(sidx_to_repeat) - 1
repeat_list += cls.__alter_repeat_for_keyword(text_df, text_kw_df, idx, repeat_text,
is_last_sentence=is_last_sentence)
else:
repeat_list += cls.__alter_repeat_for_plain_repeat(repeat_text, idx)
return repeat_list
except:
logging.exception('')
return []
@staticmethod
def __alter_repeat_for_wish(fixed_df):
wish_idx = Nlp_util.get_idx_list_of_word("wish", fixed_df["base_form"])[0]
row_of_subj = Nlp_util.get_wordsDF_of_wordlist_after_idx(fixed_df, Nlp_util.pos_NOUNs+Nlp_util.pos_PRPs, wish_idx, column_name="pos").iloc[0]
row_of_verb = Nlp_util.get_wordsDF_of_wordlist_after_idx(fixed_df, Nlp_util.pos_VERBs, row_of_subj.name, column_name="pos").iloc[0]
subj = row_of_subj.word
verb = row_of_verb.word
after_verb = WordFormatter.Series2Str(fixed_df.loc[row_of_verb.name+1:, "word"])
objective_subj = Nlp_util.convert_nominative_noun_to_objective(subj)
if subj == "you":
repeat_list = [
["you really want to "+verb+" "+after_verb],
["so you seriously hope to "+verb+" "+after_verb],
["so you are dying to "+verb+" "+after_verb]
]
else:
repeat_list = [
["you really want "+objective_subj+" to "+verb+" "+after_verb],
["you really wanna have "+objective_subj+" "+verb+" "+after_verb],
["you really wanna make "+objective_subj+" "+verb+" "+after_verb]
]
cmp_list = [
["but sounds you feel bit too much to expect that now..?"],
["and sounds you feel like its impossible..?"],
["and seems like you dont think it never happen😓"]
]
random_idx_for_repeat_list = randint(0, len(repeat_list) - 1)
random_idx_for_cmp_list = randint(0, len(cmp_list) - 1)
return repeat_list[random_idx_for_repeat_list]+cmp_list[random_idx_for_cmp_list]
@staticmethod
def __has_wish_S_V(target_df):
if Df_util.anything_isin(["wish"], target_df["base_form"]):
wish_idx = Nlp_util.get_idx_list_of_word("wish", target_df["base_form"])[0]
if Nlp_util.are_words1_words2_words3_in_order(target_df.loc[wish_idx:,:], Nlp_util.pos_NOUNs+Nlp_util.pos_PRPs, Nlp_util.pos_VERBs, df_column="pos"):
return True
else:
return False
else:
return False
# include sent 'i need my life'
@staticmethod
def __alter_repeat_for_need_sent(fixed_df):
try:
idx_of_need = Nlp_util.get_idx_list_of_word("need", fixed_df["base_form"])[0]
row_of_first_noun = Nlp_util.get_wordsDF_of_wordlist_after_idx(fixed_df, Nlp_util.pos_NOUNs + Nlp_util.pos_PRPs, idx_of_need, column_name="pos").iloc[0]
if fixed_df.loc[row_of_first_noun.name-1, "pos"] in Nlp_util.pos_ADJECTIVEs + ["PRP$", "DT"]:
noun = WordFormatter.Series2Str(fixed_df.loc[row_of_first_noun.name-1:row_of_first_noun.name, "word"])
else:
noun = fixed_df.loc[row_of_first_noun.name, "word"]
noun_nominative = Nlp_util.convert_objective_noun_to_nominative(noun)
options = [
["so " + noun_nominative + " is very important thing for you..",
"and sounds its kinda hard to get it now right😢"],
["so its like its not easy to get " + noun + " now but you really want..",
"and it can frustrate you😞"],
["sounds you really want " + noun + "..", "might be tough time for you to seek for it now😓"]
]
random_idx_for_options = randint(0, len(options) - 1)
return options[random_idx_for_options]
except:
logging.exception('')
repeat_text = WordFormatter.Df2Str(fixed_df)[1:]
return [repeat_text]
# except i need you to do~~ i need doing~~
@staticmethod
def __has_need_NN(target_df):
try:
df_ex_adverb = target_df[~target_df.pos.isin(Nlp_util.pos_ADVERBs)].reset_index(drop=True)
if Df_util.anything_isin(["need"], df_ex_adverb["base_form"]):
idx_of_need = Nlp_util.get_idx_list_of_word("need", df_ex_adverb["base_form"])[0]
if Df_util.anything_isin(Nlp_util.pos_NOUNs + Nlp_util.pos_PRPs,
df_ex_adverb.loc[idx_of_need + 1:, "pos"]) and not Df_util.anything_isin(
Nlp_util.IDEA_TYPE, df_ex_adverb.loc[idx_of_need + 1:, "base_form"]):
if Df_util.anything_isin(["to"], df_ex_adverb.loc[idx_of_need + 1:, "base_form"]):
return False
else:
return True
else:
return False
else:
return False
except:
logging.exception('')
return False
# ex) i dont think he likes me
@staticmethod
def __alter_repeat_for_dont_think_SV(fixed_df):
try:
# TODO see if its neccesary to care about should and cant
idx_of_think = Nlp_util.get_idx_list_of_word("think", fixed_df["base_form"])[0]
df_after_think = fixed_df.loc[idx_of_think + 1:, :].reset_index(drop=True)
verb_list = Nlp_util.make_verb_list(df_after_think, type="normal")
noun_list = Nlp_util.make_noun_list(df_after_think)
# possibly bug happen here since amount of verbs are different in cant do/dont do
is_negative_form = Df_util.anything_isin(["not", "never"], df_after_think.loc[:, "base_form"])
# can add possibly or likely(when its negative)
head_words = ["so ", "so probably ", "probably ", "so maybe ", "maybe "]
random_idx_for_heads_words = randint(0, len(head_words) - 1)
if is_negative_form:
            # first take the subject, put "wouldnt" right after it, then the verb in base form, and keep the rest as-is
head_word = head_words[random_idx_for_heads_words]
subj = noun_list["word"].iloc[0]
auxiliary_verb = " would "
idx_of_not = Nlp_util.get_idx_list_of_word_list(["not", "never"], df_after_think.loc[:, "base_form"])[0]
verb_row = verb_list.loc[idx_of_not:, :].iloc[0]
verb = verb_row.base_form + " "
after_verb = WordFormatter.Series2Str(df_after_think.loc[verb_row.name + 1:, "word"])
return [head_word + subj + auxiliary_verb + verb + after_verb]
else:
head_word = head_words[random_idx_for_heads_words]
subj = noun_list["word"].iloc[0]
auxiliary_verb = " wouldnt "
verb = verb_list["base_form"].iloc[0] + " "
after_verb = WordFormatter.Series2Str(df_after_think.loc[verb_list.index[0] + 1:, "word"])
return [head_word + subj + auxiliary_verb + verb + after_verb]
except:
logging.exception('')
return []
@staticmethod
def __has_dont_think_SV_sent(df):
try:
df_ex_adverb = df[~df.pos.isin(Nlp_util.pos_ADVERBs)].reset_index(drop=True)
exists_i_dont_think = Df_util.anything_isin(["i do not think"], df_ex_adverb["base_form"])
if exists_i_dont_think:
idx_of_dont_think = Nlp_util.get_idx_list_of_idiom("i do not think", df_ex_adverb["base_form"])[0]
if len(RepeatResponseGenerator.get_sidx_of_not_basic_svo_sent(
df_ex_adverb.loc[idx_of_dont_think + 4:, :])) == 0:
return True
else:
return False
else:
return False
except:
logging.exception('')
return False
@staticmethod
def __alter_repeat_for_because_sent(df, repeat_text):
try:
if df["base_form"].iloc[0] in ["because", "since"]:
repeat_text = "its " + repeat_text
return [repeat_text]
elif Df_util.anything_isin(["because of"], df.loc[2:, "base_form"]) and not Df_util.anything_isin(
["it is", "that is"], df.loc[:3, "base_form"]):
because_of_idx = Nlp_util.get_idx_list_of_idiom("because of", df["base_form"])[0]
first_part = WordFormatter.Df2Str(df.loc[:because_of_idx - 1, :])
last_part = "and its" + WordFormatter.Df2Str(df.loc[because_of_idx:, :])
return [first_part, last_part]
else:
raise UnknownError
except:
logging.exception('')
return [repeat_text]
@staticmethod
def __has_because(df):
return Df_util.anything_isin(["because of"], df["base_form"]) or df["base_form"].iloc[0] in ["because", "since"]
@staticmethod
def __create_response_for_S_said_bad_word(df):
supportive_words_before_cmp = [
"thats",
"sounds",
"its",
"it should be",
]
cmp_words = [
"sad..",
"tough..",
"hard..",
"cruel..",
]
idx_list_of_say = Nlp_util.get_idx_list_of_word("say", df["base_form"])
noun_row_just_before_say = \
df[:idx_list_of_say[0]].loc[df["pos"].isin(Nlp_util.pos_NOUNs + Nlp_util.pos_PRPs), :].iloc[-1]
if noun_row_just_before_say.name != 0 and df.loc[noun_row_just_before_say.name - 1, "word"] in ["their", "his",
"her", "your"]:
the_person_said_bad_word_to_user = df.loc[noun_row_just_before_say.name - 1, "word"] + " " + \
noun_row_just_before_say["word"]
else:
the_person_said_bad_word_to_user = noun_row_just_before_say["word"]
ask = [
["why did " + the_person_said_bad_word_to_user + " said that?"],
[the_person_said_bad_word_to_user + " always said that?"],
["like any reason " + the_person_said_bad_word_to_user + " said that to you?"],
]
random_idx_for_cmp = randint(0, len(supportive_words_before_cmp) - 1)
random_idx_for_healing = randint(0, len(cmp_words) - 1)
random_idx_for_ask = randint(0, len(ask) - 1)
return [supportive_words_before_cmp[random_idx_for_cmp] + " " + cmp_words[random_idx_for_healing]] + ask[
random_idx_for_ask]
@staticmethod
def __has_say_plus_bad_word(df):
try:
if any([Nlp_util.are_words1_words2_words3_in_order(df, ["say", "tell"], ["i be", "i look"],
[negative_word]) for negative_word in
KWDF[KWDF['Type'] == 'n'].keyword.tolist()]):
return True
elif any([Nlp_util.are_words1_words2_words3_in_order(df, ["say", "tell"],
["i be not", "i do not look"],
[positive_word]) for positive_word in
KWDF[KWDF['Type'] == 'p'].keyword.tolist()]):
return True
else:
return False
except:
logging.exception('')
return False
@staticmethod
def __has_nobody_V(df):
try:
idx_list_of_nobody = Nlp_util.get_idx_list_of_word("nobody", df["base_form"])
if len(idx_list_of_nobody) == 0:
return False
else:
if any(df.loc[idx_list_of_nobody[0]:, "pos"].isin(Nlp_util.pos_VERBs)):
return True
else:
return False
except:
logging.exception('')
return False
@staticmethod
def __does_user_feel_useless(df):
try:
idx_list_of_useless = Nlp_util.get_idx_list_of_idiom_list(["be useless", "feel useless"], df["base_form"])
if len(idx_list_of_useless) == 0:
return False
else:
for useless_idx in idx_list_of_useless:
is_subj_i = Df_util.anything_isin(["i"], df.loc[:useless_idx, "word"])
if is_subj_i:
return True
return False
except:
logging.exception('')
return False
@staticmethod
def __create_response_for_healing_useless():
cmp = [
["I know its hard when you dont feel any appreciation from anybody."],
["you feel useless now.. dealing with the feeling is not easy right"],
["sounds like you see yourself worthless and you are unsure how to help yourself now."],
]
healing = [
["but i really think you are one of kind and irreplaceable.",
"it is because nobody on this planet will be just like you.",
"I know it is hard, but i want you to be yourself and i always love you😊"],
["Just let me tell you that I love the way you are.",
"I never measure your value since i know you are priceless",
"I really think nobody can compare with you😊"],
["you know, we tend to compare ourselves to other people.", "eventho we know that we are all different",
"just let me tell you that there is no problem being just different.", "and i love the way you are😊"],
]
random_idx_for_cmp = randint(0, len(cmp) - 1)
random_idx_for_healing = randint(0, len(healing) - 1)
return cmp[random_idx_for_cmp] + healing[random_idx_for_healing]
@classmethod
def __exists_want_to(cls, df):
try:
df_without_adverb = df[~df.pos.isin(Nlp_util.pos_ADVERBs)]
noun_list = Nlp_util.make_noun_list(df)
verb_list = Nlp_util.make_verb_list(df, type="basic")
idx_of_i_wanna = Nlp_util.get_idx_list_of_idiom("i want to", df_without_adverb.base_form)
if len(idx_of_i_wanna) != 0 and len(df.loc[idx_of_i_wanna[0] + 2:, :]) > 1:
if cls.__exists_word_after_want_to(df) and Nlp_util.is_first_subject_in({"i"}, noun_list, verb_list):
return True
else:
return False
else:
return False
except:
logging.exception('')
return False
@staticmethod
def __exists_word_after_want_to(df):
try:
idx_of_i = Nlp_util.get_idx_list_of_idiom("want to", df.word)[0]
length_after_want_to = len(df.loc[idx_of_i + 2, :]) if len(df) >= idx_of_i + 3 else 0
return length_after_want_to > 2
except:
logging.exception('')
return False
@staticmethod
def __has_what_to_do(df):
try:
df_ex_adverb = df[~df.pos.isin(Nlp_util.pos_ADVERBs)]
return Nlp_util.are_words1_words2_words3_in_order(df_ex_adverb, ["i"], ["not know", "not sure"],
["what to", "how to"])
except:
logging.exception('')
return False
@staticmethod
def __create_response_for_what_to_V(df):
df_after_what_to = df.loc[
Nlp_util.get_idx_list_of_idiom_list(["what to", "how to"], df["base_form"])[0] + 2:, :]
words_after_what_to = WordFormatter.Df2Str(df_after_what_to)
cmp = [
["it must be not easy to find how to" + words_after_what_to],
["now you are looking for the way to" + words_after_what_to],
["should be not that easy to find how to" + words_after_what_to],
]
encourage = [
["but i am sure that thinking about it and speaking out it helps you🤗"],
["eventho its always tough to find the right way, you try to figure it out. Thats impressing me😊"],
["plz let me know any idea comes to your mind now. it might help you figuring it out☺️"],
["tell me if you have any little idea. It could help you finding ur way😊"],
]
random_idx_for_cmp = randint(0, len(cmp) - 1)
random_idx_for_encourage = randint(0, len(encourage) - 1)
return cmp[random_idx_for_cmp] + encourage[random_idx_for_encourage]
@staticmethod
def __mean_no_friends(df):
try:
exists_nobody_likes_me = Nlp_util.are_words1_words2_words3_in_order(df, ["nobody", "no one"],
["like", "love"], ["me"])
exists_friends_dont_like_me = Nlp_util.are_words1_words2_words3_in_order(df, ["friend", "they",
"everybody"],
["not like", "not love",
"hate"], ["me"])
exists_have_no_friend = Nlp_util.are_words1_words2_words3_in_order(df, ["i"],
["not have", "have no"],
["friend"])
if exists_nobody_likes_me or exists_friends_dont_like_me or exists_have_no_friend:
return True
else:
return False
except:
logging.exception('')
return False
@staticmethod
def __create_response_for_no_friends():
express_feeling = [
["thats sad.."],
["sounds really tough.."],
["it must be a hard time for you.."]
]
compassion = [
["i know its just hard when you dont have anyone to be with"],
["i really feel that being alone can be really scary and can make you feel unstable and anxious"],
["it is always sad being yourself for long and it kinda makes you feel insecure sometimes"]
]
being_with_you = [
["not sure i can be enough for you but let me tell you to know that i am always here for you😊"],
[
"just let me reassure you that i will always be here for you even tho i am nothing near perfect. i am just here to listen🤗"],
[
"since it seems like a really tough time for you, I want you to think of our conversations as a space where you can feel safe and connected. I am here for you☺️"]
]
random_idx_for_express_feeling = randint(0, len(express_feeling) - 1)
random_idx_for_compassion = randint(0, len(compassion) - 1)
random_idx_for_being_with_you = randint(0, len(being_with_you) - 1)
return express_feeling[random_idx_for_express_feeling] + compassion[random_idx_for_compassion] + being_with_you[
random_idx_for_being_with_you]
    # basically assumes at most one "hard"/"difficult" per sentence
@staticmethod
def __exists_hard_to(df):
# idx_of_hard = Nlp_util.get_idx_list_of_word_list(["difficult", "hard"], df["base_form"])[0]
# num_of_not = (df["word"].isin(Nlp_util.NO_TYPE)).sum()
pass
@staticmethod
def __alter_repeat_for_make_S_feel_ADJ(df):
try:
idx_of_make = Nlp_util.get_idx_list_of_word_list(["make"], df["base_form"])[0]
subj = Nlp_util.change_object_pronoun_to_pronoun(df.loc[idx_of_make + 1, "word"])
df_after_subj = df.loc[idx_of_make + 2:idx_of_make + 4, :]
adj = df_after_subj.loc[df_after_subj["pos"].isin(Nlp_util.pos_ADJECTIVEs), "word"].iloc[0]
subj_adj_list = [subj, adj]
options = [
["{0[0]} feel {0[1]} because of that".format(subj_adj_list)],
["thats getting {0[0]} feel {0[1]}".format(subj_adj_list)],
["thats the moment {0[0]} feel {0[1]}".format(subj_adj_list)],
]
random_idx = randint(0, len(options) - 1)
return options[random_idx]
except:
logging.exception('')
return []
@staticmethod
def __exists_make_S_feel_ADJ(df):
try:
idx_list_of_make = Nlp_util.get_idx_list_of_word_list(["make"], df["base_form"])
if len(idx_list_of_make) == 0:
return False
else:
is_after_make_prp = df.loc[idx_list_of_make[0] + 1, "pos"] in Nlp_util.pos_PRPs
if is_after_make_prp:
is_after_prp_adj = df.loc[idx_list_of_make[0] + 2, "pos"] in Nlp_util.pos_ADJECTIVEs or (
df.loc[idx_list_of_make[0] + 2, "base_form"] == "feel" and any(
df.loc[idx_list_of_make[0] + 2:idx_list_of_make[0] + 4, "pos"].isin(
Nlp_util.pos_ADJECTIVEs)))
return is_after_prp_adj
else:
return False
except:
logging.exception('')
return False
@staticmethod
def create_special_repeat_for_only_one_adj_word_sent(df):
original_adj = df["word"].iloc[0]
altered_adj = original_adj + np.random.choice(["", "..", "."], 1, p=[0.2, 0.5, 0.3])[0]
options = [
[altered_adj, "thats what you feel now"],
[altered_adj, "thats what you are feeling now"],
["you feel " + original_adj + " now"],
["you are feeling " + original_adj + " now"],
]
random_idx = randint(0, len(options) - 1)
return options[random_idx]
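    # Illustrative behaviour (note added here, not from the original source): for a df
    # whose only token is the adjective "sad", this returns one of the option lists
    # above, e.g. ["sad..", "thats what you feel now"] or ["you feel sad now"].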
@staticmethod
def __is_despising_himself(df):
try:
noun_list = Nlp_util.make_noun_list(df)
verb_list = Nlp_util.make_verb_list(df, type="normal")
adj_list = Nlp_util.make_adj_list(df)
is_first_sub_i = Nlp_util.is_first_subject_in(["i"], noun_list, verb_list)
is_the_verb_be = Nlp_util.is_first_verb_in(["be"], noun_list, verb_list, column_name="base_form")
is_the_adj_despising = Nlp_util.is_first_adj_after_first_sub_in(WORDS_DESPITE_HIMSELF.word.tolist(),
noun_list, adj_list)
return is_first_sub_i and is_the_verb_be and is_the_adj_despising
except:
logging.exception('')
return False
@staticmethod
def __alter_repeat_euphemistic(repeat):
try:
prefix_expression = \
np.random.choice(["you think ", "you feel like ", "you are feeling like ", "it feels like "], 1)[0]
return [prefix_expression + repeat]
except:
logging.exception('')
return [repeat]
@staticmethod
def __alter_repeat_for_plain_repeat(repeat_text, idx):
try:
repeat_text += np.random.choice(["", "..?", "."], 1, p=[0.5, 0.1, 0.4])[0]
if idx != 0:
repeat_text = np.random.choice(StringConstant.additions.value, 1, p=[0.5, 0.2, 0.2, 0.1])[
0] + repeat_text
return [repeat_text]
except:
logging.exception('')
return [repeat_text]
@staticmethod
def __alter_repeat_for_third_person_BeVerb_pair(repeat):
try:
prefix_expression = np.random.choice(["you think ", "you feel "], 1, p=[0.5, 0.5])[0]
return [prefix_expression + repeat]
except:
logging.exception('')
return []
@staticmethod
def __exists_keyword(text_kw_df):
return text_kw_df is not None
@classmethod
def __alter_repeat_for_keyword(cls, text_df, text_kw_df, idx, repeat, is_last_sentence=False):
repeat_list = []
if cls.__is_every_sent_positive(text_df, text_kw_df):
if idx == 0:
repeat_list.append(repeat)
else:
repeat = np.random.choice(StringConstant.additions.value, 1, p=[0.3, 0.3, 0.3, 0.1])[0] + repeat
repeat_list.append(repeat)
if is_last_sentence:
reaction = np.random.choice(StringConstant.positive_reaction_options.value, 1)[0]
repeat_list.append(reaction)
else:
ending_of_sent = ["", "..?", "."]
repeat += np.random.choice(ending_of_sent, 1, p=[0.5, 0.1, 0.4])[0]
if idx != 0:
repeat = np.random.choice(StringConstant.additions.value, 1, p=[0.5, 0.2, 0.2, 0.1])[0] + repeat
repeat_list.append(repeat)
return repeat_list
@classmethod
def __is_every_sent_positive(cls, text_df, text_kw_df):
try:
if text_kw_df.empty:
return False
if len(set(text_df.sidx)) > len(set(text_kw_df.sidx)):
return False
is_every_kw_positive = all(row.sscore > 70 for tmp, row in text_kw_df.iterrows())
is_every_kw_affirmative = all(not row.ng for tmp, row in text_kw_df.iterrows())
want_wish_words = {'want', 'wanted', 'wish', 'wished', 'wishing'}
exists_want_wish = any(row.word in want_wish_words for tmp, row in text_df.iterrows())
return is_every_kw_positive and is_every_kw_affirmative and not exists_want_wish
except:
logging.exception('')
return False
@classmethod
def __alter_repeat_for_want_to(cls, repeat_df):
try:
i_idx = Nlp_util.get_idx_list_of_idiom("want to", repeat_df.word)[0]
words_after_wanna = WordFormatter.Df2Str(repeat_df[i_idx + 2:])[1:]
response_options = [
[words_after_wanna, "That's what you wanna do"],
["So you'd be happy if you can " + words_after_wanna + "🤔"],
["So there is something makes you can't " + words_after_wanna + "😢"],
["So now it's hard for you to " + words_after_wanna + "😓"]
]
random_idx = randint(0, len(response_options) - 1)
return response_options[random_idx]
except:
logging.exception('')
repeat_text = WordFormatter.Df2Str(repeat_df)[1:]
return [repeat_text]
# ex) they are insane -> you think they are insane
@classmethod
def __exists_third_person_BeVerb_pair(cls, df):
try:
first_third_person = df.loc[((df.pos.isin(Nlp_util.pos_PRPs)) & (~df.word.isin(["i", "you"]))) | (
df.base_form.isin(Nlp_util.INDICATE_OTHERS)), :]
if len(first_third_person) != 0:
is_beVerb_and_adj_after_the_person = Df_util.anything_isin(["be"],
df.loc[first_third_person.iloc[0].name:,
"base_form"]) and Df_util.anything_isin(
Nlp_util.pos_ADJECTIVEs, df.loc[first_third_person.iloc[0].name:, "pos"])
if is_beVerb_and_adj_after_the_person:
return True
else:
return False
else:
return False
except:
logging.exception('')
return False
@staticmethod
def __is_1st_prp_followed_by_BE_TYPE(df, first_prp):
try:
return df.loc[first_prp.name + 1, "word"] in Nlp_util.BE_TYPE
            # TODO need to capture "be" properly here; "would be" is currently not caught
except:
logging.exception('')
return False
@staticmethod
def __is_2nd_word_after_1st_prp_verb(df, first_prp):
try:
return df.loc[first_prp.name + 2, "pos"] in Nlp_util.pos_VERBs
except:
logging.exception('')
return False
@classmethod
def __select_sidx_to_repeat(cls, text_df, sentiment_score_df, repeatable_sent_sidx):
try:
number_of_sents_to_choose = 2
sidx_to_repeat = []
only_one_sentiment_word_sidx = []
            # these are exceptions to repeatable sentences
for sidx in set(text_df.sidx):
tmp_df = text_df[text_df.sidx == sidx].copy().reset_index(drop=True)
if cls.__is_special_type(tmp_df):
sidx_to_repeat.append(sidx)
elif len(tmp_df) == 1 and \
(tmp_df["pos"].iloc[0] in Nlp_util.pos_ADJECTIVEs or
tmp_df["word"].iloc[0] in SENTIMENTAL_NON_ADJ_WORDS.word.tolist()):
only_one_sentiment_word_sidx.append(sidx)
else:
pass
# when user just said only "sadness" or "sad"
if not sidx_to_repeat and not repeatable_sent_sidx and only_one_sentiment_word_sidx:
return [only_one_sentiment_word_sidx[-1]]
print("\nSpecial cases sidx\n{}".format(sidx_to_repeat))
if len(sidx_to_repeat) == 2:
return set(sidx_to_repeat)
elif len(sidx_to_repeat) > 2:
return set(sidx_to_repeat[len(sidx_to_repeat) - 2:])
elif not sidx_to_repeat and not repeatable_sent_sidx:
return []
else:
if not repeatable_sent_sidx:
return sidx_to_repeat
else:
sentiment_score_df = sentiment_score_df[
sentiment_score_df.sidx.isin(repeatable_sent_sidx)
].sort_values(by='nscore', ascending=True)
sidx_to_repeat += list(set(sentiment_score_df.sidx.tolist()))[
:number_of_sents_to_choose - len(sidx_to_repeat)]
sidx_to_repeat.sort()
return set(sidx_to_repeat)
except Exception:
logging.exception(str(__name__))
return []
@classmethod
def __select_repeatable_sent_sidx(cls, text_df, intent_list):
unrepeatable_sidx_list = cls.__choose_unrepeatable_sent_index(text_df, intent_list)
repeatable_sent_sidx = list(set(text_df.sidx.values))
for unrepeatable_sidx in unrepeatable_sidx_list:
if unrepeatable_sidx in repeatable_sent_sidx:
repeatable_sent_sidx.remove(unrepeatable_sidx)
return repeatable_sent_sidx
@classmethod
def __choose_unrepeatable_sent_index(cls, text_df, intent_list):
try:
unrepeatable_sidx_list = []
idx_of_sent_talking_about_jullie = list(text_df[text_df.word.isin(["you", "jullie", "j"])].sidx)
unrepeatable_sidx_list.extend(idx_of_sent_talking_about_jullie)
print("\nList of sent having YOU\n{}".format(idx_of_sent_talking_about_jullie))
sidx_with_bad_words = cls.__get_sidx_with_bad_words(text_df)
unrepeatable_sidx_list.extend(sidx_with_bad_words)
print("\nList of sent having Bad Words\n{}".format(sidx_with_bad_words))
sidx_of_not_basic_svo_sent = cls.get_sidx_of_not_basic_svo_sent(text_df)
unrepeatable_sidx_list.extend(sidx_of_not_basic_svo_sent)
print("\nList of Not Basic SVO sent\n{}".format(sidx_of_not_basic_svo_sent))
question_or_meaningless_sidx = cls.__get_question_or_meaningless_sidx(text_df, intent_list)
unrepeatable_sidx_list.extend(question_or_meaningless_sidx)
print("\nList of Question or Meaninglesss sidx sent\n{}".format(question_or_meaningless_sidx))
normal_and_too_long_sidx = cls.__get_sidx_of_normal_and_too_long_sent(text_df)
unrepeatable_sidx_list.extend(normal_and_too_long_sidx)
print("\nList of Normal and Too Long sidx sent\n{}".format(normal_and_too_long_sidx))
unrepeatable_sidx_list = list(set(unrepeatable_sidx_list))
return unrepeatable_sidx_list
except Exception:
logging.exception(str(__name__))
return list(text_df.sidx)
@classmethod
def __get_sidx_of_normal_and_too_long_sent(cls, df):
try:
delete_sidx_list = []
for sidx in set(df.sidx.values):
target_df = df[df.sidx == sidx].copy().reset_index(drop=True)
if cls.__is_special_type(target_df):
pass
else:
if len(WordFormatter.Series2Str(target_df.word)) > 75:
delete_sidx_list.append(sidx)
else:
pass
return delete_sidx_list
except:
logging.exception('')
return []
@classmethod
def __is_special_type(cls, df):
try:
if cls.__mean_no_friends(df):
return True
elif cls.__has_what_to_do(df):
return True
elif cls.__is_despising_himself(df):
return True
elif cls.__has_nobody_V(df):
return True
elif cls.__does_user_feel_useless(df):
return True
elif cls.__has_say_plus_bad_word(df):
return True
elif cls.__exists_want_to(df):
return True
elif cls.__exists_make_S_feel_ADJ(df):
return True
elif cls.__has_because(df):
return True
elif cls.__has_dont_think_SV_sent(df):
return True
elif cls.__has_need_NN(df):
return True
elif cls.__has_wish_S_V(df):
return True
else:
return False
except:
logging.exception('')
return False
@staticmethod
def __get_question_or_meaningless_sidx(text_df, intent_list):
try:
sidx_list = sorted(list(set(text_df.sidx)))
meaningless_sent_index = []
for sidx, intent in zip(sidx_list, intent_list):
df = text_df[text_df.sidx == sidx].copy().reset_index(drop=True)
if intent.value in [Intent.MEANINGLESS.value] + Intent.ALL_QUESTION_TYPES.value:
meaningless_sent_index.append(sidx)
elif len(df) < 3:
meaningless_sent_index.append(sidx)
return meaningless_sent_index
except:
logging.exception('')
return []
    # the sentence doesn't consist of a simple S,V,O pattern such as "I like you"
@staticmethod
def get_sidx_of_not_basic_svo_sent(text_df):
try:
delete_sidx_list = []
for sidx in set(text_df.sidx.values):
df = text_df[text_df.sidx == sidx]
noun_list = Nlp_util.make_noun_list(df)
verb_list = Nlp_util.make_verb_list(df, type="normal")
# catch the case such as "Dont judge me"
if Nlp_util.is_any_verb_before_first_noun(noun_list, verb_list):
delete_sidx_list.append(sidx)
# catch the case such as "the situation horrible as like he said"
elif not Nlp_util.is_any_verb_for_first_noun(noun_list, verb_list):
delete_sidx_list.append(sidx)
else:
pass
return delete_sidx_list
except:
logging.exception('')
return []
@classmethod
def get_sentiment_of_repeat_target_sent(cls, text_df, sentiment_score_df):
try:
if text_df is None:
return None
repeat_df = text_df
delete_sidx_list = list(
sentiment_score_df[sentiment_score_df.nscore.isin([0]) & sentiment_score_df.pscore.isin([0])].sidx)
delete_sidx_list.extend(list(text_df[text_df.word.isin(["you", "jullie", "j"])].sidx))
delete_sidx_list.extend(cls.__get_sidx_with_bad_words(repeat_df))
delete_sidx_list.extend(cls.get_sidx_of_not_basic_svo_sent(repeat_df))
if len(set(delete_sidx_list)) == len(set(repeat_df.sidx.values)):
return None
target_sentiment_score_df = sentiment_score_df[~sentiment_score_df.sidx.isin(list(set(delete_sidx_list)))]
print("\nTarget Sentiment Score Df\n{}".format(target_sentiment_score_df))
if any(abs(target_sentiment_score_df.nscore) > 0) and any(target_sentiment_score_df.pscore > 0):
return "neutral"
elif any(abs(target_sentiment_score_df.nscore) > 0) and all(target_sentiment_score_df.pscore == 0):
return "negative"
elif all(abs(target_sentiment_score_df.nscore) == 0) and any(target_sentiment_score_df.pscore > 0):
return "positive"
else:
return None
except Exception:
logging.exception('Error at generate_repeat in ' + str(__name__))
return None
@staticmethod
def __get_two_longest_sentences(text_df):
length_of_df = [len(text_df[text_df.sidx == i]) for i in list(set(text_df.sidx))]
largest2 = nlargest(2, length_of_df)
length_of_df = | pd.DataFrame({'length': length_of_df}) | pandas.DataFrame |
import os
from multiprocessing import Pool
import pandas as pd
# import rioxarray as rxr
import geopandas as gpd
import fiona
from shapely.geometry import Polygon
from shapely.ops import linemerge
import zipfile
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
processed_data_dir = os.path.join(BASE_DIR, 'processed_data')
vector_save_path = os.path.join(processed_data_dir, 'grouped_hydrographic_features')
if not os.path.exists(vector_save_path):
os.mkdir(vector_save_path)
#
# Using the regrouped hydrologic regions, (process_hydrologic_regions.py),
# group the stream vectors for dem processing
#
def fill_holes(data):
interior_gaps = data.interiors.values.tolist()[0]
group_name = data.index.values[0]
gap_list = []
if interior_gaps is not None:
print(f' ...{len(interior_gaps)} gaps found in {group_name} groupings.')
for i in interior_gaps:
gap_list.append(Polygon(i))
data_gaps = gpd.GeoDataFrame(geometry=gap_list, crs=data.crs)
appended_set = data.append(data_gaps)
appended_set['group'] = 0
merged_polygon = appended_set.dissolve(by='group')
return merged_polygon.geometry.values[0]
else:
print(f' ...no gaps found in {group_name}')
return data.geometry.values[0]
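# Illustrative sketch (hypothetical example, not part of the original pipeline):
# fill_holes expects a single-row GeoDataFrame whose polygon may contain interior
# rings, e.g.
#
#   donut = Polygon([(0, 0), (10, 0), (10, 10), (0, 10)],
#                   [[(4, 4), (6, 4), (6, 6), (4, 6)]])
#   demo = gpd.GeoDataFrame(geometry=[donut], crs='EPSG:3005')
#   demo['group'] = 'demo_group'
#   solid = fill_holes(demo.dissolve(by='group'))  # Polygon with the gap filled in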
# nhn_path = '/media/danbot/Samsung_T5/geospatial_data/WSC_data/NHN_feature_data/'
nhn_path = '/home/danbot/Documents/code/hysets_validation/source_data/NHN_feature_data/'
nhn_feature_path = os.path.join(nhn_path, 'BC_NHN_features/')
seak_path = os.path.join(nhn_path, 'SEAK_features')
bc_groups_path = os.path.join(processed_data_dir, 'merged_basin_groups/')
bc_groups = gpd.read_file(bc_groups_path + 'BC_transborder_final_regions_4326.geojson')
bc_groups = bc_groups.to_crs(3005)
# 1. get the list of coastal + island regions
coast_groups = [
'08A', '08B', '08C', '08D',
'08E', '08F', '08G', '08M',
'09M'
]
coast_islands = ['08O', '08H']
seak_groups = ['08A', '08B', '08C', '08D']
seak_dict = {
'08A': [19010405, 19010404, 19010403, 19010406],
'08B': [19010301, 19010302, 19010303, 19010304,
19010206, 19010204, 19010212, 19010211],
'08C': [19010210, 19010208, 19010207, 19010205],
'08D': [19010103, 19010209, 19010104, 19010102],
}
# 2. retrieve the polygons associated with the 'region' boundary.
# 3. retrieve littoral / shoreline layers and merge them
# 4. split the region polygon using the line created in step 3.
# 5. discard the sea surface polygon
# 6. save new polygon and use to trim DEM in dem_basin_mapper.py
# collection of individual linestrings for splitting in a
# list and add the polygon lines to it.
# line_split_collection.append(polygon.boundary)
# merged_lines = shapely.ops.linemerge(line_split_collection)
# border_lines = shapely.ops.unary_union(merged_lines)
# decomposition = shapely.ops.polygonize(border_lines)
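# A minimal sketch of steps 3-5 above (hypothetical helper; the name and arguments
# are assumptions, not part of the original workflow):
def split_region_by_shoreline(region_polygon, shoreline_lines):
    """Split a region polygon along merged shoreline/littoral lines and return the
    resulting pieces; the sea-surface piece can then be identified and discarded."""
    from shapely.ops import unary_union, polygonize
    line_split_collection = list(shoreline_lines)
    line_split_collection.append(region_polygon.boundary)
    merged_lines = linemerge(line_split_collection)
    border_lines = unary_union(merged_lines)
    return list(polygonize(border_lines))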
# load and merge the SEAK files into one gdf
seak_streams_path = os.path.join(nhn_path, 'SEAK_WBDHU8_polygons.geojson')
SEAK_polygons = gpd.read_file(seak_streams_path)
SEAK_polygons = SEAK_polygons.to_crs(3005)
SEAK_files = os.listdir(seak_path)
def retrieve_and_group_layers(feature_path, files, target_crs, target_layer):
dfs = []
all_crs = []
print(f' ...checking features at {feature_path} for layer {target_layer}.')
for file in files:
file_layers = zipfile.ZipFile(os.path.join(feature_path, file)).namelist()
layers = [e for e in file_layers if (target_layer in e) & (e.endswith('.shp'))]
if layers:
for layer in layers:
layer_path = os.path.join(feature_path, file) + f'!{layer}'
df = gpd.read_file(layer_path)
crs = df.crs
print(f' crs={crs}')
if crs not in all_crs:
all_crs.append(crs)
print(f' new crs found: {crs}')
df = df.to_crs(target_crs)
# append the dataframe to the group list
dfs.append(df)
else:
print(f'no target layers found in {file}')
return dfs
all_crs = []
# bc_groups = bc_groups[bc_groups['group_name'] == '08H'].copy()
# print(bc_groups)
target_crs = 3005
bc_groups = bc_groups.to_crs(target_crs)
bc_groups = bc_groups[bc_groups['group_name'].isin(['08B', '08C', '08D'])]
for i, row in bc_groups.iterrows():
grp_code = row['group_name']
sda_codes = row['WSCSDAs']
    if sda_codes is None:
sda_codes = [row['group_code'].lower()]
grp_code = row['group_code']
else:
sda_codes = [e.lower() for e in row['WSCSDAs'].split(',')]
print(f'Starting stream vector merge on {grp_code}: {sda_codes}')
nhn_files = [e for e in os.listdir(nhn_feature_path) if e.split('_')[2][:3] in sda_codes]
# there is one sub-sub basin region polygon that has
# a corrupt archive and needs to be filtered out
bad_zip_file_link = 'https://ftp.maps.canada.ca/pub/nrcan_rncan/vector/geobase_nhn_rhn/shp_en/08/nhn_rhn_08nec00_shp_en.zip'
bad_zip_file = bad_zip_file_link.split('/')[-1]
# skip the bad file:
nhn_files_trimmed = [f for f in nhn_files if f != bad_zip_file]
seak_included = False
for target_layer in ['WATERBODY', 'ISLAND', 'NLFLOW', 'LITTORAL',]:
df_list = []
group_stream_layers = []
print(f' Starting merge of {target_layer} features.')
output_folder = os.path.join(vector_save_path, f'{grp_code}/{target_layer}/')
if not os.path.exists(output_folder):
os.makedirs(output_folder)
# use geojson for littoral and island (polygons)
# use .shp for stream network (NLFLOW layer)
output_filename = f'{grp_code}_{target_layer}_{target_crs}.geojson'
if target_layer in ['NLFLOW']:
output_filename = f'{grp_code}_{target_layer}_{target_crs}.shp'
output_filepath = os.path.join(output_folder, output_filename)
if not os.path.exists(output_filepath):
nhn_dfs = retrieve_and_group_layers(nhn_feature_path, nhn_files_trimmed, target_crs, target_layer)
if len(nhn_dfs) == 0:
continue
else:
nhn_gdf = gpd.GeoDataFrame(pd.concat(nhn_dfs, ignore_index=True), crs=target_crs)
print(f' {len(nhn_gdf)} NHN items found.')
# nhn_gdf['intersects_group_polygon'] = gpd.sjoin(gdf, row, how='inner', predicate='contains')
# gdf = gdf[gdf['intersects_group_polygon']].copy()
# print(nhn_gdf.head())
if nhn_gdf.empty:
continue
else:
df_list.append(nhn_gdf)
if (target_layer == 'NLFLOW') & (grp_code in seak_dict.keys()):
huc_codes = [str(e) for e in seak_dict[grp_code]]
print('')
print(f' ...searching for USGS vector info for {grp_code}.')
group_seak_files = []
for h in huc_codes:
files = [f for f in SEAK_files if h in f]
if len(files) > 0:
group_seak_files += files
# there should be as many files as there are codes,
# otherwise a file is missing.
assert len(group_seak_files) == len(seak_dict[grp_code])
# get the southeast alaska hydrographic feature files
seak_dfs = retrieve_and_group_layers(seak_path, group_seak_files, target_crs, 'NHDFlowline')
seak_gdf = gpd.GeoDataFrame(pd.concat(seak_dfs, ignore_index=True), crs=target_crs)
# seak_gdf = seak_gdf.iloc[:5000]
# seak_gdf = gpd.GeoDataFrame(pd.concat([gdf,seak_layer], ignore_index=True), crs=target_crs)
print(f' {len(seak_gdf)} SEAK items found.')
if not seak_gdf.empty:
df_list.append(seak_gdf)
if len(df_list) > 0:
gdf = gpd.GeoDataFrame( | pd.concat(df_list, ignore_index=True) | pandas.concat |
import pandas as pd
sep:str = '_'
debug:bool = True
student_column_purge_list = ['id','email','ssn','address'] # remove id,email,ssn and address
def process(students_file_name:str, teachers_file_name:str) -> str:
# read csv file into df
students_df = pd.read_csv(students_file_name, delimiter=sep)
if debug:
print(students_df)
assert len(students_df.columns) == 7, f"Something wrong with the file {students_file_name}"
assert 'fname' in students_df.columns and 'lname' in students_df.columns and 'cid' in students_df.columns, f'Something wrong with the file {students_file_name}, columns - fname, lname or cid do not exist.'
    students_df.drop(student_column_purge_list, axis=1, inplace=True)
if debug:
print(students_df)
print(students_df.count())
# read parquet file into df
teachers_df = pd.read_parquet(teachers_file_name)
assert len(teachers_df.columns) == 7, f"Something wrong with the file {teachers_file_name}"
assert 'fname' in teachers_df.columns and 'lname' in teachers_df.columns and 'cid' in teachers_df.columns, f'Something wrong with the file {teachers_file_name}, columns - fname, lname or cid do not exist.'
if debug:
print(teachers_df)
print(teachers_df.count())
teachers_df.rename(columns={'fname': 'teachers_fname', 'lname': 'teachers_lname'}, inplace=True)
# trim unused columns
teachers_drop_list = ['id','email', 'ssn', 'address']
teachers_df.drop(teachers_drop_list, axis=1, inplace=True)
# join teacher with formatted students
join_df = | pd.merge(students_df, teachers_df, on='cid', how='inner') | pandas.merge |
import os
import csv
import torch
import numpy as np
import pandas as pd
import seaborn as sns
from plot import *
from os.path import join
from pathlib import Path
from sklearn.cluster import KMeans
from collections import Counter
from torch.utils.data import DataLoader, Subset
from customLoader import *
from torchvision.transforms import transforms
from IPython import embed
def get_loader(trajectories, transform, conf, shuffle=False, limit=None):
train, _ = get_train_val_split(trajectories, 1)
train_dataset = CustomMinecraftData(train, transform=transform, delay=False, **conf)
    if limit is not None:
train_dataset = Subset(train_dataset, limit)
train_dataloader = DataLoader(train_dataset, batch_size=1, shuffle=shuffle, num_workers=0)
return train_dataloader
def compute_kmeans(embeddings, num_clusters):
return KMeans(n_clusters=num_clusters, random_state=0).fit(embeddings)
def compute_embeddings(loader, model):
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
return np.array([model.compute_embedding(batch, device).detach().cpu().numpy() for batch in loader]).squeeze()
def get_images(loader):
return torch.cat([data[:,0] for data in loader])
def load_trajectories(trajectories, limit=None):
print("Loading trajectories...")
all_trajectories = []
files = sorted([x for x in os.listdir(f"./results/{trajectories}/") if 'coords' in x], key=lambda x: int(x.split('.')[1]))
for file in files:
with open(f"./results/{trajectories}/{file}") as csv_file:
trajectory = []
csv_reader = csv.reader(csv_file, delimiter=',')
line_count = 0
for i, row in enumerate(csv_reader):
trajectory.append(row)
all_trajectories.append(trajectory)
trajs = np.array(all_trajectories).reshape(-1, 3)
    if limit is not None:
return trajs[limit]
return trajs
def construct_map(enc):
    if enc.limit is not None:
        limit = [x*10 for x in range(enc.limit)]
    else:
        limit = None
loader = get_loader(
enc.trajectories,
enc.transform,
enc.conf,
shuffle=enc.shuffle,
limit=limit)
if 'Custom' in enc.trajectories[0]:
trajectories = load_trajectories(enc.trajectories[0], limit)
embeddings = compute_embeddings(loader, enc)
if enc.type == "index":
indexes = get_indexes(trajectories, embeddings, enc)
index_map(enc, indexes)
elif enc.type == "reward":
reward_map(trajectories, embeddings, enc, loader)
elif enc.type == "embed":
images = get_images(loader) + 0.5
embed_map(embeddings, images, enc.experiment)
elif enc.type == "centroides":
indexes = get_indexes(trajectories, embeddings, enc)
centroides_map(enc, loader, indexes)
else:
raise NotImplementedError()
def get_indexes(trajectories, embeddings, enc):
print("Get index from all data points...")
values = pd.DataFrame(columns=['x', 'y', 'Code:'])
for i, (e, p) in enumerate(zip(embeddings, trajectories)):
x = float(p[2])
y = float(p[0])
e = torch.from_numpy(e).cuda()
k = enc.compute_argmax(e.unsqueeze(dim=0))
if k==3:
values = values.append(
{'x': x, 'y': y, 'Code:': int(k)}, ignore_index=True)
values['Code:'] = values['Code:'].astype('int32')
return values
def centroides_map(encoder, loader, indexes):
experiment = encoder.experiment
_, coord_list = encoder.model.list_reconstructions()
world = getWorld(encoder.trajectories[0])
palette = sns.color_palette("Paired", n_colors=encoder.num_clusters)
experiment = encoder.test['path_weights'].split('/')[0]
centroides_indexmap(coord_list, indexes, palette, experiment, world, loader)
def index_map(enc, indexes):
code_list = indexes['Code:'].tolist()
codes_count = Counter(code_list)
palette = sns.color_palette("Paired", n_colors=len(list(set(code_list))))
experiment = enc.test['path_weights'].split('/')[0]
world = getWorld(enc.trajectories[0])
plot_idx_maps(indexes, palette, experiment, world)
skill_appearance(codes_count, palette, experiment, world)
def reward_map(trajectories, embeddings, enc, loader):
print("Get index from all data points...")
data_list = []
for g in range(enc.num_clusters):
print(f"Comparing data points with goal state {g}", end="\r")
values = | pd.DataFrame(columns=['x', 'y', 'reward']) | pandas.DataFrame |
from collections import Counter
from itertools import combinations
from math import sqrt
import random
from keras.layers import Concatenate, Dense, Dot, Dropout, Embedding, Input, Reshape
from keras.models import Model
from keras.callbacks import Callback, ModelCheckpoint
import numpy as np
import pandas as pd
from sklearn.metrics import mean_squared_error
from sklearn.preprocessing import StandardScaler
import tensorflow
STUDENT_ID = '20622268'
# Function to calculate RMSE
def rmse(pred, actual):
# Ignore nonzero terms.
pred = pred[actual.nonzero()].flatten()
actual = actual[actual.nonzero()].flatten()
return sqrt(mean_squared_error(pred, actual))
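# Quick sanity check for rmse (illustrative values only):
#   pred = np.array([[4.1, 1.0], [3.2, 0.5]])
#   actual = np.array([[4.0, 0.0], [3.0, 0.0]])
#   rmse(pred, actual)  # only the entries where `actual` is nonzero are compared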
def build_cfmodel(n_users, n_items, embed_size, output_layer='dot'):
user_input = Input(shape=(1,), dtype='int32', name='user_input')
item_input = Input(shape=(1,), dtype='int32', name='item_input')
user_emb = Embedding(output_dim=embed_size, input_dim=n_users, input_length=1)(user_input)
user_emb = Reshape((embed_size,))(user_emb)
item_emb = Embedding(output_dim=embed_size, input_dim=n_items, input_length=1)(item_input)
item_emb = Reshape((embed_size,))(item_emb)
if output_layer == 'dot':
model_output = Dot(axes=1)([user_emb, item_emb])
elif output_layer == 'mlp':
mlp_input = Concatenate()([user_emb, item_emb])
dense_1 = Dense(64, activation='relu')(mlp_input)
dense_1_dp = Dropout(0.15)(dense_1)
dense_2 = Dense(32, activation='relu')(dense_1_dp)
dense_2_dp = Dropout(0.15)(dense_2)
model_output = Dense(1)(dense_2_dp)
else:
raise NotImplementedError
model = Model(inputs=[user_input, item_input],
outputs=model_output)
return model
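# Example wiring (hypothetical sizes; user_idx/item_idx are integer-encoded id arrays):
#   cf = build_cfmodel(n_users=1000, n_items=500, embed_size=32, output_layer='mlp')
#   cf.compile(optimizer='adam', loss='mse')
#   cf.fit([user_idx, item_idx], ratings, epochs=5, batch_size=256)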
def build_deepwide_model(len_continuous, deep_vocab_lens, deep2_vocab_lens, len_wide, embed_size):
embed_size2 = 32
input_list = []
continuous_input = Input(shape=(len_continuous,), dtype='float32', name='continuous_input')
input_list.append(continuous_input)
emb_list = []
for vocab_size in deep_vocab_lens:
embed_size = int(vocab_size**0.25)
_input = Input(shape=(1,), dtype='int32')
input_list.append(_input)
_emb = Embedding(output_dim=embed_size, input_dim=vocab_size, input_length=1)(_input)
_emb = Reshape((embed_size,))(_emb)
emb_list.append(_emb)
for vocab_size in deep2_vocab_lens:
embed_size2 = int(vocab_size**0.25)
_input = Input(shape=(1,), dtype='int32')
input_list.append(_input)
_emb2 = Embedding(output_dim=embed_size2, input_dim=vocab_size, input_length=1)(_input)
_emb2 = Reshape((embed_size2,))(_emb2)
emb_list.append(_emb2)
deep_input = Concatenate()(emb_list + [continuous_input])
dense_1 = Dense(512, activation='relu')(deep_input)
dense_1_dp = Dropout(0.3)(dense_1)
dense_2 = Dense(256, activation='relu')(dense_1_dp)
dense_2_dp = Dropout(0.3)(dense_2)
dense_3 = Dense(128, activation='relu')(dense_2_dp)
dense_3_dp = Dropout(0.3)(dense_3)
wide_input = Input(shape=(len_wide,), dtype='float32')
input_list.append(wide_input)
fc_input = Concatenate()([dense_3_dp, wide_input])
dense_1 = Dense(8, activation='sigmoid')(fc_input)
model_output = Dense(1)(dense_1)
model = Model(inputs=input_list,
outputs=model_output)
return model
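# Note on input ordering (illustrative call; the real lengths come from the data
# further below): the model expects its inputs in the same order the Input layers
# were appended -- the continuous block, one column per deep/deep2 categorical,
# then the wide block:
#   model = build_deepwide_model(len_continuous=3, deep_vocab_lens=[50, 20],
#                                deep2_vocab_lens=[10], len_wide=120, embed_size=8)
#   model.compile(optimizer='adam', loss='mse')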
def get_continuous_features(df, continuous_columns):
continuous_features = df[continuous_columns].values
# continuous_features = df[continuous_columns].a
return continuous_features
def get_top_k_p_combinations(df, comb_p, topk, output_freq=False):
def get_category_combinations(categories_str, comb_p=2):
categories = categories_str.split(', ')
return list(combinations(categories, comb_p))
all_categories_p_combos = df["item_categories"].apply(
lambda x: get_category_combinations(x, comb_p)).values.tolist()
all_categories_p_combos = [tuple(t) for item in all_categories_p_combos for t in item]
tmp = dict(Counter(all_categories_p_combos))
sorted_categories_combinations = list(sorted(tmp.items(), key=lambda x: x[1], reverse=True))
if output_freq:
return sorted_categories_combinations[:topk]
else:
return [t[0] for t in sorted_categories_combinations[:topk]]
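# For example (hypothetical data), comb_p=2 and topk=2 would return the two most
# frequent category pairs, e.g. [('Restaurants', 'Bars'), ('Food', 'Coffee & Tea')];
# with output_freq=True the (pair, count) tuples are returned instead.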
def get_wide_features(df):
def categories_to_binary_output(categories):
binary_output = [0 for _ in range(len(selected_categories_to_idx))]
for category in categories.split(', '):
if category in selected_categories_to_idx:
binary_output[selected_categories_to_idx[category]] = 1
else:
binary_output[0] = 1
return binary_output
def categories_cross_transformation(categories):
current_category_set = set(categories.split(', '))
corss_transform_output = [0 for _ in range(len(top_combinations))]
for k, comb_k in enumerate(top_combinations):
if len(current_category_set & comb_k) == len(comb_k):
corss_transform_output[k] = 1
else:
corss_transform_output[k] = 0
return corss_transform_output
category_binary_features = np.array(df.item_categories.apply(
lambda x: categories_to_binary_output(x)).values.tolist())
category_corss_transform_features = np.array(df.item_categories.apply(
lambda x: categories_cross_transformation(x)).values.tolist())
return np.concatenate((category_binary_features, category_corss_transform_features), axis=1)
root_path = ""
tr_df = | pd.read_csv(root_path + "data/train.csv") | pandas.read_csv |
"""
Module containing the Company Class.
Abbreviations used in code:
dfi = input dataframe
dfo = output dataframe
"""
from typing import Literal
import numpy as np
import pandas as pd
from . import config as c
class Company:
"""
Finance Data Class for listed Brazilian Companies.
Attributes
----------
identifier: int or str
        A unique identifier to filter a company in the financial dataset. Both CVM
ID or Fiscal ID can be used. CVM ID (regulator ID) must be an integer.
Fiscal ID must be a string in 'XX.XXX.XXX/XXXX-XX' format.
"""
def __init__(
self,
identifier: int | str,
acc_method: Literal["consolidated", "separate"] = "consolidated",
acc_unit: float | str = 1.0,
tax_rate: float = 0.34,
):
"""Initialize main variables.
Parameters
----------
identifier: int or str
            A unique identifier to filter a company in the financial dataset.
Both CVM ID or Fiscal ID can be used.
CVM ID (regulator ID) must be an integer.
Fiscal ID must be a string in 'XX.XXX.XXX/XXXX-XX' format.
acc_method : {'consolidated', 'separate'}, default 'consolidated'
Accounting method used for registering investments in subsidiaries.
acc_unit : float or str, default 1.0
acc_unit is a constant that will divide company account values.
The constant can be a number greater than zero or the strings
{'thousand', 'million', 'billion'}.
tax_rate : float, default 0.34
The 'tax_rate' attribute will be used to calculate some of the
company indicators.
"""
self.set_id(identifier)
self.acc_method = acc_method
self.acc_unit = acc_unit
self.tax_rate = tax_rate
def set_id(self, identifier: int | str):
"""
Set a unique identifier to filter the company in as fi.
Parameters
----------
value: int or str
            A unique identifier to filter a company in the financial dataset.
Both CVM ID or Fiscal ID can be used.
CVM ID (regulator ID) must be an integer.
Fiscal ID must be a string in 'XX.XXX.XXX/XXXX-XX' format.
Returns
-------
int or str
Raises
------
KeyError
            * If passed ``identifier`` is not found in the financial dataset.
"""
# Create custom data frame for ID selection
df = (
c.main_df[["cvm_id", "fiscal_id"]]
.drop_duplicates()
.astype({"cvm_id": int, "fiscal_id": str})
)
if identifier in df["cvm_id"].values:
self._cvm_id = identifier
self._fiscal_id = df.loc[df["cvm_id"] == identifier, "fiscal_id"].item()
elif identifier in df["fiscal_id"].values:
self._fiscal_id = identifier
self._cvm_id = df.loc[df["fiscal_id"] == identifier, "cvm_id"].item()
else:
raise KeyError("Company 'identifier' not found in database")
# Only set company data after object identifier validation
self._set_main_data()
@property
def acc_method(self):
"""
Get or set accounting method used for registering investments in
subsidiaries.
Parameters
----------
value : {'consolidated', 'separate'}, default 'consolidated'
Accounting method used for registering investments in subsidiaries.
Returns
-------
str
Raises
------
ValueError
* If passed ``value`` is invalid.
"""
        return self._acc_method
@acc_method.setter
def acc_method(self, value: Literal["consolidated", "separate"]):
if value in {"consolidated", "separate"}:
self._acc_method = value
else:
raise ValueError("acc_method expects 'consolidated' or 'separate'")
@property
def acc_unit(self):
"""
Get or set a constant to divide company account values.
Parameters
----------
value : float or str, default 1.0
acc_unit is a constant that will divide company account values.
The constant can be a number greater than zero or the strings
{'thousand', 'million', 'billion'}.
Returns
-------
float
Raises
------
ValueError
* If passed ``value`` is invalid.
"""
return self._acc_unit
@acc_unit.setter
def acc_unit(self, value: float | str):
if value == "thousand":
self._acc_unit = 1_000
elif value == "million":
self._acc_unit = 1_000_000
elif value == "billion":
self._acc_unit = 1_000_000_000
        elif value > 0:
self._acc_unit = value
else:
raise ValueError("Accounting Unit is invalid")
@property
def tax_rate(self):
"""
Get or set company 'tax_rate' attribute.
Parameters
----------
value : float, default 0.34
'value' will be passed to 'tax_rate' object attribute if
0 <= value <= 1.
Returns
-------
float
Raises
------
ValueError
* If passed ``value`` is invalid.
"""
return self._tax_rate
@tax_rate.setter
def tax_rate(self, value: float):
if 0 <= value <= 1:
self._tax_rate = value
else:
raise ValueError("Company 'tax_rate' value is invalid")
    def _set_main_data(self) -> None:
self._COMP_DF = (
c.main_df.query("cvm_id == @self._cvm_id")
.astype(
{
"co_name": str,
"cvm_id": np.uint32,
"fiscal_id": str,
"report_type": str,
"report_version": str,
"period_reference": "datetime64",
"period_begin": "datetime64",
"period_end": "datetime64",
"period_order": np.int8,
"acc_code": str,
"acc_name": str,
"acc_method": str,
"acc_fixed": bool,
"acc_value": float,
"equity_statement_column": str,
}
)
.sort_values(by="acc_code", ignore_index=True)
)
self._NAME = self._COMP_DF["co_name"].iloc[0]
self._FIRST_ANNUAL = self._COMP_DF.query('report_type == "annual"')[
"period_end"
].min()
self._LAST_ANNUAL = self._COMP_DF.query('report_type == "annual"')[
"period_end"
].max()
self._LAST_QUARTERLY = self._COMP_DF.query('report_type == "quarterly"')[
"period_end"
].max()
def info(self) -> pd.DataFrame:
"""Return dataframe with company info."""
company_info = {
"Name": self._NAME,
"CVM ID": self._cvm_id,
"Fiscal ID (CNPJ)": self._fiscal_id,
"Total Accounting Rows": len(self._COMP_DF.index),
"Selected Tax Rate": self._tax_rate,
"Selected Accounting Method": self._acc_method,
"Selected Accounting Unit": self._acc_unit,
"First Annual Report": self._FIRST_ANNUAL.strftime("%Y-%m-%d"),
"Last Annual Report": self._LAST_ANNUAL.strftime("%Y-%m-%d"),
"Last Quarterly Report": self._LAST_QUARTERLY.strftime("%Y-%m-%d"),
}
df = pd.DataFrame.from_dict(company_info, orient="index", columns=["Values"])
df.index.name = "Company Info"
return df
def report(
self,
report_type: str,
acc_level: int | None = None,
num_years: int = 0,
) -> pd.DataFrame:
"""
Return a DataFrame with company selected report type.
This function generates a report representing one of the financial
statements for the company adjusted by the attributes passed and
returns a pandas.DataFrame with this report.
Parameters
----------
report_type : {'assets', 'liabilities_and_equity', 'liabilities',
'equity', 'income', 'cash_flow'}
Report type to be generated.
acc_level : {None, 2, 3, 4}, default None
Detail level to show for account codes.
acc_level = None -> X... (default: show all accounts)
acc_level = 2 -> X.YY (show 2 levels)
acc_level = 3 -> X.YY.ZZ (show 3 levels)
acc_level = 4 -> X.YY.ZZ.WW (show 4 levels)
num_years : int, default 0
Select how many last years to show where 0 -> show all years
Returns
------
pandas.DataFrame
Raises
------
ValueError
* If ``report_type`` attribute is invalid
* If ``acc_level`` attribute is invalid
"""
# Check input arguments.
if acc_level not in {None, 2, 3, 4}:
raise ValueError("acc_level expects None, 2, 3 or 4")
df = self._COMP_DF.query("acc_method == @self._acc_method").copy()
# Change acc_unit only for accounts different from 3.99
df["acc_value"] = np.where(
df["acc_code"].str.startswith("3.99"),
df["acc_value"],
df["acc_value"] / self._acc_unit,
)
# Filter dataframe for selected acc_level
if acc_level:
acc_code_limit = acc_level * 3 - 2 # noqa
df.query("acc_code.str.len() <= @acc_code_limit", inplace=True)
"""
Filter dataframe for selected report_type (report type)
df['acc_code'].str[0].unique() -> [1, 2, 3, 4, 5, 6, 7]
The first part of 'acc_code' is the report type
Table of reports correspondence:
1 -> Balance Sheet - Assets
2 -> Balance Sheet - Liabilities and Shareholders’ Equity
3 -> Income
4 -> Comprehensive Income
5 -> Changes in Equity
6 -> Cash Flow (Indirect Method)
7 -> Added Value
"""
report_types = {
"assets": ["1"],
"cash": ["1.01.01", "1.01.02"],
"current_assets": ["1.01"],
"non_current_assets": ["1.02"],
"liabilities": ["2.01", "2.02"],
"debt": ["2.01.04", "2.02.01"],
"current_liabilities": ["2.01"],
"non_current_liabilities": ["2.02"],
"liabilities_and_equity": ["2"],
"equity": ["2.03"],
"income": ["3"],
# "earnings_per_share": ["3.99.01.01", "3.99.02.01"],
"earnings_per_share": ["3.99"],
"comprehensive_income": ["4"],
"changes_in_equity": ["5"],
"cash_flow": ["6"],
"added_value": ["7"],
}
acc_codes = report_types[report_type]
expression = ""
for count, acc_code in enumerate(acc_codes):
if count > 0:
expression += " or "
expression += f'acc_code.str.startswith("{acc_code}")'
df.query(expression, inplace=True)
        # remove earnings per share from the income statement
if report_type == 'income':
df = df[~df['acc_code'].str.startswith("3.99")]
if report_type in {"income", "cash_flow"}:
df = self._calculate_ttm(df)
df.reset_index(drop=True, inplace=True)
report_df = self._make_report(df)
report_df.set_index(keys="acc_code", drop=True, inplace=True)
# Show only selected years
if num_years > 0:
cols = report_df.columns.to_list()
cols = cols[0:2] + cols[-num_years:]
report_df = report_df[cols]
return report_df
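    # Illustrative usage (the CVM ID below is hypothetical; any valid identifier works):
    #   co = Company(9999, acc_method="consolidated", acc_unit="million")
    #   co.report("income", acc_level=3, num_years=4)
    #   co.report("assets", acc_level=2)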
def _calculate_ttm(self, dfi: pd.DataFrame) -> pd.DataFrame:
if self._LAST_ANNUAL > self._LAST_QUARTERLY:
return dfi.query('report_type == "annual"').copy()
df1 = dfi.query("period_end == @self._LAST_QUARTERLY").copy()
df1.query("period_begin == period_begin.min()", inplace=True)
df2 = dfi.query("period_reference == @self._LAST_QUARTERLY").copy()
df2.query("period_begin == period_begin.min()", inplace=True)
df2["acc_value"] = -df2["acc_value"]
df3 = dfi.query("period_end == @self._LAST_ANNUAL").copy()
df_ttm = (
pd.concat([df1, df2, df3], ignore_index=True)[["acc_code", "acc_value"]]
.groupby(by="acc_code")
.sum()
.reset_index()
)
df1.drop(columns="acc_value", inplace=True)
df_ttm = pd.merge(df1, df_ttm)
df_ttm["report_type"] = "quarterly"
df_ttm["period_begin"] = self._LAST_QUARTERLY - pd.DateOffset(years=1)
df_annual = dfi.query('report_type == "annual"').copy()
return pd.concat([df_annual, df_ttm], ignore_index=True)
def custom_report(
self,
acc_list: list[str],
num_years: int = 0,
) -> pd.DataFrame:
"""
Return a financial report from custom list of accounting codes
Creates DataFrame object with a custom list of accounting codes
adjusted by function attributes
Parameters
----------
acc_list : list[str]
A list of strings containg accounting codes to be used in report
num_years : int, default 0
Select how many last years to show where 0 -> show all years
Returns
-------
pandas.DataFrame
"""
df_as = self.report("assets")
df_le = self.report("liabilities_and_equity")
df_is = self.report("income")
df_cf = self.report("cash_flow")
dfo = pd.concat([df_as, df_le, df_is, df_cf]).query("acc_code == @acc_list")
# Show only selected years
if num_years > 0:
cols = dfo.columns.to_list()
cols = cols[0:2] + cols[-num_years:]
dfo = dfo[cols]
return dfo
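    # e.g. (illustrative account codes for cash, equity, revenue and net income):
    #   co.custom_report(["1.01.01", "2.03", "3.01", "3.11"], num_years=3)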
@staticmethod
def _prior_values(s: pd.Series, is_prior: bool) -> pd.Series:
"""Shift row to the right in order to obtain series previous values"""
if is_prior:
arr = s.iloc[:-1].values
return np.append(np.nan, arr)
else:
return s
def indicators(self, num_years: int = 0, is_prior: bool = True) -> pd.DataFrame:
"""
Return company main operating indicators.
Creates DataFrame object with company operating indicators as
described in reference [1]
Parameters
----------
num_years : int, default 0
Select how many last years to show where 0 -> show all years
is_prior : bool, default True
Divide return measurements by book values from the end of the prior
year (see Damodaran reference).
Returns
-------
pandas.Dataframe
References
----------
.. [1] <NAME>, "Return on Capital (ROC), Return on Invested
Capital (ROIC) and Return on Equity (ROE): Measurement and
Implications.", 2007,
https://people.stern.nyu.edu/adamodar/pdfoles/papers/returnmeasures.pdf
https://people.stern.nyu.edu/adamodar/New_Home_Page/datafile/variable.htm
"""
df_as = self.report("assets")
df_le = self.report("liabilities_and_equity")
df_in = self.report("income")
df_cf = self.report("cash_flow")
df = pd.concat([df_as, df_le, df_in, df_cf]).drop(
columns=["acc_fixed", "acc_name"]
)
# Calculate indicators series
revenues = df.loc["3.01"]
gross_profit = df.loc["3.03"]
ebit = df.loc["3.05"]
ebt = df.loc["3.07"]
effective_tax = df.loc["3.08"]
depreciation_amortization = df.loc["6.01.01.04"]
ebitda = ebit + depreciation_amortization
operating_cash_flow = df.loc["6.01"]
# capex = df.loc["6.02"]
net_income = df.loc["3.11"]
total_assets = df.loc["1"]
total_assets_p = self._prior_values(total_assets, is_prior)
equity = df.loc["2.03"]
equity_p = self._prior_values(equity, is_prior)
total_cash = df.loc["1.01.01"] + df.loc["1.01.02"]
current_assets = df.loc["1.01"]
current_liabilities = df.loc["2.01"]
working_capital = current_assets - current_liabilities
total_debt = df.loc["2.01.04"] + df.loc["2.02.01"]
net_debt = total_debt - total_cash
invested_capital = total_debt + equity - total_cash
invested_capital_p = self._prior_values(invested_capital, is_prior)
# Output Dataframe (dfo)
dfo = | pd.DataFrame(columns=df.columns) | pandas.DataFrame |
#!/usr/bin/env python
# coding: utf-8
# ## Load and process Park et al. data
#
# For each sample, we want to compute:
#
# * (non-silent) binary mutation status in the gene of interest
# * binary copy gain/loss status in the gene of interest
# * what "class" the gene of interest is in (more detail on what this means below)
#
# We'll save this to a file since the preprocessing takes a few minutes, so we can load it quickly in downstream analysis scripts.
# In[1]:
from pathlib import Path
import pickle as pkl
import pandas as pd
import sys; sys.path.append('..')
import config as cfg
get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
# In[2]:
# park et al. geneset info
park_loss_data = cfg.data_dir / 'park_loss_df.tsv'
park_gain_data = cfg.data_dir / 'park_gain_df.tsv'
# park et al. significant gene info
park_loss_sig_data = cfg.data_dir / 'park_loss_df_sig_only.tsv'
park_gain_sig_data = cfg.data_dir / 'park_gain_df_sig_only.tsv'
# mutation and copy number data
pancancer_pickle = Path('/home/jake/research/mpmp/data/pancancer_data.pkl')
# ### Load data from Park et al. supp. info
# In[3]:
park_loss_df = | pd.read_csv(park_loss_data, sep='\t', index_col=0) | pandas.read_csv |
import math
import queue
from datetime import datetime, timedelta, timezone
import pandas as pd
from storey import build_flow, SyncEmitSource, Reduce, Table, AggregateByKey, FieldAggregator, NoopDriver, \
DataframeSource
from storey.dtypes import SlidingWindows, FixedWindows, EmitAfterMaxEvent, EmitEveryEvent
test_base_time = datetime.fromisoformat("2020-07-21T21:40:00+00:00")
def append_return(lst, x):
lst.append(x)
return lst
def test_sliding_window_simple_aggregation_flow():
controller = build_flow([
SyncEmitSource(),
AggregateByKey([FieldAggregator("number_of_stuff", "col1", ["sum", "avg", "min", "max"],
SlidingWindows(['1h', '2h', '24h'], '10m'))],
Table("test", NoopDriver())),
Reduce([], lambda acc, x: append_return(acc, x)),
]).run()
for i in range(10):
data = {'col1': i}
controller.emit(data, 'tal', test_base_time + timedelta(minutes=25 * i))
controller.terminate()
actual = controller.await_termination()
expected_results = [
{'col1': 0, 'number_of_stuff_sum_1h': 0, 'number_of_stuff_sum_2h': 0, 'number_of_stuff_sum_24h': 0, 'number_of_stuff_min_1h': 0,
'number_of_stuff_min_2h': 0, 'number_of_stuff_min_24h': 0, 'number_of_stuff_max_1h': 0, 'number_of_stuff_max_2h': 0,
'number_of_stuff_max_24h': 0, 'number_of_stuff_avg_1h': 0.0, 'number_of_stuff_avg_2h': 0.0, 'number_of_stuff_avg_24h': 0.0},
{'col1': 1, 'number_of_stuff_sum_1h': 1, 'number_of_stuff_sum_2h': 1, 'number_of_stuff_sum_24h': 1, 'number_of_stuff_min_1h': 0,
'number_of_stuff_min_2h': 0, 'number_of_stuff_min_24h': 0, 'number_of_stuff_max_1h': 1, 'number_of_stuff_max_2h': 1,
'number_of_stuff_max_24h': 1, 'number_of_stuff_avg_1h': 0.5, 'number_of_stuff_avg_2h': 0.5, 'number_of_stuff_avg_24h': 0.5},
{'col1': 2, 'number_of_stuff_sum_1h': 3, 'number_of_stuff_sum_2h': 3, 'number_of_stuff_sum_24h': 3, 'number_of_stuff_min_1h': 0,
'number_of_stuff_min_2h': 0, 'number_of_stuff_min_24h': 0, 'number_of_stuff_max_1h': 2, 'number_of_stuff_max_2h': 2,
'number_of_stuff_max_24h': 2, 'number_of_stuff_avg_1h': 1.0, 'number_of_stuff_avg_2h': 1.0, 'number_of_stuff_avg_24h': 1.0},
{'col1': 3, 'number_of_stuff_sum_1h': 6, 'number_of_stuff_sum_2h': 6, 'number_of_stuff_sum_24h': 6, 'number_of_stuff_min_1h': 1,
'number_of_stuff_min_2h': 0, 'number_of_stuff_min_24h': 0, 'number_of_stuff_max_1h': 3, 'number_of_stuff_max_2h': 3,
'number_of_stuff_max_24h': 3, 'number_of_stuff_avg_1h': 2.0, 'number_of_stuff_avg_2h': 1.5, 'number_of_stuff_avg_24h': 1.5},
{'col1': 4, 'number_of_stuff_sum_1h': 9, 'number_of_stuff_sum_2h': 10, 'number_of_stuff_sum_24h': 10, 'number_of_stuff_min_1h': 2,
'number_of_stuff_min_2h': 0, 'number_of_stuff_min_24h': 0, 'number_of_stuff_max_1h': 4, 'number_of_stuff_max_2h': 4,
'number_of_stuff_max_24h': 4, 'number_of_stuff_avg_1h': 3.0, 'number_of_stuff_avg_2h': 2.0, 'number_of_stuff_avg_24h': 2.0},
{'col1': 5, 'number_of_stuff_sum_1h': 12, 'number_of_stuff_sum_2h': 15, 'number_of_stuff_sum_24h': 15, 'number_of_stuff_min_1h': 3,
'number_of_stuff_min_2h': 1, 'number_of_stuff_min_24h': 0, 'number_of_stuff_max_1h': 5, 'number_of_stuff_max_2h': 5,
'number_of_stuff_max_24h': 5, 'number_of_stuff_avg_1h': 4.0, 'number_of_stuff_avg_2h': 3.0, 'number_of_stuff_avg_24h': 2.5},
{'col1': 6, 'number_of_stuff_sum_1h': 15, 'number_of_stuff_sum_2h': 20, 'number_of_stuff_sum_24h': 21, 'number_of_stuff_min_1h': 4,
'number_of_stuff_min_2h': 2, 'number_of_stuff_min_24h': 0, 'number_of_stuff_max_1h': 6, 'number_of_stuff_max_2h': 6,
'number_of_stuff_max_24h': 6, 'number_of_stuff_avg_1h': 5.0, 'number_of_stuff_avg_2h': 4.0, 'number_of_stuff_avg_24h': 3.0},
{'col1': 7, 'number_of_stuff_sum_1h': 18, 'number_of_stuff_sum_2h': 25, 'number_of_stuff_sum_24h': 28, 'number_of_stuff_min_1h': 5,
'number_of_stuff_min_2h': 3, 'number_of_stuff_min_24h': 0, 'number_of_stuff_max_1h': 7, 'number_of_stuff_max_2h': 7,
'number_of_stuff_max_24h': 7, 'number_of_stuff_avg_1h': 6.0, 'number_of_stuff_avg_2h': 5.0, 'number_of_stuff_avg_24h': 3.5},
{'col1': 8, 'number_of_stuff_sum_1h': 21, 'number_of_stuff_sum_2h': 30, 'number_of_stuff_sum_24h': 36, 'number_of_stuff_min_1h': 6,
'number_of_stuff_min_2h': 4, 'number_of_stuff_min_24h': 0, 'number_of_stuff_max_1h': 8, 'number_of_stuff_max_2h': 8,
'number_of_stuff_max_24h': 8, 'number_of_stuff_avg_1h': 7.0, 'number_of_stuff_avg_2h': 6.0, 'number_of_stuff_avg_24h': 4.0},
{'col1': 9, 'number_of_stuff_sum_1h': 24, 'number_of_stuff_sum_2h': 35, 'number_of_stuff_sum_24h': 45, 'number_of_stuff_min_1h': 7,
'number_of_stuff_min_2h': 5, 'number_of_stuff_min_24h': 0, 'number_of_stuff_max_1h': 9, 'number_of_stuff_max_2h': 9,
'number_of_stuff_max_24h': 9, 'number_of_stuff_avg_1h': 8.0, 'number_of_stuff_avg_2h': 7.0, 'number_of_stuff_avg_24h': 4.5}
]
assert actual == expected_results, \
f'actual did not match expected. \n actual: {actual} \n expected: {expected_results}'
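# Two aggregators over different columns, with col1 and col2 arriving in separate
# events: until a column has been seen, its avg/min/max are NaN while its sum is 0.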
def test_sliding_window_sparse_data():
controller = build_flow([
SyncEmitSource(),
AggregateByKey(
[FieldAggregator("number_of_stuff1", "col1", ["sum", "avg", "min", "max"], SlidingWindows(['1h', '2h', '24h'], '10m')),
FieldAggregator("number_of_stuff2", "col2", ["sum", "avg", "min", "max"], SlidingWindows(['1h', '2h', '24h'], '10m'))],
Table("test", NoopDriver())),
Reduce([], lambda acc, x: append_return(acc, x)),
]).run()
for i in range(10):
controller.emit({'col1': i}, 'tal', test_base_time + timedelta(minutes=25 * i))
controller.emit({'col2': i}, 'tal', test_base_time + timedelta(minutes=25 * i))
controller.terminate()
actual = controller.await_termination()
expected_results = [{'col1': 0, 'number_of_stuff1_avg_1h': 0.0, 'number_of_stuff1_avg_24h': 0.0, 'number_of_stuff1_avg_2h': 0.0,
'number_of_stuff1_max_1h': 0, 'number_of_stuff1_max_24h': 0, 'number_of_stuff1_max_2h': 0,
'number_of_stuff1_min_1h': 0, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 0,
'number_of_stuff1_sum_1h': 0, 'number_of_stuff1_sum_24h': 0, 'number_of_stuff1_sum_2h': 0,
'number_of_stuff2_avg_1h': math.nan, 'number_of_stuff2_avg_24h': math.nan, 'number_of_stuff2_avg_2h': math.nan,
'number_of_stuff2_max_1h': math.nan, 'number_of_stuff2_max_24h': math.nan, 'number_of_stuff2_max_2h': math.nan,
'number_of_stuff2_min_1h': math.nan, 'number_of_stuff2_min_24h': math.nan, 'number_of_stuff2_min_2h': math.nan,
'number_of_stuff2_sum_1h': 0, 'number_of_stuff2_sum_24h': 0, 'number_of_stuff2_sum_2h': 0},
{'col2': 0, 'number_of_stuff1_avg_1h': 0.0, 'number_of_stuff1_avg_24h': 0.0, 'number_of_stuff1_avg_2h': 0.0,
'number_of_stuff1_max_1h': 0, 'number_of_stuff1_max_24h': 0, 'number_of_stuff1_max_2h': 0,
'number_of_stuff1_min_1h': 0, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 0,
'number_of_stuff1_sum_1h': 0, 'number_of_stuff1_sum_24h': 0, 'number_of_stuff1_sum_2h': 0,
'number_of_stuff2_avg_1h': 0.0, 'number_of_stuff2_avg_24h': 0.0, 'number_of_stuff2_avg_2h': 0.0,
'number_of_stuff2_max_1h': 0, 'number_of_stuff2_max_24h': 0, 'number_of_stuff2_max_2h': 0,
'number_of_stuff2_min_1h': 0, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 0,
'number_of_stuff2_sum_1h': 0, 'number_of_stuff2_sum_24h': 0, 'number_of_stuff2_sum_2h': 0},
{'col1': 1, 'number_of_stuff1_avg_1h': 0.5, 'number_of_stuff1_avg_24h': 0.5, 'number_of_stuff1_avg_2h': 0.5,
'number_of_stuff1_max_1h': 1, 'number_of_stuff1_max_24h': 1, 'number_of_stuff1_max_2h': 1,
'number_of_stuff1_min_1h': 0, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 0,
'number_of_stuff1_sum_1h': 1, 'number_of_stuff1_sum_24h': 1, 'number_of_stuff1_sum_2h': 1,
'number_of_stuff2_avg_1h': 0.0, 'number_of_stuff2_avg_24h': 0.0, 'number_of_stuff2_avg_2h': 0.0,
'number_of_stuff2_max_1h': 0, 'number_of_stuff2_max_24h': 0, 'number_of_stuff2_max_2h': 0,
'number_of_stuff2_min_1h': 0, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 0,
'number_of_stuff2_sum_1h': 0, 'number_of_stuff2_sum_24h': 0, 'number_of_stuff2_sum_2h': 0},
{'col2': 1, 'number_of_stuff1_avg_1h': 0.5, 'number_of_stuff1_avg_24h': 0.5, 'number_of_stuff1_avg_2h': 0.5,
'number_of_stuff1_max_1h': 1, 'number_of_stuff1_max_24h': 1, 'number_of_stuff1_max_2h': 1,
'number_of_stuff1_min_1h': 0, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 0,
'number_of_stuff1_sum_1h': 1, 'number_of_stuff1_sum_24h': 1, 'number_of_stuff1_sum_2h': 1,
'number_of_stuff2_avg_1h': 0.5, 'number_of_stuff2_avg_24h': 0.5, 'number_of_stuff2_avg_2h': 0.5,
'number_of_stuff2_max_1h': 1, 'number_of_stuff2_max_24h': 1, 'number_of_stuff2_max_2h': 1,
'number_of_stuff2_min_1h': 0, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 0,
'number_of_stuff2_sum_1h': 1, 'number_of_stuff2_sum_24h': 1, 'number_of_stuff2_sum_2h': 1},
{'col1': 2, 'number_of_stuff1_avg_1h': 1.0, 'number_of_stuff1_avg_24h': 1.0, 'number_of_stuff1_avg_2h': 1.0,
'number_of_stuff1_max_1h': 2, 'number_of_stuff1_max_24h': 2, 'number_of_stuff1_max_2h': 2,
'number_of_stuff1_min_1h': 0, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 0,
'number_of_stuff1_sum_1h': 3, 'number_of_stuff1_sum_24h': 3, 'number_of_stuff1_sum_2h': 3,
'number_of_stuff2_avg_1h': 0.5, 'number_of_stuff2_avg_24h': 0.5, 'number_of_stuff2_avg_2h': 0.5,
'number_of_stuff2_max_1h': 1, 'number_of_stuff2_max_24h': 1, 'number_of_stuff2_max_2h': 1,
'number_of_stuff2_min_1h': 0, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 0,
'number_of_stuff2_sum_1h': 1, 'number_of_stuff2_sum_24h': 1, 'number_of_stuff2_sum_2h': 1},
{'col2': 2, 'number_of_stuff1_avg_1h': 1.0, 'number_of_stuff1_avg_24h': 1.0, 'number_of_stuff1_avg_2h': 1.0,
'number_of_stuff1_max_1h': 2, 'number_of_stuff1_max_24h': 2, 'number_of_stuff1_max_2h': 2,
'number_of_stuff1_min_1h': 0, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 0,
'number_of_stuff1_sum_1h': 3, 'number_of_stuff1_sum_24h': 3, 'number_of_stuff1_sum_2h': 3,
'number_of_stuff2_avg_1h': 1.0, 'number_of_stuff2_avg_24h': 1.0, 'number_of_stuff2_avg_2h': 1.0,
'number_of_stuff2_max_1h': 2, 'number_of_stuff2_max_24h': 2, 'number_of_stuff2_max_2h': 2,
'number_of_stuff2_min_1h': 0, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 0,
'number_of_stuff2_sum_1h': 3, 'number_of_stuff2_sum_24h': 3, 'number_of_stuff2_sum_2h': 3},
{'col1': 3, 'number_of_stuff1_avg_1h': 2.0, 'number_of_stuff1_avg_24h': 1.5, 'number_of_stuff1_avg_2h': 1.5,
'number_of_stuff1_max_1h': 3, 'number_of_stuff1_max_24h': 3, 'number_of_stuff1_max_2h': 3,
'number_of_stuff1_min_1h': 1, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 0,
'number_of_stuff1_sum_1h': 6, 'number_of_stuff1_sum_24h': 6, 'number_of_stuff1_sum_2h': 6,
'number_of_stuff2_avg_1h': 1.0, 'number_of_stuff2_avg_24h': 1.0, 'number_of_stuff2_avg_2h': 1.0,
'number_of_stuff2_max_1h': 2, 'number_of_stuff2_max_24h': 2, 'number_of_stuff2_max_2h': 2,
'number_of_stuff2_min_1h': 0, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 0,
'number_of_stuff2_sum_1h': 3, 'number_of_stuff2_sum_24h': 3, 'number_of_stuff2_sum_2h': 3},
{'col2': 3, 'number_of_stuff1_avg_1h': 2.0, 'number_of_stuff1_avg_24h': 1.5, 'number_of_stuff1_avg_2h': 1.5,
'number_of_stuff1_max_1h': 3, 'number_of_stuff1_max_24h': 3, 'number_of_stuff1_max_2h': 3,
'number_of_stuff1_min_1h': 1, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 0,
'number_of_stuff1_sum_1h': 6, 'number_of_stuff1_sum_24h': 6, 'number_of_stuff1_sum_2h': 6,
'number_of_stuff2_avg_1h': 2.0, 'number_of_stuff2_avg_24h': 1.5, 'number_of_stuff2_avg_2h': 1.5,
'number_of_stuff2_max_1h': 3, 'number_of_stuff2_max_24h': 3, 'number_of_stuff2_max_2h': 3,
'number_of_stuff2_min_1h': 1, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 0,
'number_of_stuff2_sum_1h': 6, 'number_of_stuff2_sum_24h': 6, 'number_of_stuff2_sum_2h': 6},
{'col1': 4, 'number_of_stuff1_avg_1h': 3.0, 'number_of_stuff1_avg_24h': 2.0, 'number_of_stuff1_avg_2h': 2.0,
'number_of_stuff1_max_1h': 4, 'number_of_stuff1_max_24h': 4, 'number_of_stuff1_max_2h': 4,
'number_of_stuff1_min_1h': 2, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 0,
'number_of_stuff1_sum_1h': 9, 'number_of_stuff1_sum_24h': 10, 'number_of_stuff1_sum_2h': 10,
'number_of_stuff2_avg_1h': 2.0, 'number_of_stuff2_avg_24h': 1.5, 'number_of_stuff2_avg_2h': 1.5,
'number_of_stuff2_max_1h': 3, 'number_of_stuff2_max_24h': 3, 'number_of_stuff2_max_2h': 3,
'number_of_stuff2_min_1h': 1, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 0,
'number_of_stuff2_sum_1h': 6, 'number_of_stuff2_sum_24h': 6, 'number_of_stuff2_sum_2h': 6},
{'col2': 4, 'number_of_stuff1_avg_1h': 3.0, 'number_of_stuff1_avg_24h': 2.0, 'number_of_stuff1_avg_2h': 2.0,
'number_of_stuff1_max_1h': 4, 'number_of_stuff1_max_24h': 4, 'number_of_stuff1_max_2h': 4,
'number_of_stuff1_min_1h': 2, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 0,
'number_of_stuff1_sum_1h': 9, 'number_of_stuff1_sum_24h': 10, 'number_of_stuff1_sum_2h': 10,
'number_of_stuff2_avg_1h': 3.0, 'number_of_stuff2_avg_24h': 2.0, 'number_of_stuff2_avg_2h': 2.0,
'number_of_stuff2_max_1h': 4, 'number_of_stuff2_max_24h': 4, 'number_of_stuff2_max_2h': 4,
'number_of_stuff2_min_1h': 2, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 0,
'number_of_stuff2_sum_1h': 9, 'number_of_stuff2_sum_24h': 10, 'number_of_stuff2_sum_2h': 10},
{'col1': 5, 'number_of_stuff1_avg_1h': 4.0, 'number_of_stuff1_avg_24h': 2.5, 'number_of_stuff1_avg_2h': 3.0,
'number_of_stuff1_max_1h': 5, 'number_of_stuff1_max_24h': 5, 'number_of_stuff1_max_2h': 5,
'number_of_stuff1_min_1h': 3, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 1,
'number_of_stuff1_sum_1h': 12, 'number_of_stuff1_sum_24h': 15, 'number_of_stuff1_sum_2h': 15,
'number_of_stuff2_avg_1h': 3.0, 'number_of_stuff2_avg_24h': 2.0, 'number_of_stuff2_avg_2h': 2.0,
'number_of_stuff2_max_1h': 4, 'number_of_stuff2_max_24h': 4, 'number_of_stuff2_max_2h': 4,
'number_of_stuff2_min_1h': 2, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 0,
'number_of_stuff2_sum_1h': 9, 'number_of_stuff2_sum_24h': 10, 'number_of_stuff2_sum_2h': 10},
{'col2': 5, 'number_of_stuff1_avg_1h': 4.0, 'number_of_stuff1_avg_24h': 2.5, 'number_of_stuff1_avg_2h': 3.0,
'number_of_stuff1_max_1h': 5, 'number_of_stuff1_max_24h': 5, 'number_of_stuff1_max_2h': 5,
'number_of_stuff1_min_1h': 3, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 1,
'number_of_stuff1_sum_1h': 12, 'number_of_stuff1_sum_24h': 15, 'number_of_stuff1_sum_2h': 15,
'number_of_stuff2_avg_1h': 4.0, 'number_of_stuff2_avg_24h': 2.5, 'number_of_stuff2_avg_2h': 3.0,
'number_of_stuff2_max_1h': 5, 'number_of_stuff2_max_24h': 5, 'number_of_stuff2_max_2h': 5,
'number_of_stuff2_min_1h': 3, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 1,
'number_of_stuff2_sum_1h': 12, 'number_of_stuff2_sum_24h': 15, 'number_of_stuff2_sum_2h': 15},
{'col1': 6, 'number_of_stuff1_avg_1h': 5.0, 'number_of_stuff1_avg_24h': 3.0, 'number_of_stuff1_avg_2h': 4.0,
'number_of_stuff1_max_1h': 6, 'number_of_stuff1_max_24h': 6, 'number_of_stuff1_max_2h': 6,
'number_of_stuff1_min_1h': 4, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 2,
'number_of_stuff1_sum_1h': 15, 'number_of_stuff1_sum_24h': 21, 'number_of_stuff1_sum_2h': 20,
'number_of_stuff2_avg_1h': 4.0, 'number_of_stuff2_avg_24h': 2.5, 'number_of_stuff2_avg_2h': 3.0,
'number_of_stuff2_max_1h': 5, 'number_of_stuff2_max_24h': 5, 'number_of_stuff2_max_2h': 5,
'number_of_stuff2_min_1h': 3, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 1,
'number_of_stuff2_sum_1h': 12, 'number_of_stuff2_sum_24h': 15, 'number_of_stuff2_sum_2h': 15},
{'col2': 6, 'number_of_stuff1_avg_1h': 5.0, 'number_of_stuff1_avg_24h': 3.0, 'number_of_stuff1_avg_2h': 4.0,
'number_of_stuff1_max_1h': 6, 'number_of_stuff1_max_24h': 6, 'number_of_stuff1_max_2h': 6,
'number_of_stuff1_min_1h': 4, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 2,
'number_of_stuff1_sum_1h': 15, 'number_of_stuff1_sum_24h': 21, 'number_of_stuff1_sum_2h': 20,
'number_of_stuff2_avg_1h': 5.0, 'number_of_stuff2_avg_24h': 3.0, 'number_of_stuff2_avg_2h': 4.0,
'number_of_stuff2_max_1h': 6, 'number_of_stuff2_max_24h': 6, 'number_of_stuff2_max_2h': 6,
'number_of_stuff2_min_1h': 4, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 2,
'number_of_stuff2_sum_1h': 15, 'number_of_stuff2_sum_24h': 21, 'number_of_stuff2_sum_2h': 20},
{'col1': 7, 'number_of_stuff1_avg_1h': 6.0, 'number_of_stuff1_avg_24h': 3.5, 'number_of_stuff1_avg_2h': 5.0,
'number_of_stuff1_max_1h': 7, 'number_of_stuff1_max_24h': 7, 'number_of_stuff1_max_2h': 7,
'number_of_stuff1_min_1h': 5, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 3,
'number_of_stuff1_sum_1h': 18, 'number_of_stuff1_sum_24h': 28, 'number_of_stuff1_sum_2h': 25,
'number_of_stuff2_avg_1h': 5.0, 'number_of_stuff2_avg_24h': 3.0, 'number_of_stuff2_avg_2h': 4.0,
'number_of_stuff2_max_1h': 6, 'number_of_stuff2_max_24h': 6, 'number_of_stuff2_max_2h': 6,
'number_of_stuff2_min_1h': 4, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 2,
'number_of_stuff2_sum_1h': 15, 'number_of_stuff2_sum_24h': 21, 'number_of_stuff2_sum_2h': 20},
{'col2': 7, 'number_of_stuff1_avg_1h': 6.0, 'number_of_stuff1_avg_24h': 3.5, 'number_of_stuff1_avg_2h': 5.0,
'number_of_stuff1_max_1h': 7, 'number_of_stuff1_max_24h': 7, 'number_of_stuff1_max_2h': 7,
'number_of_stuff1_min_1h': 5, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 3,
'number_of_stuff1_sum_1h': 18, 'number_of_stuff1_sum_24h': 28, 'number_of_stuff1_sum_2h': 25,
'number_of_stuff2_avg_1h': 6.0, 'number_of_stuff2_avg_24h': 3.5, 'number_of_stuff2_avg_2h': 5.0,
'number_of_stuff2_max_1h': 7, 'number_of_stuff2_max_24h': 7, 'number_of_stuff2_max_2h': 7,
'number_of_stuff2_min_1h': 5, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 3,
'number_of_stuff2_sum_1h': 18, 'number_of_stuff2_sum_24h': 28, 'number_of_stuff2_sum_2h': 25},
{'col1': 8, 'number_of_stuff1_avg_1h': 7.0, 'number_of_stuff1_avg_24h': 4.0, 'number_of_stuff1_avg_2h': 6.0,
'number_of_stuff1_max_1h': 8, 'number_of_stuff1_max_24h': 8, 'number_of_stuff1_max_2h': 8,
'number_of_stuff1_min_1h': 6, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 4,
'number_of_stuff1_sum_1h': 21, 'number_of_stuff1_sum_24h': 36, 'number_of_stuff1_sum_2h': 30,
'number_of_stuff2_avg_1h': 6.0, 'number_of_stuff2_avg_24h': 3.5, 'number_of_stuff2_avg_2h': 5.0,
'number_of_stuff2_max_1h': 7, 'number_of_stuff2_max_24h': 7, 'number_of_stuff2_max_2h': 7,
'number_of_stuff2_min_1h': 5, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 3,
'number_of_stuff2_sum_1h': 18, 'number_of_stuff2_sum_24h': 28, 'number_of_stuff2_sum_2h': 25},
{'col2': 8, 'number_of_stuff1_avg_1h': 7.0, 'number_of_stuff1_avg_24h': 4.0, 'number_of_stuff1_avg_2h': 6.0,
'number_of_stuff1_max_1h': 8, 'number_of_stuff1_max_24h': 8, 'number_of_stuff1_max_2h': 8,
'number_of_stuff1_min_1h': 6, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 4,
'number_of_stuff1_sum_1h': 21, 'number_of_stuff1_sum_24h': 36, 'number_of_stuff1_sum_2h': 30,
'number_of_stuff2_avg_1h': 7.0, 'number_of_stuff2_avg_24h': 4.0, 'number_of_stuff2_avg_2h': 6.0,
'number_of_stuff2_max_1h': 8, 'number_of_stuff2_max_24h': 8, 'number_of_stuff2_max_2h': 8,
'number_of_stuff2_min_1h': 6, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 4,
'number_of_stuff2_sum_1h': 21, 'number_of_stuff2_sum_24h': 36, 'number_of_stuff2_sum_2h': 30},
{'col1': 9, 'number_of_stuff1_avg_1h': 8.0, 'number_of_stuff1_avg_24h': 4.5, 'number_of_stuff1_avg_2h': 7.0,
'number_of_stuff1_max_1h': 9, 'number_of_stuff1_max_24h': 9, 'number_of_stuff1_max_2h': 9,
'number_of_stuff1_min_1h': 7, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 5,
'number_of_stuff1_sum_1h': 24, 'number_of_stuff1_sum_24h': 45, 'number_of_stuff1_sum_2h': 35,
'number_of_stuff2_avg_1h': 7.0, 'number_of_stuff2_avg_24h': 4.0, 'number_of_stuff2_avg_2h': 6.0,
'number_of_stuff2_max_1h': 8, 'number_of_stuff2_max_24h': 8, 'number_of_stuff2_max_2h': 8,
'number_of_stuff2_min_1h': 6, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 4,
'number_of_stuff2_sum_1h': 21, 'number_of_stuff2_sum_24h': 36, 'number_of_stuff2_sum_2h': 30},
{'col2': 9, 'number_of_stuff1_avg_1h': 8.0, 'number_of_stuff1_avg_24h': 4.5, 'number_of_stuff1_avg_2h': 7.0,
'number_of_stuff1_max_1h': 9, 'number_of_stuff1_max_24h': 9, 'number_of_stuff1_max_2h': 9,
'number_of_stuff1_min_1h': 7, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 5,
'number_of_stuff1_sum_1h': 24, 'number_of_stuff1_sum_24h': 45, 'number_of_stuff1_sum_2h': 35,
'number_of_stuff2_avg_1h': 8.0, 'number_of_stuff2_avg_24h': 4.5, 'number_of_stuff2_avg_2h': 7.0,
'number_of_stuff2_max_1h': 9, 'number_of_stuff2_max_24h': 9, 'number_of_stuff2_max_2h': 9,
'number_of_stuff2_min_1h': 7, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 5,
'number_of_stuff2_sum_1h': 24, 'number_of_stuff2_sum_24h': 45, 'number_of_stuff2_sum_2h': 35}]
assert actual == expected_results, \
f'actual did not match expected. \n actual: {actual} \n expected: {expected_results}'
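# col1 is emitted only once while col2 keeps arriving, so the col1 aggregates stay
# frozen at their initial values as the col2 windows continue to advance.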
def test_sliding_window_sparse_data_uneven_feature_occurrence():
controller = build_flow([
SyncEmitSource(),
AggregateByKey(
[FieldAggregator("number_of_stuff1", "col1", ["sum", "avg", "min", "max"], SlidingWindows(['1h', '2h', '24h'], '10m')),
FieldAggregator("number_of_stuff2", "col2", ["sum", "avg", "min", "max"], SlidingWindows(['1h', '2h', '24h'], '10m'))],
Table("test", NoopDriver())),
Reduce([], lambda acc, x: append_return(acc, x)),
]).run()
controller.emit({'col1': 0}, 'tal', test_base_time)
for i in range(10):
controller.emit({'col2': i}, 'tal', test_base_time + timedelta(minutes=25 * i))
controller.terminate()
actual = controller.await_termination()
expected_results = [{'col1': 0, 'number_of_stuff1_avg_1h': 0.0, 'number_of_stuff1_avg_24h': 0.0, 'number_of_stuff1_avg_2h': 0.0,
'number_of_stuff1_max_1h': 0, 'number_of_stuff1_max_24h': 0, 'number_of_stuff1_max_2h': 0,
'number_of_stuff1_min_1h': 0, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 0,
'number_of_stuff1_sum_1h': 0, 'number_of_stuff1_sum_24h': 0, 'number_of_stuff1_sum_2h': 0,
'number_of_stuff2_avg_1h': math.nan, 'number_of_stuff2_avg_24h': math.nan, 'number_of_stuff2_avg_2h': math.nan,
'number_of_stuff2_max_1h': math.nan, 'number_of_stuff2_max_24h': math.nan, 'number_of_stuff2_max_2h': math.nan,
'number_of_stuff2_min_1h': math.nan, 'number_of_stuff2_min_24h': math.nan, 'number_of_stuff2_min_2h': math.nan,
'number_of_stuff2_sum_1h': 0, 'number_of_stuff2_sum_24h': 0, 'number_of_stuff2_sum_2h': 0},
{'col2': 0, 'number_of_stuff1_avg_1h': 0.0, 'number_of_stuff1_avg_24h': 0.0, 'number_of_stuff1_avg_2h': 0.0,
'number_of_stuff1_max_1h': 0, 'number_of_stuff1_max_24h': 0, 'number_of_stuff1_max_2h': 0,
'number_of_stuff1_min_1h': 0, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 0,
'number_of_stuff1_sum_1h': 0, 'number_of_stuff1_sum_24h': 0, 'number_of_stuff1_sum_2h': 0,
'number_of_stuff2_avg_1h': 0.0, 'number_of_stuff2_avg_24h': 0.0, 'number_of_stuff2_avg_2h': 0.0,
'number_of_stuff2_max_1h': 0, 'number_of_stuff2_max_24h': 0, 'number_of_stuff2_max_2h': 0,
'number_of_stuff2_min_1h': 0, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 0,
'number_of_stuff2_sum_1h': 0, 'number_of_stuff2_sum_24h': 0, 'number_of_stuff2_sum_2h': 0},
{'col2': 1, 'number_of_stuff1_avg_1h': 0.0, 'number_of_stuff1_avg_24h': 0.0, 'number_of_stuff1_avg_2h': 0.0,
'number_of_stuff1_max_1h': 0, 'number_of_stuff1_max_24h': 0, 'number_of_stuff1_max_2h': 0,
'number_of_stuff1_min_1h': 0, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 0,
'number_of_stuff1_sum_1h': 0, 'number_of_stuff1_sum_24h': 0, 'number_of_stuff1_sum_2h': 0,
'number_of_stuff2_avg_1h': 0.5, 'number_of_stuff2_avg_24h': 0.5, 'number_of_stuff2_avg_2h': 0.5,
'number_of_stuff2_max_1h': 1, 'number_of_stuff2_max_24h': 1, 'number_of_stuff2_max_2h': 1,
'number_of_stuff2_min_1h': 0, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 0,
'number_of_stuff2_sum_1h': 1, 'number_of_stuff2_sum_24h': 1, 'number_of_stuff2_sum_2h': 1},
{'col2': 2, 'number_of_stuff1_avg_1h': 0.0, 'number_of_stuff1_avg_24h': 0.0, 'number_of_stuff1_avg_2h': 0.0,
'number_of_stuff1_max_1h': 0, 'number_of_stuff1_max_24h': 0, 'number_of_stuff1_max_2h': 0,
'number_of_stuff1_min_1h': 0, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 0,
'number_of_stuff1_sum_1h': 0, 'number_of_stuff1_sum_24h': 0, 'number_of_stuff1_sum_2h': 0,
'number_of_stuff2_avg_1h': 1.0, 'number_of_stuff2_avg_24h': 1.0, 'number_of_stuff2_avg_2h': 1.0,
'number_of_stuff2_max_1h': 2, 'number_of_stuff2_max_24h': 2, 'number_of_stuff2_max_2h': 2,
'number_of_stuff2_min_1h': 0, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 0,
'number_of_stuff2_sum_1h': 3, 'number_of_stuff2_sum_24h': 3, 'number_of_stuff2_sum_2h': 3},
{'col2': 3, 'number_of_stuff1_avg_1h': 0.0, 'number_of_stuff1_avg_24h': 0.0, 'number_of_stuff1_avg_2h': 0.0,
'number_of_stuff1_max_1h': 0, 'number_of_stuff1_max_24h': 0, 'number_of_stuff1_max_2h': 0,
'number_of_stuff1_min_1h': 0, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 0,
'number_of_stuff1_sum_1h': 0, 'number_of_stuff1_sum_24h': 0, 'number_of_stuff1_sum_2h': 0,
'number_of_stuff2_avg_1h': 2.0, 'number_of_stuff2_avg_24h': 1.5, 'number_of_stuff2_avg_2h': 1.5,
'number_of_stuff2_max_1h': 3, 'number_of_stuff2_max_24h': 3, 'number_of_stuff2_max_2h': 3,
'number_of_stuff2_min_1h': 1, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 0,
'number_of_stuff2_sum_1h': 6, 'number_of_stuff2_sum_24h': 6, 'number_of_stuff2_sum_2h': 6},
{'col2': 4, 'number_of_stuff1_avg_1h': 0.0, 'number_of_stuff1_avg_24h': 0.0, 'number_of_stuff1_avg_2h': 0.0,
'number_of_stuff1_max_1h': 0, 'number_of_stuff1_max_24h': 0, 'number_of_stuff1_max_2h': 0,
'number_of_stuff1_min_1h': 0, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 0,
'number_of_stuff1_sum_1h': 0, 'number_of_stuff1_sum_24h': 0, 'number_of_stuff1_sum_2h': 0,
'number_of_stuff2_avg_1h': 3.0, 'number_of_stuff2_avg_24h': 2.0, 'number_of_stuff2_avg_2h': 2.0,
'number_of_stuff2_max_1h': 4, 'number_of_stuff2_max_24h': 4, 'number_of_stuff2_max_2h': 4,
'number_of_stuff2_min_1h': 2, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 0,
'number_of_stuff2_sum_1h': 9, 'number_of_stuff2_sum_24h': 10, 'number_of_stuff2_sum_2h': 10},
{'col2': 5, 'number_of_stuff1_avg_1h': 0.0, 'number_of_stuff1_avg_24h': 0.0, 'number_of_stuff1_avg_2h': 0.0,
'number_of_stuff1_max_1h': 0, 'number_of_stuff1_max_24h': 0, 'number_of_stuff1_max_2h': 0,
'number_of_stuff1_min_1h': 0, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 0,
'number_of_stuff1_sum_1h': 0, 'number_of_stuff1_sum_24h': 0, 'number_of_stuff1_sum_2h': 0,
'number_of_stuff2_avg_1h': 4.0, 'number_of_stuff2_avg_24h': 2.5, 'number_of_stuff2_avg_2h': 3.0,
'number_of_stuff2_max_1h': 5, 'number_of_stuff2_max_24h': 5, 'number_of_stuff2_max_2h': 5,
'number_of_stuff2_min_1h': 3, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 1,
'number_of_stuff2_sum_1h': 12, 'number_of_stuff2_sum_24h': 15, 'number_of_stuff2_sum_2h': 15},
{'col2': 6, 'number_of_stuff1_avg_1h': 0.0, 'number_of_stuff1_avg_24h': 0.0, 'number_of_stuff1_avg_2h': 0.0,
'number_of_stuff1_max_1h': 0, 'number_of_stuff1_max_24h': 0, 'number_of_stuff1_max_2h': 0,
'number_of_stuff1_min_1h': 0, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 0,
'number_of_stuff1_sum_1h': 0, 'number_of_stuff1_sum_24h': 0, 'number_of_stuff1_sum_2h': 0,
'number_of_stuff2_avg_1h': 5.0, 'number_of_stuff2_avg_24h': 3.0, 'number_of_stuff2_avg_2h': 4.0,
'number_of_stuff2_max_1h': 6, 'number_of_stuff2_max_24h': 6, 'number_of_stuff2_max_2h': 6,
'number_of_stuff2_min_1h': 4, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 2,
'number_of_stuff2_sum_1h': 15, 'number_of_stuff2_sum_24h': 21, 'number_of_stuff2_sum_2h': 20},
{'col2': 7, 'number_of_stuff1_avg_1h': 0.0, 'number_of_stuff1_avg_24h': 0.0, 'number_of_stuff1_avg_2h': 0.0,
'number_of_stuff1_max_1h': 0, 'number_of_stuff1_max_24h': 0, 'number_of_stuff1_max_2h': 0,
'number_of_stuff1_min_1h': 0, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 0,
'number_of_stuff1_sum_1h': 0, 'number_of_stuff1_sum_24h': 0, 'number_of_stuff1_sum_2h': 0,
'number_of_stuff2_avg_1h': 6.0, 'number_of_stuff2_avg_24h': 3.5, 'number_of_stuff2_avg_2h': 5.0,
'number_of_stuff2_max_1h': 7, 'number_of_stuff2_max_24h': 7, 'number_of_stuff2_max_2h': 7,
'number_of_stuff2_min_1h': 5, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 3,
'number_of_stuff2_sum_1h': 18, 'number_of_stuff2_sum_24h': 28, 'number_of_stuff2_sum_2h': 25},
{'col2': 8, 'number_of_stuff1_avg_1h': 0.0, 'number_of_stuff1_avg_24h': 0.0, 'number_of_stuff1_avg_2h': 0.0,
'number_of_stuff1_max_1h': 0, 'number_of_stuff1_max_24h': 0, 'number_of_stuff1_max_2h': 0,
'number_of_stuff1_min_1h': 0, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 0,
'number_of_stuff1_sum_1h': 0, 'number_of_stuff1_sum_24h': 0, 'number_of_stuff1_sum_2h': 0,
'number_of_stuff2_avg_1h': 7.0, 'number_of_stuff2_avg_24h': 4.0, 'number_of_stuff2_avg_2h': 6.0,
'number_of_stuff2_max_1h': 8, 'number_of_stuff2_max_24h': 8, 'number_of_stuff2_max_2h': 8,
'number_of_stuff2_min_1h': 6, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 4,
'number_of_stuff2_sum_1h': 21, 'number_of_stuff2_sum_24h': 36, 'number_of_stuff2_sum_2h': 30},
{'col2': 9, 'number_of_stuff1_avg_1h': 0.0, 'number_of_stuff1_avg_24h': 0.0, 'number_of_stuff1_avg_2h': 0.0,
'number_of_stuff1_max_1h': 0, 'number_of_stuff1_max_24h': 0, 'number_of_stuff1_max_2h': 0,
'number_of_stuff1_min_1h': 0, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 0,
'number_of_stuff1_sum_1h': 0, 'number_of_stuff1_sum_24h': 0, 'number_of_stuff1_sum_2h': 0,
'number_of_stuff2_avg_1h': 8.0, 'number_of_stuff2_avg_24h': 4.5, 'number_of_stuff2_avg_2h': 7.0,
'number_of_stuff2_max_1h': 9, 'number_of_stuff2_max_24h': 9, 'number_of_stuff2_max_2h': 9,
'number_of_stuff2_min_1h': 7, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 5,
'number_of_stuff2_sum_1h': 24, 'number_of_stuff2_sum_24h': 45, 'number_of_stuff2_sum_2h': 35}]
assert actual == expected_results, \
f'actual did not match expected. \n actual: {actual} \n expected: {expected_results}'
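# Events alternate between keys '0' and '1' (i % 2), so each key aggregates only its
# own values; e.g. the final event for key '1' sums 1 + 3 + 5 + 7 + 9 = 25.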
def test_sliding_window_multiple_keys_aggregation_flow():
controller = build_flow([
SyncEmitSource(),
AggregateByKey([FieldAggregator("number_of_stuff", "col1", ["sum", "avg"],
SlidingWindows(['1h', '2h', '24h'], '10m'))],
Table("test", NoopDriver())),
Reduce([], lambda acc, x: append_return(acc, x)),
]).run()
for i in range(10):
data = {'col1': i}
controller.emit(data, f'{i % 2}', test_base_time + timedelta(minutes=i))
controller.terminate()
actual = controller.await_termination()
expected_results = [
{'col1': 0, 'number_of_stuff_sum_1h': 0, 'number_of_stuff_sum_2h': 0, 'number_of_stuff_sum_24h': 0,
'number_of_stuff_avg_1h': 0.0, 'number_of_stuff_avg_2h': 0.0, 'number_of_stuff_avg_24h': 0.0},
{'col1': 1, 'number_of_stuff_sum_1h': 1, 'number_of_stuff_sum_2h': 1, 'number_of_stuff_sum_24h': 1,
'number_of_stuff_avg_1h': 1.0, 'number_of_stuff_avg_2h': 1.0, 'number_of_stuff_avg_24h': 1.0},
{'col1': 2, 'number_of_stuff_sum_1h': 2, 'number_of_stuff_sum_2h': 2, 'number_of_stuff_sum_24h': 2,
'number_of_stuff_avg_1h': 1.0, 'number_of_stuff_avg_2h': 1.0, 'number_of_stuff_avg_24h': 1.0},
{'col1': 3, 'number_of_stuff_sum_1h': 4, 'number_of_stuff_sum_2h': 4, 'number_of_stuff_sum_24h': 4,
'number_of_stuff_avg_1h': 2.0, 'number_of_stuff_avg_2h': 2.0, 'number_of_stuff_avg_24h': 2.0},
{'col1': 4, 'number_of_stuff_sum_1h': 6, 'number_of_stuff_sum_2h': 6, 'number_of_stuff_sum_24h': 6,
'number_of_stuff_avg_1h': 2.0, 'number_of_stuff_avg_2h': 2.0, 'number_of_stuff_avg_24h': 2.0},
{'col1': 5, 'number_of_stuff_sum_1h': 9, 'number_of_stuff_sum_2h': 9, 'number_of_stuff_sum_24h': 9,
'number_of_stuff_avg_1h': 3.0, 'number_of_stuff_avg_2h': 3.0, 'number_of_stuff_avg_24h': 3.0},
{'col1': 6, 'number_of_stuff_sum_1h': 12, 'number_of_stuff_sum_2h': 12, 'number_of_stuff_sum_24h': 12,
'number_of_stuff_avg_1h': 3.0, 'number_of_stuff_avg_2h': 3.0, 'number_of_stuff_avg_24h': 3.0},
{'col1': 7, 'number_of_stuff_sum_1h': 16, 'number_of_stuff_sum_2h': 16, 'number_of_stuff_sum_24h': 16,
'number_of_stuff_avg_1h': 4.0, 'number_of_stuff_avg_2h': 4.0, 'number_of_stuff_avg_24h': 4.0},
{'col1': 8, 'number_of_stuff_sum_1h': 20, 'number_of_stuff_sum_2h': 20, 'number_of_stuff_sum_24h': 20,
'number_of_stuff_avg_1h': 4.0, 'number_of_stuff_avg_2h': 4.0, 'number_of_stuff_avg_24h': 4.0},
{'col1': 9, 'number_of_stuff_sum_1h': 25, 'number_of_stuff_sum_2h': 25, 'number_of_stuff_sum_24h': 25,
'number_of_stuff_avg_1h': 5.0, 'number_of_stuff_avg_2h': 5.0, 'number_of_stuff_avg_24h': 5.0}]
assert actual == expected_results, \
f'actual did not match expected. \n actual: {actual} \n expected: {expected_results}'
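# aggr_filter keeps only events with is_valid == 0, so odd events still flow
# downstream but are excluded from the running sum/avg.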
def test_sliding_window_aggregations_with_filters_flow():
controller = build_flow([
SyncEmitSource(),
AggregateByKey([FieldAggregator("number_of_stuff", "col1", ["sum", "avg"],
SlidingWindows(['1h', '2h', '24h'], '10m'),
aggr_filter=lambda element: element['is_valid'] == 0)],
Table("test", NoopDriver())),
Reduce([], lambda acc, x: append_return(acc, x)),
]).run()
for i in range(10):
data = {'col1': i, 'is_valid': i % 2}
controller.emit(data, 'tal', test_base_time + timedelta(minutes=i))
controller.terminate()
actual = controller.await_termination()
expected_results = [{'col1': 0, 'is_valid': 0, 'number_of_stuff_sum_1h': 0, 'number_of_stuff_sum_2h': 0,
'number_of_stuff_sum_24h': 0, 'number_of_stuff_avg_1h': 0.0, 'number_of_stuff_avg_2h': 0.0,
'number_of_stuff_avg_24h': 0.0},
{'col1': 1, 'is_valid': 1, 'number_of_stuff_sum_1h': 0, 'number_of_stuff_sum_2h': 0,
'number_of_stuff_sum_24h': 0, 'number_of_stuff_avg_1h': 0.0, 'number_of_stuff_avg_2h': 0.0,
'number_of_stuff_avg_24h': 0.0},
{'col1': 2, 'is_valid': 0, 'number_of_stuff_sum_1h': 2, 'number_of_stuff_sum_2h': 2,
'number_of_stuff_sum_24h': 2, 'number_of_stuff_avg_1h': 1.0, 'number_of_stuff_avg_2h': 1.0,
'number_of_stuff_avg_24h': 1.0},
{'col1': 3, 'is_valid': 1, 'number_of_stuff_sum_1h': 2, 'number_of_stuff_sum_2h': 2,
'number_of_stuff_sum_24h': 2, 'number_of_stuff_avg_1h': 1.0, 'number_of_stuff_avg_2h': 1.0,
'number_of_stuff_avg_24h': 1.0},
{'col1': 4, 'is_valid': 0, 'number_of_stuff_sum_1h': 6, 'number_of_stuff_sum_2h': 6,
'number_of_stuff_sum_24h': 6, 'number_of_stuff_avg_1h': 2.0, 'number_of_stuff_avg_2h': 2.0,
'number_of_stuff_avg_24h': 2.0},
{'col1': 5, 'is_valid': 1, 'number_of_stuff_sum_1h': 6, 'number_of_stuff_sum_2h': 6,
'number_of_stuff_sum_24h': 6, 'number_of_stuff_avg_1h': 2.0, 'number_of_stuff_avg_2h': 2.0,
'number_of_stuff_avg_24h': 2.0},
{'col1': 6, 'is_valid': 0, 'number_of_stuff_sum_1h': 12, 'number_of_stuff_sum_2h': 12,
'number_of_stuff_sum_24h': 12, 'number_of_stuff_avg_1h': 3.0, 'number_of_stuff_avg_2h': 3.0,
'number_of_stuff_avg_24h': 3.0},
{'col1': 7, 'is_valid': 1, 'number_of_stuff_sum_1h': 12, 'number_of_stuff_sum_2h': 12,
'number_of_stuff_sum_24h': 12, 'number_of_stuff_avg_1h': 3.0, 'number_of_stuff_avg_2h': 3.0,
'number_of_stuff_avg_24h': 3.0},
{'col1': 8, 'is_valid': 0, 'number_of_stuff_sum_1h': 20, 'number_of_stuff_sum_2h': 20,
'number_of_stuff_sum_24h': 20, 'number_of_stuff_avg_1h': 4.0, 'number_of_stuff_avg_2h': 4.0,
'number_of_stuff_avg_24h': 4.0},
{'col1': 9, 'is_valid': 1, 'number_of_stuff_sum_1h': 20, 'number_of_stuff_sum_2h': 20,
'number_of_stuff_sum_24h': 20, 'number_of_stuff_avg_1h': 4.0, 'number_of_stuff_avg_2h': 4.0,
'number_of_stuff_avg_24h': 4.0}]
assert actual == expected_results, \
f'actual did not match expected. \n actual: {actual} \n expected: {expected_results}'
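# max_value=5 caps the aggregation: once five events fall inside the 24h window,
# the reported count stays clamped at 5.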
def test_sliding_window_aggregations_with_max_values_flow():
controller = build_flow([
SyncEmitSource(),
AggregateByKey([FieldAggregator("num_hours_with_stuff_in_the_last_24h", "col1", ["count"],
SlidingWindows(['24h'], '1h'),
max_value=5)],
Table("test", NoopDriver())),
Reduce([], lambda acc, x: append_return(acc, x)),
]).run()
for i in range(10):
data = {'col1': i}
controller.emit(data, 'tal', test_base_time + timedelta(minutes=10 * i))
controller.terminate()
actual = controller.await_termination()
expected_results = [{'col1': 0, 'num_hours_with_stuff_in_the_last_24h_count_24h': 1},
{'col1': 1, 'num_hours_with_stuff_in_the_last_24h_count_24h': 2},
{'col1': 2, 'num_hours_with_stuff_in_the_last_24h_count_24h': 3},
{'col1': 3, 'num_hours_with_stuff_in_the_last_24h_count_24h': 4},
{'col1': 4, 'num_hours_with_stuff_in_the_last_24h_count_24h': 5},
{'col1': 5, 'num_hours_with_stuff_in_the_last_24h_count_24h': 5},
{'col1': 6, 'num_hours_with_stuff_in_the_last_24h_count_24h': 5},
{'col1': 7, 'num_hours_with_stuff_in_the_last_24h_count_24h': 5},
{'col1': 8, 'num_hours_with_stuff_in_the_last_24h_count_24h': 5},
{'col1': 9, 'num_hours_with_stuff_in_the_last_24h_count_24h': 5}]
assert actual == expected_results, \
f'actual did not match expected. \n actual: {actual} \n expected: {expected_results}'
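# Several FieldAggregators over different columns and window sets can be combined in
# a single AggregateByKey step; each contributes its own prefixed fields to the event.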
def test_sliding_window_simple_aggregation_flow_multiple_fields():
controller = build_flow([
SyncEmitSource(),
AggregateByKey([FieldAggregator("number_of_stuff", "col1", ["sum", "avg"],
SlidingWindows(['1h', '2h', '24h'], '10m')),
FieldAggregator("number_of_things", "col2", ["count"],
SlidingWindows(['1h', '2h'], '15m')),
FieldAggregator("abc", "col3", ["sum"],
SlidingWindows(['24h'], '10m'))],
Table("test", NoopDriver())),
Reduce([], lambda acc, x: append_return(acc, x)),
]).run()
for i in range(10):
data = {'col1': i, 'col2': i * 1.2, 'col3': i * 2 + 4}
controller.emit(data, 'tal', test_base_time + timedelta(minutes=i))
controller.terminate()
actual = controller.await_termination()
expected_results = [{'col1': 0, 'col2': 0.0, 'col3': 4, 'number_of_stuff_sum_1h': 0, 'number_of_stuff_sum_2h': 0,
'number_of_stuff_sum_24h': 0, 'number_of_things_count_1h': 1, 'number_of_things_count_2h': 1,
'abc_sum_24h': 4, 'number_of_stuff_avg_1h': 0.0, 'number_of_stuff_avg_2h': 0.0, 'number_of_stuff_avg_24h': 0.0},
{'col1': 1, 'col2': 1.2, 'col3': 6, 'number_of_stuff_sum_1h': 1, 'number_of_stuff_sum_2h': 1,
'number_of_stuff_sum_24h': 1, 'number_of_things_count_1h': 2, 'number_of_things_count_2h': 2,
'abc_sum_24h': 10, 'number_of_stuff_avg_1h': 0.5, 'number_of_stuff_avg_2h': 0.5, 'number_of_stuff_avg_24h': 0.5},
{'col1': 2, 'col2': 2.4, 'col3': 8, 'number_of_stuff_sum_1h': 3, 'number_of_stuff_sum_2h': 3,
'number_of_stuff_sum_24h': 3, 'number_of_things_count_1h': 3, 'number_of_things_count_2h': 3,
'abc_sum_24h': 18, 'number_of_stuff_avg_1h': 1.0, 'number_of_stuff_avg_2h': 1.0, 'number_of_stuff_avg_24h': 1.0},
{'col1': 3, 'col2': 3.5999999999999996, 'col3': 10, 'number_of_stuff_sum_1h': 6,
'number_of_stuff_sum_2h': 6, 'number_of_stuff_sum_24h': 6, 'number_of_things_count_1h': 4,
'number_of_things_count_2h': 4, 'abc_sum_24h': 28, 'number_of_stuff_avg_1h': 1.5, 'number_of_stuff_avg_2h': 1.5,
'number_of_stuff_avg_24h': 1.5},
{'col1': 4, 'col2': 4.8, 'col3': 12, 'number_of_stuff_sum_1h': 10, 'number_of_stuff_sum_2h': 10,
'number_of_stuff_sum_24h': 10, 'number_of_things_count_1h': 5, 'number_of_things_count_2h': 5,
'abc_sum_24h': 40, 'number_of_stuff_avg_1h': 2.0, 'number_of_stuff_avg_2h': 2.0, 'number_of_stuff_avg_24h': 2.0},
{'col1': 5, 'col2': 6.0, 'col3': 14, 'number_of_stuff_sum_1h': 15, 'number_of_stuff_sum_2h': 15,
'number_of_stuff_sum_24h': 15, 'number_of_things_count_1h': 6, 'number_of_things_count_2h': 6,
'abc_sum_24h': 54, 'number_of_stuff_avg_1h': 2.5, 'number_of_stuff_avg_2h': 2.5, 'number_of_stuff_avg_24h': 2.5},
{'col1': 6, 'col2': 7.199999999999999, 'col3': 16, 'number_of_stuff_sum_1h': 21,
'number_of_stuff_sum_2h': 21, 'number_of_stuff_sum_24h': 21, 'number_of_things_count_1h': 7,
'number_of_things_count_2h': 7, 'abc_sum_24h': 70, 'number_of_stuff_avg_1h': 3.0,
'number_of_stuff_avg_2h': 3.0, 'number_of_stuff_avg_24h': 3.0},
{'col1': 7, 'col2': 8.4, 'col3': 18, 'number_of_stuff_sum_1h': 28, 'number_of_stuff_sum_2h': 28,
'number_of_stuff_sum_24h': 28, 'number_of_things_count_1h': 8, 'number_of_things_count_2h': 8,
'abc_sum_24h': 88, 'number_of_stuff_avg_1h': 3.5, 'number_of_stuff_avg_2h': 3.5, 'number_of_stuff_avg_24h': 3.5},
{'col1': 8, 'col2': 9.6, 'col3': 20, 'number_of_stuff_sum_1h': 36, 'number_of_stuff_sum_2h': 36,
'number_of_stuff_sum_24h': 36, 'number_of_things_count_1h': 9, 'number_of_things_count_2h': 9,
'abc_sum_24h': 108, 'number_of_stuff_avg_1h': 4.0, 'number_of_stuff_avg_2h': 4.0, 'number_of_stuff_avg_24h': 4.0},
{'col1': 9, 'col2': 10.799999999999999, 'col3': 22, 'number_of_stuff_sum_1h': 45,
'number_of_stuff_sum_2h': 45, 'number_of_stuff_sum_24h': 45,
'number_of_things_count_1h': 10, 'number_of_things_count_2h': 10, 'abc_sum_24h': 130,
'number_of_stuff_avg_1h': 4.5, 'number_of_stuff_avg_2h': 4.5, 'number_of_stuff_avg_24h': 4.5}]
assert actual == expected_results, \
f'actual did not match expected. \n actual: {actual} \n expected: {expected_results}'
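# FixedWindows buckets reset at aligned window boundaries (unlike sliding windows),
# so counts drop back down whenever an event crosses into a new bucket.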
def test_fixed_window_simple_aggregation_flow():
controller = build_flow([
SyncEmitSource(),
AggregateByKey([FieldAggregator("number_of_stuff", "col1", ["count"],
FixedWindows(['1h', '2h', '3h', '24h']))],
Table("test", NoopDriver())),
Reduce([], lambda acc, x: append_return(acc, x)),
]).run()
for i in range(10):
data = {'col1': i}
controller.emit(data, 'tal', test_base_time + timedelta(minutes=25 * i))
controller.terminate()
actual = controller.await_termination()
expected_results = [{'col1': 0, 'number_of_stuff_count_1h': 1, 'number_of_stuff_count_2h': 1, 'number_of_stuff_count_3h': 1,
'number_of_stuff_count_24h': 1},
{'col1': 1, 'number_of_stuff_count_1h': 1, 'number_of_stuff_count_2h': 1, 'number_of_stuff_count_3h': 2,
'number_of_stuff_count_24h': 2},
{'col1': 2, 'number_of_stuff_count_1h': 2, 'number_of_stuff_count_2h': 2, 'number_of_stuff_count_3h': 3,
'number_of_stuff_count_24h': 3},
{'col1': 3, 'number_of_stuff_count_1h': 3, 'number_of_stuff_count_2h': 3, 'number_of_stuff_count_3h': 4,
'number_of_stuff_count_24h': 4},
{'col1': 4, 'number_of_stuff_count_1h': 1, 'number_of_stuff_count_2h': 4, 'number_of_stuff_count_3h': 5,
'number_of_stuff_count_24h': 5},
{'col1': 5, 'number_of_stuff_count_1h': 2, 'number_of_stuff_count_2h': 5, 'number_of_stuff_count_3h': 6,
'number_of_stuff_count_24h': 6},
{'col1': 6, 'number_of_stuff_count_1h': 1, 'number_of_stuff_count_2h': 1, 'number_of_stuff_count_3h': 1,
'number_of_stuff_count_24h': 1},
{'col1': 7, 'number_of_stuff_count_1h': 2, 'number_of_stuff_count_2h': 2, 'number_of_stuff_count_3h': 2,
'number_of_stuff_count_24h': 2},
{'col1': 8, 'number_of_stuff_count_1h': 1, 'number_of_stuff_count_2h': 3, 'number_of_stuff_count_3h': 3,
'number_of_stuff_count_24h': 3},
{'col1': 9, 'number_of_stuff_count_1h': 2, 'number_of_stuff_count_2h': 4, 'number_of_stuff_count_3h': 4,
'number_of_stuff_count_24h': 4}]
assert actual == expected_results, \
f'actual did not match expected. \n actual: {actual} \n expected: {expected_results}'
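# Drives the flow from a DataframeSource keyed by 'isotope' with event time taken from
# 'sample_time', counting samples over the less common fixed windows 15m/25m/45m/1h.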
def test_fixed_window_aggregation_with_uncommon_windows_flow():
time_format = '%Y-%m-%d %H:%M:%S.%f'
columns = ['sample_time', 'signal', 'isotope']
data = [[datetime.strptime('2021-05-30 16:42:15.797000', time_format).replace(tzinfo=timezone.utc), 790.235, 'U235'],
[datetime.strptime('2021-05-30 16:45:15.798000', time_format).replace(tzinfo=timezone.utc), 498.491, 'U235'],
[datetime.strptime('2021-05-30 16:48:15.799000', time_format).replace(tzinfo=timezone.utc), 34650.00343, 'U235'],
[datetime.strptime('2021-05-30 16:51:15.800000', time_format).replace(tzinfo=timezone.utc), 189.823, 'U235'],
[datetime.strptime('2021-05-30 16:54:15.801000', time_format).replace(tzinfo=timezone.utc), 379.524, 'U235'],
[datetime.strptime('2021-05-30 16:57:15.802000', time_format).replace(tzinfo=timezone.utc), 2225.4952, 'U235'],
[datetime.strptime('2021-05-30 17:00:15.803000', time_format).replace(tzinfo=timezone.utc), 1049.0903, 'U235'],
[datetime.strptime('2021-05-30 17:03:15.804000', time_format).replace(tzinfo=timezone.utc), 41905.63447, 'U235'],
[datetime.strptime('2021-05-30 17:06:15.805000', time_format).replace(tzinfo=timezone.utc), 4987.6764, 'U235'],
[datetime.strptime('2021-05-30 17:09:15.806000', time_format).replace(tzinfo=timezone.utc), 67657.11975, 'U235'],
[datetime.strptime('2021-05-30 17:12:15.807000', time_format).replace(tzinfo=timezone.utc), 56173.06327, 'U235'],
[datetime.strptime('2021-05-30 17:15:15.808000', time_format).replace(tzinfo=timezone.utc), 14249.67394, 'U235'],
[datetime.strptime('2021-05-30 17:18:15.809000', time_format).replace(tzinfo=timezone.utc), 656.831, 'U235'],
[datetime.strptime('2021-05-30 17:21:15.810000', time_format).replace(tzinfo=timezone.utc), 5768.4822, 'U235'],
[datetime.strptime('2021-05-30 17:24:15.811000', time_format).replace(tzinfo=timezone.utc), 929.028, 'U235'],
[datetime.strptime('2021-05-30 17:27:15.812000', time_format).replace(tzinfo=timezone.utc), 2585.9646, 'U235'],
[datetime.strptime('2021-05-30 17:30:15.813000', time_format).replace(tzinfo=timezone.utc), 358.918, 'U235']]
df = pd.DataFrame(data, columns=columns)
controller = build_flow([
DataframeSource(df, time_field="sample_time", key_field="isotope"),
AggregateByKey([FieldAggregator("samples", "signal", ["count"],
FixedWindows(['15m', '25m', '45m', '1h']))], Table("U235_test", NoopDriver())),
Reduce([], lambda acc, x: append_return(acc, x)),
]).run()
termination_result = controller.await_termination()
expected = [{'samples_count_15m': 1.0, 'samples_count_25m': 1.0, 'samples_count_45m': 1.0, 'samples_count_1h': 1.0,
'sample_time': pd.Timestamp('2021-05-30 16:42:15.797000+0000', tz='UTC'), 'signal': 790.235,
'isotope': 'U235'},
{'samples_count_15m': 1.0, 'samples_count_25m': 2.0, 'samples_count_45m': 2.0, 'samples_count_1h': 2.0,
'sample_time': pd.Timestamp('2021-05-30 16:45:15.798000+0000', tz='UTC'), 'signal': 498.491,
'isotope': 'U235'},
{'samples_count_15m': 2.0, 'samples_count_25m': 3.0, 'samples_count_45m': 3.0, 'samples_count_1h': 3.0,
'sample_time': pd.Timestamp('2021-05-30 16:48:15.799000+0000', tz='UTC'), 'signal': 34650.00343,
'isotope': 'U235'},
{'samples_count_15m': 3.0, 'samples_count_25m': 4.0, 'samples_count_45m': 4.0, 'samples_count_1h': 4.0,
'sample_time': pd.Timestamp('2021-05-30 16:51:15.800000+0000', tz='UTC'), 'signal': 189.823,
'isotope': 'U235'},
{'samples_count_15m': 4.0, 'samples_count_25m': 5.0, 'samples_count_45m': 5.0, 'samples_count_1h': 5.0,
'sample_time': pd.Timestamp('2021-05-30 16:54:15.801000+0000', tz='UTC'), 'signal': 379.524,
'isotope': 'U235'},
{'samples_count_15m': 5.0, 'samples_count_25m': 6.0, 'samples_count_45m': 6.0, 'samples_count_1h': 6.0,
'sample_time': pd.Timestamp('2021-05-30 16:57:15.802000+0000', tz='UTC'), 'signal': 2225.4952,
'isotope': 'U235'},
{'samples_count_15m': 1.0, 'samples_count_25m': 1.0, 'samples_count_45m': 7.0, 'samples_count_1h': 1.0,
'sample_time': pd.Timestamp('2021-05-30 17:00:15.803000+0000', tz='UTC'), 'signal': 1049.0903,
'isotope': 'U235'},
{'samples_count_15m': 2.0, 'samples_count_25m': 2.0, 'samples_count_45m': 8.0, 'samples_count_1h': 2.0,
'sample_time': pd.Timestamp('2021-05-30 17:03:15.804000+0000', tz='UTC'), 'signal': 41905.63447,
'isotope': 'U235'},
{'samples_count_15m': 3.0, 'samples_count_25m': 3.0, 'samples_count_45m': 9.0, 'samples_count_1h': 3.0,
'sample_time': pd.Timestamp('2021-05-30 17:06:15.805000+0000', tz='UTC'), 'signal': 4987.6764,
'isotope': 'U235'},
{'samples_count_15m': 4.0, 'samples_count_25m': 4.0, 'samples_count_45m': 10.0, 'samples_count_1h': 4.0,
'sample_time': pd.Timestamp('2021-05-30 17:09:15.806000+0000', tz='UTC'), 'signal': 67657.11975,
'isotope': 'U235'},
{'samples_count_15m': 5.0, 'samples_count_25m': 5.0, 'samples_count_45m': 11.0, 'samples_count_1h': 5.0,
'sample_time': pd.Timestamp('2021-05-30 17:12:15.807000+0000', tz='UTC'), 'signal': 56173.06327,
'isotope': 'U235'},
{'samples_count_15m': 1.0, 'samples_count_25m': 6.0, 'samples_count_45m': 1.0, 'samples_count_1h': 6.0,
'sample_time': pd.Timestamp('2021-05-30 17:15:15.808000+0000', tz='UTC'), 'signal': 14249.67394,
'isotope': 'U235'},
{'samples_count_15m': 2.0, 'samples_count_25m': 7.0, 'samples_count_45m': 2.0, 'samples_count_1h': 7.0,
'sample_time': pd.Timestamp('2021-05-30 17:18:15.809000+0000', tz='UTC'), 'signal': 656.831,
'isotope': 'U235'},
{'samples_count_15m': 3.0, 'samples_count_25m': 8.0, 'samples_count_45m': 3.0, 'samples_count_1h': 8.0,
'sample_time': pd.Timestamp('2021-05-30 17:21:15.810000+0000', tz='UTC'), 'signal': 5768.4822,
'isotope': 'U235'},
{'samples_count_15m': 4.0, 'samples_count_25m': 9.0, 'samples_count_45m': 4.0, 'samples_count_1h': 9.0,
'sample_time': pd.Timestamp('2021-05-30 17:24:15.811000+0000', tz='UTC'), 'signal': 929.028,
'isotope': 'U235'},
{'samples_count_15m': 5.0, 'samples_count_25m': 1.0, 'samples_count_45m': 5.0, 'samples_count_1h': 10.0,
'sample_time': pd.Timestamp('2021-05-30 17:27:15.812000+0000', tz='UTC'), 'signal': 2585.9646,
'isotope': 'U235'},
{'samples_count_15m': 1.0, 'samples_count_25m': 2.0, 'samples_count_45m': 6.0, 'samples_count_1h': 11.0,
'sample_time': pd.Timestamp('2021-05-30 17:30:15.813000+0000', tz='UTC'), 'signal': 358.918,
'isotope': 'U235'}]
assert termination_result == expected, \
f'actual did not match expected. \n actual: {termination_result} \n expected: {expected}'
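# Same DataFrame-driven flow, but rows alternate between U235 and U238, so the
# fixed-window counts are maintained independently per key.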
def test_fixed_window_aggregation_with_multiple_keys_flow():
time_format = '%Y-%m-%d %H:%M:%S.%f'
columns = ['sample_time', 'signal', 'isotope']
data = [[datetime.strptime('2021-05-30 16:42:15.797000', time_format).replace(tzinfo=timezone.utc), 790.235, 'U235'],
[datetime.strptime('2021-05-30 16:45:15.798000', time_format).replace(tzinfo=timezone.utc), 498.491, 'U235'],
[datetime.strptime('2021-05-30 16:48:15.799000', time_format).replace(tzinfo=timezone.utc), 34650.00343, 'U238'],
[datetime.strptime('2021-05-30 16:51:15.800000', time_format).replace(tzinfo=timezone.utc), 189.823, 'U238'],
[datetime.strptime('2021-05-30 16:54:15.801000', time_format).replace(tzinfo=timezone.utc), 379.524, 'U238'],
[datetime.strptime('2021-05-30 16:57:15.802000', time_format).replace(tzinfo=timezone.utc), 2225.4952, 'U238'],
[datetime.strptime('2021-05-30 17:00:15.803000', time_format).replace(tzinfo=timezone.utc), 1049.0903, 'U235'],
[datetime.strptime('2021-05-30 17:03:15.804000', time_format).replace(tzinfo=timezone.utc), 41905.63447, 'U238'],
[datetime.strptime('2021-05-30 17:06:15.805000', time_format).replace(tzinfo=timezone.utc), 4987.6764, 'U235'],
[datetime.strptime('2021-05-30 17:09:15.806000', time_format).replace(tzinfo=timezone.utc), 67657.11975, 'U235'],
[datetime.strptime('2021-05-30 17:12:15.807000', time_format).replace(tzinfo=timezone.utc), 56173.06327, 'U235'],
[datetime.strptime('2021-05-30 17:15:15.808000', time_format).replace(tzinfo=timezone.utc), 14249.67394, 'U238'],
[datetime.strptime('2021-05-30 17:18:15.809000', time_format).replace(tzinfo=timezone.utc), 656.831, 'U238'],
[datetime.strptime('2021-05-30 17:21:15.810000', time_format).replace(tzinfo=timezone.utc), 5768.4822, 'U235'],
[datetime.strptime('2021-05-30 17:24:15.811000', time_format).replace(tzinfo=timezone.utc), 929.028, 'U235'],
[datetime.strptime('2021-05-30 17:27:15.812000', time_format).replace(tzinfo=timezone.utc), 2585.9646, 'U238'],
[datetime.strptime('2021-05-30 17:30:15.813000', time_format).replace(tzinfo=timezone.utc), 358.918, 'U238']]
df = pd.DataFrame(data, columns=columns)
controller = build_flow([
DataframeSource(df, time_field="sample_time", key_field="isotope"),
AggregateByKey([FieldAggregator("samples", "signal", ["count"],
FixedWindows(['10m', '15m']))], Table("U235_test", NoopDriver())),
Reduce([], lambda acc, x: append_return(acc, x)),
]).run()
termination_result = controller.await_termination()
expected = [
{'samples_count_10m': 1.0, 'samples_count_15m': 1.0,
'sample_time': pd.Timestamp('2021-05-30 16:42:15.797000+0000', tz='UTC'), 'signal': 790.235, 'isotope': 'U235'},
{'samples_count_10m': 2.0, 'samples_count_15m': 1.0,
'sample_time': pd.Timestamp('2021-05-30 16:45:15.798000+0000', tz='UTC'), 'signal': 498.491, 'isotope': 'U235'},
{'samples_count_10m': 1.0, 'samples_count_15m': 1.0,
'sample_time': pd.Timestamp('2021-05-30 16:48:15.799000+0000', tz='UTC'), 'signal': 34650.00343, 'isotope': 'U238'},
{'samples_count_10m': 1.0, 'samples_count_15m': 2.0,
'sample_time': pd.Timestamp('2021-05-30 16:51:15.800000+0000', tz='UTC'), 'signal': 189.823, 'isotope': 'U238'},
{'samples_count_10m': 2.0, 'samples_count_15m': 3.0,
'sample_time': pd.Timestamp('2021-05-30 16:54:15.801000+0000', tz='UTC'), 'signal': 379.524, 'isotope': 'U238'},
{'samples_count_10m': 3.0, 'samples_count_15m': 4.0,
'sample_time': pd.Timestamp('2021-05-30 16:57:15.802000+0000', tz='UTC'), 'signal': 2225.4952, 'isotope': 'U238'},
{'samples_count_10m': 1.0, 'samples_count_15m': 1.0,
'sample_time': pd.Timestamp('2021-05-30 17:00:15.803000+0000', tz='UTC'), 'signal': 1049.0903, 'isotope': 'U235'},
{'samples_count_10m': 1.0, 'samples_count_15m': 1.0,
'sample_time': pd.Timestamp('2021-05-30 17:03:15.804000+0000', tz='UTC'), 'signal': 41905.63447, 'isotope': 'U238'},
{'samples_count_10m': 2.0, 'samples_count_15m': 2.0,
'sample_time': pd.Timestamp('2021-05-30 17:06:15.805000+0000', tz='UTC'), 'signal': 4987.6764, 'isotope': 'U235'},
{'samples_count_10m': 3.0, 'samples_count_15m': 3.0,
'sample_time': pd.Timestamp('2021-05-30 17:09:15.806000+0000', tz='UTC'), 'signal': 67657.11975, 'isotope': 'U235'},
{'samples_count_10m': 1.0, 'samples_count_15m': 4.0,
'sample_time': pd.Timestamp('2021-05-30 17:12:15.807000+0000', tz='UTC'), 'signal': 56173.06327, 'isotope': 'U235'},
{'samples_count_10m': 1.0, 'samples_count_15m': 1.0,
'sample_time': pd.Timestamp('2021-05-30 17:15:15.808000+0000', tz='UTC'), 'signal': 14249.67394, 'isotope': 'U238'},
{'samples_count_10m': 2.0, 'samples_count_15m': 2.0,
'sample_time': pd.Timestamp('2021-05-30 17:18:15.809000+0000', tz='UTC'), 'signal': 656.831, 'isotope': 'U238'},
{'samples_count_10m': 1.0, 'samples_count_15m': 1.0,
'sample_time': pd.Timestamp('2021-05-30 17:21:15.810000+0000', tz='UTC'), 'signal': 5768.4822, 'isotope': 'U235'},
{'samples_count_10m': 2.0, 'samples_count_15m': 2.0,
'sample_time': pd.Timestamp('2021-05-30 17:24:15.811000+0000', tz='UTC'), 'signal': 929.028, 'isotope': 'U235'},
{'samples_count_10m': 1.0, 'samples_count_15m': 3.0,
'sample_time': pd.Timestamp('2021-05-30 17:27:15.812000+0000', tz='UTC'), 'signal': 2585.9646, 'isotope': 'U238'},
{'samples_count_10m': 1.0, 'samples_count_15m': 1.0,
'sample_time': pd.Timestamp('2021-05-30 17:30:15.813000+0000', tz='UTC'), 'signal': 358.918, 'isotope': 'U238'}]
assert termination_result == expected, \
f'actual did not match expected. \n actual: {termination_result} \n expected: {expected}'
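# Sliding-window counterpart of the fixed-window test above: with 5m granularity the
# counts decay gradually as old samples slide out instead of resetting at boundaries.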
def test_sliding_window_aggregation_with_uncommon_windows_flow():
time_format = '%Y-%m-%d %H:%M:%S.%f'
columns = ['sample_time', 'signal', 'isotope']
data = [[datetime.strptime('2021-05-30 16:42:15.797000', time_format).replace(tzinfo=timezone.utc), 790.235, 'U235'],
[datetime.strptime('2021-05-30 16:45:15.798000', time_format).replace(tzinfo=timezone.utc), 498.491, 'U235'],
[datetime.strptime('2021-05-30 16:48:15.799000', time_format).replace(tzinfo=timezone.utc), 34650.00343, 'U235'],
[datetime.strptime('2021-05-30 16:51:15.800000', time_format).replace(tzinfo=timezone.utc), 189.823, 'U235'],
[datetime.strptime('2021-05-30 16:54:15.801000', time_format).replace(tzinfo=timezone.utc), 379.524, 'U235'],
[datetime.strptime('2021-05-30 16:57:15.802000', time_format).replace(tzinfo=timezone.utc), 2225.4952, 'U235'],
[datetime.strptime('2021-05-30 17:00:15.803000', time_format).replace(tzinfo=timezone.utc), 1049.0903, 'U235'],
[datetime.strptime('2021-05-30 17:03:15.804000', time_format).replace(tzinfo=timezone.utc), 41905.63447, 'U235'],
[datetime.strptime('2021-05-30 17:06:15.805000', time_format).replace(tzinfo=timezone.utc), 4987.6764, 'U235'],
[datetime.strptime('2021-05-30 17:09:15.806000', time_format).replace(tzinfo=timezone.utc), 67657.11975, 'U235'],
[datetime.strptime('2021-05-30 17:12:15.807000', time_format).replace(tzinfo=timezone.utc), 56173.06327, 'U235'],
[datetime.strptime('2021-05-30 17:15:15.808000', time_format).replace(tzinfo=timezone.utc), 14249.67394, 'U235'],
[datetime.strptime('2021-05-30 17:18:15.809000', time_format).replace(tzinfo=timezone.utc), 656.831, 'U235'],
[datetime.strptime('2021-05-30 17:21:15.810000', time_format).replace(tzinfo=timezone.utc), 5768.4822, 'U235'],
[datetime.strptime('2021-05-30 17:24:15.811000', time_format).replace(tzinfo=timezone.utc), 929.028, 'U235'],
[datetime.strptime('2021-05-30 17:27:15.812000', time_format).replace(tzinfo=timezone.utc), 2585.9646, 'U235'],
[datetime.strptime('2021-05-30 17:30:15.813000', time_format).replace(tzinfo=timezone.utc), 358.918, 'U235']]
df = pd.DataFrame(data, columns=columns)
controller = build_flow([
DataframeSource(df, time_field="sample_time", key_field="isotope"),
AggregateByKey([FieldAggregator("samples", "signal", ["count"],
SlidingWindows(['15m', '25m', '45m', '1h'], '5m'))], Table("U235_test", NoopDriver())),
Reduce([], lambda acc, x: append_return(acc, x)),
]).run()
termination_result = controller.await_termination()
expected = [{'samples_count_15m': 1.0, 'samples_count_25m': 1.0, 'samples_count_45m': 1.0, 'samples_count_1h': 1.0,
'sample_time': pd.Timestamp('2021-05-30 16:42:15.797000+0000', tz='UTC'), 'signal': 790.235,
'isotope': 'U235'},
{'samples_count_15m': 2.0, 'samples_count_25m': 2.0, 'samples_count_45m': 2.0, 'samples_count_1h': 2.0,
'sample_time': pd.Timestamp('2021-05-30 16:45:15.798000+0000', tz='UTC'), 'signal': 498.491,
'isotope': 'U235'},
{'samples_count_15m': 3.0, 'samples_count_25m': 3.0, 'samples_count_45m': 3.0, 'samples_count_1h': 3.0,
'sample_time': pd.Timestamp('2021-05-30 16:48:15.799000+0000', tz='UTC'), 'signal': 34650.00343,
'isotope': 'U235'},
{'samples_count_15m': 4.0, 'samples_count_25m': 4.0, 'samples_count_45m': 4.0, 'samples_count_1h': 4.0,
'sample_time': pd.Timestamp('2021-05-30 16:51:15.800000+0000', tz='UTC'), 'signal': 189.823,
'isotope': 'U235'},
{'samples_count_15m': 5.0, 'samples_count_25m': 5.0, 'samples_count_45m': 5.0, 'samples_count_1h': 5.0,
'sample_time': pd.Timestamp('2021-05-30 16:54:15.801000+0000', tz='UTC'), 'signal': 379.524,
'isotope': 'U235'},
{'samples_count_15m': 5.0, 'samples_count_25m': 6.0, 'samples_count_45m': 6.0, 'samples_count_1h': 6.0,
'sample_time': pd.Timestamp('2021-05-30 16:57:15.802000+0000', tz='UTC'), 'signal': 2225.4952,
'isotope': 'U235'},
{'samples_count_15m': 4.0, 'samples_count_25m': 7.0, 'samples_count_45m': 7.0, 'samples_count_1h': 7.0,
'sample_time': pd.Timestamp('2021-05-30 17:00:15.803000+0000', tz='UTC'), 'signal': 1049.0903,
'isotope': 'U235'},
{'samples_count_15m': 5.0, 'samples_count_25m': 8.0, 'samples_count_45m': 8.0, 'samples_count_1h': 8.0,
'sample_time': pd.Timestamp('2021-05-30 17:03:15.804000+0000', tz='UTC'), 'signal': 41905.63447,
'isotope': 'U235'},
{'samples_count_15m': 4.0, 'samples_count_25m': 8.0, 'samples_count_45m': 9.0, 'samples_count_1h': 9.0,
'sample_time': pd.Timestamp('2021-05-30 17:06:15.805000+0000', tz='UTC'), 'signal': 4987.6764,
'isotope': 'U235'},
{'samples_count_15m': 5.0, 'samples_count_25m': 9.0, 'samples_count_45m': 10.0, 'samples_count_1h': 10.0,
'sample_time': pd.Timestamp('2021-05-30 17:09:15.806000+0000', tz='UTC'), 'signal': 67657.11975,
'isotope': 'U235'},
{'samples_count_15m': 5.0, 'samples_count_25m': 8.0, 'samples_count_45m': 11.0, 'samples_count_1h': 11.0,
'sample_time': pd.Timestamp('2021-05-30 17:12:15.807000+0000', tz='UTC'), 'signal': 56173.06327,
'isotope': 'U235'},
{'samples_count_15m': 4.0, 'samples_count_25m': 7.0, 'samples_count_45m': 12.0, 'samples_count_1h': 12.0,
'sample_time': pd.Timestamp('2021-05-30 17:15:15.808000+0000', tz='UTC'), 'signal': 14249.67394,
'isotope': 'U235'},
{'samples_count_15m': 5.0, 'samples_count_25m': 8.0, 'samples_count_45m': 13.0, 'samples_count_1h': 13.0,
'sample_time': pd.Timestamp('2021-05-30 17:18:15.809000+0000', tz='UTC'),
'signal': 656.831, 'isotope': 'U235'},
{'samples_count_15m': 4.0, 'samples_count_25m': 8.0, 'samples_count_45m': 14.0, 'samples_count_1h': 14.0,
'sample_time': pd.Timestamp('2021-05-30 17:21:15.810000+0000', tz='UTC'), 'signal': 5768.4822,
'isotope': 'U235'},
{'samples_count_15m': 5.0, 'samples_count_25m': 9.0, 'samples_count_45m': 15.0, 'samples_count_1h': 15.0,
'sample_time': pd.Timestamp('2021-05-30 17:24:15.811000+0000', tz='UTC'), 'signal': 929.028,
'isotope': 'U235'},
{'samples_count_15m': 5.0, 'samples_count_25m': 8.0, 'samples_count_45m': 15.0, 'samples_count_1h': 16.0,
'sample_time': pd.Timestamp('2021-05-30 17:27:15.812000+0000', tz='UTC'), 'signal': 2585.9646,
'isotope': 'U235'},
{'samples_count_15m': 4.0, 'samples_count_25m': 7.0, 'samples_count_45m': 14.0, 'samples_count_1h': 17.0,
'sample_time': pd.Timestamp('2021-05-30 17:30:15.813000+0000', tz='UTC'), 'signal': 358.918,
'isotope': 'U235'}]
assert termination_result == expected, \
f'actual did not match expected. \n actual: {termination_result} \n expected: {expected}'
def test_emit_max_event_sliding_window_multiple_keys_aggregation_flow():
controller = build_flow([
SyncEmitSource(),
AggregateByKey([FieldAggregator("number_of_stuff", "col1", ["sum", "avg"],
SlidingWindows(['1h', '2h', '24h'], '10m'))],
Table("test", NoopDriver()), emit_policy=EmitAfterMaxEvent(3)),
Reduce([], lambda acc, x: append_return(acc, x)),
]).run()
for i in range(12):
data = {'col1': i}
controller.emit(data, f'{i % 2}', test_base_time + timedelta(minutes=i))
controller.terminate()
actual = controller.await_termination()
expected_results = [
{'col1': 4, 'number_of_stuff_sum_1h': 6, 'number_of_stuff_sum_2h': 6, 'number_of_stuff_sum_24h': 6, 'number_of_stuff_avg_1h': 2.0,
'number_of_stuff_avg_2h': 2.0, 'number_of_stuff_avg_24h': 2.0},
{'col1': 5, 'number_of_stuff_sum_1h': 9, 'number_of_stuff_sum_2h': 9, 'number_of_stuff_sum_24h': 9, 'number_of_stuff_avg_1h': 3.0,
'number_of_stuff_avg_2h': 3.0, 'number_of_stuff_avg_24h': 3.0},
{'col1': 10, 'number_of_stuff_sum_1h': 30, 'number_of_stuff_sum_2h': 30, 'number_of_stuff_sum_24h': 30,
'number_of_stuff_avg_1h': 5.0, 'number_of_stuff_avg_2h': 5.0, 'number_of_stuff_avg_24h': 5.0},
{'col1': 11, 'number_of_stuff_sum_1h': 36, 'number_of_stuff_sum_2h': 36, 'number_of_stuff_sum_24h': 36,
'number_of_stuff_avg_1h': 6.0, 'number_of_stuff_avg_2h': 6.0, 'number_of_stuff_avg_24h': 6.0}]
assert actual == expected_results, \
f'actual did not match expected. \n actual: {actual} \n expected: {expected_results}'
def test_error_on_bad_emit_policy():
try:
        AggregateByKey([], Table("test", NoopDriver()), emit_policy=EmitEveryEvent)
assert False
except TypeError:
pass
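# A valid emit_policy is an EmitPolicy instance, e.g. emit_policy=EmitEveryEvent(); passing the
# EmitEveryEvent class itself (as above) is presumably what triggers the TypeError this test expects.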
def test_emit_delay_aggregation_flow():
q = queue.Queue(1)
def reduce_fn(acc, x):
if x['col1'] == 2:
q.put(None)
acc.append(x)
return acc
controller = build_flow([
SyncEmitSource(),
AggregateByKey([FieldAggregator("number_of_stuff", "col1", ["sum", "count"],
SlidingWindows(['1h'], '10m'))],
Table("test", NoopDriver()), emit_policy=EmitAfterMaxEvent(4, 1)),
Reduce([], reduce_fn),
]).run()
for i in range(11):
if i == 3:
q.get()
data = {'col1': i}
controller.emit(data, 'katya', test_base_time + timedelta(seconds=i))
controller.terminate()
actual = controller.await_termination()
expected_results = [
{'col1': 2, 'number_of_stuff_sum_1h': 3, 'number_of_stuff_count_1h': 3},
{'col1': 6, 'number_of_stuff_sum_1h': 21, 'number_of_stuff_count_1h': 7},
{'col1': 10, 'number_of_stuff_sum_1h': 55, 'number_of_stuff_count_1h': 11}]
assert actual == expected_results, \
f'actual did not match expected. \n actual: {actual} \n expected: {expected_results}'
def test_aggregate_dict_simple_aggregation_flow():
aggregations = [{'name': 'number_of_stuff',
'column': 'col1',
'operations': ["sum", "avg", "min", "max"],
'windows': ['1h', '2h', '24h'],
'period': '10m'}]
controller = build_flow([
SyncEmitSource(),
AggregateByKey(aggregations, Table("test", NoopDriver())),
Reduce([], lambda acc, x: append_return(acc, x)),
]).run()
for i in range(10):
data = {'col1': i}
controller.emit(data, 'tal', test_base_time + timedelta(minutes=25 * i))
controller.terminate()
actual = controller.await_termination()
expected_results = [
{'col1': 0, 'number_of_stuff_sum_1h': 0, 'number_of_stuff_sum_2h': 0, 'number_of_stuff_sum_24h': 0, 'number_of_stuff_min_1h': 0,
'number_of_stuff_min_2h': 0, 'number_of_stuff_min_24h': 0, 'number_of_stuff_max_1h': 0, 'number_of_stuff_max_2h': 0,
'number_of_stuff_max_24h': 0, 'number_of_stuff_avg_1h': 0.0, 'number_of_stuff_avg_2h': 0.0, 'number_of_stuff_avg_24h': 0.0},
{'col1': 1, 'number_of_stuff_sum_1h': 1, 'number_of_stuff_sum_2h': 1, 'number_of_stuff_sum_24h': 1, 'number_of_stuff_min_1h': 0,
'number_of_stuff_min_2h': 0, 'number_of_stuff_min_24h': 0, 'number_of_stuff_max_1h': 1, 'number_of_stuff_max_2h': 1,
'number_of_stuff_max_24h': 1, 'number_of_stuff_avg_1h': 0.5, 'number_of_stuff_avg_2h': 0.5, 'number_of_stuff_avg_24h': 0.5},
{'col1': 2, 'number_of_stuff_sum_1h': 3, 'number_of_stuff_sum_2h': 3, 'number_of_stuff_sum_24h': 3, 'number_of_stuff_min_1h': 0,
'number_of_stuff_min_2h': 0, 'number_of_stuff_min_24h': 0, 'number_of_stuff_max_1h': 2, 'number_of_stuff_max_2h': 2,
'number_of_stuff_max_24h': 2, 'number_of_stuff_avg_1h': 1.0, 'number_of_stuff_avg_2h': 1.0, 'number_of_stuff_avg_24h': 1.0},
{'col1': 3, 'number_of_stuff_sum_1h': 6, 'number_of_stuff_sum_2h': 6, 'number_of_stuff_sum_24h': 6, 'number_of_stuff_min_1h': 1,
'number_of_stuff_min_2h': 0, 'number_of_stuff_min_24h': 0, 'number_of_stuff_max_1h': 3, 'number_of_stuff_max_2h': 3,
'number_of_stuff_max_24h': 3, 'number_of_stuff_avg_1h': 2.0, 'number_of_stuff_avg_2h': 1.5, 'number_of_stuff_avg_24h': 1.5},
{'col1': 4, 'number_of_stuff_sum_1h': 9, 'number_of_stuff_sum_2h': 10, 'number_of_stuff_sum_24h': 10, 'number_of_stuff_min_1h': 2,
'number_of_stuff_min_2h': 0, 'number_of_stuff_min_24h': 0, 'number_of_stuff_max_1h': 4, 'number_of_stuff_max_2h': 4,
'number_of_stuff_max_24h': 4, 'number_of_stuff_avg_1h': 3.0, 'number_of_stuff_avg_2h': 2.0, 'number_of_stuff_avg_24h': 2.0},
{'col1': 5, 'number_of_stuff_sum_1h': 12, 'number_of_stuff_sum_2h': 15, 'number_of_stuff_sum_24h': 15, 'number_of_stuff_min_1h': 3,
'number_of_stuff_min_2h': 1, 'number_of_stuff_min_24h': 0, 'number_of_stuff_max_1h': 5, 'number_of_stuff_max_2h': 5,
'number_of_stuff_max_24h': 5, 'number_of_stuff_avg_1h': 4.0, 'number_of_stuff_avg_2h': 3.0, 'number_of_stuff_avg_24h': 2.5},
{'col1': 6, 'number_of_stuff_sum_1h': 15, 'number_of_stuff_sum_2h': 20, 'number_of_stuff_sum_24h': 21, 'number_of_stuff_min_1h': 4,
'number_of_stuff_min_2h': 2, 'number_of_stuff_min_24h': 0, 'number_of_stuff_max_1h': 6, 'number_of_stuff_max_2h': 6,
'number_of_stuff_max_24h': 6, 'number_of_stuff_avg_1h': 5.0, 'number_of_stuff_avg_2h': 4.0, 'number_of_stuff_avg_24h': 3.0},
{'col1': 7, 'number_of_stuff_sum_1h': 18, 'number_of_stuff_sum_2h': 25, 'number_of_stuff_sum_24h': 28, 'number_of_stuff_min_1h': 5,
'number_of_stuff_min_2h': 3, 'number_of_stuff_min_24h': 0, 'number_of_stuff_max_1h': 7, 'number_of_stuff_max_2h': 7,
'number_of_stuff_max_24h': 7, 'number_of_stuff_avg_1h': 6.0, 'number_of_stuff_avg_2h': 5.0, 'number_of_stuff_avg_24h': 3.5},
{'col1': 8, 'number_of_stuff_sum_1h': 21, 'number_of_stuff_sum_2h': 30, 'number_of_stuff_sum_24h': 36, 'number_of_stuff_min_1h': 6,
'number_of_stuff_min_2h': 4, 'number_of_stuff_min_24h': 0, 'number_of_stuff_max_1h': 8, 'number_of_stuff_max_2h': 8,
'number_of_stuff_max_24h': 8, 'number_of_stuff_avg_1h': 7.0, 'number_of_stuff_avg_2h': 6.0, 'number_of_stuff_avg_24h': 4.0},
{'col1': 9, 'number_of_stuff_sum_1h': 24, 'number_of_stuff_sum_2h': 35, 'number_of_stuff_sum_24h': 45, 'number_of_stuff_min_1h': 7,
'number_of_stuff_min_2h': 5, 'number_of_stuff_min_24h': 0, 'number_of_stuff_max_1h': 9, 'number_of_stuff_max_2h': 9,
'number_of_stuff_max_24h': 9, 'number_of_stuff_avg_1h': 8.0, 'number_of_stuff_avg_2h': 7.0, 'number_of_stuff_avg_24h': 4.5}
]
assert actual == expected_results, \
f'actual did not match expected. \n actual: {actual} \n expected: {expected_results}'
def test_aggregate_dict_fixed_window():
aggregations = [{'name': 'number_of_stuff',
'column': 'col1',
'operations': ["count"],
'windows': ['1h', '2h', '3h', '24h']}]
controller = build_flow([
SyncEmitSource(),
AggregateByKey(aggregations, Table("test", NoopDriver())),
Reduce([], lambda acc, x: append_return(acc, x)),
]).run()
for i in range(10):
data = {'col1': i}
controller.emit(data, 'tal', test_base_time + timedelta(minutes=25 * i))
controller.terminate()
actual = controller.await_termination()
expected_results = [{'col1': 0, 'number_of_stuff_count_1h': 1, 'number_of_stuff_count_2h': 1, 'number_of_stuff_count_3h': 1,
'number_of_stuff_count_24h': 1},
{'col1': 1, 'number_of_stuff_count_1h': 1, 'number_of_stuff_count_2h': 1, 'number_of_stuff_count_3h': 2,
'number_of_stuff_count_24h': 2},
{'col1': 2, 'number_of_stuff_count_1h': 2, 'number_of_stuff_count_2h': 2, 'number_of_stuff_count_3h': 3,
'number_of_stuff_count_24h': 3},
{'col1': 3, 'number_of_stuff_count_1h': 3, 'number_of_stuff_count_2h': 3, 'number_of_stuff_count_3h': 4,
'number_of_stuff_count_24h': 4},
{'col1': 4, 'number_of_stuff_count_1h': 1, 'number_of_stuff_count_2h': 4, 'number_of_stuff_count_3h': 5,
'number_of_stuff_count_24h': 5},
{'col1': 5, 'number_of_stuff_count_1h': 2, 'number_of_stuff_count_2h': 5, 'number_of_stuff_count_3h': 6,
'number_of_stuff_count_24h': 6},
{'col1': 6, 'number_of_stuff_count_1h': 1, 'number_of_stuff_count_2h': 1, 'number_of_stuff_count_3h': 1,
'number_of_stuff_count_24h': 1},
{'col1': 7, 'number_of_stuff_count_1h': 2, 'number_of_stuff_count_2h': 2, 'number_of_stuff_count_3h': 2,
'number_of_stuff_count_24h': 2},
{'col1': 8, 'number_of_stuff_count_1h': 1, 'number_of_stuff_count_2h': 3, 'number_of_stuff_count_3h': 3,
'number_of_stuff_count_24h': 3},
{'col1': 9, 'number_of_stuff_count_1h': 2, 'number_of_stuff_count_2h': 4, 'number_of_stuff_count_3h': 4,
'number_of_stuff_count_24h': 4}]
assert actual == expected_results, \
f'actual did not match expected. \n actual: {actual} \n expected: {expected_results}'
def test_sliding_window_old_event():
controller = build_flow([
SyncEmitSource(),
AggregateByKey([FieldAggregator("number_of_stuff", "col1", ["sum", "avg", "min", "max"],
SlidingWindows(['1h', '2h', '24h'], '10m'))],
Table("test", NoopDriver())),
Reduce([], lambda acc, x: append_return(acc, x)),
]).run()
for i in range(3):
data = {'col1': i}
controller.emit(data, 'tal', test_base_time + timedelta(minutes=25 * i))
controller.emit({'col1': 3}, 'tal', test_base_time - timedelta(hours=25))
controller.terminate()
actual = controller.await_termination()
expected_results = [
{'col1': 0, 'number_of_stuff_sum_1h': 0, 'number_of_stuff_sum_2h': 0, 'number_of_stuff_sum_24h': 0, 'number_of_stuff_min_1h': 0,
'number_of_stuff_min_2h': 0, 'number_of_stuff_min_24h': 0, 'number_of_stuff_max_1h': 0, 'number_of_stuff_max_2h': 0,
'number_of_stuff_max_24h': 0, 'number_of_stuff_avg_1h': 0.0, 'number_of_stuff_avg_2h': 0.0, 'number_of_stuff_avg_24h': 0.0},
{'col1': 1, 'number_of_stuff_sum_1h': 1, 'number_of_stuff_sum_2h': 1, 'number_of_stuff_sum_24h': 1, 'number_of_stuff_min_1h': 0,
'number_of_stuff_min_2h': 0, 'number_of_stuff_min_24h': 0, 'number_of_stuff_max_1h': 1, 'number_of_stuff_max_2h': 1,
'number_of_stuff_max_24h': 1, 'number_of_stuff_avg_1h': 0.5, 'number_of_stuff_avg_2h': 0.5, 'number_of_stuff_avg_24h': 0.5},
{'col1': 2, 'number_of_stuff_sum_1h': 3, 'number_of_stuff_sum_2h': 3, 'number_of_stuff_sum_24h': 3, 'number_of_stuff_min_1h': 0,
'number_of_stuff_min_2h': 0, 'number_of_stuff_min_24h': 0, 'number_of_stuff_max_1h': 2, 'number_of_stuff_max_2h': 2,
'number_of_stuff_max_24h': 2, 'number_of_stuff_avg_1h': 1.0, 'number_of_stuff_avg_2h': 1.0, 'number_of_stuff_avg_24h': 1.0},
{'col1': 3}
]
assert actual == expected_results, \
f'actual did not match expected. \n actual: {actual} \n expected: {expected_results}'
def test_fixed_window_old_event():
controller = build_flow([
SyncEmitSource(),
AggregateByKey([FieldAggregator("number_of_stuff", "col1", ["count"],
FixedWindows(['1h', '2h', '3h', '24h']))],
Table("test", NoopDriver())),
Reduce([], lambda acc, x: append_return(acc, x)),
]).run()
for i in range(3):
data = {'col1': i}
controller.emit(data, 'tal', test_base_time + timedelta(minutes=25 * i))
controller.emit({'col1': 3}, 'tal', test_base_time - timedelta(hours=25))
controller.terminate()
actual = controller.await_termination()
expected_results = [{'col1': 0, 'number_of_stuff_count_1h': 1, 'number_of_stuff_count_2h': 1, 'number_of_stuff_count_3h': 1,
'number_of_stuff_count_24h': 1},
{'col1': 1, 'number_of_stuff_count_1h': 1, 'number_of_stuff_count_2h': 1, 'number_of_stuff_count_3h': 2,
'number_of_stuff_count_24h': 2},
{'col1': 2, 'number_of_stuff_count_1h': 2, 'number_of_stuff_count_2h': 2, 'number_of_stuff_count_3h': 3,
'number_of_stuff_count_24h': 3},
{'col1': 3}]
assert actual == expected_results, \
f'actual did not match expected. \n actual: {actual} \n expected: {expected_results}'
def test_fixed_window_out_of_order_event():
controller = build_flow([
SyncEmitSource(),
AggregateByKey([FieldAggregator("number_of_stuff", "col1", ["count"],
FixedWindows(['1h', '2h']))],
Table("test", NoopDriver())),
Reduce([], lambda acc, x: append_return(acc, x)),
]).run()
for i in range(3):
data = {'col1': i}
controller.emit(data, 'tal', test_base_time + timedelta(minutes=25 * i))
controller.emit({'col1': 3}, 'tal', test_base_time + timedelta(minutes=15))
controller.emit({'col1': 4}, 'tal', test_base_time + timedelta(minutes=25 * 3))
controller.terminate()
actual = controller.await_termination()
expected_results = [{'col1': 0, 'number_of_stuff_count_1h': 1, 'number_of_stuff_count_2h': 1},
{'col1': 1, 'number_of_stuff_count_1h': 1, 'number_of_stuff_count_2h': 1},
{'col1': 2, 'number_of_stuff_count_1h': 2, 'number_of_stuff_count_2h': 2},
{'col1': 3, 'number_of_stuff_count_1h': 2, 'number_of_stuff_count_2h': 2},
{'col1': 4, 'number_of_stuff_count_1h': 3, 'number_of_stuff_count_2h': 3}]
assert actual == expected_results, \
f'actual did not match expected. \n actual: {actual} \n expected: {expected_results}'
def test_fixed_window_roll_cached_buckets():
controller = build_flow([
SyncEmitSource(),
AggregateByKey([FieldAggregator("number_of_stuff", "col1", ["count"],
FixedWindows(['1h', '2h', '3h']))],
Table("test", NoopDriver())),
Reduce([], lambda acc, x: append_return(acc, x)),
]).run()
for i in range(10):
data = {'col1': i}
controller.emit(data, 'tal', test_base_time + timedelta(minutes=25 * i))
controller.terminate()
actual = controller.await_termination()
expected_results = [{'col1': 0, 'number_of_stuff_count_1h': 1, 'number_of_stuff_count_2h': 1, 'number_of_stuff_count_3h': 1},
{'col1': 1, 'number_of_stuff_count_1h': 1, 'number_of_stuff_count_2h': 1, 'number_of_stuff_count_3h': 2},
{'col1': 2, 'number_of_stuff_count_1h': 2, 'number_of_stuff_count_2h': 2, 'number_of_stuff_count_3h': 3},
{'col1': 3, 'number_of_stuff_count_1h': 3, 'number_of_stuff_count_2h': 3, 'number_of_stuff_count_3h': 4},
{'col1': 4, 'number_of_stuff_count_1h': 1, 'number_of_stuff_count_2h': 4, 'number_of_stuff_count_3h': 5},
{'col1': 5, 'number_of_stuff_count_1h': 2, 'number_of_stuff_count_2h': 5, 'number_of_stuff_count_3h': 6},
{'col1': 6, 'number_of_stuff_count_1h': 1, 'number_of_stuff_count_2h': 1, 'number_of_stuff_count_3h': 1},
{'col1': 7, 'number_of_stuff_count_1h': 2, 'number_of_stuff_count_2h': 2, 'number_of_stuff_count_3h': 2},
{'col1': 8, 'number_of_stuff_count_1h': 1, 'number_of_stuff_count_2h': 3, 'number_of_stuff_count_3h': 3},
{'col1': 9, 'number_of_stuff_count_1h': 2, 'number_of_stuff_count_2h': 4, 'number_of_stuff_count_3h': 4}]
assert actual == expected_results, \
f'actual did not match expected. \n actual: {actual} \n expected: {expected_results}'
def test_sliding_window_roll_cached_buckets():
controller = build_flow([
SyncEmitSource(),
AggregateByKey([FieldAggregator("number_of_stuff", "col1", ["sum", "avg", "min", "max"],
SlidingWindows(['1h', '2h'], '10m'))],
Table("test", NoopDriver())),
Reduce([], lambda acc, x: append_return(acc, x)),
]).run()
for i in range(10):
data = {'col1': i}
controller.emit(data, 'tal', test_base_time + timedelta(minutes=25 * i))
controller.terminate()
actual = controller.await_termination()
expected_results = [
{'col1': 0, 'number_of_stuff_sum_1h': 0, 'number_of_stuff_sum_2h': 0, 'number_of_stuff_min_1h': 0, 'number_of_stuff_min_2h': 0,
'number_of_stuff_max_1h': 0, 'number_of_stuff_max_2h': 0, 'number_of_stuff_avg_1h': 0.0, 'number_of_stuff_avg_2h': 0.0},
{'col1': 1, 'number_of_stuff_sum_1h': 1, 'number_of_stuff_sum_2h': 1, 'number_of_stuff_min_1h': 0, 'number_of_stuff_min_2h': 0,
'number_of_stuff_max_1h': 1, 'number_of_stuff_max_2h': 1, 'number_of_stuff_avg_1h': 0.5, 'number_of_stuff_avg_2h': 0.5},
{'col1': 2, 'number_of_stuff_sum_1h': 3, 'number_of_stuff_sum_2h': 3, 'number_of_stuff_min_1h': 0, 'number_of_stuff_min_2h': 0,
'number_of_stuff_max_1h': 2, 'number_of_stuff_max_2h': 2, 'number_of_stuff_avg_1h': 1.0, 'number_of_stuff_avg_2h': 1.0},
{'col1': 3, 'number_of_stuff_sum_1h': 6, 'number_of_stuff_sum_2h': 6, 'number_of_stuff_min_1h': 1, 'number_of_stuff_min_2h': 0,
'number_of_stuff_max_1h': 3, 'number_of_stuff_max_2h': 3, 'number_of_stuff_avg_1h': 2.0, 'number_of_stuff_avg_2h': 1.5},
{'col1': 4, 'number_of_stuff_sum_1h': 9, 'number_of_stuff_sum_2h': 10, 'number_of_stuff_min_1h': 2, 'number_of_stuff_min_2h': 0,
'number_of_stuff_max_1h': 4, 'number_of_stuff_max_2h': 4, 'number_of_stuff_avg_1h': 3.0, 'number_of_stuff_avg_2h': 2.0},
{'col1': 5, 'number_of_stuff_sum_1h': 12, 'number_of_stuff_sum_2h': 15, 'number_of_stuff_min_1h': 3, 'number_of_stuff_min_2h': 1,
'number_of_stuff_max_1h': 5, 'number_of_stuff_max_2h': 5, 'number_of_stuff_avg_1h': 4.0, 'number_of_stuff_avg_2h': 3.0},
{'col1': 6, 'number_of_stuff_sum_1h': 15, 'number_of_stuff_sum_2h': 20, 'number_of_stuff_min_1h': 4, 'number_of_stuff_min_2h': 2,
'number_of_stuff_max_1h': 6, 'number_of_stuff_max_2h': 6, 'number_of_stuff_avg_1h': 5.0, 'number_of_stuff_avg_2h': 4.0},
{'col1': 7, 'number_of_stuff_sum_1h': 18, 'number_of_stuff_sum_2h': 25, 'number_of_stuff_min_1h': 5, 'number_of_stuff_min_2h': 3,
'number_of_stuff_max_1h': 7, 'number_of_stuff_max_2h': 7, 'number_of_stuff_avg_1h': 6.0, 'number_of_stuff_avg_2h': 5.0},
{'col1': 8, 'number_of_stuff_sum_1h': 21, 'number_of_stuff_sum_2h': 30, 'number_of_stuff_min_1h': 6, 'number_of_stuff_min_2h': 4,
'number_of_stuff_max_1h': 8, 'number_of_stuff_max_2h': 8, 'number_of_stuff_avg_1h': 7.0, 'number_of_stuff_avg_2h': 6.0},
{'col1': 9, 'number_of_stuff_sum_1h': 24, 'number_of_stuff_sum_2h': 35, 'number_of_stuff_min_1h': 7, 'number_of_stuff_min_2h': 5,
'number_of_stuff_max_1h': 9, 'number_of_stuff_max_2h': 9, 'number_of_stuff_avg_1h': 8.0, 'number_of_stuff_avg_2h': 7.0}
]
assert actual == expected_results, \
f'actual did not match expected. \n actual: {actual} \n expected: {expected_results}'
def test_aggregation_unique_fields():
try:
build_flow([
SyncEmitSource(),
AggregateByKey([FieldAggregator("number_of_stuff", "col1", ["sum", "avg"],
SlidingWindows(['1h', '2h', '24h'], '10m')),
FieldAggregator("number_of_stuff", "col1", ["count"],
SlidingWindows(['1h', '2h'], '15m'))],
Table("test", NoopDriver())),
Reduce([], lambda acc, x: append_return(acc, x)), ]).run()
assert False
except TypeError:
pass
def test_fixed_window_aggregation_with_first_and_last_aggregates():
df = pd.DataFrame(
{
"timestamp": [
pd.Timestamp("2021-07-13 06:43:01.084587+0000", tz="UTC"),
pd.Timestamp("2021-07-13 06:46:01.084587+0000", tz="UTC"),
pd.Timestamp("2021-07-13 06:49:01.084587+0000", tz="UTC"),
pd.Timestamp("2021-07-13 06:52:01.084587+0000", tz="UTC"),
pd.Timestamp("2021-07-13 06:55:01.084587+0000", tz="UTC"),
pd.Timestamp("2021-07-13 06:58:01.084587+0000", tz="UTC"),
pd.Timestamp("2021-07-13 07:01:01.084587+0000", tz="UTC"),
pd.Timestamp("2021-07-13 07:04:01.084587+0000", tz="UTC"),
pd.Timestamp("2021-07-13 07:07:01.084587+0000", tz="UTC"),
pd.Timestamp("2021-07-13 07:10:01.084587+0000", tz="UTC"),
pd.Timestamp("2021-07-13 07:13:01.084587+0000", tz="UTC"),
pd.Timestamp("2021-07-13 07:16:01.084587+0000", tz="UTC"),
pd.Timestamp("2021-07-13 07:19:01.084587+0000", tz="UTC"),
],
"emission": [
16.44200,
64807.90231,
413.90100,
73621.21551,
53936.62158,
13582.52318,
966.80400,
450.40700,
4965.28760,
42982.57194,
1594.40460,
69601.73368,
48038.65572,
],
"sensor_id": [
"0654-329-05",
"0654-329-05",
"0654-329-05",
"0654-329-05",
"0654-329-05",
"0654-329-05",
"0654-329-05",
"0654-329-05",
"0654-329-05",
"0654-329-05",
"0654-329-05",
"0654-329-05",
"0654-329-05",
],
}
)
controller = build_flow(
[
DataframeSource(df, time_field="timestamp", key_field="sensor_id"),
AggregateByKey(
[
FieldAggregator(
"samples",
"emission",
["last", "first", "count"],
FixedWindows(["10m"]),
)
],
Table("MyTable", NoopDriver()),
),
Reduce([], lambda acc, x: append_return(acc, x)),
]
).run()
termination_result = controller.await_termination()
expected = [
{
"samples_last_10m": 16.442,
"samples_count_10m": 1.0,
"samples_first_10m": 16.442,
"timestamp": pd.Timestamp("2021-07-13 06:43:01.084587+0000", tz="UTC"),
"emission": 16.442,
"sensor_id": "0654-329-05",
},
{
"samples_last_10m": 64807.90231,
"samples_count_10m": 2.0,
"samples_first_10m": 16.442,
"timestamp": pd.Timestamp("2021-07-13 06:46:01.084587+0000", tz="UTC"),
"emission": 64807.90231,
"sensor_id": "0654-329-05",
},
{
"samples_last_10m": 413.901,
"samples_count_10m": 3.0,
"samples_first_10m": 16.442,
"timestamp": pd.Timestamp("2021-07-13 06:49:01.084587+0000", tz="UTC"),
"emission": 413.901,
"sensor_id": "0654-329-05",
},
{
"samples_last_10m": 73621.21551,
"samples_count_10m": 1.0,
"samples_first_10m": 73621.21551,
"timestamp": pd.Timestamp("2021-07-13 06:52:01.084587+0000", tz="UTC"),
"emission": 73621.21551,
"sensor_id": "0654-329-05",
},
{
"samples_last_10m": 53936.62158,
"samples_count_10m": 2.0,
"samples_first_10m": 73621.21551,
"timestamp": | pd.Timestamp("2021-07-13 06:55:01.084587+0000", tz="UTC") | pandas.Timestamp |
# ------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License (MIT). See LICENSE in the repo root for license information.
# ------------------------------------------------------------------------------------------
import logging
from abc import ABC
from collections import Counter
from pathlib import Path
from typing import Any, Callable, Dict, Generic, List, Optional, TypeVar, Union
import pandas as pd
import torch.utils.data
from torch._six import container_abcs
from torch.utils.data import BatchSampler, DataLoader, Dataset, RandomSampler, Sampler, SequentialSampler
from torch.utils.data.dataloader import default_collate # type: ignore
from InnerEye.Common.type_annotations import IntOrString, TupleFloat3
from InnerEye.ML.config import SegmentationModelBase
from InnerEye.ML.dataset.sample import GeneralSampleMetadata, PatientDatasetSource, \
PatientMetadata, Sample
from InnerEye.ML.model_config_base import ModelConfigBase
from InnerEye.ML.utils import io_util, ml_util
from InnerEye.ML.utils.csv_util import CSV_CHANNEL_HEADER, CSV_PATH_HEADER, \
CSV_SUBJECT_HEADER
from InnerEye.ML.utils.io_util import is_nifti_file_path
from InnerEye.ML.utils.transforms import Compose3D
COMPRESSION_EXTENSIONS = ['sz', 'gz']
def collate_with_metadata(batch: List[Dict[str, Any]]) -> Dict[str, Any]:
"""
The collate function that the dataloader workers should use. It does the same thing for all "normal" fields
(all fields are put into tensors with outer dimension batch_size), except for the special "metadata" field.
Those metadata objects are collated into a simple list.
:param batch: A list of samples that should be collated.
:return: collated result
"""
elem = batch[0]
if isinstance(elem, container_abcs.Mapping):
result = dict()
for key in elem:
# Special handling for all fields that store metadata, and for fields that are list.
# Lists are used in SequenceDataset.
# All these are collated by turning them into lists or lists of lists.
if isinstance(elem[key], (list, PatientMetadata, GeneralSampleMetadata)):
result[key] = [d[key] for d in batch]
else:
result[key] = default_collate([d[key] for d in batch])
return result
raise TypeError(f"Unexpected batch data: Expected a dictionary, but got: {type(elem)}")
def set_random_seed_for_dataloader_worker(worker_id: int) -> None:
"""
Set the seed for the random number generators of python, numpy.
"""
# Set the seeds for numpy and python random based on the offset of the worker_id and initial seed,
# converting the initial_seed which is a long to modulo int32 which is what numpy expects.
random_seed = (torch.initial_seed() + worker_id) % (2 ** 32)
ml_util.set_random_seed(random_seed, f"Data loader worker ({worker_id})")
class _RepeatSampler(BatchSampler):
"""
    A batch sampler that wraps another batch sampler. It repeats the contents of that other sampler
    forever when max_repeats is 0, otherwise up to max_repeats times.
"""
def __init__(self, sampler: Sampler, batch_size: int, drop_last: bool = False, max_repeats: int = 0) -> None:
super().__init__(sampler, batch_size, drop_last)
self.sampler = sampler
self.max_repeats = max_repeats
def __iter__(self) -> Any:
repeats = 0
while self.max_repeats == 0 or repeats < self.max_repeats:
yield from iter(self.sampler)
repeats += 1
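# Behaviour sketch: with max_repeats=2 and a wrapped batch sampler that yields the index batches
# [0, 1], [2, 3], iterating a _RepeatSampler yields [0, 1], [2, 3], [0, 1], [2, 3] and then stops;
# with max_repeats=0 it keeps cycling indefinitely.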
class ImbalancedSampler(Sampler):
"""
Sampler that performs naive over-sampling by drawing samples with
    replacement. The probability of being drawn depends on the label of
    each data point; rare labels have a higher probability of being drawn.
    Assumes the dataset implements the "get_labels_for_imbalanced_sampler" function in order
    to compute the weights associated with each data point.
Side note: the sampler choice is independent from the data augmentation
pipeline. Data augmentation is performed on the images while loading them
at a later stage. This sampler merely affects which item is selected.
"""
# noinspection PyMissingConstructor
def __init__(self, dataset: Any, num_samples: int = None) -> None:
"""
:param dataset: a dataset
        :param num_samples: number of samples to draw. If None, the number of samples
corresponds to the length of the dataset.
"""
self.dataset = dataset
self.indices = list(range(len(dataset)))
self.weights = self.get_weights()
self.num_samples = len(dataset) if num_samples is None else num_samples
def get_weights(self) -> torch.Tensor:
labels = self.dataset.get_labels_for_imbalanced_sampler()
counts_per_label: Dict = Counter(labels)
return torch.tensor([1.0 / counts_per_label[labels[i]] for i in self.indices])
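    # Worked example: for labels [0, 0, 0, 1] the counts are {0: 3, 1: 1}, so the per-item weights
    # become [1/3, 1/3, 1/3, 1.0]; torch.multinomial in __iter__ then draws the rare class roughly
    # as often as the frequent one.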
def __iter__(self) -> Any:
# noinspection PyTypeChecker
return iter([self.indices[i] for i in torch.multinomial(self.weights, self.num_samples, # type: ignore
replacement=True)])
def __len__(self) -> int:
return self.num_samples
class RepeatDataLoader(DataLoader):
"""
This class implements a data loader that avoids spawning a new process after each epoch.
It uses an infinite sampler.
This is adapted from https://github.com/pytorch/pytorch/issues/15849
"""
def __init__(self,
dataset: Any,
max_repeats: int,
batch_size: int = 1,
shuffle: bool = False,
use_imbalanced_sampler: bool = False,
drop_last: bool = False,
**kwargs: Any):
"""
Creates a new data loader.
:param dataset: The dataset that should be loaded.
:param batch_size: The number of samples per minibatch.
:param shuffle: If true, the dataset will be shuffled randomly.
:param drop_last: If true, drop incomplete minibatches at the end.
:param kwargs: Additional arguments that will be passed through to the Dataloader constructor.
"""
sampler = RandomSampler(dataset) if shuffle else SequentialSampler(dataset)
if use_imbalanced_sampler:
sampler = ImbalancedSampler(dataset)
self._actual_batch_sampler = BatchSampler(sampler, batch_size, drop_last)
repeat_sampler = _RepeatSampler(self._actual_batch_sampler, batch_size=batch_size, max_repeats=max_repeats)
super().__init__(dataset=dataset, batch_sampler=repeat_sampler, **kwargs)
self.iterator = None
def __len__(self) -> int:
return len(self._actual_batch_sampler)
def __iter__(self) -> Any:
if self.iterator is None:
self.iterator = super().__iter__() # type: ignore
assert self.iterator is not None # for mypy
for i in range(len(self)):
yield next(self.iterator)
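# Minimal usage sketch (assumption: `my_dataset` is any map-style dataset whose items are dicts
# compatible with collate_with_metadata):
# loader = RepeatDataLoader(my_dataset, max_repeats=2, batch_size=4, shuffle=True,
#                           collate_fn=collate_with_metadata)
# Iterating the loader across epochs reuses the same iterator (and hence the same worker
# processes) instead of respawning workers, which is the point of the infinite _RepeatSampler.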
D = TypeVar('D', bound=ModelConfigBase)
class GeneralDataset(Dataset, ABC, Generic[D]):
def __init__(self, args: D, data_frame: Optional[pd.DataFrame] = None,
name: Optional[str] = None):
self.name = name or "None"
self.args = args
self.data_frame = args.dataset_data_frame if data_frame is None else data_frame
logging.info(f"Processing dataset (name={self.name})")
def as_data_loader(self,
shuffle: bool,
batch_size: Optional[int] = None,
num_dataload_workers: Optional[int] = None,
use_imbalanced_sampler: bool = False,
drop_last_batch: bool = False,
max_repeats: Optional[int] = None) -> DataLoader:
num_dataload_workers = num_dataload_workers or self.args.num_dataload_workers
batch_size = batch_size or self.args.train_batch_size
if self.args.avoid_process_spawn_in_data_loaders:
if max_repeats is None:
max_repeats = self.args.get_total_number_of_training_epochs()
return RepeatDataLoader(
self,
max_repeats=max_repeats,
batch_size=batch_size,
shuffle=shuffle,
num_workers=num_dataload_workers,
pin_memory=self.args.pin_memory,
worker_init_fn=set_random_seed_for_dataloader_worker,
collate_fn=collate_with_metadata,
use_imbalanced_sampler=use_imbalanced_sampler,
drop_last=drop_last_batch
)
else:
if use_imbalanced_sampler:
sampler: Optional[Sampler] = ImbalancedSampler(self)
shuffle = False
else:
sampler = None
return DataLoader(
self,
batch_size=batch_size,
shuffle=shuffle,
num_workers=num_dataload_workers,
pin_memory=self.args.pin_memory,
worker_init_fn=set_random_seed_for_dataloader_worker,
collate_fn=collate_with_metadata,
sampler=sampler, # type: ignore
drop_last=drop_last_batch
)
class FullImageDataset(GeneralDataset):
"""
Dataset class that loads and creates samples with full 3D images from a given pd.Dataframe. The following
are the operations performed to generate a sample from this dataset:
-------------------------------------------------------------------------------------------------
1) On initialization parses the provided pd.Dataframe with dataset information, to cache the set of file paths
and patient mappings to load as PatientDatasetSource. The sources are then saved in a list: dataset_sources.
2) dataset_sources is iterated in a batched fashion, where for each batch it loads the full 3D images, and applies
pre-processing functions (e.g. normalization), returning a sample that can be used for full image operations.
"""
def __init__(self, args: SegmentationModelBase, data_frame: pd.DataFrame,
full_image_sample_transforms: Optional[Compose3D[Sample]] = None):
super().__init__(args, data_frame)
self.full_image_sample_transforms = full_image_sample_transforms
# Check base_path
assert self.args.local_dataset is not None
if not self.args.local_dataset.is_dir():
raise ValueError("local_dataset should be the path to the base directory of the data: {}".
format(self.args.local_dataset))
# cache all of the available dataset sources
self._get_file_extension()
if self._is_nifti_dataset():
dataloader: Callable[[], Any] = self._load_dataset_sources
else:
raise Exception("Files should be Nifti, but found {0}".format(self.file_extension))
self.dataset_sources: Union[Dict[IntOrString, PatientDatasetSource]] = dataloader()
self.dataset_indices = sorted(self.dataset_sources.keys())
def __len__(self) -> int:
return len(self.dataset_indices)
def __getitem__(self, i: int) -> Dict[str, Any]:
return self.get_samples_at_index(index=i)[0].get_dict()
@staticmethod
def _extension_from_df_file_paths(file_paths: List[str]) -> str:
file_extensions = [f.split('.')[-2] if f.endswith(tuple(COMPRESSION_EXTENSIONS))
else f.split('.')[-1] for f in file_paths]
if len(file_extensions) == 0:
raise Exception("No files of expected format (Nifti) were found")
# files must all be of same type
unique_file_extensions = list(set(file_extensions))
if len(unique_file_extensions) > 1:
raise Exception("More than one file type was found. This is not supported.")
return "." + unique_file_extensions[0]
def _is_nifti_dataset(self) -> bool:
return is_nifti_file_path(self.file_extension)
def _get_file_extension(self) -> None:
file_extension = self._extension_from_df_file_paths(self.data_frame[CSV_PATH_HEADER].values) # type: ignore
self.file_extension = file_extension
if not (self._is_nifti_dataset()):
raise Exception("Wrong file type provided. Must be Nifti.")
def extract_spacing(self, patient_id: IntOrString) -> TupleFloat3:
"""
extract spacing for that particular image using the first image channel
:param patient_id:
:return:
"""
return io_util.load_nifti_image(self.dataset_sources[patient_id].image_channels[0]).header.spacing
def get_samples_at_index(self, index: int) -> List[Sample]:
# load the channels into memory
if not self._is_nifti_dataset():
raise ValueError("Unknown file extension. Files should be Nifti or HDF5 format but found "
+ self.file_extension)
ds = self.dataset_sources[self.dataset_indices[index]]
samples = [io_util.load_images_from_dataset_source(dataset_source=ds)] # type: ignore
return [Compose3D.apply(self.full_image_sample_transforms, x) for x in samples]
def _load_dataset_sources(self) -> Dict[int, PatientDatasetSource]:
assert self.args.local_dataset is not None
return load_dataset_sources(dataframe=self.data_frame,
local_dataset_root_folder=self.args.local_dataset,
image_channels=self.args.image_channels,
ground_truth_channels=self.args.ground_truth_ids,
mask_channel=self.args.mask_id
)
def load_dataset_sources(dataframe: pd.DataFrame,
local_dataset_root_folder: Path,
image_channels: List[str],
ground_truth_channels: List[str],
mask_channel: Optional[str]) -> Dict[int, PatientDatasetSource]:
"""
Prepares a patient-to-images mapping from a dataframe read directly from a dataset CSV file.
The dataframe contains per-patient per-channel image information, relative to a root directory.
    This method converts that into a per-patient dictionary that contains absolute file paths,
    separated into image channels, ground truth channels, and mask channels.
:param dataframe: A dataframe read directly from a dataset CSV file.
:param local_dataset_root_folder: The root folder that contains all images.
:param image_channels: The names of the image channels that should be used in the result.
:param ground_truth_channels: The names of the ground truth channels that should be used in the result.
:param mask_channel: The name of the mask channel that should be used in the result. This can be None.
:return: A dictionary mapping from an integer subject ID to a PatientDatasetSource.
"""
expected_headers = {CSV_SUBJECT_HEADER, CSV_PATH_HEADER, CSV_CHANNEL_HEADER}
# validate the csv file
actual_headers = list(dataframe)
if not expected_headers.issubset(actual_headers):
raise ValueError("The dataset CSV file should contain at least these columns: {}, but got: {}"
.format(expected_headers, actual_headers))
# Calculate unique data points, first, and last data point
    unique_ids = sorted(pd.unique(dataframe[CSV_SUBJECT_HEADER]))
import pandas as pd
import glob
import os
import configargparse as argparse
from net_benefit_ascvd.prediction_utils.util import df_dict_concat
parser = argparse.ArgumentParser(
config_file_parser_class=argparse.YAMLConfigFileParser,
)
parser.add_argument(
"--project_dir",
type=str,
required=True
)
parser.add_argument("--task_prefix", type=str, required=True)
parser.add_argument(
"--selected_config_experiment_suffix",
type=str,
default="selected",
)
if __name__ == "__main__":
args = parser.parse_args()
project_dir = args.project_dir
task_prefix = args.task_prefix
def get_config_df(experiment_name):
config_df_path = os.path.join(
os.path.join(
project_dir, "experiments", experiment_name, "config", "config.csv"
)
)
        config_df = pd.read_csv(config_df_path)
"""
This module defines a Feed class and a PT_network class based on GTFS feeds.
There is an instance attribute for every GTFS table (routes, stops, etc.),
which stores the table as a Pandas DataFrame,
or as ``None`` in case that table is missing.
The Feed class also has heaps of methods. For more information
check <NAME>'s GTFS toolkit.
I have added a few more functions to the Feed object so I can
use it to calculate the transit catchments.
"""
import tempfile
import time
import shutil
import math
import sys
from pathlib import Path
from datetime import datetime, timedelta
from dateutil.parser import parse
import pandas as pd
import numpy as np
import geopandas as gp
import shapely.geometry as geom
import gtfstk as gt
from . import helpers as hp
class Feed(gt.Feed):
def __init__(self, dist_units, agency=None, stops=None, routes=None,
trips=None, stop_times=None, calendar=None, calendar_dates=None,
fare_attributes=None, fare_rules=None, shapes=None,
frequencies=None, transfers=None, feed_info=None, feed_segments=None,
valid_date=None):
self.dist_units = dist_units
self.agency = agency
self.stops = stops
self.routes = routes
self.trips = trips
self.stop_times = stop_times
self.calendar = calendar
self.calendar_dates = calendar_dates
self.fare_attributes = fare_attributes
self.fare_rules = fare_rules
self.shapes = shapes
self.frequencies = frequencies
self.transfers = transfers
self.feed_info = feed_info
self.feed_segments = feed_segments
self.valid_date = valid_date
@property
def valid_day_of_week(self):
        return parse(self.valid_date).strftime('%A')  # day name, e.g. 'Thursday'
class pt_network:
def __init__(
self,
feed=None,
ptedge=None,
wedge=None,
analysis_start=None,
analysis_end=None,
transfer_duration=None,
walk_speed_kph=None,
):
self.feed = feed #feed object is generated from read_gtfs
self.ptedge = ptedge #links stops by transit
self.wedge = wedge #links stops by walking
self.analysis_start = analysis_start #Analysis start time
self.analysis_end = analysis_end #Analysis end time
self.transfer_duration = transfer_duration
self.walk_speed_kph = walk_speed_kph
@property
def analysis_start_sec(self):
return gt.timestr_to_seconds(self.analysis_start)
@property
def analysis_end_sec(self):
return gt.timestr_to_seconds(self.analysis_end)
@property
def analysis_duration_sec(self):
return self.analysis_end_sec - self.analysis_start_sec
@property
def analysis_time_sec(self): #Middle point for our analysis
return self.analysis_start_sec + self.analysis_duration_sec/2
@property
def analysis_time(self):
return hp.sec2text(self.analysis_time_sec)
@property
def transfer_duration_sec(self):
return self.transfer_duration * 60
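    # Worked example of the derived time properties (values are illustrative): with
    # analysis_start='07:00:00' and analysis_end='09:00:00', analysis_start_sec=25200,
    # analysis_end_sec=32400, analysis_duration_sec=7200 and analysis_time_sec=28800 ('08:00:00');
    # with transfer_duration=2 (minutes), transfer_duration_sec=120.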
"""
Functions about creating abundant access.
"""
def read_gtfs(
path,
        dt, #date to validate the feed upon, it can be like "Thursday" or "20181201"
dist_units=None):
"""
Create a Feed instance from the given path and given distance units.
The path should be a directory containing GTFS text files or a
zip file that unzips as a collection of GTFS text files
(and not as a directory containing GTFS text files).
The distance units given must lie in :const:`constants.dist_units`
Notes
-----
- Ignore non-GTFS files
- Automatically strip whitespace from the column names in GTFS files
- This is based on gtfstk library
"""
gt_feed = gt.read_gtfs(path, dist_units)
    #Validate feed for a specific day (either a date or a day of the week)========
if not gt.valid_date(dt):
dt = gt_feed.get_first_week()[parse(dt).weekday()]
gt_feed = hp.validate_feed(gt_feed, dt)
feed_dict = hp.feed_obj_to_dict(gt_feed)
feed_dict['valid_date'] = dt
#calculate PT segments========================================================
PT_links_df = feed_dict['stop_times'].copy()
#making sure trips are sorted by the trip sequence
PT_links_df.sort_values(by=['trip_id', 'stop_sequence'], inplace = True)
#converting the stop_times into pt links
PT_links_df.rename(columns = {'arrival_time': 'o_time',
'stop_id': 'o_stop',
'stop_sequence': 'o_sequence'}, inplace = True)
PT_links_df[['d_time', 'd_stop', 'd_sequence']] = PT_links_df[['o_time', 'o_stop', 'o_sequence']].shift(-1)
PT_links_df = PT_links_df[PT_links_df['o_sequence'] < PT_links_df['d_sequence']].copy() #removes the last stops
#Convert the time into seconds for easier time calculatins
PT_links_df['o_time_sec'] = PT_links_df['o_time'].apply(hp.text2sec)
PT_links_df['d_time_sec'] = PT_links_df['d_time'].apply(hp.text2sec)
PT_links_df['duration'] = PT_links_df['d_time_sec'] - PT_links_df['o_time_sec']
#Add route_id using the trips table
PT_links_df = PT_links_df.merge(feed_dict['trips'])
#Add route type in text format to the link dataset
PT_links_df = PT_links_df.merge(feed_dict['routes'])
route_type = {'0': 'Tram, Streetcar, Light rail',
'1': 'Subway, Metro',
'2': 'Rail',
'3': 'Bus',
'4': 'Ferry',
'5': 'Cable car',
'6': 'Gondola, Suspended cable car',
'7': 'Funicular'}
PT_links_df['route_type'] = PT_links_df['route_type'].astype(str)
PT_links_df['route_type'].replace(route_type, inplace = True)
#add stop sequence to PT_links_df
def stop_seq_for_trips(stop_times_df):
"""
The objective is to create a dataframe of stop sequence for each trip
The output format will be:
first field is: trip_ids
        second field is: stop_ids separated by commas in order of their sequence
"""
def get_first_trip(group):
stop_seq = ";".join(group['stop_id'].tolist())+";"
trip_id = group['trip_id'].iat[0]
trip_dict = {'stop_seq': stop_seq,
'trip_id': trip_id}
return pd.DataFrame(trip_dict, index=[0])
stop_seq_df = stop_times_df.groupby('trip_id').apply(get_first_trip).reset_index(drop=True)
return stop_seq_df
stop_seq_df = stop_seq_for_trips(feed_dict['stop_times'])
PT_links_df = PT_links_df.merge(stop_seq_df)
def remaining_stops(row):
sid = row['o_stop']+";"
seq = row['stop_seq']
return seq.split(sid, 1)[-1]
PT_links_df['stop_seq'] = PT_links_df.apply(remaining_stops, axis = 1)
# add stops lat and lon
PT_links_df = PT_links_df.merge(feed_dict['stops'][['stop_id', 'stop_lat', 'stop_lon']],
left_on='o_stop',
right_on='stop_id',
how='left').drop('stop_id', axis = 1)
PT_links_df.rename(columns = {'stop_lat': 'o_stop_lat',
'stop_lon': 'o_stop_lon'},
inplace = True)
PT_links_df = PT_links_df.merge(feed_dict['stops'][['stop_id', 'stop_lat', 'stop_lon']],
left_on='d_stop',
right_on='stop_id',
how='left').drop('stop_id', axis = 1)
PT_links_df.rename(columns = {'stop_lat': 'd_stop_lat',
'stop_lon': 'd_stop_lon'},
inplace = True)
feed_dict['feed_segments'] = PT_links_df
for key in ['_trips_i', '_calendar_i', '_calendar_dates_g']:
if key in feed_dict:
del feed_dict[key]
return Feed(**feed_dict)
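# Minimal end-to-end sketch (assumptions: 'gtfs.zip' is a hypothetical feed path and the helper
# module hp provides the functions used above):
# feed = read_gtfs('gtfs.zip', 'Thursday', dist_units='km')
# net = build_pt_network(feed, analysis_start='07:00:00', analysis_end='09:00:00')
# reachable = abundant_access_single(net, start_location=(144.9631, -37.8136))  # (lon, lat)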
def get_bounding_box(latitude_in_degrees, longitude_in_degrees, half_side_in_m):
"""
    Makes a box around a location using half the length of a side (in metres) and returns the
    minimum and maximum coordinates in WGS 1984.
"""
assert half_side_in_m > 0
assert latitude_in_degrees >= -180.0 and latitude_in_degrees <= 180.0
assert longitude_in_degrees >= -180.0 and longitude_in_degrees <= 180.0
half_side_in_km = half_side_in_m / 1000
lat = math.radians(latitude_in_degrees)
lon = math.radians(longitude_in_degrees)
radius = 6371
# Radius of the parallel at given latitude
parallel_radius = radius * math.cos(lat)
lat_min = lat - half_side_in_km/radius
lat_max = lat + half_side_in_km/radius
lon_min = lon - half_side_in_km/parallel_radius
lon_max = lon + half_side_in_km/parallel_radius
rad2deg = math.degrees
lat_min = rad2deg(lat_min)
lon_min = rad2deg(lon_min)
lat_max = rad2deg(lat_max)
lon_max = rad2deg(lon_max)
return {'lat_min':lat_min, 'lat_max':lat_max, 'lon_min':lon_min, 'lon_max':lon_max}
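# Worked example (illustrative coordinates): get_bounding_box(-37.8136, 144.9631, 400) returns a
# dict with keys 'lat_min', 'lat_max', 'lon_min', 'lon_max' describing a box roughly 800 m on a
# side centred on the input point.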
def around_stops(
stops, #GTFS feed df of stops
walk_duration_sec, #Walking time
walk_speed_kmh, #Walking speed
lat, #Origin Lat
lon #origin Lon
):
"""
    This function takes stops as a pandas DataFrame and a location, then extracts the stops that
    are around the input location, based on the walk duration and speed.
Note:
- stops is a pandas dataframe of stops.txt in GTFS
"""
assert lat >= -180.0 and lat <= 180.0
assert lon >= -180.0 and lon <= 180.0
walk_dist_m = walk_speed_kmh * 1000 / 3600 * walk_duration_sec
box = get_bounding_box(lat, lon, walk_dist_m)
cond = (stops['stop_lat'] > box['lat_min'])&\
(stops['stop_lon'] > box['lon_min'])&\
(stops['stop_lat'] < box['lat_max'])&\
(stops['stop_lon'] < box['lon_max'])
stops_df = stops[cond].copy()
if not stops_df.empty:
cols = ['stop_lat', 'stop_lon']
stops_df['walk_to_stop_m'] = stops_df[cols].apply(lambda row: hp.distance_m(lat, lon, row[0], row[1]), axis = 1)
stops_df['walk_to_stop_sec'] = stops_df['walk_to_stop_m'] / walk_speed_kmh / 1000 * 3600
stops_df = stops_df[['stop_id', 'walk_to_stop_sec']][stops_df['walk_to_stop_sec'] < walk_duration_sec].copy()
stops_df.reset_index(drop = True)
return stops_df
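# Worked example: with walk_duration_sec=300 and walk_speed_kmh=4.8, walk_dist_m is
# 4.8 * 1000 / 3600 * 300 = 400 m, so only stops whose straight-line walk takes less than
# 300 seconds are kept in the returned frame.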
def connect2ptnetwork(
pt_network,
start_location, #(x, y)
walk2transit):
"""
Generates a dataframe connecting all nodes (stops and origins) in the network to each other:
The connection is based on walking time and walking speed.
"""
feed = pt_network.feed
stops = feed.stops
origin_lat = start_location[1]
origin_lon = start_location[0]
walk_graph = around_stops(
stops,
walk2transit,
pt_network.walk_speed_kph,
origin_lat,
origin_lon)
walk_graph['o_type'] = 'Start Location'
walk_graph.rename(columns = {"stop_id": "d_stop"}, inplace = True)
return walk_graph
def get_slice(links_df, around_stops_df):
"""
    The objective here is to get a list of stops and find out what trips pass these stops after
    a specific time. Only trips that leave after we arrive at a stop are valid. The last trip we can
    take is the one that leaves before the total time we are willing to wait.
Notes:
    - links_df is a cleaned version of all links in GTFS. By clean I mean no links before our analysis
starts and no links after our analysis ends.
- stop_seq_df is a data frame of trips and sequence of stops
- around_stops_df is a data frame of stops where people can walk to. It is also cleaned and
actual arrival time in text and second formats has been added
First, we cut the links that start from stops we can reach
Second, we remove any service that leaves before we arrive
    Third, we add the stop sequence to the data frame; this will later be used to remove
    trips with the exact same pattern. In other words, no one takes the second of two
    services with the exact same pattern.
"""
first_link_df = links_df.merge(around_stops_df, left_on='o_stop', right_on='d_stop', how='inner')
# Now we remove all trip_ids that pass from our stop. In other words, no one would take a service twice.
ramining_links_df = links_df[~links_df['trip_id'].isin(first_link_df['trip_id'])].copy().reset_index(drop = True)
cond = first_link_df['o_time'] >= first_link_df['arrival_time']
first_link_df = first_link_df[cond].copy().reset_index(drop=True)
if not first_link_df.empty:
        #This part of the code is a mystery! but it cleans the first link errors and it is crucial to the rest of the code
first_link_df = first_link_df.drop_duplicates(['stop_seq'])
def clean_first_link(group):
arrival_time_sec = group['arrival_time_sec'].min()
output = group[group['arrival_time_sec'] == arrival_time_sec].copy()
output = output.drop_duplicates(['trip_id'])
return output
first_link_df = first_link_df.groupby('trip_id').apply(clean_first_link).reset_index(drop = True)
#=================================================================================================================
first_link_df['wt'] = first_link_df['o_time_sec'] - first_link_df['arrival_time_sec']
        # now we select the links that shape the tail of the trips we can reach. By tail I mean
        # the remaining part of a full trip that comes after my stop.
first_link_df = first_link_df[['trip_id', 'o_sequence', 'arrival_time_sec', 'd_tt', 'wt', 'awt']].copy()
first_link_df.rename(columns = {'o_sequence': 'min_seq',
'arrival_time_sec': 'arrive_at_link',
'd_tt': 'o_tt',
'awt': 'o_awt'}, inplace = True)
#selects all trips that pass based on trip id from links_df
first_trips_df = links_df.merge(first_link_df).reset_index(drop = True)
cond = (first_trips_df['o_sequence'] >= first_trips_df['min_seq'])
first_trips_df = first_trips_df[cond].copy().reset_index(drop = True)
#first_trips_df.awt.fillna(first_trips_df.wt, inplace=True)
first_trips_df['tt'] = (first_trips_df['d_time_sec'] - \
first_trips_df['arrive_at_link'] -\
first_trips_df['wt'] +\
first_trips_df['o_awt'])
first_trips_df['d_tt'] = first_trips_df['o_tt'] + first_trips_df['tt']
first_trips_df = first_trips_df.drop(['min_seq'], axis=1)
else:
first_trips_df = pd.DataFrame()
return first_trips_df, ramining_links_df
def walk_to_next_stop(walk_edges, previous_slice_df):
"""
stops_df is from GTFS
stops_ids is a pandas series
"""
def add_around_stops(group):
o_stop = group.name
o_tt = group['d_tt'].min()
o_time_sec = group[group['d_tt'] == o_tt]['d_time_sec'].min() #end of the link is start of walk to next stop
around_stops_df = walk_edges[walk_edges['o_stop'].isin(previous_slice_df['d_stop'])].copy()
around_stops_df.rename(columns = {'stop_id': 'd_stop'}, inplace = True)
around_stops_df['arrival_time_sec'] = around_stops_df['walk_to_stop_sec'] + o_time_sec
around_stops_df['arrival_time'] = around_stops_df['arrival_time_sec'].map(hp.sec2text)
around_stops_df['o_tt'] = o_tt
return around_stops_df
around_stops_df = previous_slice_df.groupby('d_stop').apply(add_around_stops).reset_index(drop = True)
around_stops_df['d_tt'] = around_stops_df['o_tt'] + around_stops_df['walk_to_stop_sec']
around_stops_df = around_stops_df.sort_values(['d_stop', 'd_tt'])
around_stops_df = around_stops_df.groupby('d_stop').first().reset_index()
#around_stops_df.drop('d_tt', axis=1, inplace=True)
return around_stops_df
def build_pt_network(
feed,
analysis_start = '07:00:00', #time in string 'HH:MM:SS'
analysis_end = '09:00:00', #time in string 'HH:MM:SS'
transfer_duration = 2, #2 minutes
walk_speed_kph = 4.8, #Walking speed in kilometer per hour
convert_to_gpd = False, #generates a geopandas database
):
assert gt.valid_time(analysis_start)
assert gt.valid_time(analysis_end)
pt_net = pt_network(
feed=feed,
analysis_start=analysis_start,
analysis_end=analysis_end,
transfer_duration=transfer_duration,
walk_speed_kph=walk_speed_kph)
#calculates the average wait time (awt) depending on the analysis awt period.
PT_links_df = feed.feed_segments
#removes the PT links outside the analysis awt period
cond = (PT_links_df['o_time_sec'] >= pt_net.analysis_start_sec)&\
(PT_links_df['d_time_sec'] <= pt_net.analysis_end_sec)
PT_links_df = feed.feed_segments[cond].copy()
#calculates the frequency of trips
frq_df = PT_links_df['stop_seq'].value_counts().reset_index()
frq_df.columns = ['stop_seq', 'service_cnt']
frq_df['headway_sec'] = (pt_net.analysis_duration_sec) /frq_df['service_cnt']
frq_df['headway_min'] = frq_df['headway_sec'] / 60
PT_links_df = PT_links_df.merge(frq_df, how = 'left')
#calculates the awt
    PT_links_df['awt'] = PT_links_df['headway_sec'] / 2 #average wait time (sec) is half the headway
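    # Worked example: for a 2-hour window (7200 s), a stop pattern served 8 times has
    # headway_sec = 7200 / 8 = 900 (15 minutes), so awt = 450 s, i.e. half the headway.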
if convert_to_gpd == True:
#converting the PT_links_df to a geodataframe
l = lambda x: geom.LineString([geom.Point(x.o_stop_lon,x.o_stop_lat), geom.Point(x.d_stop_lon, x.d_stop_lat)])
PT_links_df['geometry'] = PT_links_df.apply(l, axis=1)
PT_links_gdf = gp.GeoDataFrame(PT_links_df)
pt_net.ptedge = PT_links_gdf
else:
pt_net.ptedge = PT_links_df
#connecting stops together with direct walking
stops = feed.stops
walk_graph = list()
for stop in stops[['stop_id', 'stop_lat', 'stop_lon']].iterrows():
s = around_stops(
stops,
pt_net.transfer_duration_sec,
walk_speed_kph,
stop[1][1],
stop[1][2])
s['o_stop'] = stop[1][0]
s['o_type'] = 'GTFS stop'
walk_graph.append(s)
wedge = pd.concat(walk_graph)
wedge.rename(columns = {"stop_id": "d_stop"}, inplace = True)
pt_net.wedge = wedge
return pt_net
def abundant_access_single(
pt_network,
start_location, #(x, y)
transfers = 999, #number of transfers
walk_to_transit = 5, # minutes
walk_from_transit = 5, #minutes
):
"""
    The objective here is to find how much of a city is accessible by PT from a given location at a specific time.
- The analysis date has to be valid for the feed. The format should be like 20170101
- The analysis time is text like '08:00:00'
The output can be point or polygon. If point, each point feature will have an attribute showing the remaining time
    for walking from that point. If polygon, the output will be one multipart feature showing the whole accessible area.
"""
#connect the start location to the pt network
feed = pt_network.feed
ptedge = pt_network.ptedge
wedge = pt_network.wedge
    walk_to_transit = walk_to_transit * 60 #convert minutes to seconds
    walk_from_transit = walk_from_transit * 60 #convert minutes to seconds
#finding around stops
ar_df = connect2ptnetwork(
pt_network,
start_location, #(x, y)
walk_to_transit)
ar_df['arrival_time_sec'] = pt_network.analysis_time_sec + ar_df['walk_to_stop_sec']
ar_df['arrival_time'] = ar_df['arrival_time_sec'].apply(hp.sec2text)
ar_df['o_tt'] = 0
ar_df['tt'] = ar_df['walk_to_stop_sec']
ar_df['d_tt'] = ar_df['o_tt']+ ar_df['walk_to_stop_sec']
#calculating abundant access
    rl_df = ptedge # rl is remaining links
at = []
transfer = 0
while True:
        #ft is first trips
ft_df, rl_df = get_slice(rl_df, ar_df)
ft_df['transfer'] = transfer
transfer += 1
at.append(ft_df)
if (ft_df.empty) or (transfer >= transfers):
break
ar_df = walk_to_next_stop(wedge, ft_df)
all_trips = pd.concat(at).reset_index(drop = True)
if all_trips.empty:
        return pd.DataFrame()
import numpy as np
import pandas as pd
import re
import matplotlib.pyplot as plt
import os
import gc
import joblib
from sklearn.metrics import r2_score
from sklearn.metrics import precision_score
from sklearn.metrics import f1_score
import sklearn.metrics as skm
from sklearn.metrics import confusion_matrix
import time
import functions as func
import datetime
import univariatefunctions as ufunc
from multiprocessing import cpu_count
from joblib import Parallel
from joblib import delayed
def main():
method = 'OrgData'
# 'DOcategory', 'pHcategory',ysi_blue_green_algae (has negative values for leavon... what does negative mean!?)
# 'ysi_blue_green_algae'] # , 'dissolved_oxygen', 'ph']
targets = ['ph']
# 'ARIMA', 'SARIMA', 'ETS', 'AR', 'MA'
models = ['SARIMA']
path = 'Sondes_data/train_Summer/'
files = [f for f in os.listdir(path) if f.endswith(
".csv") and f.startswith('leavon')] # leavon bgsusd_all
for model_name in models:
for target in targets:
if target.find('category') > 0:
cat = 1
directory = 'Results/bookThree/output_Cat_' + \
model_name+'/oversampling_cv_models/'
data = {'CV': 'CV', 'target_names': 'target_names', 'method_names': 'method_names', 'temporalhorizons': 'temporalhorizons', 'window_nuggets': 'window_nuggets', 'config': 'config',
'file_names': 'file_names', 'F1_0': 'F1_0', 'F1_1': 'F1_1', 'P_0': 'P_0', 'P_1': 'P_1', 'R_0': 'R_0', 'R_1': 'R_1', 'acc0_1': 'acc0_1', 'F1_0_1': 'F1_0_1', 'F1_all': 'F1_all', 'fbeta': 'fbeta'}
else:
cat = 0
directory = 'Results/bookThree/output_Reg_' + \
model_name+'/oversampling_cv_models/'
data = {'CV': 'CV', 'target_names': 'target_names', 'method_names': 'method_names', 'temporalhorizons': 'temporalhorizons', 'window_nuggets': 'window_nuggets', 'config': 'config',
'file_names': 'file_names', 'mape': 'mape', 'me': 'me', 'mae': 'mae', 'mpe': 'mpe', 'rmse': 'rmse', 'R2': 'R2'}
if not os.path.exists(directory):
os.makedirs(directory)
for file in files:
print(file)
result_filename = 'results_'+target + \
'_'+file + '_'+str(time.time())+'.csv'
dfheader = pd.DataFrame(data=data, index=[0])
dfheader.to_csv(directory+result_filename, index=False)
n_steps = 1
for PrH_index in [1, 3, 6, 12, 24, 36]:
dataset = pd.read_csv(path+file)
# Only the Target
dataset = dataset[[
'year', 'month', 'day', 'hour', target]]
print('Window: '+str(n_steps) + ' TH: ' +
str(PrH_index)+' '+method+' '+target)
i = 1
if model_name == 'MA':
train_X_grid, train_y_grid, input_dim, features = func.preparedata(
dataset, PrH_index, n_steps, target, cat)
start_time = time.time()
# For Train files:
custom_cv = func.custom_cv_2folds(train_X_grid, 3)
for train_index, test_index in custom_cv:
train_X = train_X_grid[train_index]
train_y = train_y_grid[train_index]
train_X_uni = train_X[:, -1]
test_X = train_X_grid[test_index]
# actual future values
test_X_uni = test_X[:, -1]
test_y = train_y_grid[test_index]
predictions = ufunc.movingAverage(
train_X_uni, train_y, test_X_uni, test_y)
df_time = pd.DataFrame({
'year': np.array(test_X[:, 0]).astype(int), 'month': np.array(test_X[:, 1]).astype(int),
'day': np.array(test_X[:, 2]).astype(int), 'hour': np.array(test_X[:, 3]).astype(int),
})
timeline = pd.to_datetime(
df_time, format='%Y%m%d %H')
if cat == 1:
predictions = np.array(predictions).astype(int)
test_y = np.array(test_y).astype(int)
# test_y = test_y.reshape(len(test_y),)
# predictions = predictions.reshape(
# len(predictions),)
cm0 = func.forecast_accuracy(
predictions, test_y, cat)
filename = file + '_' + \
target+'_TH' + \
str(PrH_index)+'_lag' + \
str(n_steps)+'_'+str(i)
plt.scatter(timeline.values,
test_y, s=1)
plt.scatter(timeline.values,
predictions, s=1)
plt.legend(['actual', 'predictions'],
loc='upper right')
plt.xticks(rotation=45)
directorydeeper = directory+'more/'
if not os.path.exists(directorydeeper):
os.makedirs(directorydeeper)
plt.savefig(directorydeeper+filename+'.jpg')
plt.close()
data = {'time': timeline,
'Actual': test_y,
'Predictions': predictions}
df = pd.DataFrame(data=data)
df.to_csv(directorydeeper+filename +
'.csv', index=False)
if cat == 1:
data = {'CV': i, 'target_names': target, 'method_names': method, 'temporalhorizons': PrH_index, 'window_nuggets': 1,
'file_names': filename, 'F1_0': cm0[0], 'F1_1': cm0[1], 'P_0': cm0[2], 'P_1': cm0[3], 'R_0': cm0[4], 'R_1': cm0[5], 'acc0_1': cm0[6], 'F1_0_1': cm0[7], 'F1_all': cm0[8], 'fbeta': [cm0[9]]}
elif cat == 0:
data = {'CV': i, 'target_names': target, 'method_names': method, 'temporalhorizons': PrH_index, 'window_nuggets': 1,
'file_names': filename, 'mape': cm0[0], 'me': cm0[1], 'mae': cm0[2], 'mpe': cm0[3], 'rmse': cm0[4], 'R2': cm0[5]}
df = pd.DataFrame(data=data, index=[0])
df.to_csv(directory+result_filename,
index=False, mode='a', header=False)
i = i + 1
elapsed_time = time.time() - start_time
print(time.strftime("%H:%M:%S",
time.gmtime(elapsed_time)))
                    if model_name in ('ARIMA', 'AR', 'ETS', 'SARIMA', 'BL'):
start_time = time.time()
train_X_grid = dataset.values
custom_cv = ufunc.custom_cv_2folds(
train_X_grid, 1, PrH_index)
######################
# Cross Validation sets
######################
i = 1
for train_index, test_index in custom_cv:
train_X = train_X_grid[train_index]
train_X_uni = train_X[:, -1]
test_X = train_X_grid[test_index]
# actual future values
test_X_uni = test_X[:, -1]
df_time = pd.DataFrame({
'year': np.array(test_X[:, 0]).astype(int), 'month': np.array(test_X[:, 1]).astype(int),
'day': np.array(test_X[:, 2]).astype(int), 'hour': np.array(test_X[:, 3]).astype(int),
})
timeline = pd.to_datetime(
df_time, format='%Y%m%d %H')
if model_name == 'BL':
# train_X_uni,test_X_uni
# make them into dataFrame so below can be done
test_X_uni = pd.DataFrame(test_X_uni)
target_values = test_X_uni.drop(
test_X_uni.index[0: 1], axis=0)
target_values.index = np.arange(
0, len(target_values))
# test_X_uni = pd.DataFrame(test_X_uni)
predictions = test_X_uni.drop(
test_X_uni.index[len(test_X_uni)-1: len(test_X_uni)], axis=0)
test_X_uni = target_values
timeline = timeline.drop(
timeline.index[len(timeline)-1: len(timeline)], axis=0)
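                            # Worked illustration (comment added for clarity): for a series
                            # [5, 7, 9] this baseline pairs predictions [5, 7] with actuals
                            # [7, 9], i.e. the previous observation forecasts the next step.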
cm0 = func.forecast_accuracy(
predictions, test_X_uni, cat)
filename = file + '_' + \
target+'_TH' + \
str(PrH_index)+'_lag' + \
str(n_steps)+'_'+str(i)
plt.scatter(timeline.values,
test_X_uni, s=1)
plt.scatter(timeline.values,
predictions, s=1)
plt.legend(['actual', 'predictions'],
loc='upper right')
plt.xticks(rotation=45)
directorydeeper = directory+'more/'
if not os.path.exists(directorydeeper):
os.makedirs(directorydeeper)
plt.savefig(directorydeeper+filename+'.jpg')
plt.close()
print(predictions.head())
print(test_X_uni.head())
print(timeline.head())
# data = {'time': timeline,
# 'Actual': test_X_uni,
# 'Predictions': predictions}
frames = [timeline, test_X_uni, predictions]
df = pd.concat(frames, axis=1)
df.to_csv(directorydeeper+filename +
'.csv', index=False, header=['time', 'Actual', 'Predictions'])
if cat == 1:
data = {'CV': i, 'target_names': target, 'method_names': method, 'temporalhorizons': PrH_index, 'window_nuggets': 1,
'file_names': filename, 'F1_0': cm0[0], 'F1_1': cm0[1], 'P_0': cm0[2], 'P_1': cm0[3], 'R_0': cm0[4], 'R_1': cm0[5], 'acc0_1': cm0[6], 'F1_0_1': cm0[7], 'F1_all': cm0[8], 'fbeta': [cm0[9]]}
elif cat == 0:
data = {'CV': i, 'target_names': target, 'method_names': method, 'temporalhorizons': PrH_index, 'window_nuggets': 1,
'file_names': filename, 'mape': cm0[0], 'me': cm0[1], 'mae': cm0[2], 'mpe': cm0[3], 'rmse': cm0[4], 'R2': cm0[5]}
                                        df = pd.DataFrame(data=data, index=[0])
import pandas as pd
import numpy as np
from .utility_fxns import distribute
def generate_id_dict(id_list, prod_ids, df):
''' docstring for generate_id_dict
input: product id list
output: dictionary of
key: product id
values: [position of product id in full matrix
, number of skus
, sku product ids]'''
id_dict = {}
for i in prod_ids:
pos = id_list.index(i)
j = 1
sku_ids = []
flag = True
while flag:
step = pos + j
if (df.item_type[step] == 'Product') & (j == 1):
j = 0
flag = False
elif df.item_type[step] == 'Product':
j -= 1
flag = False
elif df.item_type[step] == 'SKU':
j += 1
sku_ids.append(df.product_id[step])
else:
# not a product or sku
j = 0
flag = False
id_dict[i] = [pos, j, sku_ids]
return id_dict
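# Hedged usage sketch (added for illustration; the toy frame and the _example_*
# name are not part of the original module): generate_id_dict walks the rows
# that follow each product until the next 'Product' row, collecting its SKU ids.
def _example_generate_id_dict():
    df = pd.DataFrame({
        'item_type': ['Product', 'SKU', 'SKU', 'Product'],
        'product_id': ['P1', 'S1', 'S2', 'P2'],
    })
    id_list = df['product_id'].tolist()
    # Expected result: {'P1': [0, 2, ['S1', 'S2']]}
    return generate_id_dict(id_list, ['P1'], df)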
def sku_combo_dicts_v2(file_list):
'''docstring for sku_combo_dicts'''
file = 'color_table.csv'
filename = [i for i in file_list if file in i][0]
    df = pd.read_csv(filename)
from __future__ import division
from psychopy.visual import ImageStim, TextStim, Window
from psychopy import core, event, gui, data, logging
import numpy as np
import pandas as pd
import os
from routines import Routine
#general settings
expName = 'wpa_6'
screen_size = [800, 600]
frames_per_second = 60
full_screen = False
background_color = '#eeeeee'
# trial settings
choice_keys = ['q', 'p']
escape_key = 'escape'
choice_time_limit = 5
feedback_duration = 2
fixation_duration = 1.5
#stimuli settings
text_color = 'black'
text_height = 50
options_x_offset = 200
image_size = 100
#store info about the experiment session
dlg = gui.Dlg(title=expName)
dlg.addField('Participant:', 1)
dlg.addField('Age:', 25)
dlg.addField('Gender:', choices=['female', 'male', 'prefer not to disclose'])
dlg.addField('Handedness:', choices=['right', 'left', 'both'])
dlg.show()
expInfo = dict(zip(['participant', 'age', 'gender', 'hand'], dlg.data))
expInfo['date'] = data.getDateStr() # add a simple timestamp
expInfo['expName'] = expName # add the experiment name
if dlg.OK: # then the user pressed OK
print(expInfo)
else:
print(expInfo)
core.quit()
#check if data folder exists
directory=os.path.join(os.getcwd(), 'data')
if not os.path.exists(directory):
os.makedirs(directory)
#create file name for storing data
fileName = os.path.join('data', '%s_%s_%s' % (expName, expInfo['participant'], expInfo['date']))
#save a log file
logFile = logging.LogFile(fileName + '.log', level=logging.DEBUG)
logging.console.setLevel(logging.WARNING) # this outputs to the screen, not a file
#create a window
mywin = Window(screen_size, units='pix', color=background_color, fullscr=full_screen)
#create some stimuli
n_trials = 30
A_feedback = TextStim(win=mywin, color=text_color, pos=(-options_x_offset, 0), height=text_height)
B_feedback = TextStim(win=mywin, color=text_color, pos=(options_x_offset, 0), height=text_height)
A_feedback_trials = np.random.binomial(n=1, p=.4, size=n_trials)
B_feedback_trials = np.random.binomial(n=1, p=.6, size=n_trials)
A_picture = ImageStim(win=mywin,
pos=(-options_x_offset, 0),
size=image_size,
image=os.path.join(os.getcwd(), 'stimuli', 'A.png'))
B_picture = ImageStim(win=mywin,
pos=(options_x_offset, 0),
size=image_size,
image=os.path.join(os.getcwd(), 'stimuli', 'B.png'))
fixation_cross = TextStim(win=mywin, text='+', color=text_color)
#create the dataframe
data = pd.DataFrame([])
from __future__ import print_function
import pandas as pd
import os
import logging
import argparse
'''
This file reads in data related E. coli levels
in Chicago beaches. It is based on the files
analysis.R and split_sheets.R, and is written
such that the dataframe loaded here will match
the R dataframe code exactly.
'''
# This is an adaptation of previous read_data.py so that it runs on Python3
# Some variable names changed. Notably, Client.ID is now Beach
# Added day of week and month variables
# Also adds columns to dataframe:
# YesterdayEcoli : prior days reading
# DayBeforeYesterdayEcoli : two days prior reading
# actual_elevated : where Escherichia_coli >=235
# predicted_elevated : where Drek_Prediction >=235
#
# TODO: verbose
# TODO: use multi-level index on date/beach ?
# TODO: standardize on inplace=True or not inplace
# TODO: how much consistency do we want between python columns
# and the R columns?
# TODO: create better docstrings
# TODO: remove print statements and the import
# TODO: loyola/leone the same?
# TODO: repeats on 2015-06-16 ?
# and some of 2012?
# Just check for these everywhere, why is it happening?
def split_sheets(file_name, year, verbose=False):
'''
Reads in all sheets of an excel workbook, concatenating
all of the information into a single dataframe.
The excel files were unfortunately structured such that
each day had its own sheet.
'''
xls = pd.ExcelFile(file_name)
dfs = []
standardized_col_names = [
'Date', 'Laboratory_ID', 'Beach', 'Reading1',
'Reading2', 'Escherichia_coli', 'Units', 'Sample_Collection_Time'
]
for i, sheet_name in enumerate(xls.sheet_names):
if not xls.book.sheet_by_name(sheet_name).nrows:
# Older versions of ExcelFile.parse threw an error if the sheet
# was empty, explicitly check for this condition.
logging.debug('sheet "{0}" from {1} is empty'.format(sheet_name,
year))
continue
df = xls.parse(sheet_name)
if i == 0 and len(df.columns) > 30:
# This is the master/summary sheet
logging.debug('ignoring sheet "{0}" from {1}'.format(sheet_name,
year))
continue
if df.index.dtype == 'object':
# If the first column does not have a label, then the excel
# parsing engine will helpfully use the first column as
# the index. This is *usually* helpful, but there are two
# days when the first column is missing the typical label
# of 'Laboratory ID'. In this case, peel that index off
# and set its name.
msg = '1st column in sheet "{0}" from {1} is missing title'.format(
sheet_name, year)
logging.debug(msg)
df.reset_index(inplace=True)
df.columns = ['Laboratory ID'] + df.columns.tolist()[1:]
# Insert name of sheet as first column, the sheet name is the date
df.insert(0, u'Date', sheet_name)
for c in df.columns.tolist():
if 'Reading' in c:
# There are about 10 days that have >2 readings for some reason
if int(c[8:]) > 2:
logging.info('sheet "{0}" from {1} has >2 readings'.format(
sheet_name, year)
)
df.drop(c, 1, inplace=True)
# Only take the first 8 columns, some sheets erroneously have >8 cols
        df = df.iloc[:, 0:8]
# Standardize the column names
df.columns = standardized_col_names
dfs.append(df)
df = pd.concat(dfs)
df.insert(0, u'Year', str(year))
logging.info('Removing data with missing Client ID')
df.dropna(subset=['Beach'], inplace=True)
return df
def read_holiday_data(file_name, verbose=False):
df = pd.read_csv(file_name)
df['Date'] = pd.to_datetime(df['Date'])
return df
def read_water_sensor_data(verbose=False):
'''
Downloads and reads water sensor data from the Chicago data
portal. Downsamples the readings into the min, mean, and max
for each day and for each sensor. Each day only has one row,
with many columns (one column each per sensor per reading per
type of down-sampling process)
'''
url = 'https://data.cityofchicago.org/api/views/qmqz-2xku/rows.csv?accessType=DOWNLOAD'
water_sensors = pd.read_csv(url)
url = 'https://data.cityofchicago.org/api/views/g3ip-u8rb/rows.csv?accessType=DOWNLOAD'
sensor_locations = pd.read_csv(url)
df = pd.merge(water_sensors, sensor_locations,
left_on='Beach Name', right_on='Sensor Name')
df.drop(['Sensor Type', 'Location'], 1, inplace=True)
# TODO: map sensor to beach ???
df['Beach Name'] = df['Beach Name'].apply(lambda x: x[0:-6])
df['Measurement Timestamp'] = pd.to_datetime(df['Measurement Timestamp'])
df['Date'] = pd.DatetimeIndex(df['Measurement Timestamp']).normalize()
df.drop(['Battery Life', 'Measurement Timestamp', 'Measurement Timestamp Label',
'Measurement ID', 'Sensor Name'], axis=1, inplace=True)
df_mins = df.groupby(['Beach Name', 'Date'], as_index=False).min()
df_means = df.groupby(['Beach Name', 'Date'], as_index=False).mean()
df_maxes = df.groupby(['Beach Name', 'Date'], as_index=False).max()
df_mins.drop(['Latitude','Longitude'],1,inplace=True)
df_means.drop(['Latitude','Longitude'],1,inplace=True)
df_maxes.drop(['Latitude','Longitude'],1,inplace=True)
cols = df_mins.columns.tolist()
def rename_columns(cols, aggregation_type):
cols = list(map(lambda x: x.replace(' ', '_'), cols))
for i in range(2,7):
cols[i] = cols[i] + '_' + aggregation_type
return cols
df_mins.columns = rename_columns(cols, 'Min')
df_means.columns = rename_columns(cols, 'Mean')
df_maxes.columns = rename_columns(cols, 'Max')
df = pd.merge(df_mins, df_means, on=['Beach_Name', 'Date'])
df = pd.merge(df, df_maxes, on=['Beach_Name', 'Date'])
df = df.pivot(index='Date', columns='Beach_Name')
df.columns = ['.'.join(col[::-1]).strip() for col in df.columns.values]
df.reset_index(inplace=True)
df.columns = ['Full_date'] + list( map(lambda x: x.replace(' ', '_'), df.columns.tolist()[1:]))
c = df.columns.tolist()
c[c.index('Full_date')] = 'Date'
df.columns = c
return df
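# Hedged illustration (toy values; the _example_* helper is not part of the
# original script): the pivot/flatten pattern used above turns per-sensor rows
# into one wide row per date, with columns named '<sensor>.<measurement>'.
def _example_flatten_pivot_columns():
    df = pd.DataFrame({'Date': ['2016-01-01', '2016-01-01'],
                       'Beach_Name': ['Ohio', 'Montrose'],
                       'Water_Temperature_Min': [4.0, 5.0]})
    wide = df.pivot(index='Date', columns='Beach_Name')
    wide.columns = ['.'.join(col[::-1]).strip() for col in wide.columns.values]
    # Columns become: Montrose.Water_Temperature_Min, Ohio.Water_Temperature_Min
    return wide.reset_index()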
def read_weather_station_data(verbose=False):
'''
Downloads and reads weather sensor data from the Chicago data
portal. Downsamples the readings into the min, mean, and max
for each day and for each sensor. Each day only has one row,
with many columns (one column each per sensor per reading per
type of down-sampling process)
'''
url = 'https://data.cityofchicago.org/api/views/k7hf-8y75/rows.csv?accessType=DOWNLOAD'
weather_sensors = pd.read_csv(url)
url = 'https://data.cityofchicago.org/api/views/g3ip-u8rb/rows.csv?accessType=DOWNLOAD'
sensor_locations = pd.read_csv(url)
weather_sensors.columns = map(lambda x: x.replace(' ', '_'),
weather_sensors.columns.tolist())
sensor_locations.columns = map(lambda x: x.replace(' ', '_'),
sensor_locations.columns.tolist())
sensor_locations.columns = ['Station_Name'] + sensor_locations.columns.tolist()[1:]
df = pd.merge(weather_sensors, sensor_locations, on='Station_Name')
df['Beach'] = df['Station_Name']
df['Date'] = pd.DatetimeIndex(df['Measurement_Timestamp']).normalize()
df.drop(['Measurement_Timestamp_Label', 'Measurement_Timestamp',
'Sensor_Type', 'Location', 'Measurement_ID', 'Battery_Life','Station_Name'],
axis=1, inplace=True)
df_mins = df.groupby(['Beach', 'Date'], as_index=False).min()
df_means = df.groupby(['Beach', 'Date'], as_index=False).mean()
df_maxes = df.groupby(['Beach', 'Date'], as_index=False).max()
cols = df_mins.columns.tolist()
def rename_columns(cols, aggregation_type):
cols = list(map(lambda x: x.replace(' ', '_'), cols))
for i in range(2,15):
cols[i] = cols[i] + '_' + aggregation_type
return cols
df_mins.columns = rename_columns(cols, 'Min')
df_means.columns = rename_columns(cols, 'Mean')
df_maxes.columns = rename_columns(cols, 'Max')
df = pd.merge(df_mins, df_means, on=['Beach', 'Date'])
df = pd.merge(df, df_maxes, on=['Beach', 'Date'])
df.drop(['Latitude_x', 'Latitude_y', 'Longitude_x', 'Longitude_y'], axis=1, inplace=True)
df = df.pivot(index='Date', columns='Beach')
df.columns = ['.'.join(col[::-1]).strip() for col in df.columns.values]
df.reset_index(inplace=True)
df.columns = ['Full_date'] + list( map(lambda x: x.replace(' ', '_'), df.columns.tolist()[1:]))
c = df.columns.tolist()
c[c.index('Full_date')] = 'Date'
df.columns = c
return df
def read_locations(file_name, verbose=False):
locations = pd.read_csv(file_name)
return locations
def print_full(x):
'''
Helper function to plot the *full* dataframe.
'''
pd.set_option('display.max_rows', len(x))
print(x)
pd.reset_option('display.max_rows')
def date_lookup(s, verbose=False):
'''
This is an extremely fast approach to datetime parsing.
For large data, the same dates are often repeated. Rather than
re-parse these, we store all unique dates, parse them, and
use a lookup to convert all dates.
Thanks to fixxxer, found at
http://stackoverflow.com/questions/29882573
'''
dates = {date:pd.to_datetime(date, errors='ignore') for date in s.unique()}
for date, parsed in dates.items():
        if not isinstance(parsed, pd.Timestamp):
logging.debug('Non-regular date format "{0}"'.format(date))
fmt = '%B %d (%p) %Y'
dates[date] = pd.to_datetime(date,format=fmt)
return s.apply(lambda v: dates[v])
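# Hedged usage sketch (example values only; not part of the original script):
# date_lookup parses each distinct string once, so repeated dates in a long
# column are converted via a dictionary lookup instead of being re-parsed.
def _example_date_lookup():
    s = pd.Series(['2015-06-16', '2015-06-16', 'June 16 (PM) 2015'])
    return date_lookup(s)  # three timestamps; the last uses '%B %d (%p) %Y'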
def read_data(verbose=False):
'''
Read in the excel files for years 2006-2015 found in
'data/ChicagoParkDistrict/raw/Standard 18 hr Testing'
along with drekbeach data.
Also reformats columns in accordance with the transformations
found in analysis.R
'''
cpd_data_path = './data/ChicagoParkDistrict/raw/Standard 18 hr Testing/'
#cpd_data_path = os.path.join(os.path.dirname(__file__), cpd_data_path)
dfs = []
for yr in range(2006,2015):
dfs.append(split_sheets(cpd_data_path + str(yr) + ' Lab Results.xls', yr))
dfs.append(split_sheets(cpd_data_path + '2015 Lab Results.xlsx', 2015))
df = pd.concat(dfs)
# Need to reset the index to deal with the repeated concatenations
df.index = range(0, len(df.index))
# Some records are of the form <1 or >2440
# Remove the operator and treat the remaining string as the value.
# Also convert string to float, if possible
for col in ['Reading1', 'Reading2', 'Escherichia_coli']:
for i, val in enumerate(df[col].tolist()):
if isinstance(val, (str,bytes)):
val = val.replace('<', '').replace('>', '')
try:
                    df.loc[i, col] = float(val)
                except ValueError:
                    # Sometimes strings are things like 'Sample Not Received'
                    if 'sample' in df.loc[i, col].lower():
                        logging.debug('Trying to cast "{0}" to numeric'.format(
                            df.loc[i, col]
                        ))
                    else:
                        logging.info('Trying to cast "{0}" to numeric'.format(
                            df.loc[i, col]
                        ))
                    df.loc[i, col] = float('nan')
        df[col] = df[col].astype('float64')
# Massage dates, create weekday column
df.insert(0, 'Full_date', df[['Date', 'Year']].apply(lambda x: ' '.join(x), axis=1).apply(lambda x: x.replace(' (PM)', '') ))
df['Full_date'] = date_lookup(df['Full_date'])
df.insert(0, 'Timestamp', pd.to_datetime(df['Full_date'], errors='coerce') )
months=['Jan','Feb','Mar','Apr','May','Jun','Jul','Aug','Sep','Oct','Nov','Dec']
df.insert(0, 'Month', df['Timestamp'].dt.month.apply(lambda x: months[int(x)-1]) )
days=['Monday','Tuesday','Wednesday','Thursday','Friday','Saturday','Sunday']
df.insert(0, 'Weekday', df['Timestamp'].dt.dayofweek.apply(lambda x: days[int(x)]) )
df.drop(['Date','Timestamp'], axis=1,inplace=True )
# Some header rows were duplicated
df = df[df['Laboratory_ID'] != u'Laboratory ID']
# Normalize the beach names
df['Beach'] = df['Beach'].map(lambda x: x.strip())
cleanbeachnames = pd.read_csv(cpd_data_path + 'cleanbeachnames.csv')
cleanbeachnames = dict(zip(cleanbeachnames['Old'], cleanbeachnames['New']))
# There is one observation that does not have a beach name in the
# Beach column, remove it.
df = df[df['Beach'].map(lambda x: x in cleanbeachnames)]
df['Beach'] = df['Beach'].map(lambda x: cleanbeachnames[x])
# Read in drek beach data
drek_data_path = './data/DrekBeach/'
drekdata = pd.read_csv(drek_data_path + 'daily_summaries_drekb.csv')
drekdata.columns = ['Beach', 'Full_date', 'Drek_Reading','Drek_Prediction', 'Drek_Worst_Swim_Status']
drekdata['Full_date'] = date_lookup(drekdata['Full_date'])
drekdata['Beach'] = drekdata['Beach'].map(lambda x: x.strip())
drekdata['Beach'] = drekdata['Beach'].map(lambda x: cleanbeachnames[x])
df = pd.merge(df, drekdata, how='outer', on= ['Beach', 'Full_date'])
c = df.columns.tolist()
c[c.index('Full_date')] = 'Date'
df.columns = c
# get rid of some useless columns
df.drop(['Laboratory_ID','Units','Sample_Collection_Time','Drek_Worst_Swim_Status'], axis=1,inplace=True )
# There was an anamolous reading, the max possible value from the test
# is around 2420, but one reading was 6488.
# We need to do the ~(reading 1 > 2500 | reading 2 > 2500) instead of
# (reading 1 < 2500 & reading 2 < 2500) since the latter returns
# False if there is a NaN.
df = df[~((df['Reading1'] > 2500) | (df['Reading2'] > 2500))]
# R code creates a calculated geometric mean column b/c it didn't
# import the column correctly (it truncated the value). Pandas did
# import correctly, so no need to create that.
external_data_path = './data/ExternalData/'
#external_data_path = os.path.join(os.path.dirname(__file__),
# external_data_path)
holidaydata = read_holiday_data(external_data_path + 'Holidays.csv', verbose)
# TODO: merge holiday data
watersensordata = read_water_sensor_data(verbose)
    df = pd.merge(df, watersensordata, on='Date', how='outer')
import numpy as np
import pandas as pd
import torch
import torch.optim as optim
from train import train, loss_func, test
from model import NN, CNN
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.linear_model import Ridge, Lasso
from sklearn.ensemble import RandomForestRegressor
from sklearn.kernel_ridge import KernelRidge
from sklearn.decomposition import PCA
from sklearn.model_selection import GridSearchCV
from sklearn import metrics
from sklearn.tree import DecisionTreeRegressor
from densratio import densratio
from pykliep import DensityRatioEstimator
import xgboost as xgb
file_names = ['books_processed_balanced',
'dvd_processed_balanced',
'electronics_processed_balanced',
'kitchen_processed_balanced']
def calc_result(reg, x0, y0, x1, y1, dr=None):
reg.fit(x0, y0, sample_weight=dr)
train_loss = np.mean((y0 - reg.predict(x0))**2)
test_loss = np.mean((y1 - reg.predict(x1))**2)
rating_temp = y1.copy()
rating_temp[rating_temp >= 3] = 100
auc = calc_auc(rating_temp, reg.predict(x1))
return train_loss, test_loss, auc
def calc_auc(y, f):
fpr, tpr, _ = metrics.roc_curve(y, f, pos_label=100)
auc = metrics.auc(fpr, tpr)
return 1-auc
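# Hedged usage sketch (random toy data; the _example_* helper and the unit
# weights are assumptions, not part of the original experiment): calc_result
# fits a regressor with optional density-ratio sample weights and reports
# train MSE, test MSE and 1 - AUC for detecting ratings >= 3.
def _example_calc_result():
    rng = np.random.RandomState(0)
    x0, y0 = rng.rand(50, 5), rng.randint(1, 6, 50)
    x1, y1 = rng.rand(20, 5), rng.randint(1, 6, 20)
    return calc_result(Ridge(alpha=1.0), x0, y0, x1, y1, dr=np.ones(50))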
def main():
ite = 10
num_train_data = 2000
num_test_data = 2000
Net = NN
model_num = 3
learning_rate = 1e-4
epoch = 200
batchsize = 256
seed = 2020
for f_name_idx0 in range(len(file_names)):
for f_name_idx1 in range(f_name_idx0+1, len(file_names)):
train_loss_normal = np.zeros((ite, model_num))
test_loss_normal = np.zeros((ite, model_num))
auc_normal = np.zeros((ite, model_num))
train_loss_kerulsif = np.zeros((ite, model_num))
test_loss_kerulsif = np.zeros((ite, model_num))
auc_kerulsif = np.zeros((ite, model_num))
train_loss_kerkleip = np.zeros((ite, model_num))
test_loss_kerkleip = np.zeros((ite, model_num))
auc_kerkleip = np.zeros((ite, model_num))
train_loss_pu = np.zeros((ite, model_num))
test_loss_pu = np.zeros((ite, model_num))
auc_pu = np.zeros((ite, model_num))
train_loss_ulsif = np.zeros((ite, model_num))
test_loss_ulsif = np.zeros((ite, model_num))
auc_ulsif = np.zeros((ite, model_num))
train_loss_nnpu = np.zeros((ite, model_num))
test_loss_nnpu = np.zeros((ite, model_num))
auc_nnpu = np.zeros((ite, model_num))
train_loss_nnulsif = np.zeros((ite, model_num))
test_loss_nnulsif = np.zeros((ite, model_num))
auc_nnulsif = np.zeros((ite, model_num))
f_name0 = file_names[f_name_idx0]
f_name1 = file_names[f_name_idx1]
for i in range(ite):
np.random.seed(seed)
if f_name0 != f_name1:
data0 = pd.read_csv('dataset/%s.csv'%f_name0)
data1 = pd.read_csv('dataset/%s.csv'%f_name1)
data0 = data0.dropna()
data1 = data1.dropna()
perm0 = np.random.permutation(len(data0))
perm1 = np.random.permutation(len(data1))
choice0 = np.zeros(len(data0))
choice0[perm0[:num_train_data]] = 1
data0['choice'] = choice0
choice1 = np.zeros(len(data1))
choice1[perm1[:num_test_data]] = 1
data1['choice'] = choice1
data0 = data0.get(['rating', 'text', 'item', 'choice'])
data1 = data1.get(['rating', 'text', 'item', 'choice'])
data = pd.concat([data0, data1])
else:
                    data = pd.read_csv('dataset/%s.csv'%f_name0)
from sklearn.preprocessing import StandardScaler
import math
import pandas as pd
import numpy as np
import re
import pyBigWig
import pickle
import multiprocessing as mp
import time
import os
import sys
from keras.models import load_model
class DNArepresent:
def __init__(self, sequence, chrom, start, stop, strand, conservation_path):
self.sequence = sequence.upper()
self.chrom = chrom
self.start = start
self.stop = stop
self.strand = strand
self.conservation_path = conservation_path
def list_of_zeros(self):
x = [0]*len(self.sequence)
y = [0]*len(self.sequence)
z = [0]*len(self.sequence)
return(x, y, z)
def DNA_walk(self):
x = []
for i, f in enumerate(self.sequence):
if i == 0:
if f == 'C' or f == 'T':
x.append(1)
else:
x.append(-1)
else:
if f == 'C' or f == 'T':
x.append(x[i-1]+1)
else:
x.append(x[i-1]-1)
return(x)
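        # Worked example (comment added for clarity): for self.sequence == 'ACGT'
        # the walk above yields [-1, 0, -1, 0]; pyrimidines (C/T) step +1 and
        # purines (A/G) step -1 from the previous cumulative value.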
def Z_curve(self):
x,y,z = self.list_of_zeros()
for i, f in enumerate(self.sequence):
if f == 'T' or f == 'G':
x[i] = x[i-1] + 1
else:
x[i] = x[i-1] - 1
if f == 'A' or f == 'C':
y[i] = y[i-1] + 1
else:
y[i] = y[i-1] - 1
if f == 'A' or f == 'T':
z[i] = z[i-1] + 1
else:
z[i] = z[i-1] - 1
return(x, y, z)
def paired_numeric(self):
x = []
for f in self.sequence:
if f == 'A' or f == 'T':
x.append(1)
else:
x.append(-1)
return(x)
def tetrahedron(self):
x,y,z = self.list_of_zeros()
for i, f in enumerate(self.sequence):
if f == 'T':
x[i] = 2*math.sqrt(2)/3
y[i] = 0
z[i] = -1/3
if f == 'C':
x[i] = -math.sqrt(2)/3
y[i] = math.sqrt(6)/3
z[i] = -1/3
if f == 'G':
x[i] = -math.sqrt(2)/3
y[i] = -math.sqrt(6)/3
z[i] = -1/3
if f == 'A':
x[i] = 0
y[i] = 0
z[i] = 1
return(x, y, z)
@classmethod
def onehot_conversion_sequence(cls, letter):
one_hot_map = {
"A": np.asarray([1, 0, 0, 0],dtype=np.float32), "a": np.asarray([1, 0, 0, 0],dtype=np.float32),
"C": np.asarray([0, 1, 0, 0],dtype=np.float32), "c": np.asarray([0, 1, 0, 0],dtype=np.float32),
"G": np.asarray([0, 0, 1, 0],dtype=np.float32), "g": np.asarray([0, 0, 1, 0],dtype=np.float32),
"T": np.asarray([0, 0, 0, 1],dtype=np.float32), "t": np.asarray([0, 0, 0, 1],dtype=np.float32),
"N": np.asarray([0, 0, 0, 0],dtype=np.float32), "n": np.asarray([0, 0, 0, 0],dtype=np.float32)}
return one_hot_map[letter]
def one_hot_encoder(self):
tmp = []
for letter in self.sequence:
tmp.append(self.onehot_conversion_sequence(letter))
out = np.vstack(tmp)
return (out)
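        # Worked example (comment added for clarity): for self.sequence == 'ACN'
        # one_hot_encoder stacks [[1,0,0,0], [0,1,0,0], [0,0,0,0]]; ambiguous
        # bases ('N'/'n') map to an all-zero row.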
def bendability(self, size=3):
out = [0, 0, 0]
for x in range(0, len(self.sequence) - size):
kmer = self.sequence[x:x + size]
if 'N' in kmer:
out.append('0')
else:
out.append(VALS_kmers[COLS_kmers.index(kmer)])
if self.strand == '-':
out = out[::-1]
out = np.vstack(out).astype(float)
return(out)
def propellerTwist(self, size=2):
out = ['-12.6', '-12.6']
for x in range(0, len(self.sequence) - size):
kmer = self.sequence[x:x + size]
if 'N' in kmer:
out.append('-12.6')
else:
out.append(VALS_kmers_proptwst[COLS_kmers_proptwst.index(kmer)])
if self.strand == '-':
out = out[::-1]
out = np.vstack(out).astype(float)
return(out)
def conservation_calc(self):
if self.conservation_path is not None:
bw = pyBigWig.open(self.conservation_path)
out = bw.values(self.chrom, self.start, self.stop)
out = np.vstack(out)
if self.strand == '-':
out = np.flip(out)
bw.close()
return(out)
else:
return(False)
def Read_file(infile):
with open(infile) as f:
lines = f.readlines()
cols = []
vals = []
for line in lines:
cols.append(line.strip().split("\t")[0])
vals.append(line.strip().split("\t")[1])
return(cols, vals)
def read_fasta(path_fasta):
fasta = pd.read_csv(path_fasta, header=None, sep="\t")
fasta[['chr', 'strand']] = fasta[0].str.split("(", expand=True)
fasta['strand'] = fasta['strand'].str[:-1]
fasta[0] = fasta[0].str[:-3]
fasta[['chr', 'start']] = fasta[0].str.split(":", expand=True)
fasta[['start', 'stop']] = fasta['start'].str.split("-", expand=True)
fasta['sequence'] = fasta[1]
fasta = fasta.drop([0, 1], axis=1)
# Cast to int
fasta['start'] = fasta['start'].astype(int)
fasta['stop'] = fasta['stop'].astype(int)
return(fasta)
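# Hedged note (added for clarity; the exact header layout is an assumption based
# on the parsing above): read_fasta expects the two-column `bedtools getfasta
# -tab` output, e.g. "chr1:100-700(+)\tACGT...", and splits the header into
# chr / start / stop / strand columns alongside the sequence.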
def fix_coords(bed, sequence_len, path_bed):
bed['tmp'] = (((bed[1] + bed[2]) / 2) - (sequence_len / 2)).astype(int)
bed[2] = (((bed[1] + bed[2]) / 2) + (sequence_len / 2)).astype(int)
bed[1] = bed['tmp']
bed = bed.drop(['tmp'], axis=1)
bed = bed.rename(columns={0: 'chr', 1: 'start', 2: 'stop', 3: 'id', 4: 'tpm', 5: 'strand'})
# Save pos/neg set (to utilize bedtools getfasta)
bed.to_csv(path_bed, sep="\t", header=None, index=False)
return(bed)
def features(index, row):
# Create tss object
tss = DNArepresent(row['sequence'], row['chr'], row['start'], row['stop'], row['strand'], conservation_path)
# One-hot encoder sequence
enc_seq = tss.one_hot_encoder()
# Calculate DNA representations
dnawalk = tss.DNA_walk()
x, y, z = tss.Z_curve()
prd_Num = tss.paired_numeric()
r, g, b = tss.tetrahedron()
# Stack vertically all representations
dna_represent = np.vstack([dnawalk, x, y, z, prd_Num, r, g, b]).T
# Bendability
bend = tss.bendability()
# Propeller_Twist
propTwist = tss.propellerTwist()
if conservation_path is not None:
# Conservation
conservation = tss.conservation_calc()
cur = np.hstack([enc_seq, dna_represent, bend, propTwist, conservation])
else:
cur = np.hstack([enc_seq, dna_represent, bend, propTwist])
return( cur )
def split_features_branch(X, conservation_path):
# Split features for 3 branches
X1 = X[:, :, [0, 1, 2, 3]] # seq x4
X2 = X[:, :, [4, 5, 6, 7, 8, 9, 10, 11]] # features x8
X3 = X[:, :, [12,13]] # bendabillity, PropellerTwist x2
if conservation_path is not None:
X4 = X[:, :, [14]] # conservation x1
return(X1, X2, X3, X4)
else:
return (X1, X2, X3)
def standardize_feature(X, scaler=StandardScaler()):
X = scaler.fit_transform(X.reshape(-1, X.shape[-1])).reshape(X.shape)
return(X)
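# Hedged usage sketch (shapes are assumptions for illustration): the reshape
# trick above fits one scaler per channel over all samples and positions of a
# 3-D array, then restores the original (samples, positions, channels) shape.
def _example_standardize_feature():
    X = np.random.rand(4, 600, 15)
    return standardize_feature(X).shape  # still (4, 600, 15), channels standardized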
def export_file(result, prediction):
cur = result.copy()
cur['pred'] = prediction
cur['tpm'] = cur['tpm'].round(3)
cur['id'] = cur[['id', 'tpm']].astype(str).agg('||'.join, axis=1)
cur = cur.drop(['sequence', 'tpm'], axis=1)
cols = ['chr', 'start', 'stop', 'id', 'pred', 'strand']
cur = cur[cols]
cur['start'] = ((cur['start'].astype(int) + cur['stop'].astype(int))/2).astype(int)
cur['stop'] = cur['start'].astype(int) + 1
return(cur)
#------------------------------------------##------------------------------------------#
#------------------------------------------##------------------------------------------#
#------------------------------------------##------------------------------------------#
def main_predict(dict, representatives, work_dir):
start_time = time.time()
global ROOT_DIR
global conservation_path
global COLS_kmers
global VALS_kmers
global COLS_kmers_proptwst
global VALS_kmers_proptwst
out_dir = work_dir
cores = dict['threads']
conservation_path = dict['cons']
inputBed_path = representatives
hg38_fa = dict['hg38_fa']
sequence_len = 600
###############################################
# Current script dir
ROOT_DIR = os.path.abspath(os.path.dirname(sys.argv[0]))
scalers_path = os.path.join(ROOT_DIR, 'models', 'scalers')
scalers_path_cons = os.path.join(ROOT_DIR, 'models', 'scalers_cons')
model_path = os.path.join(ROOT_DIR, 'models', 'model17-10.hdf5')
model_path_cons = os.path.join(ROOT_DIR, 'models', 'model28-09.hdf5')
COLS_kmers, VALS_kmers = Read_file(f'{ROOT_DIR}/files/bendability.tsv')
COLS_kmers_proptwst, VALS_kmers_proptwst = Read_file(f'{ROOT_DIR}/files/Propeller_twist.tsv')
# Read representatives file (*.bed)
inputBed = pd.read_csv(inputBed_path, header=None, sep="\t")
# Fix coordinates start-sequence_len | stop+sequence_len from TSS (in case of coordinate > 1)
path_bed = f'{out_dir}/{os.path.basename(inputBed_path).split(".")[0]}_CNN.scored_tmp.bed'
inputBed = fix_coords(inputBed, sequence_len, path_bed)
# Bedtools getfasta
path_fasta = f'{os.path.dirname(path_bed)}/labeled.fasta'
cmd = f'bedtools getfasta -fi {hg38_fa} -bed {path_bed} -s -tab -fo {path_fasta}'
os.system(cmd)
# Read the fasta
fasta = read_fasta(path_fasta)
# Merge fasta & bed
    result = pd.merge(left=inputBed, right=fasta, how='left', left_on=['chr', 'start', 'stop', 'strand'], right_on=['chr', 'start', 'stop', 'strand'])
#!/usr/bin/env python
# coding: utf-8
##### HELPER FUNCTIONS #####
# import libraries
import numpy as np
import pandas as pd
from datetime import datetime,date
from device_detector import DeviceDetector
import re
### CLEANING
def drop_rows(df):
print('Starting dropping rows...')
# keep rows where exclude hit is == 0
df = df[df['exclude_hit'] == 0]
# keep rows where hit source != 5, 7, 8 or 9
    df = df[~df['hit_source'].isin([5, 7, 8, 9])]
# keep rows where visitor id is not missing (6 missing values)
df = df[pd.notnull(df['visitor_id'])]
# clean visit page num and keep rows where visit page num is not missing or faulty (118 missing and 269 faulty values)
df['visit_page_num'] = df['visit_page_num'].apply(lambda x: np.nan if len(str(x)) > 10 else x)
df = df[pd.notnull(df['visit_page_num'])]
print('Dropping rows complete.')
return df
def drop_columns(df):
print('Starting dropping columns...')
# select columns to keep
columns_to_keep = ['visitor_id',
'visit_start_time_gmt',
'hit_time_gmt',
'date_time',
# numerical columns
'visit_num',
'visit_page_num',
'purchase_boolean',
'product_view_boolean',
'checkout_boolean',
'cart_addition_boolean',
'cart_removal_boolean',
'cart_view_boolean',
'campaign_view_boolean',
'cart_value',
'page_view_boolean',
'last_purchase_num',
'standard_search_results_clicked',
'standard_search_started',
'suggested_search_results_clicked',
# categorical columns
'country',
'cookies',
'persistent_cookie',
'search_page_num',
'connection_type',
'search_engine',
'marketing_channel',
'referrer_type',
'new_visit',
'hourly_visitor',
'daily_visitor',
'weekly_visitor',
'monthly_visitor',
'quarterly_visitor',
'yearly_visitor',
'product_items',
'product_item_price',
'product_categories',
'device_type_user_agent',
'device_brand_name_user_agent',
'device_operating_system_user_agent',
'device_browser_user_agent',
'repeat_orders',
'net_promoter_score',
'hit_of_logged_in_user',
'registered_user',
'user_gender',
'user_age',
'visit_during_tv_spot']
# subset dataframe to select only columns to keep
df = df[columns_to_keep]
print('Dropping columns complete.')
return df
def rename_columns(df):
print('Starting renaming columns...')
df.rename(columns={'va_closer_id' : 'marketing_channel'}, inplace=True)
df.rename(columns={'os' : 'operating_system'}, inplace=True)
df.rename(columns={'ref_type' : 'referrer_type'}, inplace=True)
df.rename(columns={'post_search_engine' : 'search_engine'}, inplace=True)
df.rename(columns={'cart_value_(v50)' : 'cart_value'}, inplace=True)
df.rename(columns={'int._stand._search_result_clicked_(e16)' : 'standard_search_results_clicked'}, inplace=True)
df.rename(columns={'active_stand._search_started_(e17)' : 'standard_search_started'}, inplace=True)
df.rename(columns={'sugg._search_result_clicked_(e18)' : 'suggested_search_results_clicked'}, inplace=True)
df.rename(columns={'post_cookies' : 'cookies'}, inplace=True)
df.rename(columns={'post_persistent_cookie' : 'persistent_cookie'}, inplace=True)
df.rename(columns={'repeat_orders_(e9)' : 'repeat_orders'}, inplace=True)
df.rename(columns={'net_promoter_score_raw_(v10)_-_user' : 'net_promoter_score'}, inplace=True)
df.rename(columns={'hit_of_logged_in_user_(e23)' : 'hit_of_logged_in_user'}, inplace=True)
df.rename(columns={'registered_user_(user)_(v34)' : 'registered_user'}, inplace=True)
df.rename(columns={'user_gender_(v61)' : 'user_gender'}, inplace=True)
df.rename(columns={'user_age_(v62)' : 'user_age'}, inplace=True)
df.rename(columns={'visit_during_tv_spot_(e71)' : 'visit_during_tv_spot'}, inplace=True)
print('Renaming columns complete')
return df
def fill_missing_and_faulty_values(df):
print('Starting filling missing and faulty values...')
df['cart_value'].fillna(0, inplace=True)
df['registered_user'] = df['registered_user'].apply(lambda x: 1 if x == 'y' else 0)
df['cookies'] = df['cookies'].apply(lambda x: 1 if x == 'Y' else 0)
df['persistent_cookie'] = df['persistent_cookie'].apply(lambda x: 1 if x == 'Y' else 0)
print('Filling missing and faulty values complete.')
return df
def cast_data_types(df):
print('Starting casting data types...')
# datetime columns
df['date_time'] = df['date_time'].apply(lambda x: datetime.strptime(x, '%Y-%m-%d %H:%M:%S'))
df['hit_time_gmt'] = pd.to_datetime(df['hit_time_gmt'], unit='s')
df['visit_start_time_gmt'] = pd.to_datetime(df['visit_start_time_gmt'], unit='s')
# integer columns
integer_columns = ['visit_num',
'visit_page_num',
'purchase_boolean',
'product_view_boolean',
'checkout_boolean',
'cart_addition_boolean',
'cart_removal_boolean',
'cart_view_boolean',
'campaign_view_boolean',
'page_view_boolean',
'last_purchase_num',
'standard_search_results_clicked',
'standard_search_started',
'suggested_search_results_clicked',
'cookies',
'persistent_cookie',
'search_page_num',
'new_visit',
'hourly_visitor',
'daily_visitor',
'weekly_visitor',
'monthly_visitor',
'quarterly_visitor',
'yearly_visitor',
'repeat_orders',
'hit_of_logged_in_user',
'registered_user',
'visit_during_tv_spot']
for column in integer_columns:
df[column] = df[column].apply(lambda x: int(float(x)))
# float column
df['cart_value'] = df['cart_value'].apply(lambda x: float(x))
print('Casting data types complete.')
return df
### MAPPING
def connection_type_mapping(df):
print('Starting connection type mapping...')
# load file for connection type mapping and select columns
connection_type_mapping = pd.read_csv('../data/mapping_files/connection_type.tsv', sep='\t', header=None)
connection_type_mapping.columns = ['connection_type_id', 'connection_type_name']
# create dictionary for connection type mapping
connection_type_mapping_dict = dict(zip(connection_type_mapping.connection_type_id, connection_type_mapping.connection_type_name))
# map connection types
df['connection_type'] = df['connection_type'].map(connection_type_mapping_dict).fillna(df['connection_type'])
print('Connection type mapping complete.')
return df
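# Hedged illustration (toy ids/names; the _example_* helper is not part of the
# original pipeline): the dict(zip(...)) + .map().fillna() pattern above swaps
# known ids for names while leaving unmapped ids untouched.
def _example_id_mapping():
    df = pd.DataFrame({'connection_type': [0, 1, 2]})
    mapping = {0: 'broadband', 1: 'mobile'}
    df['connection_type'] = df['connection_type'].map(mapping).fillna(df['connection_type'])
    return df  # 0 -> 'broadband', 1 -> 'mobile', 2 stays 2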
def country_mapping(df):
print('Starting country mapping...')
# load file for country mapping and select columns
country_mapping = pd.read_csv('../data/mapping_files/country.tsv', sep='\t', header=None)
country_mapping.columns = ['country_id', 'country_name']
# drop dupliate countries
country_mapping = country_mapping.drop_duplicates('country_name').reset_index(drop=True)
# create dictionary for country mapping
country_mapping_dict = dict(zip(country_mapping.country_id, country_mapping.country_name))
# map countries
df['country'] = df['country'].map(country_mapping_dict).fillna(df['country'])
print('Country mapping complete.')
return df
def custom_evars_mapping(df):
print('Starting custom evars mapping...')
# load file for custom evars mapping and select columns
evars = pd.read_csv('../data/mapping_files/custom_evars.tsv', sep='\t')
evars_mapping = evars[['id', 'name']]
# map custom evars
evar_cols = [x for x in df.columns if x.lower()[:9] == 'post_evar']
evar_cols = [x.replace('post_', '') for x in evar_cols]
evars_mapped = evars[evars['id'].isin(evar_cols)][['id', 'name']]
evars_mapped['id'] = evars_mapped['id'].apply(lambda x: 'post_' + x)
evars_mapped = evars_mapped.reset_index(drop=True)
# rename custom evars
for i in range(evars_mapped.shape[0]):
df.rename(columns={evars_mapped.iloc[i,0] : str.lower(evars_mapped.iloc[i,1]).replace(' ','_')}, inplace=True)
print('Custom evars mapping complete.')
return df
def custom_marketing_channel_mapping(df):
print('Starting custom marketing channel mapping...')
# load file for custom marketing channel mapping
custom_marketing_channel_mapping = pd.read_csv('../data/mapping_files/custom_marketing_channels.tsv', sep='\t')
# create dictionary for marketing channel mapping
custom_marketing_channel_mapping_dict = dict(zip(custom_marketing_channel_mapping.channel_id, custom_marketing_channel_mapping.name))
# map custom marketing channels
df['va_closer_id'] = df['va_closer_id'].apply(lambda x: float(x))
df['va_closer_id'] = df['va_closer_id'].map(custom_marketing_channel_mapping_dict).fillna(df['va_closer_id'])
df['va_closer_id'] = df['va_closer_id'].apply(lambda x: 'Unknown' if x == 0 else x)
print('Custom marketing channel mapping complete.')
return df
def custom_and_standard_events_mapping(df):
print('Starting custom and standard events mapping...')
# fill missing values in post event list
df['post_event_list'] = df['post_event_list'].fillna('Unknown')
# load file for standard event mapping and select columns
standard_events = pd.read_csv('../data/mapping_files/event.tsv', sep='\t', header=None)
standard_events.columns = ['event_id', 'event_name']
# load file for custom event mapping and modify event id for matching
custom_events = pd.read_csv('../data/mapping_files/custom_events.tsv', sep='\t')
custom_events['event_id'] = custom_events.index + 200
# map standard and custom events
events = pd.merge(standard_events, custom_events, how='inner', on='event_id')
events_mapping = events[['event_id', 'name']]
events_mapping = events_mapping.reset_index(drop=True)
# create event dummies
for id, event in zip(events_mapping.iloc[:,0], events_mapping.iloc[:,1]):
df[str.lower(event).replace(' ','_')] = df['post_event_list'].apply(lambda x: 1 if ','+str(id)+',' in x else 0)
# drop internal users
df = df[df['internal_user_(e30)'] != 1]
print('Standard and custom events mapping complete.')
return df
def referrer_type_mapping(df):
print('Starting referrer type mapping...')
# load file for referrer type mapping and select columns
referrer_type_mapping = pd.read_csv('../data/mapping_files/referrer_type.tsv', sep='\t', header=None)
referrer_type_mapping.columns = ['referrer_type_id', 'referrer_type_name', 'referrer_type']
# create dictionary for referrer type mapping
referrer_type_mapping_dict = dict(zip(referrer_type_mapping.referrer_type_id, referrer_type_mapping.referrer_type))
# map referrer types
df['ref_type'] = df['ref_type'].map(referrer_type_mapping_dict).fillna(df['ref_type'])
print('Referrer type mapping complete.')
return df
def search_engine_mapping(df):
print('Starting search engine mapping...')
# load file for search engine mapping and select columns
    search_engine_mapping = pd.read_csv('../data/mapping_files/search_engines.tsv', sep='\t', header=None)
import collections
import logging
import pandas as pd
import sklearn.linear_model as slm
import core.artificial_signal_generators as sig_gen
import core.config as cconfig
import core.dataflow as dtf
import core.dataframe_modeler as dfmod
import helpers.unit_test as hut
_LOG = logging.getLogger(__name__)
class TestDataFrameModeler(hut.TestCase):
def test_dump_json1(self) -> None:
df = pd.DataFrame(
{"col0": [1, 2, 3], "col1": [4, 5, 6]},
index=pd.date_range("2010-01-01", periods=3),
)
oos_start = pd.Timestamp("2010-01-01")
info = collections.OrderedDict({"df_info": dtf.get_df_info_as_string(df)})
df_modeler = dfmod.DataFrameModeler(df, oos_start=oos_start, info=info)
output = df_modeler.dump_json()
self.check_string(output)
def test_load_json1(self) -> None:
"""
Test by dumping json and loading it again.
"""
df = pd.DataFrame(
{"col0": [1, 2, 3], "col1": [4, 5, 6]},
index=pd.date_range("2010-01-01", periods=3),
)
oos_start = pd.Timestamp("2010-01-01")
info = collections.OrderedDict({"df_info": dtf.get_df_info_as_string(df)})
df_modeler = dfmod.DataFrameModeler(df, oos_start=oos_start, info=info)
json_str = df_modeler.dump_json()
df_modeler_loaded = dfmod.DataFrameModeler.load_json(json_str)
        pd.testing.assert_frame_equal(df_modeler.df, df_modeler_loaded.df)
import logging as log
import os.path
import math
import pandas as pd
import numpy as np
# for combinations of metric names
from itertools import combinations, chain
from PyQt5 import QtCore
class Data:
def __init__(self):
"""
Class that stores input data.
This class will handle data import using: Data.importFile(filename).
Dataframes will be stored as a dictionary with sheet names as keys
and pandas DataFrame as values
This class will keep track of the currently selected sheet and will
return that sheet when getData() method is called.
"""
self.sheetNames = ["None"]
self._currentSheet = 0
self.STATIC_NAMES = ['T', 'FC', 'CFC']
self.STATIC_COLUMNS = len(self.STATIC_NAMES) # 3 for T, FC, CFC columns
self.dataSet = {"None": None}
# self._numCovariates = 0
self.numCovariates = 0
self._n = 0
self.containsHeader = True
self.metricNames = []
self.metricNameCombinations = []
self.metricNameDictionary = {}
self._max_interval = 0
self.setupMetricNameDictionary()
@property
def currentSheet(self):
return self._currentSheet
@currentSheet.setter
def currentSheet(self, index):
if index < len(self.sheetNames) and index >= 0:
self._currentSheet = index
log.info("Current sheet index set to %d.", index)
else:
self._currentSheet = 0
log.info("Cannot set sheet to index %d since the data does not contain a sheet with that index.\
Sheet index instead set to 0.", index)
@property
def n(self):
self._n = self.dataSet[self.sheetNames[self._currentSheet]]['FC'].size
return self._n
@property
def max_interval(self):
return self._max_interval
@max_interval.setter
def max_interval(self, interval):
if interval < 5:
self._max_interval = 5
else:
self._max_interval = interval
def getData(self):
"""
Returns dataframe corresponding to the currentSheet index
"""
full_dataset = self.dataSet[self.sheetNames[self._currentSheet]]
try:
subset = full_dataset[:self._max_interval]
except TypeError:
# if None type, data hasn't been loaded
# cannot subscript None type
return full_dataset
return subset
def getDataSubset(self, fraction):
"""
Returns subset of dataframe corresponding to the currentSheet index
Args:
            fraction: float between 0.0 and 1.0 indicating the fraction of
            data to return
"""
intervals = math.floor(self.n * fraction)
# need at least 5 data points
if intervals < 5:
intervals = 5
full_dataset = self.dataSet[self.sheetNames[self._currentSheet]]
subset = full_dataset[:intervals]
return subset
def getFullData(self):
return self.dataSet[self.sheetNames[self._currentSheet]]
def getDataModel(self):
"""
Returns PandasModel for the current dataFrame to be displayed
on a QTableWidget
"""
return PandasModel(self.getData())
def setupMetricNameDictionary(self):
"""
For allocation table. Allows the effort allocation to be placed in correct column.
Metric name maps to number of metric (from imported data).
"""
i = 0
for name in self.metricNames:
self.metricNameDictionary[name] = i
i += 1
def processFT(self, data):
"""
Processes raw FT data to fill in any gaps
Args:
data: Raw pandas dataframe
Returns:
data: Processed pandas dataframe
"""
# failure time
if 'FT' not in data:
data["FT"] = data["IF"].cumsum()
# inter failure time
elif 'IF' not in data:
data['IF'] = data['FT'].diff()
data['IF'].iloc[0] = data['FT'].iloc[0]
if 'FN' not in data:
data['FN'] = pd.Series([i+1 for i in range(data['FT'].size)])
return data
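        # Worked example (comment added for clarity): with FT = [3, 7, 12] the
        # method fills IF = [3, 4, 5] (the first inter-failure time equals the
        # first failure time) and FN = [1, 2, 3]; given only IF, FT is its cumsum.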
def initialNumCovariates(self, data):
"""
Calculates the number of covariates on a given sheet
"""
numCov = len(data.columns) - self.STATIC_COLUMNS
# log.debug("%d covariates.", self._numCovariates)
return numCov
def renameHeader(self, data, numCov):
"""
Renames column headers if covariate metrics are unnamed
"""
data.rename(columns={data.columns[0]:"Time"}, inplace=True)
data.rename(columns={data.columns[1]:"Failures"}, inplace=True)
for i in range(numCov):
data.rename(columns={data.columns[i+2]:"C{0}".format(i+1)}, inplace=True) # changed from MetricX to CX
def importFile(self, fname):
"""
Imports data file
Args:
fname : Filename of csv or excel file
"""
self.filename, fileExtenstion = os.path.splitext(fname)
if fileExtenstion == ".csv":
if self.hasHeader(fname, fileExtenstion):
# data has header, can read in normally
data = {}
data["None"] = pd.read_csv(fname)
else:
# data does not have a header, need to specify
data = {}
data["None"] = pd.read_csv(fname, header=None)
else:
if self.hasHeader(fname, fileExtenstion):
# data has header, can read in normally
# *** don't think it takes into account differences in sheets
data = pd.read_excel(fname, sheet_name=None, engine="openpyxl")
else:
data = pd.read_excel(fname, sheet_name=None, header=None, engine="openpyxl")
self.sheetNames = list(data.keys())
self._currentSheet = 0
self.setData(data)
self.setNumCovariates()
self._n = data[self.sheetNames[self._currentSheet]]['FC'].size
# self.metricNames = self.dataSet[self.sheetNames[self._currentSheet]].columns.values[2:2+self.numCovariates]
self.setMetricNames()
self.getMetricNameCombinations()
self.setupMetricNameDictionary()
def hasHeader(self, fname, extension, rows=2):
"""
Determines if loaded data has a header
Args:
fname : Filename of csv or excel file
extension : file extension of opened file
rows : number of rows of file to compare
Returns:
bool : True if data has header, False if it does not
"""
if extension == ".csv":
df = pd.read_csv(fname, header=None, nrows=rows)
df_header = pd.read_csv(fname, nrows=rows)
else:
            df = pd.read_excel(fname, header=None, nrows=rows, engine="openpyxl")
from django.test import TestCase
from parcels.transform import cleanName
import pandas as pd
import unittest
class TestCleanName(unittest.TestCase):
def testLower(self):
self.assertEqual(cleanName('This is A test'), 'this is a test')
def testLLC(self):
self.assertEqual(cleanName('cat llLLC'), 'cat llllc')
self.assertEqual(cleanName('Llama LLC'), 'llama')
self.assertEqual(cleanName('Frog, LC'), 'frog')
self.assertEqual(cleanName('LLC Baboon, LlC'), 'llc baboon')
self.assertEqual(cleanName('229 Queen Rental LLC'), '229 queen rental')
def testSpaces(self):
self.assertEqual(cleanName(' a b c '), 'a b c')
class TestOwnerOrApplication(unittest.TestCase):
def testMissingOwnerPhone(self):
data = [['tom', None], [None, 'sally'], [None, None]]
        df = pd.DataFrame(data, columns=['owner', 'applicant'])
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Main module to
"""
# Global imports
import logging
logging.getLogger().setLevel(logging.INFO)
import os
import pickle
import glob
import dask.dataframe as dd
import pandas as pd
import numpy as np
import datetime
from pathlib import Path
from scipy.stats import rankdata
# Local imports
from ..common import constants
from .utils import vert_aggregation, split_event
from .rfdefinitions import RandomForestRegressorBC
from ..common.utils import perfscores, envyaml
from ..common.graphics import plot_crossval_stats
dir_path = os.path.dirname(os.path.realpath(__file__))
class RFTraining(object):
'''
    This is the main class that allows to prepare data for random forest
    training, train random forests and perform cross-validation of trained models
'''
def __init__(self, db_location, input_location = None,
force_regenerate_input = False):
"""
Initializes the class and if needed prepare input data for the training
Note that when calling this constructor the input data is only
generated for the central pixel (NX = NY = 0 = loc of gauge), if you
want to regenerate the inputs for all neighbour pixels, please
call the function self.prepare_input(only_center_pixel = False)
Parameters
----------
db_location : str
Location of the main directory of the database (with subfolders
'reference', 'gauge' and 'radar' on the filesystem)
input_location : str
Location of the prepared input data, if this data can not be found
in this folder, it will computed here, default is a subfolder
called rf_input_data within db_location
force_regenerate_input : bool
if True the input parquet files will always be regenerated from
the database even if already present in the input_location folder
"""
if input_location == None:
input_location = str(Path(db_location, 'rf_input_data'))
        # Check if at least gauge.parquet, reference_x0y0.parquet and radar_x0y0.parquet
# are present
valid = True
if not os.path.exists(input_location):
valid = False
os.makedirs(input_location)
files = glob.glob(str(Path(input_location, '*')))
files = [os.path.basename(f) for f in files]
if ('gauge.parquet' not in files or 'reference_x0y0.parquet' not in files
or 'radar_x0y0.parquet' not in files):
valid = False
self.input_location = input_location
self.db_location = db_location
if not valid :
logging.info('Could not find valid input data from the folder {:s}'.format(input_location))
if force_regenerate_input or not valid:
logging.info('The program will now compute this input data from the database, this takes quite some time')
self.prepare_input()
def prepare_input(self, only_center = True):
"""
Reads the data from the database in db_location and processes it to
create easy to use parquet input files for the ML training and stores
them in the input_location, the processing steps involve
For every neighbour of the station (i.e. from -1-1 to +1+1):
- Replace missing flags by nans
- Filter out timesteps which are not present in the three tables
(gauge, reference and radar)
- Filter out incomplete hours (i.e. where less than 6 10 min timesteps
are available)
- Add height above ground and height of iso0 to radar data
- Save a separate parquet file for radar, gauge and reference data
- Save a grouping_idx pickle file containing *grp_vertical*
index (groups all radar rows with same timestep and station),
*grp_hourly* (groups all timesteps with same hours) and *tstamp_unique*
(list of all unique timestamps)
Parameters
----------
only_center : bool
If set to True only the input data for the central neighbour
i.e. NX = NY = 0 (the location of the gauge) will be recomputed
this takes much less time and is the default option since until
now the neighbour values are not used in the training of the RF
QPE
"""
if only_center:
nx = [0]
ny = [0]
else:
nx = [0,1,-1]
ny = [0,1,-1]
gauge = dd.read_csv(str(Path(self.db_location, 'gauge', '*.csv.gz')),
compression='gzip',
assume_missing=True,
dtype = {'TIMESTAMP':int, 'STATION': str})
gauge = gauge.compute().drop_duplicates()
gauge = gauge.replace(-9999,np.nan)
for x in nx:
for y in ny:
logging.info('Processing neighbour {:d}{:d}'.format(x, y))
radar = dd.read_parquet(str(Path(self.db_location, 'radar',
'*.parquet')))
refer = dd.read_parquet(str(Path(self.db_location, 'reference',
'*.parquet')))
# Select only required pixel
radar = radar.loc[np.logical_and(radar['NX'] == x,
radar['NY'] == y)]
refer = refer.loc[np.logical_and(refer['NX'] == x,
refer['NY'] == y)]
# Convert to pandas and remove duplicates
radar = radar.compute().drop_duplicates(subset = ['TIMESTAMP',
'STATION',
'RADAR',
'NX','NY',
'SWEEP'])
refer = refer.compute().drop_duplicates(subset = ['TIMESTAMP',
'STATION'])
radar = radar.sort_values(by = ['TIMESTAMP','STATION','SWEEP'])
refer = refer.sort_values(by = ['TIMESTAMP','STATION'])
gauge = gauge.sort_values(by = ['TIMESTAMP','STATION'])
# Get only valid precip data
gauge = gauge[np.isfinite(gauge['RRE150Z0'])]
# Create individual 10 min - station stamps
gauge['s-tstamp'] = np.array(gauge['STATION'] +
gauge['TIMESTAMP'].astype(str)).astype(str)
radar['s-tstamp'] = np.array(radar['STATION'] +
radar['TIMESTAMP'].astype(str)).astype(str)
refer['s-tstamp'] = np.array(refer['STATION'] +
refer['TIMESTAMP'].astype(str)).astype(str)
# Get gauge and reference only when radar data available
# Find timestamps that are in the three datasets
ststamp_common = np.array(pd.Series(list(set(gauge['s-tstamp'])
.intersection(set(refer['s-tstamp'])))))
ststamp_common = np.array(pd.Series(list(set(radar['s-tstamp'])
.intersection(set(ststamp_common)))))
radar = radar.loc[radar['s-tstamp'].isin(ststamp_common)]
gauge = gauge.loc[gauge['s-tstamp'].isin(ststamp_common)]
refer = refer.loc[refer['s-tstamp'].isin(ststamp_common)]
# Filter incomplete hours
stahour = np.array(gauge['STATION'] +
((gauge['TIMESTAMP'] - 600 ) -
(gauge['TIMESTAMP'] - 600 ) % 3600).astype(str)).astype(str)
full_hours = np.array(gauge.groupby(stahour)['STATION']
.transform('count') == 6)
                refer = refer[full_hours]
                gauge = gauge[full_hours]
                radar = radar[radar['s-tstamp'].
                    isin(np.array(gauge['s-tstamp']))]
stahour = stahour[full_hours]
# Creating vertical grouping index
_, idx, grp_vertical = np.unique(radar['s-tstamp'],
return_inverse = True,
return_index = True)
# Get original order
sta_tstamp_unique = radar['s-tstamp'][np.sort(idx)]
# Preserves order and avoids sorting radar_statstamp
grp_vertical = idx[grp_vertical]
# However one issue is that the indexes are not starting from zero with increment
# of one, though they are sorted, they are like 0,7,7,7,15,15,23,23
# We want them starting from zero with step of one
grp_vertical = rankdata(grp_vertical,method='dense') - 1
# Repeat operation with gauge hours
sta_hourly_unique, idx, grp_hourly = np.unique(stahour,
return_inverse = True,
return_index = True)
grp_hourly = idx[grp_hourly]
# Add derived variables height iso0 (HISO) and height above ground (HAG)
# Radar
stations = constants.METSTATIONS
cols = list(stations.columns)
cols[1] = 'STATION'
stations.columns = cols
radar = pd.merge(radar,stations, how = 'left', on = 'STATION',
sort = False)
radar['HISO'] = -radar['T'] / constants.LAPSE_RATE * 100
radar['HAG'] = radar['HEIGHT'] - radar['Z']
                radar.loc[radar['HAG'] < 0, 'HAG'] = 0
# Gauge
gauge['minutes'] = (gauge['TIMESTAMP'] % 3600)/60
# Save all to file
refer.to_parquet(str(Path(self.input_location,
'reference_x{:d}y{:d}.parquet'.format(x,y))),
compression = 'gzip', index = False)
radar.to_parquet(str(Path(self.input_location,
'radar_x{:d}y{:d}.parquet'.format(x,y))),
compression = 'gzip', index = False)
grp_idx = {}
grp_idx['grp_vertical'] = grp_vertical
grp_idx['grp_hourly'] = grp_hourly
grp_idx['tstamp_unique'] = sta_tstamp_unique
pickle.dump(grp_idx,
open(str(Path(self.input_location,
'grouping_idx_x{:d}y{:d}.p'.format(x,y))),'wb'))
if x == 0 and y == 0:
# Save only gauge for center pixel since it's available only there
gauge.to_parquet(str(Path(self.input_location, 'gauge.parquet')),
compression = 'gzip', index = False)
def fit_models(self, config_file, features_dic, tstart = None, tend = None,
output_folder = None):
"""
Fits a new RF model that can be used to compute QPE realizations and
saves them to disk in pickle format
Parameters
----------
config_file : str
Location of the RF training configuration file, if not provided
the default one in the ml submodule will be used
features_dic : dict
A dictionary whose keys are the names of the models you want to
create (a string) and the values are lists of features you want to
use. For example {'RF_dualpol':['RADAR', 'zh_VISIB_mean',
'zv_VISIB_mean','KDP_mean','RHOHV_mean','T', 'HEIGHT','VISIB_mean']}
will train a model with all these features that will then be stored
under the name RF_dualpol_BC_<type of BC>.p in the ml/rf_models dir
tstart : datetime
the starting time of the training time interval, default is to start
at the beginning of the time interval covered by the database
tend : datetime
the end time of the training time interval, default is to end
at the end of the time interval covered by the database
output_folder : str
Location where to store the trained models in pickle format,
if not provided it will store them in the standard location
<library_path>/ml/rf_models
"""
if output_folder is None:
output_folder = str(Path(dir_path, 'rf_models'))
try:
config = envyaml(config_file)
except Exception:
logging.warning('Using default config as no valid config file was provided')
config_file = dir_path + '/default_config.yml'
config = envyaml(config_file)
#######################################################################
# Read data
#######################################################################
logging.info('Loading input data')
radartab = pd.read_parquet(str(Path(self.input_location, 'radar_x0y0.parquet')))
gaugetab = pd.read_parquet(str(Path(self.input_location, 'gauge.parquet')))
grp = pickle.load(open(str(Path(self.input_location, 'grouping_idx_x0y0.p')),'rb'))
grp_vertical = grp['grp_vertical']
vweights = 10**(config['VERT_AGG']['BETA'] * (radartab['HEIGHT']/1000.)) # vert. weights
###############################################################################
# Compute additional data if needed
###############################################################################
# currently the only supported additional features are zh/zv (reflectivity in linear units)
# and DIST_TO_RAD{A-D-L-W-P} (distance to the individual radars)
# Get list of unique features names
features = np.unique([item for sub in list(features_dic.values())
for item in sub])
for f in features:
if 'zh' in f:
logging.info('Computing derived variable {:s}'.format(f))
radartab[f] = 10**(0.1 * radartab[f.replace('zh','ZH')])
elif 'zv' in f:
logging.info('Computing derived variable {:s}'.format(f))
radartab[f] = 10**(0.1 * radartab[f.replace('zv','ZV')])
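# Worked example of the dB -> linear conversion above: ZH = 30 dBZ gives zh = 10**(0.1 * 30) = 1000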
if 'DIST_TO_RAD' in f:
info_radar = constants.RADARS
vals = np.unique(radartab['RADAR'])
for val in vals:
dist = np.sqrt((radartab['X'] - info_radar['X'][val])**2+
(radartab['Y'] - info_radar['Y'][val])**2) / 1000.
radartab['DIST_TO_RAD' + str(val)] = dist
###############################################################################
# Compute data filter
###############################################################################
filterconf = config['FILTERING']
logging.info('Computing data filter')
logging.info('List of stations to ignore {:s}'.format(','.join(filterconf['STA_TO_REMOVE'])))
logging.info('Start time {:s}'.format(str(tstart)))
logging.info('End time {:s}'.format(str(tend)))
logging.info('ZH must be > {:f} if R <= {:f}'.format(filterconf['CONSTRAINT_MIN_ZH'][1],
filterconf['CONSTRAINT_MIN_ZH'][0]))
logging.info('ZH must be < {:f} if R <= {:f}'.format(filterconf['CONSTRAINT_MAX_ZH'][1],
filterconf['CONSTRAINT_MAX_ZH'][0]))
ZH_agg = vert_aggregation(pd.DataFrame(radartab['ZH_mean'])
import logging
import time
from datetime import datetime, timedelta
from pprint import pprint
import ccxt
import pandas as pd
from project.server.main.utils.db import db_insert, db_insert_many, db_fetch, db_aggregate, db_insert_test
from project.server.main.utils.utils import f, get_time, transform_time_ccxt, get_json, f_btc, percentage, \
map_portfolio, os_get, integer
def portfolio():
start = time.perf_counter()
print("TASK: portfolio started")
logging.info("TASK: portfolio started")
binance = ccxt.binance({
"apiKey": os_get("BINANCE_KEY"),
"secret": os_get("BINANCE_SECRET"),
'enableRateLimit': True,
})
binance.load_markets()
bitmex = ccxt.bitmex({
"apiKey": os_get("BITMEX_KEY"),
"secret": os_get("BITMEX_SECRET"),
'options': {
'api-expires': 86400, # 1 day for the sake of experiment
},
'enableRateLimit': True,
})
bitmex.load_markets()
portfolio_24h = db_fetch("SELECT * FROM db.portfolio WHERE timestamp > '%s' ORDER BY timestamp ASC LIMIT 1;" % str(
datetime.utcnow() - timedelta(hours=24)))
portfolio_24h = map_portfolio(portfolio_24h)[0] if portfolio_24h else None
# pprint(map_portfolio(portfolio_24h))
portfolio_1w = db_fetch("SELECT * FROM db.portfolio WHERE timestamp > '%s' ORDER BY timestamp ASC LIMIT 1;" % str(
datetime.utcnow() - timedelta(days=7)))
portfolio_1w = map_portfolio(portfolio_1w)[0] if portfolio_1w else None
atari = None
atari_amount = 1760
atari = get_json("https://api.nomics.com/v1/currencies/ticker?key=" + os_get(
'NOMICS_KEY') + "&ids=ATRI&interval=1d,30d,7d&convert=USD")
atari = atari[0] if atari else None
# print(atari)
# pprint(list(portfolio_1w))
# pprint(map_portfolio(portfolio_1w))
def get_price(exchange, curr: str):
if curr == 'USDT':
return 1.0
else:
try:
tick = exchange.fetchTicker(curr + '/USDT')
mid_point = tick['bid']
return round(float(mid_point), 2)
except Exception:
return None
def get_price_btc(exchange, curr: str):
if curr == 'BTC':
return 1.0
else:
try:
tick = exchange.fetchTicker(curr + '/BTC')
mid_point = tick['bid']
return mid_point
except Exception:
return None
btc_usd = binance.fetchTicker('BTC/USDT')['bid']
eth_usd = binance.fetchTicker('ETH/USDT')['bid']
##################################################################################################################
# BINANCE BALANCES
##################################################################################################################
def get_binance_balances(exchange):
binance_balance = exchange.fetch_balance()['info']['balances']
balances = []
for i, obj in enumerate(binance_balance):
used = f(obj['locked'])
used = used if (used > 0.001 and obj['asset'] != "BTC") else 0.0
free = f(obj['free'])
free = free if (free > 0.001 and obj['asset'] != "BTC") else 0.0
total = f(used + free)
if total and total > 0.0:
bid_price = get_price(exchange, obj['asset'])
if bid_price and round(bid_price * total, 2) > 10.0:
bid_price_btc = get_price_btc(exchange, obj['asset'])
balance = {
'timestamp': str(get_time()),
'currency': obj['asset'],
'amount': total,
'price': f(bid_price),
'price_btc': f_btc(bid_price_btc),
'balance': f(bid_price * total),
'balance_btc': f_btc((bid_price * total) / btc_usd),
'used': used,
'free': free
}
used_percentage = percentage(free, total) * -1 if free != 0 else 100
used_percentage = 100 if 100 > used_percentage > 98 else used_percentage
balance['used_percentage'] = used_percentage
balances.append(balance)
return balances
binance_balances = get_binance_balances(binance)
binance_balances = sorted(binance_balances, key=lambda d: d['balance'], reverse=True)
# pprint(binance_balances)
db_insert_many('binance_balances', binance_balances)
get_binance_balances_df = pd.DataFrame(binance_balances)
# Copyright 2018 <NAME> <EMAIL>
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pandas as pd
import numpy as np
import os
import warnings
from .dataset import DataSet
from .dataframe_tools import *
from .exceptions import FailedReindexWarning, ReindexMapError
class Brca(DataSet):
def __init__(self, version="latest"):
"""Load all of the brca dataframes as values in the self._data dict variable, with names as keys, and format them properly."""
# Set some needed variables, and pass them to the parent DataSet class __init__ function
valid_versions = ["3.1", "3.1.1"] # This keeps a record of all versions that the code is equipped to handle. That way, if there's a new data release but they didn't update their package, it won't try to parse the new data version it isn't equipped to handle.
data_files = {
"3.1": [
"prosp-brca-v3.1-acetylome-ratio-norm-NArm.gct.gz",
"prosp-brca-v3.1-gene-level-cnv-gistic2-all_data_by_genes.gct.gz",
"prosp-brca-v3.1-phosphoproteome-ratio-norm-NArm.gct.gz",
"prosp-brca-v3.1-proteome-ratio-norm-NArm.gct.gz",
"prosp-brca-v3.1-rnaseq-fpkm-log2-row-norm-2comp.gct.gz",
"prosp-brca-v3.1-sample-annotation.csv.gz"],
"3.1.1": [
"Breast_One_Year_Clinical_Data_20160927.xls",
"prosp-brca-v3.0-v1.4.somatic.variants.070918.maf.gz",
"prosp-brca-v3.1-acetylome-ratio-norm-NArm.gct.gz",
"prosp-brca-v3.1-gene-level-cnv-gistic2-all_data_by_genes.gct.gz",
"prosp-brca-v3.1-phosphoproteome-ratio-norm-NArm.gct.gz",
"prosp-brca-v3.1-proteome-ratio-norm-NArm.gct.gz",
"prosp-brca-v3.1-rnaseq-fpkm-log2-row-norm-2comp.gct.gz",
"prosp-brca-v3.1-sample-annotation.csv.gz"],
}
super().__init__(cancer_type="brca", version=version, valid_versions=valid_versions, data_files=data_files)
# Load the data into dataframes in the self._data dict
loading_msg = "Loading dataframes"
for file_path in self._data_files_paths: # Loops through files variable
# Print a loading message. We add a dot every time, so the user knows it's not frozen.
loading_msg = loading_msg + "."
print(loading_msg, end='\r')
path_elements = file_path.split(os.sep) # Get a list of the levels of the path
file_name = path_elements[-1] # The last element will be the name of the file
if file_name == "prosp-brca-v3.1-acetylome-ratio-norm-NArm.gct.gz":
df = pd.read_csv(file_path, sep='\t', skiprows=2, dtype=object) # First two rows of file aren't part of the dataframe. Also, due to extra metadata rows we're going to remove, all cols have mixed types, so we pass dtype=object for now.
df = df[df["GeneSymbol"] != "na"] # There are several metadata rows at the beginning of the dataframe, which duplicate the clinical and derived_molecular dataframes. They all don't have a value for GeneSymbol, so we'll use that to filter them out.
# Prepare some columns we'll need later for the multiindex
df["variableSites"] = df["variableSites"].str.replace(r"[a-z\s]", "") # Get rid of all lowercase delimeters and whitespace in the sites
df = df.rename(columns={
"GeneSymbol": "Name",
"variableSites": "Site",
"sequence": "Peptide", # We take this instead of sequenceVML, to match the other datasets' format
"accession_numbers": "Database_ID" # We take all accession numbers they have, instead of the singular accession_number column
})
# Some rows have at least one localized acetylation site, but also have other acetylations that aren't localized. We'll drop those rows, if their localized sites are duplicated in another row, to avoid creating duplicates, because we only preserve information about the localized sites in a given row. However, if the localized sites aren't duplicated in another row, we'll keep the row.
split_ids = df["id"].str.split('_', expand=True)
unlocalized_to_drop = df.index[~split_ids[3].eq(split_ids[4]) & df.duplicated(["Name", "Site", "Peptide", "Database_ID"], keep=False)] # Column 3 of the split "id" column is number of phosphorylations detected, and column 4 is number of phosphorylations localized, so if the two values aren't equal, the row has at least one unlocalized site
df = df.drop(index=unlocalized_to_drop)
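# e.g. an "id" whose 4th and 5th underscore-separated fields read 2 and 1 had two acetylation
# sites detected but only one localized, so it is a candidate for the drop above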
# Give it a multiindex
df = df.set_index(["Name", "Site", "Peptide", "Database_ID"])
df = df.drop(columns=["id", "id.description", "geneSymbol", "numColumnsVMsiteObserved", "bestScore", "bestDeltaForwardReverseScore",
"Best_scoreVML", "sequenceVML", "accessionNumber_VMsites_numVMsitesPresent_numVMsitesLocalizedBest_earliestVMsiteAA_latestVMsiteAA",
"protein_mw", "species", "speciesMulti", "orfCategory", "accession_number", "protein_group_num", "entry_name"]) # We don't need these. The dropped columns include a "geneSymbol" column that is a duplicate of the original GeneSymbol.
df = df.apply(pd.to_numeric) # Now that we've dropped all the extra metadata columns, convert everything to floats.
df = df.sort_index()
df = df.transpose()
df = df.sort_index()
df.index.name = "Patient_ID"
self._data["acetylproteomics"] = df
elif file_name == "prosp-brca-v3.1-gene-level-cnv-gistic2-all_data_by_genes.gct.gz":
df = pd.read_csv(file_path, sep='\t', skiprows=2, index_col=0, dtype=object) # First two rows of file aren't part of the dataframe. Also, due to extra metadata rows we're going to remove, all cols have mixed types, so we pass dtype=object for now.
df = df[df["geneSymbol"] != "na"] # There are several metadata rows at the beginning of the dataframe, which duplicate the clinical and derived_molecular dataframes. They all don't have a value for geneSymbol, so we'll use that to filter them out.
df = df.drop(columns="Cytoband")
df["geneSymbol"] = df["geneSymbol"].str.rsplit('|', n=1, expand=True)[0] # Some of the geneSymbols have the gene IDs appended to them, to get rid of duplicates. We're going to create a multiindex with all the gene names and gene IDs, so we can drop the appended IDs.
df = df.rename(columns={"geneSymbol": "Name", "Gene.ID": "Database_ID"})
df = df.set_index(["Name", "Database_ID"])
df = df.apply(pd.to_numeric) # Now that we've dropped all the extra metadata columns, convert everything to floats.
df = df.sort_index()
df = df.transpose()
df = df.sort_index()
df.index.name = "Patient_ID"
self._data["CNV"] = df
elif file_name == "prosp-brca-v3.1-phosphoproteome-ratio-norm-NArm.gct.gz":
df = pd.read_csv(file_path, sep='\t', skiprows=2, dtype=object) # First two rows of file aren't part of the dataframe. Also, due to extra metadata rows we're going to remove, all cols have mixed types, so we pass dtype=object for now.
df = df[df["GeneSymbol"] != "na"] # There are several metadata rows at the beginning of the dataframe, which duplicate the clinical and derived_molecular dataframes. They all don't have a value for GeneSymbol, so we'll use that to filter them out.
# Prepare some columns we'll need later for the multiindex
df["variableSites"] = df["variableSites"].str.replace(r"[a-z\s]", "") # Get rid of all lowercase delimeters and whitespace in the sites
df = df.rename(columns={
"GeneSymbol": "Name",
"variableSites": "Site",
"sequence": "Peptide", # We take this instead of sequenceVML, to match the other datasets' format
"accession_numbers": "Database_ID" # We take all accession numbers they have, instead of the singular accession_number column
})
# Some rows have at least one localized phosphorylation site, but also have other phosphorylations that aren't localized. We'll drop those rows, if their localized sites are duplicated in another row, to avoid creating duplicates, because we only preserve information about the localized sites in a given row. However, if the localized sites aren't duplicated in another row, we'll keep the row.
split_ids = df["id"].str.split('_', expand=True)
unlocalized_to_drop = df.index[~split_ids[3].eq(split_ids[4]) & df.duplicated(["Name", "Site", "Peptide", "Database_ID"], keep=False)] # Column 3 of the split "id" column is number of phosphorylations detected, and column 4 is number of phosphorylations localized, so if the two values aren't equal, the row has at least one unlocalized site
df = df.drop(index=unlocalized_to_drop)
# Give it a multiindex
df = df.set_index(["Name", "Site", "Peptide", "Database_ID"])
df = df.drop(columns=["id", "id.description", "geneSymbol", "numColumnsVMsiteObserved", "bestScore", "bestDeltaForwardReverseScore",
"Best_scoreVML", "Best_numActualVMSites_sty", "Best_numLocalizedVMsites_sty", "sequenceVML",
"accessionNumber_VMsites_numVMsitesPresent_numVMsitesLocalizedBest_earliestVMsiteAA_latestVMsiteAA", "protein_mw", "species",
"speciesMulti", "orfCategory", "accession_number", "protein_group_num", "entry_name"]) # We don't need these. The dropped columns include a "geneSymbol" column that is a duplicate of the original GeneSymbol.
df = df.apply(pd.to_numeric) # Now that we've dropped all the extra metadata columns, convert everything to floats.
df = df.sort_index()
df = df.transpose()
df = df.sort_index()
df.index.name = "Patient_ID"
self._data["phosphoproteomics"] = df
elif file_name == "prosp-brca-v3.1-proteome-ratio-norm-NArm.gct.gz":
df = pd.read_csv(file_path, sep='\t', skiprows=2, dtype=object) # First two rows of file aren't part of the dataframe. Also, due to extra metadata rows we're going to remove, all cols have mixed types, so we pass dtype=object for now.
df = df[df["GeneSymbol"] != "na"] # There are several metadata rows at the beginning of the dataframe, which duplicate the clinical and derived_molecular dataframes. They all don't have a value for GeneSymbol, so we'll use that to filter them out.
df = df.rename(columns={"GeneSymbol": "Name", "accession_numbers": "Database_ID"})
df = df.set_index(["Name", "Database_ID"])
df = df.drop(columns=["id", "id.description", "geneSymbol", "numColumnsProteinObserved", "numSpectraProteinObserved",
"protein_mw", "percentCoverage", "numPepsUnique", "scoreUnique", "species", "orfCategory", "accession_number",
"subgroupNum", "entry_name"]) # We don't need these. The dropped columns include a "geneSymbol" column that is a duplicate of GeneSymbol.
df = df.apply(pd.to_numeric) # Now that we've dropped all the extra metadata columns, convert everything to floats.
df = df.sort_index()
df = df.transpose()
df = df.sort_index()
df.index.name = "Patient_ID"
self._data["proteomics"] = df
elif file_name == "prosp-brca-v3.1-rnaseq-fpkm-log2-row-norm-2comp.gct.gz":
df = pd.read_csv(file_path, sep='\t', skiprows=2, index_col=0, dtype=object) # First two rows of file aren't part of the dataframe. Also, due to extra metadata rows we're going to remove, all cols have mixed types, so we pass dtype=object for now.
df = df[df["geneSymbol"] != "na"] # There are several metadata rows at the beginning of the dataframe, which duplicate the clinical and derived_molecular dataframes. They all don't have a value for GeneSymbol, so we'll use that to filter them out.
df = df.set_index("geneSymbol")
df = df.drop(columns="description") # We don't need this.
df = df.apply(pd.to_numeric) # Now that we've dropped all the extra metadata columns, convert everything to floats.
df = df.sort_index()
df = df.transpose()
df = df.sort_index()
df.index.name = "Patient_ID"
self._data["transcriptomics"] = df
elif file_name == "prosp-brca-v3.1-sample-annotation.csv.gz":
df = pd.read_csv(file_path, index_col=0)
import pandas as pd
import pytest
from .. import testing
def test_frames_equal_not_frames():
frame = pd.DataFrame({'a': [1]})
with pytest.raises(AssertionError) as info:
testing.assert_frames_equal(frame, 1)
assert str(info.value) == 'Inputs must both be pandas DataFrames.'
def test_frames_equal_mismatched_columns():
expected = pd.DataFrame({'a': [1]})
actual = pd.DataFrame({'b': [2]})
try:
testing.assert_frames_equal(actual, expected)
except AssertionError:
pass
else:
raise AssertionError
def test_frames_equal_mismatched_rows():
expected = pd.DataFrame({'a': [1]}, index=[0])
actual = pd.DataFrame({'a': [1]}, index=[1])
try:
testing.assert_frames_equal(actual, expected)
except AssertionError:
pass
else:
raise AssertionError
def test_frames_equal_mismatched_items():
expected = pd.DataFrame({'a': [1]})
actual = pd.DataFrame({'a': [2]})
try:
testing.assert_frames_equal(actual, expected)
except AssertionError:
pass
else:
raise AssertionError
def test_frames_equal():
frame = pd.DataFrame({'a': [1]})
testing.assert_frames_equal(frame, frame)
def test_frames_equal_close():
frame1 = pd.DataFrame({'a': [1]})
frame2 = pd.DataFrame({'a': [1.00000000000002]})
with pytest.raises(AssertionError):
testing.assert_frames_equal(frame1, frame2)
testing.assert_frames_equal(frame1, frame2, use_close=True)
def test_index_equal_order_agnostic():
left = pd.Index([1, 2, 3])
right = pd.Index([3, 2, 1])
testing.assert_index_equal(left, right)
def test_index_equal_order_agnostic_raises_left():
left = pd.Index([1, 2, 3, 4])
import copy
import glob
import os
import sys
import pprint
from itertools import groupby
from textwrap import wrap
import numpy as np
import pandas as pd
import pylab as plt
import tqdm
from .. import haven_jobs as hjb
from .. import haven_utils as hu
from .. import haven_share as hd
def get_score_df(
exp_list,
savedir_base,
filterby_list=None,
columns=None,
score_columns=None,
verbose=True,
wrap_size=8,
hparam_diff=0,
flatten_columns=True,
show_meta=True,
show_max_min=True,
add_prefix=False,
score_list_name="score_list.pkl",
in_latex_format=False,
avg_across=None,
return_columns=False,
show_exp_ids=False,
):
"""Get a table showing the scores for the given list of experiments
Parameters
----------
exp_list : list
A list of experiments, each defines a single set of hyper-parameters
columns : list, optional
a list of columns you would like to display, by default None
savedir_base : str, optional
A directory where experiments are saved
Returns
-------
DataFrame
a dataframe showing the scores obtained by the experiments
Example
-------
>>> from haven import haven_results as hr
>>> savedir_base='../results/isps/'
>>> exp_list = hr.get_exp_list(savedir_base=savedir_base,
>>> filterby_list=[{'sampler':{'train':'basic'}}])
>>> df = hr.get_score_df(exp_list, savedir_base=savedir_base, columns=['train_loss', 'exp_id'])
>>> print(df)
"""
if len(exp_list) == 0:
if verbose:
print("exp_list is empty...")
if return_columns:
return pd.DataFrame([]), [], []
else:
return pd.DataFrame([])
exp_list = hu.filter_exp_list(exp_list, filterby_list, savedir_base=savedir_base, verbose=verbose)
# aggregate results
hparam_list = set()
result_list = []
for exp_dict in exp_list:
result_dict = {}
exp_id = hu.hash_dict(exp_dict)
if avg_across is not None:
tmp_dict = copy.deepcopy(exp_dict)
del tmp_dict[avg_across]
result_dict["_" + avg_across] = hu.hash_dict(tmp_dict)
savedir = os.path.join(savedir_base, exp_id)
score_list_fname = os.path.join(savedir, score_list_name)
exp_dict_fname = os.path.join(savedir, "exp_dict.json")
if flatten_columns:
exp_dict_flat = hu.flatten_column(exp_dict, flatten_list=True)
else:
exp_dict_flat = exp_dict
hparam_columns = columns or list(exp_dict_flat.keys())
for hc in hparam_columns:
hparam_list.add(hc)
for k in hparam_columns:
if k == "exp_id":
continue
if add_prefix:
k_new = "(hparam) " + k
else:
k_new = k
if k not in exp_dict_flat:
continue
result_dict[k_new] = exp_dict_flat[k]
if os.path.exists(score_list_fname) and show_meta:
result_dict["started_at"] = hu.time_to_montreal(exp_dict_fname)
result_dict["creation_time"] = os.path.getctime(exp_dict_fname)
else:
result_dict["creation_time"] = -1
if show_exp_ids or "exp_id" in hparam_columns:
result_dict["exp_id"] = exp_id
# hparam_columns = [k for k in result_dict.keys() if k not in ['creation_time']]
if not os.path.exists(score_list_fname):
if verbose:
print("%s: %s is missing" % (exp_id, score_list_name))
else:
try:
score_list = hu.load_pkl(score_list_fname)
except Exception:
print("%s: %s is corrupt" % (exp_id, score_list_name))
score_df = pd.DataFrame(score_list)
metric_columns = score_columns or score_df.columns
if len(score_list):
for k in metric_columns:
if k not in score_df.columns:
continue
v = np.array(score_df[k])
if "float" in str(v.dtype):
v = v[~np.isnan(v)]
if len(v):
if add_prefix:
k_new = "(metric) " + k
else:
k_new = k
if "float" in str(v.dtype):
result_dict[k_new] = v[-1]
if show_max_min:
result_dict[k_new + " (max)"] = v.max()
result_dict[k_new + " (min)"] = v.min()
else:
result_dict[k_new] = v[-1]
result_list += [result_dict]
# create table
df = pd.DataFrame(result_list)
"""shell
pip install -r https://raw.githubusercontent.com/datamllab/automl-in-action-notebooks/master/requirements.txt
"""
"""
### Load the California housing price prediction dataset
"""
from sklearn.datasets import fetch_california_housing
house_dataset = fetch_california_housing()
# Import pandas package to format the data
import pandas as pd
# Extract features with their names into a dataframe format
data = pd.DataFrame(house_dataset.data, columns=house_dataset.feature_names)
# Extract the target into a pd.Series object named MEDV
target = pd.Series(house_dataset.target, name="MEDV")
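# Quick sanity check (a sketch): features and target should align row-wise,
# e.g. data.shape == (20640, 8) and target.shape == (20640,) for this dataset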
"""
This module contains function to plot smooth ROC curves using KFold
Examples:
result, aucs = roc_curve_cv(xgb.XGBClassifier(), X, y, n_splits=6)
plot_roc_curve_cv(result)
plt.show()
plot_specificity_cv(result)
plt.show()
plot_specificity_cv(result, invert_x=True, invert_y=True)
plt.show()
print(f"AUC: {np.mean(aucs)} (std:{np.std(aucs)})")
Comparing models:
result_xgb, aucs = roc_curve_cv(xgb.XGBClassifier(), X, y, n_splits=6, n_repeats=4)
result_rf, aucs = roc_curve_cv(RandomForestClassifier(), X, y, n_splits=6, n_repeats=4)
plot_specificity_cv({'XGB': result_xgb, 'RF':result_rf})
plt.show()
Comparing hyperparameters
results = []
for max_depth in (3,10):
for max_features in (0.5, 0.9):
result, _ = roc_curve_cv(
RandomForestClassifier(max_depth=max_depth, max_features=max_features),
x_full, y_full, n_repeats=4,
properties={'max features':max_features, 'max depth':max_depth})
results.append(result)
plot_specificity_cv(results, hue='max features', style='max depth', ci=False)
plt.show()
plot_roc_curve_cv(results, hue='max features', style='max depth', ci=False)
plt.show()
"""
from sklearn.model_selection import StratifiedKFold, RepeatedStratifiedKFold
from numpy import interp
import numpy as np
from sklearn.metrics import roc_curve, auc
import seaborn as sns
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.preprocessing import label_binarize
def roc_curve_simple(model, X, y):
y_pred = model.predict_proba(X)[:,1]
fpr, tpr, thres = roc_curve(y, y_pred)
result_df = pd.DataFrame({'fpr':fpr, 'tpr':tpr, 'threshold':thres}, index=range(len(fpr)))
return result_df, auc(fpr,tpr)
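# Example usage (a sketch, assuming `model` is a fitted classifier with predict_proba):
#   result_df, auc_value = roc_curve_simple(model, X_test, y_test)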
def roc_curve_cv(model, X, y, n_splits=5, n_repeats=1, properties=None):
if n_repeats > 1:
cv = RepeatedStratifiedKFold(n_splits=n_splits, n_repeats=n_repeats)
else:
cv = StratifiedKFold(n_splits=n_splits)
auc_list = []
result_df = pd.DataFrame()
for i, (train, test) in enumerate(cv.split(X, y)):
x_train, x_test = X.iloc[train], X.iloc[test]
y_train, y_test = y.iloc[train], y.iloc[test]
model.fit(x_train, y_train)
y_test_pred = model.predict_proba(x_test)[:,1]
fpr, tpr, thres = roc_curve(y_test, y_test_pred)
# x_label = "False Positive Rate"
# y_label = "True Positive Rate"
df = pd.DataFrame({'run':i, 'fpr':fpr, 'tpr':tpr, 'threshold':thres}, index=range(len(fpr)))
result_df = pd.concat([result_df, df])
auc_list.append(auc(fpr,tpr))
if properties is not None:
for key, value, in properties.items():
result_df[key] = value
return result_df, auc_list
def plot_roc_curve_cv(result, n_step=100, title=None, **kwargs):
"""
plot the ROC curve with a confidence interval
"""
fpr_linspace = np.linspace(0,1,n_step)
tpr_df = pd.DataFrame()
x_label = "False Positive Rate"
y_label = "True Positive Rate"
if isinstance(result, dict):
for key, value in result.items():
value['model'] = key
result = pd.concat(result.values())
kwargs['hue'] = 'model'
elif isinstance(result, list):
result = pd.concat(result)
result = result.rename(columns={'tpr':y_label, 'fpr':x_label})
group_cols = list(set(result.columns)-{x_label, y_label,'threshold'})
for name, group in result.groupby(group_cols):
df = pd.DataFrame(columns=[y_label, x_label]+group_cols)
df[y_label] = interp(fpr_linspace, group[x_label], group[y_label])
df[x_label] = fpr_linspace
df[group_cols] = name
tpr_df = pd.concat([tpr_df,df])
fig = plt.axes()
sns.lineplot(x=x_label, y=y_label, data=tpr_df, **kwargs)
if title is None:
title = "Roc curve cv"
fig.set_title(title)
return fig
def plot_specificity_cv(result, n_step=100, invert_x=False, invert_y=False, title=None, **kwargs):
"""
plot the curve of the specificity as a function of the sensitivity
"""
tpr_linspace = np.linspace(0,1,n_step)
fpr_df = pd.DataFrame()
if isinstance(result, dict):
for key, value in result.items():
value['model'] = key
result = pd.concat(result.values())
kwargs['hue'] = 'model'
elif isinstance(result, list):
result = pd.concat(result)
group_cols = list(set(result.columns)-{'fpr','tpr','threshold'})
for name, group in result.groupby(group_cols):
df = pd.DataFrame(columns=['tpr', 'fpr']+group_cols)
df['fpr'] = interp(tpr_linspace, group['tpr'], group['fpr'])[:-1]
df['tpr'] = tpr_linspace[:-1]
df[group_cols]=name
fpr_df = pd.concat([fpr_df,df])
if invert_x:
x_label = 'False Negative Rate'
fpr_df[x_label] = 1-fpr_df['tpr']
else:
x_label = 'Sensitivity'
fpr_df[x_label] = fpr_df['tpr']
if invert_y:
y_label = 'False Positive Rate'
fpr_df[y_label] = fpr_df['fpr']
else:
y_label = 'Specificity'
fpr_df[y_label] = 1-fpr_df['fpr']
fig = plt.axes()
sns.lineplot(x=x_label, y=y_label, data=fpr_df, **kwargs)
if title is None:
title = "Specificity vs Sensitivity"
fig.set_title(title)
return fig
def plot_roc_threshold_cv(result, n_step=101, title=None, tpr=True, fpr=True, tnr=False, fnr=False, **kwargs):
"""
plot the true/false positive (and optionally negative) rates as a function of the decision threshold
"""
fpr_linspace = np.linspace(0,1,n_step)
tpr_df = pd.DataFrame()
if isinstance(result, dict):
for key, value in result.items():
value['model'] = key
result = pd.concat(result.values())
kwargs['hue'] = 'model'
elif isinstance(result, list):
result = pd.concat(result)
threshold_dfs = []
group_cols = list(set(result.columns)-{'fpr','tpr','threshold'})
for name, group in result.groupby(group_cols):
group = group.sort_values(by='threshold')
if fpr:
df = pd.DataFrame(columns=['rate', 'metric','threshold']+group_cols)
df['rate'] = interp(fpr_linspace, group['threshold'], group['fpr'])
df['threshold'] = fpr_linspace
df['metric'] = 'FPR'
df[group_cols] = name
threshold_dfs.append(df)
if tpr:
df = pd.DataFrame(columns=['rate', 'metric','threshold']+group_cols)
df['rate'] = interp(fpr_linspace, group['threshold'], group['tpr'])
df['threshold'] = fpr_linspace
df['metric'] = 'TPR'
df[group_cols] = name
threshold_dfs.append(df)
if tnr:
df = pd.DataFrame(columns=['rate', 'metric','threshold']+group_cols)
df['rate'] = 1- interp(fpr_linspace, group['threshold'], group['fpr'])
df['threshold'] = fpr_linspace
df['metric'] = 'TNR'
df[group_cols] = name
threshold_dfs.append(df)
if fnr:
df = pd.DataFrame(columns=['rate', 'metric','threshold']+group_cols)
df['rate'] = 1- interp(fpr_linspace, group['threshold'], group['tpr'])
df['threshold'] = fpr_linspace
df['metric'] = 'FNR'
df[group_cols] = name
threshold_dfs.append(df)
threshold_df = pd.concat(threshold_dfs)
import numpy as np
import pytest
import pandas as pd
import pandas._testing as tm
@pytest.fixture
def data():
return pd.array(
[True, False] * 4 + [np.nan] + [True, False] * 44 + [np.nan] + [True, False],
dtype="boolean",
)
@pytest.mark.parametrize(
"ufunc", [np.add, np.logical_or, np.logical_and, np.logical_xor]
)
def test_ufuncs_binary(ufunc):
# two BooleanArrays
a = pd.array([True, False, None], dtype="boolean")
result = ufunc(a, a)
expected = pd.array(ufunc(a._data, a._data), dtype="boolean")
expected[a._mask] = np.nan
tm.assert_extension_array_equal(result, expected)
s = pd.Series(a)
result = ufunc(s, a)
expected = pd.Series(ufunc(a._data, a._data), dtype="boolean")
expected[a._mask] = np.nan
tm.assert_series_equal(result, expected)
# Boolean with numpy array
arr = np.array([True, True, False])
result = ufunc(a, arr)
expected = pd.array(ufunc(a._data, arr), dtype="boolean")
expected[a._mask] = np.nan
tm.assert_extension_array_equal(result, expected)
result = ufunc(arr, a)
expected = pd.array(ufunc(arr, a._data), dtype="boolean")
expected[a._mask] = np.nan
tm.assert_extension_array_equal(result, expected)
# BooleanArray with scalar
result = ufunc(a, True)
expected = pd.array(ufunc(a._data, True), dtype="boolean")
expected[a._mask] = np.nan
tm.assert_extension_array_equal(result, expected)
result = ufunc(True, a)
expected = pd.array(ufunc(True, a._data), dtype="boolean")
expected[a._mask] = np.nan
tm.assert_extension_array_equal(result, expected)
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 27 09:57:39 2020
@author: emwe9516
"""
#import sklearn
from sklearn import svm
from sklearn import metrics
import pandas as pd # for reading file
import numpy as np
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import GridSearchCV
import rpy2.robjects as robjects
from timeit import default_timer as timer
robjects.r['load']("C:\\Users\\emwe9516\\Desktop\\master_thesis_stats-master\\dtm_train.RData")
dtm_train = robjects.r['dtm_train']
dtm_train = robjects.r['as.matrix'](dtm_train)
dtm_train = np.array(dtm_train)
robjects.r['load']("C:\\Users\\emwe9516\\Desktop\\master_thesis_stats-master\\dtm_test.RData")
dtm_test = robjects.r['dtm_test']
dtm_test = robjects.r['as.matrix'](dtm_test)
dtm_test = np.array(dtm_test)
#import sys
#print("Python version")
#print (sys.version)
#print('The scikit-learn version is {}.'.format(sklearn.__version__))
# The scikit-learn version is 0.21.3
#data = pd.read_csv('C:\\Users\\emwe9516\\Desktop\\master_thesis_stats-master\\dtm_train.txt', sep=" ")
#data_test = pd.read_csv('C:\\Users\\emwe9516\\Desktop\\master_thesis_stats-master\\dtm_test.txt', sep=" ")
#dtm_train = data.to_numpy()
#dtm_test = data_test.to_numpy()
ytrain = pd.read_csv('C:\\Users\\emwe9516\\Desktop\\master_thesis_stats-master\\y.train', sep=" ")
ytrain = ytrain['x'].astype('category')
ytest = pd.read_csv('C:\\Users\\emwe9516\\Desktop\\master_thesis_stats-master\\y.test', sep=" ")
ytest = ytest['x'].astype('category')
#def my_kernel(X, Y):
# return np.dot(X, Y.T)
#lin = svm.LinearSVC()
#lin.fit(dtm_train, ytrain)
#preds = lin.predict(dtm_test)
#metrics.accuracy_score(ytest, preds)
##################################################
## Testing
#clf = svm.SVC(decision_function_shape='ovo', kernel=my_kernel)
clf = svm.SVC(decision_function_shape='ovo', kernel="linear", C=200, cache_size=800)
clf.fit(dtm_train, ytrain)
preds = clf.predict(dtm_test)
metrics.accuracy_score(ytest, preds) # .4612
# .6476 for C=100
ytest.cat.categories
print(classification_report(ytest, preds, target_names=ytest.cat.categories))
############################################
# using kernel matrix
start = timer()
gram = np.dot(dtm_train, dtm_train.T)
end = timer()
print(end - start)
kernel_test = dtm_test@dtm_train.T # dot product
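# For a linear kernel K(x, y) = x . y, so the Gram matrix is dtm_train @ dtm_train.T
# (n_train x n_train) and the test kernel is dtm_test @ dtm_train.T (n_test x n_train),
# which is the shape SVC(kernel='precomputed').predict expects.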
clfprecom = svm.SVC(decision_function_shape='ovo', kernel='precomputed', C=200, cache_size=800)
clfprecom.fit(gram, ytrain)
preds = clfprecom.predict(kernel_test)
preds = pd.Series(preds)
metrics.accuracy_score( ytest , preds)
# .6476 for C=100
##################################################
# GRID SEARCH CROSS VALIDATION
# https://stats.stackexchange.com/questions/31066/what-is-the-influence-of-c-in-svms-with-linear-kernel
# https://stackoverflow.com/questions/24595153/is-it-possible-to-tune-parameters-with-grid-search-for-custom-kernels-in-scikit
# Set the parameters by cross-validation
#tuned_parameters = [{'kernel': ['rbf'], 'gamma': [1e-3, 1e-4],
# 'C': [1, 10, 100, 1000]},
# {'kernel': ['linear'], 'C': [2e-4, 2e-3, 2e-2, 2e-1, 2e0, 2e1, 2e2, 2e3, 2e4]}]
tuned_parameters = [ {'kernel': ['linear'], 'C': [ 2e-1, 2e0, 2e1, 2e2, 2e3, 2e4]} ]
#scores = ['precision', 'recall']
scores = ['precision']
start = timer()
for score in scores:
print("# Tuning hyper-parameters for %s" % score)
print()
clf = GridSearchCV(
svm.SVC(decision_function_shape='ovo', kernel='precomputed'), tuned_parameters, scoring='%s_macro' % score
)
clf.fit(gram, ytrain)
print("Best parameters set found on development set:")
print()
print(clf.best_params_)
print()
print("Grid scores on development set:")
print()
means = clf.cv_results_['mean_test_score']
stds = clf.cv_results_['std_test_score']
for mean, std, params in zip(means, stds, clf.cv_results_['params']):
print("%0.3f (+/-%0.03f) for %r"
% (mean, std * 2, params))
print()
print("Detailed classification report:")
print()
print("The model is trained on the full development set.")
print("The scores are computed on the full evaluation set.")
print()
y_true, y_pred = ytest, clf.predict(kernel_test)
print(classification_report(y_true, y_pred))
print()
end = timer()
print(end - start)
clf.best_params_['C']
clfprecom = svm.SVC(decision_function_shape='ovo', kernel='precomputed', C=clf.best_params_['C'], cache_size=800)
#clfprecom = svm.SVC(decision_function_shape='ovo', kernel='precomputed', C=200, cache_size=800)
#gram = np.dot(dtm_train, dtm_train.T)
clfprecom.fit(gram, ytrain)
#kernel_test = dtm_test@dtm_train.T # dot product
preds = clfprecom.predict(kernel_test)
preds = pd.Series(preds)
metrics.accuracy_score( ytest , preds) #.6464
confmat = confusion_matrix(ytest, preds)
report = classification_report(ytest, preds, target_names=ytest.cat.categories, output_dict=True)
print(classification_report(ytest, preds, target_names=ytest.cat.categories) )
report_df = pd.DataFrame(report).transpose()
bestC = {'bestC' : [clf.best_params_['C']]}
pd.DataFrame.from_dict(bestC)
import numpy as np
import pandas as pd
from numba import njit, typeof
from numba.typed import List
from datetime import datetime, timedelta
import pytest
from copy import deepcopy
import vectorbt as vbt
from vectorbt.portfolio.enums import *
from vectorbt.generic.enums import drawdown_dt
from vectorbt.utils.random_ import set_seed
from vectorbt.portfolio import nb
from tests.utils import record_arrays_close
seed = 42
day_dt = np.timedelta64(86400000000000)
price = pd.Series([1., 2., 3., 4., 5.], index=pd.Index([
datetime(2020, 1, 1),
datetime(2020, 1, 2),
datetime(2020, 1, 3),
datetime(2020, 1, 4),
datetime(2020, 1, 5)
]))
price_wide = price.vbt.tile(3, keys=['a', 'b', 'c'])
big_price = pd.DataFrame(np.random.uniform(size=(1000,)))
big_price.index = [datetime(2018, 1, 1) + timedelta(days=i) for i in range(1000)]
big_price_wide = big_price.vbt.tile(1000)
# ############# Global ############# #
def setup_module():
vbt.settings.numba['check_func_suffix'] = True
vbt.settings.portfolio['attach_call_seq'] = True
vbt.settings.caching.enabled = False
vbt.settings.caching.whitelist = []
vbt.settings.caching.blacklist = []
def teardown_module():
vbt.settings.reset()
# ############# nb ############# #
def assert_same_tuple(tup1, tup2):
for i in range(len(tup1)):
assert tup1[i] == tup2[i] or np.isnan(tup1[i]) and np.isnan(tup2[i])
def test_execute_order_nb():
# Errors, ignored and rejected orders
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(-100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(np.nan, 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., np.inf, 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., np.nan, 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., np.nan, 100., 10., 1100., 0, 0),
nb.order_nb(10, 10))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., -10., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., np.nan, 10., 1100., 0, 0),
nb.order_nb(10, 10))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, size_type=-2))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, size_type=20))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, direction=-2))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, direction=20))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., -100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, direction=Direction.LongOnly))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, direction=Direction.ShortOnly))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, np.inf))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, -10))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, fees=np.inf))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, fees=-1))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, fixed_fees=np.inf))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, fixed_fees=-1))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, slippage=np.inf))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, slippage=-1))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, min_size=np.inf))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, min_size=-1))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, max_size=0))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, max_size=-10))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, reject_prob=np.nan))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, reject_prob=-1))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, reject_prob=2))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., np.nan, 0, 0),
nb.order_nb(1, 10, size_type=SizeType.TargetPercent))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=1, status_info=3))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., -10., 0, 0),
nb.order_nb(1, 10, size_type=SizeType.TargetPercent))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=4))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., np.inf, 1100., 0, 0),
nb.order_nb(10, 10, size_type=SizeType.Value))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., -10., 1100, 0, 0),
nb.order_nb(10, 10, size_type=SizeType.Value))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., np.nan, 1100., 0, 0),
nb.order_nb(10, 10, size_type=SizeType.Value))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., np.inf, 1100., 0, 0),
nb.order_nb(10, 10, size_type=SizeType.TargetValue))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., -10., 1100, 0, 0),
nb.order_nb(10, 10, size_type=SizeType.TargetValue))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., np.nan, 1100., 0, 0),
nb.order_nb(10, 10, size_type=SizeType.TargetValue))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=1, status_info=2))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., -10., 0., 100., 10., 1100., 0, 0),
nb.order_nb(np.inf, 10, direction=Direction.ShortOnly))
assert exec_state == ExecuteOrderState(cash=200.0, position=-20.0, debt=100.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10.0, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., -10., 0., 100., 10., 1100., 0, 0),
nb.order_nb(-np.inf, 10, direction=Direction.Both))
assert exec_state == ExecuteOrderState(cash=200.0, position=-20.0, debt=100.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10.0, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 10., 0., 100., 10., 1100., 0, 0),
nb.order_nb(0, 10))
assert exec_state == ExecuteOrderState(cash=100.0, position=10.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=1, status_info=5))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(15, 10, max_size=10, allow_partial=False))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=9))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, reject_prob=1.))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=10))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(0., 100., 0., 0., 10., 1100., 0, 0),
nb.order_nb(10, 10, direction=Direction.LongOnly))
assert exec_state == ExecuteOrderState(cash=0.0, position=100.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=7))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(0., 100., 0., 0., 10., 1100., 0, 0),
nb.order_nb(10, 10, direction=Direction.Both))
assert exec_state == ExecuteOrderState(cash=0.0, position=100.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=7))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(np.inf, 100, 0., np.inf, np.nan, 1100., 0, 0),
nb.order_nb(np.inf, 10, direction=Direction.LongOnly))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(np.inf, 100., 0., np.inf, 10., 1100., 0, 0),
nb.order_nb(np.inf, 10, direction=Direction.Both))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 1100., 0, 0),
nb.order_nb(-10, 10, direction=Direction.ShortOnly))
assert exec_state == ExecuteOrderState(cash=100.0, position=0.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=8))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(np.inf, 100., 0., np.inf, 10., 1100., 0, 0),
nb.order_nb(-np.inf, 10, direction=Direction.ShortOnly))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(np.inf, 100., 0., np.inf, 10., 1100., 0, 0),
nb.order_nb(-np.inf, 10, direction=Direction.Both))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 1100., 0, 0),
nb.order_nb(-10, 10, direction=Direction.LongOnly))
assert exec_state == ExecuteOrderState(cash=100.0, position=0.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=8))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, fixed_fees=100))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=11))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, min_size=100))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=12))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(100, 10, allow_partial=False))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=13))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(-10, 10, min_size=100))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=12))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(-200, 10, direction=Direction.LongOnly, allow_partial=False))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=13))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(-10, 10, fixed_fees=1000))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=11))
# Calculations
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(10, 10, fees=0.1, fixed_fees=1, slippage=0.1))
assert exec_state == ExecuteOrderState(cash=0.0, position=8.18181818181818, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=8.18181818181818, price=11.0, fees=10.000000000000014, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(100, 10, fees=0.1, fixed_fees=1, slippage=0.1))
assert exec_state == ExecuteOrderState(cash=0.0, position=8.18181818181818, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=8.18181818181818, price=11.0, fees=10.000000000000014, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(-10, 10, fees=0.1, fixed_fees=1, slippage=0.1))
assert exec_state == ExecuteOrderState(cash=180.0, position=-10.0, debt=90.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10.0, price=9.0, fees=10.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(-100, 10, fees=0.1, fixed_fees=1, slippage=0.1))
assert exec_state == ExecuteOrderState(cash=909.0, position=-100.0, debt=900.0, free_cash=-891.0)
assert_same_tuple(order_result, OrderResult(
size=100.0, price=9.0, fees=91.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(10, 10, size_type=SizeType.TargetAmount))
assert exec_state == ExecuteOrderState(cash=0.0, position=10.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(-10, 10, size_type=SizeType.TargetAmount))
assert exec_state == ExecuteOrderState(cash=200.0, position=-10.0, debt=100.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(100, 10, size_type=SizeType.Value))
assert exec_state == ExecuteOrderState(cash=0.0, position=10.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(-100, 10, size_type=SizeType.Value))
assert exec_state == ExecuteOrderState(cash=200.0, position=-10.0, debt=100.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(100, 10, size_type=SizeType.TargetValue))
assert exec_state == ExecuteOrderState(cash=0.0, position=10.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(-100, 10, size_type=SizeType.TargetValue))
assert exec_state == ExecuteOrderState(cash=200.0, position=-10.0, debt=100.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(1, 10, size_type=SizeType.TargetPercent))
assert exec_state == ExecuteOrderState(cash=0.0, position=10.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(-1, 10, size_type=SizeType.TargetPercent))
assert exec_state == ExecuteOrderState(cash=200.0, position=-10.0, debt=100.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., 5., 0., 50., 10., 100., 0, 0),
nb.order_nb(1, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=0.0, position=10.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=5.0, price=10.0, fees=0.0, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., 5., 0., 50., 10., 100., 0, 0),
nb.order_nb(0.5, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=25.0, position=7.5, debt=0.0, free_cash=25.0)
assert_same_tuple(order_result, OrderResult(
size=2.5, price=10.0, fees=0.0, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., 5., 0., 50., 10., 100., 0, 0),
nb.order_nb(-0.5, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=125.0, position=-2.5, debt=25.0, free_cash=75.0)
assert_same_tuple(order_result, OrderResult(
size=7.5, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., 5., 0., 50., 10., 100., 0, 0),
nb.order_nb(-1, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=200.0, position=-10.0, debt=100.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=15.0, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., 0., 0., 50., 10., 100., 0, 0),
nb.order_nb(1, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=0.0, position=5.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=5.0, price=10.0, fees=0.0, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., 0., 0., 50., 10., 100., 0, 0),
nb.order_nb(0.5, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=25.0, position=2.5, debt=0.0, free_cash=25.0)
assert_same_tuple(order_result, OrderResult(
size=2.5, price=10.0, fees=0.0, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., 0., 0., 50., 10., 100., 0, 0),
nb.order_nb(-0.5, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=75.0, position=-2.5, debt=25.0, free_cash=25.0)
assert_same_tuple(order_result, OrderResult(
size=2.5, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., 0., 0., 50., 10., 100., 0, 0),
nb.order_nb(-1, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=100.0, position=-5.0, debt=50.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=5.0, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., -5., 0., 50., 10., 100., 0, 0),
nb.order_nb(1, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=0.0, position=0.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=5.0, price=10.0, fees=0.0, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., -5., 0., 50., 10., 100., 0, 0),
nb.order_nb(0.5, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=25.0, position=-2.5, debt=0.0, free_cash=25.0)
assert_same_tuple(order_result, OrderResult(
size=2.5, price=10.0, fees=0.0, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., -5., 0., 50., 10., 100., 0, 0),
nb.order_nb(-0.5, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=75.0, position=-7.5, debt=25.0, free_cash=25.0)
assert_same_tuple(order_result, OrderResult(
size=2.5, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., -5., 0., 50., 10., 100., 0, 0),
nb.order_nb(-1, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=100.0, position=-10.0, debt=50.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=5.0, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(np.inf, 10))
assert exec_state == ExecuteOrderState(cash=0.0, position=10.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., -5., 0., 100., 10., 100., 0, 0),
nb.order_nb(np.inf, 10))
assert exec_state == ExecuteOrderState(cash=0.0, position=5.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(-np.inf, 10))
assert exec_state == ExecuteOrderState(cash=200.0, position=-10.0, debt=100.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(150., -5., 0., 150., 10., 100., 0, 0),
nb.order_nb(-np.inf, 10))
assert exec_state == ExecuteOrderState(cash=300.0, position=-20.0, debt=150.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=15.0, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 50., 10., 100., 0, 0),
nb.order_nb(10, 10, lock_cash=True))
assert exec_state == ExecuteOrderState(cash=50.0, position=5.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=5.0, price=10.0, fees=0.0, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(1000., -5., 50., 50., 10., 100., 0, 0),
nb.order_nb(10, 17.5, lock_cash=True))
assert exec_state == ExecuteOrderState(cash=850.0, position=3.571428571428571, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=8.571428571428571, price=17.5, fees=0.0, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., -5., 50., 50., 10., 100., 0, 0),
nb.order_nb(10, 100, lock_cash=True))
assert exec_state == ExecuteOrderState(cash=37.5, position=-4.375, debt=43.75, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=0.625, price=100.0, fees=0.0, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(0., 10., 0., -50., 10., 100., 0, 0),
nb.order_nb(-20, 10, lock_cash=True))
assert exec_state == ExecuteOrderState(cash=150.0, position=-5.0, debt=50.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=15.0, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(0., 1., 0., -50., 10., 100., 0, 0),
nb.order_nb(-10, 10, lock_cash=True))
assert exec_state == ExecuteOrderState(cash=10.0, position=0.0, debt=0.0, free_cash=-40.0)
assert_same_tuple(order_result, OrderResult(
size=1.0, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(0., 0., 0., -100., 10., 100., 0, 0),
nb.order_nb(-10, 10, lock_cash=True))
assert exec_state == ExecuteOrderState(cash=0.0, position=0.0, debt=0.0, free_cash=-100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=6))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(0., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(-20, 10, fees=0.1, slippage=0.1, fixed_fees=1., lock_cash=True))
assert exec_state == ExecuteOrderState(cash=80.0, position=-10.0, debt=90.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10.0, price=9.0, fees=10.0, side=1, status=0, status_info=-1))
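# The cases above exercise nb.execute_order_nb across the size types
# (TargetAmount, Value, TargetValue, TargetPercent, Percent), infinite sizes,
# and lock_cash, asserting both the resulting ExecuteOrderState and OrderResult.

# build_call_seq_nb (numba-compiled) should agree with what appears to be its
# non-compiled counterpart build_call_seq for the Default, Reversed, and
# (seeded) Random call sequence types.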
def test_build_call_seq_nb():
group_lens = np.array([1, 2, 3, 4])
np.testing.assert_array_equal(
nb.build_call_seq_nb((10, 10), group_lens, CallSeqType.Default),
nb.build_call_seq((10, 10), group_lens, CallSeqType.Default)
)
np.testing.assert_array_equal(
nb.build_call_seq_nb((10, 10), group_lens, CallSeqType.Reversed),
nb.build_call_seq((10, 10), group_lens, CallSeqType.Reversed)
)
set_seed(seed)
out1 = nb.build_call_seq_nb((10, 10), group_lens, CallSeqType.Random)
set_seed(seed)
out2 = nb.build_call_seq((10, 10), group_lens, CallSeqType.Random)
np.testing.assert_array_equal(out1, out2)
# ############# from_orders ############# #
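# Module-level fixtures for the from_orders tests below: `order_size` orders as much
# as possible in alternating directions with a NaN gap, `order_size_wide` tiles it
# into three columns, and `order_size_one` trades a single unit per row. The
# from_orders_* helpers only fix the `direction` argument.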
order_size = pd.Series([np.inf, -np.inf, np.nan, np.inf, -np.inf], index=price.index)
order_size_wide = order_size.vbt.tile(3, keys=['a', 'b', 'c'])
order_size_one = pd.Series([1, -1, np.nan, 1, -1], index=price.index)
def from_orders_both(close=price, size=order_size, **kwargs):
return vbt.Portfolio.from_orders(close, size, direction='both', **kwargs)
def from_orders_longonly(close=price, size=order_size, **kwargs):
return vbt.Portfolio.from_orders(close, size, direction='longonly', **kwargs)
def from_orders_shortonly(close=price, size=order_size, **kwargs):
return vbt.Portfolio.from_orders(close, size, direction='shortonly', **kwargs)
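# A rough usage sketch (not asserted anywhere, shown only to make the record tuples
# below easier to read). Each order record follows the imported `order_dt` layout,
# roughly (id, col, idx, size, price, fees, side) with side 0 = buy and 1 = sell:
#
#     >>> pf = vbt.Portfolio.from_orders(price, order_size, direction='both')
#     >>> pf.order_records[0]
#     (0, 0, 0, 100., 1., 0., 0)  # buy 100 units of column 0 at row 0 for 1.0 each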
class TestFromOrders:
def test_one_column(self):
record_arrays_close(
from_orders_both().order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 200.0, 2.0, 0.0, 1), (2, 0, 3, 100.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly().order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 100.0, 2.0, 0.0, 1), (2, 0, 3, 50.0, 4.0, 0.0, 0),
(3, 0, 4, 50.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly().order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1), (1, 0, 1, 100.0, 2.0, 0.0, 0)
], dtype=order_dt)
)
pf = from_orders_both()
pd.testing.assert_index_equal(
pf.wrapper.index,
pd.DatetimeIndex(['2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05'])
)
pd.testing.assert_index_equal(
pf.wrapper.columns,
pd.Int64Index([0], dtype='int64')
)
assert pf.wrapper.ndim == 1
assert pf.wrapper.freq == day_dt
assert pf.wrapper.grouper.group_by is None
def test_multiple_columns(self):
record_arrays_close(
from_orders_both(close=price_wide).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 200.0, 2.0, 0.0, 1), (2, 0, 3, 100.0, 4.0, 0.0, 0),
(3, 1, 0, 100.0, 1.0, 0.0, 0), (4, 1, 1, 200.0, 2.0, 0.0, 1), (5, 1, 3, 100.0, 4.0, 0.0, 0),
(6, 2, 0, 100.0, 1.0, 0.0, 0), (7, 2, 1, 200.0, 2.0, 0.0, 1), (8, 2, 3, 100.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(close=price_wide).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 100.0, 2.0, 0.0, 1), (2, 0, 3, 50.0, 4.0, 0.0, 0),
(3, 0, 4, 50.0, 5.0, 0.0, 1), (4, 1, 0, 100.0, 1.0, 0.0, 0), (5, 1, 1, 100.0, 2.0, 0.0, 1),
(6, 1, 3, 50.0, 4.0, 0.0, 0), (7, 1, 4, 50.0, 5.0, 0.0, 1), (8, 2, 0, 100.0, 1.0, 0.0, 0),
(9, 2, 1, 100.0, 2.0, 0.0, 1), (10, 2, 3, 50.0, 4.0, 0.0, 0), (11, 2, 4, 50.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(close=price_wide).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1), (1, 0, 1, 100.0, 2.0, 0.0, 0), (2, 1, 0, 100.0, 1.0, 0.0, 1),
(3, 1, 1, 100.0, 2.0, 0.0, 0), (4, 2, 0, 100.0, 1.0, 0.0, 1), (5, 2, 1, 100.0, 2.0, 0.0, 0)
], dtype=order_dt)
)
pf = from_orders_both(close=price_wide)
pd.testing.assert_index_equal(
pf.wrapper.index,
pd.DatetimeIndex(['2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05'])
)
pd.testing.assert_index_equal(
pf.wrapper.columns,
pd.Index(['a', 'b', 'c'], dtype='object')
)
assert pf.wrapper.ndim == 2
assert pf.wrapper.freq == day_dt
assert pf.wrapper.grouper.group_by is None
def test_size_inf(self):
record_arrays_close(
from_orders_both(size=[[np.inf, -np.inf]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 1, 0, 100.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=[[np.inf, -np.inf]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=[[np.inf, -np.inf]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
def test_price(self):
record_arrays_close(
from_orders_both(price=price * 1.01).order_records,
np.array([
(0, 0, 0, 99.00990099009901, 1.01, 0.0, 0), (1, 0, 1, 198.01980198019803, 2.02, 0.0, 1),
(2, 0, 3, 99.00990099009901, 4.04, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(price=price * 1.01).order_records,
np.array([
(0, 0, 0, 99.00990099009901, 1.01, 0.0, 0), (1, 0, 1, 99.00990099009901, 2.02, 0.0, 1),
(2, 0, 3, 49.504950495049506, 4.04, 0.0, 0), (3, 0, 4, 49.504950495049506, 5.05, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(price=price * 1.01).order_records,
np.array([
(0, 0, 0, 99.00990099009901, 1.01, 0.0, 1), (1, 0, 1, 99.00990099009901, 2.02, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_both(price=np.inf).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 200.0, 2.0, 0.0, 1), (2, 0, 3, 100.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(price=np.inf).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 100.0, 2.0, 0.0, 1),
(2, 0, 3, 50.0, 4.0, 0.0, 0), (3, 0, 4, 50.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(price=np.inf).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1), (1, 0, 1, 100.0, 2.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_both(price=-np.inf).order_records,
np.array([
(0, 0, 1, 100.0, 1.0, 0.0, 1), (1, 0, 3, 66.66666666666667, 3.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(price=-np.inf).order_records,
np.array([
(0, 0, 3, 33.333333333333336, 3.0, 0.0, 0), (1, 0, 4, 33.333333333333336, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(price=-np.inf).order_records,
np.array([
(0, 0, 3, 33.333333333333336, 3.0, 0.0, 1), (1, 0, 4, 33.333333333333336, 4.0, 0.0, 0)
], dtype=order_dt)
)
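    # val_price selects the asset price used for valuation when sizing by value:
    # np.inf resolves to the current order/close price and -np.inf to the previous
    # valuation price, with NaNs forward-filled unless ffill_val_price=False; the
    # explicit price series in the comparisons below encode exactly these expectations.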
def test_val_price(self):
price_nan = pd.Series([1, 2, np.nan, 4, 5], index=price.index)
record_arrays_close(
from_orders_both(close=price_nan, size=order_size_one, val_price=np.inf,
size_type='value').order_records,
from_orders_both(close=price_nan, size=order_size_one, val_price=price,
size_type='value').order_records
)
record_arrays_close(
from_orders_longonly(close=price_nan, size=order_size_one, val_price=np.inf,
size_type='value').order_records,
from_orders_longonly(close=price_nan, size=order_size_one, val_price=price,
size_type='value').order_records
)
record_arrays_close(
from_orders_shortonly(close=price_nan, size=order_size_one, val_price=np.inf,
size_type='value').order_records,
from_orders_shortonly(close=price_nan, size=order_size_one, val_price=price,
size_type='value').order_records
)
shift_price = price_nan.ffill().shift(1)
record_arrays_close(
from_orders_both(close=price_nan, size=order_size_one, val_price=-np.inf,
size_type='value').order_records,
from_orders_both(close=price_nan, size=order_size_one, val_price=shift_price,
size_type='value').order_records
)
record_arrays_close(
from_orders_longonly(close=price_nan, size=order_size_one, val_price=-np.inf,
size_type='value').order_records,
from_orders_longonly(close=price_nan, size=order_size_one, val_price=shift_price,
size_type='value').order_records
)
record_arrays_close(
from_orders_shortonly(close=price_nan, size=order_size_one, val_price=-np.inf,
size_type='value').order_records,
from_orders_shortonly(close=price_nan, size=order_size_one, val_price=shift_price,
size_type='value').order_records
)
record_arrays_close(
from_orders_both(close=price_nan, size=order_size_one, val_price=np.inf,
size_type='value', ffill_val_price=False).order_records,
from_orders_both(close=price_nan, size=order_size_one, val_price=price_nan,
size_type='value', ffill_val_price=False).order_records
)
record_arrays_close(
from_orders_longonly(close=price_nan, size=order_size_one, val_price=np.inf,
size_type='value', ffill_val_price=False).order_records,
from_orders_longonly(close=price_nan, size=order_size_one, val_price=price_nan,
size_type='value', ffill_val_price=False).order_records
)
record_arrays_close(
from_orders_shortonly(close=price_nan, size=order_size_one, val_price=np.inf,
size_type='value', ffill_val_price=False).order_records,
from_orders_shortonly(close=price_nan, size=order_size_one, val_price=price_nan,
size_type='value', ffill_val_price=False).order_records
)
shift_price_nan = price_nan.shift(1)
record_arrays_close(
from_orders_both(close=price_nan, size=order_size_one, val_price=-np.inf,
size_type='value', ffill_val_price=False).order_records,
from_orders_both(close=price_nan, size=order_size_one, val_price=shift_price_nan,
size_type='value', ffill_val_price=False).order_records
)
record_arrays_close(
from_orders_longonly(close=price_nan, size=order_size_one, val_price=-np.inf,
size_type='value', ffill_val_price=False).order_records,
from_orders_longonly(close=price_nan, size=order_size_one, val_price=shift_price_nan,
size_type='value', ffill_val_price=False).order_records
)
record_arrays_close(
from_orders_shortonly(close=price_nan, size=order_size_one, val_price=-np.inf,
size_type='value', ffill_val_price=False).order_records,
from_orders_shortonly(close=price_nan, size=order_size_one, val_price=shift_price_nan,
size_type='value', ffill_val_price=False).order_records
)
def test_fees(self):
record_arrays_close(
from_orders_both(size=order_size_one, fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 1.0, 2.0, 0.0, 1), (2, 0, 3, 1.0, 4.0, 0.0, 0),
(3, 0, 4, 1.0, 5.0, 0.0, 1), (4, 1, 0, 1.0, 1.0, 0.1, 0), (5, 1, 1, 1.0, 2.0, 0.2, 1),
(6, 1, 3, 1.0, 4.0, 0.4, 0), (7, 1, 4, 1.0, 5.0, 0.5, 1), (8, 2, 0, 1.0, 1.0, 1.0, 0),
(9, 2, 1, 1.0, 2.0, 2.0, 1), (10, 2, 3, 1.0, 4.0, 4.0, 0), (11, 2, 4, 1.0, 5.0, 5.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one, fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 1.0, 2.0, 0.0, 1), (2, 0, 3, 1.0, 4.0, 0.0, 0),
(3, 0, 4, 1.0, 5.0, 0.0, 1), (4, 1, 0, 1.0, 1.0, 0.1, 0), (5, 1, 1, 1.0, 2.0, 0.2, 1),
(6, 1, 3, 1.0, 4.0, 0.4, 0), (7, 1, 4, 1.0, 5.0, 0.5, 1), (8, 2, 0, 1.0, 1.0, 1.0, 0),
(9, 2, 1, 1.0, 2.0, 2.0, 1), (10, 2, 3, 1.0, 4.0, 4.0, 0), (11, 2, 4, 1.0, 5.0, 5.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one, fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 1, 1.0, 2.0, 0.0, 0), (2, 0, 3, 1.0, 4.0, 0.0, 1),
(3, 0, 4, 1.0, 5.0, 0.0, 0), (4, 1, 0, 1.0, 1.0, 0.1, 1), (5, 1, 1, 1.0, 2.0, 0.2, 0),
(6, 1, 3, 1.0, 4.0, 0.4, 1), (7, 1, 4, 1.0, 5.0, 0.5, 0), (8, 2, 0, 1.0, 1.0, 1.0, 1),
(9, 2, 1, 1.0, 2.0, 2.0, 0), (10, 2, 3, 1.0, 4.0, 4.0, 1), (11, 2, 4, 1.0, 5.0, 5.0, 0)
], dtype=order_dt)
)
def test_fixed_fees(self):
record_arrays_close(
from_orders_both(size=order_size_one, fixed_fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 1.0, 2.0, 0.0, 1), (2, 0, 3, 1.0, 4.0, 0.0, 0),
(3, 0, 4, 1.0, 5.0, 0.0, 1), (4, 1, 0, 1.0, 1.0, 0.1, 0), (5, 1, 1, 1.0, 2.0, 0.1, 1),
(6, 1, 3, 1.0, 4.0, 0.1, 0), (7, 1, 4, 1.0, 5.0, 0.1, 1), (8, 2, 0, 1.0, 1.0, 1.0, 0),
(9, 2, 1, 1.0, 2.0, 1.0, 1), (10, 2, 3, 1.0, 4.0, 1.0, 0), (11, 2, 4, 1.0, 5.0, 1.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one, fixed_fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 1.0, 2.0, 0.0, 1), (2, 0, 3, 1.0, 4.0, 0.0, 0),
(3, 0, 4, 1.0, 5.0, 0.0, 1), (4, 1, 0, 1.0, 1.0, 0.1, 0), (5, 1, 1, 1.0, 2.0, 0.1, 1),
(6, 1, 3, 1.0, 4.0, 0.1, 0), (7, 1, 4, 1.0, 5.0, 0.1, 1), (8, 2, 0, 1.0, 1.0, 1.0, 0),
(9, 2, 1, 1.0, 2.0, 1.0, 1), (10, 2, 3, 1.0, 4.0, 1.0, 0), (11, 2, 4, 1.0, 5.0, 1.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one, fixed_fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 1, 1.0, 2.0, 0.0, 0), (2, 0, 3, 1.0, 4.0, 0.0, 1),
(3, 0, 4, 1.0, 5.0, 0.0, 0), (4, 1, 0, 1.0, 1.0, 0.1, 1), (5, 1, 1, 1.0, 2.0, 0.1, 0),
(6, 1, 3, 1.0, 4.0, 0.1, 1), (7, 1, 4, 1.0, 5.0, 0.1, 0), (8, 2, 0, 1.0, 1.0, 1.0, 1),
(9, 2, 1, 1.0, 2.0, 1.0, 0), (10, 2, 3, 1.0, 4.0, 1.0, 1), (11, 2, 4, 1.0, 5.0, 1.0, 0)
], dtype=order_dt)
)
def test_slippage(self):
record_arrays_close(
from_orders_both(size=order_size_one, slippage=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 1.0, 2.0, 0.0, 1), (2, 0, 3, 1.0, 4.0, 0.0, 0),
(3, 0, 4, 1.0, 5.0, 0.0, 1), (4, 1, 0, 1.0, 1.1, 0.0, 0), (5, 1, 1, 1.0, 1.8, 0.0, 1),
(6, 1, 3, 1.0, 4.4, 0.0, 0), (7, 1, 4, 1.0, 4.5, 0.0, 1), (8, 2, 0, 1.0, 2.0, 0.0, 0),
(9, 2, 1, 1.0, 0.0, 0.0, 1), (10, 2, 3, 1.0, 8.0, 0.0, 0), (11, 2, 4, 1.0, 0.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one, slippage=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 1.0, 2.0, 0.0, 1), (2, 0, 3, 1.0, 4.0, 0.0, 0),
(3, 0, 4, 1.0, 5.0, 0.0, 1), (4, 1, 0, 1.0, 1.1, 0.0, 0), (5, 1, 1, 1.0, 1.8, 0.0, 1),
(6, 1, 3, 1.0, 4.4, 0.0, 0), (7, 1, 4, 1.0, 4.5, 0.0, 1), (8, 2, 0, 1.0, 2.0, 0.0, 0),
(9, 2, 1, 1.0, 0.0, 0.0, 1), (10, 2, 3, 1.0, 8.0, 0.0, 0), (11, 2, 4, 1.0, 0.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one, slippage=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 1, 1.0, 2.0, 0.0, 0), (2, 0, 3, 1.0, 4.0, 0.0, 1),
(3, 0, 4, 1.0, 5.0, 0.0, 0), (4, 1, 0, 1.0, 0.9, 0.0, 1), (5, 1, 1, 1.0, 2.2, 0.0, 0),
(6, 1, 3, 1.0, 3.6, 0.0, 1), (7, 1, 4, 1.0, 5.5, 0.0, 0), (8, 2, 0, 1.0, 0.0, 0.0, 1),
(9, 2, 1, 1.0, 4.0, 0.0, 0), (10, 2, 3, 1.0, 0.0, 0.0, 1), (11, 2, 4, 1.0, 10.0, 0.0, 0)
], dtype=order_dt)
)
def test_min_size(self):
record_arrays_close(
from_orders_both(size=order_size_one, min_size=[[0., 1., 2.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 1.0, 2.0, 0.0, 1), (2, 0, 3, 1.0, 4.0, 0.0, 0),
(3, 0, 4, 1.0, 5.0, 0.0, 1), (4, 1, 0, 1.0, 1.0, 0.0, 0), (5, 1, 1, 1.0, 2.0, 0.0, 1),
(6, 1, 3, 1.0, 4.0, 0.0, 0), (7, 1, 4, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one, min_size=[[0., 1., 2.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 1.0, 2.0, 0.0, 1), (2, 0, 3, 1.0, 4.0, 0.0, 0),
(3, 0, 4, 1.0, 5.0, 0.0, 1), (4, 1, 0, 1.0, 1.0, 0.0, 0), (5, 1, 1, 1.0, 2.0, 0.0, 1),
(6, 1, 3, 1.0, 4.0, 0.0, 0), (7, 1, 4, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one, min_size=[[0., 1., 2.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 1, 1.0, 2.0, 0.0, 0), (2, 0, 3, 1.0, 4.0, 0.0, 1),
(3, 0, 4, 1.0, 5.0, 0.0, 0), (4, 1, 0, 1.0, 1.0, 0.0, 1), (5, 1, 1, 1.0, 2.0, 0.0, 0),
(6, 1, 3, 1.0, 4.0, 0.0, 1), (7, 1, 4, 1.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
def test_max_size(self):
record_arrays_close(
from_orders_both(size=order_size_one, max_size=[[0.5, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 0.5, 1.0, 0.0, 0), (1, 0, 1, 0.5, 2.0, 0.0, 1), (2, 0, 3, 0.5, 4.0, 0.0, 0),
(3, 0, 4, 0.5, 5.0, 0.0, 1), (4, 1, 0, 1.0, 1.0, 0.0, 0), (5, 1, 1, 1.0, 2.0, 0.0, 1),
(6, 1, 3, 1.0, 4.0, 0.0, 0), (7, 1, 4, 1.0, 5.0, 0.0, 1), (8, 2, 0, 1.0, 1.0, 0.0, 0),
(9, 2, 1, 1.0, 2.0, 0.0, 1), (10, 2, 3, 1.0, 4.0, 0.0, 0), (11, 2, 4, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one, max_size=[[0.5, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 0.5, 1.0, 0.0, 0), (1, 0, 1, 0.5, 2.0, 0.0, 1), (2, 0, 3, 0.5, 4.0, 0.0, 0),
(3, 0, 4, 0.5, 5.0, 0.0, 1), (4, 1, 0, 1.0, 1.0, 0.0, 0), (5, 1, 1, 1.0, 2.0, 0.0, 1),
(6, 1, 3, 1.0, 4.0, 0.0, 0), (7, 1, 4, 1.0, 5.0, 0.0, 1), (8, 2, 0, 1.0, 1.0, 0.0, 0),
(9, 2, 1, 1.0, 2.0, 0.0, 1), (10, 2, 3, 1.0, 4.0, 0.0, 0), (11, 2, 4, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one, max_size=[[0.5, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 0.5, 1.0, 0.0, 1), (1, 0, 1, 0.5, 2.0, 0.0, 0), (2, 0, 3, 0.5, 4.0, 0.0, 1),
(3, 0, 4, 0.5, 5.0, 0.0, 0), (4, 1, 0, 1.0, 1.0, 0.0, 1), (5, 1, 1, 1.0, 2.0, 0.0, 0),
(6, 1, 3, 1.0, 4.0, 0.0, 1), (7, 1, 4, 1.0, 5.0, 0.0, 0), (8, 2, 0, 1.0, 1.0, 0.0, 1),
(9, 2, 1, 1.0, 2.0, 0.0, 0), (10, 2, 3, 1.0, 4.0, 0.0, 1), (11, 2, 4, 1.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
def test_reject_prob(self):
record_arrays_close(
from_orders_both(size=order_size_one, reject_prob=[[0., 0.5, 1.]], seed=42).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 1.0, 2.0, 0.0, 1), (2, 0, 3, 1.0, 4.0, 0.0, 0),
(3, 0, 4, 1.0, 5.0, 0.0, 1), (4, 1, 1, 1.0, 2.0, 0.0, 1), (5, 1, 3, 1.0, 4.0, 0.0, 0),
(6, 1, 4, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one, reject_prob=[[0., 0.5, 1.]], seed=42).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 1.0, 2.0, 0.0, 1), (2, 0, 3, 1.0, 4.0, 0.0, 0),
(3, 0, 4, 1.0, 5.0, 0.0, 1), (4, 1, 3, 1.0, 4.0, 0.0, 0), (5, 1, 4, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one, reject_prob=[[0., 0.5, 1.]], seed=42).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 1, 1.0, 2.0, 0.0, 0), (2, 0, 3, 1.0, 4.0, 0.0, 1),
(3, 0, 4, 1.0, 5.0, 0.0, 0), (4, 1, 3, 1.0, 4.0, 0.0, 1), (5, 1, 4, 1.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
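    # lock_cash=True caps order sizes so that free cash in the (shared) cash pool
    # never goes negative; the paired runs below differ only in this flag, and the
    # free-cash matrices show negative balances only when it is disabled.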
def test_lock_cash(self):
pf = vbt.Portfolio.from_orders(
pd.Series([1, 1]),
pd.DataFrame([[-25, -25], [np.inf, np.inf]]),
group_by=True, cash_sharing=True,
lock_cash=False, fees=0.01, fixed_fees=1., slippage=0.01)
np.testing.assert_array_equal(
pf.asset_flow().values,
np.array([
[-25.0, -25.0],
[143.12812469365747, 0.0]
])
)
np.testing.assert_array_equal(
pf.cash(group_by=False, in_sim_order=True).values,
np.array([
[123.5025, 147.005],
[0.0, 0.0]
])
)
np.testing.assert_array_equal(
pf.cash(group_by=False, in_sim_order=True, free=True).values,
np.array([
[74.0025, 48.004999999999995],
[-49.5, -49.5]
])
)
pf = vbt.Portfolio.from_orders(
pd.Series([1, 1]),
pd.DataFrame([[-25, -25], [np.inf, np.inf]]),
group_by=True, cash_sharing=True,
lock_cash=True, fees=0.01, fixed_fees=1., slippage=0.01)
np.testing.assert_array_equal(
pf.asset_flow().values,
np.array([
[-25.0, -25.0],
[94.6034702480149, 47.54435839623566]
])
)
np.testing.assert_array_equal(
pf.cash(group_by=False, in_sim_order=True).values,
np.array([
[123.5025, 147.005],
[49.5, 0.0]
])
)
np.testing.assert_array_equal(
pf.cash(group_by=False, in_sim_order=True, free=True).values,
np.array([
[74.0025, 48.004999999999995],
[0.0, 0.0]
])
)
pf = vbt.Portfolio.from_orders(
pd.Series([1, 100]),
pd.DataFrame([[-25, -25], [np.inf, np.inf]]),
group_by=True, cash_sharing=True,
lock_cash=False, fees=0.01, fixed_fees=1., slippage=0.01)
np.testing.assert_array_equal(
pf.asset_flow().values,
np.array([
[-25.0, -25.0],
[1.4312812469365748, 0.0]
])
)
np.testing.assert_array_equal(
pf.cash(group_by=False, in_sim_order=True).values,
np.array([
[123.5025, 147.005],
[0.0, 0.0]
])
)
np.testing.assert_array_equal(
pf.cash(group_by=False, in_sim_order=True, free=True).values,
np.array([
[74.0025, 48.004999999999995],
[-96.16606313106556, -96.16606313106556]
])
)
pf = vbt.Portfolio.from_orders(
pd.Series([1, 100]),
pd.DataFrame([[-25, -25], [np.inf, np.inf]]),
group_by=True, cash_sharing=True,
lock_cash=True, fees=0.01, fixed_fees=1., slippage=0.01)
np.testing.assert_array_equal(
pf.asset_flow().values,
np.array([
[-25.0, -25.0],
[0.4699090272918124, 0.0]
])
)
np.testing.assert_array_equal(
pf.cash(group_by=False, in_sim_order=True).values,
np.array([
[123.5025, 147.005],
[98.06958012596222, 98.06958012596222]
])
)
np.testing.assert_array_equal(
pf.cash(group_by=False, in_sim_order=True, free=True).values,
np.array([
[74.0025, 48.004999999999995],
[0.0, 0.0]
])
)
pf = from_orders_both(size=order_size_one * 1000, lock_cash=[[False, True]])
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 0, 1, 1000., 2., 0., 1),
(2, 0, 3, 500., 4., 0., 0), (3, 0, 4, 1000., 5., 0., 1),
(4, 1, 0, 100., 1., 0., 0), (5, 1, 1, 200., 2., 0., 1),
(6, 1, 3, 100., 4., 0., 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.cash(free=True).values,
np.array([
[0.0, 0.0],
[-1600.0, 0.0],
[-1600.0, 0.0],
[-1600.0, 0.0],
[-6600.0, 0.0]
])
)
pf = from_orders_longonly(size=order_size_one * 1000, lock_cash=[[False, True]])
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 0, 1, 100., 2., 0., 1),
(2, 0, 3, 50., 4., 0., 0), (3, 0, 4, 50., 5., 0., 1),
(4, 1, 0, 100., 1., 0., 0), (5, 1, 1, 100., 2., 0., 1),
(6, 1, 3, 50., 4., 0., 0), (7, 1, 4, 50., 5., 0., 1)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.cash(free=True).values,
np.array([
[0.0, 0.0],
[200.0, 200.0],
[200.0, 200.0],
[0.0, 0.0],
[250.0, 250.0]
])
)
pf = from_orders_shortonly(size=order_size_one * 1000, lock_cash=[[False, True]])
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 1000., 1., 0., 1), (1, 0, 1, 550., 2., 0., 0),
(2, 0, 3, 1000., 4., 0., 1), (3, 0, 4, 800., 5., 0., 0),
(4, 1, 0, 100., 1., 0., 1), (5, 1, 1, 100., 2., 0., 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.cash(free=True).values,
np.array([
[-900.0, 0.0],
[-900.0, 0.0],
[-900.0, 0.0],
[-4900.0, 0.0],
[-3989.6551724137926, 0.0]
])
)
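    # allow_partial=False rejects outright any order that could only be filled
    # partially; orders that fit in full are unaffected.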
def test_allow_partial(self):
record_arrays_close(
from_orders_both(size=order_size_one * 1000, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 1000.0, 2.0, 0.0, 1), (2, 0, 3, 500.0, 4.0, 0.0, 0),
(3, 0, 4, 1000.0, 5.0, 0.0, 1), (4, 1, 1, 1000.0, 2.0, 0.0, 1), (5, 1, 4, 1000.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one * 1000, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 100.0, 2.0, 0.0, 1), (2, 0, 3, 50.0, 4.0, 0.0, 0),
(3, 0, 4, 50.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one * 1000, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 1000.0, 1.0, 0.0, 1), (1, 0, 1, 550.0, 2.0, 0.0, 0), (2, 0, 3, 1000.0, 4.0, 0.0, 1),
(3, 0, 4, 800.0, 5.0, 0.0, 0), (4, 1, 0, 1000.0, 1.0, 0.0, 1), (5, 1, 3, 1000.0, 4.0, 0.0, 1),
(6, 1, 4, 1000.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_both(size=order_size, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 200.0, 2.0, 0.0, 1), (2, 0, 3, 100.0, 4.0, 0.0, 0),
(3, 1, 0, 100.0, 1.0, 0.0, 0), (4, 1, 1, 200.0, 2.0, 0.0, 1), (5, 1, 3, 100.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 100.0, 2.0, 0.0, 1), (2, 0, 3, 50.0, 4.0, 0.0, 0),
(3, 0, 4, 50.0, 5.0, 0.0, 1), (4, 1, 0, 100.0, 1.0, 0.0, 0), (5, 1, 1, 100.0, 2.0, 0.0, 1),
(6, 1, 3, 50.0, 4.0, 0.0, 0), (7, 1, 4, 50.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1), (1, 0, 1, 100.0, 2.0, 0.0, 0), (2, 1, 0, 100.0, 1.0, 0.0, 1),
(3, 1, 1, 100.0, 2.0, 0.0, 0)
], dtype=order_dt)
)
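    # raise_reject=True escalates a rejected order into an exception instead of
    # silently skipping it, hence the pytest.raises blocks once allow_partial=False.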
def test_raise_reject(self):
record_arrays_close(
from_orders_both(size=order_size_one * 1000, allow_partial=True, raise_reject=True).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 1000.0, 2.0, 0.0, 1), (2, 0, 3, 500.0, 4.0, 0.0, 0),
(3, 0, 4, 1000.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one * 1000, allow_partial=True, raise_reject=True).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 100.0, 2.0, 0.0, 1), (2, 0, 3, 50.0, 4.0, 0.0, 0),
(3, 0, 4, 50.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one * 1000, allow_partial=True, raise_reject=True).order_records,
np.array([
(0, 0, 0, 1000.0, 1.0, 0.0, 1), (1, 0, 1, 550.0, 2.0, 0.0, 0), (2, 0, 3, 1000.0, 4.0, 0.0, 1),
(3, 0, 4, 800.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
with pytest.raises(Exception):
_ = from_orders_both(size=order_size_one * 1000, allow_partial=False, raise_reject=True).order_records
with pytest.raises(Exception):
_ = from_orders_longonly(size=order_size_one * 1000, allow_partial=False, raise_reject=True).order_records
with pytest.raises(Exception):
_ = from_orders_shortonly(size=order_size_one * 1000, allow_partial=False, raise_reject=True).order_records
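    # log=True additionally records the full order context per row (cash, position,
    # debt, valuation, the order request and its result) into log_records using the
    # log_dt dtype.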
def test_log(self):
record_arrays_close(
from_orders_both(log=True).log_records,
np.array([
(0, 0, 0, 0, 100.0, 0.0, 0.0, 100.0, 1.0, 100.0, np.inf, 1.0, 0, 2,
0.0, 0.0, 0.0, 1e-08, np.inf, 0.0, False, True, False, True, 0.0,
100.0, 0.0, 0.0, 1.0, 100.0, 100.0, 1.0, 0.0, 0, 0, -1, 0),
(1, 0, 0, 1, 0.0, 100.0, 0.0, 0.0, 2.0, 200.0, -np.inf, 2.0, 0, 2,
0.0, 0.0, 0.0, 1e-08, np.inf, 0.0, False, True, False, True, 400.0,
-100.0, 200.0, 0.0, 2.0, 200.0, 200.0, 2.0, 0.0, 1, 0, -1, 1),
(2, 0, 0, 2, 400.0, -100.0, 200.0, 0.0, 3.0, 100.0, np.nan, 3.0, 0,
2, 0.0, 0.0, 0.0, 1e-08, np.inf, 0.0, False, True, False, True, 400.0,
-100.0, 200.0, 0.0, 3.0, 100.0, np.nan, np.nan, np.nan, -1, 1, 0, -1),
(3, 0, 0, 3, 400.0, -100.0, 200.0, 0.0, 4.0, 0.0, np.inf, 4.0, 0, 2,
0.0, 0.0, 0.0, 1e-08, np.inf, 0.0, False, True, False, True, 0.0, 0.0,
0.0, 0.0, 4.0, 0.0, 100.0, 4.0, 0.0, 0, 0, -1, 2),
(4, 0, 0, 4, 0.0, 0.0, 0.0, 0.0, 5.0, 0.0, -np.inf, 5.0, 0, 2, 0.0,
0.0, 0.0, 1e-08, np.inf, 0.0, False, True, False, True, 0.0, 0.0,
0.0, 0.0, 5.0, 0.0, np.nan, np.nan, np.nan, -1, 2, 6, -1)
], dtype=log_dt)
)
def test_group_by(self):
pf = from_orders_both(close=price_wide, group_by=np.array([0, 0, 1]))
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 200.0, 2.0, 0.0, 1), (2, 0, 3, 100.0, 4.0, 0.0, 0),
(3, 1, 0, 100.0, 1.0, 0.0, 0), (4, 1, 1, 200.0, 2.0, 0.0, 1), (5, 1, 3, 100.0, 4.0, 0.0, 0),
(6, 2, 0, 100.0, 1.0, 0.0, 0), (7, 2, 1, 200.0, 2.0, 0.0, 1), (8, 2, 3, 100.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
pd.testing.assert_index_equal(
pf.wrapper.grouper.group_by,
pd.Int64Index([0, 0, 1], dtype='int64')
)
pd.testing.assert_series_equal(
pf.init_cash,
pd.Series([200., 100.], index=pd.Int64Index([0, 1], dtype='int64')).rename('init_cash')
)
assert not pf.cash_sharing
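    # With cash_sharing=True, columns of the same group draw from one capital pool:
    # the first column in the call sequence consumes the shared cash, the second
    # column places no orders here, and init_cash is reported per group.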
def test_cash_sharing(self):
pf = from_orders_both(close=price_wide, group_by=np.array([0, 0, 1]), cash_sharing=True)
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 0, 1, 200., 2., 0., 1),
(2, 0, 3, 100., 4., 0., 0), (3, 2, 0, 100., 1., 0., 0),
(4, 2, 1, 200., 2., 0., 1), (5, 2, 3, 100., 4., 0., 0)
], dtype=order_dt)
)
pd.testing.assert_index_equal(
pf.wrapper.grouper.group_by,
pd.Int64Index([0, 0, 1], dtype='int64')
)
pd.testing.assert_series_equal(
pf.init_cash,
pd.Series([100., 100.], index=pd.Int64Index([0, 1], dtype='int64')).rename('init_cash')
)
assert pf.cash_sharing
with pytest.raises(Exception):
_ = pf.regroup(group_by=False)
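    # call_seq controls the per-row order in which columns of a group are processed:
    # 'default' goes left to right, 'reversed' right to left, 'random' shuffles
    # reproducibly under a seed, and 'auto' reorders each row so that cash-freeing
    # (sell) orders run before buys.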
def test_call_seq(self):
pf = from_orders_both(close=price_wide, group_by=np.array([0, 0, 1]), cash_sharing=True)
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 0, 1, 200., 2., 0., 1),
(2, 0, 3, 100., 4., 0., 0), (3, 2, 0, 100., 1., 0., 0),
(4, 2, 1, 200., 2., 0., 1), (5, 2, 3, 100., 4., 0., 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[0, 1, 0],
[0, 1, 0],
[0, 1, 0],
[0, 1, 0],
[0, 1, 0]
])
)
pf = from_orders_both(
close=price_wide, group_by=np.array([0, 0, 1]),
cash_sharing=True, call_seq='reversed')
record_arrays_close(
pf.order_records,
np.array([
(0, 1, 0, 100., 1., 0., 0), (1, 1, 1, 200., 2., 0., 1),
(2, 1, 3, 100., 4., 0., 0), (3, 2, 0, 100., 1., 0., 0),
(4, 2, 1, 200., 2., 0., 1), (5, 2, 3, 100., 4., 0., 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0]
])
)
pf = from_orders_both(
close=price_wide, group_by=np.array([0, 0, 1]),
cash_sharing=True, call_seq='random', seed=seed)
record_arrays_close(
pf.order_records,
np.array([
(0, 1, 0, 100., 1., 0., 0), (1, 1, 1, 200., 2., 0., 1),
(2, 1, 3, 100., 4., 0., 0), (3, 2, 0, 100., 1., 0., 0),
(4, 2, 1, 200., 2., 0., 1), (5, 2, 3, 100., 4., 0., 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[1, 0, 0],
[0, 1, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0]
])
)
kwargs = dict(
close=1.,
size=pd.DataFrame([
[0., 0., np.inf],
[0., np.inf, -np.inf],
[np.inf, -np.inf, 0.],
[-np.inf, 0., np.inf],
[0., np.inf, -np.inf],
]),
group_by=np.array([0, 0, 0]),
cash_sharing=True,
call_seq='auto'
)
pf = from_orders_both(**kwargs)
record_arrays_close(
pf.order_records,
np.array([
(0, 2, 0, 100., 1., 0., 0), (1, 2, 1, 200., 1., 0., 1),
(2, 1, 1, 200., 1., 0., 0), (3, 1, 2, 200., 1., 0., 1),
(4, 0, 2, 200., 1., 0., 0), (5, 0, 3, 200., 1., 0., 1),
(6, 2, 3, 200., 1., 0., 0), (7, 2, 4, 200., 1., 0., 1),
(8, 1, 4, 200., 1., 0., 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[0, 1, 2],
[2, 0, 1],
[1, 2, 0],
[0, 1, 2],
[2, 0, 1]
])
)
pf = from_orders_longonly(**kwargs)
record_arrays_close(
pf.order_records,
np.array([
(0, 2, 0, 100., 1., 0., 0), (1, 2, 1, 100., 1., 0., 1),
(2, 1, 1, 100., 1., 0., 0), (3, 1, 2, 100., 1., 0., 1),
(4, 0, 2, 100., 1., 0., 0), (5, 0, 3, 100., 1., 0., 1),
(6, 2, 3, 100., 1., 0., 0), (7, 2, 4, 100., 1., 0., 1),
(8, 1, 4, 100., 1., 0., 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[0, 1, 2],
[2, 0, 1],
[1, 2, 0],
[0, 1, 2],
[2, 0, 1]
])
)
pf = from_orders_shortonly(**kwargs)
record_arrays_close(
pf.order_records,
np.array([
(0, 2, 0, 100., 1., 0., 1), (1, 2, 1, 100., 1., 0., 0),
(2, 0, 2, 100., 1., 0., 1), (3, 0, 3, 100., 1., 0., 0),
(4, 1, 4, 100., 1., 0., 1)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[2, 0, 1],
[1, 0, 2],
[0, 2, 1],
[2, 1, 0],
[1, 0, 2]
])
)
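    # size_type='value' interprets size as a cash value per order, so the filled
    # amount is value / valuation price (1.0 at price 1, 0.5 at price 2, and so on).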
def test_value(self):
record_arrays_close(
from_orders_both(size=order_size_one, size_type='value').order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 0.5, 2.0, 0.0, 1),
(2, 0, 3, 0.25, 4.0, 0.0, 0), (3, 0, 4, 0.2, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one, size_type='value').order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 0.5, 2.0, 0.0, 1),
(2, 0, 3, 0.25, 4.0, 0.0, 0), (3, 0, 4, 0.2, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one, size_type='value').order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 1, 0.5, 2.0, 0.0, 0),
(2, 0, 3, 0.25, 4.0, 0.0, 1), (3, 0, 4, 0.2, 5.0, 0.0, 0)
], dtype=order_dt)
)
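    # The target* size types order only the difference between the current holding
    # and the requested target amount/value/percent, so a reached target produces
    # no further orders.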
def test_target_amount(self):
record_arrays_close(
from_orders_both(size=[[75., -75.]], size_type='targetamount').order_records,
np.array([
(0, 0, 0, 75.0, 1.0, 0.0, 0), (1, 1, 0, 75.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=[[75., -75.]], size_type='targetamount').order_records,
np.array([
(0, 0, 0, 75.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=[[75., -75.]], size_type='targetamount').order_records,
np.array([
(0, 0, 0, 75.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_both(
close=price_wide, size=75., size_type='targetamount',
group_by=np.array([0, 0, 0]), cash_sharing=True).order_records,
np.array([
(0, 0, 0, 75.0, 1.0, 0.0, 0), (1, 1, 0, 25.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
def test_target_value(self):
record_arrays_close(
from_orders_both(size=[[50., -50.]], size_type='targetvalue').order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 0), (1, 0, 1, 25.0, 2.0, 0.0, 1),
(2, 0, 2, 8.333333333333332, 3.0, 0.0, 1), (3, 0, 3, 4.166666666666668, 4.0, 0.0, 1),
(4, 0, 4, 2.5, 5.0, 0.0, 1), (5, 1, 0, 50.0, 1.0, 0.0, 1),
(6, 1, 1, 25.0, 2.0, 0.0, 0), (7, 1, 2, 8.333333333333332, 3.0, 0.0, 0),
(8, 1, 3, 4.166666666666668, 4.0, 0.0, 0), (9, 1, 4, 2.5, 5.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=[[50., -50.]], size_type='targetvalue').order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 0), (1, 0, 1, 25.0, 2.0, 0.0, 1),
(2, 0, 2, 8.333333333333332, 3.0, 0.0, 1), (3, 0, 3, 4.166666666666668, 4.0, 0.0, 1),
(4, 0, 4, 2.5, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=[[50., -50.]], size_type='targetvalue').order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 1), (1, 0, 1, 25.0, 2.0, 0.0, 0),
(2, 0, 2, 8.333333333333332, 3.0, 0.0, 0), (3, 0, 3, 4.166666666666668, 4.0, 0.0, 0),
(4, 0, 4, 2.5, 5.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_both(
close=price_wide, size=50., size_type='targetvalue',
group_by=np.array([0, 0, 0]), cash_sharing=True).order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 0), (1, 1, 0, 50.0, 1.0, 0.0, 0),
(2, 0, 1, 25.0, 2.0, 0.0, 1), (3, 1, 1, 25.0, 2.0, 0.0, 1),
(4, 2, 1, 25.0, 2.0, 0.0, 0), (5, 0, 2, 8.333333333333332, 3.0, 0.0, 1),
(6, 1, 2, 8.333333333333332, 3.0, 0.0, 1), (7, 2, 2, 8.333333333333332, 3.0, 0.0, 1),
(8, 0, 3, 4.166666666666668, 4.0, 0.0, 1), (9, 1, 3, 4.166666666666668, 4.0, 0.0, 1),
(10, 2, 3, 4.166666666666668, 4.0, 0.0, 1), (11, 0, 4, 2.5, 5.0, 0.0, 1),
(12, 1, 4, 2.5, 5.0, 0.0, 1), (13, 2, 4, 2.5, 5.0, 0.0, 1)
], dtype=order_dt)
)
def test_target_percent(self):
record_arrays_close(
from_orders_both(size=[[0.5, -0.5]], size_type='targetpercent').order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 0), (1, 0, 1, 12.5, 2.0, 0.0, 1), (2, 0, 2, 6.25, 3.0, 0.0, 1),
(3, 0, 3, 3.90625, 4.0, 0.0, 1), (4, 0, 4, 2.734375, 5.0, 0.0, 1), (5, 1, 0, 50.0, 1.0, 0.0, 1),
(6, 1, 1, 37.5, 2.0, 0.0, 0), (7, 1, 2, 6.25, 3.0, 0.0, 0), (8, 1, 3, 2.34375, 4.0, 0.0, 0),
(9, 1, 4, 1.171875, 5.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=[[0.5, -0.5]], size_type='targetpercent').order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 0), (1, 0, 1, 12.5, 2.0, 0.0, 1), (2, 0, 2, 6.25, 3.0, 0.0, 1),
(3, 0, 3, 3.90625, 4.0, 0.0, 1), (4, 0, 4, 2.734375, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=[[0.5, -0.5]], size_type='targetpercent').order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 1), (1, 0, 1, 37.5, 2.0, 0.0, 0), (2, 0, 2, 6.25, 3.0, 0.0, 0),
(3, 0, 3, 2.34375, 4.0, 0.0, 0), (4, 0, 4, 1.171875, 5.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_both(
close=price_wide, size=0.5, size_type='targetpercent',
group_by=np.array([0, 0, 0]), cash_sharing=True).order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 0), (1, 1, 0, 50.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
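    # update_value=True revalues the group after every filled order, so later calls
    # in the same row see the updated group value; without grouping the two settings
    # produce identical records, as the first comparison asserts.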
def test_update_value(self):
record_arrays_close(
from_orders_both(size=0.5, size_type='targetpercent', fees=0.01, slippage=0.01,
update_value=False).order_records,
from_orders_both(size=0.5, size_type='targetpercent', fees=0.01, slippage=0.01,
update_value=True).order_records
)
record_arrays_close(
from_orders_both(
close=price_wide, size=0.5, size_type='targetpercent', fees=0.01, slippage=0.01,
group_by=np.array([0, 0, 0]), cash_sharing=True, update_value=False).order_records,
np.array([
(0, 0, 0, 50.0, 1.01, 0.505, 0),
(1, 1, 0, 48.02960494069208, 1.01, 0.485099009900992, 0),
(2, 0, 1, 0.9851975296539592, 1.98, 0.019506911087148394, 1),
(3, 1, 1, 0.9465661198057499, 2.02, 0.019120635620076154, 0),
(4, 0, 2, 0.019315704924103727, 2.9699999999999998, 0.0005736764362458806, 1),
(5, 1, 2, 0.018558300554959377, 3.0300000000000002, 0.0005623165068152705, 0),
(6, 0, 3, 0.00037870218456959037, 3.96, 1.4996606508955778e-05, 1),
(7, 1, 3, 0.0003638525743521767, 4.04, 1.4699644003827875e-05, 0),
(8, 0, 4, 7.424805112066224e-06, 4.95, 3.675278530472781e-07, 1),
(9, 1, 4, 7.133664827307231e-06, 5.05, 3.6025007377901643e-07, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_both(
close=price_wide, size=0.5, size_type='targetpercent', fees=0.01, slippage=0.01,
group_by=np.array([0, 0, 0]), cash_sharing=True, update_value=True).order_records,
np.array([
(0, 0, 0, 50.0, 1.01, 0.505, 0),
(1, 1, 0, 48.02960494069208, 1.01, 0.485099009900992, 0),
(2, 0, 1, 0.9851975296539592, 1.98, 0.019506911087148394, 1),
(3, 1, 1, 0.7303208018821721, 2.02, 0.014752480198019875, 0),
(4, 2, 1, 0.21624531792357785, 2.02, 0.0043681554220562635, 0),
(5, 0, 2, 0.019315704924103727, 2.9699999999999998, 0.0005736764362458806, 1),
(6, 1, 2, 0.009608602243410758, 2.9699999999999998, 0.00028537548662929945, 1),
(7, 2, 2, 0.02779013180558861, 3.0300000000000002, 0.0008420409937093393, 0),
(8, 0, 3, 0.0005670876809631409, 3.96, 2.2456672166140378e-05, 1),
(9, 1, 3, 0.00037770350099464167, 3.96, 1.4957058639387809e-05, 1),
(10, 2, 3, 0.0009077441794302741, 4.04, 3.6672864848982974e-05, 0),
(11, 0, 4, 1.8523501267964093e-05, 4.95, 9.169133127642227e-07, 1),
(12, 1, 4, 1.2972670177191503e-05, 4.95, 6.421471737709794e-07, 1),
(13, 2, 4, 3.0261148547590434e-05, 5.05, 1.5281880016533242e-06, 0)
], dtype=order_dt)
)
def test_percent(self):
record_arrays_close(
from_orders_both(size=[[0.5, -0.5]], size_type='percent').order_records,
np.array([
(0, 0, 0, 50., 1., 0., 0), (1, 0, 1, 12.5, 2., 0., 0),
(2, 0, 2, 4.16666667, 3., 0., 0), (3, 0, 3, 1.5625, 4., 0., 0),
(4, 0, 4, 0.625, 5., 0., 0), (5, 1, 0, 50., 1., 0., 1),
(6, 1, 1, 12.5, 2., 0., 1), (7, 1, 2, 4.16666667, 3., 0., 1),
(8, 1, 3, 1.5625, 4., 0., 1), (9, 1, 4, 0.625, 5., 0., 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=[[0.5, -0.5]], size_type='percent').order_records,
np.array([
(0, 0, 0, 50., 1., 0., 0), (1, 0, 1, 12.5, 2., 0., 0),
(2, 0, 2, 4.16666667, 3., 0., 0), (3, 0, 3, 1.5625, 4., 0., 0),
(4, 0, 4, 0.625, 5., 0., 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=[[0.5, -0.5]], size_type='percent').order_records,
np.array([
(0, 0, 0, 50., 1., 0., 1), (1, 0, 1, 12.5, 2., 0., 1),
(2, 0, 2, 4.16666667, 3., 0., 1), (3, 0, 3, 1.5625, 4., 0., 1),
(4, 0, 4, 0.625, 5., 0., 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_both(
close=price_wide, size=0.5, size_type='percent',
group_by=np.array([0, 0, 0]), cash_sharing=True).order_records,
np.array([
(0, 0, 0, 5.00000000e+01, 1., 0., 0), (1, 1, 0, 2.50000000e+01, 1., 0., 0),
(2, 2, 0, 1.25000000e+01, 1., 0., 0), (3, 0, 1, 3.12500000e+00, 2., 0., 0),
(4, 1, 1, 1.56250000e+00, 2., 0., 0), (5, 2, 1, 7.81250000e-01, 2., 0., 0),
(6, 0, 2, 2.60416667e-01, 3., 0., 0), (7, 1, 2, 1.30208333e-01, 3., 0., 0),
(8, 2, 2, 6.51041667e-02, 3., 0., 0), (9, 0, 3, 2.44140625e-02, 4., 0., 0),
(10, 1, 3, 1.22070312e-02, 4., 0., 0), (11, 2, 3, 6.10351562e-03, 4., 0., 0),
(12, 0, 4, 2.44140625e-03, 5., 0., 0), (13, 1, 4, 1.22070312e-03, 5., 0., 0),
(14, 2, 4, 6.10351562e-04, 5., 0., 0)
], dtype=order_dt)
)
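    # With call_seq='auto' and target-based sizing, the rebalanced asset values hit
    # the requested targets exactly, since sells within a group free up cash before
    # the buys are executed.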
def test_auto_seq(self):
target_hold_value = pd.DataFrame({
'a': [0., 70., 30., 0., 70.],
'b': [30., 0., 70., 30., 30.],
'c': [70., 30., 0., 70., 0.]
}, index=price.index)
pd.testing.assert_frame_equal(
from_orders_both(
close=1., size=target_hold_value, size_type='targetvalue',
group_by=np.array([0, 0, 0]), cash_sharing=True,
call_seq='auto').asset_value(group_by=False),
target_hold_value
)
pd.testing.assert_frame_equal(
from_orders_both(
close=1., size=target_hold_value / 100, size_type='targetpercent',
group_by=np.array([0, 0, 0]), cash_sharing=True,
call_seq='auto').asset_value(group_by=False),
target_hold_value
)
def test_max_orders(self):
_ = from_orders_both(close=price_wide)
_ = from_orders_both(close=price_wide, max_orders=9)
with pytest.raises(Exception):
_ = from_orders_both(close=price_wide, max_orders=8)
def test_max_logs(self):
_ = from_orders_both(close=price_wide, log=True)
_ = from_orders_both(close=price_wide, log=True, max_logs=15)
with pytest.raises(Exception):
_ = from_orders_both(close=price_wide, log=True, max_logs=14)
# ############# from_signals ############# #
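# Module-level fixtures for the from_signals tests below: entry signals on the first
# three rows, exit signals on the last three, plus wide (three-column) variants.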
entries = pd.Series([True, True, True, False, False], index=price.index)
entries_wide = entries.vbt.tile(3, keys=['a', 'b', 'c'])
exits = pd.Series([False, False, True, True, True], index=price.index)
exits_wide = exits.vbt.tile(3, keys=['a', 'b', 'c'])
def from_signals_both(close=price, entries=entries, exits=exits, **kwargs):
return vbt.Portfolio.from_signals(close, entries, exits, direction='both', **kwargs)
def from_signals_longonly(close=price, entries=entries, exits=exits, **kwargs):
return vbt.Portfolio.from_signals(close, entries, exits, direction='longonly', **kwargs)
def from_signals_shortonly(close=price, entries=entries, exits=exits, **kwargs):
return vbt.Portfolio.from_signals(close, entries, exits, direction='shortonly', **kwargs)
def from_ls_signals_both(close=price, entries=entries, exits=exits, **kwargs):
return vbt.Portfolio.from_signals(close, entries, False, exits, False, **kwargs)
def from_ls_signals_longonly(close=price, entries=entries, exits=exits, **kwargs):
return vbt.Portfolio.from_signals(close, entries, exits, False, False, **kwargs)
def from_ls_signals_shortonly(close=price, entries=entries, exits=exits, **kwargs):
return vbt.Portfolio.from_signals(close, False, False, entries, exits, **kwargs)
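# The from_signals_* helpers fix `direction`, while the from_ls_signals_* helpers
# express the same three behaviors through the positional
# (entries, exits, short_entries, short_exits) form of from_signals: 'both' maps
# the exit signals to short entries, and 'shortonly' passes the signal pair as the
# short entries/exits.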
class TestFromSignals:
@pytest.mark.parametrize(
"test_ls",
[False, True],
)
def test_one_column(self, test_ls):
_from_signals_both = from_ls_signals_both if test_ls else from_signals_both
_from_signals_longonly = from_ls_signals_longonly if test_ls else from_signals_longonly
_from_signals_shortonly = from_ls_signals_shortonly if test_ls else from_signals_shortonly
record_arrays_close(
_from_signals_both().order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 0, 3, 200., 4., 0., 1)
], dtype=order_dt)
)
record_arrays_close(
_from_signals_longonly().order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 0, 3, 100., 4., 0., 1)
], dtype=order_dt)
)
record_arrays_close(
_from_signals_shortonly().order_records,
np.array([
(0, 0, 0, 100., 1., 0., 1), (1, 0, 3, 50., 4., 0., 0)
], dtype=order_dt)
)
pf = _from_signals_both()
pd.testing.assert_index_equal(
pf.wrapper.index,
pd.DatetimeIndex(['2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05'])
)
pd.testing.assert_index_equal(
pf.wrapper.columns,
pd.Int64Index([0], dtype='int64')
)
assert pf.wrapper.ndim == 1
assert pf.wrapper.freq == day_dt
assert pf.wrapper.grouper.group_by is None
@pytest.mark.parametrize(
"test_ls",
[False, True],
)
def test_multiple_columns(self, test_ls):
_from_signals_both = from_ls_signals_both if test_ls else from_signals_both
_from_signals_longonly = from_ls_signals_longonly if test_ls else from_signals_longonly
_from_signals_shortonly = from_ls_signals_shortonly if test_ls else from_signals_shortonly
record_arrays_close(
_from_signals_both(close=price_wide).order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 0, 3, 200., 4., 0., 1),
(2, 1, 0, 100., 1., 0., 0), (3, 1, 3, 200., 4., 0., 1),
(4, 2, 0, 100., 1., 0., 0), (5, 2, 3, 200., 4., 0., 1)
], dtype=order_dt)
)
record_arrays_close(
_from_signals_longonly(close=price_wide).order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 0, 3, 100., 4., 0., 1),
(2, 1, 0, 100., 1., 0., 0), (3, 1, 3, 100., 4., 0., 1),
(4, 2, 0, 100., 1., 0., 0), (5, 2, 3, 100., 4., 0., 1)
], dtype=order_dt)
)
record_arrays_close(
_from_signals_shortonly(close=price_wide).order_records,
np.array([
(0, 0, 0, 100., 1., 0., 1), (1, 0, 3, 50., 4., 0., 0),
(2, 1, 0, 100., 1., 0., 1), (3, 1, 3, 50., 4., 0., 0),
(4, 2, 0, 100., 1., 0., 1), (5, 2, 3, 50., 4., 0., 0)
], dtype=order_dt)
)
pf = _from_signals_both(close=price_wide)
pd.testing.assert_index_equal(
pf.wrapper.index,
pd.DatetimeIndex(['2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05'])
)
pd.testing.assert_index_equal(
pf.wrapper.columns,
pd.Index(['a', 'b', 'c'], dtype='object')
)
assert pf.wrapper.ndim == 2
assert pf.wrapper.freq == day_dt
assert pf.wrapper.grouper.group_by is None
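    # signal_func_nb replaces the precomputed signal arrays: for each element it must
    # return the tuple (long_entry, long_exit, short_entry, short_exit). Extra inputs
    # arrive through signal_args, where vbt.Rep placeholders are resolved against
    # broadcast_named_args; the result must match the equivalent explicit-signal run.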
def test_custom_signal_func(self):
@njit
def signal_func_nb(c, long_num_arr, short_num_arr):
long_num = nb.get_elem_nb(c, long_num_arr)
short_num = nb.get_elem_nb(c, short_num_arr)
is_long_entry = long_num > 0
is_long_exit = long_num < 0
is_short_entry = short_num > 0
is_short_exit = short_num < 0
return is_long_entry, is_long_exit, is_short_entry, is_short_exit
pf_base = vbt.Portfolio.from_signals(
pd.Series([1, 2, 3, 4, 5]),
entries=pd.Series([True, False, False, False, False]),
exits=pd.Series([False, False, True, False, False]),
short_entries=pd.Series([False, True, False, True, False]),
short_exits=pd.Series([False, False, False, False, True]),
size=1,
upon_opposite_entry='ignore'
)
pf = vbt.Portfolio.from_signals(
pd.Series([1, 2, 3, 4, 5]),
signal_func_nb=signal_func_nb,
signal_args=(vbt.Rep('long_num_arr'), vbt.Rep('short_num_arr')),
broadcast_named_args=dict(
long_num_arr=pd.Series([1, 0, -1, 0, 0]),
short_num_arr=pd.Series([0, 1, 0, 1, -1])
),
size=1,
upon_opposite_entry='ignore'
)
record_arrays_close(
pf_base.order_records,
pf.order_records
)
def test_amount(self):
record_arrays_close(
from_signals_both(size=[[0, 1, np.inf]], size_type='amount').order_records,
np.array([
(0, 1, 0, 1.0, 1.0, 0.0, 0), (1, 1, 3, 2.0, 4.0, 0.0, 1),
(2, 2, 0, 100.0, 1.0, 0.0, 0), (3, 2, 3, 200.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=[[0, 1, np.inf]], size_type='amount').order_records,
np.array([
(0, 1, 0, 1.0, 1.0, 0.0, 0), (1, 1, 3, 1.0, 4.0, 0.0, 1),
(2, 2, 0, 100.0, 1.0, 0.0, 0), (3, 2, 3, 100.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=[[0, 1, np.inf]], size_type='amount').order_records,
np.array([
(0, 1, 0, 1.0, 1.0, 0.0, 1), (1, 1, 3, 1.0, 4.0, 0.0, 0),
(2, 2, 0, 100.0, 1.0, 0.0, 1), (3, 2, 3, 50.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
def test_value(self):
record_arrays_close(
from_signals_both(size=[[0, 1, np.inf]], size_type='value').order_records,
np.array([
(0, 1, 0, 1.0, 1.0, 0.0, 0), (1, 1, 3, 0.3125, 4.0, 0.0, 1),
(2, 1, 4, 0.1775, 5.0, 0.0, 1), (3, 2, 0, 100.0, 1.0, 0.0, 0),
(4, 2, 3, 200.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=[[0, 1, np.inf]], size_type='value').order_records,
np.array([
(0, 1, 0, 1.0, 1.0, 0.0, 0), (1, 1, 3, 1.0, 4.0, 0.0, 1),
(2, 2, 0, 100.0, 1.0, 0.0, 0), (3, 2, 3, 100.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=[[0, 1, np.inf]], size_type='value').order_records,
np.array([
(0, 1, 0, 1.0, 1.0, 0.0, 1), (1, 1, 3, 1.0, 4.0, 0.0, 0),
(2, 2, 0, 100.0, 1.0, 0.0, 1), (3, 2, 3, 50.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
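    # Percent sizing cannot reverse a position on an opposite entry signal, hence the
    # expected exception for direction='both'; with upon_opposite_entry='close' the
    # position is closed first, which percent sizing supports.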
def test_percent(self):
with pytest.raises(Exception):
_ = from_signals_both(size=0.5, size_type='percent')
record_arrays_close(
from_signals_both(size=0.5, size_type='percent', upon_opposite_entry='close').order_records,
np.array([
(0, 0, 0, 50., 1., 0., 0), (1, 0, 3, 50., 4., 0., 1), (2, 0, 4, 25., 5., 0., 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_both(size=0.5, size_type='percent', upon_opposite_entry='close',
accumulate=True).order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 0), (1, 0, 1, 12.5, 2.0, 0.0, 0),
(2, 0, 3, 62.5, 4.0, 0.0, 1), (3, 0, 4, 27.5, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=0.5, size_type='percent').order_records,
np.array([
(0, 0, 0, 50., 1., 0., 0), (1, 0, 3, 50., 4., 0., 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=0.5, size_type='percent').order_records,
np.array([
(0, 0, 0, 50., 1., 0., 1), (1, 0, 3, 37.5, 4., 0., 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(
close=price_wide, size=0.5, size_type='percent',
group_by=np.array([0, 0, 0]), cash_sharing=True).order_records,
np.array([
(0, 0, 0, 50., 1., 0., 0), (1, 1, 0, 25., 1., 0., 0),
(2, 2, 0, 12.5, 1., 0., 0), (3, 0, 3, 50., 4., 0., 1),
(4, 1, 3, 25., 4., 0., 1), (5, 2, 3, 12.5, 4., 0., 1)
], dtype=order_dt)
)
def test_price(self):
record_arrays_close(
from_signals_both(price=price * 1.01).order_records,
np.array([
(0, 0, 0, 99.00990099009901, 1.01, 0.0, 0), (1, 0, 3, 198.01980198019803, 4.04, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(price=price * 1.01).order_records,
np.array([
(0, 0, 0, 99.00990099, 1.01, 0., 0), (1, 0, 3, 99.00990099, 4.04, 0., 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(price=price * 1.01).order_records,
np.array([
(0, 0, 0, 99.00990099009901, 1.01, 0.0, 1), (1, 0, 3, 49.504950495049506, 4.04, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_both(price=np.inf).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 3, 200.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(price=np.inf).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 3, 100.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(price=np.inf).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1), (1, 0, 3, 50.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_both(price=-np.inf).order_records,
np.array([
(0, 0, 1, 100.0, 1.0, 0.0, 0), (1, 0, 3, 200.0, 3.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(price=-np.inf).order_records,
np.array([
(0, 0, 1, 100.0, 1.0, 0.0, 0), (1, 0, 3, 100.0, 3.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(price=-np.inf).order_records,
np.array([
(0, 0, 1, 100.0, 1.0, 0.0, 1), (1, 0, 3, 66.66666666666667, 3.0, 0.0, 0)
], dtype=order_dt)
)
def test_val_price(self):
price_nan = pd.Series([1, 2, np.nan, 4, 5], index=price.index)
record_arrays_close(
from_signals_both(close=price_nan, size=1, val_price=np.inf,
size_type='value').order_records,
from_signals_both(close=price_nan, size=1, val_price=price,
size_type='value').order_records
)
record_arrays_close(
from_signals_longonly(close=price_nan, size=1, val_price=np.inf,
size_type='value').order_records,
from_signals_longonly(close=price_nan, size=1, val_price=price,
size_type='value').order_records
)
record_arrays_close(
from_signals_shortonly(close=price_nan, size=1, val_price=np.inf,
size_type='value').order_records,
from_signals_shortonly(close=price_nan, size=1, val_price=price,
size_type='value').order_records
)
shift_price = price_nan.ffill().shift(1)
record_arrays_close(
from_signals_both(close=price_nan, size=1, val_price=-np.inf,
size_type='value').order_records,
from_signals_both(close=price_nan, size=1, val_price=shift_price,
size_type='value').order_records
)
record_arrays_close(
from_signals_longonly(close=price_nan, size=1, val_price=-np.inf,
size_type='value').order_records,
from_signals_longonly(close=price_nan, size=1, val_price=shift_price,
size_type='value').order_records
)
record_arrays_close(
from_signals_shortonly(close=price_nan, size=1, val_price=-np.inf,
size_type='value').order_records,
from_signals_shortonly(close=price_nan, size=1, val_price=shift_price,
size_type='value').order_records
)
record_arrays_close(
from_signals_both(close=price_nan, size=1, val_price=np.inf,
size_type='value', ffill_val_price=False).order_records,
from_signals_both(close=price_nan, size=1, val_price=price_nan,
size_type='value', ffill_val_price=False).order_records
)
record_arrays_close(
from_signals_longonly(close=price_nan, size=1, val_price=np.inf,
size_type='value', ffill_val_price=False).order_records,
from_signals_longonly(close=price_nan, size=1, val_price=price_nan,
size_type='value', ffill_val_price=False).order_records
)
record_arrays_close(
from_signals_shortonly(close=price_nan, size=1, val_price=np.inf,
size_type='value', ffill_val_price=False).order_records,
from_signals_shortonly(close=price_nan, size=1, val_price=price_nan,
size_type='value', ffill_val_price=False).order_records
)
shift_price_nan = price_nan.shift(1)
record_arrays_close(
from_signals_both(close=price_nan, size=1, val_price=-np.inf,
size_type='value', ffill_val_price=False).order_records,
from_signals_both(close=price_nan, size=1, val_price=shift_price_nan,
size_type='value', ffill_val_price=False).order_records
)
record_arrays_close(
from_signals_longonly(close=price_nan, size=1, val_price=-np.inf,
size_type='value', ffill_val_price=False).order_records,
from_signals_longonly(close=price_nan, size=1, val_price=shift_price_nan,
size_type='value', ffill_val_price=False).order_records
)
record_arrays_close(
from_signals_shortonly(close=price_nan, size=1, val_price=-np.inf,
size_type='value', ffill_val_price=False).order_records,
from_signals_shortonly(close=price_nan, size=1, val_price=shift_price_nan,
size_type='value', ffill_val_price=False).order_records
)
def test_fees(self):
record_arrays_close(
from_signals_both(size=1, fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 3, 2.0, 4.0, 0.0, 1), (2, 1, 0, 1.0, 1.0, 0.1, 0),
(3, 1, 3, 2.0, 4.0, 0.8, 1), (4, 2, 0, 1.0, 1.0, 1.0, 0), (5, 2, 3, 2.0, 4.0, 8.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1, fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 3, 1.0, 4.0, 0.0, 1), (2, 1, 0, 1.0, 1.0, 0.1, 0),
(3, 1, 3, 1.0, 4.0, 0.4, 1), (4, 2, 0, 1.0, 1.0, 1.0, 0), (5, 2, 3, 1.0, 4.0, 4.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1, fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 3, 1.0, 4.0, 0.0, 0), (2, 1, 0, 1.0, 1.0, 0.1, 1),
(3, 1, 3, 1.0, 4.0, 0.4, 0), (4, 2, 0, 1.0, 1.0, 1.0, 1), (5, 2, 3, 1.0, 4.0, 4.0, 0)
], dtype=order_dt)
)
def test_fixed_fees(self):
record_arrays_close(
from_signals_both(size=1, fixed_fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 3, 2.0, 4.0, 0.0, 1), (2, 1, 0, 1.0, 1.0, 0.1, 0),
(3, 1, 3, 2.0, 4.0, 0.1, 1), (4, 2, 0, 1.0, 1.0, 1.0, 0), (5, 2, 3, 2.0, 4.0, 1.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1, fixed_fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 3, 1.0, 4.0, 0.0, 1), (2, 1, 0, 1.0, 1.0, 0.1, 0),
(3, 1, 3, 1.0, 4.0, 0.1, 1), (4, 2, 0, 1.0, 1.0, 1.0, 0), (5, 2, 3, 1.0, 4.0, 1.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1, fixed_fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 3, 1.0, 4.0, 0.0, 0), (2, 1, 0, 1.0, 1.0, 0.1, 1),
(3, 1, 3, 1.0, 4.0, 0.1, 0), (4, 2, 0, 1.0, 1.0, 1.0, 1), (5, 2, 3, 1.0, 4.0, 1.0, 0)
], dtype=order_dt)
)
def test_slippage(self):
record_arrays_close(
from_signals_both(size=1, slippage=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 3, 2.0, 4.0, 0.0, 1), (2, 1, 0, 1.0, 1.1, 0.0, 0),
(3, 1, 3, 2.0, 3.6, 0.0, 1), (4, 2, 0, 1.0, 2.0, 0.0, 0), (5, 2, 3, 2.0, 0.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1, slippage=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 3, 1.0, 4.0, 0.0, 1), (2, 1, 0, 1.0, 1.1, 0.0, 0),
(3, 1, 3, 1.0, 3.6, 0.0, 1), (4, 2, 0, 1.0, 2.0, 0.0, 0), (5, 2, 3, 1.0, 0.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1, slippage=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 3, 1.0, 4.0, 0.0, 0), (2, 1, 0, 1.0, 0.9, 0.0, 1),
(3, 1, 3, 1.0, 4.4, 0.0, 0), (4, 2, 0, 1.0, 0.0, 0.0, 1), (5, 2, 3, 1.0, 8.0, 0.0, 0)
], dtype=order_dt)
)
def test_min_size(self):
record_arrays_close(
from_signals_both(size=1, min_size=[[0., 1., 2.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 3, 2.0, 4.0, 0.0, 1), (2, 1, 0, 1.0, 1.0, 0.0, 0),
(3, 1, 3, 2.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1, min_size=[[0., 1., 2.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 3, 1.0, 4.0, 0.0, 1), (2, 1, 0, 1.0, 1.0, 0.0, 0),
(3, 1, 3, 1.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1, min_size=[[0., 1., 2.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 3, 1.0, 4.0, 0.0, 0), (2, 1, 0, 1.0, 1.0, 0.0, 1),
(3, 1, 3, 1.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
def test_max_size(self):
record_arrays_close(
from_signals_both(size=1, max_size=[[0.5, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 0.5, 1.0, 0.0, 0), (1, 0, 3, 0.5, 4.0, 0.0, 1), (2, 0, 4, 0.5, 5.0, 0.0, 1),
(3, 1, 0, 1.0, 1.0, 0.0, 0), (4, 1, 3, 1.0, 4.0, 0.0, 1), (5, 1, 4, 1.0, 5.0, 0.0, 1),
(6, 2, 0, 1.0, 1.0, 0.0, 0), (7, 2, 3, 2.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1, max_size=[[0.5, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 0.5, 1.0, 0.0, 0), (1, 0, 3, 0.5, 4.0, 0.0, 1), (2, 1, 0, 1.0, 1.0, 0.0, 0),
(3, 1, 3, 1.0, 4.0, 0.0, 1), (4, 2, 0, 1.0, 1.0, 0.0, 0), (5, 2, 3, 1.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1, max_size=[[0.5, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 0.5, 1.0, 0.0, 1), (1, 0, 3, 0.5, 4.0, 0.0, 0), (2, 1, 0, 1.0, 1.0, 0.0, 1),
(3, 1, 3, 1.0, 4.0, 0.0, 0), (4, 2, 0, 1.0, 1.0, 0.0, 1), (5, 2, 3, 1.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
def test_reject_prob(self):
record_arrays_close(
from_signals_both(size=1., reject_prob=[[0., 0.5, 1.]], seed=42).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 3, 2.0, 4.0, 0.0, 1), (2, 1, 1, 1.0, 2.0, 0.0, 0),
(3, 1, 3, 2.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1., reject_prob=[[0., 0.5, 1.]], seed=42).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 3, 1.0, 4.0, 0.0, 1), (2, 1, 1, 1.0, 2.0, 0.0, 0),
(3, 1, 3, 1.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1., reject_prob=[[0., 0.5, 1.]], seed=42).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 3, 1.0, 4.0, 0.0, 0), (2, 1, 1, 1.0, 2.0, 0.0, 1),
(3, 1, 3, 1.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
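    # allow_partial=False rejects any order that cannot be filled in full
    # (note the missing entry for the second column when size=1000 below).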
def test_allow_partial(self):
record_arrays_close(
from_signals_both(size=1000, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 3, 1100.0, 4.0, 0.0, 1), (2, 1, 3, 1000.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1000, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 3, 100.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1000, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 1000.0, 1.0, 0.0, 1), (1, 0, 3, 275.0, 4.0, 0.0, 0), (2, 1, 0, 1000.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_both(size=np.inf, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 3, 200.0, 4.0, 0.0, 1), (2, 1, 0, 100.0, 1.0, 0.0, 0),
(3, 1, 3, 200.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=np.inf, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 3, 100.0, 4.0, 0.0, 1), (2, 1, 0, 100.0, 1.0, 0.0, 0),
(3, 1, 3, 100.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=np.inf, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1), (1, 0, 3, 50.0, 4.0, 0.0, 0), (2, 1, 0, 100.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
def test_raise_reject(self):
record_arrays_close(
from_signals_both(size=1000, allow_partial=True, raise_reject=True).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 3, 1100.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1000, allow_partial=True, raise_reject=True).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 3, 100.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
with pytest.raises(Exception):
_ = from_signals_shortonly(size=1000, allow_partial=True, raise_reject=True).order_records
with pytest.raises(Exception):
_ = from_signals_both(size=1000, allow_partial=False, raise_reject=True).order_records
with pytest.raises(Exception):
_ = from_signals_longonly(size=1000, allow_partial=False, raise_reject=True).order_records
with pytest.raises(Exception):
_ = from_signals_shortonly(size=1000, allow_partial=False, raise_reject=True).order_records
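    # log=True records the full order context (cash, position, valuation, etc.) in log_records.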
def test_log(self):
record_arrays_close(
from_signals_both(log=True).log_records,
np.array([
(0, 0, 0, 0, 100.0, 0.0, 0.0, 100.0, 1.0, 100.0, np.inf, 1.0, 0, 2, 0.0, 0.0,
0.0, 1e-08, np.inf, 0.0, False, True, False, True, 0.0, 100.0, 0.0, 0.0, 1.0,
100.0, 100.0, 1.0, 0.0, 0, 0, -1, 0),
(1, 0, 0, 3, 0.0, 100.0, 0.0, 0.0, 4.0, 400.0, -np.inf, 4.0, 0, 2, 0.0,
0.0, 0.0, 1e-08, np.inf, 0.0, False, True, False, True, 800.0, -100.0,
400.0, 0.0, 4.0, 400.0, 200.0, 4.0, 0.0, 1, 0, -1, 1)
], dtype=log_dt)
)
def test_accumulate(self):
record_arrays_close(
from_signals_both(size=1, accumulate=[['disabled', 'addonly', 'removeonly', 'both']]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 3, 2.0, 4.0, 0.0, 1), (2, 1, 0, 1.0, 1.0, 0.0, 0),
(3, 1, 1, 1.0, 2.0, 0.0, 0), (4, 1, 3, 3.0, 4.0, 0.0, 1), (5, 1, 4, 1.0, 5.0, 0.0, 1),
(6, 2, 0, 1.0, 1.0, 0.0, 0), (7, 2, 3, 1.0, 4.0, 0.0, 1), (8, 2, 4, 1.0, 5.0, 0.0, 1),
(9, 3, 0, 1.0, 1.0, 0.0, 0), (10, 3, 1, 1.0, 2.0, 0.0, 0), (11, 3, 3, 1.0, 4.0, 0.0, 1),
(12, 3, 4, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1, accumulate=[['disabled', 'addonly', 'removeonly', 'both']]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 3, 1.0, 4.0, 0.0, 1), (2, 1, 0, 1.0, 1.0, 0.0, 0),
(3, 1, 1, 1.0, 2.0, 0.0, 0), (4, 1, 3, 2.0, 4.0, 0.0, 1), (5, 2, 0, 1.0, 1.0, 0.0, 0),
(6, 2, 3, 1.0, 4.0, 0.0, 1), (7, 3, 0, 1.0, 1.0, 0.0, 0), (8, 3, 1, 1.0, 2.0, 0.0, 0),
(9, 3, 3, 1.0, 4.0, 0.0, 1), (10, 3, 4, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1, accumulate=[['disabled', 'addonly', 'removeonly', 'both']]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 3, 1.0, 4.0, 0.0, 0), (2, 1, 0, 1.0, 1.0, 0.0, 1),
(3, 1, 1, 1.0, 2.0, 0.0, 1), (4, 1, 3, 2.0, 4.0, 0.0, 0), (5, 2, 0, 1.0, 1.0, 0.0, 1),
(6, 2, 3, 1.0, 4.0, 0.0, 0), (7, 3, 0, 1.0, 1.0, 0.0, 1), (8, 3, 1, 1.0, 2.0, 0.0, 1),
(9, 3, 3, 1.0, 4.0, 0.0, 0), (10, 3, 4, 1.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
def test_upon_long_conflict(self):
kwargs = dict(
close=price[:3],
entries=pd.DataFrame([
[True, True, True, True, True, True, True],
[True, True, True, True, False, True, False],
[True, True, True, True, True, True, True]
]),
exits=pd.DataFrame([
[True, True, True, True, True, True, True],
[False, False, False, False, True, False, True],
[True, True, True, True, True, True, True]
]),
size=1.,
accumulate=True,
upon_long_conflict=[[
'ignore',
'entry',
'exit',
'adjacent',
'adjacent',
'opposite',
'opposite'
]]
)
record_arrays_close(
from_signals_longonly(**kwargs).order_records,
np.array([
(0, 0, 1, 1.0, 2.0, 0.0, 0),
(1, 1, 0, 1.0, 1.0, 0.0, 0), (2, 1, 1, 1.0, 2.0, 0.0, 0), (3, 1, 2, 1.0, 3.0, 0.0, 0),
(4, 2, 1, 1.0, 2.0, 0.0, 0), (5, 2, 2, 1.0, 3.0, 0.0, 1),
(6, 3, 1, 1.0, 2.0, 0.0, 0), (7, 3, 2, 1.0, 3.0, 0.0, 0),
(8, 5, 1, 1.0, 2.0, 0.0, 0), (9, 5, 2, 1.0, 3.0, 0.0, 1)
], dtype=order_dt)
)
def test_upon_short_conflict(self):
kwargs = dict(
close=price[:3],
entries=pd.DataFrame([
[True, True, True, True, True, True, True],
[True, True, True, True, False, True, False],
[True, True, True, True, True, True, True]
]),
exits=pd.DataFrame([
[True, True, True, True, True, True, True],
[False, False, False, False, True, False, True],
[True, True, True, True, True, True, True]
]),
size=1.,
accumulate=True,
upon_short_conflict=[[
'ignore',
'entry',
'exit',
'adjacent',
'adjacent',
'opposite',
'opposite'
]]
)
record_arrays_close(
from_signals_shortonly(**kwargs).order_records,
np.array([
(0, 0, 1, 1.0, 2.0, 0.0, 1),
(1, 1, 0, 1.0, 1.0, 0.0, 1), (2, 1, 1, 1.0, 2.0, 0.0, 1), (3, 1, 2, 1.0, 3.0, 0.0, 1),
(4, 2, 1, 1.0, 2.0, 0.0, 1), (5, 2, 2, 1.0, 3.0, 0.0, 0),
(6, 3, 1, 1.0, 2.0, 0.0, 1), (7, 3, 2, 1.0, 3.0, 0.0, 1),
(8, 5, 1, 1.0, 2.0, 0.0, 1), (9, 5, 2, 1.0, 3.0, 0.0, 0)
], dtype=order_dt)
)
def test_upon_dir_conflict(self):
kwargs = dict(
close=price[:3],
entries=pd.DataFrame([
[True, True, True, True, True, True, True],
[True, True, True, True, False, True, False],
[True, True, True, True, True, True, True]
]),
exits=pd.DataFrame([
[True, True, True, True, True, True, True],
[False, False, False, False, True, False, True],
[True, True, True, True, True, True, True]
]),
size=1.,
accumulate=True,
upon_dir_conflict=[[
'ignore',
'long',
'short',
'adjacent',
'adjacent',
'opposite',
'opposite'
]]
)
record_arrays_close(
from_signals_both(**kwargs).order_records,
np.array([
(0, 0, 1, 1.0, 2.0, 0.0, 0),
(1, 1, 0, 1.0, 1.0, 0.0, 0), (2, 1, 1, 1.0, 2.0, 0.0, 0), (3, 1, 2, 1.0, 3.0, 0.0, 0),
(4, 2, 0, 1.0, 1.0, 0.0, 1), (5, 2, 1, 1.0, 2.0, 0.0, 0), (6, 2, 2, 1.0, 3.0, 0.0, 1),
(7, 3, 1, 1.0, 2.0, 0.0, 0), (8, 3, 2, 1.0, 3.0, 0.0, 0),
(9, 4, 1, 1.0, 2.0, 0.0, 1), (10, 4, 2, 1.0, 3.0, 0.0, 1),
(11, 5, 1, 1.0, 2.0, 0.0, 0), (12, 5, 2, 1.0, 3.0, 0.0, 1),
(13, 6, 1, 1.0, 2.0, 0.0, 1), (14, 6, 2, 1.0, 3.0, 0.0, 0)
], dtype=order_dt)
)
def test_upon_opposite_entry(self):
kwargs = dict(
close=price[:3],
entries=pd.DataFrame([
[True, False, True, False, True, False, True, False, True, False],
[False, True, False, True, False, True, False, True, False, True],
[True, False, True, False, True, False, True, False, True, False]
]),
exits=pd.DataFrame([
[False, True, False, True, False, True, False, True, False, True],
[True, False, True, False, True, False, True, False, True, False],
[False, True, False, True, False, True, False, True, False, True]
]),
size=1.,
upon_opposite_entry=[[
'ignore',
'ignore',
'close',
'close',
'closereduce',
'closereduce',
'reverse',
'reverse',
'reversereduce',
'reversereduce'
]]
)
record_arrays_close(
from_signals_both(**kwargs).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0),
(1, 1, 0, 1.0, 1.0, 0.0, 1),
(2, 2, 0, 1.0, 1.0, 0.0, 0), (3, 2, 1, 1.0, 2.0, 0.0, 1), (4, 2, 2, 1.0, 3.0, 0.0, 0),
(5, 3, 0, 1.0, 1.0, 0.0, 1), (6, 3, 1, 1.0, 2.0, 0.0, 0), (7, 3, 2, 1.0, 3.0, 0.0, 1),
(8, 4, 0, 1.0, 1.0, 0.0, 0), (9, 4, 1, 1.0, 2.0, 0.0, 1), (10, 4, 2, 1.0, 3.0, 0.0, 0),
(11, 5, 0, 1.0, 1.0, 0.0, 1), (12, 5, 1, 1.0, 2.0, 0.0, 0), (13, 5, 2, 1.0, 3.0, 0.0, 1),
(14, 6, 0, 1.0, 1.0, 0.0, 0), (15, 6, 1, 2.0, 2.0, 0.0, 1), (16, 6, 2, 2.0, 3.0, 0.0, 0),
(17, 7, 0, 1.0, 1.0, 0.0, 1), (18, 7, 1, 2.0, 2.0, 0.0, 0), (19, 7, 2, 2.0, 3.0, 0.0, 1),
(20, 8, 0, 1.0, 1.0, 0.0, 0), (21, 8, 1, 2.0, 2.0, 0.0, 1), (22, 8, 2, 2.0, 3.0, 0.0, 0),
(23, 9, 0, 1.0, 1.0, 0.0, 1), (24, 9, 1, 2.0, 2.0, 0.0, 0), (25, 9, 2, 2.0, 3.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_both(**kwargs, accumulate=True).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 2, 1.0, 3.0, 0.0, 0),
(2, 1, 0, 1.0, 1.0, 0.0, 1), (3, 1, 2, 1.0, 3.0, 0.0, 1),
(4, 2, 0, 1.0, 1.0, 0.0, 0), (5, 2, 1, 1.0, 2.0, 0.0, 1), (6, 2, 2, 1.0, 3.0, 0.0, 0),
(7, 3, 0, 1.0, 1.0, 0.0, 1), (8, 3, 1, 1.0, 2.0, 0.0, 0), (9, 3, 2, 1.0, 3.0, 0.0, 1),
(10, 4, 0, 1.0, 1.0, 0.0, 0), (11, 4, 1, 1.0, 2.0, 0.0, 1), (12, 4, 2, 1.0, 3.0, 0.0, 0),
(13, 5, 0, 1.0, 1.0, 0.0, 1), (14, 5, 1, 1.0, 2.0, 0.0, 0), (15, 5, 2, 1.0, 3.0, 0.0, 1),
(16, 6, 0, 1.0, 1.0, 0.0, 0), (17, 6, 1, 2.0, 2.0, 0.0, 1), (18, 6, 2, 2.0, 3.0, 0.0, 0),
(19, 7, 0, 1.0, 1.0, 0.0, 1), (20, 7, 1, 2.0, 2.0, 0.0, 0), (21, 7, 2, 2.0, 3.0, 0.0, 1),
(22, 8, 0, 1.0, 1.0, 0.0, 0), (23, 8, 1, 1.0, 2.0, 0.0, 1), (24, 8, 2, 1.0, 3.0, 0.0, 0),
(25, 9, 0, 1.0, 1.0, 0.0, 1), (26, 9, 1, 1.0, 2.0, 0.0, 0), (27, 9, 2, 1.0, 3.0, 0.0, 1)
], dtype=order_dt)
)
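    # init_cash caps how much can be bought per column; an infinite initial balance is rejected.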
def test_init_cash(self):
record_arrays_close(
from_signals_both(close=price_wide, size=1., init_cash=[0., 1., 100.]).order_records,
np.array([
(0, 0, 3, 1.0, 4.0, 0.0, 1), (1, 1, 0, 1.0, 1.0, 0.0, 0), (2, 1, 3, 2.0, 4.0, 0.0, 1),
(3, 2, 0, 1.0, 1.0, 0.0, 0), (4, 2, 3, 2.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(close=price_wide, size=1., init_cash=[0., 1., 100.]).order_records,
np.array([
(0, 1, 0, 1.0, 1.0, 0.0, 0), (1, 1, 3, 1.0, 4.0, 0.0, 1), (2, 2, 0, 1.0, 1.0, 0.0, 0),
(3, 2, 3, 1.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(close=price_wide, size=1., init_cash=[0., 1., 100.]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 3, 0.25, 4.0, 0.0, 0), (2, 1, 0, 1.0, 1.0, 0.0, 1),
(3, 1, 3, 0.5, 4.0, 0.0, 0), (4, 2, 0, 1.0, 1.0, 0.0, 1), (5, 2, 3, 1.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
with pytest.raises(Exception):
_ = from_signals_both(init_cash=np.inf).order_records
with pytest.raises(Exception):
_ = from_signals_longonly(init_cash=np.inf).order_records
with pytest.raises(Exception):
_ = from_signals_shortonly(init_cash=np.inf).order_records
def test_group_by(self):
pf = from_signals_both(close=price_wide, group_by=np.array([0, 0, 1]))
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 3, 200.0, 4.0, 0.0, 1), (2, 1, 0, 100.0, 1.0, 0.0, 0),
(3, 1, 3, 200.0, 4.0, 0.0, 1), (4, 2, 0, 100.0, 1.0, 0.0, 0), (5, 2, 3, 200.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
pd.testing.assert_index_equal(
pf.wrapper.grouper.group_by,
pd.Int64Index([0, 0, 1], dtype='int64')
)
pd.testing.assert_series_equal(
pf.init_cash,
pd.Series([200., 100.], index=pd.Int64Index([0, 1], dtype='int64')).rename('init_cash')
)
assert not pf.cash_sharing
def test_cash_sharing(self):
pf = from_signals_both(close=price_wide, group_by=np.array([0, 0, 1]), cash_sharing=True)
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 0, 3, 200., 4., 0., 1),
(2, 2, 0, 100., 1., 0., 0), (3, 2, 3, 200., 4., 0., 1)
], dtype=order_dt)
)
pd.testing.assert_index_equal(
pf.wrapper.grouper.group_by,
pd.Int64Index([0, 0, 1], dtype='int64')
)
pd.testing.assert_series_equal(
pf.init_cash,
pd.Series([100., 100.], index=pd.Int64Index([0, 1], dtype='int64')).rename('init_cash')
)
assert pf.cash_sharing
with pytest.raises(Exception):
_ = pf.regroup(group_by=False)
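    # With cash sharing, call_seq controls the order in which columns within a group are
    # processed per row; 'auto' reorders each row so cash-freeing exits run before entries,
    # as the expected call_seq matrices below show.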
def test_call_seq(self):
pf = from_signals_both(close=price_wide, group_by=np.array([0, 0, 1]), cash_sharing=True)
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 0, 3, 200., 4., 0., 1),
(2, 2, 0, 100., 1., 0., 0), (3, 2, 3, 200., 4., 0., 1)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[0, 1, 0],
[0, 1, 0],
[0, 1, 0],
[0, 1, 0],
[0, 1, 0]
])
)
pf = from_signals_both(
close=price_wide, group_by=np.array([0, 0, 1]),
cash_sharing=True, call_seq='reversed')
record_arrays_close(
pf.order_records,
np.array([
(0, 1, 0, 100., 1., 0., 0), (1, 1, 3, 200., 4., 0., 1),
(2, 2, 0, 100., 1., 0., 0), (3, 2, 3, 200., 4., 0., 1)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0]
])
)
pf = from_signals_both(
close=price_wide, group_by=np.array([0, 0, 1]),
cash_sharing=True, call_seq='random', seed=seed)
record_arrays_close(
pf.order_records,
np.array([
(0, 1, 0, 100., 1., 0., 0), (1, 1, 3, 200., 4., 0., 1),
(2, 2, 0, 100., 1., 0., 0), (3, 2, 3, 200., 4., 0., 1)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[1, 0, 0],
[0, 1, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0]
])
)
kwargs = dict(
close=1.,
entries=pd.DataFrame([
[False, False, True],
[False, True, False],
[True, False, False],
[False, False, True],
[False, True, False],
]),
exits=pd.DataFrame([
[False, False, False],
[False, False, True],
[False, True, False],
[True, False, False],
[False, False, True],
]),
group_by=np.array([0, 0, 0]),
cash_sharing=True,
call_seq='auto'
)
pf = from_signals_both(**kwargs)
record_arrays_close(
pf.order_records,
np.array([
(0, 2, 0, 100., 1., 0., 0), (1, 2, 1, 200., 1., 0., 1),
(2, 1, 1, 200., 1., 0., 0), (3, 1, 2, 200., 1., 0., 1),
(4, 0, 2, 200., 1., 0., 0), (5, 0, 3, 200., 1., 0., 1),
(6, 2, 3, 200., 1., 0., 0), (7, 2, 4, 200., 1., 0., 1),
(8, 1, 4, 200., 1., 0., 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[0, 1, 2],
[2, 0, 1],
[1, 2, 0],
[0, 1, 2],
[2, 0, 1]
])
)
pf = from_signals_longonly(**kwargs)
record_arrays_close(
pf.order_records,
np.array([
(0, 2, 0, 100., 1., 0., 0), (1, 2, 1, 100., 1., 0., 1),
(2, 1, 1, 100., 1., 0., 0), (3, 1, 2, 100., 1., 0., 1),
(4, 0, 2, 100., 1., 0., 0), (5, 0, 3, 100., 1., 0., 1),
(6, 2, 3, 100., 1., 0., 0), (7, 2, 4, 100., 1., 0., 1),
(8, 1, 4, 100., 1., 0., 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[0, 1, 2],
[2, 0, 1],
[1, 2, 0],
[0, 1, 2],
[2, 0, 1]
])
)
pf = from_signals_shortonly(**kwargs)
record_arrays_close(
pf.order_records,
np.array([
(0, 2, 0, 100., 1., 0., 1), (1, 2, 1, 100., 1., 0., 0),
(2, 0, 2, 100., 1., 0., 1), (3, 0, 3, 100., 1., 0., 0),
(4, 1, 4, 100., 1., 0., 1)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[2, 0, 1],
[1, 0, 2],
[0, 1, 2],
[2, 1, 0],
[1, 0, 2]
])
)
pf = from_signals_longonly(**kwargs, size=1., size_type='percent')
record_arrays_close(
pf.order_records,
np.array([
(0, 2, 0, 100.0, 1.0, 0.0, 0), (1, 2, 1, 100.0, 1.0, 0.0, 1), (2, 1, 1, 100.0, 1.0, 0.0, 0),
(3, 1, 2, 100.0, 1.0, 0.0, 1), (4, 0, 2, 100.0, 1.0, 0.0, 0), (5, 0, 3, 100.0, 1.0, 0.0, 1),
(6, 2, 3, 100.0, 1.0, 0.0, 0), (7, 2, 4, 100.0, 1.0, 0.0, 1), (8, 1, 4, 100.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[0, 1, 2],
[2, 0, 1],
[1, 0, 2],
[0, 1, 2],
[2, 0, 1]
])
)
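    # sl_stop sets a stop loss as a fraction below the entry price (above for shorts):
    # np.nan disables it, np.inf never triggers, and negative values raise.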
def test_sl_stop(self):
entries = pd.Series([True, False, False, False, False], index=price.index)
exits = pd.Series([False, False, False, False, False], index=price.index)
with pytest.raises(Exception):
_ = from_signals_both(sl_stop=-0.1)
close = pd.Series([5., 4., 3., 2., 1.], index=price.index)
open = close + 0.25
high = close + 0.5
low = close - 0.5
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.1, 0.5, np.inf]]).order_records,
np.array([
(0, 0, 0, 20.0, 5.0, 0.0, 0),
(1, 1, 0, 20.0, 5.0, 0.0, 0), (2, 1, 1, 20.0, 4.0, 0.0, 1),
(3, 2, 0, 20.0, 5.0, 0.0, 0), (4, 2, 3, 20.0, 2.0, 0.0, 1),
(5, 3, 0, 20.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.1, 0.5, np.inf]]).order_records,
np.array([
(0, 0, 0, 20.0, 5.0, 0.0, 1),
(1, 1, 0, 20.0, 5.0, 0.0, 1),
(2, 2, 0, 20.0, 5.0, 0.0, 1),
(3, 3, 0, 20.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_both(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.1, 0.5, np.inf]]).order_records,
from_signals_longonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.1, 0.5, np.inf]]).order_records
)
record_arrays_close(
from_signals_both(
close=close, entries=exits, exits=entries,
sl_stop=[[np.nan, 0.1, 0.5, np.inf]]).order_records,
from_signals_shortonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.1, 0.5, np.inf]]).order_records
)
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[np.nan, 0.1, 0.15, 0.2, np.inf]]).order_records,
np.array([
(0, 0, 0, 20.0, 5.0, 0.0, 0),
(1, 1, 0, 20.0, 5.0, 0.0, 0), (2, 1, 1, 20.0, 4.25, 0.0, 1),
(3, 2, 0, 20.0, 5.0, 0.0, 0), (4, 2, 1, 20.0, 4.25, 0.0, 1),
(5, 3, 0, 20.0, 5.0, 0.0, 0), (6, 3, 1, 20.0, 4.0, 0.0, 1),
(7, 4, 0, 20.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[np.nan, 0.1, 0.15, 0.2, np.inf]]).order_records,
np.array([
(0, 0, 0, 20.0, 5.0, 0.0, 1),
(1, 1, 0, 20.0, 5.0, 0.0, 1),
(2, 2, 0, 20.0, 5.0, 0.0, 1),
(3, 3, 0, 20.0, 5.0, 0.0, 1),
(4, 4, 0, 20.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
close = pd.Series([1., 2., 3., 4., 5.], index=price.index)
open = close - 0.25
high = close + 0.5
low = close - 0.5
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.5, 3., np.inf]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0),
(1, 1, 0, 100.0, 1.0, 0.0, 0),
(2, 2, 0, 100.0, 1.0, 0.0, 0),
(3, 3, 0, 100.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.5, 3., np.inf]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1),
(1, 1, 0, 100.0, 1.0, 0.0, 1), (2, 1, 1, 100.0, 2.0, 0.0, 0),
(3, 2, 0, 100.0, 1.0, 0.0, 1), (4, 2, 3, 50.0, 4.0, 0.0, 0),
(5, 3, 0, 100.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_both(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.5, 3., np.inf]]).order_records,
from_signals_longonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.5, 3., np.inf]]).order_records
)
record_arrays_close(
from_signals_both(
close=close, entries=exits, exits=entries,
sl_stop=[[np.nan, 0.5, 3., np.inf]]).order_records,
from_signals_shortonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.5, 3., np.inf]]).order_records
)
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[np.nan, 0.5, 0.75, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0),
(1, 1, 0, 100.0, 1.0, 0.0, 0),
(2, 2, 0, 100.0, 1.0, 0.0, 0),
(3, 3, 0, 100.0, 1.0, 0.0, 0),
(4, 4, 0, 100.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[np.nan, 0.5, 0.75, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1),
(1, 1, 0, 100.0, 1.0, 0.0, 1), (2, 1, 1, 100.0, 1.75, 0.0, 0),
(3, 2, 0, 100.0, 1.0, 0.0, 1), (4, 2, 1, 100.0, 1.75, 0.0, 0),
(5, 3, 0, 100.0, 1.0, 0.0, 1), (6, 3, 1, 100.0, 2.0, 0.0, 0),
(7, 4, 0, 100.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
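    # sl_trail=True turns the stop loss into a trailing stop that follows the highest
    # price reached since entry.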
def test_ts_stop(self):
entries = pd.Series([True, False, False, False, False], index=price.index)
exits = pd.Series([False, False, False, False, False], index=price.index)
with pytest.raises(Exception):
_ = from_signals_both(ts_stop=-0.1)
close = pd.Series([4., 5., 4., 3., 2.], index=price.index)
open = close + 0.25
high = close + 0.5
low = close - 0.5
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.1, 0.5, np.inf]], sl_trail=True).order_records,
np.array([
(0, 0, 0, 25.0, 4.0, 0.0, 0),
(1, 1, 0, 25.0, 4.0, 0.0, 0), (2, 1, 2, 25.0, 4.0, 0.0, 1),
(3, 2, 0, 25.0, 4.0, 0.0, 0), (4, 2, 4, 25.0, 2.0, 0.0, 1),
(5, 3, 0, 25.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.1, 0.5, np.inf]], sl_trail=True).order_records,
np.array([
(0, 0, 0, 25.0, 4.0, 0.0, 1),
(1, 1, 0, 25.0, 4.0, 0.0, 1), (2, 1, 1, 25.0, 5.0, 0.0, 0),
(3, 2, 0, 25.0, 4.0, 0.0, 1),
(4, 3, 0, 25.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_both(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.1, 0.5, np.inf]], sl_trail=True).order_records,
from_signals_longonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.1, 0.5, np.inf]], sl_trail=True).order_records
)
record_arrays_close(
from_signals_both(
close=close, entries=exits, exits=entries,
sl_stop=[[np.nan, 0.1, 0.5, np.inf]], sl_trail=True).order_records,
from_signals_shortonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.1, 0.5, np.inf]], sl_trail=True).order_records
)
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[np.nan, 0.15, 0.2, 0.25, np.inf]], sl_trail=True).order_records,
np.array([
(0, 0, 0, 25.0, 4.0, 0.0, 0),
(1, 1, 0, 25.0, 4.0, 0.0, 0), (2, 1, 2, 25.0, 4.25, 0.0, 1),
(3, 2, 0, 25.0, 4.0, 0.0, 0), (4, 2, 2, 25.0, 4.25, 0.0, 1),
(5, 3, 0, 25.0, 4.0, 0.0, 0), (6, 3, 2, 25.0, 4.125, 0.0, 1),
(7, 4, 0, 25.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[np.nan, 0.15, 0.2, 0.25, np.inf]], sl_trail=True).order_records,
np.array([
(0, 0, 0, 25.0, 4.0, 0.0, 1),
(1, 1, 0, 25.0, 4.0, 0.0, 1), (2, 1, 1, 25.0, 5.25, 0.0, 0),
(3, 2, 0, 25.0, 4.0, 0.0, 1), (4, 2, 1, 25.0, 5.25, 0.0, 0),
(5, 3, 0, 25.0, 4.0, 0.0, 1), (6, 3, 1, 25.0, 5.25, 0.0, 0),
(7, 4, 0, 25.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
close = pd.Series([2., 1., 2., 3., 4.], index=price.index)
open = close - 0.25
high = close + 0.5
low = close - 0.5
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.5, 3., np.inf]], sl_trail=True).order_records,
np.array([
(0, 0, 0, 50.0, 2.0, 0.0, 0),
(1, 1, 0, 50.0, 2.0, 0.0, 0), (2, 1, 1, 50.0, 1.0, 0.0, 1),
(3, 2, 0, 50.0, 2.0, 0.0, 0),
(4, 3, 0, 50.0, 2.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.5, 3., np.inf]], sl_trail=True).order_records,
np.array([
(0, 0, 0, 50.0, 2.0, 0.0, 1),
(1, 1, 0, 50.0, 2.0, 0.0, 1), (2, 1, 2, 50.0, 2.0, 0.0, 0),
(3, 2, 0, 50.0, 2.0, 0.0, 1), (4, 2, 4, 50.0, 4.0, 0.0, 0),
(5, 3, 0, 50.0, 2.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_both(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.5, 3., np.inf]], sl_trail=True).order_records,
from_signals_longonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.5, 3., np.inf]], sl_trail=True).order_records
)
record_arrays_close(
from_signals_both(
close=close, entries=exits, exits=entries,
sl_stop=[[np.nan, 0.5, 3., np.inf]], sl_trail=True).order_records,
from_signals_shortonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.5, 3., np.inf]], sl_trail=True).order_records
)
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[np.nan, 0.5, 0.75, 1., np.inf]], sl_trail=True).order_records,
np.array([
(0, 0, 0, 50.0, 2.0, 0.0, 0),
(1, 1, 0, 50.0, 2.0, 0.0, 0), (2, 1, 1, 50.0, 0.75, 0.0, 1),
(3, 2, 0, 50.0, 2.0, 0.0, 0), (4, 2, 1, 50.0, 0.5, 0.0, 1),
(5, 3, 0, 50.0, 2.0, 0.0, 0),
(6, 4, 0, 50.0, 2.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[np.nan, 0.5, 0.75, 1., np.inf]], sl_trail=True).order_records,
np.array([
(0, 0, 0, 50.0, 2.0, 0.0, 1),
(1, 1, 0, 50.0, 2.0, 0.0, 1), (2, 1, 2, 50.0, 1.75, 0.0, 0),
(3, 2, 0, 50.0, 2.0, 0.0, 1), (4, 2, 2, 50.0, 1.75, 0.0, 0),
(5, 3, 0, 50.0, 2.0, 0.0, 1), (6, 3, 2, 50.0, 1.75, 0.0, 0),
(7, 4, 0, 50.0, 2.0, 0.0, 1)
], dtype=order_dt)
)
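    # tp_stop sets a take-profit as a fraction above the entry price (below for shorts);
    # np.nan disables it and np.inf never triggers.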
def test_tp_stop(self):
entries = pd.Series([True, False, False, False, False], index=price.index)
exits = pd.Series([False, False, False, False, False], index=price.index)
with pytest.raises(Exception):
_ = from_signals_both(sl_stop=-0.1)
close = pd.Series([5., 4., 3., 2., 1.], index=price.index)
open = close + 0.25
high = close + 0.5
low = close - 0.5
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
tp_stop=[[np.nan, 0.1, 0.5, np.inf]]).order_records,
np.array([
(0, 0, 0, 20.0, 5.0, 0.0, 0),
(1, 1, 0, 20.0, 5.0, 0.0, 0),
(2, 2, 0, 20.0, 5.0, 0.0, 0),
(3, 3, 0, 20.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(
close=close, entries=entries, exits=exits,
tp_stop=[[np.nan, 0.1, 0.5, np.inf]]).order_records,
np.array([
(0, 0, 0, 20.0, 5.0, 0.0, 1),
(1, 1, 0, 20.0, 5.0, 0.0, 1), (2, 1, 1, 20.0, 4.0, 0.0, 0),
(3, 2, 0, 20.0, 5.0, 0.0, 1), (4, 2, 3, 20.0, 2.0, 0.0, 0),
(5, 3, 0, 20.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_both(
close=close, entries=entries, exits=exits,
tp_stop=[[np.nan, 0.1, 0.5, np.inf]]).order_records,
from_signals_longonly(
close=close, entries=entries, exits=exits,
tp_stop=[[np.nan, 0.1, 0.5, np.inf]]).order_records
)
record_arrays_close(
from_signals_both(
close=close, entries=exits, exits=entries,
tp_stop=[[np.nan, 0.1, 0.5, np.inf]]).order_records,
from_signals_shortonly(
close=close, entries=entries, exits=exits,
tp_stop=[[np.nan, 0.1, 0.5, np.inf]]).order_records
)
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
tp_stop=[[np.nan, 0.1, 0.15, 0.2, np.inf]]).order_records,
np.array([
(0, 0, 0, 20.0, 5.0, 0.0, 0),
(1, 1, 0, 20.0, 5.0, 0.0, 0),
(2, 2, 0, 20.0, 5.0, 0.0, 0),
(3, 3, 0, 20.0, 5.0, 0.0, 0),
(4, 4, 0, 20.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
tp_stop=[[np.nan, 0.1, 0.15, 0.2, np.inf]]).order_records,
np.array([
(0, 0, 0, 20.0, 5.0, 0.0, 1),
(1, 1, 0, 20.0, 5.0, 0.0, 1), (2, 1, 1, 20.0, 4.25, 0.0, 0),
(3, 2, 0, 20.0, 5.0, 0.0, 1), (4, 2, 1, 20.0, 4.25, 0.0, 0),
(5, 3, 0, 20.0, 5.0, 0.0, 1), (6, 3, 1, 20.0, 4.0, 0.0, 0),
(7, 4, 0, 20.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
close = pd.Series([1., 2., 3., 4., 5.], index=price.index)
open = close - 0.25
high = close + 0.5
low = close - 0.5
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
tp_stop=[[np.nan, 0.5, 3., np.inf]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0),
(1, 1, 0, 100.0, 1.0, 0.0, 0), (2, 1, 1, 100.0, 2.0, 0.0, 1),
(3, 2, 0, 100.0, 1.0, 0.0, 0), (4, 2, 3, 100.0, 4.0, 0.0, 1),
(5, 3, 0, 100.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(
close=close, entries=entries, exits=exits,
tp_stop=[[np.nan, 0.5, 3., np.inf]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1),
(1, 1, 0, 100.0, 1.0, 0.0, 1),
(2, 2, 0, 100.0, 1.0, 0.0, 1),
(3, 3, 0, 100.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_both(
close=close, entries=entries, exits=exits,
tp_stop=[[np.nan, 0.5, 3., np.inf]]).order_records,
from_signals_longonly(
close=close, entries=entries, exits=exits,
tp_stop=[[np.nan, 0.5, 3., np.inf]]).order_records
)
record_arrays_close(
from_signals_both(
close=close, entries=exits, exits=entries,
tp_stop=[[np.nan, 0.5, 3., np.inf]]).order_records,
from_signals_shortonly(
close=close, entries=entries, exits=exits,
tp_stop=[[np.nan, 0.5, 3., np.inf]]).order_records
)
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
tp_stop=[[np.nan, 0.5, 0.75, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0),
(1, 1, 0, 100.0, 1.0, 0.0, 0), (2, 1, 1, 100.0, 1.75, 0.0, 1),
(3, 2, 0, 100.0, 1.0, 0.0, 0), (4, 2, 1, 100.0, 1.75, 0.0, 1),
(5, 3, 0, 100.0, 1.0, 0.0, 0), (6, 3, 1, 100.0, 2.0, 0.0, 1),
(7, 4, 0, 100.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
tp_stop=[[np.nan, 0.5, 0.75, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1),
(1, 1, 0, 100.0, 1.0, 0.0, 1),
(2, 2, 0, 100.0, 1.0, 0.0, 1),
(3, 3, 0, 100.0, 1.0, 0.0, 1),
(4, 4, 0, 100.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
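    # stop_entry_price selects the price the stop distance is measured from
    # ('val_price', 'price', 'fillprice', or 'close').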
def test_stop_entry_price(self):
entries = pd.Series([True, False, False, False, False], index=price.index)
exits = pd.Series([False, False, False, False, False], index=price.index)
close = pd.Series([5., 4., 3., 2., 1.], index=price.index)
open = close + 0.25
high = close + 0.5
low = close - 0.5
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[0.05, 0.5, 0.75]], price=1.1 * close, val_price=1.05 * close,
stop_entry_price='val_price',
stop_exit_price='stoplimit', slippage=0.1).order_records,
np.array([
(0, 0, 0, 16.52892561983471, 6.050000000000001, 0.0, 0),
(1, 0, 1, 16.52892561983471, 4.25, 0.0, 1),
(2, 1, 0, 16.52892561983471, 6.050000000000001, 0.0, 0),
(3, 1, 2, 16.52892561983471, 2.625, 0.0, 1),
(4, 2, 0, 16.52892561983471, 6.050000000000001, 0.0, 0),
(5, 2, 4, 16.52892561983471, 1.25, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[0.05, 0.5, 0.75]], price=1.1 * close, val_price=1.05 * close,
stop_entry_price='price',
stop_exit_price='stoplimit', slippage=0.1).order_records,
np.array([
(0, 0, 0, 16.52892561983471, 6.050000000000001, 0.0, 0),
(1, 0, 1, 16.52892561983471, 4.25, 0.0, 1),
(2, 1, 0, 16.52892561983471, 6.050000000000001, 0.0, 0),
(3, 1, 2, 16.52892561983471, 2.75, 0.0, 1),
(4, 2, 0, 16.52892561983471, 6.050000000000001, 0.0, 0),
(5, 2, 4, 16.52892561983471, 1.25, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[0.05, 0.5, 0.75]], price=1.1 * close, val_price=1.05 * close,
stop_entry_price='fillprice',
stop_exit_price='stoplimit', slippage=0.1).order_records,
np.array([
(0, 0, 0, 16.52892561983471, 6.050000000000001, 0.0, 0),
(1, 0, 1, 16.52892561983471, 4.25, 0.0, 1),
(2, 1, 0, 16.52892561983471, 6.050000000000001, 0.0, 0),
(3, 1, 2, 16.52892561983471, 3.0250000000000004, 0.0, 1),
(4, 2, 0, 16.52892561983471, 6.050000000000001, 0.0, 0),
(5, 2, 3, 16.52892561983471, 1.5125000000000002, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[0.05, 0.5, 0.75]], price=1.1 * close, val_price=1.05 * close,
stop_entry_price='close',
stop_exit_price='stoplimit', slippage=0.1).order_records,
np.array([
(0, 0, 0, 16.52892561983471, 6.050000000000001, 0.0, 0),
(1, 0, 1, 16.52892561983471, 4.25, 0.0, 1),
(2, 1, 0, 16.52892561983471, 6.050000000000001, 0.0, 0),
(3, 1, 2, 16.52892561983471, 2.5, 0.0, 1),
(4, 2, 0, 16.52892561983471, 6.050000000000001, 0.0, 0),
(5, 2, 4, 16.52892561983471, 1.25, 0.0, 1)
], dtype=order_dt)
)
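    # stop_exit_price selects the fill price once a stop triggers: 'stoplimit' fills at the
    # stop price itself, 'stopmarket' additionally applies slippage, while 'close' and
    # 'price' fill at the bar close and the custom order price (both with slippage).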
def test_stop_exit_price(self):
entries = pd.Series([True, False, False, False, False], index=price.index)
exits = pd.Series([False, False, False, False, False], index=price.index)
close = pd.Series([5., 4., 3., 2., 1.], index=price.index)
open = close + 0.25
high = close + 0.5
low = close - 0.5
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[0.05, 0.5, 0.75]], price=1.1 * close,
stop_exit_price='stoplimit', slippage=0.1).order_records,
np.array([
(0, 0, 0, 16.528926, 6.05, 0.0, 0), (1, 0, 1, 16.528926, 4.25, 0.0, 1),
(2, 1, 0, 16.528926, 6.05, 0.0, 0), (3, 1, 2, 16.528926, 2.5, 0.0, 1),
(4, 2, 0, 16.528926, 6.05, 0.0, 0), (5, 2, 4, 16.528926, 1.25, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[0.05, 0.5, 0.75]], price=1.1 * close,
stop_exit_price='stopmarket', slippage=0.1).order_records,
np.array([
(0, 0, 0, 16.528926, 6.05, 0.0, 0), (1, 0, 1, 16.528926, 3.825, 0.0, 1),
(2, 1, 0, 16.528926, 6.05, 0.0, 0), (3, 1, 2, 16.528926, 2.25, 0.0, 1),
(4, 2, 0, 16.528926, 6.05, 0.0, 0), (5, 2, 4, 16.528926, 1.125, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[0.05, 0.5, 0.75]], price=1.1 * close,
stop_exit_price='close', slippage=0.1).order_records,
np.array([
(0, 0, 0, 16.528926, 6.05, 0.0, 0), (1, 0, 1, 16.528926, 3.6, 0.0, 1),
(2, 1, 0, 16.528926, 6.05, 0.0, 0), (3, 1, 2, 16.528926, 2.7, 0.0, 1),
(4, 2, 0, 16.528926, 6.05, 0.0, 0), (5, 2, 4, 16.528926, 0.9, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[0.05, 0.5, 0.75]], price=1.1 * close,
stop_exit_price='price', slippage=0.1).order_records,
np.array([
(0, 0, 0, 16.528926, 6.05, 0.0, 0), (1, 0, 1, 16.528926, 3.9600000000000004, 0.0, 1),
(2, 1, 0, 16.528926, 6.05, 0.0, 0), (3, 1, 2, 16.528926, 2.97, 0.0, 1),
(4, 2, 0, 16.528926, 6.05, 0.0, 0), (5, 2, 4, 16.528926, 0.9900000000000001, 0.0, 1)
], dtype=order_dt)
)
def test_upon_stop_exit(self):
entries = pd.Series([True, False, False, False, False], index=price.index)
exits = pd.Series([False, False, False, False, False], index=price.index)
close = pd.Series([5., 4., 3., 2., 1.], index=price.index)
record_arrays_close(
from_signals_both(
close=close, entries=entries, exits=exits, size=1,
sl_stop=0.1, upon_stop_exit=[['close', 'closereduce', 'reverse', 'reversereduce']],
accumulate=True).order_records,
np.array([
(0, 0, 0, 1.0, 5.0, 0.0, 0), (1, 0, 1, 1.0, 4.0, 0.0, 1),
(2, 1, 0, 1.0, 5.0, 0.0, 0), (3, 1, 1, 1.0, 4.0, 0.0, 1),
(4, 2, 0, 1.0, 5.0, 0.0, 0), (5, 2, 1, 2.0, 4.0, 0.0, 1),
(6, 3, 0, 1.0, 5.0, 0.0, 0), (7, 3, 1, 1.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_both(
close=close, entries=entries, exits=exits, size=1,
sl_stop=0.1, upon_stop_exit=[['close', 'closereduce', 'reverse', 'reversereduce']]).order_records,
np.array([
(0, 0, 0, 1.0, 5.0, 0.0, 0), (1, 0, 1, 1.0, 4.0, 0.0, 1),
(2, 1, 0, 1.0, 5.0, 0.0, 0), (3, 1, 1, 1.0, 4.0, 0.0, 1),
(4, 2, 0, 1.0, 5.0, 0.0, 0), (5, 2, 1, 2.0, 4.0, 0.0, 1),
(6, 3, 0, 1.0, 5.0, 0.0, 0), (7, 3, 1, 2.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
def test_upon_stop_update(self):
entries = pd.Series([True, True, False, False, False], index=price.index)
exits = pd.Series([False, False, False, False, False], index=price.index)
close = pd.Series([5., 4., 3., 2., 1.], index=price.index)
sl_stop = pd.Series([0.4, np.nan, np.nan, np.nan, np.nan])
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits, accumulate=True, size=1.,
sl_stop=sl_stop, upon_stop_update=[['keep', 'override', 'overridenan']]).order_records,
np.array([
(0, 0, 0, 1.0, 5.0, 0.0, 0), (1, 0, 1, 1.0, 4.0, 0.0, 0), (2, 0, 2, 2.0, 3.0, 0.0, 1),
(3, 1, 0, 1.0, 5.0, 0.0, 0), (4, 1, 1, 1.0, 4.0, 0.0, 0), (5, 1, 2, 2.0, 3.0, 0.0, 1),
(6, 2, 0, 1.0, 5.0, 0.0, 0), (7, 2, 1, 1.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
sl_stop = pd.Series([0.4, 0.4, np.nan, np.nan, np.nan])
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits, accumulate=True, size=1.,
sl_stop=sl_stop, upon_stop_update=[['keep', 'override']]).order_records,
np.array([
(0, 0, 0, 1.0, 5.0, 0.0, 0), (1, 0, 1, 1.0, 4.0, 0.0, 0), (2, 0, 2, 2.0, 3.0, 0.0, 1),
(3, 1, 0, 1.0, 5.0, 0.0, 0), (4, 1, 1, 1.0, 4.0, 0.0, 0), (5, 1, 3, 2.0, 2.0, 0.0, 1)
], dtype=order_dt)
)
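    # adjust_sl_func_nb/adjust_tp_func_nb let a Numba callback modify the active stop on
    # every bar; in the tests below the stop is dropped to 0 after a given number of bars.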
def test_adjust_sl_func(self):
entries = pd.Series([True, False, False, False, False], index=price.index)
exits = pd.Series([False, False, False, False, False], index=price.index)
close = pd.Series([5., 4., 3., 2., 1.], index=price.index)
@njit
def adjust_sl_func_nb(c, dur):
return 0. if c.i - c.init_i >= dur else c.curr_stop, c.curr_trail
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
sl_stop=np.inf, adjust_sl_func_nb=adjust_sl_func_nb, adjust_sl_args=(2,)).order_records,
np.array([
(0, 0, 0, 20.0, 5.0, 0.0, 0), (1, 0, 2, 20.0, 3.0, 0.0, 1)
], dtype=order_dt)
)
def test_adjust_ts_func(self):
entries = pd.Series([True, False, False, False, False], index=price.index)
exits = pd.Series([False, False, False, False, False], index=price.index)
close = pd.Series([10., 11., 12., 11., 10.], index=price.index)
@njit
def adjust_sl_func_nb(c, dur):
return 0. if c.i - c.curr_i >= dur else c.curr_stop, c.curr_trail
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
sl_stop=np.inf, adjust_sl_func_nb=adjust_sl_func_nb, adjust_sl_args=(2,)).order_records,
np.array([
(0, 0, 0, 10.0, 10.0, 0.0, 0), (1, 0, 4, 10.0, 10.0, 0.0, 1)
], dtype=order_dt)
)
def test_adjust_tp_func(self):
entries = pd.Series([True, False, False, False, False], index=price.index)
exits = pd.Series([False, False, False, False, False], index=price.index)
close = pd.Series([1., 2., 3., 4., 5.], index=price.index)
@njit
def adjust_tp_func_nb(c, dur):
return 0. if c.i - c.init_i >= dur else c.curr_stop
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
tp_stop=np.inf, adjust_tp_func_nb=adjust_tp_func_nb, adjust_tp_args=(2,)).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 2, 100.0, 3.0, 0.0, 1)
], dtype=order_dt)
)
def test_max_orders(self):
_ = from_signals_both(close=price_wide)
_ = from_signals_both(close=price_wide, max_orders=6)
with pytest.raises(Exception):
_ = from_signals_both(close=price_wide, max_orders=5)
def test_max_logs(self):
_ = from_signals_both(close=price_wide, log=True)
_ = from_signals_both(close=price_wide, log=True, max_logs=6)
with pytest.raises(Exception):
_ = from_signals_both(close=price_wide, log=True, max_logs=5)
# ############# from_holding ############# #
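# Holding is expected to match entering on the first bar and never exiting.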
class TestFromHolding:
def test_from_holding(self):
record_arrays_close(
vbt.Portfolio.from_holding(price).order_records,
vbt.Portfolio.from_signals(price, True, False, accumulate=False).order_records
)
# ############# from_random_signals ############# #
class TestFromRandomSignals:
def test_from_random_n(self):
result = vbt.Portfolio.from_random_signals(price, n=2, seed=seed)
record_arrays_close(
result.order_records,
vbt.Portfolio.from_signals(
price,
[True, False, True, False, False],
[False, True, False, False, True]
).order_records
)
pd.testing.assert_index_equal(
result.wrapper.index,
price.vbt.wrapper.index
)
pd.testing.assert_index_equal(
result.wrapper.columns,
price.vbt.wrapper.columns
)
result = vbt.Portfolio.from_random_signals(price, n=[1, 2], seed=seed)
record_arrays_close(
result.order_records,
vbt.Portfolio.from_signals(
price,
[[False, True], [True, False], [False, True], [False, False], [False, False]],
[[False, False], [False, True], [False, False], [False, True], [True, False]]
).order_records
)
pd.testing.assert_index_equal(
result.wrapper.index,
pd.DatetimeIndex([
'2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05'
], dtype='datetime64[ns]', freq=None)
)
pd.testing.assert_index_equal(
result.wrapper.columns,
pd.Int64Index([1, 2], dtype='int64', name='randnx_n')
)
def test_from_random_prob(self):
result = vbt.Portfolio.from_random_signals(price, prob=0.5, seed=seed)
record_arrays_close(
result.order_records,
vbt.Portfolio.from_signals(
price,
[True, False, False, False, False],
[False, False, False, False, True]
).order_records
)
pd.testing.assert_index_equal(
result.wrapper.index,
price.vbt.wrapper.index
)
pd.testing.assert_index_equal(
result.wrapper.columns,
price.vbt.wrapper.columns
)
result = vbt.Portfolio.from_random_signals(price, prob=[0.25, 0.5], seed=seed)
record_arrays_close(
result.order_records,
vbt.Portfolio.from_signals(
price,
[[False, True], [False, False], [False, False], [False, False], [True, False]],
[[False, False], [False, True], [False, False], [False, False], [False, False]]
).order_records
)
pd.testing.assert_index_equal(
result.wrapper.index,
pd.DatetimeIndex([
'2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05'
], dtype='datetime64[ns]', freq=None)
)
pd.testing.assert_index_equal(
result.wrapper.columns,
pd.MultiIndex.from_tuples(
[(0.25, 0.25), (0.5, 0.5)],
names=['rprobnx_entry_prob', 'rprobnx_exit_prob'])
)
# ############# from_order_func ############# #
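# Helper order functions used below: order_func_nb buys the given size on even bars and
# sells it on odd bars; the flex_* variants iterate over all columns of a group, and the
# log_* variants additionally enable logging.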
@njit
def order_func_nb(c, size):
_size = nb.get_elem_nb(c, size)
return nb.order_nb(_size if c.i % 2 == 0 else -_size)
@njit
def log_order_func_nb(c, size):
_size = nb.get_elem_nb(c, size)
return nb.order_nb(_size if c.i % 2 == 0 else -_size, log=True)
@njit
def flex_order_func_nb(c, size):
if c.call_idx < c.group_len:
_size = nb.get_col_elem_nb(c, c.from_col + c.call_idx, size)
return c.from_col + c.call_idx, nb.order_nb(_size if c.i % 2 == 0 else -_size)
return -1, nb.order_nothing_nb()
@njit
def log_flex_order_func_nb(c, size):
if c.call_idx < c.group_len:
_size = nb.get_col_elem_nb(c, c.from_col + c.call_idx, size)
return c.from_col + c.call_idx, nb.order_nb(_size if c.i % 2 == 0 else -_size, log=True)
return -1, nb.order_nothing_nb()
class TestFromOrderFunc:
@pytest.mark.parametrize("test_row_wise", [False, True])
@pytest.mark.parametrize("test_flexible", [False, True])
def test_one_column(self, test_row_wise, test_flexible):
order_func = flex_order_func_nb if test_flexible else order_func_nb
pf = vbt.Portfolio.from_order_func(
price.tolist(), order_func, np.asarray(np.inf), row_wise=test_row_wise, flexible=test_flexible)
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 200.0, 2.0, 0.0, 1),
(2, 0, 2, 133.33333333333334, 3.0, 0.0, 0), (3, 0, 3, 66.66666666666669, 4.0, 0.0, 1),
(4, 0, 4, 53.33333333333335, 5.0, 0.0, 0)
], dtype=order_dt)
)
pf = vbt.Portfolio.from_order_func(
price, order_func, np.asarray(np.inf), row_wise=test_row_wise, flexible=test_flexible)
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 200.0, 2.0, 0.0, 1),
(2, 0, 2, 133.33333333333334, 3.0, 0.0, 0), (3, 0, 3, 66.66666666666669, 4.0, 0.0, 1),
(4, 0, 4, 53.33333333333335, 5.0, 0.0, 0)
], dtype=order_dt)
)
pd.testing.assert_index_equal(
pf.wrapper.index,
pd.DatetimeIndex(['2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05'])
)
pd.testing.assert_index_equal(
pf.wrapper.columns,
pd.Int64Index([0], dtype='int64')
)
assert pf.wrapper.ndim == 1
assert pf.wrapper.freq == day_dt
assert pf.wrapper.grouper.group_by is None
@pytest.mark.parametrize("test_row_wise", [False, True])
@pytest.mark.parametrize("test_flexible", [False, True])
@pytest.mark.parametrize("test_use_numba", [False, True])
def test_multiple_columns(self, test_row_wise, test_flexible, test_use_numba):
order_func = flex_order_func_nb if test_flexible else order_func_nb
pf = vbt.Portfolio.from_order_func(
price_wide, order_func, vbt.Rep('size'), broadcast_named_args=dict(size=[0, 1, np.inf]),
row_wise=test_row_wise, flexible=test_flexible, use_numba=test_use_numba)
if test_row_wise:
record_arrays_close(
pf.order_records,
np.array([
(0, 1, 0, 1.0, 1.0, 0.0, 0), (1, 2, 0, 100.0, 1.0, 0.0, 0),
(2, 1, 1, 1.0, 2.0, 0.0, 1), (3, 2, 1, 200.0, 2.0, 0.0, 1),
(4, 1, 2, 1.0, 3.0, 0.0, 0), (5, 2, 2, 133.33333333333334, 3.0, 0.0, 0),
(6, 1, 3, 1.0, 4.0, 0.0, 1), (7, 2, 3, 66.66666666666669, 4.0, 0.0, 1),
(8, 1, 4, 1.0, 5.0, 0.0, 0), (9, 2, 4, 53.33333333333335, 5.0, 0.0, 0)
], dtype=order_dt)
)
else:
record_arrays_close(
pf.order_records,
np.array([
(0, 1, 0, 1.0, 1.0, 0.0, 0), (1, 1, 1, 1.0, 2.0, 0.0, 1),
(2, 1, 2, 1.0, 3.0, 0.0, 0), (3, 1, 3, 1.0, 4.0, 0.0, 1),
(4, 1, 4, 1.0, 5.0, 0.0, 0), (5, 2, 0, 100.0, 1.0, 0.0, 0),
(6, 2, 1, 200.0, 2.0, 0.0, 1), (7, 2, 2, 133.33333333333334, 3.0, 0.0, 0),
(8, 2, 3, 66.66666666666669, 4.0, 0.0, 1), (9, 2, 4, 53.33333333333335, 5.0, 0.0, 0)
], dtype=order_dt)
)
pd.testing.assert_index_equal(
pf.wrapper.index,
pd.DatetimeIndex(['2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05'])
)
pd.testing.assert_index_equal(
pf.wrapper.columns,
pd.Index(['a', 'b', 'c'], dtype='object')
)
assert pf.wrapper.ndim == 2
assert pf.wrapper.freq == day_dt
assert pf.wrapper.grouper.group_by is None
@pytest.mark.parametrize("test_row_wise", [False, True])
@pytest.mark.parametrize("test_flexible", [False, True])
def test_group_by(self, test_row_wise, test_flexible):
order_func = flex_order_func_nb if test_flexible else order_func_nb
pf = vbt.Portfolio.from_order_func(
price_wide, order_func, np.asarray(np.inf),
group_by=np.array([0, 0, 1]), row_wise=test_row_wise, flexible=test_flexible)
if test_row_wise:
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 1, 0, 100.0, 1.0, 0.0, 0),
(2, 2, 0, 100.0, 1.0, 0.0, 0), (3, 0, 1, 200.0, 2.0, 0.0, 1),
(4, 1, 1, 200.0, 2.0, 0.0, 1), (5, 2, 1, 200.0, 2.0, 0.0, 1),
(6, 0, 2, 133.33333333333334, 3.0, 0.0, 0), (7, 1, 2, 133.33333333333334, 3.0, 0.0, 0),
(8, 2, 2, 133.33333333333334, 3.0, 0.0, 0), (9, 0, 3, 66.66666666666669, 4.0, 0.0, 1),
(10, 1, 3, 66.66666666666669, 4.0, 0.0, 1), (11, 2, 3, 66.66666666666669, 4.0, 0.0, 1),
(12, 0, 4, 53.33333333333335, 5.0, 0.0, 0), (13, 1, 4, 53.33333333333335, 5.0, 0.0, 0),
(14, 2, 4, 53.33333333333335, 5.0, 0.0, 0)
], dtype=order_dt)
)
else:
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 1, 0, 100.0, 1.0, 0.0, 0),
(2, 0, 1, 200.0, 2.0, 0.0, 1), (3, 1, 1, 200.0, 2.0, 0.0, 1),
(4, 0, 2, 133.33333333333334, 3.0, 0.0, 0), (5, 1, 2, 133.33333333333334, 3.0, 0.0, 0),
(6, 0, 3, 66.66666666666669, 4.0, 0.0, 1), (7, 1, 3, 66.66666666666669, 4.0, 0.0, 1),
(8, 0, 4, 53.33333333333335, 5.0, 0.0, 0), (9, 1, 4, 53.33333333333335, 5.0, 0.0, 0),
(10, 2, 0, 100.0, 1.0, 0.0, 0), (11, 2, 1, 200.0, 2.0, 0.0, 1),
(12, 2, 2, 133.33333333333334, 3.0, 0.0, 0), (13, 2, 3, 66.66666666666669, 4.0, 0.0, 1),
(14, 2, 4, 53.33333333333335, 5.0, 0.0, 0)
], dtype=order_dt)
)
pd.testing.assert_index_equal(
pf.wrapper.grouper.group_by,
pd.Int64Index([0, 0, 1], dtype='int64')
)
pd.testing.assert_series_equal(
pf.init_cash,
pd.Series([200., 100.], index=pd.Int64Index([0, 1], dtype='int64')).rename('init_cash')
)
assert not pf.cash_sharing
@pytest.mark.parametrize("test_row_wise", [False, True])
@pytest.mark.parametrize("test_flexible", [False, True])
def test_cash_sharing(self, test_row_wise, test_flexible):
order_func = flex_order_func_nb if test_flexible else order_func_nb
pf = vbt.Portfolio.from_order_func(
price_wide, order_func, np.asarray(np.inf),
group_by=np.array([0, 0, 1]), cash_sharing=True, row_wise=test_row_wise, flexible=test_flexible)
if test_row_wise:
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 2, 0, 100., 1., 0., 0),
(2, 0, 1, 200., 2., 0., 1), (3, 2, 1, 200., 2., 0., 1),
(4, 0, 2, 133.33333333, 3., 0., 0), (5, 2, 2, 133.33333333, 3., 0., 0),
(6, 0, 3, 66.66666667, 4., 0., 1), (7, 2, 3, 66.66666667, 4., 0., 1),
(8, 0, 4, 53.33333333, 5., 0., 0), (9, 2, 4, 53.33333333, 5., 0., 0)
], dtype=order_dt)
)
else:
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 0, 1, 200., 2., 0., 1),
(2, 0, 2, 133.33333333, 3., 0., 0), (3, 0, 3, 66.66666667, 4., 0., 1),
(4, 0, 4, 53.33333333, 5., 0., 0), (5, 2, 0, 100., 1., 0., 0),
(6, 2, 1, 200., 2., 0., 1), (7, 2, 2, 133.33333333, 3., 0., 0),
(8, 2, 3, 66.66666667, 4., 0., 1), (9, 2, 4, 53.33333333, 5., 0., 0)
], dtype=order_dt)
)
pd.testing.assert_index_equal(
pf.wrapper.grouper.group_by,
pd.Int64Index([0, 0, 1], dtype='int64')
)
pd.testing.assert_series_equal(
pf.init_cash,
pd.Series([100., 100.], index=pd.Int64Index([0, 1], dtype='int64')).rename('init_cash')
)
assert pf.cash_sharing
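    # test_call_seq: with shared cash, the call sequence decides which column within
    # a group is processed first. Checks the default sequence, call_seq='reversed',
    # call_seq='random' with a fixed seed, that call_seq='auto' raises here, and then
    # a dynamic sequence sorted in a pre-segment callback (see the block below).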
@pytest.mark.parametrize(
"test_row_wise",
[False, True],
)
def test_call_seq(self, test_row_wise):
pf = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, np.asarray(np.inf), group_by=np.array([0, 0, 1]),
cash_sharing=True, row_wise=test_row_wise)
if test_row_wise:
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 2, 0, 100., 1., 0., 0),
(2, 0, 1, 200., 2., 0., 1), (3, 2, 1, 200., 2., 0., 1),
(4, 0, 2, 133.33333333, 3., 0., 0), (5, 2, 2, 133.33333333, 3., 0., 0),
(6, 0, 3, 66.66666667, 4., 0., 1), (7, 2, 3, 66.66666667, 4., 0., 1),
(8, 0, 4, 53.33333333, 5., 0., 0), (9, 2, 4, 53.33333333, 5., 0., 0)
], dtype=order_dt)
)
else:
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 0, 1, 200., 2., 0., 1),
(2, 0, 2, 133.33333333, 3., 0., 0), (3, 0, 3, 66.66666667, 4., 0., 1),
(4, 0, 4, 53.33333333, 5., 0., 0), (5, 2, 0, 100., 1., 0., 0),
(6, 2, 1, 200., 2., 0., 1), (7, 2, 2, 133.33333333, 3., 0., 0),
(8, 2, 3, 66.66666667, 4., 0., 1), (9, 2, 4, 53.33333333, 5., 0., 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[0, 1, 0],
[0, 1, 0],
[0, 1, 0],
[0, 1, 0],
[0, 1, 0]
])
)
pf = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, np.asarray(np.inf), group_by=np.array([0, 0, 1]),
cash_sharing=True, call_seq='reversed', row_wise=test_row_wise)
if test_row_wise:
record_arrays_close(
pf.order_records,
np.array([
(0, 1, 0, 100., 1., 0., 0), (1, 2, 0, 100., 1., 0., 0),
(2, 1, 1, 200., 2., 0., 1), (3, 2, 1, 200., 2., 0., 1),
(4, 1, 2, 133.33333333, 3., 0., 0), (5, 2, 2, 133.33333333, 3., 0., 0),
(6, 1, 3, 66.66666667, 4., 0., 1), (7, 2, 3, 66.66666667, 4., 0., 1),
(8, 1, 4, 53.33333333, 5., 0., 0), (9, 2, 4, 53.33333333, 5., 0., 0)
], dtype=order_dt)
)
else:
record_arrays_close(
pf.order_records,
np.array([
(0, 1, 0, 100., 1., 0., 0), (1, 1, 1, 200., 2., 0., 1),
(2, 1, 2, 133.33333333, 3., 0., 0), (3, 1, 3, 66.66666667, 4., 0., 1),
(4, 1, 4, 53.33333333, 5., 0., 0), (5, 2, 0, 100., 1., 0., 0),
(6, 2, 1, 200., 2., 0., 1), (7, 2, 2, 133.33333333, 3., 0., 0),
(8, 2, 3, 66.66666667, 4., 0., 1), (9, 2, 4, 53.33333333, 5., 0., 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0]
])
)
pf = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, np.asarray(np.inf), group_by=np.array([0, 0, 1]),
cash_sharing=True, call_seq='random', seed=seed, row_wise=test_row_wise)
if test_row_wise:
record_arrays_close(
pf.order_records,
np.array([
(0, 1, 0, 100., 1., 0., 0), (1, 2, 0, 100., 1., 0., 0),
(2, 1, 1, 200., 2., 0., 1), (3, 2, 1, 200., 2., 0., 1),
(4, 1, 2, 133.33333333, 3., 0., 0), (5, 2, 2, 133.33333333, 3., 0., 0),
(6, 1, 3, 66.66666667, 4., 0., 1), (7, 2, 3, 66.66666667, 4., 0., 1),
(8, 1, 4, 53.33333333, 5., 0., 0), (9, 2, 4, 53.33333333, 5., 0., 0)
], dtype=order_dt)
)
else:
record_arrays_close(
pf.order_records,
np.array([
(0, 1, 0, 100., 1., 0., 0), (1, 1, 1, 200., 2., 0., 1),
(2, 1, 2, 133.33333333, 3., 0., 0), (3, 1, 3, 66.66666667, 4., 0., 1),
(4, 1, 4, 53.33333333, 5., 0., 0), (5, 2, 0, 100., 1., 0., 0),
(6, 2, 1, 200., 2., 0., 1), (7, 2, 2, 133.33333333, 3., 0., 0),
(8, 2, 3, 66.66666667, 4., 0., 1), (9, 2, 4, 53.33333333, 5., 0., 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[1, 0, 0],
[0, 1, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0]
])
)
with pytest.raises(Exception):
_ = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, np.asarray(np.inf), group_by=np.array([0, 0, 1]),
cash_sharing=True, call_seq='auto', row_wise=test_row_wise
)
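        # Dynamic variant of 'auto': a pre-segment callback sorts the call sequence
        # with nb.sort_call_seq_nb against target hold values, the order function looks
        # its size up via call_seq_now, and asset_value must then equal target_hold_value.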
target_hold_value = pd.DataFrame({
'a': [0., 70., 30., 0., 70.],
'b': [30., 0., 70., 30., 30.],
'c': [70., 30., 0., 70., 0.]
}, index=price.index)
@njit
def pre_segment_func_nb(c, target_hold_value):
order_size = np.copy(target_hold_value[c.i, c.from_col:c.to_col])
order_size_type = np.full(c.group_len, SizeType.TargetValue)
direction = np.full(c.group_len, Direction.Both)
order_value_out = np.empty(c.group_len, dtype=np.float_)
c.last_val_price[c.from_col:c.to_col] = c.close[c.i, c.from_col:c.to_col]
nb.sort_call_seq_nb(c, order_size, order_size_type, direction, order_value_out)
return order_size, order_size_type, direction
@njit
def pct_order_func_nb(c, order_size, order_size_type, direction):
col_i = c.call_seq_now[c.call_idx]
return nb.order_nb(
order_size[col_i],
c.close[c.i, col_i],
size_type=order_size_type[col_i],
direction=direction[col_i]
)
pf = vbt.Portfolio.from_order_func(
price_wide * 0 + 1, pct_order_func_nb, group_by=np.array([0, 0, 0]),
cash_sharing=True, pre_segment_func_nb=pre_segment_func_nb,
pre_segment_args=(target_hold_value.values,), row_wise=test_row_wise)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[0, 1, 2],
[2, 1, 0],
[0, 2, 1],
[1, 0, 2],
[2, 1, 0]
])
)
pd.testing.assert_frame_equal(
pf.asset_value(group_by=False),
target_hold_value
)
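    # test_target_value: issue TargetValue orders of 50, first with the default
    # valuation price and then with a pre-segment callback that sets last_val_price
    # to the previous close, comparing order records in both simulation modes.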
@pytest.mark.parametrize("test_row_wise", [False, True])
@pytest.mark.parametrize("test_flexible", [False, True])
def test_target_value(self, test_row_wise, test_flexible):
@njit
def target_val_pre_segment_func_nb(c, val_price):
c.last_val_price[c.from_col:c.to_col] = val_price[c.i]
return ()
if test_flexible:
@njit
def target_val_order_func_nb(c):
col = c.from_col + c.call_idx
if c.call_idx < c.group_len:
return col, nb.order_nb(50., nb.get_col_elem_nb(c, col, c.close), size_type=SizeType.TargetValue)
return -1, nb.order_nothing_nb()
else:
@njit
def target_val_order_func_nb(c):
return nb.order_nb(50., nb.get_elem_nb(c, c.close), size_type=SizeType.TargetValue)
pf = vbt.Portfolio.from_order_func(
price.iloc[1:], target_val_order_func_nb, row_wise=test_row_wise, flexible=test_flexible)
if test_row_wise:
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 1, 25.0, 3.0, 0.0, 0), (1, 0, 2, 8.333333333333332, 4.0, 0.0, 1),
(2, 0, 3, 4.166666666666668, 5.0, 0.0, 1)
], dtype=order_dt)
)
else:
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 1, 25.0, 3.0, 0.0, 0), (1, 0, 2, 8.333333333333332, 4.0, 0.0, 1),
(2, 0, 3, 4.166666666666668, 5.0, 0.0, 1)
], dtype=order_dt)
)
pf = vbt.Portfolio.from_order_func(
price.iloc[1:], target_val_order_func_nb,
pre_segment_func_nb=target_val_pre_segment_func_nb,
pre_segment_args=(price.iloc[:-1].values,), row_wise=test_row_wise, flexible=test_flexible)
if test_row_wise:
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 50.0, 2.0, 0.0, 0), (1, 0, 1, 25.0, 3.0, 0.0, 1),
(2, 0, 2, 8.333333333333332, 4.0, 0.0, 1), (3, 0, 3, 4.166666666666668, 5.0, 0.0, 1)
], dtype=order_dt)
)
else:
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 50.0, 2.0, 0.0, 0), (1, 0, 1, 25.0, 3.0, 0.0, 1),
(2, 0, 2, 8.333333333333332, 4.0, 0.0, 1), (3, 0, 3, 4.166666666666668, 5.0, 0.0, 1)
], dtype=order_dt)
)
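    # test_target_percent: same setup as test_target_value but with TargetPercent
    # orders of 0.5 (target half of the current value).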
@pytest.mark.parametrize("test_row_wise", [False, True])
@pytest.mark.parametrize("test_flexible", [False, True])
def test_target_percent(self, test_row_wise, test_flexible):
@njit
def target_pct_pre_segment_func_nb(c, val_price):
c.last_val_price[c.from_col:c.to_col] = val_price[c.i]
return ()
if test_flexible:
@njit
def target_pct_order_func_nb(c):
col = c.from_col + c.call_idx
if c.call_idx < c.group_len:
return col, nb.order_nb(0.5, nb.get_col_elem_nb(c, col, c.close), size_type=SizeType.TargetPercent)
return -1, nb.order_nothing_nb()
else:
@njit
def target_pct_order_func_nb(c):
return nb.order_nb(0.5, nb.get_elem_nb(c, c.close), size_type=SizeType.TargetPercent)
pf = vbt.Portfolio.from_order_func(
price.iloc[1:], target_pct_order_func_nb, row_wise=test_row_wise, flexible=test_flexible)
if test_row_wise:
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 1, 25.0, 3.0, 0.0, 0), (1, 0, 2, 8.333333333333332, 4.0, 0.0, 1),
(2, 0, 3, 1.0416666666666679, 5.0, 0.0, 1)
], dtype=order_dt)
)
else:
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 1, 25.0, 3.0, 0.0, 0), (1, 0, 2, 8.333333333333332, 4.0, 0.0, 1),
(2, 0, 3, 1.0416666666666679, 5.0, 0.0, 1)
], dtype=order_dt)
)
pf = vbt.Portfolio.from_order_func(
price.iloc[1:], target_pct_order_func_nb,
pre_segment_func_nb=target_pct_pre_segment_func_nb,
pre_segment_args=(price.iloc[:-1].values,), row_wise=test_row_wise, flexible=test_flexible)
if test_row_wise:
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 50.0, 2.0, 0.0, 0), (1, 0, 1, 25.0, 3.0, 0.0, 1),
(2, 0, 3, 3.125, 5.0, 0.0, 1)
], dtype=order_dt)
)
else:
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 50.0, 2.0, 0.0, 0), (1, 0, 1, 25.0, 3.0, 0.0, 1),
(2, 0, 3, 3.125, 5.0, 0.0, 1)
], dtype=order_dt)
)
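    # test_update_value: alternate between buying and selling everything with fees,
    # fixed fees and slippage. With update_value=False the value seen after the order
    # equals the value before it; with update_value=True it is recomputed right away.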
@pytest.mark.parametrize("test_row_wise", [False, True])
@pytest.mark.parametrize("test_flexible", [False, True])
def test_update_value(self, test_row_wise, test_flexible):
if test_flexible:
@njit
def order_func_nb(c):
col = c.from_col + c.call_idx
if c.call_idx < c.group_len:
return col, nb.order_nb(
np.inf if c.i % 2 == 0 else -np.inf,
nb.get_col_elem_nb(c, col, c.close),
fees=0.01,
fixed_fees=1.,
slippage=0.01
)
return -1, nb.order_nothing_nb()
else:
@njit
def order_func_nb(c):
return nb.order_nb(
np.inf if c.i % 2 == 0 else -np.inf,
nb.get_elem_nb(c, c.close),
fees=0.01,
fixed_fees=1.,
slippage=0.01
)
@njit
def post_order_func_nb(c, value_before, value_now):
value_before[c.i, c.col] = c.value_before
value_now[c.i, c.col] = c.value_now
value_before = np.empty_like(price.values[:, None])
value_now = np.empty_like(price.values[:, None])
_ = vbt.Portfolio.from_order_func(
price,
order_func_nb,
post_order_func_nb=post_order_func_nb,
post_order_args=(value_before, value_now),
row_wise=test_row_wise,
update_value=False,
flexible=test_flexible)
np.testing.assert_array_equal(
value_before,
value_now
)
_ = vbt.Portfolio.from_order_func(
price,
order_func_nb,
post_order_func_nb=post_order_func_nb,
post_order_args=(value_before, value_now),
row_wise=test_row_wise,
update_value=True,
flexible=test_flexible)
np.testing.assert_array_equal(
value_before,
np.array([
[100.0],
[97.04930889128518],
[185.46988117104038],
[82.47853456223025],
[104.65775576218027]
])
)
np.testing.assert_array_equal(
value_now,
np.array([
[98.01980198019803],
[187.36243097890815],
[83.30331990785257],
[105.72569204546781],
[73.54075125567473]
])
)
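    # test_states: record the running state (value, return, position records, cash,
    # positions, valuation prices) in pre-segment, order, post-order and post-segment
    # callbacks and compare it against the expected arrays and the Portfolio accessors,
    # including the in_sim_order variants for the non-flexible mode.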
@pytest.mark.parametrize("test_row_wise", [False, True])
@pytest.mark.parametrize("test_flexible", [False, True])
def test_states(self, test_row_wise, test_flexible):
close = np.array([
[1, 1, 1],
[np.nan, 2, 2],
[3, np.nan, 3],
[4, 4, np.nan],
[5, 5, 5]
])
size = np.array([
[1, 1, 1],
[-1, -1, -1],
[1, 1, 1],
[-1, -1, -1],
[1, 1, 1]
])
value_arr1 = np.empty((size.shape[0], 2), dtype=np.float_)
value_arr2 = np.empty(size.shape, dtype=np.float_)
value_arr3 = np.empty(size.shape, dtype=np.float_)
return_arr1 = np.empty((size.shape[0], 2), dtype=np.float_)
return_arr2 = np.empty(size.shape, dtype=np.float_)
return_arr3 = np.empty(size.shape, dtype=np.float_)
pos_record_arr1 = np.empty(size.shape, dtype=trade_dt)
pos_record_arr2 = np.empty(size.shape, dtype=trade_dt)
pos_record_arr3 = np.empty(size.shape, dtype=trade_dt)
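        # Record group value/return and per-column position records before each
        # segment; from the second row on, nudge last_val_price up by 0.5 to emulate
        # a changed valuation price.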
def pre_segment_func_nb(c):
value_arr1[c.i, c.group] = c.last_value[c.group]
return_arr1[c.i, c.group] = c.last_return[c.group]
for col in range(c.from_col, c.to_col):
pos_record_arr1[c.i, col] = c.last_pos_record[col]
if c.i > 0:
c.last_val_price[c.from_col:c.to_col] = c.last_val_price[c.from_col:c.to_col] + 0.5
return ()
if test_flexible:
def order_func_nb(c):
col = c.from_col + c.call_idx
if c.call_idx < c.group_len:
value_arr2[c.i, col] = c.last_value[c.group]
return_arr2[c.i, col] = c.last_return[c.group]
pos_record_arr2[c.i, col] = c.last_pos_record[col]
return col, nb.order_nb(size[c.i, col], fixed_fees=1.)
return -1, nb.order_nothing_nb()
else:
def order_func_nb(c):
value_arr2[c.i, c.col] = c.value_now
return_arr2[c.i, c.col] = c.return_now
pos_record_arr2[c.i, c.col] = c.pos_record_now
return nb.order_nb(size[c.i, c.col], fixed_fees=1.)
def post_order_func_nb(c):
value_arr3[c.i, c.col] = c.value_now
return_arr3[c.i, c.col] = c.return_now
pos_record_arr3[c.i, c.col] = c.pos_record_now
_ = vbt.Portfolio.from_order_func(
close,
order_func_nb,
pre_segment_func_nb=pre_segment_func_nb,
post_order_func_nb=post_order_func_nb,
use_numba=False,
row_wise=test_row_wise,
update_value=True,
ffill_val_price=True,
group_by=[0, 0, 1],
cash_sharing=True,
flexible=test_flexible
)
np.testing.assert_array_equal(
value_arr1,
np.array([
[100.0, 100.0],
[98.0, 99.0],
[98.5, 99.0],
[99.0, 98.0],
[99.0, 98.5]
])
)
np.testing.assert_array_equal(
value_arr2,
np.array([
[100.0, 99.0, 100.0],
[99.0, 99.0, 99.5],
[99.0, 99.0, 99.0],
[100.0, 100.0, 98.5],
[99.0, 98.5, 99.0]
])
)
np.testing.assert_array_equal(
value_arr3,
np.array([
[99.0, 98.0, 99.0],
[99.0, 98.5, 99.0],
[99.0, 99.0, 98.0],
[100.0, 99.0, 98.5],
[98.5, 97.0, 99.0]
])
)
np.testing.assert_array_equal(
return_arr1,
np.array([
[np.nan, np.nan],
[-0.02, -0.01],
[0.00510204081632653, 0.0],
[0.005076142131979695, -0.010101010101010102],
[0.0, 0.00510204081632653]
])
)
np.testing.assert_array_equal(
return_arr2,
np.array([
[0.0, -0.01, 0.0],
[-0.01, -0.01, -0.005],
[0.01020408163265306, 0.01020408163265306, 0.0],
[0.015228426395939087, 0.015228426395939087, -0.005050505050505051],
[0.0, -0.005050505050505051, 0.01020408163265306]
])
)
np.testing.assert_array_equal(
return_arr3,
np.array([
[-0.01, -0.02, -0.01],
[-0.01, -0.015, -0.01],
[0.01020408163265306, 0.01020408163265306, -0.010101010101010102],
[0.015228426395939087, 0.005076142131979695, -0.005050505050505051],
[-0.005050505050505051, -0.020202020202020204, 0.01020408163265306]
])
)
record_arrays_close(
pos_record_arr1.flatten()[3:],
np.array([
(0, 0, 1.0, 0, 1.0, 1.0, -1, np.nan, 0.0, -1.0, -1.0, 0, 0, 0),
(0, 1, 1.0, 0, 1.0, 1.0, -1, np.nan, 0.0, -1.0, -1.0, 0, 0, 0),
(0, 2, 1.0, 0, 1.0, 1.0, -1, np.nan, 0.0, -1.0, -1.0, 0, 0, 0),
(0, 0, 1.0, 0, 1.0, 1.0, -1, np.nan, 0.0, -0.5, -0.5, 0, 0, 0),
(0, 1, 1.0, 0, 1.0, 1.0, 1, 2.0, 1.0, -1.0, -1.0, 0, 1, 0),
(0, 2, 1.0, 0, 1.0, 1.0, 1, 2.0, 1.0, -1.0, -1.0, 0, 1, 0),
(0, 0, 2.0, 0, 2.0, 2.0, -1, np.nan, 0.0, 0.0, 0.0, 0, 0, 0),
(0, 1, 1.0, 0, 1.0, 1.0, 1, 2.0, 1.0, -1.0, -1.0, 0, 1, 0),
(1, 2, 1.0, 2, 3.0, 1.0, -1, np.nan, 0.0, -1.0, -0.3333333333333333, 0, 0, 1),
(0, 0, 2.0, 0, 2.0, 2.0, -1, 4.0, 1.0, 1.0, 0.25, 0, 0, 0),
(1, 1, 1.0, 3, 4.0, 1.0, -1, np.nan, 0.0, -1.0, -0.25, 1, 0, 1),
(1, 2, 1.0, 2, 3.0, 1.0, -1, np.nan, 0.0, -0.5, -0.16666666666666666, 0, 0, 1)
], dtype=trade_dt)
)
record_arrays_close(
pos_record_arr2.flatten()[3:],
np.array([
(0, 0, 1.0, 0, 1.0, 1.0, -1, np.nan, 0.0, -0.5, -0.5, 0, 0, 0),
(0, 1, 1.0, 0, 1.0, 1.0, -1, np.nan, 0.0, -0.5, -0.5, 0, 0, 0),
(0, 2, 1.0, 0, 1.0, 1.0, -1, np.nan, 0.0, -0.5, -0.5, 0, 0, 0),
(0, 0, 1.0, 0, 1.0, 1.0, -1, np.nan, 0.0, 0.0, 0.0, 0, 0, 0),
(0, 1, 1.0, 0, 1.0, 1.0, 1, 2.0, 1.0, -1.0, -1.0, 0, 1, 0),
(0, 2, 1.0, 0, 1.0, 1.0, 1, 2.0, 1.0, -1.0, -1.0, 0, 1, 0),
(0, 0, 2.0, 0, 2.0, 2.0, -1, np.nan, 0.0, 1.0, 0.25, 0, 0, 0),
(0, 1, 1.0, 0, 1.0, 1.0, 1, 2.0, 1.0, -1.0, -1.0, 0, 1, 0),
(1, 2, 1.0, 2, 3.0, 1.0, -1, np.nan, 0.0, -0.5, -0.16666666666666666, 0, 0, 1),
(0, 0, 2.0, 0, 2.0, 2.0, -1, 4.0, 1.0, 1.5, 0.375, 0, 0, 0),
(1, 1, 1.0, 3, 4.0, 1.0, -1, np.nan, 0.0, -1.5, -0.375, 1, 0, 1),
(1, 2, 1.0, 2, 3.0, 1.0, -1, np.nan, 0.0, 0.0, 0.0, 0, 0, 1)
], dtype=trade_dt)
)
record_arrays_close(
pos_record_arr3.flatten(),
np.array([
(0, 0, 1.0, 0, 1.0, 1.0, -1, np.nan, 0.0, -1.0, -1.0, 0, 0, 0),
(0, 1, 1.0, 0, 1.0, 1.0, -1, np.nan, 0.0, -1.0, -1.0, 0, 0, 0),
(0, 2, 1.0, 0, 1.0, 1.0, -1, np.nan, 0.0, -1.0, -1.0, 0, 0, 0),
(0, 0, 1.0, 0, 1.0, 1.0, -1, np.nan, 0.0, -0.5, -0.5, 0, 0, 0),
(0, 1, 1.0, 0, 1.0, 1.0, 1, 2.0, 1.0, -1.0, -1.0, 0, 1, 0),
(0, 2, 1.0, 0, 1.0, 1.0, 1, 2.0, 1.0, -1.0, -1.0, 0, 1, 0),
(0, 0, 2.0, 0, 2.0, 2.0, -1, np.nan, 0.0, 0.0, 0.0, 0, 0, 0),
(0, 1, 1.0, 0, 1.0, 1.0, 1, 2.0, 1.0, -1.0, -1.0, 0, 1, 0),
(1, 2, 1.0, 2, 3.0, 1.0, -1, np.nan, 0.0, -1.0, -0.3333333333333333, 0, 0, 1),
(0, 0, 2.0, 0, 2.0, 2.0, -1, 4.0, 1.0, 1.0, 0.25, 0, 0, 0),
(1, 1, 1.0, 3, 4.0, 1.0, -1, np.nan, 0.0, -1.0, -0.25, 1, 0, 1),
(1, 2, 1.0, 2, 3.0, 1.0, -1, np.nan, 0.0, -0.5, -0.16666666666666666, 0, 0, 1),
(0, 0, 3.0, 0, 3.0, 3.0, -1, 4.0, 1.0, 1.0, 0.1111111111111111, 0, 0, 0),
(1, 1, 1.0, 3, 4.0, 1.0, 4, 5.0, 1.0, -3.0, -0.75, 1, 1, 1),
(1, 2, 2.0, 2, 4.0, 2.0, -1, np.nan, 0.0, 0.0, 0.0, 0, 0, 1)
], dtype=trade_dt)
)
cash_arr = np.empty((size.shape[0], 2), dtype=np.float_)
position_arr = np.empty(size.shape, dtype=np.float_)
val_price_arr = np.empty(size.shape, dtype=np.float_)
value_arr = np.empty((size.shape[0], 2), dtype=np.float_)
return_arr = np.empty((size.shape[0], 2), dtype=np.float_)
sim_order_cash_arr = np.empty(size.shape, dtype=np.float_)
sim_order_value_arr = np.empty(size.shape, dtype=np.float_)
sim_order_return_arr = np.empty(size.shape, dtype=np.float_)
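        # Capture cash and value in simulation order after each order. The return
        # array first stores value_now and is then converted into a return relative
        # to the previous order's value (or to init_cash for the very first order).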
def post_order_func_nb(c):
sim_order_cash_arr[c.i, c.col] = c.cash_now
sim_order_value_arr[c.i, c.col] = c.value_now
sim_order_return_arr[c.i, c.col] = c.value_now
if c.i == 0 and c.call_idx == 0:
sim_order_return_arr[c.i, c.col] -= c.init_cash[c.group]
sim_order_return_arr[c.i, c.col] /= c.init_cash[c.group]
else:
if c.call_idx == 0:
prev_i = c.i - 1
prev_col = c.to_col - 1
else:
prev_i = c.i
prev_col = c.from_col + c.call_idx - 1
sim_order_return_arr[c.i, c.col] -= sim_order_value_arr[prev_i, prev_col]
sim_order_return_arr[c.i, c.col] /= sim_order_value_arr[prev_i, prev_col]
def post_segment_func_nb(c):
cash_arr[c.i, c.group] = c.last_cash[c.group]
for col in range(c.from_col, c.to_col):
position_arr[c.i, col] = c.last_position[col]
val_price_arr[c.i, col] = c.last_val_price[col]
value_arr[c.i, c.group] = c.last_value[c.group]
return_arr[c.i, c.group] = c.last_return[c.group]
pf = vbt.Portfolio.from_order_func(
close,
order_func_nb,
post_order_func_nb=post_order_func_nb,
post_segment_func_nb=post_segment_func_nb,
use_numba=False,
row_wise=test_row_wise,
update_value=True,
ffill_val_price=True,
group_by=[0, 0, 1],
cash_sharing=True,
flexible=test_flexible
)
np.testing.assert_array_equal(
cash_arr,
pf.cash().values
)
np.testing.assert_array_equal(
position_arr,
pf.assets().values
)
np.testing.assert_array_equal(
val_price_arr,
pf.get_filled_close().values
)
np.testing.assert_array_equal(
value_arr,
pf.value().values
)
np.testing.assert_array_equal(
return_arr,
pf.returns().values
)
if test_flexible:
with pytest.raises(Exception):
pf.cash(in_sim_order=True, group_by=False)
with pytest.raises(Exception):
pf.value(in_sim_order=True, group_by=False)
with pytest.raises(Exception):
pf.returns(in_sim_order=True, group_by=False)
else:
np.testing.assert_array_equal(
sim_order_cash_arr,
pf.cash(in_sim_order=True, group_by=False).values
)
np.testing.assert_array_equal(
sim_order_value_arr,
pf.value(in_sim_order=True, group_by=False).values
)
np.testing.assert_array_equal(
sim_order_return_arr,
pf.returns(in_sim_order=True, group_by=False).values
)
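    # test_post_sim_ctx: a post-simulation callback deep-copies the final context so
    # the test can inspect target_shape, close, group_lens, init_cash, cash_sharing,
    # call_seq (None in flexible mode), segment_mask, the order/log records and the
    # last_* state arrays (cash, position, val price, value, return, debt, free cash,
    # order/log indices).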
@pytest.mark.parametrize("test_row_wise", [False, True])
@pytest.mark.parametrize("test_flexible", [False, True])
def test_post_sim_ctx(self, test_row_wise, test_flexible):
if test_flexible:
def order_func(c):
col = c.from_col + c.call_idx
if c.call_idx < c.group_len:
return col, nb.order_nb(
1.,
nb.get_col_elem_nb(c, col, c.close),
fees=0.01,
fixed_fees=1.,
slippage=0.01,
log=True
)
return -1, nb.order_nothing_nb()
else:
def order_func(c):
return nb.order_nb(
1.,
nb.get_elem_nb(c, c.close),
fees=0.01,
fixed_fees=1.,
slippage=0.01,
log=True
)
def post_sim_func(c, lst):
lst.append(deepcopy(c))
lst = []
_ = vbt.Portfolio.from_order_func(
price_wide,
order_func,
post_sim_func_nb=post_sim_func,
post_sim_args=(lst,),
row_wise=test_row_wise,
update_value=True,
max_logs=price_wide.shape[0] * price_wide.shape[1],
use_numba=False,
group_by=[0, 0, 1],
cash_sharing=True,
flexible=test_flexible
)
c = lst[-1]
assert c.target_shape == price_wide.shape
np.testing.assert_array_equal(
c.close,
price_wide.values
)
np.testing.assert_array_equal(
c.group_lens,
np.array([2, 1])
)
np.testing.assert_array_equal(
c.init_cash,
np.array([100., 100.])
)
assert c.cash_sharing
if test_flexible:
assert c.call_seq is None
else:
np.testing.assert_array_equal(
c.call_seq,
np.array([
[0, 1, 0],
[0, 1, 0],
[0, 1, 0],
[0, 1, 0],
[0, 1, 0]
])
)
np.testing.assert_array_equal(
c.segment_mask,
np.array([
[True, True],
[True, True],
[True, True],
[True, True],
[True, True]
])
)
assert c.ffill_val_price
assert c.update_value
if test_row_wise:
record_arrays_close(
c.order_records,
np.array([
(0, 0, 0, 1.0, 1.01, 1.0101, 0), (1, 1, 0, 1.0, 1.01, 1.0101, 0),
(2, 2, 0, 1.0, 1.01, 1.0101, 0), (3, 0, 1, 1.0, 2.02, 1.0202, 0),
(4, 1, 1, 1.0, 2.02, 1.0202, 0), (5, 2, 1, 1.0, 2.02, 1.0202, 0),
(6, 0, 2, 1.0, 3.0300000000000002, 1.0303, 0), (7, 1, 2, 1.0, 3.0300000000000002, 1.0303, 0),
(8, 2, 2, 1.0, 3.0300000000000002, 1.0303, 0), (9, 0, 3, 1.0, 4.04, 1.0404, 0),
(10, 1, 3, 1.0, 4.04, 1.0404, 0), (11, 2, 3, 1.0, 4.04, 1.0404, 0),
(12, 0, 4, 1.0, 5.05, 1.0505, 0), (13, 1, 4, 1.0, 5.05, 1.0505, 0),
(14, 2, 4, 1.0, 5.05, 1.0505, 0)
], dtype=order_dt)
)
else:
record_arrays_close(
c.order_records,
np.array([
(0, 0, 0, 1.0, 1.01, 1.0101, 0), (1, 1, 0, 1.0, 1.01, 1.0101, 0),
(2, 0, 1, 1.0, 2.02, 1.0202, 0), (3, 1, 1, 1.0, 2.02, 1.0202, 0),
(4, 0, 2, 1.0, 3.0300000000000002, 1.0303, 0), (5, 1, 2, 1.0, 3.0300000000000002, 1.0303, 0),
(6, 0, 3, 1.0, 4.04, 1.0404, 0), (7, 1, 3, 1.0, 4.04, 1.0404, 0),
(8, 0, 4, 1.0, 5.05, 1.0505, 0), (9, 1, 4, 1.0, 5.05, 1.0505, 0),
(10, 2, 0, 1.0, 1.01, 1.0101, 0), (11, 2, 1, 1.0, 2.02, 1.0202, 0),
(12, 2, 2, 1.0, 3.0300000000000002, 1.0303, 0), (13, 2, 3, 1.0, 4.04, 1.0404, 0),
(14, 2, 4, 1.0, 5.05, 1.0505, 0)
], dtype=order_dt)
)
if test_row_wise:
record_arrays_close(
c.log_records,
np.array([
(0, 0, 0, 0, 100.0, 0.0, 0.0, 100.0, np.nan, 100.0, 1.0, 1.0, 0, 2, 0.01, 1.0,
0.01, 0.0, np.inf, 0.0, False, True, False, True, 97.9799, 1.0, 0.0, 97.9799,
1.01, 98.9899, 1.0, 1.01, 1.0101, 0, 0, -1, 0),
(1, 0, 1, 0, 97.9799, 0.0, 0.0, 97.9799, np.nan, 98.9899, 1.0, 1.0, 0, 2,
0.01, 1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True, 95.9598, 1.0,
0.0, 95.9598, 1.01, 97.97980000000001, 1.0, 1.01, 1.0101, 0, 0, -1, 1),
(2, 1, 2, 0, 100.0, 0.0, 0.0, 100.0, np.nan, 100.0, 1.0, 1.0, 0, 2, 0.01,
1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True, 97.9799, 1.0, 0.0,
97.9799, 1.01, 98.9899, 1.0, 1.01, 1.0101, 0, 0, -1, 2),
(3, 0, 0, 1, 95.9598, 1.0, 0.0, 95.9598, 1.0, 97.9598, 1.0, 2.0, 0,
2, 0.01, 1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True, 92.9196,
2.0, 0.0, 92.9196, 2.02, 97.95960000000001, 1.0, 2.02, 1.0202, 0, 0, -1, 3),
(4, 0, 1, 1, 92.9196, 1.0, 0.0, 92.9196, 1.0, 97.95960000000001, 1.0, 2.0,
0, 2, 0.01, 1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True, 89.8794,
2.0, 0.0, 89.8794, 2.02, 97.95940000000002, 1.0, 2.02, 1.0202, 0, 0, -1, 4),
(5, 1, 2, 1, 97.9799, 1.0, 0.0, 97.9799, 1.0, 98.9799, 1.0, 2.0, 0, 2,
0.01, 1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True, 94.9397, 2.0,
0.0, 94.9397, 2.02, 98.97970000000001, 1.0, 2.02, 1.0202, 0, 0, -1, 5),
(6, 0, 0, 2, 89.8794, 2.0, 0.0, 89.8794, 2.0, 97.8794, 1.0, 3.0, 0, 2,
0.01, 1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True, 85.8191, 3.0,
0.0, 85.8191, 3.0300000000000002, 98.90910000000001, 1.0,
3.0300000000000002, 1.0303, 0, 0, -1, 6),
(7, 0, 1, 2, 85.8191, 2.0, 0.0, 85.8191, 2.0, 98.90910000000001,
1.0, 3.0, 0, 2, 0.01, 1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True,
81.75880000000001, 3.0, 0.0, 81.75880000000001, 3.0300000000000002,
99.93880000000001, 1.0, 3.0300000000000002, 1.0303, 0, 0, -1, 7),
(8, 1, 2, 2, 94.9397, 2.0, 0.0, 94.9397, 2.0, 98.9397, 1.0, 3.0, 0,
2, 0.01, 1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True, 90.8794,
3.0, 0.0, 90.8794, 3.0300000000000002, 99.96940000000001, 1.0,
3.0300000000000002, 1.0303, 0, 0, -1, 8),
(9, 0, 0, 3, 81.75880000000001, 3.0, 0.0, 81.75880000000001, 3.0, 99.75880000000001,
1.0, 4.0, 0, 2, 0.01, 1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True,
76.67840000000001, 4.0, 0.0, 76.67840000000001, 4.04, 101.83840000000001,
1.0, 4.04, 1.0404, 0, 0, -1, 9),
(10, 0, 1, 3, 76.67840000000001, 3.0, 0.0, 76.67840000000001, 3.0,
101.83840000000001, 1.0, 4.0, 0, 2, 0.01, 1.0, 0.01, 0.0, np.inf, 0.0,
False, True, False, True, 71.59800000000001, 4.0, 0.0, 71.59800000000001,
4.04, 103.918, 1.0, 4.04, 1.0404, 0, 0, -1, 10),
(11, 1, 2, 3, 90.8794, 3.0, 0.0, 90.8794, 3.0, 99.8794, 1.0, 4.0, 0, 2,
0.01, 1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True, 85.799, 4.0,
0.0, 85.799, 4.04, 101.959, 1.0, 4.04, 1.0404, 0, 0, -1, 11),
(12, 0, 0, 4, 71.59800000000001, 4.0, 0.0, 71.59800000000001, 4.0,
103.59800000000001, 1.0, 5.0, 0, 2, 0.01, 1.0, 0.01, 0.0, np.inf, 0.0,
False, True, False, True, 65.49750000000002, 5.0, 0.0, 65.49750000000002,
5.05, 106.74750000000002, 1.0, 5.05, 1.0505, 0, 0, -1, 12),
(13, 0, 1, 4, 65.49750000000002, 4.0, 0.0, 65.49750000000002, 4.0,
106.74750000000002, 1.0, 5.0, 0, 2, 0.01, 1.0, 0.01, 0.0, np.inf, 0.0,
False, True, False, True, 59.39700000000002, 5.0, 0.0, 59.39700000000002,
5.05, 109.89700000000002, 1.0, 5.05, 1.0505, 0, 0, -1, 13),
(14, 1, 2, 4, 85.799, 4.0, 0.0, 85.799, 4.0, 101.799, 1.0, 5.0, 0, 2,
0.01, 1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True, 79.69850000000001,
5.0, 0.0, 79.69850000000001, 5.05, 104.94850000000001, 1.0, 5.05, 1.0505, 0, 0, -1, 14)
], dtype=log_dt)
)
else:
record_arrays_close(
c.log_records,
np.array([
(0, 0, 0, 0, 100.0, 0.0, 0.0, 100.0, np.nan, 100.0, 1.0, 1.0, 0, 2,
0.01, 1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True, 97.9799,
1.0, 0.0, 97.9799, 1.01, 98.9899, 1.0, 1.01, 1.0101, 0, 0, -1, 0),
(1, 0, 1, 0, 97.9799, 0.0, 0.0, 97.9799, np.nan, 98.9899, 1.0, 1.0, 0,
2, 0.01, 1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True, 95.9598,
1.0, 0.0, 95.9598, 1.01, 97.97980000000001, 1.0, 1.01, 1.0101, 0, 0, -1, 1),
(2, 0, 0, 1, 95.9598, 1.0, 0.0, 95.9598, 1.0, 97.9598, 1.0, 2.0, 0, 2,
0.01, 1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True, 92.9196, 2.0,
0.0, 92.9196, 2.02, 97.95960000000001, 1.0, 2.02, 1.0202, 0, 0, -1, 2),
(3, 0, 1, 1, 92.9196, 1.0, 0.0, 92.9196, 1.0, 97.95960000000001, 1.0,
2.0, 0, 2, 0.01, 1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True, 89.8794,
2.0, 0.0, 89.8794, 2.02, 97.95940000000002, 1.0, 2.02, 1.0202, 0, 0, -1, 3),
(4, 0, 0, 2, 89.8794, 2.0, 0.0, 89.8794, 2.0, 97.8794, 1.0, 3.0, 0, 2,
0.01, 1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True, 85.8191,
3.0, 0.0, 85.8191, 3.0300000000000002, 98.90910000000001, 1.0,
3.0300000000000002, 1.0303, 0, 0, -1, 4),
(5, 0, 1, 2, 85.8191, 2.0, 0.0, 85.8191, 2.0, 98.90910000000001, 1.0,
3.0, 0, 2, 0.01, 1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True,
81.75880000000001, 3.0, 0.0, 81.75880000000001, 3.0300000000000002,
99.93880000000001, 1.0, 3.0300000000000002, 1.0303, 0, 0, -1, 5),
(6, 0, 0, 3, 81.75880000000001, 3.0, 0.0, 81.75880000000001, 3.0,
99.75880000000001, 1.0, 4.0, 0, 2, 0.01, 1.0, 0.01, 0.0, np.inf, 0.0,
False, True, False, True, 76.67840000000001, 4.0, 0.0, 76.67840000000001,
4.04, 101.83840000000001, 1.0, 4.04, 1.0404, 0, 0, -1, 6),
(7, 0, 1, 3, 76.67840000000001, 3.0, 0.0, 76.67840000000001, 3.0,
101.83840000000001, 1.0, 4.0, 0, 2, 0.01, 1.0, 0.01, 0.0, np.inf, 0.0,
False, True, False, True, 71.59800000000001, 4.0, 0.0, 71.59800000000001,
4.04, 103.918, 1.0, 4.04, 1.0404, 0, 0, -1, 7),
(8, 0, 0, 4, 71.59800000000001, 4.0, 0.0, 71.59800000000001, 4.0,
103.59800000000001, 1.0, 5.0, 0, 2, 0.01, 1.0, 0.01, 0.0, np.inf, 0.0,
False, True, False, True, 65.49750000000002, 5.0, 0.0, 65.49750000000002,
5.05, 106.74750000000002, 1.0, 5.05, 1.0505, 0, 0, -1, 8),
(9, 0, 1, 4, 65.49750000000002, 4.0, 0.0, 65.49750000000002, 4.0,
106.74750000000002, 1.0, 5.0, 0, 2, 0.01, 1.0, 0.01, 0.0, np.inf, 0.0,
False, True, False, True, 59.39700000000002, 5.0, 0.0, 59.39700000000002,
5.05, 109.89700000000002, 1.0, 5.05, 1.0505, 0, 0, -1, 9),
(10, 1, 2, 0, 100.0, 0.0, 0.0, 100.0, np.nan, 100.0, 1.0, 1.0, 0, 2, 0.01,
1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True, 97.9799, 1.0, 0.0,
97.9799, 1.01, 98.9899, 1.0, 1.01, 1.0101, 0, 0, -1, 10),
(11, 1, 2, 1, 97.9799, 1.0, 0.0, 97.9799, 1.0, 98.9799, 1.0, 2.0, 0,
2, 0.01, 1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True, 94.9397,
2.0, 0.0, 94.9397, 2.02, 98.97970000000001, 1.0, 2.02, 1.0202, 0, 0, -1, 11),
(12, 1, 2, 2, 94.9397, 2.0, 0.0, 94.9397, 2.0, 98.9397, 1.0, 3.0, 0,
2, 0.01, 1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True, 90.8794,
3.0, 0.0, 90.8794, 3.0300000000000002, 99.96940000000001, 1.0,
3.0300000000000002, 1.0303, 0, 0, -1, 12),
(13, 1, 2, 3, 90.8794, 3.0, 0.0, 90.8794, 3.0, 99.8794, 1.0, 4.0, 0, 2,
0.01, 1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True, 85.799, 4.0,
0.0, 85.799, 4.04, 101.959, 1.0, 4.04, 1.0404, 0, 0, -1, 13),
(14, 1, 2, 4, 85.799, 4.0, 0.0, 85.799, 4.0, 101.799, 1.0, 5.0, 0, 2, 0.01,
1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True, 79.69850000000001,
5.0, 0.0, 79.69850000000001, 5.05, 104.94850000000001, 1.0, 5.05, 1.0505, 0, 0, -1, 14)
], dtype=log_dt)
)
np.testing.assert_array_equal(
c.last_cash,
np.array([59.39700000000002, 79.69850000000001])
)
np.testing.assert_array_equal(
c.last_position,
np.array([5., 5., 5.])
)
np.testing.assert_array_equal(
c.last_val_price,
np.array([5.0, 5.0, 5.0])
)
np.testing.assert_array_equal(
c.last_value,
np.array([109.39700000000002, 104.69850000000001])
)
np.testing.assert_array_equal(
c.second_last_value,
np.array([103.59800000000001, 101.799])
)
np.testing.assert_array_equal(
c.last_return,
np.array([0.05597598409235705, 0.028482598060884715])
)
np.testing.assert_array_equal(
c.last_debt,
np.array([0., 0., 0.])
)
np.testing.assert_array_equal(
c.last_free_cash,
np.array([59.39700000000002, 79.69850000000001])
)
if test_row_wise:
np.testing.assert_array_equal(
c.last_oidx,
np.array([12, 13, 14])
)
np.testing.assert_array_equal(
c.last_lidx,
np.array([12, 13, 14])
)
else:
np.testing.assert_array_equal(
c.last_oidx,
np.array([8, 9, 14])
)
np.testing.assert_array_equal(
c.last_lidx,
np.array([8, 9, 14])
)
assert c.order_records[c.last_oidx[0]]['col'] == 0
assert c.order_records[c.last_oidx[1]]['col'] == 1
assert c.order_records[c.last_oidx[2]]['col'] == 2
assert c.log_records[c.last_lidx[0]]['col'] == 0
assert c.log_records[c.last_lidx[1]]['col'] == 1
assert c.log_records[c.last_lidx[2]]['col'] == 2
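    # test_free_cash: a post-order callback captures debt_now and free_cash_now for a
    # size matrix that mixes longs and shorts; the captured free cash must match
    # pf.cash(free=True) for an ungrouped portfolio, a reversed price series and a
    # grouped portfolio with cash sharing.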
@pytest.mark.parametrize("test_row_wise", [False, True])
@pytest.mark.parametrize("test_flexible", [False, True])
def test_free_cash(self, test_row_wise, test_flexible):
if test_flexible:
def order_func(c, size):
col = c.from_col + c.call_idx
if c.call_idx < c.group_len:
return col, nb.order_nb(
size[c.i, col],
nb.get_col_elem_nb(c, col, c.close),
fees=0.01,
fixed_fees=1.,
slippage=0.01
)
return -1, nb.order_nothing_nb()
else:
def order_func(c, size):
return nb.order_nb(
size[c.i, c.col],
nb.get_elem_nb(c, c.close),
fees=0.01,
fixed_fees=1.,
slippage=0.01
)
def post_order_func(c, debt, free_cash):
debt[c.i, c.col] = c.debt_now
if c.cash_sharing:
free_cash[c.i, c.group] = c.free_cash_now
else:
free_cash[c.i, c.col] = c.free_cash_now
size = np.array([
[5, -5, 5],
[5, -5, -10],
[-5, 5, 10],
[-5, 5, -10],
[-5, 5, 10]
])
debt = np.empty(price_wide.shape, dtype=np.float_)
free_cash = np.empty(price_wide.shape, dtype=np.float_)
pf = vbt.Portfolio.from_order_func(
price_wide,
order_func, size,
post_order_func_nb=post_order_func,
post_order_args=(debt, free_cash,),
row_wise=test_row_wise,
use_numba=False,
flexible=test_flexible
)
np.testing.assert_array_equal(
debt,
np.array([
[0.0, 4.95, 0.0],
[0.0, 14.850000000000001, 9.9],
[0.0, 7.425000000000001, 0.0],
[0.0, 0.0, 19.8],
[24.75, 0.0, 0.0]
])
)
np.testing.assert_array_equal(
free_cash,
np.array([
[93.8995, 94.0005, 93.8995],
[82.6985, 83.00150000000001, 92.70150000000001],
[96.39999999999999, 81.55000000000001, 80.8985],
[115.002, 74.998, 79.5025],
[89.0045, 48.49550000000001, 67.0975]
])
)
np.testing.assert_almost_equal(
free_cash,
pf.cash(free=True).values
)
debt = np.empty(price_wide.shape, dtype=np.float_)
free_cash = np.empty(price_wide.shape, dtype=np.float_)
pf = vbt.Portfolio.from_order_func(
price_wide.vbt.wrapper.wrap(price_wide.values[::-1]),
order_func, size,
post_order_func_nb=post_order_func,
post_order_args=(debt, free_cash,),
row_wise=test_row_wise,
use_numba=False,
flexible=test_flexible
)
np.testing.assert_array_equal(
debt,
np.array([
[0.0, 24.75, 0.0],
[0.0, 44.55, 19.8],
[0.0, 22.275, 0.0],
[0.0, 0.0, 9.9],
[4.95, 0.0, 0.0]
])
)
np.testing.assert_array_equal(
free_cash,
np.array([
[73.4975, 74.0025, 73.4975],
[52.0955, 53.00449999999999, 72.1015],
[65.797, 81.25299999999999, 80.0985],
[74.598, 114.60199999999998, 78.9005],
[68.5985, 108.50149999999998, 87.49949999999998]
])
)
np.testing.assert_almost_equal(
free_cash,
pf.cash(free=True).values
)
debt = np.empty(price_wide.shape, dtype=np.float_)
free_cash = np.empty((price_wide.shape[0], 2), dtype=np.float_)
pf = vbt.Portfolio.from_order_func(
price_wide,
order_func, size,
post_order_func_nb=post_order_func,
post_order_args=(debt, free_cash,),
row_wise=test_row_wise,
use_numba=False,
group_by=[0, 0, 1],
cash_sharing=True,
flexible=test_flexible
)
np.testing.assert_array_equal(
debt,
np.array([
[0.0, 4.95, 0.0],
[0.0, 14.850000000000001, 9.9],
[0.0, 7.425000000000001, 0.0],
[0.0, 0.0, 19.8],
[24.75, 0.0, 0.0]
])
)
np.testing.assert_array_equal(
free_cash,
np.array([
[87.9, 93.8995],
[65.70000000000002, 92.70150000000001],
[77.95000000000002, 80.8985],
[90.00000000000001, 79.5025],
[37.500000000000014, 67.0975]
])
)
np.testing.assert_almost_equal(
free_cash,
pf.cash(free=True).values
)
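    # test_init_cash: init_cash given per column limits the order sizes; the
    # InitCashMode.Auto and InitCashMode.AutoAlign modes reproduce the orders of an
    # infinite-cash baseline and are kept in pf._init_cash.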
@pytest.mark.parametrize("test_row_wise", [False, True])
@pytest.mark.parametrize("test_flexible", [False, True])
def test_init_cash(self, test_row_wise, test_flexible):
order_func = flex_order_func_nb if test_flexible else order_func_nb
pf = vbt.Portfolio.from_order_func(
price_wide, order_func, np.asarray(10.), row_wise=test_row_wise,
init_cash=[1., 10., np.inf], flexible=test_flexible)
if test_row_wise:
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 1, 0, 10.0, 1.0, 0.0, 0),
(2, 2, 0, 10.0, 1.0, 0.0, 0), (3, 0, 1, 10.0, 2.0, 0.0, 1),
(4, 1, 1, 10.0, 2.0, 0.0, 1), (5, 2, 1, 10.0, 2.0, 0.0, 1),
(6, 0, 2, 6.666666666666667, 3.0, 0.0, 0), (7, 1, 2, 6.666666666666667, 3.0, 0.0, 0),
(8, 2, 2, 10.0, 3.0, 0.0, 0), (9, 0, 3, 10.0, 4.0, 0.0, 1),
(10, 1, 3, 10.0, 4.0, 0.0, 1), (11, 2, 3, 10.0, 4.0, 0.0, 1),
(12, 0, 4, 8.0, 5.0, 0.0, 0), (13, 1, 4, 8.0, 5.0, 0.0, 0),
(14, 2, 4, 10.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
else:
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 10.0, 2.0, 0.0, 1),
(2, 0, 2, 6.666666666666667, 3.0, 0.0, 0), (3, 0, 3, 10.0, 4.0, 0.0, 1),
(4, 0, 4, 8.0, 5.0, 0.0, 0), (5, 1, 0, 10.0, 1.0, 0.0, 0),
(6, 1, 1, 10.0, 2.0, 0.0, 1), (7, 1, 2, 6.666666666666667, 3.0, 0.0, 0),
(8, 1, 3, 10.0, 4.0, 0.0, 1), (9, 1, 4, 8.0, 5.0, 0.0, 0),
(10, 2, 0, 10.0, 1.0, 0.0, 0), (11, 2, 1, 10.0, 2.0, 0.0, 1),
(12, 2, 2, 10.0, 3.0, 0.0, 0), (13, 2, 3, 10.0, 4.0, 0.0, 1),
(14, 2, 4, 10.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
assert type(pf._init_cash) == np.ndarray
base_pf = vbt.Portfolio.from_order_func(
price_wide, order_func, np.asarray(10.), row_wise=test_row_wise,
init_cash=np.inf, flexible=test_flexible)
pf = vbt.Portfolio.from_order_func(
price_wide, order_func, np.asarray(10.), row_wise=test_row_wise,
init_cash=InitCashMode.Auto, flexible=test_flexible)
record_arrays_close(
pf.order_records,
base_pf.orders.values
)
assert pf._init_cash == InitCashMode.Auto
pf = vbt.Portfolio.from_order_func(
price_wide, order_func, np.asarray(10.), row_wise=test_row_wise,
init_cash=InitCashMode.AutoAlign, flexible=test_flexible)
record_arrays_close(
pf.order_records,
base_pf.orders.values
)
assert pf._init_cash == InitCashMode.AutoAlign
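    # test_func_calls: every callback bumps a shared counter and logs its value, so the
    # exact invocation order of pre/post simulation, group, segment and order callbacks
    # can be asserted in column-major mode. sub_arg is a RepEval template resolving to
    # np.prod(target_shape) == 15 and is asserted inside every callback. Repeated with
    # a segment mask and with call_pre_segment/call_post_segment toggled.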
def test_func_calls(self):
@njit
def pre_sim_func_nb(c, call_i, pre_sim_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
pre_sim_lst.append(call_i[0])
return (call_i,)
@njit
def post_sim_func_nb(c, call_i, post_sim_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
post_sim_lst.append(call_i[0])
return (call_i,)
@njit
def pre_group_func_nb(c, call_i, pre_group_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
pre_group_lst.append(call_i[0])
return (call_i,)
@njit
def post_group_func_nb(c, call_i, post_group_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
post_group_lst.append(call_i[0])
return (call_i,)
@njit
def pre_segment_func_nb(c, call_i, pre_segment_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
pre_segment_lst.append(call_i[0])
return (call_i,)
@njit
def post_segment_func_nb(c, call_i, post_segment_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
post_segment_lst.append(call_i[0])
return (call_i,)
@njit
def order_func_nb(c, call_i, order_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
order_lst.append(call_i[0])
return NoOrder
@njit
def post_order_func_nb(c, call_i, post_order_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
post_order_lst.append(call_i[0])
sub_arg = vbt.RepEval('np.prod([target_shape[0], target_shape[1]])')
call_i = np.array([0])
pre_sim_lst = List.empty_list(typeof(0))
post_sim_lst = List.empty_list(typeof(0))
pre_group_lst = List.empty_list(typeof(0))
post_group_lst = List.empty_list(typeof(0))
pre_segment_lst = List.empty_list(typeof(0))
post_segment_lst = List.empty_list(typeof(0))
order_lst = List.empty_list(typeof(0))
post_order_lst = List.empty_list(typeof(0))
_ = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, order_lst, sub_arg,
group_by=np.array([0, 0, 1]),
pre_sim_func_nb=pre_sim_func_nb, pre_sim_args=(call_i, pre_sim_lst, sub_arg),
post_sim_func_nb=post_sim_func_nb, post_sim_args=(call_i, post_sim_lst, sub_arg),
pre_group_func_nb=pre_group_func_nb, pre_group_args=(pre_group_lst, sub_arg),
post_group_func_nb=post_group_func_nb, post_group_args=(post_group_lst, sub_arg),
pre_segment_func_nb=pre_segment_func_nb, pre_segment_args=(pre_segment_lst, sub_arg),
post_segment_func_nb=post_segment_func_nb, post_segment_args=(post_segment_lst, sub_arg),
post_order_func_nb=post_order_func_nb, post_order_args=(post_order_lst, sub_arg),
row_wise=False, template_mapping=dict(np=np)
)
assert call_i[0] == 56
assert list(pre_sim_lst) == [1]
assert list(post_sim_lst) == [56]
assert list(pre_group_lst) == [2, 34]
assert list(post_group_lst) == [33, 55]
assert list(pre_segment_lst) == [3, 9, 15, 21, 27, 35, 39, 43, 47, 51]
assert list(post_segment_lst) == [8, 14, 20, 26, 32, 38, 42, 46, 50, 54]
assert list(order_lst) == [4, 6, 10, 12, 16, 18, 22, 24, 28, 30, 36, 40, 44, 48, 52]
assert list(post_order_lst) == [5, 7, 11, 13, 17, 19, 23, 25, 29, 31, 37, 41, 45, 49, 53]
segment_mask = np.array([
[False, False],
[False, True],
[True, False],
[True, True],
[False, False],
])
call_i = np.array([0])
pre_sim_lst = List.empty_list(typeof(0))
post_sim_lst = List.empty_list(typeof(0))
pre_group_lst = List.empty_list(typeof(0))
post_group_lst = List.empty_list(typeof(0))
pre_segment_lst = List.empty_list(typeof(0))
post_segment_lst = List.empty_list(typeof(0))
order_lst = List.empty_list(typeof(0))
post_order_lst = List.empty_list(typeof(0))
_ = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, order_lst, sub_arg,
group_by=np.array([0, 0, 1]),
pre_sim_func_nb=pre_sim_func_nb, pre_sim_args=(call_i, pre_sim_lst, sub_arg),
post_sim_func_nb=post_sim_func_nb, post_sim_args=(call_i, post_sim_lst, sub_arg),
pre_group_func_nb=pre_group_func_nb, pre_group_args=(pre_group_lst, sub_arg),
post_group_func_nb=post_group_func_nb, post_group_args=(post_group_lst, sub_arg),
pre_segment_func_nb=pre_segment_func_nb, pre_segment_args=(pre_segment_lst, sub_arg),
post_segment_func_nb=post_segment_func_nb, post_segment_args=(post_segment_lst, sub_arg),
post_order_func_nb=post_order_func_nb, post_order_args=(post_order_lst, sub_arg),
segment_mask=segment_mask, call_pre_segment=True, call_post_segment=True,
row_wise=False, template_mapping=dict(np=np)
)
assert call_i[0] == 38
assert list(pre_sim_lst) == [1]
assert list(post_sim_lst) == [38]
assert list(pre_group_lst) == [2, 22]
assert list(post_group_lst) == [21, 37]
assert list(pre_segment_lst) == [3, 5, 7, 13, 19, 23, 25, 29, 31, 35]
assert list(post_segment_lst) == [4, 6, 12, 18, 20, 24, 28, 30, 34, 36]
assert list(order_lst) == [8, 10, 14, 16, 26, 32]
assert list(post_order_lst) == [9, 11, 15, 17, 27, 33]
call_i = np.array([0])
pre_sim_lst = List.empty_list(typeof(0))
post_sim_lst = List.empty_list(typeof(0))
pre_group_lst = List.empty_list(typeof(0))
post_group_lst = List.empty_list(typeof(0))
pre_segment_lst = List.empty_list(typeof(0))
post_segment_lst = List.empty_list(typeof(0))
order_lst = List.empty_list(typeof(0))
post_order_lst = List.empty_list(typeof(0))
_ = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, order_lst, sub_arg,
group_by=np.array([0, 0, 1]),
pre_sim_func_nb=pre_sim_func_nb, pre_sim_args=(call_i, pre_sim_lst, sub_arg),
post_sim_func_nb=post_sim_func_nb, post_sim_args=(call_i, post_sim_lst, sub_arg),
pre_group_func_nb=pre_group_func_nb, pre_group_args=(pre_group_lst, sub_arg),
post_group_func_nb=post_group_func_nb, post_group_args=(post_group_lst, sub_arg),
pre_segment_func_nb=pre_segment_func_nb, pre_segment_args=(pre_segment_lst, sub_arg),
post_segment_func_nb=post_segment_func_nb, post_segment_args=(post_segment_lst, sub_arg),
post_order_func_nb=post_order_func_nb, post_order_args=(post_order_lst, sub_arg),
segment_mask=segment_mask, call_pre_segment=False, call_post_segment=False,
row_wise=False, template_mapping=dict(np=np)
)
assert call_i[0] == 26
assert list(pre_sim_lst) == [1]
assert list(post_sim_lst) == [26]
assert list(pre_group_lst) == [2, 16]
assert list(post_group_lst) == [15, 25]
assert list(pre_segment_lst) == [3, 9, 17, 21]
assert list(post_segment_lst) == [8, 14, 20, 24]
assert list(order_lst) == [4, 6, 10, 12, 18, 22]
assert list(post_order_lst) == [5, 7, 11, 13, 19, 23]
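    # test_func_calls_flexible: same counting checks with flexible=True, where the
    # order function runs group_len + 1 times per active segment (the last call
    # returns column -1 to finish the segment).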
def test_func_calls_flexible(self):
@njit
def pre_sim_func_nb(c, call_i, pre_sim_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
pre_sim_lst.append(call_i[0])
return (call_i,)
@njit
def post_sim_func_nb(c, call_i, post_sim_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
post_sim_lst.append(call_i[0])
return (call_i,)
@njit
def pre_group_func_nb(c, call_i, pre_group_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
pre_group_lst.append(call_i[0])
return (call_i,)
@njit
def post_group_func_nb(c, call_i, post_group_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
post_group_lst.append(call_i[0])
return (call_i,)
@njit
def pre_segment_func_nb(c, call_i, pre_segment_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
pre_segment_lst.append(call_i[0])
return (call_i,)
@njit
def post_segment_func_nb(c, call_i, post_segment_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
post_segment_lst.append(call_i[0])
return (call_i,)
@njit
def flex_order_func_nb(c, call_i, order_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
order_lst.append(call_i[0])
col = c.from_col + c.call_idx
if c.call_idx < c.group_len:
return col, NoOrder
return -1, NoOrder
@njit
def post_order_func_nb(c, call_i, post_order_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
post_order_lst.append(call_i[0])
sub_arg = vbt.RepEval('np.prod([target_shape[0], target_shape[1]])')
call_i = np.array([0])
pre_sim_lst = List.empty_list(typeof(0))
post_sim_lst = List.empty_list(typeof(0))
pre_group_lst = List.empty_list(typeof(0))
post_group_lst = List.empty_list(typeof(0))
pre_segment_lst = List.empty_list(typeof(0))
post_segment_lst = List.empty_list(typeof(0))
order_lst = List.empty_list(typeof(0))
post_order_lst = List.empty_list(typeof(0))
_ = vbt.Portfolio.from_order_func(
price_wide, flex_order_func_nb, order_lst, sub_arg,
group_by=np.array([0, 0, 1]),
pre_sim_func_nb=pre_sim_func_nb, pre_sim_args=(call_i, pre_sim_lst, sub_arg),
post_sim_func_nb=post_sim_func_nb, post_sim_args=(call_i, post_sim_lst, sub_arg),
pre_group_func_nb=pre_group_func_nb, pre_group_args=(pre_group_lst, sub_arg),
post_group_func_nb=post_group_func_nb, post_group_args=(post_group_lst, sub_arg),
pre_segment_func_nb=pre_segment_func_nb, pre_segment_args=(pre_segment_lst, sub_arg),
post_segment_func_nb=post_segment_func_nb, post_segment_args=(post_segment_lst, sub_arg),
post_order_func_nb=post_order_func_nb, post_order_args=(post_order_lst, sub_arg),
row_wise=False, flexible=True, template_mapping=dict(np=np)
)
assert call_i[0] == 66
assert list(pre_sim_lst) == [1]
assert list(post_sim_lst) == [66]
assert list(pre_group_lst) == [2, 39]
assert list(post_group_lst) == [38, 65]
assert list(pre_segment_lst) == [3, 10, 17, 24, 31, 40, 45, 50, 55, 60]
assert list(post_segment_lst) == [9, 16, 23, 30, 37, 44, 49, 54, 59, 64]
assert list(order_lst) == [
4, 6, 8, 11, 13, 15, 18, 20, 22, 25, 27, 29, 32, 34,
36, 41, 43, 46, 48, 51, 53, 56, 58, 61, 63
]
assert list(post_order_lst) == [5, 7, 12, 14, 19, 21, 26, 28, 33, 35, 42, 47, 52, 57, 62]
segment_mask = np.array([
[False, False],
[False, True],
[True, False],
[True, True],
[False, False],
])
call_i = np.array([0])
pre_sim_lst = List.empty_list(typeof(0))
post_sim_lst = List.empty_list(typeof(0))
pre_group_lst = List.empty_list(typeof(0))
post_group_lst = List.empty_list(typeof(0))
pre_segment_lst = List.empty_list(typeof(0))
post_segment_lst = List.empty_list(typeof(0))
order_lst = List.empty_list(typeof(0))
post_order_lst = List.empty_list(typeof(0))
_ = vbt.Portfolio.from_order_func(
price_wide, flex_order_func_nb, order_lst, sub_arg,
group_by=np.array([0, 0, 1]),
pre_sim_func_nb=pre_sim_func_nb, pre_sim_args=(call_i, pre_sim_lst, sub_arg),
post_sim_func_nb=post_sim_func_nb, post_sim_args=(call_i, post_sim_lst, sub_arg),
pre_group_func_nb=pre_group_func_nb, pre_group_args=(pre_group_lst, sub_arg),
post_group_func_nb=post_group_func_nb, post_group_args=(post_group_lst, sub_arg),
pre_segment_func_nb=pre_segment_func_nb, pre_segment_args=(pre_segment_lst, sub_arg),
post_segment_func_nb=post_segment_func_nb, post_segment_args=(post_segment_lst, sub_arg),
post_order_func_nb=post_order_func_nb, post_order_args=(post_order_lst, sub_arg),
segment_mask=segment_mask, call_pre_segment=True, call_post_segment=True,
row_wise=False, flexible=True, template_mapping=dict(np=np)
)
assert call_i[0] == 42
assert list(pre_sim_lst) == [1]
assert list(post_sim_lst) == [42]
assert list(pre_group_lst) == [2, 24]
assert list(post_group_lst) == [23, 41]
assert list(pre_segment_lst) == [3, 5, 7, 14, 21, 25, 27, 32, 34, 39]
assert list(post_segment_lst) == [4, 6, 13, 20, 22, 26, 31, 33, 38, 40]
assert list(order_lst) == [8, 10, 12, 15, 17, 19, 28, 30, 35, 37]
assert list(post_order_lst) == [9, 11, 16, 18, 29, 36]
call_i = np.array([0])
pre_sim_lst = List.empty_list(typeof(0))
post_sim_lst = List.empty_list(typeof(0))
pre_group_lst = List.empty_list(typeof(0))
post_group_lst = List.empty_list(typeof(0))
pre_segment_lst = List.empty_list(typeof(0))
post_segment_lst = List.empty_list(typeof(0))
order_lst = List.empty_list(typeof(0))
post_order_lst = List.empty_list(typeof(0))
_ = vbt.Portfolio.from_order_func(
price_wide, flex_order_func_nb, order_lst, sub_arg,
group_by=np.array([0, 0, 1]),
pre_sim_func_nb=pre_sim_func_nb, pre_sim_args=(call_i, pre_sim_lst, sub_arg),
post_sim_func_nb=post_sim_func_nb, post_sim_args=(call_i, post_sim_lst, sub_arg),
pre_group_func_nb=pre_group_func_nb, pre_group_args=(pre_group_lst, sub_arg),
post_group_func_nb=post_group_func_nb, post_group_args=(post_group_lst, sub_arg),
pre_segment_func_nb=pre_segment_func_nb, pre_segment_args=(pre_segment_lst, sub_arg),
post_segment_func_nb=post_segment_func_nb, post_segment_args=(post_segment_lst, sub_arg),
post_order_func_nb=post_order_func_nb, post_order_args=(post_order_lst, sub_arg),
segment_mask=segment_mask, call_pre_segment=False, call_post_segment=False,
row_wise=False, flexible=True, template_mapping=dict(np=np)
)
assert call_i[0] == 30
assert list(pre_sim_lst) == [1]
assert list(post_sim_lst) == [30]
assert list(pre_group_lst) == [2, 18]
assert list(post_group_lst) == [17, 29]
assert list(pre_segment_lst) == [3, 10, 19, 24]
assert list(post_segment_lst) == [9, 16, 23, 28]
assert list(order_lst) == [4, 6, 8, 11, 13, 15, 20, 22, 25, 27]
assert list(post_order_lst) == [5, 7, 12, 14, 21, 26]
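    # test_func_calls_row_wise: same counting checks in row-wise mode, using
    # pre_row_func_nb/post_row_func_nb instead of the group callbacks; unlike the
    # other variants, the callbacks here take no sub_arg.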
def test_func_calls_row_wise(self):
@njit
def pre_sim_func_nb(c, call_i, pre_sim_lst):
call_i[0] += 1
pre_sim_lst.append(call_i[0])
return (call_i,)
@njit
def post_sim_func_nb(c, call_i, post_sim_lst):
call_i[0] += 1
post_sim_lst.append(call_i[0])
return (call_i,)
@njit
def pre_row_func_nb(c, call_i, pre_row_lst):
call_i[0] += 1
pre_row_lst.append(call_i[0])
return (call_i,)
@njit
def post_row_func_nb(c, call_i, post_row_lst):
call_i[0] += 1
post_row_lst.append(call_i[0])
return (call_i,)
@njit
def pre_segment_func_nb(c, call_i, pre_segment_lst):
call_i[0] += 1
pre_segment_lst.append(call_i[0])
return (call_i,)
@njit
def post_segment_func_nb(c, call_i, post_segment_lst):
call_i[0] += 1
post_segment_lst.append(call_i[0])
return (call_i,)
@njit
def order_func_nb(c, call_i, order_lst):
call_i[0] += 1
order_lst.append(call_i[0])
return NoOrder
@njit
def post_order_func_nb(c, call_i, post_order_lst):
call_i[0] += 1
post_order_lst.append(call_i[0])
sub_arg = vbt.RepEval('np.prod([target_shape[0], target_shape[1]])')
call_i = np.array([0])
pre_sim_lst = List.empty_list(typeof(0))
post_sim_lst = List.empty_list(typeof(0))
pre_row_lst = List.empty_list(typeof(0))
post_row_lst = List.empty_list(typeof(0))
pre_segment_lst = List.empty_list(typeof(0))
post_segment_lst = List.empty_list(typeof(0))
order_lst = List.empty_list(typeof(0))
post_order_lst = List.empty_list(typeof(0))
_ = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, order_lst,
group_by=np.array([0, 0, 1]),
pre_sim_func_nb=pre_sim_func_nb, pre_sim_args=(call_i, pre_sim_lst),
post_sim_func_nb=post_sim_func_nb, post_sim_args=(call_i, post_sim_lst),
pre_row_func_nb=pre_row_func_nb, pre_row_args=(pre_row_lst,),
post_row_func_nb=post_row_func_nb, post_row_args=(post_row_lst,),
pre_segment_func_nb=pre_segment_func_nb, pre_segment_args=(pre_segment_lst,),
post_segment_func_nb=post_segment_func_nb, post_segment_args=(post_segment_lst,),
post_order_func_nb=post_order_func_nb, post_order_args=(post_order_lst,),
row_wise=True, template_mapping=dict(np=np)
)
assert call_i[0] == 62
assert list(pre_sim_lst) == [1]
assert list(post_sim_lst) == [62]
assert list(pre_row_lst) == [2, 14, 26, 38, 50]
assert list(post_row_lst) == [13, 25, 37, 49, 61]
assert list(pre_segment_lst) == [3, 9, 15, 21, 27, 33, 39, 45, 51, 57]
assert list(post_segment_lst) == [8, 12, 20, 24, 32, 36, 44, 48, 56, 60]
assert list(order_lst) == [4, 6, 10, 16, 18, 22, 28, 30, 34, 40, 42, 46, 52, 54, 58]
assert list(post_order_lst) == [5, 7, 11, 17, 19, 23, 29, 31, 35, 41, 43, 47, 53, 55, 59]
segment_mask = np.array([
[False, False],
[False, True],
[True, False],
[True, True],
[False, False],
])
call_i = np.array([0])
pre_sim_lst = List.empty_list(typeof(0))
post_sim_lst = List.empty_list(typeof(0))
pre_row_lst = List.empty_list(typeof(0))
post_row_lst = List.empty_list(typeof(0))
pre_segment_lst = List.empty_list(typeof(0))
post_segment_lst = List.empty_list(typeof(0))
order_lst = List.empty_list(typeof(0))
post_order_lst = List.empty_list(typeof(0))
_ = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, order_lst,
group_by=np.array([0, 0, 1]),
pre_sim_func_nb=pre_sim_func_nb, pre_sim_args=(call_i, pre_sim_lst),
post_sim_func_nb=post_sim_func_nb, post_sim_args=(call_i, post_sim_lst),
pre_row_func_nb=pre_row_func_nb, pre_row_args=(pre_row_lst,),
post_row_func_nb=post_row_func_nb, post_row_args=(post_row_lst,),
pre_segment_func_nb=pre_segment_func_nb, pre_segment_args=(pre_segment_lst,),
post_segment_func_nb=post_segment_func_nb, post_segment_args=(post_segment_lst,),
post_order_func_nb=post_order_func_nb, post_order_args=(post_order_lst,),
segment_mask=segment_mask, call_pre_segment=True, call_post_segment=True,
row_wise=True, template_mapping=dict(np=np)
)
assert call_i[0] == 44
assert list(pre_sim_lst) == [1]
assert list(post_sim_lst) == [44]
assert list(pre_row_lst) == [2, 8, 16, 26, 38]
assert list(post_row_lst) == [7, 15, 25, 37, 43]
assert list(pre_segment_lst) == [3, 5, 9, 11, 17, 23, 27, 33, 39, 41]
assert list(post_segment_lst) == [4, 6, 10, 14, 22, 24, 32, 36, 40, 42]
assert list(order_lst) == [12, 18, 20, 28, 30, 34]
assert list(post_order_lst) == [13, 19, 21, 29, 31, 35]
call_i = np.array([0])
pre_sim_lst = List.empty_list(typeof(0))
post_sim_lst = List.empty_list(typeof(0))
pre_row_lst = List.empty_list(typeof(0))
post_row_lst = List.empty_list(typeof(0))
pre_segment_lst = List.empty_list(typeof(0))
post_segment_lst = List.empty_list(typeof(0))
order_lst = List.empty_list(typeof(0))
post_order_lst = List.empty_list(typeof(0))
_ = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, order_lst,
group_by=np.array([0, 0, 1]),
pre_sim_func_nb=pre_sim_func_nb, pre_sim_args=(call_i, pre_sim_lst),
post_sim_func_nb=post_sim_func_nb, post_sim_args=(call_i, post_sim_lst),
pre_row_func_nb=pre_row_func_nb, pre_row_args=(pre_row_lst,),
post_row_func_nb=post_row_func_nb, post_row_args=(post_row_lst,),
pre_segment_func_nb=pre_segment_func_nb, pre_segment_args=(pre_segment_lst,),
post_segment_func_nb=post_segment_func_nb, post_segment_args=(post_segment_lst,),
post_order_func_nb=post_order_func_nb, post_order_args=(post_order_lst,),
segment_mask=segment_mask, call_pre_segment=False, call_post_segment=False,
row_wise=True, template_mapping=dict(np=np)
)
assert call_i[0] == 32
assert list(pre_sim_lst) == [1]
assert list(post_sim_lst) == [32]
assert list(pre_row_lst) == [2, 4, 10, 18, 30]
assert list(post_row_lst) == [3, 9, 17, 29, 31]
assert list(pre_segment_lst) == [5, 11, 19, 25]
assert list(post_segment_lst) == [8, 16, 24, 28]
assert list(order_lst) == [6, 12, 14, 20, 22, 26]
assert list(post_order_lst) == [7, 13, 15, 21, 23, 27]
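    # test_func_calls_row_wise_flexible: row-wise and flexible combined; sub_arg is
    # again a RepEval template asserted to be 15 inside every callback.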
def test_func_calls_row_wise_flexible(self):
@njit
def pre_sim_func_nb(c, call_i, pre_sim_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
pre_sim_lst.append(call_i[0])
return (call_i,)
@njit
def post_sim_func_nb(c, call_i, post_sim_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
post_sim_lst.append(call_i[0])
return (call_i,)
@njit
def pre_row_func_nb(c, call_i, pre_row_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
pre_row_lst.append(call_i[0])
return (call_i,)
@njit
def post_row_func_nb(c, call_i, post_row_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
post_row_lst.append(call_i[0])
return (call_i,)
@njit
def pre_segment_func_nb(c, call_i, pre_segment_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
pre_segment_lst.append(call_i[0])
return (call_i,)
@njit
def post_segment_func_nb(c, call_i, post_segment_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
post_segment_lst.append(call_i[0])
return (call_i,)
@njit
def flex_order_func_nb(c, call_i, order_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
order_lst.append(call_i[0])
col = c.from_col + c.call_idx
if c.call_idx < c.group_len:
return col, NoOrder
return -1, NoOrder
@njit
def post_order_func_nb(c, call_i, post_order_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
post_order_lst.append(call_i[0])
sub_arg = vbt.RepEval('np.prod([target_shape[0], target_shape[1]])')
call_i = np.array([0])
pre_sim_lst = List.empty_list(typeof(0))
post_sim_lst = List.empty_list(typeof(0))
pre_row_lst = List.empty_list(typeof(0))
post_row_lst = List.empty_list(typeof(0))
pre_segment_lst = List.empty_list(typeof(0))
post_segment_lst = List.empty_list(typeof(0))
order_lst = List.empty_list(typeof(0))
post_order_lst = List.empty_list(typeof(0))
_ = vbt.Portfolio.from_order_func(
price_wide, flex_order_func_nb, order_lst, sub_arg,
group_by=np.array([0, 0, 1]),
pre_sim_func_nb=pre_sim_func_nb, pre_sim_args=(call_i, pre_sim_lst, sub_arg),
post_sim_func_nb=post_sim_func_nb, post_sim_args=(call_i, post_sim_lst, sub_arg),
pre_row_func_nb=pre_row_func_nb, pre_row_args=(pre_row_lst, sub_arg),
post_row_func_nb=post_row_func_nb, post_row_args=(post_row_lst, sub_arg),
pre_segment_func_nb=pre_segment_func_nb, pre_segment_args=(pre_segment_lst, sub_arg),
post_segment_func_nb=post_segment_func_nb, post_segment_args=(post_segment_lst, sub_arg),
post_order_func_nb=post_order_func_nb, post_order_args=(post_order_lst, sub_arg),
row_wise=True, flexible=True, template_mapping=dict(np=np)
)
assert call_i[0] == 72
assert list(pre_sim_lst) == [1]
assert list(post_sim_lst) == [72]
assert list(pre_row_lst) == [2, 16, 30, 44, 58]
assert list(post_row_lst) == [15, 29, 43, 57, 71]
assert list(pre_segment_lst) == [3, 10, 17, 24, 31, 38, 45, 52, 59, 66]
assert list(post_segment_lst) == [9, 14, 23, 28, 37, 42, 51, 56, 65, 70]
assert list(order_lst) == [
4, 6, 8, 11, 13, 18, 20, 22, 25, 27, 32, 34, 36,
39, 41, 46, 48, 50, 53, 55, 60, 62, 64, 67, 69
]
assert list(post_order_lst) == [5, 7, 12, 19, 21, 26, 33, 35, 40, 47, 49, 54, 61, 63, 68]
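# Second run: a segment mask deactivates some segments, but the pre/post-segment hooks still fire
# for every segment because call_pre_segment and call_post_segment are True.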
segment_mask = np.array([
[False, False],
[False, True],
[True, False],
[True, True],
[False, False],
])
call_i = np.array([0])
pre_sim_lst = List.empty_list(typeof(0))
post_sim_lst = List.empty_list(typeof(0))
pre_row_lst = List.empty_list(typeof(0))
post_row_lst = List.empty_list(typeof(0))
pre_segment_lst = List.empty_list(typeof(0))
post_segment_lst = List.empty_list(typeof(0))
order_lst = List.empty_list(typeof(0))
post_order_lst = List.empty_list(typeof(0))
_ = vbt.Portfolio.from_order_func(
price_wide, flex_order_func_nb, order_lst, sub_arg,
group_by=np.array([0, 0, 1]),
pre_sim_func_nb=pre_sim_func_nb, pre_sim_args=(call_i, pre_sim_lst, sub_arg),
post_sim_func_nb=post_sim_func_nb, post_sim_args=(call_i, post_sim_lst, sub_arg),
pre_row_func_nb=pre_row_func_nb, pre_row_args=(pre_row_lst, sub_arg),
post_row_func_nb=post_row_func_nb, post_row_args=(post_row_lst, sub_arg),
pre_segment_func_nb=pre_segment_func_nb, pre_segment_args=(pre_segment_lst, sub_arg),
post_segment_func_nb=post_segment_func_nb, post_segment_args=(post_segment_lst, sub_arg),
post_order_func_nb=post_order_func_nb, post_order_args=(post_order_lst, sub_arg),
segment_mask=segment_mask, call_pre_segment=True, call_post_segment=True,
row_wise=True, flexible=True, template_mapping=dict(np=np)
)
assert call_i[0] == 48
assert list(pre_sim_lst) == [1]
assert list(post_sim_lst) == [48]
assert list(pre_row_lst) == [2, 8, 17, 28, 42]
assert list(post_row_lst) == [7, 16, 27, 41, 47]
assert list(pre_segment_lst) == [3, 5, 9, 11, 18, 25, 29, 36, 43, 45]
assert list(post_segment_lst) == [4, 6, 10, 15, 24, 26, 35, 40, 44, 46]
assert list(order_lst) == [12, 14, 19, 21, 23, 30, 32, 34, 37, 39]
assert list(post_order_lst) == [13, 20, 22, 31, 33, 38]
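# Third run: same mask, but with call_pre_segment=False and call_post_segment=False the segment
# hooks fire only for the four active segments.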
call_i = np.array([0])
pre_sim_lst = List.empty_list(typeof(0))
post_sim_lst = List.empty_list(typeof(0))
pre_row_lst = List.empty_list(typeof(0))
post_row_lst = List.empty_list(typeof(0))
pre_segment_lst = List.empty_list(typeof(0))
post_segment_lst = List.empty_list(typeof(0))
order_lst = List.empty_list(typeof(0))
post_order_lst = List.empty_list(typeof(0))
_ = vbt.Portfolio.from_order_func(
price_wide, flex_order_func_nb, order_lst, sub_arg,
group_by=np.array([0, 0, 1]),
pre_sim_func_nb=pre_sim_func_nb, pre_sim_args=(call_i, pre_sim_lst, sub_arg),
post_sim_func_nb=post_sim_func_nb, post_sim_args=(call_i, post_sim_lst, sub_arg),
pre_row_func_nb=pre_row_func_nb, pre_row_args=(pre_row_lst, sub_arg),
post_row_func_nb=post_row_func_nb, post_row_args=(post_row_lst, sub_arg),
pre_segment_func_nb=pre_segment_func_nb, pre_segment_args=(pre_segment_lst, sub_arg),
post_segment_func_nb=post_segment_func_nb, post_segment_args=(post_segment_lst, sub_arg),
post_order_func_nb=post_order_func_nb, post_order_args=(post_order_lst, sub_arg),
segment_mask=segment_mask, call_pre_segment=False, call_post_segment=False,
row_wise=True, flexible=True, template_mapping=dict(np=np)
)
assert call_i[0] == 36
assert list(pre_sim_lst) == [1]
assert list(post_sim_lst) == [36]
assert list(pre_row_lst) == [2, 4, 11, 20, 34]
assert list(post_row_lst) == [3, 10, 19, 33, 35]
assert list(pre_segment_lst) == [5, 12, 21, 28]
assert list(post_segment_lst) == [9, 18, 27, 32]
assert list(order_lst) == [6, 8, 13, 15, 17, 22, 24, 26, 29, 31]
assert list(post_order_lst) == [7, 14, 16, 23, 25, 30]
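# The setup above produces exactly 15 order records, so max_orders=15 is the smallest cap that fits
# and max_orders=14 must raise.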
@pytest.mark.parametrize("test_row_wise", [False, True])
@pytest.mark.parametrize("test_flexible", [False, True])
def test_max_orders(self, test_row_wise, test_flexible):
order_func = flex_order_func_nb if test_flexible else order_func_nb
_ = vbt.Portfolio.from_order_func(
price_wide, order_func, np.asarray(np.inf),
row_wise=test_row_wise, flexible=test_flexible)
_ = vbt.Portfolio.from_order_func(
price_wide, order_func, np.asarray(np.inf),
row_wise=test_row_wise, max_orders=15, flexible=test_flexible)
with pytest.raises(Exception):
_ = vbt.Portfolio.from_order_func(
price_wide, order_func, np.asarray(np.inf),
row_wise=test_row_wise, max_orders=14, flexible=test_flexible)
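# Same idea for log records: 15 logs are written, so max_logs=14 is too small.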
@pytest.mark.parametrize("test_row_wise", [False, True])
@pytest.mark.parametrize("test_flexible", [False, True])
def test_max_logs(self, test_row_wise, test_flexible):
log_order_func = log_flex_order_func_nb if test_flexible else log_order_func_nb
_ = vbt.Portfolio.from_order_func(
price_wide, log_order_func, np.asarray(np.inf),
row_wise=test_row_wise, flexible=test_flexible)
_ = vbt.Portfolio.from_order_func(
price_wide, log_order_func, np.asarray(np.inf),
row_wise=test_row_wise, max_logs=15, flexible=test_flexible)
with pytest.raises(Exception):
_ = vbt.Portfolio.from_order_func(
price_wide, log_order_func, np.asarray(np.inf),
row_wise=test_row_wise, max_logs=14, flexible=test_flexible)
# ############# Portfolio ############# #
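# Shared fixtures for the tests below: price_na contains NaNs to exercise skipped rows, and the same
# orders are simulated three ways: per column (pf), grouped without cash sharing (pf_grouped), and
# grouped with cash sharing (pf_shared).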
price_na = pd.DataFrame({
'a': [np.nan, 2., 3., 4., 5.],
'b': [1., 2., np.nan, 4., 5.],
'c': [1., 2., 3., 4., np.nan]
}, index=price.index)
order_size_new = pd.Series([1., 0.1, -1., -0.1, 1.])
directions = ['longonly', 'shortonly', 'both']
group_by = pd.Index(['first', 'first', 'second'], name='group')
pf = vbt.Portfolio.from_orders(
price_na, order_size_new, size_type='amount', direction=directions,
fees=0.01, fixed_fees=0.1, slippage=0.01, log=True,
call_seq='reversed', group_by=None,
init_cash=[100., 100., 100.], freq='1D', attach_call_seq=True
) # independent: per-column portfolios, no grouping
pf_grouped = vbt.Portfolio.from_orders(
price_na, order_size_new, size_type='amount', direction=directions,
fees=0.01, fixed_fees=0.1, slippage=0.01, log=True,
call_seq='reversed', group_by=group_by, cash_sharing=False,
init_cash=[100., 100., 100.], freq='1D', attach_call_seq=True
) # grouped: columns grouped by 'group', cash not shared
pf_shared = vbt.Portfolio.from_orders(
price_na, order_size_new, size_type='amount', direction=directions,
fees=0.01, fixed_fees=0.1, slippage=0.01, log=True,
call_seq='reversed', group_by=group_by, cash_sharing=True,
init_cash=[200., 100.], freq='1D', attach_call_seq=True
) # shared: columns grouped by 'group', cash shared within each group
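# Most tests below assert that the ungrouped, grouped and cash-sharing portfolios agree once
# group_by is overridden accordingly.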
class TestPortfolio:
def test_config(self, tmp_path):
pf2 = pf.copy()
pf2._metrics = pf2._metrics.copy()
pf2.metrics['hello'] = 'world'
pf2._subplots = pf2.subplots.copy()
pf2.subplots['hello'] = 'world'
assert vbt.Portfolio.loads(pf2['a'].dumps()) == pf2['a']
assert vbt.Portfolio.loads(pf2.dumps()) == pf2
pf2.save(tmp_path / 'pf')
assert vbt.Portfolio.load(tmp_path / 'pf') == pf2
def test_wrapper(self):
pd.testing.assert_index_equal(
pf.wrapper.index,
price_na.index
)
pd.testing.assert_index_equal(
pf.wrapper.columns,
price_na.columns
)
assert pf.wrapper.ndim == 2
assert pf.wrapper.grouper.group_by is None
assert pf.wrapper.grouper.allow_enable
assert pf.wrapper.grouper.allow_disable
assert pf.wrapper.grouper.allow_modify
pd.testing.assert_index_equal(
pf_grouped.wrapper.index,
price_na.index
)
pd.testing.assert_index_equal(
pf_grouped.wrapper.columns,
price_na.columns
)
assert pf_grouped.wrapper.ndim == 2
pd.testing.assert_index_equal(
pf_grouped.wrapper.grouper.group_by,
group_by
)
assert pf_grouped.wrapper.grouper.allow_enable
assert pf_grouped.wrapper.grouper.allow_disable
assert pf_grouped.wrapper.grouper.allow_modify
pd.testing.assert_index_equal(
pf_shared.wrapper.index,
price_na.index
)
pd.testing.assert_index_equal(
pf_shared.wrapper.columns,
price_na.columns
)
assert pf_shared.wrapper.ndim == 2
pd.testing.assert_index_equal(
pf_shared.wrapper.grouper.group_by,
group_by
)
assert not pf_shared.wrapper.grouper.allow_enable
assert pf_shared.wrapper.grouper.allow_disable
assert not pf_shared.wrapper.grouper.allow_modify
def test_indexing(self):
assert pf['a'].wrapper == pf.wrapper['a']
assert pf['a'].orders == pf.orders['a']
assert pf['a'].logs == pf.logs['a']
assert pf['a'].init_cash == pf.init_cash['a']
pd.testing.assert_series_equal(pf['a'].call_seq, pf.call_seq['a'])
assert pf['c'].wrapper == pf.wrapper['c']
assert pf['c'].orders == pf.orders['c']
assert pf['c'].logs == pf.logs['c']
assert pf['c'].init_cash == pf.init_cash['c']
pd.testing.assert_series_equal(pf['c'].call_seq, pf.call_seq['c'])
assert pf[['c']].wrapper == pf.wrapper[['c']]
assert pf[['c']].orders == pf.orders[['c']]
assert pf[['c']].logs == pf.logs[['c']]
pd.testing.assert_series_equal(pf[['c']].init_cash, pf.init_cash[['c']])
pd.testing.assert_frame_equal(pf[['c']].call_seq, pf.call_seq[['c']])
assert pf_grouped['first'].wrapper == pf_grouped.wrapper['first']
assert pf_grouped['first'].orders == pf_grouped.orders['first']
assert pf_grouped['first'].logs == pf_grouped.logs['first']
assert pf_grouped['first'].init_cash == pf_grouped.init_cash['first']
pd.testing.assert_frame_equal(pf_grouped['first'].call_seq, pf_grouped.call_seq[['a', 'b']])
assert pf_grouped[['first']].wrapper == pf_grouped.wrapper[['first']]
assert pf_grouped[['first']].orders == pf_grouped.orders[['first']]
assert pf_grouped[['first']].logs == pf_grouped.logs[['first']]
pd.testing.assert_series_equal(
pf_grouped[['first']].init_cash,
pf_grouped.init_cash[['first']])
pd.testing.assert_frame_equal(pf_grouped[['first']].call_seq, pf_grouped.call_seq[['a', 'b']])
assert pf_grouped['second'].wrapper == pf_grouped.wrapper['second']
assert pf_grouped['second'].orders == pf_grouped.orders['second']
assert pf_grouped['second'].logs == pf_grouped.logs['second']
assert pf_grouped['second'].init_cash == pf_grouped.init_cash['second']
pd.testing.assert_series_equal(pf_grouped['second'].call_seq, pf_grouped.call_seq['c'])
assert pf_grouped[['second']].wrapper == pf_grouped.wrapper[['second']]
assert pf_grouped[['second']].orders == pf_grouped.orders[['second']]
assert pf_grouped[['second']].logs == pf_grouped.logs[['second']]
pd.testing.assert_series_equal(
pf_grouped[['second']].init_cash,
pf_grouped.init_cash[['second']])
pd.testing.assert_frame_equal(pf_grouped[['second']].call_seq, pf_grouped.call_seq[['c']])
assert pf_shared['first'].wrapper == pf_shared.wrapper['first']
assert pf_shared['first'].orders == pf_shared.orders['first']
assert pf_shared['first'].logs == pf_shared.logs['first']
assert pf_shared['first'].init_cash == pf_shared.init_cash['first']
pd.testing.assert_frame_equal(pf_shared['first'].call_seq, pf_shared.call_seq[['a', 'b']])
assert pf_shared[['first']].wrapper == pf_shared.wrapper[['first']]
assert pf_shared[['first']].orders == pf_shared.orders[['first']]
assert pf_shared[['first']].logs == pf_shared.logs[['first']]
pd.testing.assert_series_equal(
pf_shared[['first']].init_cash,
pf_shared.init_cash[['first']])
pd.testing.assert_frame_equal(pf_shared[['first']].call_seq, pf_shared.call_seq[['a', 'b']])
assert pf_shared['second'].wrapper == pf_shared.wrapper['second']
assert pf_shared['second'].orders == pf_shared.orders['second']
assert pf_shared['second'].logs == pf_shared.logs['second']
assert pf_shared['second'].init_cash == pf_shared.init_cash['second']
pd.testing.assert_series_equal(pf_shared['second'].call_seq, pf_shared.call_seq['c'])
assert pf_shared[['second']].wrapper == pf_shared.wrapper[['second']]
assert pf_shared[['second']].orders == pf_shared.orders[['second']]
assert pf_shared[['second']].logs == pf_shared.logs[['second']]
pd.testing.assert_series_equal(
pf_shared[['second']].init_cash,
pf_shared.init_cash[['second']])
pd.testing.assert_frame_equal(pf_shared[['second']].call_seq, pf_shared.call_seq[['c']])
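# Regrouping: pf can adopt group_by on the fly and pf_grouped can be ungrouped, but ungrouping a
# cash-sharing portfolio is expected to raise.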
def test_regroup(self):
assert pf.regroup(None) == pf
assert pf.regroup(False) == pf
assert pf.regroup(group_by) != pf
pd.testing.assert_index_equal(pf.regroup(group_by).wrapper.grouper.group_by, group_by)
assert pf_grouped.regroup(None) == pf_grouped
assert pf_grouped.regroup(False) != pf_grouped
assert pf_grouped.regroup(False).wrapper.grouper.group_by is None
assert pf_grouped.regroup(group_by) == pf_grouped
assert pf_shared.regroup(None) == pf_shared
with pytest.raises(Exception):
_ = pf_shared.regroup(False)
assert pf_shared.regroup(group_by) == pf_shared
def test_cash_sharing(self):
assert not pf.cash_sharing
assert not pf_grouped.cash_sharing
assert pf_shared.cash_sharing
def test_call_seq(self):
pd.testing.assert_frame_equal(
pf.call_seq,
pd.DataFrame(
np.array([
[0, 0, 0],
[0, 0, 0],
[0, 0, 0],
[0, 0, 0],
[0, 0, 0]
]),
index=price_na.index,
columns=price_na.columns
)
)
pd.testing.assert_frame_equal(
pf_grouped.call_seq,
pd.DataFrame(
np.array([
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0]
]),
index=price_na.index,
columns=price_na.columns
)
)
pd.testing.assert_frame_equal(
pf_shared.call_seq,
pd.DataFrame(
np.array([
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0]
]),
index=price_na.index,
columns=price_na.columns
)
)
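# test_orders compares raw order records; each tuple appears to follow the order_dt layout
# (id, column, row, size, price, fees, side).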
def test_orders(self):
record_arrays_close(
pf.orders.values,
np.array([
(0, 0, 1, 0.1, 2.02, 0.10202, 0), (1, 0, 2, 0.1, 2.9699999999999998, 0.10297, 1),
(2, 0, 4, 1.0, 5.05, 0.1505, 0), (3, 1, 0, 1.0, 0.99, 0.10990000000000001, 1),
(4, 1, 1, 0.1, 1.98, 0.10198, 1), (5, 1, 3, 0.1, 4.04, 0.10404000000000001, 0),
(6, 1, 4, 1.0, 4.95, 0.14950000000000002, 1), (7, 2, 0, 1.0, 1.01, 0.1101, 0),
(8, 2, 1, 0.1, 2.02, 0.10202, 0), (9, 2, 2, 1.0, 2.9699999999999998, 0.1297, 1),
(10, 2, 3, 0.1, 3.96, 0.10396000000000001, 1)
], dtype=order_dt)
)
result = pd.Series(
np.array([3, 4, 4]),
index=price_na.columns
).rename('count')
pd.testing.assert_series_equal(
pf.orders.count(),
result
)
pd.testing.assert_series_equal(
pf_grouped.get_orders(group_by=False).count(),
result
)
pd.testing.assert_series_equal(
pf_shared.get_orders(group_by=False).count(),
result
)
result = pd.Series(
np.array([7, 4]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('count')
pd.testing.assert_series_equal(
pf.get_orders(group_by=group_by).count(),
result
)
pd.testing.assert_series_equal(
pf_grouped.orders.count(),
result
)
pd.testing.assert_series_equal(
pf_shared.orders.count(),
result
)
def test_logs(self):
record_arrays_close(
pf.logs.values,
np.array([
(0, 0, 0, 0, 100.0, 0.0, 0.0, 100.0, np.nan, 100.0, 1.0, np.nan, 0, 0, 0.01,
0.1, 0.01, 1e-08, np.inf, 0.0, False, True, False, True, 100.0, 0.0, 0.0,
100.0, np.nan, 100.0, np.nan, np.nan, np.nan, -1, 1, 1, -1),
(1, 0, 0, 1, 100.0, 0.0, 0.0, 100.0, 2.0, 100.0, 0.1, 2.0, 0, 0, 0.01,
0.1, 0.01, 1e-08, np.inf, 0.0, False, True, False, True, 99.69598, 0.1,
0.0, 99.69598, 2.0, 100.0, 0.1, 2.02, 0.10202, 0, 0, -1, 0),
(2, 0, 0, 2, 99.69598, 0.1, 0.0, 99.69598, 3.0, 99.99598, -1.0, 3.0,
0, 0, 0.01, 0.1, 0.01, 1e-08, np.inf, 0.0, False, True, False, True, 99.89001,
0.0, 0.0, 99.89001, 3.0, 99.99598, 0.1, 2.9699999999999998, 0.10297, 1, 0, -1, 1),
(3, 0, 0, 3, 99.89001, 0.0, 0.0, 99.89001, 4.0, 99.89001, -0.1, 4.0,
0, 0, 0.01, 0.1, 0.01, 1e-08, np.inf, 0.0, False, True, False, True,
99.89001, 0.0, 0.0, 99.89001, 4.0, 99.89001, np.nan, np.nan, np.nan, -1, 2, 8, -1),
(4, 0, 0, 4, 99.89001, 0.0, 0.0, 99.89001, 5.0, 99.89001, 1.0, 5.0, 0,
0, 0.01, 0.1, 0.01, 1e-08, np.inf, 0.0, False, True, False, True, 94.68951,
1.0, 0.0, 94.68951, 5.0, 99.89001, 1.0, 5.05, 0.1505, 0, 0, -1, 2),
(5, 1, 1, 0, 100.0, 0.0, 0.0, 100.0, 1.0, 100.0, 1.0, 1.0, 0, 1, 0.01,
0.1, 0.01, 1e-08, np.inf, 0.0, False, True, False, True, 100.8801, -1.0,
0.99, 98.9001, 1.0, 100.0, 1.0, 0.99, 0.10990000000000001, 1, 0, -1, 3),
(6, 1, 1, 1, 100.8801, -1.0, 0.99, 98.9001, 2.0, 98.8801, 0.1, 2.0, 0, 1,
0.01, 0.1, 0.01, 1e-08, np.inf, 0.0, False, True, False, True, 100.97612,
-1.1, 1.188, 98.60011999999999, 2.0, 98.8801, 0.1, 1.98, 0.10198, 1, 0, -1, 4),
(7, 1, 1, 2, 100.97612, -1.1, 1.188, 98.60011999999999, 2.0, 98.77611999999999,
-1.0, np.nan, 0, 1, 0.01, 0.1, 0.01, 1e-08, np.inf, 0.0, False, True, False, True, 100.97612,
-1.1, 1.188, 98.60011999999999, 2.0, 98.77611999999999, np.nan, np.nan, np.nan, -1, 1, 1, -1),
(8, 1, 1, 3, 100.97612, -1.1, 1.188, 98.60011999999999, 4.0, 96.57611999999999,
-0.1, 4.0, 0, 1, 0.01, 0.1, 0.01, 1e-08, np.inf, 0.0, False, True, False, True,
100.46808, -1.0, 1.08, 98.30807999999999, 4.0, 96.57611999999999, 0.1, 4.04,
0.10404000000000001, 0, 0, -1, 5),
(9, 1, 1, 4, 100.46808, -1.0, 1.08, 98.30807999999999, 5.0, 95.46808, 1.0, 5.0, 0, 1,
0.01, 0.1, 0.01, 1e-08, np.inf, 0.0, False, True, False, True, 105.26858, -2.0, 6.03,
93.20857999999998, 5.0, 95.46808, 1.0, 4.95, 0.14950000000000002, 1, 0, -1, 6),
(10, 2, 2, 0, 100.0, 0.0, 0.0, 100.0, 1.0, 100.0, 1.0, 1.0, 0, 2, 0.01, 0.1,
0.01, 1e-08, np.inf, 0.0, False, True, False, True, 98.8799, 1.0, 0.0, 98.8799,
1.0, 100.0, 1.0, 1.01, 0.1101, 0, 0, -1, 7),
(11, 2, 2, 1, 98.8799, 1.0, 0.0, 98.8799, 2.0, 100.8799, 0.1, 2.0, 0, 2, 0.01,
0.1, 0.01, 1e-08, np.inf, 0.0, False, True, False, True, 98.57588000000001, 1.1,
0.0, 98.57588000000001, 2.0, 100.8799, 0.1, 2.02, 0.10202, 0, 0, -1, 8),
(12, 2, 2, 2, 98.57588000000001, 1.1, 0.0, 98.57588000000001, 3.0, 101.87588000000001,
-1.0, 3.0, 0, 2, 0.01, 0.1, 0.01, 1e-08, np.inf, 0.0, False, True, False, True,
101.41618000000001, 0.10000000000000009, 0.0, 101.41618000000001, 3.0,
101.87588000000001, 1.0, 2.9699999999999998, 0.1297, 1, 0, -1, 9),
(13, 2, 2, 3, 101.41618000000001, 0.10000000000000009, 0.0, 101.41618000000001,
4.0, 101.81618000000002, -0.1, 4.0, 0, 2, 0.01, 0.1, 0.01, 1e-08, np.inf, 0.0,
False, True, False, True, 101.70822000000001, 0.0, 0.0, 101.70822000000001,
4.0, 101.81618000000002, 0.1, 3.96, 0.10396000000000001, 1, 0, -1, 10),
(14, 2, 2, 4, 101.70822000000001, 0.0, 0.0, 101.70822000000001, 4.0, 101.70822000000001,
1.0, np.nan, 0, 2, 0.01, 0.1, 0.01, 1e-08, np.inf, 0.0, False, True, False, True,
101.70822000000001, 0.0, 0.0, 101.70822000000001, 4.0, 101.70822000000001,
np.nan, np.nan, np.nan, -1, 1, 1, -1)
], dtype=log_dt)
)
result = pd.Series(
np.array([5, 5, 5]),
index=price_na.columns
).rename('count')
pd.testing.assert_series_equal(
pf.logs.count(),
result
)
pd.testing.assert_series_equal(
pf_grouped.get_logs(group_by=False).count(),
result
)
pd.testing.assert_series_equal(
pf_shared.get_logs(group_by=False).count(),
result
)
result = pd.Series(
np.array([10, 5]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('count')
pd.testing.assert_series_equal(
pf.get_logs(group_by=group_by).count(),
result
)
pd.testing.assert_series_equal(
pf_grouped.logs.count(),
result
)
pd.testing.assert_series_equal(
pf_shared.logs.count(),
result
)
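# The next three tests cover the different trade views: entry trades attribute PnL per entry order,
# exit trades per exit order, and positions aggregate whole round trips, hence the differing counts.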
def test_entry_trades(self):
record_arrays_close(
pf.entry_trades.values,
np.array([
(0, 0, 0.1, 1, 2.02, 0.10202, 2, 2.9699999999999998, 0.10297,
-0.10999000000000003, -0.5445049504950497, 0, 1, 0),
(1, 0, 1.0, 4, 5.05, 0.1505, 4, 5.0, 0.0, -0.20049999999999982, -0.03970297029702967, 0, 0, 1),
(2, 1, 1.0, 0, 0.99, 0.10990000000000001, 4, 4.954285714285714,
0.049542857142857145, -4.12372857142857, -4.165382395382394, 1, 0, 2),
(3, 1, 0.1, 1, 1.98, 0.10198, 4, 4.954285714285714, 0.004954285714285714,
-0.4043628571428571, -2.0422366522366517, 1, 0, 2),
(4, 1, 1.0, 4, 4.95, 0.14950000000000002, 4, 4.954285714285714,
0.049542857142857145, -0.20332857142857072, -0.04107647907647893, 1, 0, 2),
(5, 2, 1.0, 0, 1.01, 0.1101, 3, 3.0599999999999996, 0.21241818181818184,
1.727481818181818, 1.71037803780378, 0, 1, 3),
(6, 2, 0.1, 1, 2.02, 0.10202, 3, 3.0599999999999996, 0.021241818181818185,
-0.019261818181818203, -0.09535553555355546, 0, 1, 3)
], dtype=trade_dt)
)
result = pd.Series(
np.array([2, 3, 2]),
index=price_na.columns
).rename('count')
pd.testing.assert_series_equal(
pf.entry_trades.count(),
result
)
pd.testing.assert_series_equal(
pf_grouped.get_entry_trades(group_by=False).count(),
result
)
pd.testing.assert_series_equal(
pf_shared.get_entry_trades(group_by=False).count(),
result
)
result = pd.Series(
np.array([5, 2]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('count')
pd.testing.assert_series_equal(
pf.get_entry_trades(group_by=group_by).count(),
result
)
pd.testing.assert_series_equal(
pf_grouped.entry_trades.count(),
result
)
pd.testing.assert_series_equal(
pf_shared.entry_trades.count(),
result
)
def test_exit_trades(self):
record_arrays_close(
pf.exit_trades.values,
np.array([
(0, 0, 0.1, 1, 2.02, 0.10202, 2, 2.9699999999999998, 0.10297,
-0.10999000000000003, -0.5445049504950497, 0, 1, 0),
(1, 0, 1.0, 4, 5.05, 0.1505, 4, 5.0, 0.0,
-0.20049999999999982, -0.03970297029702967, 0, 0, 1),
(2, 1, 0.1, 0, 1.0799999999999998, 0.019261818181818182,
3, 4.04, 0.10404000000000001, -0.4193018181818182, -3.882424242424243, 1, 1, 2),
(3, 1, 2.0, 0, 3.015, 0.3421181818181819, 4, 5.0, 0.0,
-4.312118181818182, -0.7151108095884214, 1, 0, 2),
(4, 2, 1.0, 0, 1.1018181818181818, 0.19283636363636364, 2,
2.9699999999999998, 0.1297, 1.5456454545454543, 1.4028135313531351, 0, 1, 3),
(5, 2, 0.10000000000000009, 0, 1.1018181818181818, 0.019283636363636378,
3, 3.96, 0.10396000000000001, 0.1625745454545457, 1.4755115511551162, 0, 1, 3)
], dtype=trade_dt)
)
result = pd.Series(
np.array([2, 2, 2]),
index=price_na.columns
).rename('count')
pd.testing.assert_series_equal(
pf.exit_trades.count(),
result
)
pd.testing.assert_series_equal(
pf_grouped.get_exit_trades(group_by=False).count(),
result
)
pd.testing.assert_series_equal(
pf_shared.get_exit_trades(group_by=False).count(),
result
)
result = pd.Series(
np.array([4, 2]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('count')
pd.testing.assert_series_equal(
pf.get_exit_trades(group_by=group_by).count(),
result
)
pd.testing.assert_series_equal(
pf_grouped.exit_trades.count(),
result
)
pd.testing.assert_series_equal(
pf_shared.exit_trades.count(),
result
)
def test_positions(self):
record_arrays_close(
pf.positions.values,
np.array([
(0, 0, 0.1, 1, 2.02, 0.10202, 2, 2.9699999999999998,
0.10297, -0.10999000000000003, -0.5445049504950497, 0, 1, 0),
(1, 0, 1.0, 4, 5.05, 0.1505, 4, 5.0, 0.0,
-0.20049999999999982, -0.03970297029702967, 0, 0, 1),
(2, 1, 2.1, 0, 2.9228571428571426, 0.36138000000000003, 4, 4.954285714285714,
0.10404000000000001, -4.731420000000001, -0.7708406647116326, 1, 0, 2),
(3, 2, 1.1, 0, 1.1018181818181818, 0.21212000000000003, 3,
3.06, 0.23366000000000003, 1.7082200000000003, 1.4094224422442245, 0, 1, 3)
], dtype=trade_dt)
)
result = pd.Series(
np.array([2, 1, 1]),
index=price_na.columns
).rename('count')
pd.testing.assert_series_equal(
pf.positions.count(),
result
)
pd.testing.assert_series_equal(
pf_grouped.get_positions(group_by=False).count(),
result
)
pd.testing.assert_series_equal(
pf_shared.get_positions(group_by=False).count(),
result
)
result = pd.Series(
np.array([3, 1]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('count')
pd.testing.assert_series_equal(
pf.get_positions(group_by=group_by).count(),
result
)
pd.testing.assert_series_equal(
pf_grouped.positions.count(),
result
)
pd.testing.assert_series_equal(
pf_shared.positions.count(),
result
)
def test_drawdowns(self):
record_arrays_close(
pf.drawdowns.values,
np.array([
(0, 0, 0, 1, 4, 4, 100.0, 99.68951, 99.68951, 0),
(1, 1, 0, 1, 4, 4, 99.8801, 95.26858, 95.26858, 0),
(2, 2, 2, 3, 3, 4, 101.71618000000001, 101.70822000000001, 101.70822000000001, 0)
], dtype=drawdown_dt)
)
result = pd.Series(
np.array([1, 1, 1]),
index=price_na.columns
).rename('count')
pd.testing.assert_series_equal(
pf.drawdowns.count(),
result
)
pd.testing.assert_series_equal(
pf_grouped.get_drawdowns(group_by=False).count(),
result
)
pd.testing.assert_series_equal(
pf_shared.get_drawdowns(group_by=False).count(),
result
)
result = pd.Series(
np.array([1, 1]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('count')
pd.testing.assert_series_equal(
pf.get_drawdowns(group_by=group_by).count(),
result
)
pd.testing.assert_series_equal(
pf_grouped.drawdowns.count(),
result
)
pd.testing.assert_series_equal(
pf_shared.drawdowns.count(),
result
)
def test_close(self):
pd.testing.assert_frame_equal(pf.close, price_na)
pd.testing.assert_frame_equal(pf_grouped.close, price_na)
pd.testing.assert_frame_equal(pf_shared.close, price_na)
def test_get_filled_close(self):
pd.testing.assert_frame_equal(
pf.get_filled_close(),
price_na.ffill().bfill()
)
def test_asset_flow(self):
pd.testing.assert_frame_equal(
pf.asset_flow(direction='longonly'),
pd.DataFrame(
np.array([
[0., 0., 1.],
[0.1, 0., 0.1],
[-0.1, 0., -1.],
[0., 0., -0.1],
[1., 0., 0.]
]),
index=price_na.index,
columns=price_na.columns
)
)
pd.testing.assert_frame_equal(
pf.asset_flow(direction='shortonly'),
pd.DataFrame(
np.array([
[0., 1., 0.],
[0., 0.1, 0.],
[0., 0., 0.],
[0., -0.1, 0.],
[0., 1., 0.]
]),
index=price_na.index,
columns=price_na.columns
)
)
result = pd.DataFrame(
np.array([
[0., -1., 1.],
[0.1, -0.1, 0.1],
[-0.1, 0., -1.],
[0., 0.1, -0.1],
[1., -1., 0.]
]),
index=price_na.index,
columns=price_na.columns
)
pd.testing.assert_frame_equal(
pf.asset_flow(),
result
)
pd.testing.assert_frame_equal(
pf_grouped.asset_flow(),
result
)
pd.testing.assert_frame_equal(
pf_shared.asset_flow(),
result
)
def test_assets(self):
pd.testing.assert_frame_equal(
pf.assets(direction='longonly'),
pd.DataFrame(
np.array([
[0., 0., 1.],
[0.1, 0., 1.1],
[0., 0., 0.1],
[0., 0., 0.],
[1., 0., 0.]
]),
index=price_na.index,
columns=price_na.columns
)
)
pd.testing.assert_frame_equal(
pf.assets(direction='shortonly'),
pd.DataFrame(
np.array([
[0., 1., 0.],
[0., 1.1, 0.],
[0., 1.1, 0.],
[0., 1., 0.],
[0., 2., 0.]
]),
index=price_na.index,
columns=price_na.columns
)
)
result = pd.DataFrame(
np.array([
[0., -1., 1.],
[0.1, -1.1, 1.1],
[0., -1.1, 0.1],
[0., -1., 0.],
[1., -2., 0.]
]),
index=price_na.index,
columns=price_na.columns
)
pd.testing.assert_frame_equal(
pf.assets(),
result
)
pd.testing.assert_frame_equal(
pf_grouped.assets(),
result
)
pd.testing.assert_frame_equal(
pf_shared.assets(),
result
)
def test_position_mask(self):
pd.testing.assert_frame_equal(
pf.position_mask(direction='longonly'),
pd.DataFrame(
np.array([
[False, False, True],
[True, False, True],
[False, False, True],
[False, False, False],
[True, False, False]
]),
index=price_na.index,
columns=price_na.columns
)
)
pd.testing.assert_frame_equal(
pf.position_mask(direction='shortonly'),
pd.DataFrame(
np.array([
[False, True, False],
[False, True, False],
[False, True, False],
[False, True, False],
[False, True, False]
]),
index=price_na.index,
columns=price_na.columns
)
)
result = pd.DataFrame(
np.array([
[False, True, True],
[True, True, True],
[False, True, True],
[False, True, False],
[True, True, False]
]),
index=price_na.index,
columns=price_na.columns
)
pd.testing.assert_frame_equal(
pf.position_mask(),
result
)
pd.testing.assert_frame_equal(
pf_grouped.position_mask(group_by=False),
result
)
pd.testing.assert_frame_equal(
pf_shared.position_mask(group_by=False),
result
)
result = pd.DataFrame(
np.array([
[True, True],
[True, True],
[True, True],
[True, False],
[True, False]
]),
index=price_na.index,
columns=pd.Index(['first', 'second'], dtype='object', name='group')
)
pd.testing.assert_frame_equal(
pf.position_mask(group_by=group_by),
result
)
pd.testing.assert_frame_equal(
pf_grouped.position_mask(),
result
)
pd.testing.assert_frame_equal(
pf_shared.position_mask(),
result
)
def test_position_coverage(self):
pd.testing.assert_series_equal(
pf.position_coverage(direction='longonly'),
pd.Series(np.array([0.4, 0., 0.6]), index=price_na.columns).rename('position_coverage')
)
pd.testing.assert_series_equal(
pf.position_coverage(direction='shortonly'),
pd.Series(np.array([0., 1., 0.]), index=price_na.columns).rename('position_coverage')
)
result = pd.Series(np.array([0.4, 1., 0.6]), index=price_na.columns).rename('position_coverage')
pd.testing.assert_series_equal(
pf.position_coverage(),
result
)
pd.testing.assert_series_equal(
pf_grouped.position_coverage(group_by=False),
result
)
pd.testing.assert_series_equal(
pf_shared.position_coverage(group_by=False),
result
)
result = pd.Series(
np.array([0.7, 0.6]),
pd.Index(['first', 'second'], dtype='object', name='group')
).rename('position_coverage')
pd.testing.assert_series_equal(
pf.position_coverage(group_by=group_by),
result
)
pd.testing.assert_series_equal(
pf_grouped.position_coverage(),
result
)
pd.testing.assert_series_equal(
pf_shared.position_coverage(),
result
)
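# cash_flow(free=True) is expected to exclude cash locked by short positions, which is why the
# short-only column 'b' deviates most from the plain cash flow.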
def test_cash_flow(self):
pd.testing.assert_frame_equal(
pf.cash_flow(free=True),
pd.DataFrame(
np.array([
[0.0, -1.0998999999999999, -1.1201],
[-0.30402, -0.2999800000000002, -0.3040200000000002],
[0.19402999999999998, 0.0, 2.8402999999999996],
[0.0, -0.2920400000000002, 0.29204000000000035],
[-5.2005, -5.0995, 0.0]
]),
index=price_na.index,
columns=price_na.columns
)
)
result = pd.DataFrame(
np.array([
[0., 0.8801, -1.1201],
[-0.30402, 0.09602, -0.30402],
[0.19403, 0., 2.8403],
[0., -0.50804, 0.29204],
[-5.2005, 4.8005, 0.]
]),
index=price_na.index,
columns=price_na.columns
)
pd.testing.assert_frame_equal(
pf.cash_flow(),
result
)
pd.testing.assert_frame_equal(
pf_grouped.cash_flow(group_by=False),
result
)
pd.testing.assert_frame_equal(
pf_shared.cash_flow(group_by=False),
result
)
result = pd.DataFrame(
np.array([
[0.8801, -1.1201],
[-0.208, -0.30402],
[0.19403, 2.8403],
[-0.50804, 0.29204],
[-0.4, 0.]
]),
index=price_na.index,
columns=pd.Index(['first', 'second'], dtype='object', name='group')
)
pd.testing.assert_frame_equal(
pf.cash_flow(group_by=group_by),
result
)
pd.testing.assert_frame_equal(
pf_grouped.cash_flow(),
result
)
pd.testing.assert_frame_equal(
pf_shared.cash_flow(),
result
)
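# InitCashMode.Auto infers the minimum initial capital per column/group (14k, 12k, 10k here), while
# AutoAlign additionally aligns every column/group to the largest of those amounts.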
def test_init_cash(self):
pd.testing.assert_series_equal(
pf.init_cash,
pd.Series(np.array([100., 100., 100.]), index=price_na.columns).rename('init_cash')
)
pd.testing.assert_series_equal(
pf_grouped.get_init_cash(group_by=False),
pd.Series(np.array([100., 100., 100.]), index=price_na.columns).rename('init_cash')
)
pd.testing.assert_series_equal(
pf_shared.get_init_cash(group_by=False),
pd.Series(np.array([200., 200., 100.]), index=price_na.columns).rename('init_cash')
)
result = pd.Series(
np.array([200., 100.]),
pd.Index(['first', 'second'], dtype='object', name='group')
).rename('init_cash')
pd.testing.assert_series_equal(
pf.get_init_cash(group_by=group_by),
result
)
pd.testing.assert_series_equal(
pf_grouped.init_cash,
result
)
pd.testing.assert_series_equal(
pf_shared.init_cash,
result
)
pd.testing.assert_series_equal(
vbt.Portfolio.from_orders(
price_na, 1000., init_cash=InitCashMode.Auto, group_by=None).init_cash,
pd.Series(
np.array([14000., 12000., 10000.]),
index=price_na.columns
).rename('init_cash')
)
pd.testing.assert_series_equal(
vbt.Portfolio.from_orders(
price_na, 1000., init_cash=InitCashMode.Auto, group_by=group_by).init_cash,
pd.Series(
np.array([26000.0, 10000.0]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('init_cash')
)
pd.testing.assert_series_equal(
vbt.Portfolio.from_orders(
price_na, 1000., init_cash=InitCashMode.Auto, group_by=group_by, cash_sharing=True).init_cash,
pd.Series(
np.array([26000.0, 10000.0]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('init_cash')
)
pd.testing.assert_series_equal(
vbt.Portfolio.from_orders(
price_na, 1000., init_cash=InitCashMode.AutoAlign, group_by=None).init_cash,
pd.Series(
np.array([14000., 14000., 14000.]),
index=price_na.columns
).rename('init_cash')
)
pd.testing.assert_series_equal(
vbt.Portfolio.from_orders(
price_na, 1000., init_cash=InitCashMode.AutoAlign, group_by=group_by).init_cash,
pd.Series(
np.array([26000.0, 26000.0]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('init_cash')
)
pd.testing.assert_series_equal(
vbt.Portfolio.from_orders(
price_na, 1000., init_cash=InitCashMode.AutoAlign, group_by=group_by, cash_sharing=True).init_cash,
pd.Series(
np.array([26000.0, 26000.0]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('init_cash')
)
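# cash() with in_sim_order=True reports the balance in simulation order, i.e. columns of the same
# group see the shared cash as it is updated within each row.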
def test_cash(self):
pd.testing.assert_frame_equal(
pf.cash(free=True),
pd.DataFrame(
np.array([
[100.0, 98.9001, 98.8799],
[99.69598, 98.60011999999999, 98.57588000000001],
[99.89001, 98.60011999999999, 101.41618000000001],
[99.89001, 98.30807999999999, 101.70822000000001],
[94.68951, 93.20857999999998, 101.70822000000001]
]),
index=price_na.index,
columns=price_na.columns
)
)
result = pd.DataFrame(
np.array([
[100., 100.8801, 98.8799],
[99.69598, 100.97612, 98.57588],
[99.89001, 100.97612, 101.41618],
[99.89001, 100.46808, 101.70822],
[94.68951, 105.26858, 101.70822]
]),
index=price_na.index,
columns=price_na.columns
)
pd.testing.assert_frame_equal(
pf.cash(),
result
)
pd.testing.assert_frame_equal(
pf_grouped.cash(group_by=False),
result
)
pd.testing.assert_frame_equal(
pf_shared.cash(group_by=False),
pd.DataFrame(
np.array([
[200., 200.8801, 98.8799],
[199.69598, 200.97612, 98.57588],
[199.89001, 200.97612, 101.41618],
[199.89001, 200.46808, 101.70822],
[194.68951, 205.26858, 101.70822]
]),
index=price_na.index,
columns=price_na.columns
)
)
pd.testing.assert_frame_equal(
pf_shared.cash(group_by=False, in_sim_order=True),
pd.DataFrame(
np.array([
[200.8801, 200.8801, 98.8799],
[200.6721, 200.97612, 98.57588000000001],
[200.86613, 200.6721, 101.41618000000001],
[200.35809, 200.35809, 101.70822000000001],
[199.95809, 205.15859, 101.70822000000001]
]),
index=price_na.index,
columns=price_na.columns
)
)
result = pd.DataFrame(
np.array([
[200.8801, 98.8799],
[200.6721, 98.57588],
[200.86613, 101.41618],
[200.35809, 101.70822],
[199.95809, 101.70822]
]),
index=price_na.index,
columns=pd.Index(['first', 'second'], dtype='object', name='group')
)
pd.testing.assert_frame_equal(
pf.cash(group_by=group_by),
result
)
pd.testing.assert_frame_equal(
pf_grouped.cash(),
result
)
pd.testing.assert_frame_equal(
pf_shared.cash(),
result
)
def test_asset_value(self):
pd.testing.assert_frame_equal(
pf.asset_value(direction='longonly'),
pd.DataFrame(
np.array([
[0., 0., 1.],
[0.2, 0., 2.2],
[0., 0., 0.3],
[0., 0., 0.],
[5., 0., 0.]
]),
index=price_na.index,
columns=price_na.columns
)
)
pd.testing.assert_frame_equal(
pf.asset_value(direction='shortonly'),
pd.DataFrame(
np.array([
[0., 1., 0.],
[0., 2.2, 0.],
[0., 2.2, 0.],
[0., 4., 0.],
[0., 10., 0.]
]),
index=price_na.index,
columns=price_na.columns
)
)
result = pd.DataFrame(
np.array([
[0., -1., 1.],
[0.2, -2.2, 2.2],
[0., -2.2, 0.3],
[0., -4., 0.],
[5., -10., 0.]
]),
index=price_na.index,
columns=price_na.columns
)
pd.testing.assert_frame_equal(
pf.asset_value(),
result
)
pd.testing.assert_frame_equal(
pf_grouped.asset_value(group_by=False),
result
)
pd.testing.assert_frame_equal(
pf_shared.asset_value(group_by=False),
result
)
result = pd.DataFrame(
np.array([
[-1., 1.],
[-2., 2.2],
[-2.2, 0.3],
[-4., 0.],
[-5., 0.]
]),
index=price_na.index,
columns=pd.Index(['first', 'second'], dtype='object', name='group')
)
pd.testing.assert_frame_equal(
pf.asset_value(group_by=group_by),
result
)
pd.testing.assert_frame_equal(
pf_grouped.asset_value(),
result
)
pd.testing.assert_frame_equal(
pf_shared.asset_value(),
result
)
def test_gross_exposure(self):
pd.testing.assert_frame_equal(
pf.gross_exposure(direction='longonly'),
pd.DataFrame(
np.array([
[0., 0., 0.01001202],
[0.00200208, 0., 0.02183062],
[0., 0., 0.00294938],
[0., 0., 0.],
[0.05015573, 0., 0.]
]),
index=price_na.index,
columns=price_na.columns
)
)
pd.testing.assert_frame_equal(
pf.gross_exposure(direction='shortonly'),
pd.DataFrame(
np.array([
[0.0, 0.01000999998999, 0.0],
[0.0, 0.021825370842812494, 0.0],
[0.0, 0.021825370842812494, 0.0],
[0.0, 0.03909759620159034, 0.0],
[0.0, 0.09689116931945001, 0.0]
]),
index=price_na.index,
columns=price_na.columns
)
)
result = pd.DataFrame(
np.array([
[0.0, -0.010214494162927312, 0.010012024441354066],
[0.00200208256628545, -0.022821548354919067, 0.021830620581035857],
[0.0, -0.022821548354919067, 0.002949383274126105],
[0.0, -0.04241418126633477, 0.0],
[0.050155728521486365, -0.12017991413866216, 0.0]
]),
index=price_na.index,
columns=price_na.columns
)
pd.testing.assert_frame_equal(
pf.gross_exposure(),
result
)
pd.testing.assert_frame_equal(
pf_grouped.gross_exposure(group_by=False),
result
)
pd.testing.assert_frame_equal(
pf_shared.gross_exposure(group_by=False),
pd.DataFrame(
np.array([
[0.0, -0.00505305454620791, 0.010012024441354066],
[0.0010005203706447724, -0.011201622483733716, 0.021830620581035857],
[0.0, -0.011201622483733716, 0.002949383274126105],
[0.0, -0.020585865497718882, 0.0],
[0.025038871596209537, -0.0545825965137659, 0.0]
]),
index=price_na.index,
columns=price_na.columns
)
)
result = pd.DataFrame(
np.array([
[-0.00505305454620791, 0.010012024441354066],
[-0.010188689433972452, 0.021830620581035857],
[-0.0112078992458765, 0.002949383274126105],
[-0.02059752492931316, 0.0],
[-0.027337628293439265, 0.0]
]),
index=price_na.index,
columns=pd.Index(['first', 'second'], dtype='object', name='group')
)
pd.testing.assert_frame_equal(
pf.gross_exposure(group_by=group_by),
result
)
pd.testing.assert_frame_equal(
pf_grouped.gross_exposure(),
result
)
pd.testing.assert_frame_equal(
pf_shared.gross_exposure(),
result
)
def test_net_exposure(self):
result = pd.DataFrame(
np.array([
[0.0, -0.01000999998999, 0.010012024441354066],
[0.00200208256628545, -0.021825370842812494, 0.021830620581035857],
[0.0, -0.021825370842812494, 0.002949383274126105],
[0.0, -0.03909759620159034, 0.0],
[0.050155728521486365, -0.09689116931945001, 0.0]
]),
index=price_na.index,
columns=price_na.columns
)
pd.testing.assert_frame_equal(
pf.net_exposure(),
result
)
pd.testing.assert_frame_equal(
pf_grouped.net_exposure(group_by=False),
result
)
pd.testing.assert_frame_equal(
pf_shared.net_exposure(group_by=False),
pd.DataFrame(
np.array([
[0.0, -0.005002498748124688, 0.010012024441354066],
[0.0010005203706447724, -0.010956168751293576, 0.021830620581035857],
[0.0, -0.010956168751293576, 0.002949383274126105],
[0.0, -0.019771825228137207, 0.0],
[0.025038871596209537, -0.049210520540028384, 0.0]
]),
index=price_na.index,
columns=price_na.columns
)
)
result = pd.DataFrame(
np.array([
[-0.005002498748124688, 0.010012024441354066],
[-0.009965205542937988, 0.021830620581035857],
[-0.010962173376438594, 0.002949383274126105],
[-0.019782580537729116, 0.0],
[-0.0246106361476199, 0.0]
]),
index=price_na.index,
columns=pd.Index(['first', 'second'], dtype='object', name='group')
)
pd.testing.assert_frame_equal(
pf.net_exposure(group_by=group_by),
result
)
pd.testing.assert_frame_equal(
pf_grouped.net_exposure(),
result
)
pd.testing.assert_frame_equal(
pf_shared.net_exposure(),
result
)
def test_value(self):
result = pd.DataFrame(
np.array([
[100., 99.8801, 99.8799],
[99.89598, 98.77612, 100.77588],
[99.89001, 98.77612, 101.71618],
[99.89001, 96.46808, 101.70822],
[99.68951, 95.26858, 101.70822]
]),
index=price_na.index,
columns=price_na.columns
)
pd.testing.assert_frame_equal(
pf.value(),
result
)
pd.testing.assert_frame_equal(
pf_grouped.value(group_by=False),
result
)
pd.testing.assert_frame_equal(
pf_shared.value(group_by=False),
pd.DataFrame(
np.array([
[200., 199.8801, 99.8799],
[199.89598, 198.77612, 100.77588],
[199.89001, 198.77612, 101.71618],
[199.89001, 196.46808, 101.70822],
[199.68951, 195.26858, 101.70822]
]),
index=price_na.index,
columns=price_na.columns
)
)
pd.testing.assert_frame_equal(
pf_shared.value(group_by=False, in_sim_order=True),
pd.DataFrame(
np.array([
[199.8801, 199.8801, 99.8799],
[198.6721, 198.77612000000002, 100.77588000000002],
[198.66613, 198.6721, 101.71618000000001],
[196.35809, 196.35809, 101.70822000000001],
[194.95809, 195.15859, 101.70822000000001]
]),
index=price_na.index,
columns=price_na.columns
)
)
result = pd.DataFrame(
np.array([
[199.8801, 99.8799],
[198.6721, 100.77588],
[198.66613, 101.71618],
[196.35809, 101.70822],
[194.95809, 101.70822]
]),
index=price_na.index,
columns=pd.Index(['first', 'second'], dtype='object', name='group')
)
pd.testing.assert_frame_equal(
pf.value(group_by=group_by),
result
)
pd.testing.assert_frame_equal(
pf_grouped.value(),
result
)
pd.testing.assert_frame_equal(
pf_shared.value(),
result
)
def test_total_profit(self):
result = pd.Series(
np.array([-0.31049, -4.73142, 1.70822]),
index=price_na.columns
).rename('total_profit')
pd.testing.assert_series_equal(
pf.total_profit(),
result
)
pd.testing.assert_series_equal(
pf_grouped.total_profit(group_by=False),
result
)
pd.testing.assert_series_equal(
pf_shared.total_profit(group_by=False),
result
)
result = pd.Series(
np.array([-5.04191, 1.70822]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('total_profit')
pd.testing.assert_series_equal(
pf.total_profit(group_by=group_by),
result
)
pd.testing.assert_series_equal(
pf_grouped.total_profit(),
result
)
pd.testing.assert_series_equal(
pf_shared.total_profit(),
result
)
def test_final_value(self):
result = pd.Series(
np.array([99.68951, 95.26858, 101.70822]),
index=price_na.columns
).rename('final_value')
pd.testing.assert_series_equal(
pf.final_value(),
result
)
pd.testing.assert_series_equal(
pf_grouped.final_value(group_by=False),
result
)
pd.testing.assert_series_equal(
pf_shared.final_value(group_by=False),
pd.Series(
np.array([199.68951, 195.26858, 101.70822]),
index=price_na.columns
).rename('final_value')
)
result = pd.Series(
np.array([194.95809, 101.70822]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('final_value')
pd.testing.assert_series_equal(
pf.final_value(group_by=group_by),
result
)
pd.testing.assert_series_equal(
pf_grouped.final_value(),
result
)
pd.testing.assert_series_equal(
pf_shared.final_value(),
result
)
def test_total_return(self):
result = pd.Series(
np.array([-0.0031049, -0.0473142, 0.0170822]),
index=price_na.columns
).rename('total_return')
pd.testing.assert_series_equal(
pf.total_return(),
result
)
pd.testing.assert_series_equal(
pf_grouped.total_return(group_by=False),
result
)
pd.testing.assert_series_equal(
pf_shared.total_return(group_by=False),
pd.Series(
np.array([-0.00155245, -0.0236571, 0.0170822]),
index=price_na.columns
).rename('total_return')
)
result = pd.Series(
np.array([-0.02520955, 0.0170822]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('total_return')
pd.testing.assert_series_equal(
pf.total_return(group_by=group_by),
result
)
pd.testing.assert_series_equal(
pf_grouped.total_return(),
result
)
pd.testing.assert_series_equal(
pf_shared.total_return(),
result
)
def test_returns(self):
result = pd.DataFrame(
np.array([
[0.00000000e+00, -1.19900000e-03, -1.20100000e-03],
[-1.04020000e-03, -1.10530526e-02, 8.97057366e-03],
[-5.97621646e-05, 0.0, 9.33060570e-03],
[0.00000000e+00, -0.023366376407576966, -7.82569695e-05],
[-2.00720773e-03, -1.24341648e-02, 0.00000000e+00]
]),
index=price_na.index,
columns=price_na.columns
)
pd.testing.assert_frame_equal(
pf.returns(),
result
)
pd.testing.assert_frame_equal(
pf_grouped.returns(group_by=False),
result
)
pd.testing.assert_frame_equal(
pf_shared.returns(group_by=False),
pd.DataFrame(
np.array([
[0.00000000e+00, -5.99500000e-04, -1.20100000e-03],
[-5.20100000e-04, -5.52321117e-03, 8.97057366e-03],
[-2.98655331e-05, 0.0, 9.33060570e-03],
[0.00000000e+00, -0.011611253907159497, -7.82569695e-05],
[-1.00305163e-03, -6.10531746e-03, 0.00000000e+00]
]),
index=price_na.index,
columns=price_na.columns
)
)
pd.testing.assert_frame_equal(
pf_shared.returns(group_by=False, in_sim_order=True),
pd.DataFrame(
np.array([
[0.0, -0.0005995000000000062, -1.20100000e-03],
[-0.0005233022960706736, -0.005523211165093367, 8.97057366e-03],
[-3.0049513746473233e-05, 0.0, 9.33060570e-03],
[0.0, -0.011617682390048093, -7.82569695e-05],
[-0.0010273695869600474, -0.0061087373583639994, 0.00000000e+00]
]),
index=price_na.index,
columns=price_na.columns
)
)
result = pd.DataFrame(
np.array([
[-5.99500000e-04, -1.20100000e-03],
[-6.04362315e-03, 8.97057366e-03],
[-3.0049513746473233e-05, 9.33060570e-03],
[-0.011617682390048093, -7.82569695e-05],
[-7.12983101e-03, 0.00000000e+00]
]),
index=price_na.index,
columns=pd.Index(['first', 'second'], dtype='object', name='group')
)
pd.testing.assert_frame_equal(
pf.returns(group_by=group_by),
result
)
pd.testing.assert_frame_equal(
pf_grouped.returns(),
result
)
pd.testing.assert_frame_equal(
pf_shared.returns(),
result
)
def test_asset_returns(self):
result = pd.DataFrame(
np.array([
[0., -np.inf, -np.inf],
[-np.inf, -1.10398, 0.89598],
[-0.02985, 0.0, 0.42740909],
[0., -1.0491090909090908, -0.02653333],
[-np.inf, -0.299875, 0.]
]),
index=price_na.index,
columns=price_na.columns
)
pd.testing.assert_frame_equal(
pf.asset_returns(),
result
)
pd.testing.assert_frame_equal(
pf_grouped.asset_returns(group_by=False),
result
)
pd.testing.assert_frame_equal(
pf_shared.asset_returns(group_by=False),
result
)
result = pd.DataFrame(
np.array([
[-np.inf, -np.inf],
[-1.208, 0.89598],
[-0.0029850000000000154, 0.42740909],
[-1.0491090909090908, -0.02653333],
[-0.35, 0.]
]),
index=price_na.index,
columns=pd.Index(['first', 'second'], dtype='object', name='group')
)
pd.testing.assert_frame_equal(
pf.asset_returns(group_by=group_by),
result
)
pd.testing.assert_frame_equal(
pf_grouped.asset_returns(),
result
)
pd.testing.assert_frame_equal(
pf_shared.asset_returns(),
result
)
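# Benchmark value corresponds to buying and holding the filled close with the same initial capital,
# e.g. column 'a' grows from 100 to 250 as its filled price goes from 2 to 5.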
def test_benchmark_value(self):
result = pd.DataFrame(
np.array([
[100., 100., 100.],
[100., 200., 200.],
[150., 200., 300.],
[200., 400., 400.],
[250., 500., 400.]
]),
index=price_na.index,
columns=price_na.columns
)
pd.testing.assert_frame_equal(
pf.benchmark_value(),
result
)
pd.testing.assert_frame_equal(
pf_grouped.benchmark_value(group_by=False),
result
)
pd.testing.assert_frame_equal(
pf_shared.benchmark_value(group_by=False),
pd.DataFrame(
np.array([
[200., 200., 100.],
[200., 400., 200.],
[300., 400., 300.],
[400., 800., 400.],
[500., 1000., 400.]
]),
index=price_na.index,
columns=price_na.columns
)
)
result = pd.DataFrame(
np.array([
[200., 100.],
[300., 200.],
[350., 300.],
[600., 400.],
[750., 400.]
]),
index=price_na.index,
columns=pd.Index(['first', 'second'], dtype='object', name='group')
)
pd.testing.assert_frame_equal(
pf.benchmark_value(group_by=group_by),
result
)
pd.testing.assert_frame_equal(
pf_grouped.benchmark_value(),
result
)
pd.testing.assert_frame_equal(
pf_shared.benchmark_value(),
result
)
def test_benchmark_returns(self):
result = pd.DataFrame(
np.array([
[0., 0., 0.],
[0., 1., 1.],
[0.5, 0., 0.5],
[0.33333333, 1., 0.33333333],
[0.25, 0.25, 0.]
]),
index=price_na.index,
columns=price_na.columns
)
pd.testing.assert_frame_equal(
pf.benchmark_returns(),
result
)
pd.testing.assert_frame_equal(
pf_grouped.benchmark_returns(group_by=False),
result
)
pd.testing.assert_frame_equal(
pf_shared.benchmark_returns(group_by=False),
result
)
result = pd.DataFrame(
np.array([
[0., 0.],
[0.5, 1.],
[0.16666667, 0.5],
[0.71428571, 0.33333333],
[0.25, 0.]
]),
index=price_na.index,
columns=pd.Index(['first', 'second'], dtype='object', name='group')
)
pd.testing.assert_frame_equal(
pf.benchmark_returns(group_by=group_by),
result
)
pd.testing.assert_frame_equal(
pf_grouped.benchmark_returns(),
result
)
pd.testing.assert_frame_equal(
pf_shared.benchmark_returns(),
result
)
def test_total_benchmark_return(self):
result = pd.Series(
np.array([1.5, 4., 3.]),
index=price_na.columns
).rename('total_benchmark_return')
pd.testing.assert_series_equal(
pf.total_benchmark_return(),
result
)
pd.testing.assert_series_equal(
pf_grouped.total_benchmark_return(group_by=False),
result
)
pd.testing.assert_series_equal(
pf_shared.total_benchmark_return(group_by=False),
result
)
result = pd.Series(
np.array([2.75, 3.]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('total_benchmark_return')
pd.testing.assert_series_equal(
pf.total_benchmark_return(group_by=group_by),
result
)
pd.testing.assert_series_equal(
pf_grouped.total_benchmark_return(),
result
)
pd.testing.assert_series_equal(
pf_shared.total_benchmark_return(),
result
)
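# Return-based metrics such as cumulative_returns and sharpe_ratio are computed from the portfolio
# returns; risk_free and year_freq can be overridden per call, and group_by=False switches to
# per-column results.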
def test_return_method(self):
pd.testing.assert_frame_equal(
pf_shared.cumulative_returns(),
pd.DataFrame(
np.array([
[-0.000599499999999975, -0.0012009999999998966],
[-0.006639499999999909, 0.007758800000000177],
[-0.006669349999999907, 0.017161800000000005],
[-0.01820955000000002, 0.017082199999999936],
[-0.025209550000000136, 0.017082199999999936]
]),
index=price_na.index,
columns=pd.Index(['first', 'second'], dtype='object', name='group')
)
)
pd.testing.assert_frame_equal(
pf_shared.cumulative_returns(group_by=False),
pd.DataFrame(
np.array([
[0.0, -0.000599499999999975, -0.0012009999999998966],
[-0.0005201000000001343, -0.006119399999999886, 0.007758800000000177],
[-0.0005499500000001323, -0.006119399999999886, 0.017161800000000005],
[-0.0005499500000001323, -0.017659599999999886, 0.017082199999999936],
[-0.0015524500000001495, -0.023657099999999875, 0.017082199999999936]
]),
index=price_na.index,
columns=price_na.columns
)
)
pd.testing.assert_series_equal(
pf_shared.sharpe_ratio(),
pd.Series(
np.array([-20.095906945591288, 12.345065267401496]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('sharpe_ratio')
)
pd.testing.assert_series_equal(
pf_shared.sharpe_ratio(risk_free=0.01),
pd.Series(
np.array([-59.62258787402645, -23.91718815937344]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('sharpe_ratio')
)
pd.testing.assert_series_equal(
pf_shared.sharpe_ratio(year_freq='365D'),
pd.Series(
np.array([-20.095906945591288, 12.345065267401496]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('sharpe_ratio')
)
pd.testing.assert_series_equal(
pf_shared.sharpe_ratio(group_by=False),
pd.Series(
np.array([-13.30950646054953, -19.278625117344564, 12.345065267401496]),
index=price_na.columns
).rename('sharpe_ratio')
)
pd.testing.assert_series_equal(
pf_shared.information_ratio(group_by=False),
pd.Series(
np.array([-0.9988561334618041, -0.8809478746008806, -0.884780642352239]),
index=price_na.columns
).rename('information_ratio')
)
with pytest.raises(Exception):
_ = pf_shared.information_ratio(pf_shared.benchmark_returns(group_by=False) * 2)
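# stats() aggregates across columns with the mean by default (hence the 'agg_func_mean' name), while
# column= selects a single column or group; settings, metrics and metric_settings allow custom
# metrics such as the max winning streak defined below.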
def test_stats(self):
stats_index = pd.Index([
'Start', 'End', 'Period', 'Start Value', 'End Value',
'Total Return [%]', 'Benchmark Return [%]', 'Max Gross Exposure [%]',
'Total Fees Paid', 'Max Drawdown [%]', 'Max Drawdown Duration',
'Total Trades', 'Total Closed Trades', 'Total Open Trades',
'Open Trade PnL', 'Win Rate [%]', 'Best Trade [%]', 'Worst Trade [%]',
'Avg Winning Trade [%]', 'Avg Losing Trade [%]',
'Avg Winning Trade Duration', 'Avg Losing Trade Duration',
'Profit Factor', 'Expectancy', 'Sharpe Ratio', 'Calmar Ratio',
'Omega Ratio', 'Sortino Ratio'
], dtype='object')
pd.testing.assert_series_equal(
pf.stats(),
pd.Series(
np.array([
pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-05 00:00:00'),
pd.Timedelta('5 days 00:00:00'), 100.0, 98.88877000000001, -1.11123, 283.3333333333333,
2.05906183131983, 0.42223000000000005, 1.6451238489727062, pd.Timedelta('3 days 08:00:00'),
2.0, 1.3333333333333333, 0.6666666666666666, -1.5042060606060605, 33.333333333333336,
-98.38058805880588, -100.8038553855386, 143.91625412541256, -221.34645964596464,
pd.Timedelta('2 days 12:00:00'), pd.Timedelta('2 days 00:00:00'), np.inf, 0.10827272727272726,
-6.751008013903537, 10378.930331014584, 4.768700318817701, 31.599760994679134
]),
index=stats_index,
name='agg_func_mean')
)
pd.testing.assert_series_equal(
pf.stats(column='a'),
pd.Series(
np.array([
pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-05 00:00:00'),
pd.Timedelta('5 days 00:00:00'), 100.0, 99.68951, -0.3104899999999997, 150.0,
5.015572852148637, 0.35549, 0.3104900000000015, pd.Timedelta('4 days 00:00:00'),
2, 1, 1, -0.20049999999999982, 0.0, -54.450495049504966, -54.450495049504966,
np.nan, -54.450495049504966, pd.NaT, pd.Timedelta('1 days 00:00:00'), 0.0,
-0.10999000000000003, -13.30804491478906, -65.40868619923044, 0.0, -11.738864633265454
]),
index=stats_index,
name='a')
)
pd.testing.assert_series_equal(
pf.stats(column='a', settings=dict(freq='10 days', year_freq='200 days')),
pd.Series(
np.array([
pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-05 00:00:00'),
pd.Timedelta('50 days 00:00:00'), 100.0, 99.68951, -0.3104899999999997, 150.0,
5.015572852148637, 0.35549, 0.3104900000000015, pd.Timedelta('40 days 00:00:00'),
2, 1, 1, -0.20049999999999982, 0.0, -54.450495049504966, -54.450495049504966,
np.nan, -54.450495049504966, pd.NaT, pd.Timedelta('10 days 00:00:00'), 0.0, -0.10999000000000003,
-3.1151776875290866, -3.981409131683691, 0.0, -2.7478603669149457
]),
index=stats_index,
name='a')
)
pd.testing.assert_series_equal(
pf.stats(column='a', settings=dict(trade_type='positions')),
pd.Series(
np.array([
pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-05 00:00:00'),
pd.Timedelta('5 days 00:00:00'), 100.0, 99.68951, -0.3104899999999997, 150.0,
5.015572852148637, 0.35549, 0.3104900000000015, pd.Timedelta('4 days 00:00:00'),
2, 1, 1, -0.20049999999999982, 0.0, -54.450495049504966, -54.450495049504966,
np.nan, -54.450495049504966, pd.NaT, pd.Timedelta('1 days 00:00:00'), 0.0,
-0.10999000000000003, -13.30804491478906, -65.40868619923044, 0.0, -11.738864633265454
]),
index=pd.Index([
'Start', 'End', 'Period', 'Start Value', 'End Value',
'Total Return [%]', 'Benchmark Return [%]', 'Max Gross Exposure [%]',
'Total Fees Paid', 'Max Drawdown [%]', 'Max Drawdown Duration',
'Total Trades', 'Total Closed Trades', 'Total Open Trades',
'Open Trade PnL', 'Win Rate [%]', 'Best Trade [%]',
'Worst Trade [%]', 'Avg Winning Trade [%]',
'Avg Losing Trade [%]', 'Avg Winning Trade Duration',
'Avg Losing Trade Duration', 'Profit Factor', 'Expectancy',
'Sharpe Ratio', 'Calmar Ratio', 'Omega Ratio', 'Sortino Ratio'
], dtype='object'),
name='a')
)
pd.testing.assert_series_equal(
pf.stats(column='a', settings=dict(required_return=0.1, risk_free=0.01)),
pd.Series(
np.array([
pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-05 00:00:00'),
pd.Timedelta('5 days 00:00:00'), 100.0, 99.68951, -0.3104899999999997, 150.0,
5.015572852148637, 0.35549, 0.3104900000000015, pd.Timedelta('4 days 00:00:00'),
2, 1, 1, -0.20049999999999982, 0.0, -54.450495049504966, -54.450495049504966,
np.nan, -54.450495049504966, pd.NaT, pd.Timedelta('1 days 00:00:00'), 0.0,
-0.10999000000000003, -227.45862849586334, -65.40868619923044, 0.0, -19.104372472268942
]),
index=stats_index,
name='a')
)
pd.testing.assert_series_equal(
pf.stats(column='a', settings=dict(use_asset_returns=True)),
pd.Series(
np.array([
pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-05 00:00:00'),
pd.Timedelta('5 days 00:00:00'), 100.0, 99.68951, -0.3104899999999997,
150.0, 5.015572852148637, 0.35549, 0.3104900000000015, pd.Timedelta('4 days 00:00:00'),
2, 1, 1, -0.20049999999999982, 0.0, -54.450495049504966, -54.450495049504966, np.nan,
-54.450495049504966, pd.NaT, pd.Timedelta('1 days 00:00:00'), 0.0, -0.10999000000000003,
np.nan, np.nan, 0.0, np.nan
]),
index=stats_index,
name='a')
)
pd.testing.assert_series_equal(
pf.stats(column='a', settings=dict(incl_open=True)),
pd.Series(
np.array([
pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-05 00:00:00'),
pd.Timedelta('5 days 00:00:00'), 100.0, 99.68951, -0.3104899999999997, 150.0,
5.015572852148637, 0.35549, 0.3104900000000015, pd.Timedelta('4 days 00:00:00'),
2, 1, 1, -0.20049999999999982, 0.0, -3.9702970297029667, -54.450495049504966,
np.nan, -29.210396039603964, pd.NaT, pd.Timedelta('1 days 00:00:00'), 0.0,
-0.1552449999999999, -13.30804491478906, -65.40868619923044, 0.0, -11.738864633265454
]),
index=stats_index,
name='a')
)
pd.testing.assert_series_equal(
pf_grouped.stats(column='first'),
pd.Series(
np.array([
pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-05 00:00:00'),
pd.Timedelta('5 days 00:00:00'), 200.0, 194.95809, -2.520955, 275.0, -0.505305454620791,
0.82091, 2.46248125751388, pd.Timedelta('4 days 00:00:00'), 4, 2, 2, -4.512618181818182,
0.0, -54.450495049504966, -388.2424242424243, np.nan, -221.34645964596461, pd.NaT,
pd.Timedelta('2 days 00:00:00'), 0.0, -0.2646459090909091, -20.095906945591288,
-34.312217430388344, 0.0, -14.554511690523578
]),
index=stats_index,
name='first')
)
pd.testing.assert_series_equal(
pf.stats(column='a', tags='trades and open and not closed', settings=dict(incl_open=True)),
pd.Series(
np.array([
1, -0.20049999999999982
]),
index=pd.Index([
'Total Open Trades', 'Open Trade PnL'
], dtype='object'),
name='a')
)
max_winning_streak = (
'max_winning_streak',
dict(
title='Max Winning Streak',
calc_func=lambda trades: trades.winning_streak.max(),
resolve_trades=True
)
)
pd.testing.assert_series_equal(
pf.stats(column='a', metrics=max_winning_streak),
pd.Series([0.0], index=['Max Winning Streak'], name='a')
)
max_winning_streak = (
'max_winning_streak',
dict(
title='Max Winning Streak',
calc_func=lambda self, group_by: self.get_trades(group_by=group_by).winning_streak.max()
)
)
pd.testing.assert_series_equal(
pf.stats(column='a', metrics=max_winning_streak),
pd.Series([0.0], index=['Max Winning Streak'], name='a')
)
max_winning_streak = (
'max_winning_streak',
dict(
title='Max Winning Streak',
calc_func=lambda self, settings:
self.get_trades(group_by=settings['group_by']).winning_streak.max(),
resolve_calc_func=False
)
)
pd.testing.assert_series_equal(
pf.stats(column='a', metrics=max_winning_streak),
pd.Series([0.0], index=['Max Winning Streak'], name='a')
)
vbt.settings.portfolio.stats['settings']['my_arg'] = 100
my_arg_metric = ('my_arg_metric', dict(title='My Arg', calc_func=lambda my_arg: my_arg))
pd.testing.assert_series_equal(
pf.stats(my_arg_metric, column='a'),
pd.Series([100], index=['My Arg'], name='a')
)
vbt.settings.portfolio.stats.reset()
pd.testing.assert_series_equal(
pf.stats(my_arg_metric, column='a', settings=dict(my_arg=200)),
pd.Series([200], index=['My Arg'], name='a')
)
my_arg_metric = ('my_arg_metric', dict(title='My Arg', my_arg=300, calc_func=lambda my_arg: my_arg))
pd.testing.assert_series_equal(
pf.stats(my_arg_metric, column='a', settings=dict(my_arg=200)),
pd.Series([300], index=['My Arg'], name='a')
)
pd.testing.assert_series_equal(
pf.stats(my_arg_metric, column='a', settings=dict(my_arg=200),
metric_settings=dict(my_arg_metric=dict(my_arg=400))),
pd.Series([400], index=['My Arg'], name='a')
)
trade_min_pnl_cnt = (
'trade_min_pnl_cnt',
dict(
title=vbt.Sub('Trades with PnL over $$${min_pnl}'),
calc_func=lambda trades, min_pnl: trades.apply_mask(
trades.pnl.values >= min_pnl).count(),
resolve_trades=True
)
)
pd.testing.assert_series_equal(
pf.stats(
metrics=trade_min_pnl_cnt, column='a',
metric_settings=dict(trade_min_pnl_cnt=dict(min_pnl=0))),
pd.Series([0], index=['Trades with PnL over $0'], name='a')
)
pd.testing.assert_series_equal(
pf.stats(
metrics=[
trade_min_pnl_cnt,
trade_min_pnl_cnt,
trade_min_pnl_cnt
],
column='a',
metric_settings=dict(
trade_min_pnl_cnt_0=dict(min_pnl=0),
trade_min_pnl_cnt_1=dict(min_pnl=10),
trade_min_pnl_cnt_2=dict(min_pnl=20))
),
pd.Series([0, 0, 0], index=[
'Trades with PnL over $0',
'Trades with PnL over $10',
'Trades with PnL over $20'
], name='a')
)
pd.testing.assert_frame_equal(
pf.stats(metrics='total_trades', agg_func=None, settings=dict(trades_type='entry_trades')),
pd.DataFrame([2, 2, 2], index=price_na.columns, columns=['Total Trades'])
)
pd.testing.assert_frame_equal(
pf.stats(metrics='total_trades', agg_func=None, settings=dict(trades_type='exit_trades')),
pd.DataFrame([2, 2, 2], index=price_na.columns, columns=['Total Trades'])
)
pd.testing.assert_frame_equal(
pf.stats(metrics='total_trades', agg_func=None, settings=dict(trades_type='positions')),
pd.DataFrame([2, 2, 2], index=price_na.columns, columns=['Total Trades'])
)
pd.testing.assert_series_equal(
pf['c'].stats(),
pf.stats(column='c')
)
pd.testing.assert_series_equal(
pf['c'].stats(),
pf_grouped.stats(column='c', group_by=False)
)
pd.testing.assert_series_equal(
pf_grouped['second'].stats(),
pf_grouped.stats(column='second')
)
pd.testing.assert_series_equal(
pf_grouped['second'].stats(),
pf.stats(column='second', group_by=group_by)
)
pd.testing.assert_series_equal(
pf.replace(wrapper=pf.wrapper.replace(freq='10d')).stats(),
pf.stats(settings=dict(freq='10d'))
)
stats_df = pf.stats(agg_func=None)
assert stats_df.shape == (3, 28)
pd.testing.assert_index_equal(stats_df.index, pf.wrapper.columns)
pd.testing.assert_index_equal(stats_df.columns, stats_index)
def test_returns_stats(self):
pd.testing.assert_series_equal(
pf.returns_stats(column='a'),
pd.Series(
np.array([
pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-05 00:00:00'),
pd.Timedelta('5 days 00:00:00'), -0.3104900000000077, 150.0, -20.30874297799884,
1.7044081500801351, 0.3104900000000077, pd.Timedelta('4 days 00:00:00'),  # pandas.Timedelta
def rd_kinLL():
from pandas import read_csv
fname='/home/sespub/teds90_camx/REAS/line/df_kinLL.csv'
df_kin=read_csv(fname)
utme=list(df_kin['x97'])
utmn=list(df_kin['y97'])
DICT=list(df_kin['D'])
subX=[44,50,51]
for i in range(len(DICT)):
cntyi=int(DICT[i]/100)
if cntyi in subX:utme[i]=utme[i]-201
return (utme,utmn,list(df_kin['R']),DICT)
def rd_kin():
fname='/home/sespub/teds90_camx/REAS/line/LE-loc12.KIN'
with open(fname) as text_file:
d=[line.strip('\n').split()[0] for line in text_file]
d=d[1:]
cnty=[int(i[0:2]) for i in d]
DICT=[i[0:4] for i in d]
utme=[int(i[4:7]) for i in d]
rd_typ=[int(i[-1]) for i in d]
utmn=[int(i[7:11]) for i in d]
subX=[44,50,51]
for i in range(len(d)):
if cnty[i] in subX:utme[i]=utme[i]-201
return (utme,utmn,rd_typ,cnty)
def rd_EM():
from scipy.io import FortranFile
import numpy as np
NVTYP=13;NVEH=NVTYP;NPOL=10;NREC=33012
fname='/home/sespub/teds90_camx/REAS/line/cl102.bin'
f=FortranFile(fname, 'r')
EM=f.read_record(dtype=np.float32)
f.close()
EM=np.reshape(EM,[NREC,NPOL,NVEH])
return (NVTYP,NPOL,NREC,EM)
def rd_BIN(NC,LTYP,N,M):
from scipy.io import FortranFile
import numpy as np
fname='/home/sespub/teds90_camx/REAS/line/102LVOC.BIN'
f=FortranFile(fname, 'r')
VOCB=f.read_record(dtype=np.float32)
f.close()
VOCB=np.reshape(VOCB,[NC,LTYP,N,M])
VOCB[:,0,:,:]=0.
return VOCB
def rd_hwcsv():
from pandas import read_csv
fname='105_LINE_HW.csv'
df_t=read_csv(fname)
df_t['DICT']=[int(i/100) for i in list(df_t['DICT'])]
s1=list(set(df_t['DICT']))
s1.sort()
sdf2csv={x:y for x,y in zip(s1,s1)}
sdf2csv.update({36:17,41:21,42:2})
return (df_t,sdf2csv)
def rd_cems():
from pandas import read_csv
fname='105_point_cems.csv'
df_t = read_csv(fname)  # pandas.read_csv
import numpy as np
import pandas as pd
data = np.array([1,2,3,5,6])
df = pd.Series(data)
data[0]
obj = pd.Series([1,2,-3,4,-5], index=['d', 'b', 'c', 'a', 'e'])
obj[obj > 0]
obj ** 2
cities = {'Bangalore': 2000, 'Mysore':4000}
obj = pd.Series(cities)
cities_list = ['Mandya']
obj = pd.Series(cities, index = cities_list)
df = pd.DataFrame({'city': ['Bangalore', 'Mysore'], 'state': ['Karnataka', 'Karnataka']})
data = [{'a': 1, 'b': 2}, {'a': 5, 'b': 10, 'c': 20}]
df1 = pd.DataFrame(data, index=['first', 'second'], columns=['a', 'b'])  # pandas.DataFrame
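# Short usage sketch (added for illustration; not part of the original notes):
# label-based and boolean selection on the frames built above.
df1.loc['second', 'a']            # -> 5; the 'c' key was dropped by columns=['a', 'b']
df[df['city'] == 'Mysore']        # boolean row selection on the city/state frame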
import itertools
from typing import List, Optional, Union
import numpy as np
import pandas._libs.algos as libalgos
import pandas._libs.reshape as libreshape
from pandas._libs.sparse import IntIndex
from pandas.util._decorators import cache_readonly
from pandas.core.dtypes.cast import maybe_promote
from pandas.core.dtypes.common import (
ensure_platform_int,
is_bool_dtype,
is_extension_array_dtype,
is_integer,
is_integer_dtype,
is_list_like,
is_object_dtype,
needs_i8_conversion,
)
from pandas.core.dtypes.missing import notna
import pandas.core.algorithms as algos
from pandas.core.arrays import SparseArray
from pandas.core.arrays.categorical import factorize_from_iterable
from pandas.core.construction import extract_array
from pandas.core.frame import DataFrame
from pandas.core.indexes.api import Index, MultiIndex
from pandas.core.series import Series
from pandas.core.sorting import (
compress_group_index,
decons_obs_group_ids,
get_compressed_ids,
get_group_index,
)
class _Unstacker:
"""
Helper class to unstack data / pivot with multi-level index
Parameters
----------
index : object
Pandas ``Index``
level : int or str, default last level
Level to "unstack". Accepts a name for the level.
fill_value : scalar, optional
Default value to fill in missing values if subgroups do not have the
same set of labels. By default, missing values will be replaced with
the default fill value for that data type, NaN for float, NaT for
datetimelike, etc. For integer types, by default data will converted to
float and missing values will be set to NaN.
constructor : object
Pandas ``DataFrame`` or subclass used to create unstacked
response. If None, DataFrame will be used.
Examples
--------
>>> index = pd.MultiIndex.from_tuples([('one', 'a'), ('one', 'b'),
... ('two', 'a'), ('two', 'b')])
>>> s = pd.Series(np.arange(1, 5, dtype=np.int64), index=index)
>>> s
one a 1
b 2
two a 3
b 4
dtype: int64
>>> s.unstack(level=-1)
a b
one 1 2
two 3 4
>>> s.unstack(level=0)
one two
a 1 3
b 2 4
Returns
-------
unstacked : DataFrame
"""
def __init__(
self, index, level=-1, constructor=None,
):
if constructor is None:
constructor = DataFrame
self.constructor = constructor
self.index = index.remove_unused_levels()
self.level = self.index._get_level_number(level)
# when index includes `nan`, need to lift levels/strides by 1
self.lift = 1 if -1 in self.index.codes[self.level] else 0
# Note: the "pop" below alters these in-place.
self.new_index_levels = list(self.index.levels)
self.new_index_names = list(self.index.names)
self.removed_name = self.new_index_names.pop(self.level)
self.removed_level = self.new_index_levels.pop(self.level)
self.removed_level_full = index.levels[self.level]
# Bug fix GH 20601
# If the data frame is too big, the number of unique index combinations
# will cause an int32 overflow on Windows environments.
# We want to check and raise an error before this happens
num_rows = np.max([index_level.size for index_level in self.new_index_levels])
num_columns = self.removed_level.size
# GH20601: This forces an overflow if the number of cells is too high.
num_cells = np.multiply(num_rows, num_columns, dtype=np.int32)
if num_rows > 0 and num_columns > 0 and num_cells <= 0:
raise ValueError("Unstacked DataFrame is too big, causing int32 overflow")
self._make_selectors()
@cache_readonly
def _indexer_and_to_sort(self):
v = self.level
codes = list(self.index.codes)
levs = list(self.index.levels)
to_sort = codes[:v] + codes[v + 1 :] + [codes[v]]
sizes = [len(x) for x in levs[:v] + levs[v + 1 :] + [levs[v]]]
comp_index, obs_ids = get_compressed_ids(to_sort, sizes)
ngroups = len(obs_ids)
indexer = libalgos.groupsort_indexer(comp_index, ngroups)  # pandas._libs.algos.groupsort_indexer
"""Genetic evaluation of individuals."""
import os
import sys
# import time
from collections import Counter
from itertools import compress
from numba import njit
import pkg_resources
import numpy as np
import pandas as pd
import scipy.linalg
import scipy.stats
def example_data():
"""Provide data to the package."""
cwd = os.getcwd()
stream = pkg_resources.resource_stream(__name__, 'data/chr.txt')
chrmosomedata = pd.read_table(stream, sep=" ")
stream = pkg_resources.resource_stream(__name__, 'data/group.txt')
groupdata = pd.read_table(stream, sep=" ")
stream = pkg_resources.resource_stream(__name__, 'data/effects.txt')
markereffdata = pd.read_table(stream, sep=" ")
stream = pkg_resources.resource_stream(__name__, 'data/phase.txt')
genodata = pd.read_table(stream, header=None, sep=" ")
stream = pkg_resources.resource_stream(__name__, 'data/ped.txt')
ped = pd.read_table(stream, header=None, sep=" ")
os.chdir(cwd)
return chrmosomedata, markereffdata, genodata, groupdata, ped
if __name__ == "__main__":
example_data()
@njit
def fnrep2(gen, aaxx, aaxx1):
"""Code phased genotypes into 1, 2, 3 and 4."""
qqq = np.empty((int(gen.shape[0]/2), gen.shape[1]), np.int_)
for i in range(qqq.shape[0]):
for j in range(qqq.shape[1]):
if gen[2*i, j] == aaxx and gen[2*i+1, j] == aaxx:
qqq[i, j] = 1
elif gen[2*i, j] == aaxx1 and gen[2*i+1, j] == aaxx1:
qqq[i, j] = 2
elif gen[2*i, j] == aaxx and gen[2*i+1, j] == aaxx1:
qqq[i, j] = 3
else:
qqq[i, j] = 4
return qqq
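# Worked sketch of the coding above (hypothetical input, added for clarity):
# with major allele aaxx=1 and minor allele aaxx1=2, the two phased haplotype
# rows of one individual
#     np.array([[1, 2],
#               [1, 1]])
# are collapsed by fnrep2(..., 1, 2) into the single genotype row [1, 4]
# (homozygous major 1|1 -> 1, heterozygote 2|1 -> 4).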
def haptogen(gen, progress=False):
"""Convert haplotypes to coded genotypes."""
if progress:
print("Converting phased haplotypes to genotypes")
if gen.shape[1] == 2:
gen = np.array(gen.iloc[:, 1]) # del col containing ID
# convert string to 2D array of integers
gen = [list(gen[i].rstrip()) for i in range(gen.shape[0])]
gen = np.array(gen, int)
# derives the frequency of alleles to determine the major allele
allele = np.asarray(np.unique(gen, return_counts=True)).T.astype(int)
if len(allele[:, 0]) != 2:
sys.exit("method only supports biallelic markers")
aaxx = allele[:, 0][np.argmax(allele[:, 1])] # major allele
aaasns = np.isin(allele[:, 0], aaxx, invert=True)
aaxx1 = int(allele[:, 0][aaasns]) # minor allele
gen = np.array(gen, int)
gen = fnrep2(gen, aaxx, aaxx1)
elif gen.shape[1] > 2:
gen = gen.iloc[:, 1:gen.shape[1]] # del col containing ID
# derives the frequency of alleles to determine the major allele
allele = np.asarray(np.unique(gen, return_counts=True)).T.astype(int)
if len(allele[:, 0]) != 2:
sys.exit("method only supports biallelic markers")
aaxx = allele[:, 0][np.argmax(allele[:, 1])] # major allele
aaasns = np.isin(allele[:, 0], aaxx, invert=True)
aaxx1 = int(allele[:, 0][aaasns]) # minor allele
gen = np.array(gen, int)
gen = fnrep2(gen, aaxx, aaxx1)
return gen
class Datacheck:
"""Check the input data for errors and store relevant info as an object."""
def __init__(self, gmap, meff, gmat, group, indwt, progress=False):
"""
Check input data for errors and store relevant info as class object.
Parameters
----------
gmap : pandas.DataFrame
Index: RangeIndex
Columns:
Name: CHR, dtype: int64; chromosome number
Name: SNPName, dtype: object; marker name
Name: Position, dtype: int64; marker position in bp
Name: group, dtype: float64; marker distance (cM) or reco rates
meff : pandas.DataFrame
Index: RangeIndex
Columns:
Name: trait names: float64; no. of columns = no of traits
gmat : pandas.DataFrame
Index: RangeIndex
Columns:
Name: ID, dtype: int64 or str; identification of individuals
Name: haplotypes, dtype: object; must be biallelic
group : pandas.DataFrame
Index: RangeIndex
Columns:
Name: group, dtype: object; group code of individuals, e.g., M, F
Name: ID, dtype: int64 or str; identification of individuals
indwt : list of index weights for each trait
progress : bool, optional; print progress of the function if True
Returns stored input files
-------
"""
# check: ensures number of traits match size of index weights
indwt = np.array(indwt)
if (meff.shape[1]-1) != indwt.size:
sys.exit('no. of index weights do not match marker effects cols')
# check: ensure individuals' genotypes match group and ID info
id_indgrp = pd.Series(group.iloc[:, 1]).astype(str)  # IDs of individuals
if not pd.Series(
pd.unique(gmat.iloc[:, 0])).astype(str).equals(id_indgrp):
sys.exit("ID of individuals in group & genotypic data don't match")
# check: ensure marker names in marker map and effects match
if not (gmap.iloc[:, 1].astype(str)).equals(meff.iloc[:, 0].astype(str)):
print("Discrepancies between marker names")
sys.exit("Check genetic map and marker effects")
# check: ensure marker or allele sub effect are all numeric
meff = meff.iloc[:, 1:meff.shape[1]]
test = meff.apply(
lambda s: pd.to_numeric(s, errors='coerce').notnull().all())
if not test.all():
sys.exit("Marker or allele sub effects contain non-numeric values")
# check: ensure unique maps match no of groups if map more than 1
grpg = pd.unique(group.iloc[:, 0]) # groups of individuals
grp_chrom = gmap.shape[1]-3 # no of unique maps
gmat = haptogen(gmat, progress)
if grp_chrom > 1 and grp_chrom != grpg.size:
sys.exit("no. of unique maps does not match no. of groups")
# check no of markers in genotype and map and marker effects match
no_markers = gmap.shape[0] # no of markers
if no_markers != gmat.shape[1] or no_markers != meff.shape[0]:
sys.exit("markers nos in gen, chrm or marker effects don't match")
# check: ordered marker distance or recombination rates
for grn in range(grp_chrom):
for chrm in pd.unique(gmap.iloc[:, 0]):
mpx = np.array(gmap.iloc[:, 3+grn][gmap.iloc[:, 0] == chrm])
if not (mpx == np.sort(sorted(mpx))).any():
sys.exit(
f"Faulty marker map on chr {chrm} for grp {grpg[grn]}")
if progress:
print('Data passed the test!')
print("Number of individuals: ", len(id_indgrp))
print("Number of groups: ", len(grpg), ": ", grpg)
print("Number of specific maps:", grp_chrom)
print("Number of chromosomes: ", len(pd.unique(gmap.iloc[:, 0])))
print("Total no. markers: ", no_markers)
print("Number of trait(s): ", meff.columns.size)
print("Trait name(s) and Index weight(s)")
if meff.columns.size == 1:
print(meff.columns[0], ": ", indwt[0])
elif meff.columns.size > 1:
for i in range(meff.columns.size):
print(meff.columns[i], ": ", indwt[i])
self.gmap = gmap
self.meff = meff
self.gmat = gmat
self.group = group
self.indwt = indwt
def elem_cor(mylist, mprc, ngp, mposunit, method, chrm):
"""Derive pop cov matrix."""
if method == 1: # Bonk et al's approach
if mposunit in ("cM", "cm", "CM", "Cm"):
tmp = np.exp(-2*(np.abs(mprc - mprc[:, None])/100))/4
elif mposunit in ("reco", "RECO"):
if mprc[0] != 0:
sys.exit(f"First value for reco rate on chr {chrm} isn't zero")
aaa = (1-(2*mprc))/4
ida = np.arange(aaa.size)
tmp = aaa[np.abs(ida - ida[:, None])]
elif method == 2: # Santos et al's approach
if mposunit in ("cM", "cm", "CM", "Cm"):
tmp = (-1*(np.abs(mprc - mprc[:, None])/200))+0.25
cutoff = (-1*(50/200))+0.25
tmp = np.where(tmp < cutoff, 0, tmp)
elif mposunit in ("reco", "RECO"):
if mprc[0] != 0:
sys.exit(f"First value for reco rate on chr {chrm} isn't zero")
aaa = (-1*(mprc/2))+0.25
ida = np.arange(aaa.size)
tmp = aaa[np.abs(ida - ida[:, None])]
cutoff = (-1*(0.5/2))+0.25
tmp = np.where(tmp < cutoff, 0, tmp)
# append chromosome-specific covariance matrix to list
mylist[int(ngp)].append(tmp)
return mylist
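# Numeric sketch of the two kernels above (hypothetical distances, added for
# clarity): for a marker pair 0 cM apart both methods give 0.25; at 50 cM
# Bonk et al.'s kernel gives exp(-2*50/100)/4 ~ 0.092, while Santos et al.'s
# linear kernel, (-50/200) + 0.25 = 0.0, sits exactly at its cutoff and is zeroed.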
def popcovmat(info, mposunit, method):
"""
Derive population-specific covariance matrices.
Parameters
----------
info : class object
A class object created using the function "datacheck"
mposunit : string
A string containing "cM" or "reco".
method : int
An integer with a value of 1 for Bonk et al.'s approach or
2 for Santos et al.'s approach.
Returns
-------
mylist : list
A list containing group-specific pop covariance matrices for each chr.
"""
if mposunit not in ("cM", "cm", "CM", "Cm", "reco", "RECO"):
sys.exit("marker unit should be either cM or reco")
# unique group name for naming the list if map is more than 1
probn = pd.unique(info.group.iloc[:, 0].astype(str)).tolist()
chromos = pd.unique(info.gmap.iloc[:, 0]) # chromosomes
no_grp = info.gmap.shape[1]-3 # no of maps
mylist = [] # list stores chromosome-wise covariance matrix
for ngp in range(no_grp):
mylist.append([])
# marker position in cM or recombination rates
grouprecodist = info.gmap.iloc[:, 3+ngp]
for chrm in chromos:
mpo = np.array(grouprecodist[info.gmap.iloc[:, 0] == (chrm)])
elem_cor(mylist, mpo, ngp, mposunit, method, chrm)
if no_grp > 1:
# if map is more than one, name list using group names
mylist = dict(zip(probn, mylist))
return mylist
@njit
def makemems(gmat, meff):
"""Set up family-specific marker effects (Mendelian sampling)."""
qqq = np.zeros((gmat.shape))
for i in range(gmat.shape[0]):
for j in range(gmat.shape[1]):
if gmat[i, j] == 4:
qqq[i, j] = meff[j]*-1
elif gmat[i, j] == 3:
qqq[i, j] = meff[j]
else:
qqq[i, j] = 0
return qqq
@njit
def makemebv(gmat, meff):
"""Set up family-specific marker effects (GEBV)."""
qqq = np.zeros((gmat.shape))
for i in range(gmat.shape[0]):
for j in range(gmat.shape[1]):
if gmat[i, j] == 2:
qqq[i, j] = meff[j]*-1
elif gmat[i, j] == 1:
qqq[i, j] = meff[j]
else:
qqq[i, j] = 0
return qqq
def traitspecmatrices(gmat, meff):
"""Store trait-specific matrices in a list."""
notr = meff.shape[1] # number of traits
slist = [] # list stores trait-specific matrices
slist.append([])
for i in range(notr):
# specify data type for numba
mefff = np.array(meff.iloc[:, i], float)
matrix_ms = makemems(gmat, mefff)
slist[0].append(matrix_ms)
return slist
def namesdf(notr, trait_names):
"""Create names of dataframe columns for Mendelian co(var)."""
tnn = np.zeros((notr, notr), 'U20')
tnn = np.chararray(tnn.shape, itemsize=30)
for i in range(notr):
for trt in range(notr):
if i == trt:
tnn[i, trt] = str(trait_names[i])
elif i != trt:
tnn[i, trt] = "{}_{}".format(trait_names[i], trait_names[trt])
colnam = tnn[np.tril_indices(notr)]
return colnam
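# Usage sketch (hypothetical trait names, added for clarity): for two traits,
# namesdf(2, ["milk", "fat"]) returns the lower-triangular column labels
# b'milk', b'fat_milk', b'fat' used to name the Mendelian (co)variance columns.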
def mrmmult(temp, covmat):
"""Matrix multiplication (MRM' or m'Rm)."""
return temp @ covmat @ temp.T
def dgmrm(temp, covmat):
"""Matrix multiplication (MRM') for bigger matrices."""
temp1111 = scipy.linalg.blas.dgemm(alpha=1.0, a=temp, b=covmat)
return scipy.linalg.blas.dgemm(alpha=1.0, a=temp1111, b=temp.T)
def progr(itern, total):
"""Print progress of a task."""
fill, printend, prefix, suffix = '█', "\r", 'Progress:', 'Complete'
deci, length = 0, 50
percent = ("{0:." + str(deci) + "f}").format(100 * (itern / float(total)))
filledlen = int(length * itern // total)
bars = fill * filledlen + '-' * (length - filledlen)
print(f'\r{prefix} |{bars}| {percent}% {suffix}', end=printend)
if itern == total:
print()
def subindcheck(info, sub_id):
"""Check if inds provided in pd.DataFrame (sub_id) are in group data."""
sub_id = pd.DataFrame(sub_id).reset_index(drop=True)
if sub_id.shape[1] != 1:
sys.exit("Individuals' IDs (sub_id) should be provided in one column")
numbers = info.group.iloc[:, 1].astype(str).tolist()
sub_id = sub_id.squeeze().astype(str).tolist()
aaa = [numbers.index(x) if x in numbers else None for x in sub_id]
if any(x is None for x in aaa):  # an ID missing from the group data yields None above
sys.exit("Some individual ID could not be found in group data")
aaa = np.array(aaa)
return aaa
def msvarcov_g_st(info, covmat, sub_id, progress=False):
"""Derive Mendelian sampling co(variance) for single trait."""
if sub_id is not None:
aaa = subindcheck(info, sub_id)
idn = info.group.iloc[aaa, 1].reset_index(drop=True).astype(str) # ID
groupsex = info.group.iloc[aaa, 0].reset_index(drop=True).astype(str)
matsub = info.gmat[aaa, :]
else:
idn = info.group.iloc[:, 1].reset_index(drop=True).astype(str) # ID
groupsex = info.group.iloc[:, 0].reset_index(drop=True).astype(str)
matsub = info.gmat
if (info.gmap.shape[1]-3 == 1 and len(pd.unique(groupsex)) > 1):
print("The same map will be used for all groups")
if progress:
progr(0, matsub.shape[0]) # print progress bar
snpindexxx = np.arange(start=0, stop=info.gmap.shape[0], step=1)
notr = info.meff.columns.size
slist = traitspecmatrices(matsub, info.meff)
# dataframe to save Mendelian sampling (co)variance and aggregate breeding value
msvmsc = np.empty((matsub.shape[0], 1))
for i in range(matsub.shape[0]): # loop over no of individuals
mscov = np.zeros((notr, notr)) # Mendelian co(var) mat for ind i
for chrm in pd.unique(info.gmap.iloc[:, 0]):
# snp index for chromosome chrm
s_ind = np.array(snpindexxx[info.gmap.iloc[:, 0] == (chrm)])
# family-specific marker effects for ind i
temp = np.zeros((notr, len(s_ind)))
for trt in range(notr):
temp[trt, :] = slist[0][trt][i, s_ind]
if info.gmap.shape[1]-3 == 1:
mscov = mscov + mrmmult(temp, covmat[0][chrm-1])
else:
mscov = mscov + mrmmult(temp, covmat[groupsex[i]][chrm-1])
msvmsc[i, 0] = mscov
if progress:
progr(i + 1, matsub.shape[0]) # print progress bar
msvmsc = pd.DataFrame(msvmsc)
msvmsc.columns = info.meff.columns
msvmsc.insert(0, "ID", idn, True)
msvmsc.insert(1, "Group", groupsex, True) # insert group
return msvmsc
def msvarcov_g_mt(info, covmat, sub_id, progress=False):
"""Derive Mendelian sampling co(variance) for multiple traits."""
if sub_id is not None:
aaa = subindcheck(info, sub_id)
idn = info.group.iloc[aaa, 1].reset_index(drop=True).astype(str) # ID
groupsex = info.group.iloc[aaa, 0].reset_index(drop=True).astype(str)
matsub = info.gmat[aaa, :]
else:
idn = info.group.iloc[:, 1].reset_index(drop=True).astype(str) # ID
groupsex = info.group.iloc[:, 0].reset_index(drop=True).astype(str)
matsub = info.gmat
if (info.gmap.shape[1]-3 == 1 and len(pd.unique(groupsex)) > 1):
print("The same map will be used for all groups")
if progress:
progr(0, matsub.shape[0]) # print progress bar
snpindexxx = np.arange(start=0, stop=info.gmap.shape[0], step=1)
notr = info.meff.columns.size
slist = traitspecmatrices(matsub, info.meff)
# dataframe to save Mendelian sampling (co)variance and aggregate breeding value
mad = len(np.zeros((notr+1, notr+1))[np.tril_indices(notr+1)])
msvmsc = np.empty((matsub.shape[0], mad))
for i in range(matsub.shape[0]): # loop over no of individuals
mscov = np.zeros((notr+1, notr+1)) # Mendelian co(var) mat for ind i
for chrm in pd.unique(info.gmap.iloc[:, 0]):  # pandas.unique
"""
Utilities to get a call tree (from the output of ``cube_dump -w``).
A call tree is represented as a tree of
``(function_name,cnode_id,parent,[list of children])``
named tuples (of class ``CubeTreeNode``)
"""
import logging
from tree_parsing import collect_hierarchy, level_fun
from box import Box
import pandas as pd
import re
from cube_file_utils import get_lines, get_cube_dump_w_text
class CubeTreeNode(Box):
"""
Holds attributes of a tree node read from cube commands such as cube dump.
For a node of the cube call tree, the following attributes are available:
.. py:attribute:: fname
The name of the function;
.. py:attribute:: cnode_id
The unique ID related to the node in the call tree, read from id=value in string;
.. py:attribute:: parent
A binding to the parent node (can be ``None``);
.. py:attribute:: children
A list of bindings to child nodes.
And others from cube_dump output.
"""
def __repr__(root):
""" An implementation for '__repr__'.
Prints only the beginning and the end of the call tree.
"""
lines = calltree_to_string(root).split("\n")
res = lines[:5] + ["..."] + lines[-6:]
l = max(len(line) for line in res)
res = ["", "=" * l] + res + ["=" * l, ""]
return "\n".join(res)
def __init__(self,*args,**kwargs):
if 'frozen_box' in kwargs:
del kwargs['frozen_box']
super().__init__(*args, frozen_box = True, **kwargs)
def iterate_on_call_tree(root, maxlevel=None):
"""Iterator on a tree (Generator).
Can be used for searching in the tree, depth-first.
Parameters
----------
root: CubeTreeNode
a CubeTreeNode representing the root of the tree;
maxlevel : int or None
the maximum depth of the recursion (``None`` means unlimited).
Returns
-------
res : CubeTreeNode
Iterator yielding ``CubeTreeNode`` s.
"""
yield root
new_maxlevel = maxlevel - 1 if maxlevel is not None else None
if len(root.children) != 0 and (maxlevel is None or maxlevel > 0):
for child in root.children:
yield from iterate_on_call_tree(child, new_maxlevel)
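# Usage sketch (assuming `root` is a CubeTreeNode produced elsewhere in this
# module; added for illustration): collect the function names of the root and
# its direct children only.
# top_levels = [n.fname for n in iterate_on_call_tree(root, maxlevel=1)]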
def calltree_to_df(call_tree, full_path=False):
"""Convert a call tree into a DataFrame.
Parameters
----------
call_tree : CubeTreeNode
Recursive representation of a call tree
full_path : bool
Whether or not the full path needs to be in the output as a column
Returns
-------
df : DataFrame
A dataframe with "Function Name", "Cnode ID", "Parent Cnode ID",
"Level" and optionally "Full Callpath" as columns.
"""
tuples = [
(n.fname, n.cnode_id, n.parent.cnode_id if n.parent is not None else pd.NA)
for n in iterate_on_call_tree(call_tree)
]
df = pd.DataFrame(
data=tuples, columns=["Function Name", "Cnode ID", "Parent Cnode ID"]
)
if full_path:
# full callpath vs cnode id for convenience
data = get_fpath_vs_id(call_tree)
fullpath_vs_id = pd.DataFrame(data, columns=["Cnode ID", "Full Callpath"])  # pandas.DataFrame
import sys
import pandas as pd
import numpy as np
from scipy import sqrt
from shapely.geometry import Point
from shapely.geometry.polygon import Polygon
from .CostModule import CostModule
from .WeatherDelay import WeatherDelay
import traceback
# constants
km_per_m = 0.001
hr_per_min = 1/60
m_per_ft = 0.3048
class ErectionCost(CostModule):
"""
ErectionCost.py
Created by <NAME> and <NAME> on Mar. 16, 2018
Created by <NAME> and <NAME> on 01 June 2019
Calculates the costs for erecting the tower and rotor nacelle assembly for land-based wind projects
(items in brackets are not yet implemented)
[Get terrain complexity]
[Get site complexity]
Get number of turbines
Get duration of construction
Get rate of deliveries
Get daily hours of operation
Get turbine rating
Get component specifications
[Get crane availability]
Get price data
Get labor mobilization_prices by crew type
Get labor prices by crew type
Get equipment mobilization prices by equipment type
Get fuel prices
Get equipment prices by equipment type
Calculate operational time for lifting components
Estimate potential time delays due to weather
Calculate required labor and equip for erection (see equip_labor_by_type method below)
Calculate number of workers by crew type
Calculate man hours by crew type
Calculate number of equipment by equip type
Calculate equipment hours by equip type
Calculate erection costs by type (see methods below)
Calculate mobilization costs as function of number of workers by crew type, number of equipment by equipment type, labor_mobilization_prices, and equip_mobilization_prices
Calculate labor costs as function of man_hours and labor prices by crew type
Calculate fuel costs as function of equipment hours by equipment type and fuel prices by equipment type
Calculate equipment costs as function of equipment hours by equipment type and equipment prices by equipment type
Sum erection costs over all types to get total costs
Find the least cost option
Return total erection costs
Keys in the input dictionary are the following:
construct_duration
(int) duration of construction (in months)
rate_of_deliveries
(int) rate of deliveries (number of turbines per week)
weather_window
(pd.DataFrame) window of weather data for project of interest.
wind_shear_exponent
- (float) The exponent of the power law wind shear calculation
overtime_multiplier:
(float) multiplier for overtime work (working 60 hr/wk vs 40 hr/wk)
allow_same_flag
(bool) boolean flag to indicate whether choosing same base and
topping crane is allowed.
operational_construction_time
(int) Number of hours per day when construction can happen.
time_construct
(int) 'normal' (10 hours per day) or 'long' (24 hours per day)
project_data
(dict) dictionary of pd.DataFrame for each of the csv files loaded
for the project.
In turn, the project_data dictionary contains key value pairs of the
following:
crane_specs:
(pd.DataFrame) Specs about the cranes for the cost calculations.
equip
(pd.DataFrame) Equipment needed for various tasks
crew
(pd.DataFrame) Crew configurations needed for various tasks
components
(pd.DataFrame) components to build a wind turbine
project
(pd.DataFrame) Parameters of the project to calculate.
equip_price
(pd.DataFrame) Prices to operate various pieces of equipment.
crew_price
(pd.DataFrame) The prices for various crews
material_price
(pd.DataFrame) Prices for various materials used during erection.
rsmeans
(pd.DataFrame) RSMeans data
"""
def __init__(self, input_dict, output_dict, project_name):
"""
Parameters
----------
input_dict : dict
The input dictionary with key value pairs described in the
class documentation
output_dict : dict
The output dictionary with key value pairs as found on the
output documentation.
"""
self.input_dict = input_dict
self.output_dict = output_dict
self.project_name = project_name
def run_module(self):
"""
Runs the ErectionCost model and populates the IO dictionaries with
calculated values.
Returns
-------
int
0 if the module ran successfully. 1 if the module did not run
successfully
"""
try:
self.calculate_costs()
self.outputs_for_detailed_tab()
self.output_dict['erection_module_type_operation'] = self.outputs_for_costs_by_module_type_operation(
input_df=self.output_dict['total_erection_cost'],
project_id=self.project_name,
total_or_turbine=True
)
return 0, 0 # Module ran successfully
except Exception as error:
traceback.print_exc()
return 1, error # Module did not run successfully
def outputs_for_detailed_tab(self):
"""
Creates a list of dictionaries which can be used on their own or
used to make a dataframe.
Must be called after self.run_module()
Returns
-------
list(dict)
A list of dicts, with each dict representing a row of the data.
"""
result =[]
for row in self.output_dict['component_name_topvbase'].itertuples():
dashed_row = '{} - {}'.format(row[1], row[2])
result.append({
'unit': '',
'type': 'dataframe',
'variable_df_key_col_name': 'component_name_topvbase: Operation - Top or Base',
'value': dashed_row
})
for row in self.output_dict['crane_choice'].itertuples():
dashed_row = '{} - {} - {}'.format(row[1], row[2], row[3])
result.append({
'unit': '',
'type': 'dataframe',
'variable_df_key_col_name': 'crane_choice: Crew name - Boom system - Operation',
'value': dashed_row
})
for _, row in self.output_dict['crane_data_output'].iterrows():
dashed_row = '{} - {} - {}'.format(row[0], row[1], row[2])
result.append({
'unit': '',
'type': 'dataframe',
'variable_df_key_col_name': 'crane_data_output: crane_boom_operation_concat - variable - value',
'value': dashed_row,
'last_number': row[2]
})
for _, row in self.output_dict['crane_cost_details'].iterrows():
dashed_row = '{} - {} - {}'.format(row[0], row[1], row[2])
result.append({
'unit': '',
'type': 'dataframe',
'variable_df_key_col_name': 'crane_cost_details: Operation ID - Type of cost - Cost',
'value': dashed_row,
'last_number': row[2]
})
for _, row in self.output_dict['total_erection_cost'].iterrows():
dashed_row = '{} - {} - {}'.format(row[0], row[1], row[2])
result.append({
'unit': '',
'type': 'dataframe',
'variable_df_key_col_name': 'total_erection_cost: Phase of construction - Type of cost - Cost USD',
'value': dashed_row,
'last_number': row[2]
})
for _, row in self.output_dict['erection_selected_detailed_data'].iterrows():
value = row['Labor cost USD']
operation = row['Operation']
result.append({
'unit': 'usd',
'type': 'dataframe',
'variable_df_key_col_name': f'erection_selected_detailed_data: crew cost',
'value': value,
'non_numeric_value': operation
})
for _, row in self.output_dict['erection_selected_detailed_data'].iterrows():
value = row['Mobilization cost USD']
crane_boom_operation_concat = row['crane_boom_operation_concat']
result.append({
'unit': 'usd',
'type': 'dataframe',
'variable_df_key_col_name': 'erection_selected_detailed_data: mobilization',
'value': value,
'non_numeric_value': crane_boom_operation_concat
})
for _, row in self.output_dict['erection_selected_detailed_data'].iterrows():
value = row['Wind multiplier']
operation = row['Operation']
result.append({
'unit': '',
'type': 'dataframe',
'variable_df_key_col_name': f'erection_selected_detailed_data: wind multiplier',
'value': value,
'non_numeric_value': operation
})
result.append({
'unit': 'usd',
'type': 'variable',
'variable_df_key_col_name': 'total_cost_summed_erection',
'value': float(self.output_dict['total_cost_summed_erection'])
})
for _, row in self.output_dict['management_crews_cost'].iterrows():
result.append({
'unit': '',
'type': 'dataframe',
'variable_df_key_col_name': 'management_crews_cost: {}'.format(' <-> '.join(row.index)),
'value': ' <-> '.join(list(str(x) for x in row)[1:])
})
module = type(self).__name__
for _dict in result:
_dict['project_id_with_serial'] = self.project_name
_dict['module'] = module
self.output_dict['erection_cost_csv'] = result
return result
def calculate_erection_operation_time(self):
"""
Calculates operation time required for each type of equipment included in project data.
self.output_dict['possible_cranes'] = possible_cranes
self.output_dict['erection_operation_time'] = erection_operation_time_dict
self.input_dict keys
---------------------
construct_duration : int
int duration of construction (in months)
operational_construction_time : int
Number of hours each day that are available for construction hours.
self.output_dict keys
---------------------
self.output_dict['possible_cranes'] : possible_cranes (with geometry)
self.output_dict['erection_operation_time'] : Operation time for each crane.
Returns
-------
pd.DataFrame, pd.DataFrame
Dataframe of possible_cranes (with geometry) and operational time for cranes
"""
project_data = self.input_dict['project_data']
construct_duration = self.input_dict['construct_duration']
operational_construction_time = self.input_dict['operational_construction_time']
erection_construction_time = 1 / 3 * construct_duration
breakpoint_between_base_and_topping_percent = self.input_dict['breakpoint_between_base_and_topping_percent']
hub_height_m = self.input_dict['hub_height_meters']
rotor_diameter_m = self.input_dict['rotor_diameter_m']
num_turbines = float(self.input_dict['num_turbines'])
turbine_spacing_rotor_diameters = self.input_dict['turbine_spacing_rotor_diameters']
# for components in component list determine if base or topping
project_data['components']['Operation'] = project_data['components']['Lift height m'] > (
float(hub_height_m * breakpoint_between_base_and_topping_percent))
boolean_dictionary = {True: 'Top', False: 'Base'}
project_data['components']['Operation'] = project_data['components']['Operation'].map(boolean_dictionary)
# For output to a csv file
self.output_dict['component_name_topvbase'] = project_data['components'][['Component', 'Operation']]
# create groups for operations
top_v_base = project_data['components'].groupby(['Operation'])
# group crane data by boom system and crane name to get distinct cranes
crane_grouped = project_data['crane_specs'].groupby(
['Equipment name', 'Equipment ID', 'Crane name', 'Boom system', 'Crane capacity tonne'])
# Calculate the crane lift polygons
crane_poly = self.calculate_crane_lift_polygons(crane_grouped=crane_grouped)
# loop through operation type (topping vs. base)
component_max_speed = pd.DataFrame()
for name_operation, component_group in top_v_base:
lift_max_wind_speed = self.calculate_component_lift_max_wind_speed(component_group=component_group,
crane_poly=crane_poly,
component_max_speed=component_max_speed,
operation=name_operation)
crane_poly = lift_max_wind_speed['crane_poly']
component_max_speed = lift_max_wind_speed['component_max_speed']
# Sorting can help for some operations, but isn't strictly necessary, so it can be turned
# off when not debugging
# component_max_speed = component_max_speed.sort_values(by=['Crane name', 'Boom system', 'Component'])
# join crane polygon to crane specs
crane_component = pd.merge(crane_poly, component_max_speed, on=['Crane name', 'Boom system'])
# select only cranes that could lift the component
possible_cranes = crane_component.where(crane_component['crane_bool'] == True).dropna(thresh=1).reset_index(
drop=True)
# calculate travel time per cycle
turbine_spacing = float(
turbine_spacing_rotor_diameters * rotor_diameter_m * km_per_m)
possible_cranes['Travel time hr'] = turbine_spacing / possible_cranes['Speed of travel km per hr'] * num_turbines
# CRANE BREAKDOWNS: This is where you could add time for breakdown.
# calculate erection time
possible_cranes['Operation time hr'] = ((possible_cranes['Lift height m'] / possible_cranes[
'Hoist speed m per min'] * hr_per_min)
+ (possible_cranes['Cycle time installation hrs'])
) * num_turbines
# store setup time
possible_cranes['Setup time hr'] = possible_cranes['Setup time hr'] * num_turbines
# check that crane can lift all components within a group (base vs top)
crane_lift_entire_group_for_operation = crane_component.groupby(by=['Crane name', 'Boom system', 'Operation'])[
'crane_bool'].all()
# if it can't then we need to remove it.
# otherwise we end up with an option for a crane to perform an operation without lifting all of the corresponding components
testcranenew = possible_cranes.merge(crane_lift_entire_group_for_operation,
on=['Crane name', 'Boom system', 'Operation'])
possible_cranes = testcranenew.loc[testcranenew['crane_bool_y']]
erection_time = possible_cranes.groupby(['Crane name', 'Equipment name', 'Crane capacity tonne', 'Crew type ID',
'Boom system', 'Operation'])['Operation time hr'].sum()
travel_time = possible_cranes.groupby(['Crane name', 'Equipment name', 'Crane capacity tonne', 'Crew type ID',
'Boom system', 'Operation'])['Travel time hr'].max()
setup_time = possible_cranes.groupby(['Crane name', 'Equipment name', 'Crane capacity tonne', 'Crew type ID',
'Boom system', 'Operation'])['Setup time hr'].max()
rental_time_without_weather = erection_time + travel_time + setup_time
operation_time = rental_time_without_weather.reset_index()
operation_time = operation_time.rename(columns={0: 'Operation time all turbines hrs'})
operation_time['Operational construct days'] = (operation_time['Operation time all turbines hrs'] /
operational_construction_time)
# if more than one crew needed to complete within construction duration then assume that all construction happens
# within that window and use that time frame for weather delays; if not, use the number of days calculated
operation_time['time_construct_bool'] = (operation_time['Operational construct days'] >
erection_construction_time * 30)
boolean_dictionary = {True: erection_construction_time * 30, False: np.NAN}
operation_time['time_construct_bool'] = operation_time['time_construct_bool'].map(boolean_dictionary)
operation_time['Time construct days'] = operation_time[
['time_construct_bool', 'Operational construct days']].min(axis=1)
# print(possible_cranes[['Crane name', 'Component', 'Operation time hr', 'Operation']])
for operation, component_group in top_v_base:
unique_component_crane = possible_cranes.loc[possible_cranes['Operation'] == operation][
'Component'].unique()
for component in component_group['Component']:
if component not in unique_component_crane:
raise Exception(
'Error: Unable to find installation crane for {} operation and {} component'.format(operation,
component))
erection_operation_time_dict = dict()
erection_operation_time_dict['possible_cranes'] = possible_cranes
erection_operation_time_dict['operation_time'] = operation_time
self.output_dict['possible_cranes'] = possible_cranes
self.output_dict['erection_operation_time'] = erection_operation_time_dict
return possible_cranes, operation_time
def calculate_offload_operation_time(self):
"""
Calculates time for the offload operation.
self.input_dict keys
--------------------
project_data : dict
dict of data frames for each of the csv files loaded for the project
operational_construction_time : int
operational hours of construction
rate_of_deliveries : int
rate of deliveries of turbines ready for erection.
self.output_dict key
--------------------
possible_cranes : pd.DataFrame
Dataframe of cranes possibly available for the operation
operation_time : int
Integer of number of hours per day construction can proceed.
"""
project_data = self.input_dict['project_data']
operational_construction_time = self.input_dict['operational_construction_time']
rate_of_deliveries = self.input_dict['rate_of_deliveries']
rotor_diameter_m = self.input_dict['rotor_diameter_m']
num_turbines = float(self.input_dict['num_turbines'])
turbine_spacing_rotor_diameters = self.input_dict['turbine_spacing_rotor_diameters']
offload_cranes = project_data['crane_specs'].where(
project_data['crane_specs']['Equipment name'] == 'Offload crane')
# group crane data by boom system and crane name to get distinct cranes
crane_grouped = offload_cranes.groupby(
['Equipment name', 'Equipment ID', 'Crane name', 'Boom system', 'Crane capacity tonne'])
crane_poly = self.calculate_crane_lift_polygons(crane_grouped=crane_grouped)
component_group = project_data['components']
component_max_speed = pd.DataFrame()
lift_max_wind_speed = self.calculate_component_lift_max_wind_speed(component_group=component_group,
crane_poly=crane_poly,
component_max_speed=component_max_speed,
operation='offload')
component_max_speed = lift_max_wind_speed['component_max_speed']
crane_poly = lift_max_wind_speed['crane_poly']
if len(crane_poly) != 0:
# join crane polygon to crane specs
crane_component = pd.merge(crane_poly, component_max_speed, on=['Crane name', 'Boom system'])
# select only cranes that could lift the component
possible_cranes = crane_component.where(crane_component['crane_bool'] == True).dropna(thresh=1).reset_index(
drop=True)
# calculate travel time per cycle
turbine_spacing = float(
turbine_spacing_rotor_diameters * rotor_diameter_m * km_per_m)
turbine_num = float(self.input_dict['num_turbines'])
possible_cranes['Travel time hr'] = turbine_spacing / possible_cranes['Speed of travel km per hr'] * num_turbines
# calculate erection time
possible_cranes['Operation time hr'] = ((possible_cranes['Lift height m'] / possible_cranes[
'Hoist speed m per min'] * hr_per_min)
+ (possible_cranes['Offload cycle time hrs'])
) * turbine_num
# store setup time
possible_cranes['Setup time hr'] = possible_cranes['Setup time hr'] * turbine_num
erection_time = \
possible_cranes.groupby(['Crane name', 'Equipment name', 'Crane capacity tonne', 'Crew type ID',
'Boom system'])['Operation time hr'].sum()
travel_time = \
possible_cranes.groupby(['Crane name', 'Equipment name', 'Crane capacity tonne', 'Crew type ID',
'Boom system'])['Travel time hr'].max()
setup_time = \
possible_cranes.groupby(['Crane name', 'Equipment name', 'Crane capacity tonne', 'Crew type ID',
'Boom system'])['Setup time hr'].max()
rental_time_without_weather = erection_time + travel_time + setup_time
operation_time = rental_time_without_weather.reset_index()
operation_time = operation_time.rename(columns={0: 'Operation time all turbines hrs'})
operation_time['Operational construct days'] = (operation_time['Operation time all turbines hrs'] /
operational_construction_time)
# if more than one crew needed to complete within construction duration
# then assume that all construction happens within that window and use
# that timeframe for weather delays; if not, use the number of days calculated
operation_time['time_construct_bool'] = (turbine_num / operation_time['Operational construct days'] * 6
> float(rate_of_deliveries))
boolean_dictionary = {True: (float(turbine_num) / (float(rate_of_deliveries) / 6)), False: np.NAN}
operation_time['time_construct_bool'] = operation_time['time_construct_bool'].map(boolean_dictionary)
operation_time['Time construct days'] = operation_time[
['time_construct_bool', 'Operational construct days']].max(
axis=1)
possible_cranes['Operation'] = 'Offload'
operation_time['Operation'] = 'Offload'
else:
possible_cranes = []
operation_time = []
# print(possible_cranes[['Crane name', 'Component', 'Operation time hr']])
unique_components = project_data['components']['Component'].unique()
unique_component_crane = possible_cranes['Component'].unique()
for component in unique_components:
if component not in unique_component_crane:
raise Exception('Error: Unable to find offload crane for {}'.format(component))
return possible_cranes, operation_time
def calculate_crane_lift_polygons(self, crane_grouped):
"""
Here we associate polygons with each crane. However, these polygons are not shapes
for the lift. Rather, they define functions f(x), where x is a crane lift load and
f(x) is the height to which that load can be lifted. To find out whether the crane
can lift a particular load, one just needs to check whether a point x (lift mass in
tonnes) and y (lift height in m) lies within the crane's polygon.
Parameters
----------
crane_grouped : pandas.core.groupby.generic.DataFrameGroupBy
The aggregation of the cranes to compute the lift polygons for. The columns
in the aggregation are assume to be 'Equipment name', 'Crane name', 'Boom system',
'Crane capacity tonne'
Returns
-------
pd.DataFrame
A dataframe of the cranes and their lifting polygons.
"""
crane_poly = pd.DataFrame(
columns=['Equipment name', 'Equipment ID', 'Crane name', 'Boom system', 'Crane capacity tonne', 'Crane poly'])
for (equipment_name, equipment_id, crane_name, boom_system, crane_capacity_tonne), crane in crane_grouped:
crane = crane.reset_index(drop=True)
x = crane['Max capacity tonne']
y = crane['Hub height m']
wind_speed = min(crane['Max wind speed m per s'])
hoist_speed = min(crane['Hoist speed m per min'])
travel_speed = min(crane['Speed of travel km per hr'])
setup_time = max(crane['Setup time hr'])
crew_type = crane.loc[0, 'Crew type ID'] # For every crane/boom combo the crew is the same, so we can just take first crew.
polygon = Polygon([(0, 0), (0, max(y)), (min(x), max(y)), (max(x), min(y)), (max(x), 0)])
df = pd.DataFrame([[equipment_name,
equipment_id,
crane_name,
boom_system,
crane_capacity_tonne,
wind_speed,
setup_time,
hoist_speed,
travel_speed,
crew_type,
polygon]],
columns=['Equipment name', 'Equipment ID', 'Crane name', 'Boom system', 'Crane capacity tonne',
'Max wind speed m per s', 'Setup time hr',
'Hoist speed m per min', 'Speed of travel km per hr',
'Crew type ID', 'Crane poly'])
crane_poly = crane_poly.append(df, sort=True)
return crane_poly
def calculate_component_lift_max_wind_speed(self, *, component_group, crane_poly, component_max_speed, operation):
"""
First, using the height and mass of the component being lifted, this method determines
if a component can be lifted to the necessary height by each crane.
Also, creates a dataframe that has the maximum wind speeds to lift particular components,
given the component data and crane lift data given in the arguments.
For the maximum wind speed calculations, we use these equations to calculation vmax,
which is the maximum permissible wind speed:
vmax = max_TAB * sqrt(1.2 * mh / aw), where
mh = hoist load
aw = area exposed to wind = surface area * coeff drag
1.2 = constant in m^2 / t
vmax_tab = maximum wind speed per load chart
(source: pg. 33 of Liebherr)
See the source code for this method on how this calculation is used.
Parameters
----------
component_group : pd.DataFrame
Dataframe with component data.
crane_poly : pd.DataFrame
Data about cranes doing the lifting. The polygons are specifications of
functions that define lift height f(x) as a function of component mass x.
component_max_speed : pd.DataFrame
The dataframe into which maximum wind speeds for lifting each component
will be accumulated. For the first call into this method, pass in an
empty dataframe created with pd.DataFrame
operation : str
The name of the operation ("base", "top" or "offload") that the cranes
are performing for this calculation. If the operation is "Offload"
the 'Mass tonne' is divided by two when making the lift polygons.
This created the assumption that there are always 2 offload cranes during
offload operations. (See the calculate_crane_lift_polygons() method
above for more about calculating the lift polygons.
Returns
-------
dict
Returns a dict of pd.DataFrame values. The key "component_max_speed" is the
the dataframe of max component speeds. The key "crane_poly" is a COPY of the
crane_poly dataframe passed as a parameter to this function and with a column
of "Crane bool {operation}" attached.
"""
for idx, crane in crane_poly.iterrows():
polygon = crane['Crane poly']
# calculate polygon for crane capacity and check if component can be lifted by each crane without wind loading
for component in component_group['Component']:
# get weight and height of component in each component group
component_only = component_group.where(component_group['Component'] == component).dropna(thresh=1)
# See docstring for "operation" parameter above about mass calculations for offloading
if operation == 'offload':
point = Point(component_only['Mass tonne'] / 2, (component_only['Section height m'] + component_only['Offload hook height m']))
else:
point = Point(component_only['Mass tonne'], (component_only['Lift height m'] + component_only['Offload hook height m']))
crane['Lift boolean {component}'.format(component=component)] = polygon.contains(point)
# Transform the "Lift boolean" indexes in the series to a list of booleans
# that signify if the crane can lift a component.
bool_list = list()
for component in component_group['Component']:
if crane['Lift boolean {component}'.format(component=component)] is False:
crane_bool = False
else:
crane_bool = True
bool_list.append(crane_bool)
# mh is an effective mass (it should be the mass of the entire component for both offload and other cranes, not just 1/2 that's used above for determining whether the part can be lifted)
mh = component_group['Mass tonne']
aw = component_group['Surface area sq m'] * component_group['Coeff drag']
vmax_tab = crane['Max wind speed m per s']
vmax_calc = vmax_tab * np.sqrt(1.2 * mh / aw)
# if vmax_calc is less than vmax_tab then vmax_calc, otherwise vmax_tab (based on pg. 33 of Liebherr)
component_group_new = pd.DataFrame(component_group,
columns=list(component_group.columns.values) + ['vmax',
'Crane name',
'Boom system',
'crane_bool'])
component_group_new['vmax'] = np.minimum(vmax_tab, vmax_calc)
component_group_new['Crane name'] = crane['Crane name']
component_group_new['Boom system'] = crane['Boom system']
component_group_new['crane_bool'] = bool_list
component_max_speed = component_max_speed.append(component_group_new, sort=True)
crane_poly_new = crane_poly.copy()
crane_poly_new['Crane bool {}'.format(operation)] = min(bool_list)
result = {
'component_max_speed': component_max_speed,
'crane_poly': crane_poly_new
}
return result
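# Worked sketch of the vmax rule above (hypothetical numbers, not project data):
# with vmax_tab = 12 m/s, a 70 t nacelle exposing aw = 50 m^2 gives
# 12 * sqrt(1.2 * 70 / 50) ~ 15.6 m/s, so the 12 m/s chart limit governs, while a
# light 5 t component exposing 60 m^2 gives 12 * sqrt(1.2 * 5 / 60) ~ 3.8 m/s,
# which becomes the binding limit via np.minimum(vmax_tab, vmax_calc).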
def calculate_wind_delay_by_component(self):
"""
Calculates wind delay for each component in the project.
Returns
-------
pd.DataFrame
crane specifications and component properties joined with wind delays for each case.
"""
# Get necessary values from input_dict
crane_specs = self.output_dict['crane_specs_withoffload']
weather_window = self.input_dict['weather_window']
# calculate wind delay for each component and crane combination
crane_specs = crane_specs.reset_index()
crane_specs['Wind delay percent'] = np.nan
# pull global inputs for weather delay from input_dict
weather_data_keys = {'wind_shear_exponent', 'weather_window'}
# specify collection-specific weather delay inputs
weather_delay_global_inputs = {i: self.input_dict[i] for i in self.input_dict if i in weather_data_keys}
# Iterate over every crane + boom combination
for i, row in crane_specs.iterrows():
# assume we don't know when the operation occurs
operation_window = len(weather_window.index) # operation window = entire construction weather window
operation_start = 0 # start time is at beginning of construction weather window
# extract critical wind speed
critical_wind_operation = row['vmax']
# extract height of interest (differs for offload cranes)
if (row['Crane bool offload'] == 1) is True:
height_interest = row['Section height m'] + row['Offload hook height m']
else:
height_interest = row['Lift height m'] + row['Offload hook height m']
# compute weather delay
weather_delay_input_dict = weather_delay_global_inputs
weather_delay_output_dict = dict()
weather_delay_input_dict['start_delay_hours'] = operation_start
weather_delay_input_dict['critical_wind_speed_m_per_s'] = critical_wind_operation
weather_delay_input_dict['wind_height_of_interest_m'] = height_interest
weather_delay_input_dict['mission_time_hours'] = operation_window
WeatherDelay(weather_delay_input_dict, weather_delay_output_dict)
wind_delay = np.array(weather_delay_output_dict['wind_delays'])
# if greater than 4 hour delay, then shut down for full day (10 hours)
wind_delay[(wind_delay > 4)] = 10
wind_delay_time = float(wind_delay.sum())
# store weather delay for operation, component, crane, and boom combination
crane_specs.loc[i, 'Wind delay percent'] = wind_delay_time / len(weather_window)
self.output_dict['enhanced_crane_specs'] = crane_specs
return crane_specs
def aggregate_erection_costs(self):
"""
Aggregates labor, equipment, mobilization and fuel costs for erection.
Returns
-------
(pd.DataFrame, pd.DataFrame)
Two dataframes: First, utilizing the same crane for base and topping.
Second, utilizing separate cranes for base and topping
"""
join_wind_operation = self.output_dict['join_wind_operation']
overtime_multiplier = self.input_dict['overtime_multiplier']
time_construct = self.input_dict['time_construct']
project_data = self.input_dict['project_data']
hour_day = self.input_dict['hour_day']
# TODO: consider removing equipment name and crane capacity from crane_specs tab (I believe these data are unused here and they get overwritten later with equip information from equip tab)
join_wind_operation = join_wind_operation.drop(columns=['Equipment name', 'Crane capacity tonne'])
possible_crane_cost_with_equip = pd.merge(join_wind_operation, project_data['equip'],
on=['Equipment ID', 'Operation'])
equip_crane_cost = pd.merge(possible_crane_cost_with_equip, project_data['equip_price'],
on=['Equipment name', 'Crane capacity tonne'])
equip_crane_cost['Equipment rental cost USD'] = equip_crane_cost['Total time per op with weather'] * \
equip_crane_cost['Equipment price USD per hour'] * \
equip_crane_cost['Number of equipment']
equipment_cost_to_merge = equip_crane_cost[['Crane name', 'Boom system', 'Equipment ID', 'Operation', 'Equipment price USD per hour', 'Number of equipment', 'Equipment rental cost USD', 'Fuel consumption gal per day']]
equipment_cost_to_merge = equipment_cost_to_merge.groupby(['Crane name', 'Boom system', 'Equipment ID', 'Operation']).sum().reset_index()
possible_crane_cost = pd.merge(join_wind_operation, equipment_cost_to_merge, on=['Crane name', 'Boom system', 'Equipment ID', 'Operation'])
# Merge crew and price data for non-management crews only (base, topping, and offload only)
crew_cost = pd.merge(project_data['crew'], project_data['crew_price'], on=['Labor type ID'])  # pandas.merge
"""Web interface"""
import re
import base64
import numpy as np
import os
import pandas as pd
from sklearn.manifold import TSNE
import spacy
import streamlit as st
from textblob import TextBlob
import src.analyzer as az
import src.constants as cts
import src.doc_similarity as ds
import src.get_handler as gh
import src.json_util as ju
import src.markdown as md
import src.summarizer as sz
import src.topic_modeling as tm
import src.visualization as vis
# resources/sample_reflections/lab1, resources/sample_reflections/lab2
# initialize main_df and preprocessed_df
SPACY_MODEL_NAMES = ["en_core_web_sm", "en_core_web_md"]
preprocessed_df = pd.DataFrame()
main_df = pd.DataFrame()
assignments = None
assign_text = None
stu_id = None
success_msg = None
debug_mode = False
def main():
"""main streamlit function"""
# Title
st.sidebar.title("Welcome to GatorMiner!")
data_retreive_method = st.sidebar.selectbox(
"Choose the data retrieving method",
[
"Local file system",
"AWS",
],
)
if retreive_data(data_retreive_method):
analysis_mode = st.sidebar.selectbox(
"Choose the analysis mode",
[
"Home",
"Frequency Analysis",
"Sentiment Analysis",
"Document Similarity",
"Summary",
"Topic Modeling",
"Interactive",
],
)
if debug_mode:
st.write(main_df)
if analysis_mode == "Home":
readme()
else:
if analysis_mode == "Frequency Analysis":
st.title(analysis_mode)
frequency()
elif analysis_mode == "Sentiment Analysis":
st.title(analysis_mode)
sentiment()
elif analysis_mode == "Document Similarity":
st.title(analysis_mode)
doc_sim()
elif analysis_mode == "Summary":
st.title(analysis_mode)
summary()
elif analysis_mode == "Topic Modeling":
st.title(analysis_mode)
tpmodel()
elif analysis_mode == "Interactive":
st.title(analysis_mode)
interactive()
success_msg.empty()
def readme():
"""function to load and configurate readme source"""
with open("README.md") as readme_file:
readme_src = readme_file.read()
for file in os.listdir("resources/images"):
if file.endswith(".png"):
img_path = f"resources/images/{file}"
with open(img_path, "rb") as f:
img_bin = base64.b64encode(f.read()).decode()
readme_src = readme_src.replace(img_path, f"data:image/png;base64,{img_bin}")
st.markdown(readme_src, unsafe_allow_html=True)
def landing_pg():
"""landing page"""
landing = st.sidebar.selectbox("Welcome", ["Home", "Interactive"])
if landing == "Home":
readme()
else:
interactive()
def retreive_data(data_retreive):
"""pipeline to retrieve data from user input to output"""
global preprocessed_df
global main_df
if data_retreive == "Local file system":
input_assignments = st.sidebar.text_input(
"Enter path(s) to markdown documents (seperate by comma)"
)
else:
input_assignments = st.sidebar.text_input(
"Enter assignment names of the markdown \
documents(seperate by comma)"
)
st.sidebar.info(
"You will need to store keys and endpoints in the \
environment variables")
if not input_assignments:
landing_pg()
else:
input_assignments = re.split(r"[;,\s]\s*", input_assignments)
try:
main_df, preprocessed_df = import_data(
data_retreive, input_assignments)
except TypeError:
st.sidebar.warning(
"No data imported. Please check the reflection document input")
readme()
else:
global success_msg
success_msg = None
if main_df.empty is not True:
success_msg = st.sidebar.success("Successfully Loaded!!")
global assign_id
assign_id = preprocessed_df.columns[0]
global assignments
assignments = st.sidebar.multiselect(
label="Select assignments below:",
options=main_df[assign_id].unique(),
)
global assign_text
assign_text = ", ".join(assignments)
global stu_id
stu_id = preprocessed_df.columns[1]
return True
@st.cache(allow_output_mutation=True)
def load_model(name):
"""load spacy model"""
return spacy.load(name)
@st.cache(allow_output_mutation=True, suppress_st_warning=True)
def import_data(data_retreive_method, paths):
"""pipeline to import data from local or aws"""
json_lst = []
if data_retreive_method == "Local file system":
try:
for path in paths:
json_lst.append(md.collect_md(path))
except FileNotFoundError as err:
st.sidebar.text(err)
readme()
else:
passbuild = st.sidebar.checkbox(
"Only retreive build success records", value=True)
try:
configs = gh.auth_config()
for path in paths:
response = gh.get_request(path, passbuild, **configs)
json_lst.append(ju.clean_report(response))
except (EnvironmentError, Exception) as err:
st.sidebar.error(err)
readme()
# when data is retrieved
if json_lst:
raw_df = pd.DataFrame()
for item in json_lst:
single_df = pd.DataFrame(item)
raw_df = pd.concat([raw_df, single_df]).fillna("")
tidy_df = df_preprocess(raw_df)
return tidy_df, raw_df
def df_preprocess(df):
"""build and preprocess (combine, normalize, tokenize) text"""
# filter out first two columns -- non-report content
cols = df.columns[2:]
# combining text into combined column
df["combined"] = df[cols].apply(
lambda row: "\n".join(row.values.astype(str)), axis=1
)
# normalize
df[cts.NORMAL] = df["combined"].apply(lambda row: az.normalize(row))
# tokenize
df[cts.TOKEN] = df[cts.NORMAL].apply(lambda row: az.tokenize(row))
return df
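# A small sketch of the "combined" column built above, using only pandas; the column
# names and reflection text are hypothetical stand-ins for the real report fields.
#
#   >>> import pandas as pd
#   >>> df = pd.DataFrame({"assignment": ["lab1"], "student": ["s1"],
#   ...                    "q1": ["Liked the lab"], "q2": ["TDD was hard"]})
#   >>> cols = df.columns[2:]
#   >>> df["combined"] = df[cols].apply(
#   ...     lambda row: "\n".join(row.values.astype(str)), axis=1)
#   >>> df.loc[0, "combined"]
#   'Liked the lab\nTDD was hard'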
def frequency():
"""main function for frequency analysis"""
freq_type = st.sidebar.selectbox(
"Type of frequency analysis", ["Overall", "Student", "Question"]
)
if freq_type == "Overall":
freq_range = st.sidebar.slider(
"Select a range of Most frequent words", 1, 50, value=25
)
st.sidebar.success(
'To continue to individual frequency analysis, select "Student"'
)
st.header(f"Overall most frequent words in **{assign_text}**")
overall_freq(freq_range)
elif freq_type == "Student":
freq_range = st.sidebar.slider(
"Select a range of Most frequent words", 1, 20, value=10
)
st.header(
f"Most frequent words by individual students in **{assign_text}**"
)
student_freq(freq_range)
elif freq_type == "Question":
freq_range = st.sidebar.slider(
"Select a range of Most frequent words", 1, 20, value=10
)
st.header(
f"Most frequent words in individual questions in **{assign_text}**"
)
question_freq(freq_range)
def overall_freq(freq_range):
"""page fore overall word frequency"""
plots_range = st.sidebar.slider(
"Select the number of plots per row", 1, 5, value=3
)
freq_df = pd.DataFrame(columns=["assignments", "word", "freq"])
# calculate word frequency of each assignments
for item in assignments:
# combined text of the whole assignment
combined_text = " ".join(
main_df[main_df[assign_id] == item][cts.NORMAL]
)
item_df = pd.DataFrame(
az.word_frequency(combined_text, freq_range),
columns=["word", "freq"],
)
item_df["assignments"] = item
freq_df = freq_df.append(item_df)
# plot all the subplots of different assignments
st.altair_chart(
vis.facet_freq_barplot(
freq_df, assignments, "assignments", plots_per_row=plots_range
)
)
def student_freq(freq_range):
"""page for individual student's word frequency"""
students = st.multiselect(
label="Select specific students below:",
options=main_df[stu_id].unique(),
)
plots_range = st.sidebar.slider(
"Select the number of plots per row", 1, 5, value=3
)
freq_df = pd.DataFrame(columns=["student", "word", "freq"])
stu_assignment = main_df[
(main_df[stu_id].isin(students))
& main_df[assign_id].isin(assignments)
]
if len(students) != 0:
for student in students:
for item in assignments:
individual_freq = az.word_frequency(
stu_assignment[
(stu_assignment[assign_id] == item)
& (stu_assignment[stu_id] == student)
]
.loc[:, ["combined"]]
.to_string(),
freq_range,
)
ind_df = pd.DataFrame(individual_freq, columns=["word", "freq"])
ind_df["assignments"] = item
ind_df["student"] = student
freq_df = freq_df.append(ind_df)
st.altair_chart(
vis.facet_freq_barplot(
freq_df,
students,
"student",
color_column="assignments",
plots_per_row=plots_range,
)
)
def question_freq(freq_range):
"""page for individual question's word frequency"""
# drop columns with all na
select_preprocess = preprocessed_df[
preprocessed_df[assign_id].isin(assignments)
].dropna(axis=1, how="all")
questions = st.multiselect(
label="Select specific questions below:",
options=select_preprocess.columns[2:],
)
plots_range = st.sidebar.slider(
"Select the number of plots per row", 1, 5, value=1
)
freq_question_df = pd.DataFrame(columns=["question", "word", "freq"])
select_text = {}
for question in questions:
select_text[question] = main_df[question].to_string(
index=False, na_rep=""
)
question_df = pd.DataFrame(
select_text.items(), columns=["question", "text"]
)
if len(questions) != 0:
for question in questions:
quest_freq = az.word_frequency(
question_df[question_df["question"] == question]
.loc[:, ["text"]]
.to_string(),
freq_range,
)
ind_df = | pd.DataFrame(quest_freq, columns=["word", "freq"]) | pandas.DataFrame |
import numpy as np
import pandas as pd
import geopandas as gpd
import matplotlib.pyplot as plt
from accessibility_analyzing.accessibility_calculator import accessibility_calculator as AC  # program entry point; written as an absolute import for now
from accessibility_analyzing import utlis as ut
def deprived_changed(mintime, maxtime, timegap, deprived_boundary, entro_type, research_area_file,
npy_file=r"D:\pyprojectlbw\odtime_generate\datarep\2198_2197_night_sz.npy",
delete_shp_file=r'D:\multicities\data\深圳分区\水库_Clip.shp',
is_timefeecost_model=False,
demography_index='Sum_PEO',
target_index='index'):
"""
Query how many people end up deprived as the travel-time budget changes.
deprived_boundary: the accessibility threshold below which a zone counts as deprived
entro_type: which type of opportunity to evaluate
"""
for each in range(mintime, maxtime+timegap, timegap):  # TODO decouple this into its own module; the same logic is reused many times below
ac = AC(target_index=target_index,npy_file=npy_file,research_area_file=research_area_file,demography_index=demography_index,
delelte_shp_file=delete_shp_file,opportunity_index=entro_type,time_boundary=each,
deprived_boundary=deprived_boundary,is_timefeecost_model=is_timefeecost_model)
df_temp = ac.to_dataframe()
temp = df_temp['deprived_pop'].sum()  # how many people are deprived
temp1 = df_temp[demography_index].sum()
print(temp,temp1,temp/temp1)
yield each, temp/temp1
# print(temp)
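# A minimal sketch of consuming the generator above to build a travel-time -> deprived-share
# curve; the file path and thresholds here are placeholders, not the project defaults.
#
#   >>> curve = dict(deprived_changed(mintime=1800, maxtime=5400, timegap=600,
#   ...                               deprived_boundary=0.05, entro_type='entr_0_per',
#   ...                               research_area_file='path/to/research_area.shp'))
#   >>> # curve maps each time budget in seconds to the share of population deprived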
def plot_comparison(mintime_range,maxtime_range,time_gap,deprived_boundary=0.05,
is_timefeecost_model=False,delete_shp_file=None,
filepath=r'C:\Users\43714\Desktop\temp.png',
npy_file=r"D:\pyprojectlbw\odtime_generate\datarep\2198_2197_night_sz.npy",
npy_file1 = r"D:\pyprojectlbw\odtime_generate\datarep\2198_2197_night_sz.npy",
oppor_index_list=['entr_0_per','entr_1_per','entr_2_1_p'],
color_list=['cornflowerblue','orangered','lightgreen'],
color_list1 = ['mediumblue','darkred','forestgreen'],
research_area_file=r'D:\multicities\data\深圳分区\sz_10_acc_entro.shp',
demo_index="Sum_PEO",
target_index='index'):
"""
Plotting module that compares the deprived population between two time points.
"""
fig = plt.figure()
axe = plt.subplot()
# color = color_list
entroy_index = oppor_index_list
for each, each1, each1_1 in zip(entroy_index, color_list, color_list1):
x_label = []
y_label = []
for each2 in deprived_changed(mintime_range, maxtime_range, time_gap, deprived_boundary, each,
delete_shp_file=delete_shp_file,
is_timefeecost_model=is_timefeecost_model,
npy_file=npy_file,
research_area_file=research_area_file,
demography_index=demo_index,
target_index=target_index):
x_label.append(each2[0] / 60)
y_label.append(each2[1])
l, = axe.plot(x_label, y_label, color=each1, linestyle=':',linewidth=1)
l.set_label(' ')
x_label = []
y_label = []
for each2 in deprived_changed(mintime_range, maxtime_range, time_gap, deprived_boundary, each,
delete_shp_file=delete_shp_file,
is_timefeecost_model=is_timefeecost_model,
npy_file=npy_file1,
research_area_file=research_area_file,
demography_index=demo_index,
target_index=target_index):
x_label.append(each2[0] / 60)
y_label.append(each2[1])
l, = axe.plot(x_label, y_label, color=each1_1,linewidth=1,)
l.set_label(' ')
axe.axvline(x=55, ls='-.', c='grey')
axe.grid(True)
plt.legend()
plt.yticks([x for x in np.arange(0, 1.2, 0.2)], ('0', '20%', '40%', '60%', '80%', '100%'))
plt.xticks([x for x in np.arange(mintime_range / 60, (maxtime_range + time_gap) / 60,
time_gap / 60)], ('30', '35', '40', '45', '50', '55', '60', '65', '70', '75'
, '80', '85', '90',))
plt.savefig(filepath, dpi=300)
plt.show()
def plot(mintime_range,maxtime_range,time_gap,deprived_boundary=0.05,
is_timefeecost_model=False,delete_shp_file=None,
filepath=r'C:\Users\43714\Desktop\temp.png',
npy_file=r"D:\pyprojectlbw\odtime_generate\datarep\2198_2197_night_sz.npy",
oppor_index_list=['entr_0_per','entr_1_per','entr_2_1_p'],
color_list=['cornflowerblue','orangered','lightgreen'],
research_area_file=r'D:\multicities\data\深圳分区\sz_10_acc_entro.shp',
demo_index="Sum_PEO",
target_index='index'):
"""
Plotting module.
"""
fig = plt.figure()
axe = plt.subplot()
color = color_list
entroy_index = oppor_index_list
for each,each1 in zip(entroy_index,color):
x_label = []
y_label = []
for each2 in deprived_changed(mintime_range,maxtime_range,time_gap,deprived_boundary,each,
delete_shp_file=delete_shp_file,
is_timefeecost_model=is_timefeecost_model,
npy_file=npy_file,
research_area_file=research_area_file,
demography_index=demo_index,
target_index=target_index):
x_label.append(each2[0]/60)
y_label.append(each2[1])
l, =axe.plot(x_label,y_label,color = each1,)
l.set_label(' ')
axe.axvline(x=55,ls='-.',c='grey')
axe.grid(True)
plt.legend()
plt.yticks([x for x in np.arange(0,1.2,0.2)],('0','20%','40%','60%','80%','100%'))
plt.xticks([x for x in np.arange(mintime_range/60,(maxtime_range+time_gap)/60,
time_gap/60)],('30','35','40','45','50','55','60','65','70','75'
,'80','85','90',))
plt.savefig(filepath,dpi=300)
plt.show()
def deprived_stat_cal(shp_file_dir='./datarep/sz_access_dir',target_index = 'index',panda_dic=dict(),
deprived_index='deprived_p',demo_index="Sum_PEO",
excel_save_path=r'./datarep/',
divided_region_dir=r'D:\multicities\data\深圳分区\分区去重结果\最终结果1',
divided_rigion_index=2,
reindex_index = None,
excel_file_name = 'results_deprived'
):
'''
shp_file_dir: path to the directory of shp files for which accessibility has already been computed
'''
for shp_file, _ in ut.iter_shpfile(shp_file_dir, ['.shp']):
temp_str = '剥夺占比'+_  # column key: '剥夺占比' means 'deprived share'
temp_str1 = '名称'+_  # column key: '名称' means 'name'
panda_dic[temp_str] = []
panda_dic[temp_str1] = []
AC_result = ut.read_file(shp_file)
panda_dic[temp_str].append(AC_result[deprived_index].sum()/AC_result[demo_index].sum())
panda_dic[temp_str1].append('全市')  # '全市' = city-wide total
for each,file_name in ut.generate_district_indexlist(dir_name=divided_region_dir,target_index=divided_rigion_index):
t = AC_result[AC_result[target_index].isin(each)]  # core logic: split the results by administrative district
t = t[deprived_index].sum()/t[demo_index].sum()
panda_dic[temp_str].append(t)
panda_dic[temp_str1].append(file_name)
df = | pd.DataFrame.from_dict(panda_dic) | pandas.DataFrame.from_dict |
###### GET THE MUSIC VIDEOS FROM YOUTUBE AND SAVE TO GOOGLE SHEET ######
import config
from requests import get
import math
import pandas as pd
import gspread_pandas as gspd
import logging
logging.basicConfig(filename='similarbands.log', level=logging.INFO, format='%(levelname)s:%(name)s:%(asctime)s: %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
log = logging.getLogger(__name__)
def get_yt_likes(playlists=config.playlists,yt_key=config.yt_key,debug=0):
###### PARSE VIDEOS ######
def parse_videos(page,df,response):
# loop through all videos
for item in response['items']:
#put id, title, description into df
video_id = item['snippet']['resourceId']['videoId']
video_url = config.yt_baseurl + item['snippet']['resourceId']['videoId']
video_title = item['snippet']['title']
video_description = item['snippet']['description']
df_video = pd.DataFrame.from_dict({'id':[video_id], 'url':[video_url], 'title':[video_title], 'description':[video_description]})
df = df.append(df_video, ignore_index=True)
return df
###########################
df = | pd.DataFrame() | pandas.DataFrame |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import pandas
from pandas.compat import string_types
from pandas.core.dtypes.cast import find_common_type
from pandas.core.dtypes.common import (
is_list_like,
is_numeric_dtype,
is_datetime_or_timedelta_dtype,
)
from pandas.core.index import ensure_index
from pandas.core.base import DataError
from modin.engines.base.frame.partition_manager import BaseFrameManager
from modin.error_message import ErrorMessage
from modin.backends.base.query_compiler import BaseQueryCompiler
class PandasQueryCompiler(BaseQueryCompiler):
"""This class implements the logic necessary for operating on partitions
with a Pandas backend. This logic is specific to Pandas."""
def __init__(
self, block_partitions_object, index, columns, dtypes=None, is_transposed=False
):
assert isinstance(block_partitions_object, BaseFrameManager)
self.data = block_partitions_object
self.index = index
self.columns = columns
if dtypes is not None:
self._dtype_cache = dtypes
self._is_transposed = int(is_transposed)
# Index, columns and dtypes objects
_dtype_cache = None
def _get_dtype(self):
if self._dtype_cache is None:
def dtype_builder(df):
return df.apply(lambda row: | find_common_type(row.values) | pandas.core.dtypes.cast.find_common_type |
import numpy as np
import pandas as pd
from pywt import wavedec
from zipfile import ZipFile
from statsmodels.robust.scale import mad as medianAD
def get_class_and_frequence(path: str) -> (int, int):
'''
`path` is a str of the form 'folder/subfolder/file'.
The return value is a tuple `(class, frequency)`,
where the values come from the names of the subfolder
and the file, respectively.
'''
_, class_str, freq_str = path.split('/')
# The class is the last character of the string
class_int = int(class_str[-1])
# The file name holds 4 values separated by the char 'c' (V0cV1cV2cV3.csv),
# where the frequency is the third value, V2
freq_int = int(freq_str.split('c')[2])
return (class_int, freq_int)
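# A quick illustration of the parsing convention documented above; the path below is a
# hypothetical example following the 'folder/classN/V0cV1cV2cV3.csv' pattern.
#
#   >>> get_class_and_frequence('bearings/class3/12c0c1770c5.csv')
#   (3, 1770)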
def energy(vec:np.ndarray) -> np.float64:
return np.square(vec).sum()
def create_fs20(vec:np.ndarray, file_path:str) -> pd.DataFrame:
'''
Given a signal (`vec`) and the name of the source file (`file_path`),
returns a 1-row dataframe with the extracted "Feature Set 20" attributes.
Feature Set 20:
---
+ MeanAD D3, MeanAD D4, MeanAD A5;
+ MedianAD D3, MedianAD D4, MedianAD D5, MedianAD A5;
+ Energy D3, Energy D4, Energy D5, Energy A5;
+ Kurt D5, Kurt A5;
+ Skew D4;
+ Frequency;
'''
result_df = pd.DataFrame()
# tuple of coefficients: (A5, D5, D4, ..., D1)
dwt_coefs = wavedec(data=vec, wavelet='db2', level=5)
# meanAD A5, D4, D3
for index, coef in zip([0, 2, 3], ['A5', 'D4', 'D3']):
result_df[f'MeanAD-{coef}'] = pd.DataFrame(dwt_coefs[index]).mad()
# medianAD A5, D5, D4, D3 and Energy A5, D5, D4, D3
for index, coef in zip([0, 1, 2, 3], ['A5', 'D5', 'D4', 'D3']):
result_df[f'MedianAD-{coef}'] = medianAD(dwt_coefs[index])
result_df[f'Energy-{coef}'] = energy(dwt_coefs[index])
# Kurtosis A5
result_df['Kurt-A5'] = pd.DataFrame(dwt_coefs[0]).kurt()
# Kurtosis D5
result_df['Kurt-D5'] = | pd.DataFrame(dwt_coefs[1]) | pandas.DataFrame |
import numpy as np
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA
from sklearn.cross_validation import train_test_split
X_train, X_test, y_train, y_test = train_test_split(svd_topic_vectors, sms.spam, test_size=0.5, random_state=271828)
lda = LDA(n_components=1)
lda = lda.fit(X_train, y_train)
sms['svd16_spam'] = lda.predict(pca_topic_vectors)
from nlpia.data.loaders import get_data
sms = get_data('sms-spam')
from sklearn.feature_extraction.text import TfidfVectorizer
from nltk.tokenize import casual_tokenize
tfidf = TfidfVectorizer(tokenizer=casual_tokenize)
tfidf_docs = tfidf.fit_transform(raw_documents=sms.text).toarray()
tfidf_cov = tfidf_docs.dot(tfidf_docs.T)
from sklearn.decomposition import TruncatedSVD
from seaborn import plt
svd = TruncatedSVD(16)
svd = svd.fit(tfidf_cov)
svd_topic_vectors = svd.transform(tfidf_cov)
import pandas as pd
svd_topic_vectors = pd.DataFrame(svd_topic_vectors,
columns=['topic{}'.format(i) for i in range(16)])
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA
from sklearn.cross_validation import train_test_split
X_train, X_test, y_train, y_test = train_test_split(svd_topic_vectors, sms.spam, test_size=0.5, random_state=271828)
lda = LDA(n_components=1)
lda = lda.fit(X_train, y_train)
sms['svd16_spam'] = lda.predict(svd_topic_vectors)
round(float(lda.score(X_test, y_test)), 3)
hist -o -p
hist
svd_topic_vectors
# %paste
# >>> svd = TruncatedSVD(16) # <1>
# >>> svd = svd.fit(tfidf_cov)
# >>> svd_topic_vectors = svd.transform(tfidf_cov)
# >>> svd_topic_vectors = pd.DataFrame(svd_topic_vectors,
# columns=['topic{}'.format(i) for i in range(16)])
svd_topic_vectors.head()
tfidf_cov
pd.DataFrame(tfidf_cov, columns=['doc{}'.format(i) for i in range(len(tfidf_cov))])
columns = ['doc{}'.format(i) for i in range(len(tfidf_cov))]
pd.DataFrame(tfidf_cov, columns=columns, index=index)
pd.DataFrame(tfidf_cov, columns=columns, index=columns)
| pd.DataFrame(tfidf_cov, columns=columns, index=columns) | pandas.DataFrame |
# -*- coding: utf-8 -*-
import numpy as np
import pytest
from numpy.random import RandomState
from numpy import nan
from datetime import datetime
from itertools import permutations
from pandas import (Series, Categorical, CategoricalIndex,
Timestamp, DatetimeIndex, Index, IntervalIndex)
import pandas as pd
from pandas import compat
from pandas._libs import (groupby as libgroupby, algos as libalgos,
hashtable as ht)
from pandas._libs.hashtable import unique_label_indices
from pandas.compat import lrange, range
import pandas.core.algorithms as algos
import pandas.core.common as com
import pandas.util.testing as tm
import pandas.util._test_decorators as td
from pandas.core.dtypes.dtypes import CategoricalDtype as CDT
from pandas.compat.numpy import np_array_datetime64_compat
from pandas.util.testing import assert_almost_equal
class TestMatch(object):
def test_ints(self):
values = np.array([0, 2, 1])
to_match = np.array([0, 1, 2, 2, 0, 1, 3, 0])
result = algos.match(to_match, values)
expected = np.array([0, 2, 1, 1, 0, 2, -1, 0], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
result = Series(algos.match(to_match, values, np.nan))
expected = Series(np.array([0, 2, 1, 1, 0, 2, np.nan, 0]))
tm.assert_series_equal(result, expected)
s = Series(np.arange(5), dtype=np.float32)
result = algos.match(s, [2, 4])
expected = np.array([-1, -1, 0, -1, 1], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
result = Series(algos.match(s, [2, 4], np.nan))
expected = Series(np.array([np.nan, np.nan, 0, np.nan, 1]))
tm.assert_series_equal(result, expected)
def test_strings(self):
values = ['foo', 'bar', 'baz']
to_match = ['bar', 'foo', 'qux', 'foo', 'bar', 'baz', 'qux']
result = algos.match(to_match, values)
expected = np.array([1, 0, -1, 0, 1, 2, -1], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
result = Series(algos.match(to_match, values, np.nan))
expected = Series(np.array([1, 0, np.nan, 0, 1, 2, np.nan]))
tm.assert_series_equal(result, expected)
class TestFactorize(object):
def test_basic(self):
labels, uniques = algos.factorize(['a', 'b', 'b', 'a', 'a', 'c', 'c',
'c'])
tm.assert_numpy_array_equal(
uniques, np.array(['a', 'b', 'c'], dtype=object))
labels, uniques = algos.factorize(['a', 'b', 'b', 'a',
'a', 'c', 'c', 'c'], sort=True)
exp = np.array([0, 1, 1, 0, 0, 2, 2, 2], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array(['a', 'b', 'c'], dtype=object)
tm.assert_numpy_array_equal(uniques, exp)
labels, uniques = algos.factorize(list(reversed(range(5))))
exp = np.array([0, 1, 2, 3, 4], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([4, 3, 2, 1, 0], dtype=np.int64)
tm.assert_numpy_array_equal(uniques, exp)
labels, uniques = algos.factorize(list(reversed(range(5))), sort=True)
exp = np.array([4, 3, 2, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([0, 1, 2, 3, 4], dtype=np.int64)
tm.assert_numpy_array_equal(uniques, exp)
labels, uniques = algos.factorize(list(reversed(np.arange(5.))))
exp = np.array([0, 1, 2, 3, 4], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([4., 3., 2., 1., 0.], dtype=np.float64)
tm.assert_numpy_array_equal(uniques, exp)
labels, uniques = algos.factorize(list(reversed(np.arange(5.))),
sort=True)
exp = np.array([4, 3, 2, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([0., 1., 2., 3., 4.], dtype=np.float64)
tm.assert_numpy_array_equal(uniques, exp)
def test_mixed(self):
# doc example reshaping.rst
x = Series(['A', 'A', np.nan, 'B', 3.14, np.inf])
labels, uniques = algos.factorize(x)
exp = np.array([0, 0, -1, 1, 2, 3], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = Index(['A', 'B', 3.14, np.inf])
tm.assert_index_equal(uniques, exp)
labels, uniques = algos.factorize(x, sort=True)
exp = np.array([2, 2, -1, 3, 0, 1], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = Index([3.14, np.inf, 'A', 'B'])
tm.assert_index_equal(uniques, exp)
def test_datelike(self):
# M8
v1 = Timestamp('20130101 09:00:00.00004')
v2 = Timestamp('20130101')
x = Series([v1, v1, v1, v2, v2, v1])
labels, uniques = algos.factorize(x)
exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = DatetimeIndex([v1, v2])
tm.assert_index_equal(uniques, exp)
labels, uniques = algos.factorize(x, sort=True)
exp = np.array([1, 1, 1, 0, 0, 1], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = DatetimeIndex([v2, v1])
tm.assert_index_equal(uniques, exp)
# period
v1 = pd.Period('201302', freq='M')
v2 = pd.Period('201303', freq='M')
x = Series([v1, v1, v1, v2, v2, v1])
# periods are not 'sorted' as they are converted back into an index
labels, uniques = algos.factorize(x)
exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(uniques, pd.PeriodIndex([v1, v2]))
labels, uniques = algos.factorize(x, sort=True)
exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(uniques, pd.PeriodIndex([v1, v2]))
# GH 5986
v1 = pd.to_timedelta('1 day 1 min')
v2 = pd.to_timedelta('1 day')
x = Series([v1, v2, v1, v1, v2, v2, v1])
labels, uniques = algos.factorize(x)
exp = np.array([0, 1, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(uniques, pd.to_timedelta([v1, v2]))
labels, uniques = algos.factorize(x, sort=True)
exp = np.array([1, 0, 1, 1, 0, 0, 1], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(uniques, pd.to_timedelta([v2, v1]))
def test_factorize_nan(self):
# nan should map to na_sentinel, not reverse_indexer[na_sentinel]
# rizer.factorize should not raise an exception if na_sentinel indexes
# outside of reverse_indexer
key = np.array([1, 2, 1, np.nan], dtype='O')
rizer = ht.Factorizer(len(key))
for na_sentinel in (-1, 20):
ids = rizer.factorize(key, sort=True, na_sentinel=na_sentinel)
expected = np.array([0, 1, 0, na_sentinel], dtype='int32')
assert len(set(key)) == len(set(expected))
tm.assert_numpy_array_equal(pd.isna(key),
expected == na_sentinel)
# nan still maps to na_sentinel when sort=False
key = np.array([0, np.nan, 1], dtype='O')
na_sentinel = -1
# TODO(wesm): unused?
ids = rizer.factorize(key, sort=False, na_sentinel=na_sentinel) # noqa
expected = np.array([2, -1, 0], dtype='int32')
assert len(set(key)) == len(set(expected))
tm.assert_numpy_array_equal(pd.isna(key), expected == na_sentinel)
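# A small standalone illustration of the behaviour exercised above: NaN is encoded with
# the na_sentinel label instead of receiving its own unique value (values are made up).
#
#   >>> import numpy as np, pandas as pd
#   >>> labels, uniques = pd.factorize(np.array([1, 2, 1, np.nan], dtype=object))
#   >>> labels
#   array([ 0,  1,  0, -1])
#   >>> uniques
#   array([1, 2], dtype=object)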
@pytest.mark.parametrize("data,expected_label,expected_level", [
(
[(1, 1), (1, 2), (0, 0), (1, 2), 'nonsense'],
[0, 1, 2, 1, 3],
[(1, 1), (1, 2), (0, 0), 'nonsense']
),
(
[(1, 1), (1, 2), (0, 0), (1, 2), (1, 2, 3)],
[0, 1, 2, 1, 3],
[(1, 1), (1, 2), (0, 0), (1, 2, 3)]
),
(
[(1, 1), (1, 2), (0, 0), (1, 2)],
[0, 1, 2, 1],
[(1, 1), (1, 2), (0, 0)]
)
])
def test_factorize_tuple_list(self, data, expected_label, expected_level):
# GH9454
result = pd.factorize(data)
tm.assert_numpy_array_equal(result[0],
np.array(expected_label, dtype=np.intp))
expected_level_array = com._asarray_tuplesafe(expected_level,
dtype=object)
tm.assert_numpy_array_equal(result[1], expected_level_array)
def test_complex_sorting(self):
# gh 12666 - check no segfault
# Test not valid numpy versions older than 1.11
if pd._np_version_under1p11:
pytest.skip("Test valid only for numpy 1.11+")
x17 = np.array([complex(i) for i in range(17)], dtype=object)
pytest.raises(TypeError, algos.factorize, x17[::-1], sort=True)
def test_uint64_factorize(self):
data = np.array([2**63, 1, 2**63], dtype=np.uint64)
exp_labels = np.array([0, 1, 0], dtype=np.intp)
exp_uniques = np.array([2**63, 1], dtype=np.uint64)
labels, uniques = algos.factorize(data)
tm.assert_numpy_array_equal(labels, exp_labels)
tm.assert_numpy_array_equal(uniques, exp_uniques)
data = np.array([2**63, -1, 2**63], dtype=object)
exp_labels = np.array([0, 1, 0], dtype=np.intp)
exp_uniques = np.array([2**63, -1], dtype=object)
labels, uniques = algos.factorize(data)
tm.assert_numpy_array_equal(labels, exp_labels)
tm.assert_numpy_array_equal(uniques, exp_uniques)
def test_deprecate_order(self):
# gh 19727 - check warning is raised for deprecated keyword, order.
# Test not valid once order keyword is removed.
data = np.array([2**63, 1, 2**63], dtype=np.uint64)
with tm.assert_produces_warning(expected_warning=FutureWarning):
algos.factorize(data, order=True)
with tm.assert_produces_warning(False):
algos.factorize(data)
@pytest.mark.parametrize('data', [
np.array([0, 1, 0], dtype='u8'),
np.array([-2**63, 1, -2**63], dtype='i8'),
np.array(['__nan__', 'foo', '__nan__'], dtype='object'),
])
def test_parametrized_factorize_na_value_default(self, data):
# arrays that include the NA default for that type, but isn't used.
l, u = algos.factorize(data)
expected_uniques = data[[0, 1]]
expected_labels = np.array([0, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(l, expected_labels)
tm.assert_numpy_array_equal(u, expected_uniques)
@pytest.mark.parametrize('data, na_value', [
(np.array([0, 1, 0, 2], dtype='u8'), 0),
(np.array([1, 0, 1, 2], dtype='u8'), 1),
(np.array([-2**63, 1, -2**63, 0], dtype='i8'), -2**63),
(np.array([1, -2**63, 1, 0], dtype='i8'), 1),
(np.array(['a', '', 'a', 'b'], dtype=object), 'a'),
(np.array([(), ('a', 1), (), ('a', 2)], dtype=object), ()),
(np.array([('a', 1), (), ('a', 1), ('a', 2)], dtype=object),
('a', 1)),
])
def test_parametrized_factorize_na_value(self, data, na_value):
l, u = algos._factorize_array(data, na_value=na_value)
expected_uniques = data[[1, 3]]
expected_labels = np.array([-1, 0, -1, 1], dtype=np.intp)
tm.assert_numpy_array_equal(l, expected_labels)
tm.assert_numpy_array_equal(u, expected_uniques)
class TestUnique(object):
def test_ints(self):
arr = np.random.randint(0, 100, size=50)
result = algos.unique(arr)
assert isinstance(result, np.ndarray)
def test_objects(self):
arr = np.random.randint(0, 100, size=50).astype('O')
result = algos.unique(arr)
assert isinstance(result, np.ndarray)
def test_object_refcount_bug(self):
lst = ['A', 'B', 'C', 'D', 'E']
for i in range(1000):
len(algos.unique(lst))
def test_on_index_object(self):
mindex = pd.MultiIndex.from_arrays([np.arange(5).repeat(5), np.tile(
np.arange(5), 5)])
expected = mindex.values
expected.sort()
mindex = mindex.repeat(2)
result = pd.unique(mindex)
result.sort()
tm.assert_almost_equal(result, expected)
def test_datetime64_dtype_array_returned(self):
# GH 9431
expected = np_array_datetime64_compat(
['2015-01-03T00:00:00.000000000+0000',
'2015-01-01T00:00:00.000000000+0000'],
dtype='M8[ns]')
dt_index = pd.to_datetime(['2015-01-03T00:00:00.000000000+0000',
'2015-01-01T00:00:00.000000000+0000',
'2015-01-01T00:00:00.000000000+0000'])
result = algos.unique(dt_index)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
s = Series(dt_index)
result = algos.unique(s)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
arr = s.values
result = algos.unique(arr)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
def test_timedelta64_dtype_array_returned(self):
# GH 9431
expected = np.array([31200, 45678, 10000], dtype='m8[ns]')
td_index = pd.to_timedelta([31200, 45678, 31200, 10000, 45678])
result = algos.unique(td_index)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
s = Series(td_index)
result = algos.unique(s)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
arr = s.values
result = algos.unique(arr)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
def test_uint64_overflow(self):
s = Series([1, 2, 2**63, 2**63], dtype=np.uint64)
exp = np.array([1, 2, 2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(algos.unique(s), exp)
def test_nan_in_object_array(self):
l = ['a', np.nan, 'c', 'c']
result = pd.unique(l)
expected = np.array(['a', np.nan, 'c'], dtype=object)
tm.assert_numpy_array_equal(result, expected)
def test_categorical(self):
# we are expecting to return in the order
# of appearance
expected = Categorical(list('bac'), categories=list('bac'))
# we are expecting to return in the order
# of the categories
expected_o = Categorical(
list('bac'), categories=list('abc'), ordered=True)
# GH 15939
c = Categorical(list('baabc'))
result = c.unique()
tm.assert_categorical_equal(result, expected)
result = algos.unique(c)
tm.assert_categorical_equal(result, expected)
c = Categorical(list('baabc'), ordered=True)
result = c.unique()
tm.assert_categorical_equal(result, expected_o)
result = algos.unique(c)
tm.assert_categorical_equal(result, expected_o)
# Series of categorical dtype
s = Series(Categorical(list('baabc')), name='foo')
result = s.unique()
tm.assert_categorical_equal(result, expected)
result = pd.unique(s)
tm.assert_categorical_equal(result, expected)
# CI -> return CI
ci = CategoricalIndex(Categorical(list('baabc'),
categories=list('bac')))
expected = CategoricalIndex(expected)
result = ci.unique()
tm.assert_index_equal(result, expected)
result = pd.unique(ci)
tm.assert_index_equal(result, expected)
def test_datetime64tz_aware(self):
# GH 15939
result = Series(
Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')])).unique()
expected = np.array([Timestamp('2016-01-01 00:00:00-0500',
tz='US/Eastern')], dtype=object)
tm.assert_numpy_array_equal(result, expected)
result = Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')]).unique()
expected = DatetimeIndex(['2016-01-01 00:00:00'],
dtype='datetime64[ns, US/Eastern]', freq=None)
tm.assert_index_equal(result, expected)
result = pd.unique(
Series(Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')])))
expected = np.array([Timestamp('2016-01-01 00:00:00-0500',
tz='US/Eastern')], dtype=object)
tm.assert_numpy_array_equal(result, expected)
result = pd.unique(Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')]))
expected = DatetimeIndex(['2016-01-01 00:00:00'],
dtype='datetime64[ns, US/Eastern]', freq=None)
tm.assert_index_equal(result, expected)
def test_order_of_appearance(self):
# 9346
# light testing of guarantee of order of appearance
# these also are the doc-examples
result = pd.unique(Series([2, 1, 3, 3]))
tm.assert_numpy_array_equal(result,
np.array([2, 1, 3], dtype='int64'))
result = pd.unique(Series([2] + [1] * 5))
tm.assert_numpy_array_equal(result,
np.array([2, 1], dtype='int64'))
result = pd.unique(Series([Timestamp('20160101'),
Timestamp('20160101')]))
expected = np.array(['2016-01-01T00:00:00.000000000'],
dtype='datetime64[ns]')
tm.assert_numpy_array_equal(result, expected)
result = pd.unique(Index(
[Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')]))
expected = DatetimeIndex(['2016-01-01 00:00:00'],
dtype='datetime64[ns, US/Eastern]',
freq=None)
tm.assert_index_equal(result, expected)
result = pd.unique(list('aabc'))
expected = np.array(['a', 'b', 'c'], dtype=object)
tm.assert_numpy_array_equal(result, expected)
result = pd.unique(Series(Categorical(list('aabc'))))
expected = Categorical(list('abc'))
tm.assert_categorical_equal(result, expected)
@pytest.mark.parametrize("arg ,expected", [
(('1', '1', '2'), np.array(['1', '2'], dtype=object)),
(('foo',), np.array(['foo'], dtype=object))
])
def test_tuple_with_strings(self, arg, expected):
# see GH 17108
result = pd.unique(arg)
tm.assert_numpy_array_equal(result, expected)
class TestIsin(object):
def test_invalid(self):
pytest.raises(TypeError, lambda: algos.isin(1, 1))
pytest.raises(TypeError, lambda: algos.isin(1, [1]))
pytest.raises(TypeError, lambda: algos.isin([1], 1))
def test_basic(self):
result = algos.isin([1, 2], [1])
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(np.array([1, 2]), [1])
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(Series([1, 2]), [1])
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(Series([1, 2]), Series([1]))
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(Series([1, 2]), set([1]))
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(['a', 'b'], ['a'])
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(Series(['a', 'b']), Series(['a']))
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(Series(['a', 'b']), set(['a']))
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(['a', 'b'], [1])
expected = np.array([False, False])
tm.assert_numpy_array_equal(result, expected)
def test_i8(self):
arr = pd.date_range('20130101', periods=3).values
result = algos.isin(arr, [arr[0]])
expected = np.array([True, False, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(arr, arr[0:2])
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(arr, set(arr[0:2]))
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
arr = pd.timedelta_range('1 day', periods=3).values
result = algos.isin(arr, [arr[0]])
expected = np.array([True, False, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(arr, arr[0:2])
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(arr, set(arr[0:2]))
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
def test_large(self):
s = pd.date_range('20000101', periods=2000000, freq='s').values
result = algos.isin(s, s[0:2])
expected = np.zeros(len(s), dtype=bool)
expected[0] = True
expected[1] = True
tm.assert_numpy_array_equal(result, expected)
def test_categorical_from_codes(self):
# GH 16639
vals = np.array([0, 1, 2, 0])
cats = ['a', 'b', 'c']
Sd = Series(Categorical(1).from_codes(vals, cats))
St = Series(Categorical(1).from_codes(np.array([0, 1]), cats))
expected = np.array([True, True, False, True])
result = algos.isin(Sd, St)
tm.assert_numpy_array_equal(expected, result)
@pytest.mark.parametrize("empty", [[], Series(), np.array([])])
def test_empty(self, empty):
# see gh-16991
vals = Index(["a", "b"])
expected = np.array([False, False])
result = algos.isin(vals, empty)
tm.assert_numpy_array_equal(expected, result)
class TestValueCounts(object):
def test_value_counts(self):
np.random.seed(1234)
from pandas.core.reshape.tile import cut
arr = np.random.randn(4)
factor = cut(arr, 4)
# assert isinstance(factor, n)
result = algos.value_counts(factor)
breaks = [-1.194, -0.535, 0.121, 0.777, 1.433]
index = IntervalIndex.from_breaks(breaks).astype(CDT(ordered=True))
expected = Series([1, 1, 1, 1], index=index)
tm.assert_series_equal(result.sort_index(), expected.sort_index())
def test_value_counts_bins(self):
s = [1, 2, 3, 4]
result = algos.value_counts(s, bins=1)
expected = Series([4],
index=IntervalIndex.from_tuples([(0.996, 4.0)]))
tm.assert_series_equal(result, expected)
result = algos.value_counts(s, bins=2, sort=False)
expected = Series([2, 2],
index=IntervalIndex.from_tuples([(0.996, 2.5),
(2.5, 4.0)]))
tm.assert_series_equal(result, expected)
def test_value_counts_dtypes(self):
result = algos.value_counts([1, 1.])
assert len(result) == 1
result = algos.value_counts([1, 1.], bins=1)
assert len(result) == 1
result = algos.value_counts(Series([1, 1., '1'])) # object
assert len(result) == 2
pytest.raises(TypeError, lambda s: algos.value_counts(s, bins=1),
['1', 1])
def test_value_counts_nat(self):
td = Series([np.timedelta64(10000), pd.NaT], dtype='timedelta64[ns]')
dt = pd.to_datetime(['NaT', '2014-01-01'])
for s in [td, dt]:
vc = algos.value_counts(s)
vc_with_na = algos.value_counts(s, dropna=False)
assert len(vc) == 1
assert len(vc_with_na) == 2
exp_dt = Series({Timestamp('2014-01-01 00:00:00'): 1})
tm.assert_series_equal(algos.value_counts(dt), exp_dt)
# TODO same for (timedelta)
def test_value_counts_datetime_outofbounds(self):
# GH 13663
s = Series([datetime(3000, 1, 1), datetime(5000, 1, 1),
datetime(5000, 1, 1), datetime(6000, 1, 1),
datetime(3000, 1, 1), datetime(3000, 1, 1)])
res = s.value_counts()
exp_index = Index([datetime(3000, 1, 1), datetime(5000, 1, 1),
datetime(6000, 1, 1)], dtype=object)
exp = Series([3, 2, 1], index=exp_index)
tm.assert_series_equal(res, exp)
# GH 12424
res = pd.to_datetime(Series(['2362-01-01', np.nan]),
errors='ignore')
exp = Series(['2362-01-01', np.nan], dtype=object)
tm.assert_series_equal(res, exp)
def test_categorical(self):
s = Series(Categorical(list('aaabbc')))
result = s.value_counts()
expected = Series([3, 2, 1], index=CategoricalIndex(['a', 'b', 'c']))
tm.assert_series_equal(result, expected, check_index_type=True)
# preserve order?
s = s.cat.as_ordered()
result = s.value_counts()
expected.index = expected.index.as_ordered()
tm.assert_series_equal(result, expected, check_index_type=True)
def test_categorical_nans(self):
s = Series(Categorical(list('aaaaabbbcc'))) # 4,3,2,1 (nan)
s.iloc[1] = np.nan
result = s.value_counts()
expected = Series([4, 3, 2], index=CategoricalIndex(
['a', 'b', 'c'], categories=['a', 'b', 'c']))
tm.assert_series_equal(result, expected, check_index_type=True)
result = s.value_counts(dropna=False)
expected = Series([
4, 3, 2, 1
], index=CategoricalIndex(['a', 'b', 'c', np.nan]))
tm.assert_series_equal(result, expected, check_index_type=True)
# out of order
s = Series(Categorical(
list('aaaaabbbcc'), ordered=True, categories=['b', 'a', 'c']))
s.iloc[1] = np.nan
result = s.value_counts()
expected = Series([4, 3, 2], index=CategoricalIndex(
['a', 'b', 'c'], categories=['b', 'a', 'c'], ordered=True))
tm.assert_series_equal(result, expected, check_index_type=True)
result = s.value_counts(dropna=False)
expected = Series([4, 3, 2, 1], index=CategoricalIndex(
['a', 'b', 'c', np.nan], categories=['b', 'a', 'c'], ordered=True))
tm.assert_series_equal(result, expected, check_index_type=True)
def test_categorical_zeroes(self):
# keep the `d` category with 0
s = Series(Categorical(
list('bbbaac'), categories=list('abcd'), ordered=True))
result = s.value_counts()
expected = Series([3, 2, 1, 0], index=Categorical(
['b', 'a', 'c', 'd'], categories=list('abcd'), ordered=True))
tm.assert_series_equal(result, expected, check_index_type=True)
def test_dropna(self):
# https://github.com/pandas-dev/pandas/issues/9443#issuecomment-73719328
tm.assert_series_equal(
Series([True, True, False]).value_counts(dropna=True),
Series([2, 1], index=[True, False]))
tm.assert_series_equal(
Series([True, True, False]).value_counts(dropna=False),
Series([2, 1], index=[True, False]))
tm.assert_series_equal(
Series([True, True, False, None]).value_counts(dropna=True),
Series([2, 1], index=[True, False]))
tm.assert_series_equal(
Series([True, True, False, None]).value_counts(dropna=False),
Series([2, 1, 1], index=[True, False, np.nan]))
tm.assert_series_equal(
Series([10.3, 5., 5.]).value_counts(dropna=True),
Series([2, 1], index=[5., 10.3]))
tm.assert_series_equal(
Series([10.3, 5., 5.]).value_counts(dropna=False),
Series([2, 1], index=[5., 10.3]))
tm.assert_series_equal(
Series([10.3, 5., 5., None]).value_counts(dropna=True),
Series([2, 1], index=[5., 10.3]))
# 32-bit linux has a different ordering
if not compat.is_platform_32bit():
result = Series([10.3, 5., 5., None]).value_counts(dropna=False)
expected = Series([2, 1, 1], index=[5., 10.3, np.nan])
tm.assert_series_equal(result, expected)
def test_value_counts_normalized(self):
# GH12558
s = Series([1, 2, np.nan, np.nan, np.nan])
dtypes = (np.float64, np.object, 'M8[ns]')
for t in dtypes:
s_typed = s.astype(t)
result = s_typed.value_counts(normalize=True, dropna=False)
expected = Series([0.6, 0.2, 0.2],
index=Series([np.nan, 2.0, 1.0], dtype=t))
tm.assert_series_equal(result, expected)
result = s_typed.value_counts(normalize=True, dropna=True)
expected = Series([0.5, 0.5],
index=Series([2.0, 1.0], dtype=t))
tm.assert_series_equal(result, expected)
def test_value_counts_uint64(self):
arr = np.array([2**63], dtype=np.uint64)
expected = Series([1], index=[2**63])
result = algos.value_counts(arr)
tm.assert_series_equal(result, expected)
arr = np.array([-1, 2**63], dtype=object)
expected = Series([1, 1], index=[-1, 2**63])
result = algos.value_counts(arr)
# 32-bit linux has a different ordering
if not compat.is_platform_32bit():
| tm.assert_series_equal(result, expected) | pandas.util.testing.assert_series_equal |
import json
import datetime
import numpy as np
import pandas as pd
from pandas import json_normalize
import sqlalchemy as sq
import requests
from oanda.oanda import Account # oanda_v20_platform.
import os.path
import logging
from utils.fileops import get_abs_path
# TODO add updated to the database and have a check to update each day
class MarketData(Account):
"""Creates a sqlite database of current market information - for use by the trading strategies.
DB Browser https://sqlitebrowser.org/ can be used for easy viewing and filtering.
Focused on daily data, it includes for every tradable instrument a table with:
The Last 60 days of data
Yesterdays Volume, Open, High, Low, and Close
The 55 day Vol, O, H, L, C
The 20 day Vol, O, H, L, C
The 10 day Vol, O, H, L, C
True Range for each day - a volatility measure that captures gaps
N the 20 day average True Range - like ATR(20)
And a summary table of market data (called marketdata) required for trading effectively,
which includes the following information:
Trading costs such as financing rates and the days they are applied.
Pip positions (decimal points) for each instrument
Margin rates
Max and Min Trailing stop distances
Maximum order sizes
The average spread
The volatility (as N)
The spread percentage of N - enabling the selection of a trading range where trade costs are minimised
e.g. if spread is 20 and stop loss (SP) and take profit (TP) are 100 your trading edge has
to be able to overcome that ~20% cost to have any chance of succeeding - some of the instruments
with high spread % N are very hard (impossible) to trade profitably without a crystal ball.
The N per 100X spread provides a quick way to get the target trading range where the spread cost will
be ~1% e.g. US30_USD currently has a Nper100Spread of 1.92 and an N of 380 so if TP and SP are set to
380/1.92=198 pips you will only lose ~1% in spread cost and with the daily range at 380 you should
hit one of the targets in a day or so. Compared to say USD_JPY which currently has a N of 0.60 and
a Nper100Spread of 0.4, so if spread cost is kept to ~1% it will take a move of 1.5 (0.6/0.4), more like
3-4 days, before a target is hit. This column can be sorted to get a top 10 of instruments that
are efficient to trade.
The asset class and base currency
Args:
db_path str, default='data/marketdata.db':
The path to the database from the directory where this class is being run.
"""
def __init__(self, db_path=get_abs_path(['oanda_v20_platform','data', 'marketdata.db']), **kwargs):
super().__init__(**kwargs)
self.logger = logging.getLogger(__name__)
# setup connection to the database
self.db_path=db_path
self.engine = sq.create_engine(f'sqlite:///{self.db_path}')
# does the db exist if not create it by connecting
if not os.path.isfile(self.db_path):
conn = self.engine.connect()
conn.execute("commit")
conn.close()
self.logger.info(f"Empty MarketData database created at: {self.db_path}")
# get todays date
self.today = datetime.datetime.now().strftime('%Y-%m-%d')
try: # do we need to update marketdata?
sql = """SELECT DISTINCT(Updated) FROM marketdata;"""
data_date= pd.read_sql_query(sql, con=self.engine)
except: # only an empty db exists - build db
self.instruments = self.get_instruments()
self.build_db()
self.logger.info("Market data added to the database")
# is marketdata out of date?
if data_date.loc[0].item() != self.today:
self.instruments = self.get_instruments()
self.build_db()
self.logger.info("Market data updated in the database")
else: # get the marketdata
df = pd.read_sql_query(sql="""SELECT name, type, marginRate, N, avgSpread,
"financing.longRate", "financing.shortRate",
"Spread % N"
FROM marketdata """,
con=self.engine)
self.marketdata = df[['name', 'type', 'marginRate', 'N', 'avgSpread',
'financing.longRate', 'financing.shortRate',
'Spread % N']].sort_values(by='Spread % N')
def get_core_assets(self):
self.core = pd.read_sql_query(sql="""SELECT DISTINCT Base Currency, Asset FROM marketdata""", con=self.engine)
self.core_list = self.core['Instrument'].to_list()
def build_db(self):
# add data to the instruments
for i in self.instruments['instruments']:
ix = i['name']
self.logger.info(f"Collecting market data for {ix}")
# add the spread data for each instrument
i['avgSpread'] = self.avg_spread(self.spreads(ix))
# get the price data
df = self.make_dataframe(self.get_daily_candles(ix))
i['volume'] = df.iloc[0, 0]
i['open'] = df.iloc[0, 1]
i['high'] = df.iloc[0, 2]
i['low'] = df.iloc[0, 3]
i['close'] = df.iloc[0, 4]
i['True Range'] = df.iloc[0, 5]
i['N'] = df.iloc[0, 6]
i['55DayHigh'] = df.iloc[0, 7]
i['20DayHigh'] = df.iloc[0, 8]
i['10DayHigh'] = df.iloc[0, 9]
i['55DayLow'] = df.iloc[0, 10]
i['20DayLow'] = df.iloc[0, 11]
i['10DayLow'] = df.iloc[0, 12]
tags = pd.DataFrame()
for n, i in enumerate(self.instruments['instruments']):
x = i['tags']
for l in x:
tags.loc[n, 'Asset Class'] = l['name']
fDayWeek = pd.DataFrame()
for n, i in enumerate(self.instruments['instruments']):
x = i['financing']['financingDaysOfWeek']
for d in x:
fDayWeek.loc[n, d['dayOfWeek'] + '-financing'] = d['daysCharged']
tags = tags.merge(fDayWeek, left_index=True, right_index=True)
df = json_normalize(self.instruments['instruments'])
df.drop(['tags', 'financing.financingDaysOfWeek'], inplace=True, axis=1)
df = df.merge(tags, left_index=True, right_index=True)
df['Spread % N'] = round(((df['avgSpread'] * 10.00**df['pipLocation']) / df['N'])*100, 2)
df['Nper100spread'] = df['N'] / ((df['avgSpread'] * 10.00**df['pipLocation']) * 100)
df['Base Currency'] = df.apply(lambda x: self.base(x), axis=1)
df['Asset'] = df.apply(lambda x: self.asset(x), axis=1)
df['Updated'] = self.today
df.to_sql('marketdata', con=self.engine, if_exists='replace')
def base(self, x):
return x['name'].split('_')[1]
def asset(self, x):
return x['name'].split('_')[0]
def get_instruments(self, params=None):
"""Get instruments and there associated static data.
By default gets the core instruments stored in a csv. These core
instruments are the unique available instruments.
Returns:
json: contains data that describes the available instruments
"""
url = self.base_url + '/v3/accounts/' + self.account + '/instruments'
r = requests.get(url, headers=self.headers)
self.logger.debug(f"Get Instruments returned {r} status code")
data = r.json()
return data
def avg_spread(self, spreads_json):
"""Calculate the average spread from the json returned by spreads
Args:
spreads_json: json produced by spreads function
Returns:
float: average of the average spreads
"""
spreads = []
for li in spreads_json['avg']:
spreads.append(li[1])
return np.mean(spreads)
def spreads(self, instrument, period=86400):
"""Returns a json with timestamps for every 15min
with the min, max and average spread.
Args:
instrument: str, required, e.g. "EUR_USD"
period: int, time period in seconds e.g. 86400 for day
Returns:
json: { "max": [[1520028000, 6], .....],
"avg": [[1520028000, 3.01822], ......],
"min": [[1520028000, 1.7], ......]
}
"""
params = {
"instrument": instrument,
"period": period
}
url = self.base_url + '/labs/v1/spreads/'
r = requests.get(url, headers=self.headers, params=params)
self.logger.debug(f"Spreads function returned {r} status code")
data = r.json()
return data
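# A small sketch of avg_spread applied to the structure documented above; `md` stands in
# for an already-constructed MarketData instance, and the timestamps/values are invented.
#
#   >>> md.avg_spread({"avg": [[1520028000, 3.0], [1520028900, 2.0], [1520029800, 4.0]]})
#   3.0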
def get_daily_candles(self, instrument):
"""Request the daily candle data from the API
get 60 candles from yesterday
Args:
instrument: string describing the instrument in API
Returns:
json: candle data
"""
yesterday = (datetime.datetime.now() - pd.DateOffset(days=1)).strftime("%Y-%m-%d")
last_candle = yesterday + 'T22:00:00.000000000Z'
params = {
"to": last_candle,
"count": 60,
"granularity": "D",
# "includeFirst": True,
}
url = self.base_url + f'/v3/instruments/{instrument}/candles/'
r = requests.get(url, headers=self.headers, params=params)
self.logger.debug(f"Get daily candles returned {r} status code")
data = r.json()
return data
def make_dataframe(self, candles_data):
"""Take a json of candle data -
convert to a dataframe, calculate volatility,
max and min prices
Args:
candles_data ([json]): takes the json returned from get_candles
Returns:
sends data to sql table
pandas df: the last line of data
"""
df = json_normalize(candles_data.get('candles'))
df.rename(columns={'mid.c': 'close', 'mid.h': 'high',
'mid.l': 'low', 'mid.o': 'open'},
inplace=True)
df.set_index('time', inplace=True)
# the API returns strings these need to be converted to floats
df.volume = pd.to_numeric(df.volume)
df.close = | pd.to_numeric(df.close) | pandas.to_numeric |
# Copyright (c) Microsoft Corporation and contributors.
# Licensed under the MIT License.
import pandas as pd
import numpy as np
from .moment import ClassificationMoment
from .moment import _GROUP_ID, _LABEL, _PREDICTION, _ALL, _EVENT, _SIGN
from fairlearn._input_validation import _MESSAGE_RATIO_NOT_IN_RANGE
from .error_rate import ErrorRate
_UPPER_BOUND_DIFF = "upper_bound_diff"
_LOWER_BOUND_DIFF = "lower_bound_diff"
_MESSAGE_INVALID_BOUNDS = "Only one of difference_bound and ratio_bound can be used."
_DEFAULT_DIFFERENCE_BOUND = 0.01
class ConditionalSelectionRate(ClassificationMoment):
"""Generic fairness moment for selection rates.
This serves as the base class for both :class:`DemographicParity`
and :class:`EqualizedOdds`. The two are distinguished by
the events they define, which in turn affect the
`index` field created by :meth:`load_data()`.
The `index` field is a :class:`pandas:pandas.MultiIndex` corresponding to the rows of
the DataFrames either required as arguments or returned by several
of the methods of the `ConditionalSelectionRate` class. It is the cartesian
product of:
- The unique events defined for the particular object
- The unique values for the sensitive feature
- The characters `+` and `-`, corresponding to the Lagrange multipliers
for positive and negative violations of the constraint
The `ratio` specifies the multiple at which error(A = a) should be compared with total_error
and vice versa. The value of `ratio` has to be in the range (0,1] with smaller values
corresponding to weaker constraint. The `ratio` equal to 1 corresponds to the constraint
where error(A = a) = total_error
"""
def __init__(self, *, difference_bound=None, ratio_bound=None, ratio_bound_slack=0.0):
"""Initialize with the ratio value."""
super(ConditionalSelectionRate, self).__init__()
if (difference_bound is None) and (ratio_bound is None):
self.eps = _DEFAULT_DIFFERENCE_BOUND
self.ratio = 1.0
elif (difference_bound is not None) and (ratio_bound is None):
self.eps = difference_bound
self.ratio = 1.0
elif (difference_bound is None) and (ratio_bound is not None):
self.eps = ratio_bound_slack
if not (0 < ratio_bound <= 1):
raise ValueError(_MESSAGE_RATIO_NOT_IN_RANGE)
self.ratio = ratio_bound
else:
raise ValueError(_MESSAGE_INVALID_BOUNDS)
def default_objective(self):
"""Return the default objective for moments of this kind."""
return ErrorRate()
def load_data(self, X, y, event=None, utilities=None, **kwargs):
"""Load the specified data into this object.
This adds a column `event` to the `tags` field.
The `utilities` is a 2-d array which correspond to g(X,A,Y,h(X)) as mentioned in the paper
`Agarwal et al. (2018) <https://arxiv.org/abs/1803.02453>`. The `utilities` defaults to
h(X), i.e. [0, 1] for each X_i. The first column is G^0 and the second is G^1.
Assumes binary classification with labels 0/1.
.. math::
utilities = [g(X,A,Y,h(X)=0), g(X,A,Y,h(X)=1)]
"""
super().load_data(X, y, **kwargs)
self.tags[_EVENT] = event
if utilities is None:
utilities = np.vstack([np.zeros(y.shape, dtype=np.float64),
np.ones(y.shape, dtype=np.float64)]).T
self.utilities = utilities
self.prob_event = self.tags.groupby(_EVENT).size() / self.total_samples
self.prob_group_event = self.tags.groupby(
[_EVENT, _GROUP_ID]).size() / self.total_samples
signed = pd.concat([self.prob_group_event, self.prob_group_event],
keys=["+", "-"],
names=[_SIGN, _EVENT, _GROUP_ID])
self.index = signed.index
self.default_objective_lambda_vec = None
# fill in the information about the basis
event_vals = self.tags[_EVENT].dropna().unique()
group_vals = self.tags[_GROUP_ID].unique()
# The matrices pos_basis and neg_basis contain a lower-dimensional description of
# constraints, which is achieved by removing some redundant constraints.
# Considering fewer constraints is not required for correctness, but it can dramatically
# speed up GridSearch.
self.pos_basis = pd.DataFrame()
self.neg_basis = pd.DataFrame()
self.neg_basis_present = pd.Series(dtype='float64')
zero_vec = | pd.Series(0.0, self.index) | pandas.Series |
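# Illustration only (separate from the fairlearn module above): a minimal, hedged sketch of
# the signed MultiIndex described in the class docstring, i.e. the cartesian product of events,
# sensitive-feature values and the signs "+"/"-", plus the zero vector pd.Series(0.0, index)
# built over it. The event name "all" and the groups "A".."D" are invented for illustration.
def _signed_index_sketch():
    import pandas as pd
    # per-(event, group) probabilities, as tags.groupby([...]).size()/n would produce
    prob_group_event = pd.Series(
        [0.3, 0.2, 0.4, 0.1],
        index=pd.MultiIndex.from_product([["all"], ["A", "B", "C", "D"]],
                                         names=["event", "group_id"]))
    # duplicate under "+" and "-" keys, mirroring the pd.concat call in load_data
    signed = pd.concat([prob_group_event, prob_group_event],
                       keys=["+", "-"], names=["sign", "event", "group_id"])
    zero_vec = pd.Series(0.0, signed.index)  # one entry per Lagrange multiplier slot
    return signed, zero_vec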
import torch.optim as optim
import torch.nn as nn
import numpy as np
import pandas as pd
import configparser
import os
import torch
from cfr_net import CFRNet, WrappedDataLoader
def init(path=None):
config = configparser.ConfigParser()
if path is None:
config['data'] = {'input_dir': './data/IBM',
'output_dir': './results',
'val_ratio': 0.3,
'n_test_samples': 10000,
'use_input': 'all',
}
config['model'] = {'repre_layers': '[200,200,200]',
'pred_layers': '[100,100,100]',
'cuda': 0,
'bn': False
}
config['loss'] = {'alpha': 1,
'eps': 1e-3,
'max_iter': 10
}
config['training'] = {'max_epochs': 3000,
'min_lr': 1e-6 + 1e-7,
'train_batch_size': 1000,
'test_batch_size': 1000,
'optimizer': 'sgd',
'lr': 1e-3,
'weight_decay': 1e-4,
'momentum': 0.9,
'nesterov': True,
'verbose': 1,
'patience': 20,
'cooldown': 20
}
config['query'] = {'strategy': 'random',
'n_init': 1000,
'n_query_per_turn': 1000,
'n_query_max': 20000,
'n_set_size': 1,
'use_phi': False
}
config['log'] = {'n_epochs_print': 50}
else:
        config.read(path)
return config
def get_inputs(config):
files = os.listdir(config['data']['input_dir'])
list.sort(files)
if config['data']['use_input'] != 'all':
s, e = eval(config['data']['use_input'])
files = files[s:min(e, len(files))]
return files
def get_test_loader(test_data, test_batch_size):
test_X = test_data.iloc[:, 5:].values
test_y0, test_y1 = test_data['mu0'].values, test_data['mu1'].values
test_treated_dl = WrappedDataLoader(test_X, np.ones(test_X.shape[0]), test_y1, test_batch_size, False)
test_control_dl = WrappedDataLoader(test_X, np.zeros(test_X.shape[0]), test_y0, test_batch_size, False)
return test_treated_dl, test_control_dl
def get_train_loader(train_data, train_batch_size):
t = train_data['treatment'] == 1
train_all_treated_dl = WrappedDataLoader(train_data[t].iloc[:, 5:].values,
t.values.nonzero()[0],
np.ones(t.sum()),
train_batch_size, False)
t = train_data['treatment'] == 0
train_all_control_dl = WrappedDataLoader(train_data[t].iloc[:, 5:].values,
t.values.nonzero()[0],
np.ones(t.sum()),
train_batch_size, False)
return train_all_treated_dl, train_all_control_dl
def get_budgets(n_init, n_query_per_turn, n_query_max):
tmp = list(range(n_init + n_query_per_turn, n_query_max + 1,
n_query_per_turn)) if type(n_query_per_turn) == int else n_query_per_turn
budgets = [n_init] + [k for k in tmp if n_init < k <= n_query_max]
return budgets
def get_models(input_dim, config):
lr = eval(config['training']['lr'])
n_repre_layers = eval(config['model']['repre_layers'])
n_pred_layers = eval(config['model']['pred_layers'])
bn = eval(config['model']['bn'])
model = CFRNet(input_dim, n_repre_layers, n_pred_layers, bn)
weight_decay = eval(config['training']['weight_decay'])
if config['training']['optimizer'] == 'adam':
optimizer = optim.Adam(model.parameters(), lr=lr, weight_decay=weight_decay)
else:
optimizer = optim.SGD(model.parameters(), lr=lr, momentum=eval(config['training']['momentum']),
nesterov=eval(config['training']['nesterov']), weight_decay=weight_decay)
scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, cooldown=10)
return model, optimizer, scheduler
def compute_rmse(model, dl, device):
model.eval()
with torch.no_grad():
criterion = nn.MSELoss(reduction='sum')
mse = sum(criterion(model(xb.to(device), tb.to(device)), yb.to(device)) for xb, tb, yb in
dl) / dl.get_X_size()[0]
return np.sqrt(mse.item())
def compute_sqrt_pehe(model, treated_dl, control_dl, device):
model.eval()
n_samples = treated_dl.get_X_size()[0]
with torch.no_grad():
criterion = nn.MSELoss(reduction='sum')
mse_treated = sum(criterion(model(xb.to(device), tb.to(device)), yb.to(device)) for xb, tb, yb in
treated_dl) / n_samples
mse_control = sum(criterion(model(xb.to(device), tb.to(device)), yb.to(device)) for xb, tb, yb in
control_dl) / n_samples
pehe2 = sum(
criterion(model(xy1[0].to(device), xy1[1].to(device)) - model(xy0[0].to(device), xy0[1].to(device)),
xy1[2].to(device) - xy0[2].to(device)) for xy1, xy0 in
zip(treated_dl, control_dl)) / n_samples
return np.sqrt(pehe2.item()), np.sqrt(mse_treated.item()), np.sqrt(mse_control.item())
def choose_new_idx(start, end, selected, length):
return list(np.random.choice(list(set(range(start,end))-set(selected)),min(length,end-start-len(selected)),replace=False))
def save_cont_results(model, test_treated_dl, test_control_dl, device, file, results, predictions, num_data, output_path):
sqrt_pehe, rmse_treated, rmse_control = compute_sqrt_pehe(model, test_treated_dl, test_control_dl, device)
print('test set: treated_rmse = {} control_rmse = {} sqrt_pehe = {}'.format(rmse_treated, rmse_control, sqrt_pehe))
results.append([file, num_data, sqrt_pehe, rmse_treated, rmse_control])
| pd.DataFrame(results, columns=['file_name', 'budget', 'sqrt_pehe', 'rmse_treated', 'rmse_control']) | pandas.DataFrame |
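# Quick usage sketch for get_budgets above (toy numbers, loosely mirroring the defaults written
# into init()): starting from n_init labelled samples, the querying loop grows the pool by
# n_query_per_turn until n_query_max is reached.
def _budget_schedule_example():
    budgets = get_budgets(n_init=1000, n_query_per_turn=1000, n_query_max=5000)
    # budgets == [1000, 2000, 3000, 4000, 5000]
    return budgets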
import pandas as pd
from pandas.testing import assert_series_equal, assert_frame_equal
from ..cheval import LinkedDataFrame
vehicles_data = {
'household_id': [0, 0, 1, 2, 3],
'vehicle_id': [0, 1, 0, 0, 0],
'manufacturer': ['Honda', 'Ford', 'Ford', 'Toyota', 'Honda'],
'model_year': [2009, 2005, 2015, 2011, 2013],
'km_travelled': [103236, 134981, 19015, 75795, 54573]
}
households_data = {
'household_id': [0, 1, 2, 3],
'dwelling_type': ['house', 'apartment', 'house', 'house'],
'drivers': [4, 1, 2, 3]
}
def test_link_to():
vehicles = LinkedDataFrame(vehicles_data)
households = LinkedDataFrame(households_data)
vehicles.link_to(households, 'household', on='household_id')
households.link_to(vehicles, 'vehicles', on='household_id')
test_result = households.vehicles.sum("km_travelled")
expected_result = pd.Series({0: 238217, 1: 19015, 2: 75795, 3: 54573})
assert_series_equal(test_result, expected_result)
def test_slicing():
vehicles = LinkedDataFrame(vehicles_data)
households = LinkedDataFrame(households_data)
vehicles.link_to(households, 'household', on='household_id')
households.link_to(vehicles, 'vehicles', on='household_id')
mask = vehicles['household_id'] == 0
vehicles_subset = vehicles.loc[mask].copy()
vehicles_subset['dwelling_type'] = vehicles_subset.household.dwelling_type
test_result = vehicles_subset['dwelling_type']
expected_result = pd.Series({0: 'house', 1: 'house'}, name='dwelling_type')
assert_series_equal(test_result, expected_result)
def test_evaluate():
vehicles = LinkedDataFrame(vehicles_data)
households = LinkedDataFrame(households_data)
vehicles.link_to(households, 'household', on='household_id')
households.link_to(vehicles, 'vehicles', on='household_id')
vehicles['multiple_drivers'] = False
vehicles.evaluate('where(household.drivers > 1, True, False)', out=vehicles['multiple_drivers'])
test_result = vehicles['multiple_drivers']
expected_result = | pd.Series({0: True, 1: True, 2: False, 3: True, 4: True}, name='multiple_drivers') | pandas.Series |
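# A further example in the same style (same toy data, mirroring the accessor pattern used in
# test_slicing above): pull a household attribute down to each vehicle row through the
# 'household' link. The expected values follow directly from households_data.
def example_drivers_per_vehicle():
    vehicles = LinkedDataFrame(vehicles_data)
    households = LinkedDataFrame(households_data)
    vehicles.link_to(households, 'household', on='household_id')
    # one value per vehicle row: [4, 4, 1, 2, 3]
    return vehicles.household.drivers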
"""
Logistic Regression based upon sklearn.
"""
import datatable as dt
import numpy as np
import random
import pandas as pd
import os
import copy
import codecs
from sklearn.preprocessing import StandardScaler, LabelEncoder, OneHotEncoder
from sklearn.linear_model import LogisticRegression, LogisticRegressionCV
from sklearn.compose import ColumnTransformer, make_column_transformer
from sklearn.pipeline import make_pipeline
from sklearn.impute import SimpleImputer
from sklearn.metrics import roc_auc_score, make_scorer
from h2oaicore.models import CustomModel
from h2oaicore.systemutils import config, physical_cores_count, save_obj_atomically, load_obj, DefaultOrderedDict
from h2oaicore.systemutils import make_experiment_logger, loggerinfo, loggerwarning
from h2oaicore.transformers import CatOriginalTransformer, FrequentTransformer, CVTargetEncodeTransformer
from h2oaicore.transformer_utils import Transformer
from h2oaicore.transformers_more import CatTransformer, LexiLabelEncoderTransformer
from sklearn.model_selection import StratifiedKFold, cross_val_score
from sklearn.ensemble import VotingClassifier
class LogisticRegressionModel(CustomModel):
"""
Logistic Regression
    Useful when there are weak or no interactions between features,
    or a large inherent number of levels in categorical features.
    Other useful DAI options if you want to use only the features made internally by this model:
config.prob_prune_genes = False
config.prob_prune_by_features = False
    # Useful if you want training to ultimately see all data with the validated max_iter
config.fixed_ensemble_level=0
Recipe to do:
1) Add separate LogisticRegressionEarlyStopping class to use warm start to take iterations a portion at a time,
and score with known/given metric, and early stop to avoid overfitting on validation.
2) Improve bisection stepping for search
3) Consider from deployml.sklearn import LogisticRegressionBase
4) Implement LinearRegression/ElasticNet (https://scikit-learn.org/stable/modules/classes.html#module-sklearn.linear_model)
5) Implement other categorical missing encodings (same strategies as numerics)
6) Implement other scorers (i.e. checking score_f_name -> sklearn metric or using DAI metrics)
"""
_kaggle = False # some kaggle specific optimizations for https://www.kaggle.com/c/cat-in-the-dat
# with _kaggle_features=False and no catboost features:
# gives 0.8043 DAI validation for some seeds/runs,
# which leads to 0.80802 public score after only 2 minutes of running on accuracy=2, interpretability=1
# with _kaggle_features=False and catboost features:
# gives 0.8054 DAI validation for some seeds/runs,
# which leads to 0.80814 public score after only 10 minutes of running on accuracy=7, interpretability=1
# whether to generate features for kaggle
# these features do not help the score, but do make sense as plausible features to build
_kaggle_features = False
# whether to use validation and train together (assumes test with sample_weight=0 already part of train+valid) for features
_kaggle_mode = False
# numerical imputation for all columns (could be done per column chosen by mutations)
_impute_num_type = 'sklearn' # best for linear models
# _impute_num_type = 'oob' # risky for linear models, but can be used for testing
_impute_int_type = 'oob'
_impute_bool_type = 'oob'
_oob_bool = False
# categorical imputation for all columns (could be done per column chosen by mutations)
_impute_cat_type = 'oob'
_oob_cat = "__OOB_CAT__"
# unique identifier for OHE feature names
_ohe_postfix = "_*#!^()^{}"
# not required to be this strict, but good starting point to only use this recipe's features
_included_transformers = ['CatOriginalTransformer', 'OriginalTransformer', 'CatTransformer']
if _kaggle and 'CatTransformer' in _included_transformers:
# Just handle all cats directly
_included_transformers.remove('CatTransformer')
_can_handle_non_numeric = True # tell DAI we can handle non-numeric (i.e. strings)
_can_handle_categorical = True # tell DAI we can handle numerically encoded categoricals for use as categoricals
_num_as_cat = False or _kaggle # treating numeric as categorical best handled per column, but can force all numerics as cats
_num_as_num = False
_mutate_all = True # tell DAI we fully control mutation
_mutate_by_one = False # tell our recipe only changes one key at a time, can limit exploration if set as True
_mutate_by_one_sometimes = True
_always_defaults = False
_randomized_random_state = False
_overfit_limit_iteration_step = 10
# tell DAI want to keep track of self.params changes during fit, and to average numeric values across folds (if any)
_used_return_params = True
_average_return_params = True
# other DAI vars
_regression = False
_binary = True
_multiclass = True
_parallel_task = True # set to False may lead to faster performance if not doing grid search or cv search (should also set expert batch_cpu_tuning_max_workers to number of cores)
_fit_by_iteration = True
_fit_iteration_name = 'max_iter'
_display_name = "LR"
_description = "Logistic Regression"
_allow_basis_of_default_individuals = False
_fs_permute_must_use_self = True
    _check_stall = False  # avoid stall check, joblib loky stuff detaches sometimes
_testing_can_skip_failure = False # ensure tested as if shouldn't fail
# recipe vars for encoding choices
_use_numerics = True
_use_ohe_encoding = True
_use_target_encoding = False
_use_target_encoding_other = False
_use_ordinal_encoding = False
_use_catboost_encoding = False or _kaggle # Note: Requires data be randomly shuffled so target is not in special order
_use_woe_encoding = False
# tell DAI what pip modules we will use
_modules_needed_by_name = ['category_encoders']
if _use_target_encoding_other:
_modules_needed_by_name.extend(['target_encoding'])
# _modules_needed_by_name.extend(['git+https://github.com/h2oai/target_encoding#egg=target_encoding'])
# whether to show debug prints and write munged view to disk
_debug = True
    # whether to cache feature results, keyed only by transformer instance and X shape, so risky to use without care.
_cache = False
_ensemble = False
def set_default_params(self, accuracy=10, time_tolerance=10,
interpretability=1, **kwargs):
# Fill up parameters we care about
self.params = {}
self.mutate_params(get_default=True, accuracy=accuracy, time_tolerance=time_tolerance,
interpretability=interpretability, **kwargs)
def mutate_params(self, accuracy=10, time_tolerance=10, interpretability=1, **kwargs):
get_default = 'get_default' in kwargs and kwargs['get_default'] or self._always_defaults
params_orig = copy.deepcopy(self.params)
# control some behavior by how often the model was mutated.
# Good models that improve get repeatedly mutated, bad models tend to be one-off mutations of good models
if get_default:
self.params['mutation_count'] = 0
else:
if 'mutation_count' in self.params:
self.params['mutation_count'] += 1
else:
self.params['mutation_count'] = 0
# keep track of fit count, for other control over hyper parameter search in this recipe
if 'fit_count' not in self.params:
self.params['fit_count'] = 0
self.params['random_state'] = kwargs.get("random_state", 1234)
if self._randomized_random_state:
self.params['random_state'] = random.randint(0, 32000)
self.params['n_jobs'] = self.params_base.get('n_jobs', max(1, physical_cores_count))
# Modify certain parameters for tuning
if self._kaggle:
C_list = [0.095, 0.1, 0.115, 0.11, 0.105, 0.12, 0.125, 0.13, 0.14]
else:
C_list = [0.05, 0.075, 0.1, 0.15, 0.2, 1.0, 5.0]
self.params["C"] = float(np.random.choice(C_list)) if not get_default else 0.12
tol_list = [1e-4, 1e-3, 1e-5]
if accuracy < 5:
default_tol = 1e-4
elif accuracy < 6:
default_tol = 1e-5
elif accuracy <= 7:
default_tol = 1e-6
else:
default_tol = 1e-7
if self._kaggle:
default_tol = 1e-8
if default_tol not in tol_list:
tol_list.append(default_tol)
self.params["tol"] = float(np.random.choice(tol_list)) if not (self._kaggle or get_default) else default_tol
# solver_list = ['newton-cg', 'lbfgs', 'liblinear', 'sag', 'saga']
# newton-cg too slow
# sag too slow
# solver_list = ['lbfgs', 'liblinear', 'saga']
solver_list = ['lbfgs']
self.params["solver"] = str(np.random.choice(solver_list)) if not get_default else 'lbfgs'
if self._kaggle:
max_iter_list = [300, 350, 400, 450, 500, 700, 800, 900, 1000, 1500]
else:
max_iter_list = [150, 175, 200, 225, 250, 300]
self.params["max_iter"] = int(np.random.choice(max_iter_list)) if not get_default else 700
# self.params["max_iter"] = 37
if self.params["solver"] in ['lbfgs', 'newton-cg', 'sag']:
penalty_list = ['l2', 'none']
elif self.params["solver"] in ['saga']:
penalty_list = ['l1', 'l2', 'none']
elif self.params["solver"] in ['liblinear']:
penalty_list = ['l1']
else:
raise RuntimeError("No such solver: %s" % self.params['solver'])
self.params["penalty"] = str(np.random.choice(penalty_list)) if not (self._kaggle or get_default) else 'l2'
if self.params["penalty"] == 'elasticnet':
l1_ratio_list = [0, 0.25, 0.5, 0.75, 1.0]
self.params["l1_ratio"] = float(np.random.choice(l1_ratio_list))
else:
self.params.pop('l1_ratio', None)
if self.params["penalty"] == 'none':
self.params.pop('C', None)
else:
self.params['C'] = float(np.random.choice(C_list)) if not get_default else 0.12
if self.num_classes > 2:
self.params['multi_class'] = 'auto'
strategy_list = ['mean', 'median', 'most_frequent', 'constant']
self.params['strategy'] = str(np.random.choice(strategy_list)) if not get_default else 'mean'
if self._use_target_encoding:
min_samples_leaf_list = [1, 10, 50, 100]
self.params['min_samples_leaf'] = float(np.random.choice(min_samples_leaf_list))
smoothing_list = [1.0, 0.5, 10.0, 50.0]
self.params['smoothing'] = float(np.random.choice(smoothing_list))
if self._use_catboost_encoding:
if self._kaggle:
sigma_list = [None, 0.1, 0.2, 0.3, 0.4, 0.45, 0.5, 0.55, 0.6, 0.7, 0.8, 0.9]
else:
sigma_list = [None, 0.01, 0.05, 0.1, 0.5]
self.params['sigma'] = random.choice(sigma_list)
if self._use_woe_encoding:
randomized_list = [True, False]
self.params['randomized'] = random.choice(randomized_list)
sigma_woe_list = [0.05, 0.001, 0.01, 0.1, 0.005]
self.params['sigma_woe'] = random.choice(sigma_woe_list)
regularization_list = [1.0, 0.1, 2.0]
self.params['regularization'] = random.choice(regularization_list)
# control search in recipe
self.params['grid_search_iterations'] = accuracy >= 8
# cv search for hyper parameters, can be used in conjunction with _grid_search_by_iterations = True or False
self.params['cv_search'] = accuracy >= 9
if self._mutate_by_one_sometimes:
if np.random.random() > 0.5:
do_mutate_by_one = True
else:
do_mutate_by_one = False
else:
do_mutate_by_one = self._mutate_by_one
if do_mutate_by_one and not get_default and params_orig:
pick_key = str(np.random.choice(list(self.params.keys()), size=1)[0])
value = self.params[pick_key]
self.params = copy.deepcopy(params_orig)
self.params[pick_key] = value
# validate parameters to avoid single key leading to invalid overall parameters
if pick_key == 'penalty':
# has restrictions need to switch other keys if mismatched
if self.params["solver"] in ['lbfgs', 'newton-cg', 'sag']:
penalty_list = ['l2', 'none']
elif self.params["solver"] in ['saga']:
penalty_list = ['l1', 'l2', 'none']
elif self.params["solver"] in ['liblinear']:
penalty_list = ['l1']
if not self.params['penalty'] in penalty_list:
self.params['penalty'] = penalty_list[0] # just choose first
def fit(self, X, y, sample_weight=None, eval_set=None, sample_weight_eval_set=None, **kwargs):
if self._kaggle_mode and eval_set is not None:
new_X = dt.rbind([X, eval_set[0][0]])
new_sample_weight = np.concatenate([sample_weight, sample_weight_eval_set[0]])
new_sample_weight[X.shape[0]:X.shape[0] + eval_set[0][0].shape[0]] = 0
new_y = np.concatenate([y, eval_set[0][1]])
X = new_X
y = new_y
sample_weight = new_sample_weight
orig_dir = os.getcwd()
os.chdir(self.context.experiment_tmp_dir) # for joblib
os.makedirs(self.context.experiment_tmp_dir, exist_ok=True) # another copy for DAI transformers
orig_cols = list(X.names)
if self.num_classes >= 2:
lb = LabelEncoder()
lb.fit(self.labels)
y = lb.transform(y)
min_count = np.min(np.unique(y, return_counts=True)[1])
if min_count < 9:
self.params['cv_search'] = False
if min_count < 3:
self.params['grid_search_iterations'] = False
self.params['cv_search'] = False
if self._ensemble:
self.params['grid_search_iterations'] = False
self.params['cv_search'] = False
# save pre-datatable-imputed X
X_dt = X
# Apply OOB imputation
self.oob_imputer = OOBImpute(self._impute_num_type, self._impute_int_type, self._impute_bool_type,
self._impute_cat_type, self._oob_bool, self._oob_cat)
X = self.oob_imputer.fit_transform(X)
# convert to pandas for sklearn
X = X.to_pandas()
X_orig_cols_names = list(X.columns)
if self._kaggle_features:
self.features = make_features(cache=self._cache)
X = self.features.fit_transform(X, y, **kwargs)
else:
self.features = None
# print("LR: pandas dtypes: %s" % (str(list(X.dtypes))))
# FEATURE GROUPS
# Choose which features are numeric or categorical
cat_features = [x for x in X_orig_cols_names if CatOriginalTransformer.is_me_transformed(x)]
catlabel_features = [x for x in X_orig_cols_names if CatTransformer.is_me_transformed(x)]
# can add explicit column name list to below force_cats
force_cats = cat_features + catlabel_features
actual_numerical_features = (X.dtypes == 'float') | (X.dtypes == 'float32') | (
X.dtypes == 'float64') # | (X.dtypes == 'int') | (X.dtypes == 'int32') | (X.dtypes == 'int64') | (X.dtypes == 'bool')
# choose if numeric is treated as categorical
if not self._num_as_cat or self._num_as_num:
# treat (e.g.) binary as both numeric and categorical
numerical_features = copy.deepcopy(actual_numerical_features)
else:
# no numerics
numerical_features = X.dtypes == 'invalid'
if self._num_as_cat:
# then can't have None sent to cats, impute already up front
# force oob imputation for numerics
self.oob_imputer = OOBImpute('oob', 'oob', 'oob',
self._impute_cat_type, self._oob_bool, self._oob_cat)
X = self.oob_imputer.fit_transform(X_dt)
X = X.to_pandas()
if self._kaggle_features:
X = self.features.fit_transform(X, y, **kwargs)
if self._kaggle_features:
numerical_features = self.features.update_numerical_features(numerical_features)
if not self._num_as_cat:
# then cats are only things that are not numeric
categorical_features = ~actual_numerical_features
else:
# then everything is a cat
categorical_features = ~numerical_features # (X.dtypes == 'invalid')
# below can lead to overlap between what is numeric and what is categorical
more_cats = (pd.Series([True if x in force_cats else False for x in list(categorical_features.index)],
index=categorical_features.index))
categorical_features = (categorical_features) | (more_cats)
if self._kaggle_features:
categorical_features = self.features.update_categorical_features(categorical_features)
cat_X = X.loc[:, categorical_features]
num_X = X.loc[:, numerical_features]
if self._debug:
print("LR: Cat names: %s" % str(list(cat_X.columns)))
print("LR: Num names: %s" % str(list(num_X.columns)))
# TRANSFORMERS
lr_params = copy.deepcopy(self.params)
lr_params.pop('grid_search_by_iterations', None)
lr_params.pop('cv_search', None)
grid_search = False # WIP
full_features_list = []
transformers = []
if self._use_numerics and any(numerical_features.values):
impute_params = {}
impute_params['strategy'] = lr_params.pop('strategy', 'mean')
full_features_list.extend(list(num_X.columns))
transformers.append(
(make_pipeline(SimpleImputer(**impute_params), StandardScaler()), numerical_features)
)
# http://contrib.scikit-learn.org/categorical-encoding/
if self._use_ordinal_encoding and any(categorical_features.values):
ord_params = dict(handle_missing='value', handle_unknown='value')
full_features_list.extend(list(cat_X.columns))
            # Note: OrdinalEncoder doesn't handle unseen features, while the older CategoricalEncoder used to
import category_encoders as ce
transformers.append(
(ce.OrdinalEncoder(**ord_params), categorical_features)
)
if self._use_catboost_encoding and any(categorical_features.values):
cb_params = dict(handle_missing='value', handle_unknown='value')
cb_params['sigma'] = lr_params.pop('sigma')
full_features_list.extend(list(cat_X.columns))
import category_encoders as ce
transformers.append(
(ce.CatBoostEncoder(**cb_params), categorical_features)
)
if self._use_woe_encoding and any(categorical_features.values):
woe_params = dict(handle_missing='value', handle_unknown='value')
woe_params['randomized'] = lr_params.pop('randomized')
woe_params['sigma'] = lr_params.pop('sigma_woe')
woe_params['regularization'] = lr_params.pop('regularization')
full_features_list.extend(list(cat_X.columns))
import category_encoders as ce
transformers.append(
(ce.WOEEncoder(**woe_params), categorical_features)
)
if self._use_target_encoding and any(categorical_features.values):
te_params = dict(handle_missing='value', handle_unknown='value')
te_params['min_samples_leaf'] = lr_params.pop('min_samples_leaf')
te_params['smoothing'] = lr_params.pop('smoothing')
full_features_list.extend(list(cat_X.columns))
import category_encoders as ce
transformers.append(
(ce.TargetEncoder(**te_params), categorical_features)
)
if self._use_target_encoding_other and any(categorical_features.values):
full_features_list.extend(list(cat_X.columns))
cv = StratifiedKFold(n_splits=5, shuffle=True, random_state=self.params['random_state'])
split_cv = [cv]
# split_cv = [3, 3]
ALPHA, MAX_UNIQUE, FEATURES_COUNT = get_TE_params(cat_X, debug=self._debug)
from target_encoding import TargetEncoder
transformers.append(
(TargetEncoder(alpha=ALPHA, max_unique=MAX_UNIQUE, split_in=split_cv),
categorical_features)
)
if self._use_ohe_encoding and any(categorical_features.values):
transformers.append(
(OneHotEncoder(handle_unknown='ignore', sparse=True), categorical_features)
)
assert len(transformers) > 0, "should have some features"
preprocess = make_column_transformer(*transformers)
# ESTIMATOR
lr_defaults = dict(penalty='l2', dual=False, tol=1e-4, C=1.0,
fit_intercept=True, intercept_scaling=1, class_weight=None,
random_state=None, solver='warn', max_iter=100,
multi_class='warn', verbose=0, warm_start=False, n_jobs=None,
l1_ratio=None)
allowed_lr_kwargs_keys = lr_defaults.keys()
lr_params_copy = copy.deepcopy(lr_params)
for k, v in lr_params_copy.items():
if k not in allowed_lr_kwargs_keys:
lr_params.pop(k, None)
del lr_params_copy
can_score = self.num_classes == 2 and 'AUC' in self.params_base['score_f_name'].upper()
# print("LR: can_score: %s" % str(can_score))
if can_score:
scorer = make_scorer(roc_auc_score, greater_is_better=True, needs_proba=True)
else:
scorer = None
if not ('C' in lr_params or 'l1_ratios' in lr_params):
# override
self.params['cv_search'] = False
if not self.params['cv_search']:
estimator = LogisticRegression(**lr_params)
estimator_name = 'logisticregression'
else:
lr_params_cv = copy.deepcopy(lr_params)
if 'C' in lr_params:
lr_params_cv['Cs'] = self.get_param_range(self.params['C'], self.params['fit_count'], func_type='log')
# print("LR: CV: Cs: %s" % str(lr_params_cv['Cs']))
if 'l1_ratios' in lr_params:
lr_params_cv['l1_ratios'] = self.get_param_range(self.params['l1_ratio'], self.params['fit_count'],
func_type='linear')
# print("LR: CV: l1_ratios: %s" % str(lr_params_cv['l1_ratios']))
lr_params_cv.pop('n_jobs', None)
lr_params_cv.pop('C', None)
lr_params_cv.pop('l1_ratio', None)
if lr_params_cv['penalty'] == 'none':
lr_params_cv['penalty'] = 'l2'
estimator = LogisticRegressionCV(n_jobs=self.params['n_jobs'],
cv=3, refit=True, scoring=scorer, **lr_params_cv)
estimator_name = 'logisticregressioncv'
# PIPELINE
if not self._ensemble:
model = make_pipeline(
preprocess,
estimator, memory="./")
else:
ALPHA, MAX_UNIQUE, FEATURES_COUNT = get_TE_params(cat_X, debug=self._debug)
from target_encoding import TargetEncoderClassifier
te_estimator = TargetEncoderClassifier(alpha=ALPHA, max_unique=MAX_UNIQUE, used_features=FEATURES_COUNT)
estimators = [(estimator_name, estimator), ('teclassifier', te_estimator)]
model = make_pipeline(
preprocess,
VotingClassifier(estimators))
# FIT
if self.params['grid_search_iterations'] and can_score:
# WIP FIXME for multiclass and other scorers
from sklearn.model_selection import GridSearchCV
max_iter_range = self.get_param_range(self.params['max_iter'], self.params['fit_count'],
range_limit=self._overfit_limit_iteration_step, func_type='log')
# print("LR: max_iter_range: %s" % str(max_iter_range))
param_grid = {
'%s__max_iter' % estimator_name: max_iter_range,
}
grid_clf = GridSearchCV(model, param_grid, n_jobs=self.params['n_jobs'],
cv=3, iid=True, refit=True, scoring=scorer)
fitkwargs = dict()
fitkwargs["%s__sample_weight" % estimator_name] = sample_weight
grid_clf.fit(X, y, **fitkwargs)
model = grid_clf.best_estimator_
# print("LR: best_index=%d best_score: %g best_params: %s" % (
# grid_clf.best_index_, grid_clf.best_score_, str(grid_clf.best_params_)))
elif grid_search:
# WIP
from sklearn.model_selection import GridSearchCV
param_grid = {
'columntransformer__pipeline__simpleimputer__strategy': ['mean', 'median'],
'%s__C' % estimator_name: [0.1, 0.5, 1.0],
}
grid_clf = GridSearchCV(model, param_grid, cv=10, iid=False)
fitkwargs = dict()
fitkwargs["%s__sample_weight" % estimator_name] = sample_weight
grid_clf.fit(X, y, **fitkwargs)
model = grid_clf.best_estimator_
# self.best_params = grid_clf.best_params_
else:
fitkwargs = dict()
fitkwargs["%s__sample_weight" % estimator_name] = sample_weight
X = X.replace([np.inf, -np.inf], np.nan)
X = X.fillna(value=0)
model.fit(X, y, **fitkwargs)
# get actual LR model
lr_model = model.named_steps[estimator_name]
# average importances over classes
importances = np.average(np.fabs(np.array(lr_model.coef_)), axis=0)
# average iterations over classes (can't take max_iter per class)
iterations = int(np.average(lr_model.n_iter_))
# print("LR: iterations: %d" % iterations)
if self._debug:
full_features_list_copy = copy.deepcopy(full_features_list)
# reduce OHE features to original names
ohe_features_short = []
if self._use_ohe_encoding and any(categorical_features.values):
input_features = [x + self._ohe_postfix for x in cat_X.columns]
ohe_features = pd.Series(
model.named_steps['columntransformer'].named_transformers_['onehotencoder'].get_feature_names(
input_features=input_features))
def f(x):
return '_'.join(x.split(self._ohe_postfix + '_')[:-1])
# identify OHE features
ohe_features_short = ohe_features.apply(lambda x: f(x))
full_features_list.extend(list(ohe_features_short))
if self._debug:
full_features_list_copy.extend(list(ohe_features))
imp = pd.Series(importances, index=full_features_list_copy).sort_values(ascending=False)
import uuid
struuid = str(uuid.uuid4())
imp.to_csv("prepreimp_%s.csv" % struuid)
if self._debug:
imp = pd.Series(importances, index=full_features_list).sort_values(ascending=False)
import uuid
struuid = str(uuid.uuid4())
imp.to_csv("preimp_%s.csv" % struuid)
# aggregate our own features
if self._kaggle_features:
full_features_list = self.features.aggregate(full_features_list, importances)
msg = "LR: num=%d cat=%d : ohe=%d : imp=%d full=%d" % (
len(num_X.columns), len(cat_X.columns), len(ohe_features_short), len(importances), len(full_features_list))
if self._debug:
print(msg)
assert len(importances) == len(full_features_list), msg
if self._debug:
imp = pd.Series(importances, index=full_features_list).sort_values(ascending=False)
import uuid
struuid = str(uuid.uuid4())
imp.to_csv("imp_%s.csv" % struuid)
# aggregate importances by dai feature name
importances = pd.Series(np.abs(importances), index=full_features_list).groupby(level=0).mean()
assert len(importances) == len(
X_orig_cols_names), "lenimp=%d lenorigX=%d msg=%s : X.columns=%s dtypes=%s : full_features_list=%s" % (
len(importances), len(X_orig_cols_names), msg,
str(list(X.columns)), str(list(X.dtypes)), str(full_features_list))
# save hyper parameter searched results for next search
self.params['max_iter'] = iterations
if self.params['cv_search']:
self.params['C'] = np.average(lr_model.C_, axis=0)
if 'l1_ratios' in lr_params and self.params['cv_search']:
self.params['l1_ratio'] = np.average(lr_model.l1_ratio_, axis=0)
if 'fit_count' in self.params:
self.params['fit_count'] += 1
else:
self.params['fit_count'] = 0
importances_list = importances.tolist()
importances_list = list(np.array(importances_list) / np.max(importances_list))
self.set_model_properties(model=(model, self.features),
features=orig_cols,
importances=importances_list,
iterations=iterations)
self.features = None
os.chdir(orig_dir)
def get_param_range(self, param, fit_count, range_limit=None, func_type='linear'):
if func_type == 'log':
f = np.log
inv_f = np.exp
bottom = 1.0
top = 1.0
else:
f = np.abs
inv_f = np.abs
top = bottom = 1.0
# bisect toward optimal param
step_count = 3
params_step = 2 + fit_count
start_range = param * (1.0 - bottom / params_step)
end_range = param * (1.0 + top / params_step)
if range_limit is not None:
if end_range - start_range < range_limit:
# if below some threshold, don't keep refining to avoid overfit
return [param]
start = f(start_range)
end = f(end_range)
step = 1.0 * (end - start) / step_count
param_range = np.arange(start, end, step)
if type(param) == int:
param_range = [int(inv_f(x)) for x in param_range if int(inv_f(x)) > 0]
else:
param_range = [inv_f(x) for x in param_range if inv_f(x) > 0]
if param not in param_range:
param_range.append(param)
param_range = sorted(param_range)
return param_range
def predict(self, X, **kwargs):
orig_dir = os.getcwd()
os.chdir(self.context.experiment_tmp_dir) # for joblib
X = dt.Frame(X)
X = self.oob_imputer.transform(X)
model_tuple, _, _, _ = self.get_model_properties()
model, features = model_tuple
X = X.to_pandas()
if self._kaggle_features and features is not None:
X = features.transform(X)
X = X.replace([np.inf, -np.inf], np.nan)
X = X.fillna(value=0)
if self.num_classes == 1:
preds = model.predict(X)
else:
preds = model.predict_proba(X)
os.chdir(orig_dir)
return preds
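# Simplified, hedged illustration of the pipeline shape assembled in fit() above: impute and
# scale numeric columns, one-hot encode categoricals, then fit LogisticRegression. Column lists
# and hyperparameter values here are placeholders, not the values chosen by mutate_params() or
# the grid/CV searches.
def _pipeline_sketch(numeric_cols, categorical_cols):
    from sklearn.compose import make_column_transformer
    from sklearn.pipeline import make_pipeline
    from sklearn.impute import SimpleImputer
    from sklearn.preprocessing import StandardScaler, OneHotEncoder
    from sklearn.linear_model import LogisticRegression
    preprocess = make_column_transformer(
        (make_pipeline(SimpleImputer(strategy='mean'), StandardScaler()), numeric_cols),
        (OneHotEncoder(handle_unknown='ignore', sparse=True), categorical_cols),
    )
    return make_pipeline(preprocess, LogisticRegression(C=0.12, max_iter=700))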
class OOBImpute(object):
def __init__(self, impute_num_type, impute_int_type, impute_bool_type, impute_cat_type, oob_bool, oob_cat):
self._impute_num_type = impute_num_type
self._impute_int_type = impute_int_type
self._impute_bool_type = impute_bool_type
self._impute_cat_type = impute_cat_type
self._oob_bool = oob_bool
self._oob_cat = oob_cat
def fit(self, X: dt.Frame):
# just ignore output
self.fit_transform(X)
def fit_transform(self, X: dt.Frame):
# IMPUTE
# print("LR: types number of columns: %d : %d %d %d %d" % (len(X.names), len(X[:, [float]].names), len(X[:, [int]].names), len(X[:, [bool]].names), len(X[:, [str]].names)))
for col in X[:, [float]].names:
XX = X[:, col]
XX.replace(None, np.nan)
X[:, col] = XX
if self._impute_num_type == 'oob':
# Replace missing values with a value smaller than all observed values
self.min = dict()
for col in X[:, [float]].names:
XX = X[:, col]
self.min[col] = XX.min1()
if self.min[col] is None or np.isnan(self.min[col]):
self.min[col] = -1e10
else:
self.min[col] -= 1
XX.replace(None, self.min[col])
X[:, col] = XX
assert X[dt.isna(dt.f[col]), col].nrows == 0
if self._impute_int_type == 'oob':
# Replace missing values with a value smaller than all observed values
self.min_int = dict()
for col in X[:, [int]].names:
XX = X[:, col]
self.min_int[col] = XX.min1()
if self.min_int[col] is None or np.isnan(self.min_int[col]):
self.min_int[col] = 0
XX.replace(None, self.min_int[col])
X[:, col] = XX
assert X[dt.isna(dt.f[col]), col].nrows == 0
if self._impute_bool_type == 'oob':
for col in X[:, [bool]].names:
XX = X[:, col]
XX.replace(None, self._oob_bool)
X[:, col] = XX
assert X[dt.isna(dt.f[col]), col].nrows == 0
if self._impute_cat_type == 'oob':
for col in X[:, [str]].names:
XX = X[:, col]
XX.replace(None, self._oob_cat)
X[:, col] = XX
assert X[dt.isna(dt.f[col]), col].nrows == 0
return X
def transform(self, X: dt.Frame):
if self._impute_num_type == 'oob':
for col in X[:, [float]].names:
XX = X[:, col]
XX.replace(None, self.min[col])
X[:, col] = XX
if self._impute_int_type == 'oob':
for col in X[:, [int]].names:
XX = X[:, col]
XX.replace(None, self.min_int[col])
X[:, col] = XX
if self._impute_bool_type == 'oob':
for col in X[:, [bool]].names:
XX = X[:, col]
XX.replace(None, self._oob_bool)
X[:, col] = XX
if self._impute_cat_type == 'oob':
for col in X[:, [str]].names:
XX = X[:, col]
XX.replace(None, self._oob_cat)
X[:, col] = XX
return X
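# Tiny self-contained example of the "out-of-bounds" imputation idea implemented by OOBImpute
# above: a missing numeric becomes a value strictly smaller than anything observed, so a
# downstream model can still tell "missing" apart from real values. The 3-row frame is invented
# for illustration.
def _oob_impute_example():
    X = dt.Frame(x=[1.5, None, 3.0])
    imputer = OOBImpute('oob', 'oob', 'oob', 'oob', False, "__OOB_CAT__")
    X = imputer.fit_transform(X)
    # the missing value in column "x" is now min(x) - 1 == 0.5
    return X.to_pandas()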
class make_features(object):
_postfix = "@%@(&#%@))){}#"
def __init__(self, cache=False):
self.cache = cache
self.dai_te = False
self.other_te = True
self.new_names_dict = {}
self.raw_names_dict = {}
self.raw_names_dict_reversed = {}
self.spring = None
self.summer = None
self.fall = None
self.winter = None
self.monthcycle1 = None
self.monthcycle2 = None
self.weekend = None
self.daycycle1 = None
self.daycycle2 = None
self.lexi = None
self.ord5sorted = None
self.ord5more1 = None
self.ord5more2 = None
def apply_clone(self, src):
for k, v in src.__dict__.items():
setattr(self, k, v)
def fit_transform(self, X: pd.DataFrame, y=None, transform=False, **kwargs):
if not transform:
self.orig_cols = list(X.columns)
if 'IS_LEAKAGE' in kwargs or 'IS_SHIFT' in kwargs:
self.raw_names_dict = {v: v for v in list(X.columns)}
self.raw_names_dict_reversed = {v: k for k, v in self.raw_names_dict.items()}
else:
self.raw_names_dict = {Transformer.raw_feat_name(v): v for v in list(X.columns)}
self.raw_names_dict_reversed = {v: k for k, v in self.raw_names_dict.items()}
file = "munged_%s_%s_%d_%d.csv" % (__name__, transform, X.shape[0], X.shape[1])
file = file.replace("csv", "pkl")
file2 = file.replace("munged", "clone")
if self.cache and os.path.isfile(file) and os.path.isfile(file2):
# X = pd.read_csv(file, sep=',', header=0)
X = load_obj(file)
X = X.drop("target", axis=1, errors='ignore')
if not transform:
self.apply_clone(load_obj(file2))
return X
if 'bin_0' in self.raw_names_dict:
            X = X.drop(self.raw_names_dict['bin_0'], axis=1, errors='ignore')
if 'bin_3' in self.raw_names_dict:
            X = X.drop(self.raw_names_dict['bin_3'], axis=1, errors='ignore')
# use circular color wheel position for nom_0
def nom12num(x):
# use number of sides
d = {'Circle': 0, 'Polygon': -1, 'Star': 10, 'Triangle': 3, 'Square': 4, 'Trapezoid': 5}
return d[x]
X, self.sides = self.make_feat(X, 'nom_1', 'sides', nom12num)
def nom22num(x):
# use family level features expanded encoding or relative size for nom_2
# ordered by height
d = {'Snake': 0, 'Axolotl': 1, 'Hamster': 2, 'Cat': 3, 'Dog': 4, 'Lion': 5}
return d[x]
X, self.animal = self.make_feat(X, 'nom_2', 'animal', nom22num)
# def has_char(x, char):
# x_str = str(x)
# return 1 if char.upper() in x_str.upper() else 0
# self.haschars = [None] * len(self.orig_cols)
# for ni, c in enumerate(self.orig_cols):
# X, self.lenfeats[ni] = self.make_feat(X, c, 'len', get_len)
def get_len(x):
x_str = str(x)
return len(x_str)
self.lenfeats = [None] * len(self.orig_cols)
for ni, c in enumerate(self.orig_cols):
X, self.lenfeats[ni] = self.make_feat(X, c, 'len', get_len)
#
def get_first(x):
x_str = str(x)
return x_str[0] if len(x_str) > 0 else ""
self.firstchar = [None] * len(self.orig_cols)
for ni, c in enumerate(self.orig_cols):
X, self.firstchar[ni] = self.make_feat(X, c, 'firstc', get_first, is_float=False)
#
def get_last(x):
x_str = str(x)
return x_str[-1] if len(x_str) > 0 else ""
self.lastchar = [None] * len(self.orig_cols)
for ni, c in enumerate(self.orig_cols):
X, self.lastchar[ni] = self.make_feat(X, c, 'lastc', get_last, is_float=False)
#
hex_strings = ['nom_5', 'nom_6', 'nom_7', 'nom_8', 'nom_9']
#
if True:
# convert hex to binary and use as 8-feature (per hex feature) encoding
def get_charnum(x, i=None):
return str(x)[i]
width = 9
self.hexchar = [None] * len(hex_strings) * width
for ni, c in enumerate(hex_strings):
for nii in range(0, width):
X, self.hexchar[ni * width + nii] = self.make_feat(X, c, 'hexchar%d' % nii, get_charnum,
is_float=False, i=nii)
#
def hex_to_int(x):
                x_int = int(str(x), 16)  # parse the hex string directly instead of eval('0x...')
return x_int
self.hexints = [None] * len(hex_strings)
for ni, c in enumerate(hex_strings):
X, self.hexints[ni] = self.make_feat(X, c, 'hex2int', hex_to_int)
#
if False: # ValueError: could not convert string to float: b'\x05\x0f\x11k\xcf'
def hex_to_string(x):
try:
x_str = codecs.decode('0' + x, 'hex')
except:
x_str = codecs.decode(x, 'hex')
return x_str
self.hexstr = [None] * len(hex_strings)
for ni, c in enumerate(hex_strings):
X, self.hexstr[ni] = self.make_feat(X, c, 'hex2str', hex_to_string, is_float=False)
def bin012a(x):
return bool(x[0]) & bool(x[1]) & bool(x[2])
X, self.bin012a = self.make_feat(X, ['bin_0', 'bin_1', 'bin_2'], 'bin012a', bin012a)
def bin012b(x):
return (bool(x[0]) ^ bool(x[1])) ^ bool(x[2])
X, self.bin012b = self.make_feat(X, ['bin_0', 'bin_1', 'bin_2'], 'bin012b', bin012b)
def bin012c(x):
return bool(x[0]) ^ (bool(x[1]) ^ bool(x[2]))
X, self.bin012c = self.make_feat(X, ['bin_0', 'bin_1', 'bin_2'], 'bin012c', bin012c)
# TODO: manual OHE fixed width for out of 16 digits always (not sure all rows lead to all values)
# one-hot encode text by each character
# use geo-location for nom_3
# use static mapping encoding for ord_2 and ord_1
def ord12num1(x):
# ordered label
d = {'Novice': 0, 'Contributor': 1, 'Expert': 2, 'Master': 3, 'Grandmaster': 4}
return d[x]
X, self.kaggle1 = self.make_feat(X, 'ord_1', 'kaggle1', ord12num1)
def ord12num2(x):
# medals total
d = {'Novice': 0, 'Contributor': 0, 'Expert': 2, 'Master': 3, 'Grandmaster': 6}
return d[x]
X, self.kaggle2 = self.make_feat(X, 'ord_1', 'kaggle2', ord12num2)
def ord1master(x):
return 1 if 'master' in x or 'Master' in x else 0
X, self.kaggle3 = self.make_feat(X, 'ord_1', 'kaggle3', ord1master)
def ord22num(x):
# ordered label
d = {'Freezing': 0, 'Cold': 1, 'Warm': 2, 'Hot': 3, 'Boiling Hot': 4, 'Lava Hot': 5}
return d[x]
X, self.temp1 = self.make_feat(X, 'ord_2', 'temp1', ord22num)
def ord22num2(x):
# temp in F
d = {'Freezing': 32, 'Cold': 50, 'Warm': 80, 'Hot': 100, 'Boiling Hot': 212, 'Lava Hot': 1700}
return d[x]
X, self.temp2 = self.make_feat(X, 'ord_2', 'temp2', ord22num2)
def ord2hot(x):
return 1 if 'hot' in x or 'Hot' in x else 0
X, self.temp4 = self.make_feat(X, 'ord_2', 'temp4', ord2hot)
# lower ord_5
def ord5more0(x):
return x.lower()
X, self.ord5more0 = self.make_feat(X, 'ord_5', 'more0', ord5more0, is_float=False)
# 1st char, keep for OHE
def ord5more1(x):
return x[0]
X, self.ord5more1 = self.make_feat(X, 'ord_5', 'more1', ord5more1, is_float=False)
# 2nd char, keep for OHE
def ord5more2(x):
return x[1]
X, self.ord5more2 = self.make_feat(X, 'ord_5', 'more2', ord5more2, is_float=False)
# 1st char, keep for OHE
def ord5more3(x):
return x[0].lower()
X, self.ord5more3 = self.make_feat(X, 'ord_5', 'more3', ord5more3, is_float=False)
# 2nd char, keep for OHE
def ord5more4(x):
return x[1].lower()
X, self.ord5more4 = self.make_feat(X, 'ord_5', 'more4', ord5more4, is_float=False)
# 1st word, keep for OHE
def ord2more1(x):
return x.split(" ")[0]
X, self.ord2more1 = self.make_feat(X, 'ord_2', 'more1', ord2more1, is_float=False)
# 2nd word, keep for OHE
def ord2more2(x):
a = x.split(" ")
if len(a) > 1:
return a[1]
else:
return a[0]
X, self.ord2more2 = self.make_feat(X, 'ord_2', 'more2', ord2more2, is_float=False)
# use lexi LE directly as integers for alphabetical (ord_5, ord_4, ord_3)
orig_feat_names = ['ord_5', 'ord_4', 'ord_3',
'nom_0', 'nom_1', 'nom_2',
'nom_3', 'nom_4', 'nom_5',
'nom_6', 'nom_7', 'nom_8',
'nom_9', 'ord_1', 'ord_2']
orig_feat_names = [self.raw_names_dict_reversed[x] for x in
list(self.orig_cols)] # try just encoding all columns
new_names = ['lexi%d' % x for x in range(len(orig_feat_names))]
if not transform:
self.lexi = [None] * len(orig_feat_names)
self.lexi_names = [None] * len(orig_feat_names)
for ni, (new_name, orig_feat_name) in enumerate(zip(new_names, orig_feat_names)):
if orig_feat_name in self.raw_names_dict and self.raw_names_dict[orig_feat_name] in X.columns:
dai_feat_name = self.raw_names_dict[orig_feat_name]
if transform:
Xnew = self.lexi[ni].transform(X[[dai_feat_name]])
else:
self.lexi[ni] = LexiLabelEncoderTransformer([dai_feat_name])
Xnew = self.lexi[ni].fit_transform(X[[dai_feat_name]])
extra_name = self._postfix + new_name
new_feat_name = dai_feat_name + extra_name
Xnew.columns = [new_feat_name]
assert not any(pd.isnull(Xnew).values.ravel())
X = pd.concat([X, Xnew], axis=1)
self.new_names_dict[new_feat_name] = [dai_feat_name]
self.lexi_names[ni] = new_feat_name
if False: # already done by lexi encoding
# sorted label encoding of ord_5, use for numeric
orig_feat_name = 'ord_5'
new_name = 'ord5sorted'
if orig_feat_name in self.raw_names_dict and self.raw_names_dict[orig_feat_name] in X.columns:
dai_feat_name = self.raw_names_dict[orig_feat_name]
extra_name = self._postfix + new_name
new_feat_name = dai_feat_name + extra_name
if not transform:
self.ord_5_sorted = sorted(list(set(X[dai_feat_name].values)))
self.ord_5_sorted = dict(zip(self.ord_5_sorted, range(len(self.ord_5_sorted))))
X.loc[:, new_feat_name] = X[dai_feat_name].apply(
lambda x: self.ord_5_sorted[x] if x in self.ord_5_sorted else -1).astype(np.float32)
self.new_names_dict[new_feat_name] = [dai_feat_name]
self.ord5sorted = new_feat_name
# frequency encode everything
# keep as cat for OHE
if not transform:
self.freq = [None] * len(self.orig_cols)
self.freq_names = [None] * len(self.orig_cols)
for ni, c in enumerate(list(self.orig_cols)):
new_name = "freq%d" % ni
dai_feat_name = c
if transform:
Xnew = self.freq[ni].transform(X[[dai_feat_name]].astype(str)).to_pandas()
else:
self.freq[ni] = FrequentTransformer([dai_feat_name])
Xnew = self.freq[ni].fit_transform(X[[dai_feat_name]].astype(str)).to_pandas()
extra_name = self._postfix + new_name
new_feat_name = dai_feat_name + extra_name
Xnew.columns = [new_feat_name]
assert not any(pd.isnull(Xnew).values.ravel())
X = pd.concat([X, Xnew], axis=1)
self.new_names_dict[new_feat_name] = [dai_feat_name]
self.freq_names[ni] = new_feat_name
if self.dai_te:
# target encode everything
# use as numeric and categorical
if not transform:
self.te = [None] * len(self.orig_cols)
self.te_names = [None] * len(self.orig_cols)
for ni, c in enumerate(list(self.orig_cols)):
new_name = "te%d" % ni
dai_feat_name = c
if transform:
Xnew = self.te[ni].transform(X[[dai_feat_name]].astype(str), y).to_pandas()
else:
self.te[ni] = CVTargetEncodeTransformer([dai_feat_name])
Xnew = self.te[ni].fit_transform(X[[dai_feat_name]].astype(str), y).to_pandas()
extra_name = self._postfix + new_name
new_feat_name = dai_feat_name + extra_name
Xnew.columns = [new_feat_name]
assert not any(pd.isnull(Xnew).values.ravel())
X = pd.concat([X, Xnew], axis=1)
self.new_names_dict[new_feat_name] = [dai_feat_name]
self.te_names[ni] = new_feat_name
if self.other_te:
# target encode lexilabel encoded features
# use as numeric and categorical
if not transform:
self.teo = [None] * len(self.lexi_names)
self.teo_names = [None] * len(self.lexi_names)
for ni, c in enumerate(self.lexi_names):
if c is None:
continue
new_name = "teo%d" % ni
dai_feat_name = c
X_local = X.loc[:, [dai_feat_name]].astype(str)
if transform:
Xnew = pd.DataFrame(self.teo[ni].transform_test(X_local))
else:
from target_encoding import TargetEncoder
ALPHA, MAX_UNIQUE, FEATURES_COUNT = get_TE_params(X_local, debug=False)
self.teo[ni] = TargetEncoder(alpha=ALPHA, max_unique=MAX_UNIQUE, split_in=[3])
Xnew = pd.DataFrame(self.teo[ni].transform_train(X=X_local, y=y))
extra_name = self._postfix + new_name
new_feat_name = dai_feat_name + extra_name
Xnew.columns = [new_feat_name]
assert not any( | pd.isnull(Xnew) | pandas.isnull |
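# Concept sketch only (plain pandas, not the API of the `target_encoding` package used above):
# smoothed target encoding replaces each category with a blend of the per-category target mean
# and the global mean, weighted by the category count and a smoothing constant alpha. All
# values below are invented.
def _smoothed_target_encoding_sketch():
    import pandas as pd
    df = pd.DataFrame({'cat': ['a', 'a', 'b', 'b', 'b', 'c'],
                       'y': [1, 0, 1, 1, 0, 1]})
    alpha, global_mean = 10.0, df['y'].mean()
    stats = df.groupby('cat')['y'].agg(['mean', 'count'])
    encoded = (stats['mean'] * stats['count'] + global_mean * alpha) / (stats['count'] + alpha)
    return df['cat'].map(encoded)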
#!/usr/bin/env python
#
# analysis.py
#
# Copyright (c) 2018 <NAME>. All rights reserved.
import argparse
import time
import sys
import random
from sort import *
import pandas as pd
import matplotlib.pyplot as plt
# Utility
def print_err(*args, **kwargs):
print(*args, **kwargs, file=sys.stderr)
def parse_int(n):
try:
return int(n)
except ValueError:
return None
# Analysis
def analyze(sort_func, array, order=Order.LE):
""" Sorting method wrapper.
Execute sorting method on specified array.
Return Stats object filled with statistics.
"""
stats = Stats(len(array))
start = time.time()
sort_func(array, order=order, stats=stats)
end = time.time()
stats.time = end - start
return stats
def analyze_random(count, output=None, input=None):
""" Perform analysis using random arrays of sizes in 100...10000,
and plot them.
input -- input csv file
output -- output file name
"""
print_err('Random analysis started...')
if input is None:
row_list = []
alg_list = [(merge_sort, Algorithm.MERGE),
(quicksort, Algorithm.QUICK),
(dual_pivot_quicksort, Algorithm.DPQUICK),
(radix_sort, Algorithm.RADIX),
(hybrid_sort, Algorithm.HYBRID)]
for n in range(100, 10100, 100):
for func, alg in alg_list:
for _ in range(count):
arr = random.sample(range(n), n)
d = vars(analyze(func, arr))
d['algorithm'] = alg.name.lower()
row_list.append(d)
del arr
print_err("COMPLETED {} OK".format(n))
df = pd.DataFrame(row_list)
        if output is not None:
df.to_csv(output)
print("File saved")
else:
df = pd.read_csv(input)
df['ncomp/n'] = np.where(df['length'] < 1, df['length'], df['ncomp']/df['length'])
df['nswap/n'] = np.where(df['length'] < 1, df['length'], df['nswap']/df['length'])
grouped = df.groupby(['length', 'algorithm']).mean(numeric_only=True)
ncomp_g = grouped.loc[:, ['ncomp']]
ncomp_g = pd.pivot_table(ncomp_g, values='ncomp', index='length', columns='algorithm')
nswap_g = grouped.loc[:, ['nswap']]
nswap_g = | pd.pivot_table(nswap_g, values='nswap', index='length', columns='algorithm') | pandas.pivot_table |
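# Toy illustration of the reshape performed above: analyze_random() averages the per-run stats
# by (length, algorithm) and then pivots so each algorithm becomes its own column, ready for
# plotting. The numbers are invented.
def _pivot_sketch():
    import pandas as pd
    runs = pd.DataFrame({'length': [100, 100, 200, 200],
                         'algorithm': ['merge', 'quick', 'merge', 'quick'],
                         'ncomp': [540, 620, 1220, 1400]})
    grouped = runs.groupby(['length', 'algorithm']).mean(numeric_only=True)
    ncomp_g = grouped.loc[:, ['ncomp']]
    return pd.pivot_table(ncomp_g, values='ncomp', index='length', columns='algorithm')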
#from matplotlib.pyplot import title
import streamlit as st
import streamlit.components.v1 as components
import pandas as pd
import plotly.express as px
from modzy import ApiClient
from modzy._util import file_to_bytes
import json
from sklearn.manifold import TSNE
import numpy as np
from pyvis.network import Network
from sklearn.cluster import KMeans
from wordcloud import WordCloud
import matplotlib.pyplot as plt
st.set_option('deprecation.showPyplotGlobalUse', False)
st.sidebar.image('text.png')
#col1,col2 = st.columns([1,6])
st.image('subtext.png')
#df=pd.read_csv("https://raw.githubusercontent.com/pupimvictor/NetworkOfThrones/master/stormofswords.csv")
df = pd.read_csv("https://raw.githubusercontent.com/napoles-uach/Data/main/got1.csv")
df=df[['Source','Target','weight']]
#st.write(df)
#weigths=df['weight'].tolist()
def got_func():
got_net = Network(height="600px", width="100%", heading='A song of Ice and Fire (Book 1) Graph')#,bgcolor='#222222', font_color='white')
# set the physics layout of the network
#got_net.barnes_hut()
got_net.force_atlas_2based()
#got_net.show_buttons(filter_=True)
#got_data = pd.read_csv("https://www.macalester.edu/~abeverid/data/stormofswords.csv")
got_data = pd.read_csv("https://raw.githubusercontent.com/napoles-uach/Data/main/got1.csv")
#got_data = pd.read_csv("stormofswords.csv")
#got_data.rename(index={0: "Source", 1: "Target", 2: "Weight"})
sources = got_data['Source']
targets = got_data['Target']
weights = got_data['weight']
edge_data = zip(sources, targets, weights)
for e in edge_data:
src = e[0]
dst = e[1]
w = e[2]
got_net.add_node(src, src, title=src, color='red')
got_net.add_node(dst, dst, title=dst,color='red')
got_net.add_edge(src, dst, value=w)
neighbor_map = got_net.get_adj_list()
# add neighbor data to node hover data
for node in got_net.nodes:
node["title"] += " Neighbors:<br>" + "<br>".join(neighbor_map[node["id"]])
node["value"] = len(neighbor_map[node["id"]])
got_net.show("gameofthrones.html")
got_func()
HtmlFile = open("gameofthrones.html", 'r', encoding='utf-8')
source_code = HtmlFile.read()
#check_graph = st.sidebar.checkbox('Show Graph')
#if check_graph:
with st.expander('Show Graph'):
components.html(source_code, width=670,height=700)
text = open("edges.txt","w")
text.write('graph')
for i in range(len(df)):
text.write('\n%s' % str(df.iloc[i][0]).replace(" ", "")+" "+str(df.iloc[i][1]).replace(" ", "")+" "+str(df.iloc[i][2]))
text.close()
f = open('edges.txt','r',encoding='utf-8')
client = ApiClient(base_url="https://app.modzy.com/api", api_key="<KEY>")
sources = {}
sources["my-input"] = {
"edges.txt": f.read(),
}
@st.cache()
def res(sources):
job = client.jobs.submit_text("sixvdaywy0", "0.0.1", sources)
result = client.results.block_until_complete(job, timeout=None)
return result
#job = client.jobs.submit_text("sixvdaywy0", "0.0.1", sources)
#result = client.results.block_until_complete(job, timeout=None)
result = res(sources)
#st.button('Download')
#st.balloons()
#st.stop()
results_json = result.get_first_outputs()['results.json']
x = results_json['Node Embeddings']
names_dict = []
vec_dict = []
for names in x:
names_dict.append(names)
v=x[names].split()
vec_dict.append(v)
# convert a list of string numbers to a list of float numbers
def convert_to_float(l):
return [float(i) for i in l]
vec_dict = [convert_to_float(i) for i in vec_dict]
chart_data=pd.DataFrame(vec_dict)
#traspose of the dataframe
chart_data=chart_data.T
#column names are the names of the nodes
chart_data.columns=names_dict
#st.bar_chart(chart_data['Aegon'])
#st.bar_chart(chart_data['Worm'])
@st.cache()
def do_tsne(vector,randomst,perp):
tsne = TSNE(random_state=randomst,perplexity=perp)
return tsne.fit_transform(vector)
digits_tsne = do_tsne(vec_dict,42,50)
#st.write('aqui')
#st.write(digits_tsne)
with st.sidebar.expander('TSNE'):
clusters_n = st.slider('Number of clusters for TSNE', min_value=3, max_value=10, value=3)
kmeans = KMeans(n_clusters=clusters_n, random_state=0).fit(digits_tsne)
#st.write(kmeans.labels_)
ejex = []
ejey = []
indice=[]
for i in range(len( digits_tsne )):
ejex.append( digits_tsne[i][0] )
ejey.append( digits_tsne[i][1] )
indice.append(i)
dic = {'ejex':ejex,'ejey':ejey,'indice':indice}
df = pd.DataFrame(dic)
#add a column with the name of the node
df['nombre']=names_dict
df['labels']=kmeans.labels_
#st.write(df)
fig = px.scatter(df,x='ejex',y='ejey',hover_data=['nombre'],color='labels')
#with st.sidebar.expander('TSNE'):
#check_tsne = st.sidebar.checkbox('Show TSNE plot')
#if check_tsne:
with st.expander('Modzy app 1'):
'''
### Graph Embeddings
#### Description
This model can be used to explore possible relationships between entities, such as finding people who share similar interests or finding biological interactions between pairs of proteins. Graphs are particularly useful for describing relational entities, and graph embedding is an approach used to transform a graph’s structure into a format digestible by an AI model, whilst preserving the graph’s properties.
Graph structures can widely vary in terms of their scale, specificity, and subject, making graph embedding a difficult task.
'''
with st.expander('Graph Embedings'):
st.write(chart_data)
with st.expander('Show TSNE plot'):
st.plotly_chart(fig)
text = open("text.txt","r")
paragraph = text.read().split('\n\n')
paragraphs_df = | pd.DataFrame(paragraph) | pandas.DataFrame |
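# Rough, hedged illustration (invented vectors, not computed by the app above): once each node
# has an embedding column in chart_data, cosine similarity between two columns is one simple
# way to quantify the "possible relationships between entities" that the model description
# mentions.
def _cosine_similarity_sketch():
    import numpy as np
    a = np.array([0.10, 0.80, -0.20])  # e.g. chart_data['Aegon'].values
    b = np.array([0.05, 0.70, -0.10])  # e.g. chart_data['Worm'].values
    return float(np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b)))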