prompt | completion | api
---|---|---
stringlengths 19 to 1.03M | stringlengths 4 to 2.12k | stringlengths 8 to 90
"""
Filename: data_manager.py
Authors:
<NAME> - <EMAIL>
<NAME> - <EMAIL>
Description:
Python class to manage the dataset.
To-do:
"""
# standard imports
import logging as log
import os
import sys
sys.path.insert(0, os.path.join(os.path.dirname(__file__), './'))
# third party imports
import pandas as pd
import xml.etree.ElementTree as ET
# local imports
from utilities import get_pd_of_statement
class DataManager:
"""
Class for managing the data.
"""
def __init__(self, data_paths, map_file=None, data_rule_file=None, replace_rule_file=None):
"""
Class constructor for DataManager. All inputs are optional,
since the name mapping alone may be used elsewhere.
Inputs:
data_paths: (str) Filepath containing data sources.
map_file: (str, optional) Filepath of name mapping table.
data_rule_file: (str, optional) Filepath for knowledge inference.
replace_rule_file: (str, optional) Filepath for any replacements.
"""
self.data_paths = data_paths
self.map_file = map_file
self.data_rule_file = data_rule_file
self.replace_rule_file = replace_rule_file
def integrate(self):
"""
Integrate data from multiple sources.
Returns:
pd_integrated: (pd.DataFrame) Integrated data.
"""
list_integrated = []
pd_data_paths = pd.read_csv(self.data_paths, sep='\t')
# iterate over each dataset and perform name mapping
for _, row in pd_data_paths.iterrows():
source = row['Source']
path = row['Path']
log.info('Processing source %s using %s', source, path)
pd_data = pd.read_csv(path, sep='\t')
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
from numpy import dtype
from matplotlib.pyplot import ylabel
from matplotlib.cm import ScalarMappable
from matplotlib.pyplot import savefig
import math
from getCpuUsageForStage import *
import sys
from argparse import ArgumentParser
parser = ArgumentParser()
parser.add_argument("-i", "--inputFile")
parser.add_argument("-t", "--topFile")
parser.add_argument("-o", "--outputFile")
args = parser.parse_args(sys.argv[1:])
inputFileName = args.inputFile
topFileName = args.topFile
outputFileName = args.outputFile
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 2000)
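# Added usage note (not part of the original script): a typical invocation, with
# hypothetical file names, would be
#   python plot_cpu_usage.py -i cpu_usage.csv -t top_output.txt -o cpu_plot.png
# where the three flags populate inputFileName, topFileName and outputFileName above.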
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# <EMAIL>
# Merges the price time series with the news-info time series
import pandas as pd
dateparse_prices = lambda x: pd.datetime.strptime(x, '%m/%d/%Y %H:%M')
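# Added usage sketch (not part of the original script): shows how a parser such as
# dateparse_prices is typically passed to pandas when loading a CSV; the file name
# and column name below are hypothetical.
def _example_load_prices():
    return pd.read_csv('prices.csv', parse_dates=['timestamp'],
                       date_parser=dateparse_prices)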
from sklearn import *
import numpy as np
import pandas as panda
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
from nltk.stem.porter import *
import string
import nltk
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import train_test_split, GridSearchCV
from textstat.textstat import *
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import f1_score
from sklearn.feature_selection import SelectFromModel
from sklearn.metrics import classification_report
from sklearn.metrics import accuracy_score
from sklearn.svm import LinearSVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.naive_bayes import GaussianNB
import tweepy
import pandas as pd
from sklearn.metrics import f1_score, accuracy_score, precision_score, recall_score, make_scorer
from time import time
import pickle
from sklearn.naive_bayes import MultinomialNB
from sklearn.tree import DecisionTreeClassifier
from sklearn.svm import LinearSVC
from sklearn.ensemble import AdaBoostClassifier, RandomForestClassifier, BaggingClassifier
from sklearn.linear_model import LogisticRegression, SGDClassifier
from sklearn.neighbors import KNeighborsClassifier
import joblib
def predt():
model=joblib.load('cb_sgd_final.sav') #path of classifier
df = panda.read_csv("./livedata/real_time_tweets.csv")
import json
import requests
import numpy as np
import altair as alt
import pandas as pd
import mygene
class EnrichR(object):
def __init__(self, list_genes):
self.list_genes = list_genes
self.len_list = len(list_genes)
@classmethod
def get_libraries(self):
"""return active enrichr library name. Official API """
lib_url='https://maayanlab.cloud/Enrichr/datasetStatistics'
response = requests.get(lib_url, verify=True)
if not response.ok:
raise Exception("Error getting the Enrichr libraries")
libs_json = json.loads(response.text)
libs = [lib['libraryName'] for lib in libs_json['statistics']]
return sorted(libs)
@classmethod
def get_id(self, list_genes):
ENRICHR_URL = 'https://maayanlab.cloud/Enrichr/addList'
genes_str = '\n'.join(list_genes)
description = 'Example gene list'
payload = {
'list': (None, genes_str),
'description': (None, description)
}
response = requests.post(ENRICHR_URL, files=payload)
if not response.ok:
raise Exception('Error analyzing gene list')
data = json.loads(response.text)
user_id = data['userListId']
return user_id
@classmethod
def get_enrichment(self, list_genes, library_name='KEGG_2021_Human'):
'''Returns Rank, Term name, P-value, Z-score, Combined score,
Overlapping genes, Adjusted p-value, Old p-value and
Old adjusted p-value. Default library is KEGG_2021_Human.'''
user_id = self.get_id(list_genes)
ENRICHR_URL = 'https://maayanlab.cloud/Enrichr/enrich'
query_string = '?userListId=%s&backgroundType=%s'
user_list_id = user_id
gene_set_library = library_name
response = requests.get(
ENRICHR_URL + query_string % (user_list_id, gene_set_library))
if not response.ok:
raise Exception('Error fetching enrichment results')
res = json.loads(response.text)
return res, gene_set_library
@classmethod
def get_table_enrichment(self, list_genes, library_name='KEGG_2021_Human'):
dct = self.get_enrichment(list_genes)
df = pd.DataFrame(dct[0][library_name])
df.columns = ['Rank', 'Term name', 'P-value', 'Z-score', 'Combined score',
'Overlapping genes', 'Adjusted p-value', 'Old p-value',
'Old adjusted p-value']
return df
@classmethod
def plot_enrichment(self, list_genes, library_name='KEGG_2021_Human', height = 200, width = 300, max_hits = None):
'''Plot ordered enrichment scores
as -log10(pval)'''
if library_name=='KEGG_2021_Human':
library_name = self.get_enrichment(list_genes)[1]
res = self.get_enrichment(list_genes)[0]
else:
library_name = library_name
res = self.get_enrichment(list_genes, library_name = library_name)[0]
labels=[]
p_val=[]
for i in range(0, len(res[library_name])):
labels.append(res[library_name][i][1])
p_val.append(res[library_name][i][2])
df_plot = pd.DataFrame(labels, columns=['labels'])
from __future__ import print_function
import sys
import pandas as pd
import numpy.matlib
import numpy as np
import scipy
import time
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import sklearn
from sklearn import preprocessing
from sklearn import mixture
from sklearn.neighbors.kde import KernelDensity
import glob
import seaborn as sns
import collections
sns.set_context('talk')
sns.set_style('white')
sns.set_style("ticks")
import re
from scipy import sparse, io
import os
import math
import csv
import fbpca
from matplotlib import rcParams
import numpy as np
import scipy.stats as stats
from scipy.stats import gaussian_kde
import statsmodels.api as sm
import statsmodels
from statsmodels.distributions.empirical_distribution import ECDF
#GO imports
#from goatools.obo_parser import GODag
#from goatools.associations import read_ncbi_gene2go
#from goatools.go_enrichment import GOEnrichmentStudy
# The following packages are typically not installed by default in Python installations, but would enable some additional functionality
#import Levenshtein (edit_dist)
#import infomap (info_cluster)
#import networkx as nx (info_cluster)
## progress bar
def update_progress(progress):
barLength = 10 # Modify this to change the length of the progress bar
status = ""
if isinstance(progress, int):
progress = float(progress)
if not isinstance(progress, float):
progress = 0
status = "error: progress var must be float\r\n"
if progress < 0:
progress = 0
status = "Halt...\r\n"
if progress >= 1:
progress = 1
status = "Done...\r\n"
block = int(round(barLength*progress))
text = "\rPercent: [{0}] {1}% {2}".format( "#"*block + "-"*(barLength-block), progress*100, status)
sys.stdout.write(text)
sys.stdout.flush()
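# Added usage sketch (not in the original module): update_progress expects a float in
# [0, 1] and rewrites the same console line on each call.
def _example_update_progress(n_steps=50):
    for step in range(n_steps + 1):
        update_progress(step / float(n_steps))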
#######read input############
def read_10x(pathin):
"""Return Pandas Dataframe containing 10x dataset """
mat=scipy.io.mmread(os.path.join(pathin, "matrix.mtx"))
genes_path = os.path.join(pathin, "genes.tsv")
gene_ids = [row[0] for row in csv.reader(open(genes_path), delimiter="\t")]
gene_names = [row[1] for row in csv.reader(open(genes_path), delimiter="\t")]
gene_final = [x+'_'+y for x,y in zip(gene_ids,gene_names)]
barcodes_path = os.path.join(pathin, "barcodes.tsv")
barcodes = [row[0][0:14] for row in csv.reader(open(barcodes_path), delimiter="\t")]
DGE=pd.DataFrame(mat.toarray())
DGE.index=gene_final
DGE.columns=barcodes
return DGE
def genenames_from10x(genelist):
"""Return gene names from 10x index generated with read_10x """
genesymbol=[]
#ensemblid=[]
for i in range(len(genelist)):
curgene=genelist[i]
starts=[]
for x in re.finditer('_',curgene):
starts.append(x.start()+1)
genesymbol.append(curgene[starts[-1]:])
return genesymbol#,ensemblid
def genenames_from10x_mod(genelist):
"""Return gene names from 10x index generated with read_10x """
genesymbol=[]
#ensemblid=[]
for i in range(len(genelist)):
curgene=genelist[i]
starts=[]
for x in re.finditer('_',curgene):
starts.append(x.start()+1)
genesymbol.append(curgene[starts[0]:])
return genesymbol#,ensemblid
def collapse2gene(DGE):
DGE_gene=DGE.copy()
DGE_gene.index=genenames_from10x(DGE_gene.index)
DGE_gene=DGE_gene.groupby(DGE_gene.index).sum()
return DGE_gene
def guide2gene(guide):
"""get genename between underscores"""
underscore_pos = []
count=0
if ('INTERGENIC' in guide):
nameout='INTERGENIC'
elif ('_' in guide):
for x in re.finditer('_',guide):
if count<2:
underscore_pos.append(x.span()[1])
nameout=re.sub('sg','',guide[underscore_pos[0]:underscore_pos[1]-1])
else:
nameout=guide
return nameout
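# Added usage sketch (not in the original module): guide names below are illustrative
# of the "<prefix>_sg<GENE>_<number>" format that guide2gene expects.
def _example_guide2gene():
    assert guide2gene('lib1_sgTP53_2') == 'TP53'
    assert guide2gene('INTERGENIC_ctrl_1') == 'INTERGENIC'
    assert guide2gene('plainguide') == 'plainguide'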
def get_batches(cbcs):
"""Return batch - last underscore in column names"""
batchvec=[]
for cell in cbcs:
starts=[]
for x in re.finditer('_',cell):
starts.append(x.start()+1)
batchvec.append(cell[starts[-2]:])
return np.array(batchvec)
def genelevel_dict(GUIDES_DICT):
"""Collapse guide level dictionary to gene level using guide2gene"""
genes=[guide2gene(x) for x in GUIDES_DICT.keys()]
GUIDES_DICT_GENES={}
for gene in genes:
GUIDES_DICT_GENES[gene]=[]
for key in GUIDES_DICT.keys():
GUIDES_DICT_GENES[guide2gene(key)].extend(GUIDES_DICT[key])
return GUIDES_DICT_GENES
####transform data#######
def tp10k_transform(DGE,norm_factor=1.0e4):
"""normalize columns of pandas dataframe to sum to a constant, by default 10,000"""
return(norm_factor*(DGE / DGE.sum()))
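# Added usage sketch (not in the original module): a toy genes x cells frame; after
# tp10k_transform every column (cell) sums to 10,000 by default.
def _example_tp10k_transform():
    toy = pd.DataFrame({'cell1': [1, 3], 'cell2': [2, 2]},
                       index=['geneA', 'geneB'])
    return tp10k_transform(toy)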
def Zcells(DGE):
"""Z transformation of columns of pandas"""
DGEZ=DGE.copy()
DGEZ=pd.DataFrame(sklearn.preprocessing.scale(DGE,axis=0))
DGEZ.index=DGE.index
DGEZ.columns=DGE.columns
return DGEZ
def Zgenes(DGE,batchvec=None):
"""Z transformation of rows of pandas, option for per batch normalization"""
DGEZ=DGE.copy()
if batchvec is None:
DGEZ=pd.DataFrame(sklearn.preprocessing.scale(DGEZ,axis=1))
DGEZ.columns=DGE.columns
DGEZ.index=DGE.index
else:
batch=np.unique(batchvec)
for curbatch in batch:
DGEZ.ix[:,np.array(batchvec)==curbatch]=sklearn.preprocessing.scale(DGEZ.ix[:,np.array(batchvec)==curbatch],axis=1)
return DGEZ
def Zgenes_floor(DGE,floor=0,batchvec=None):
"""Z transformation of rows of pandas dataframe, with flooring of std dev, option for per batch normalization"""
DGEZ=DGE.copy()
if batchvec is None:
curstd=DGE.std(axis=1)+floor
curmean=DGE.mean(axis=1)
curZ=(DGEZ.subtract(curmean,axis=0)).divide(curstd,axis=0)
DGEZ=curZ
DGEZ.columns=DGE.columns
DGEZ.index=DGE.index
else:
batch=np.unique(batchvec)
for curbatch in batch:
curDGE=DGEZ.ix[:,np.array(batchvec)==curbatch]
curstd=curDGE.std(axis=1)+floor
curmean=curDGE.mean(axis=1)
curZ=(curDGE.subtract(curmean,axis=0)).divide(curstd,axis=0)
DGEZ.ix[:,np.array(batchvec)==curbatch]=np.array(curZ)
return DGEZ
def Centergenes(DGE,batchvec=None):
"""Median centering of rows of pandas, option for per batch normalization"""
DGEC=DGE.copy()
if batchvec is None:
DGEC=DGEC.subtract(DGEC.median(axis=1),axis='rows')
else:
batch=np.unique(batchvec)
for curbatch in batch:
DGEC.ix[:,np.array(batchvec)==curbatch]=DGEC.ix[:,np.array(batchvec)==curbatch].subtract(DGEC.ix[:,np.array(batchvec)==curbatch].median(axis=1),axis='rows')
return DGEC
def permute_matrix(DGE,bins=20,verbose=0):
"""Permute genes based on similar expression levels"""
DGE_perm=DGE.copy()
GSUMS=np.sum(DGE,axis=1)
breakvec = np.linspace(1,100,bins)
breaks=[]
for breaker in breakvec:
breaks.append(np.percentile(GSUMS,breaker))
breaks=np.unique(breaks)
for i in range(len(breaks)-1):
if verbose==1:
print(np.round((1.0*i)/(len(breaks)-1)))
for j in range(len(DGE.columns)):
curlogical=np.logical_and(GSUMS>breaks[i],GSUMS<=breaks[i+1])
DGE_perm.ix[curlogical,j]=np.random.permutation(DGE_perm.ix[curlogical,j])
return DGE_perm
def downsample_reads(DF,per_reads=1.0,nrpc=None):
DF_mod=DF.copy()
numgenes=np.shape(DF_mod)[0]
genenames=DF_mod.index
DF_mod.index=range(numgenes)
cells=DF_mod.columns
readspercell=np.sum(DF_mod,axis=0)
totalreads =np.sum(readspercell)
newreads =np.round(totalreads*per_reads)
cellpercents=np.divide(1.0*readspercell,totalreads)
if nrpc:
newreadspercell=nrpc
else:
newreadspercell=[int(x) for x in np.round(np.multiply(cellpercents,newreads))]
DF_out=pd.DataFrame()
for i in range(len(cells)):
vectorize=[]
curcell=DF_mod[cells[i]]
curcell=curcell[curcell!=0]
for j in curcell.index:
vectorize.extend([j]*curcell[j])
vec_sample=np.random.choice(vectorize,size=newreadspercell[i],replace=False)
sampled_vec=np.histogram(vec_sample,bins=range(numgenes+1))[0]
DF_out[cells[i]]=sampled_vec
DF_out.index=genenames
return DF_out
def downsampler(DF,percell=1.0,perreads=1.0):
if percell==1.0:
DF_sampled=DF.copy()
else:
newcells=int(np.round(np.shape(DF)[1]*percell))
DF_sampled=DF.sample(newcells,axis=1)
if perreads==1.0:
return DF_sampled
else:
return downsample_reads(DF_sampled,perreads)
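# Added usage sketch (not in the original module): downsample a toy digital gene
# expression matrix to 50% of the cells and 80% of the reads; all numbers are
# illustrative.
def _example_downsampler():
    toy = pd.DataFrame(np.random.poisson(5, size=(20, 10)),
                       index=['gene%d' % g for g in range(20)],
                       columns=['cell%d' % c for c in range(10)])
    return downsampler(toy, percell=0.5, perreads=0.8)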
###########generate covariates#########
def dict2X(GUIDES_DICT,cbcs):
"""convert guide cbc dictionary into covariate matrix"""
X=pd.DataFrame()
for key in GUIDES_DICT.keys():
curkey=[]
for cbc in cbcs:
if cbc in GUIDES_DICT[key]:
curkey.append(1)
else:
curkey.append(0)
X[key]=np.array(curkey)
X.index=cbcs
return X
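# Added usage sketch (not in the original module): the guide-to-cell-barcode
# dictionary and barcodes below are illustrative.
def _example_dict2X():
    guides = {'lib1_sgTP53_1': ['cell1', 'cell3'], 'lib1_sgMYC_1': ['cell2']}
    cbcs = ['cell1', 'cell2', 'cell3']
    X = dict2X(guides, cbcs)
    # X is a cells x guides indicator matrix, e.g. X.loc['cell1', 'lib1_sgTP53_1'] == 1
    return X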
def clusters2X(clusters,cbcs):
"""convert cell cluster cbc dictionary into covariate matrix"""
clusterun=clusters.columns
X=pd.DataFrame(np.zeros((len(cbcs),len(clusterun))))
X.index=cbcs
clusters_intersect=clusters.loc[list(set(clusters.index).intersection(set(cbcs)))]
X.loc[clusters_intersect.index]=clusters_intersect
return X
def Xguides2genes(DF):
Xgene=DF.copy()
Xgene=Xgene.T
Xgene.index=[guide2gene(x) for x in Xgene.index]
Xgene_group=(Xgene.groupby(Xgene.index).sum()>0).sum()
XgeneF=1.0*(Xgene.groupby(Xgene.index).sum()>0).T
return XgeneF
def Y2FlatCov(Y,verbose=0):
ngenes=np.shape(Y)[0]
triuind=np.triu_indices(ngenes)
curnames=Y.index
covgenes=[curnames[x]+'-'+curnames[y] for x,y in zip(triuind[0],triuind[1])]
triu_mask=np.triu(np.ones((ngenes,ngenes))).astype(np.bool)
ncells=np.shape(Y)[1]
i=0
COVout=pd.DataFrame(np.zeros((len(triuind[0]),ncells)))
COVout.columns=Y.columns
for col in Y:
update_progress(np.divide(1.0*i,ncells))
cell=pd.DataFrame(Y[col])
#cell=np.divide(cell,np.linalg.norm(cell))
cellcov=cell.dot(cell.T)
triucellcov=cellcov.where(np.triu(np.ones(cellcov.shape)).astype(np.bool)).values.flatten()
triucellcov=triucellcov[~numpy.isnan(triucellcov)]
COVout[col]=triucellcov
i+=1
COVout.index=covgenes
return COVout
def create_interactions(DF):
"""Take covariate matrix and generate pairwise interaction matrix between covariates"""
INTERACT=pd.DataFrame()
dfcolumns=DF.columns
groupthese=[]
for i in range(len(dfcolumns)):
for j in range(len(dfcolumns)):
name1=dfcolumns[i]
name2=dfcolumns[j]
if(i<j):
twonames=np.sort(list(set([str(name1),str(name2)])))
if len(twonames)==2:
INTERACT[str(name1)+'-'+str(name2)]=np.array(DF.ix[:,i])*np.array(DF.ix[:,j])
groupthese.append(str(twonames[0])+'-'+str(twonames[1]))
#INTERACT.columns=[guide2gene(x.split('-')[0])+'-'+guide2gene(x.split('-')[1]) for x in INTERACT.columns]
INTERACT=INTERACT.T
INTERACT['genes']=INTERACT.index
INTERACT=INTERACT.groupby(groupthese).sum().T
INTERACT=INTERACT>0
INTERACT.index=DF.index
return(1.0*INTERACT)
def create_interactions_nothresh(DF):
"""Take covariate matrix and generate pairwise interaction matrix between covariates"""
INTERACT=pd.DataFrame()
dfcolumns=DF.columns
groupthese=[]
for i in range(len(dfcolumns)):
for j in range(len(dfcolumns)):
name1=dfcolumns[i]
name2=dfcolumns[j]
if(i<j):
twonames=np.sort(list(set([str(name1),str(name2)])))
if len(twonames)==2:
INTERACT[str(name1)+'-'+str(name2)]=np.array(DF.ix[:,i])*np.array(DF.ix[:,j])
groupthese.append(str(twonames[0])+'-'+str(twonames[1]))
#INTERACT.columns=[guide2gene(x.split('-')[0])+'-'+guide2gene(x.split('-')[1]) for x in INTERACT.columns]
INTERACT=INTERACT.T
INTERACT=INTERACT.groupby(groupthese).sum().T
INTERACT.index=DF.index
return(1.0*INTERACT)
def create_3_interactions(DF):
"""Take covariate matrix and generate three-way interaction matrix between covariates"""
INTERACT=pd.DataFrame()
dfcolumns=DF.columns
groupthese=[]
for i in range(len(dfcolumns)):
for j in range(len(dfcolumns)):
for k in range(len(dfcolumns)):
if((i<j)&(i<k)):
name1=dfcolumns[i]
name2=dfcolumns[j]
name3=dfcolumns[k]
threenames=np.sort(list(set([str(name1),str(name2),str(name3)])))
if len(threenames)==3:
INTERACT[str(name1)+'-'+str(name2)+'-'+str(name3)]=np.array(DF.ix[:,i])*np.array(DF.ix[:,j])*np.array(DF.ix[:,k])
groupthese.append(str(threenames[0])+'-'+str(threenames[1])+'-'+str(threenames[2]))
#INTERACT.columns=[guide2gene(x.split('-')[0])+'-'+guide2gene(x.split('-')[1])+'-'+guide2gene(x.split('-')[2]) for x in INTERACT.columns]
INTERACT=INTERACT.T
INTERACT['genes']=INTERACT.index
INTERACT=INTERACT.groupby(groupthese).sum().T
INTERACT=INTERACT>0
INTERACT.index=DF.index
return(1.0*INTERACT)
def create_3_interactions_nothresh(DF):
"""Take covariate matrix and generate three-way interaction matrix between covariates"""
INTERACT=pd.DataFrame()
dfcolumns=DF.columns
groupthese=[]
for i in range(len(dfcolumns)):
for j in range(len(dfcolumns)):
for k in range(len(dfcolumns)):
if((i<j)&(i<k)):
name1=dfcolumns[i]
name2=dfcolumns[j]
name3=dfcolumns[k]
threenames=np.sort(list(set([str(name1),str(name2),str(name3)])))
if len(threenames)==3:
INTERACT[str(name1)+'-'+str(name2)+'-'+str(name3)]=np.array(DF.ix[:,i])*np.array(DF.ix[:,j])*np.array(DF.ix[:,k])
groupthese.append(str(threenames[0])+'-'+str(threenames[1])+'-'+str(threenames[2]))
#INTERACT.columns=[guide2gene(x.split('-')[0])+'-'+guide2gene(x.split('-')[1])+'-'+guide2gene(x.split('-')[2]) for x in INTERACT.columns]
INTERACT=INTERACT.T
INTERACT['genes']=INTERACT.index
INTERACT=INTERACT.groupby(groupthese).sum().T
INTERACT.index=DF.index
return(1.0*INTERACT)
#############Linear Model Stuff########
def cv_rsq(Y,X,k=5,per=0.8,adj=[],relcel=[]):
Y_tmp=Y.copy()
X_tmp=X.copy()
rsq=[]
for i in range(k):
print(i)
numsamples=int(np.round(per*len(Y_tmp)))
train=np.random.choice(range(len(Y_tmp)),size=numsamples,replace=False)
traincells=Y_tmp.index[train]
testcells=list(set(Y_tmp.index)-set(traincells))
print('1',len(testcells))
if len(relcel)>0:
testcells=list(set(testcells).intersection(set(relcel)))
print('2',len(testcells))
Y_train=Y_tmp.loc[traincells]
Y_test=Y_tmp.loc[testcells]
flag=0
X_train=X_tmp.loc[traincells]
X_test=X_tmp.loc[testcells]
lmfit=sklearn.linear_model.ElasticNet(precompute=True,l1_ratio=0.5,alpha=0.0005,max_iter=10000)#linear_model.Ridge
lmfit.fit(X_train,Y_train)
if len(adj)>0:
X_train_adj=bayes_cov_col(Y_train,X_train,adj,lmfit)
lmfit.fit(X_train_adj,Y_train)
X_test_adj=bayes_cov_col(Y_test,X_test,adj,lmfit)
rsq.append(lmfit.score(X_test_adj,Y_test))
else:
rsq.append(lmfit.score(X_test,Y_test))
return rsq
def marginal_covariates(y,x,k=4,percent=0.8):
"""Input is observations and list of covariates
like guides, qc, batch, guide interactions, cell types, cell type interactions
perform k-fold CV on xx percent of data
for each of the 2^n combinations of covariates
"""
if isinstance(x,list):
numsamples=int(np.round(percent*len(y)))
X=pd.concat(x,axis=1)
# rsqall=[]
# for i in range(k):
# print(i)
# train=np.random.choice(range(len(y)),size=numsamples,replace=False)
# traincells=y.index[train]
# testcells=list(set(y.index)-set(traincells))
# X_train=X.loc[traincells]
# Y_train=y.loc[traincells]
# X_test=X.loc[testcells]
# Y_test=y.loc[testcells]
# enet=sklearn.linear_model.ElasticNet(precompute=True,l1_ratio=0.5,alpha=0.0012,max_iter=10000)
# enet.fit(X_train,Y_train)
# print('model has been fit')
# rsqall.append(enet.score(X_test,Y_test))
rsqind=[]
big_resid=[]
for j in range(len(x)):
print(j)
rsqk=[]
for i in range(k):
print(k)
train=np.random.choice(range(len(y)),size=numsamples,replace=False)
traincells=y.index[train]
testcells=list(set(y.index)-set(traincells))
Y_train=y.loc[traincells]
Y_test=y.loc[testcells]
flag=0
if j==0:
X_train=x[j].loc[traincells]
X_test=x[j].loc[testcells]
lmfit=sklearn.linear_model.ElasticNet(precompute=True,l1_ratio=0.5,alpha=0.0005,max_iter=10000)
else:
X=pd.concat(x[0:j],axis=1)
X_train=X.loc[traincells]
X_test=X.loc[testcells]
lmfit.fit(X_train,Y_train)
rsqk.append(lmfit.score(X_test,Y_test))
Yhat=lmfit.predict(X_test)
if flag==0:
df_resid=Yhat-Y_test
flag=1
else:
df_resid = (df_resid + (Yhat-Y_test)) / 2.0
rsqind.append(rsqk)
big_resid.append(df_resid)
else:
print('x is not a list')
return
#df_rsq=pd.concat([pd.DataFrame(rsqind)],axis=0)
return rsqind
def crosscov_interactions(X1,X2):
cols1=X1.columns
cols2=X2.columns
Xout=pd.DataFrame()
for i in range(len(cols1)):
for j in range(len(cols2)):
if i>j:
curi=cols1[i]
curj=cols2[j]
Xout[str(curi)+'_'+str(curj)]=X1[curi]*X2[curj]
return Xout
def nonzeroX2dict(X):
dict_out={}
for col in X.columns:
curcol=X[col]
dict_out[col]=curcol[curcol>0].index
return dict_out
def bayes_cov_col(Y,X,cols,lm):
"""
@Y = Expression matrix, cells x genes, expecting pandas dataframe
@X = Covariate matrix, cells x covariates, expecting pandas dataframe
@cols = The subset of columns that the EM should be performed over, expecting list
@lm = linear model object
"""
# EM iteration
Yhat=pd.DataFrame(lm.predict(X))
Yhat.index=Y.index
Yhat.columns=Y.columns
SSE_all=np.square(Y.subtract(Yhat))
X_adjust=X.copy()
df_SSE = []
df_logit = []
for curcov in cols:
curcells=X[X[curcov]>0].index
if len(curcells)>2:
X_notcur=X.copy()
X_notcur[curcov]=[0]*len(X_notcur)
X_sub=X_notcur.loc[curcells]
Y_sub=Y.loc[curcells]
GENE_var=2.0*Y_sub.var(axis=0)
vargenes=GENE_var[GENE_var>0].index
Yhat_notcur=pd.DataFrame(lm.predict(X_sub))
Yhat_notcur.index=Y_sub.index
Yhat_notcur.columns=Y_sub.columns
SSE_notcur=np.square(Y_sub.subtract(Yhat_notcur))
SSE=SSE_all.loc[curcells].subtract(SSE_notcur)
SSE_sum=SSE.sum(axis=1)
SSE_transform=SSE.div(GENE_var+0.5)[vargenes].sum(axis=1)
logitify=np.divide(1.0,1.0+np.exp(SSE_transform))#sum))
df_SSE.append(SSE_sum)
df_logit.append(logitify)
X_adjust[curcov].loc[curcells]=logitify
return X_adjust
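# Added usage sketch (not in the original module): bayes_cov_col expects a model that
# has already been fit on (X, Y); the random data below are purely illustrative.
def _example_bayes_cov_col():
    rng = np.random.RandomState(0)
    Y = pd.DataFrame(rng.randn(30, 5), columns=['gene%d' % i for i in range(5)])
    X = pd.DataFrame((rng.rand(30, 3) > 0.5).astype(float), columns=['a', 'b', 'c'])
    lm = sklearn.linear_model.ElasticNet(l1_ratio=0.5, alpha=0.0005, max_iter=10000)
    lm.fit(X, Y)
    return bayes_cov_col(Y, X, ['a', 'b'], lm)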
def run_model(Y,X,EM_DICT=None,verbose=0,modalpha=0.0005,removecells=1):
"""
@Y = Expression matrix, cells x genes, expecting pandas dataframe
@X = Covariate matrix, cells x covariates, expecting pandas dataframe
@EM_DICT = A dictionary of cell labels for each perturbation to perform the EM-step over, expecting dict
"""
enet=sklearn.linear_model.ElasticNet(precompute=True,l1_ratio=0.5,alpha=modalpha,max_iter=10000)
enet.fit(X,Y)
if verbose==1:
print(enet.score(X,Y))
Be=pd.DataFrame(enet.coef_)
Be.columns=X.columns
Be.index=Y.columns
# EM iteration
Yhat=pd.DataFrame(enet.predict(X))
Yhat.index=Y.index
Yhat.columns=Y.columns
SSE_all=np.square(Y.subtract(Yhat))
X_adjust=X.copy()
X_adjust['unperturbed']=[0]*len(X)
df_SSE = []
df_logit = []
df_pf = []
if EM_DICT is not None:
for curcov in EM_DICT.keys():
curcells=EM_DICT[curcov]
X_notcur=X.copy()
X_notcur[curcov]=[0]*len(X_notcur)
X_sub=X_notcur.loc[curcells]
Y_sub=Y.loc[curcells]
GENE_var=2.0*Y_sub.var(axis=0)
vargenes=GENE_var[GENE_var>0].index
Yhat_notcur=pd.DataFrame(enet.predict(X_sub))
Yhat_notcur.index=Y_sub.index
Yhat_notcur.columns=Y_sub.columns
SSE_notcur=np.square(Y_sub.subtract(Yhat_notcur))
SSE=SSE_all.loc[curcells].subtract(SSE_notcur)
SSE_sum=SSE.sum(axis=1)
SSE_transform=SSE.div(GENE_var+0.5)[vargenes].sum(axis=1)
logitify=np.divide(1.0,1.0+np.exp(SSE_sum))#SSE_transform))#sum))
df_SSE.append(SSE_sum)
df_logit.append(logitify)
pf=np.mean(logitify>0.99)
if verbose==1:
print(curcov,pf)
df_pf.append([curcov,pf])
weak_perturb=1.0*(logitify<0.1)
X_adjust[curcov].loc[curcells]=logitify
X_adjust['unperturbed'].loc[curcells]=weak_perturb
print('done with EM')
#refit model
enet=sklearn.linear_model.ElasticNet(precompute=True,l1_ratio=0.5,alpha=0.0005,max_iter=10000)
if removecells==1:
goodcells=X_adjust['unperturbed']!=1
print(np.mean(goodcells))
Y=Y[goodcells]
X_adjust=X[goodcells]
enet.fit(X_adjust,Y)
Yhat=pd.DataFrame(enet.predict(X_adjust))
Yhat.index=Y.index
Yhat.columns=Y.columns
if verbose==1:
print(enet.score(X_adjust,Y))
Be=pd.DataFrame(enet.coef_)
Be.columns=X_adjust.columns
Be.index=Y.columns
RES_out=Y.subtract(Yhat)
if EM_DICT is not None:
return(Be,X_adjust,RES_out,df_pf)#,df_SSE,df_logit)
return(Be,X_adjust,RES_out)#,df_SSE,df_logit)
def run_model_bycol(Y,X,EM_cols=None,modalpha=0.005,verbose=0):
"""
@Y = Expression matrix, expecting pandas dataframe, cells x genes
@X = Covariate matrix, expecting pandas dataframe, cells x covariates
@EM_cols = The subset of columns that the EM should be performed over, list
"""
enet=sklearn.linear_model.ElasticNet(precompute=True,l1_ratio=0.5,alpha=modalpha,max_iter=10000)
enet.fit(X,Y)
if verbose==1:
print(enet.score(X,Y))
Be=pd.DataFrame(enet.coef_)
Be.columns=X.columns
Be.index=Y.columns
Yhat=pd.DataFrame(enet.predict(X))
Yhat.index=Y.index
Yhat.columns=Y.columns
if EM_cols is not None:
X_adjust=bayes_cov_col(Y,X,EM_cols,enet)
#print('done with EM')
#refit model
enet=sklearn.linear_model.ElasticNet(precompute=True,l1_ratio=0.5,alpha=0.0004,max_iter=10000)
enet.fit(X_adjust,Y)
Yhat=pd.DataFrame(enet.predict(X_adjust))
Yhat.index=Y.index
Yhat.columns=Y.columns
if verbose==1:
print(enet.score(X_adjust,Y))
Be=pd.DataFrame(enet.coef_)
Be.columns=X_adjust.columns
Be.index=Y.columns
else:
X_adjust=X.copy()
RES_out=Y.subtract(Yhat)
return(Be,X_adjust,RES_out)
def count_27(B1,B2,B3,thresh=0.01):
vecs1=[B1<(-thresh),np.abs(B1)<=thresh,B1>thresh]
vecs2=[B2<(-thresh),np.abs(B2)<=thresh,B2>thresh]
vecs3=[B3<(-thresh),np.abs(B3)<=thresh,B3>thresh]
COUNTER=[]
for i in range(3):
for j in range(3):
for k in range(3):
COUNTER.append(np.sum(np.logical_and(np.logical_and(vecs1[i],vecs2[j]),vecs3[k])))
return COUNTER
def return_sorted_list(in1):
output = [0] * len(in1)
for i, x in enumerate(sorted(range(len(in1)), key=lambda y: in1[y])):
output[x] = i
return np.array(output)
def index_27(B1,B2,B3,df_order,thresh=0.01):
vecs1=[B1<(-thresh),np.abs(B1)<=thresh,B1>thresh]
vecs2=[B2<(-thresh),np.abs(B2)<=thresh,B2>thresh]
vecs3=[B3<(-thresh),np.abs(B3)<=thresh,B3>thresh]
Ball=pd.concat([B1,B2,B3],axis=1)
iarray=pd.DataFrame(['none']*len(B1))
iarray.index=B1.index
for i in range(3):
for j in range(3):
for k in range(3):
totsum=int(np.sum(np.logical_and(np.logical_and(vecs1[i],vecs2[j]),vecs3[k])))
iarray[np.logical_and(np.logical_and(vecs1[i],vecs2[j]),vecs3[k])]=str(i-1)+' '+str(j-1)+' '+str(k-1)
iarray['type']=['none']*len(B1)
iarray['order']=[0]*len(B1)
iarray['effect']=[0]*len(B1)
numbering=0
for i in range(len(df_order)):
curgroup=df_order.index[i]
curtype=df_order.ix[i,'type']
matches=iarray[0]==curgroup
nummatches=np.sum(matches)
if nummatches>0:
Bmatch=Ball[matches]
intarray=[int(x) for x in curgroup.split(' ')]
Bmod=Bmatch.copy()
l=0
for col in Bmod.columns:
Bmod[col]=intarray[l]*Bmod[col]
l+=1
Bsum=pd.DataFrame(Bmod.sum(axis=1))
ordervec=return_sorted_list(-np.array(Bsum[0]))
ordervec=ordervec+numbering
iarray.ix[matches,'type']=curtype
iarray.ix[matches,'order']=ordervec
iarray.ix[matches,'effect']=np.array(Bsum[0])
numbering+=np.max(ordervec)+1
return iarray
def hyper_overlap(genes1,genes2,M):
curoverlap=genes1.intersection(genes2)
x=len(curoverlap)
n=len(genes1)
N=len(genes2)
pval=1.0-scipy.stats.hypergeom.cdf(x,M, n, N)
return pval
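# Added usage sketch (not in the original module): hypergeometric enrichment p-value
# for the overlap of two illustrative gene sets drawn from a universe of M genes.
def _example_hyper_overlap():
    set_a = {'TP53', 'MYC', 'EGFR', 'BRCA1'}
    set_b = {'TP53', 'MYC', 'KRAS'}
    return hyper_overlap(set_a, set_b, M=20000)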
def hyper_category(df_cats,genes_in):
pvals=[]
cat_un=np.unique(df_cats[0])
genes2=set(genes_in).intersection(set(df_cats.index))
for cat in cat_un:
genes1=set(df_cats[df_cats[0]==cat].index)
pvals.append(hyper_overlap(genes1,genes2,len(df_cats)))
df_pvals=pd.DataFrame(-np.log10(statsmodels.sandbox.stats.multicomp.fdrcorrection0(pvals)[1]))
df_pvals.index=cat_un
return df_pvals
def numbins(x):
iqr=((np.percentile(x, 75) - np.percentile(x, 25)))
if iqr==0.0:
return int(np.ceil(np.sqrt(len(x))))
else:
bins=int(np.ceil((np.max(x)-np.min(x))/((iqr)/np.power(len(x),0.33333333))))
return bins
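# Added usage sketch (not in the original module): numbins applies a
# Freedman-Diaconis-style rule, bins = ceil((max - min) / (IQR / n**(1/3))), and falls
# back to ceil(sqrt(n)) bins when the IQR is zero; the sample below is illustrative.
def _example_numbins():
    sample = np.random.RandomState(1).randn(1000)
    return numbins(sample)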
def get_1sidepval(B,joint,edges,gsums,gvar,nguides):
Bpval=B.copy()
#create index lookup for each gene to the pairs
genevec=np.array(range(len(gsums)))
guidevec=np.array(range(len(nguides)))
gsums=np.array(gsums)
gvar=np.array(gvar)
nguides=np.array(nguides)
rowindex_dict={}
colindex_dict={}
for i in range(len(edges[0])-1):
for j in range(len(edges[1])-1):
logical_gsums=np.logical_and(gsums>=edges[0][i],gsums<edges[0][i+1])
logical_gvar=np.logical_and(gvar>=edges[1][j],gvar<edges[1][j+1])
logical_both=np.logical_and(logical_gsums,logical_gvar)
if np.sum(logical_both)>0:
rowindex_dict[(i,j)]=genevec[logical_both]
for i in range(len(edges[2])-1):
logical_nguides=np.logical_and(nguides>=edges[2][i],nguides<edges[2][i+1])
if np.sum(logical_nguides)>0:
colindex_dict[i]=guidevec[logical_nguides]
maxedges=len(edges[3])-2
for key in rowindex_dict.keys():
for guidekey in colindex_dict.keys():
curjoint=joint[key[0]][key[1]][guidekey]
curjoint /= curjoint.sum()
curjoint = pd.DataFrame(curjoint)
# Reference: https://www.python.ambitious-engineer.com/archives/1630
# Reference: https://note.com/kamakiriphysics/n/n2aec5611af2a
# Reference: https://qiita.com/Gen6/items/2979b84797c702c858b1
import os
from datetime import datetime
from flask import Flask, render_template, request, redirect, url_for, send_from_directory, g, flash
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import glob
import shutil
import argparse
import pathlib
import numpy as np
from numpy import random
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import plotly
import plotly.express as px
import plotly.offline as offline
from PIL import Image
import cv2
import torch
model = torch.hub.load('ultralytics/yolov5', 'yolov5s', pretrained=True,force_reload=True)
import torchvision
# https://stackoverflow.com/questions/68140388/an-error-cache-may-be-out-of-date-try-force-reload-true-comes-up-even-thou
import torch.backends.cudnn as cudnn
from pathlib import Path
# helper for deleting generated graph files
def remove_glob(pathname, recursive=True):
for p in glob.glob(pathname, recursive=recursive):
if os.path.isfile(p):
os.remove(p)
# extract a frame image from an mp4
def save_frame_sec(video_path, sec, result_path):
cap = cv2.VideoCapture(video_path)
if not cap.isOpened():
return
os.makedirs(os.path.dirname(result_path), exist_ok=True)
fps = cap.get(cv2.CAP_PROP_FPS)
cap.set(cv2.CAP_PROP_POS_FRAMES, round(fps * sec))
ret, frame = cap.read()
if ret:
cv2.imwrite(result_path, frame)
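# Added usage sketch (not part of the original script): grab the frame at t = 2.5 s
# from a hypothetical uploaded video.
def _example_save_frame():
    save_frame_sec('./upload/example.mp4', 2.5, './images/frame/0000.jpg')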
# object detection
def dtc_grph_label(img_ad,img_dtct,dtct_lbl,i):
img = [np.asarray(Image.open(img_ad))] # load the image as an array so the shape checks below work
#model = torch.hub.load('ultralytics/yolov5', 'custom', path='static/yolov5s.pt',force_reload=True)
results = model(img)
# organize the plot data
detect = results.pandas().xyxy[0]
detect['x'] = (detect.xmin + detect.xmax)/2
detect['y'] = (detect.ymin + detect.ymax)/2
detect['size'] = np.sqrt((detect.xmax - detect.xmin)*(detect.ymax - detect.ymin))
detect['frame'] = i
# create the plot
fig = plt.figure(figsize=(8, 8))
# fig = plt.figure()
sns.scatterplot(data=detect, x='x', y='y', hue='name',size = detect['size']*100,alpha = 0.5,sizes=(100,500))
plt.xlim(0,np.array(img).shape[2])
plt.ylim(np.array(img).shape[1],0)
# load the image https://qiita.com/zaburo/items/5637b424c655b136527a
im = Image.open(img_ad)
# convert the image to an array
im_list = np.asarray(im)
# overlay the image
plt.imshow(im_list, alpha=1.0)
# display the overlaid result
plt.axis("off") #https://qiita.com/tsukada_cs/items/8d31a25cd7c860690270
plt.imshow(im, alpha=0.6)
if np.array(img).shape[2] > np.array(img).shape[1]:
plt.legend(bbox_to_anchor=(0, -0.1), loc='upper left', borderaxespad=0, fontsize=8)
else:
plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left', borderaxespad=0, fontsize=8)
plt.savefig(img_dtct+'/'+img_ad.split('.')[-2].split('/')[-1]+'_detect.png')
detect.to_csv(dtct_lbl+'/'+img_ad.split('.')[-2].split('/')[-1]+'_label.csv')
app = Flask(__name__)
# limit the upload file size
# https://tanuhack.com/flask-client2server/
app.config['MAX_CONTENT_LENGTH'] = 5 * 1024 * 1024 #5MB
SAVE_DIR = "graph"
if not os.path.isdir(SAVE_DIR):
os.mkdir(SAVE_DIR)
@app.route('/graph/<path:filepath>')
def send_js(filepath):
return send_from_directory(SAVE_DIR, filepath)
@app.route("/", methods=["GET","POST"])
def upload_file():
if request.method == "GET":
return render_template("index.html")
if request.method == "POST":
image = request.files['image']
if image:
remove_glob('./upload/**')
app.logger.info('file_name={}'.format(image.filename))
app.logger.info('content_type={} content_length={}, mimetype={}, mimetype_params={}'.format(
image.content_type, image.content_length, image.mimetype, image.mimetype_params))
#imagefile_en = image.filename.encode('utf-8')
image.save("./upload/"+image.filename)
video_path = "./upload/"+image.filename
video_2_jpg_path = './images/frame'
img_dtct = './images/detect'
dtct_lbl = './images/labels'
remove_glob(video_2_jpg_path+'/**')
remove_glob(img_dtct+'/**')
# extract video file info
cap = cv2.VideoCapture(video_path)
video_frame_count = cap.get(cv2.CAP_PROP_FRAME_COUNT)
video_fps = cap.get(cv2.CAP_PROP_FPS)
video_len_sec = video_frame_count / video_fps
print('sec:',video_len_sec)
width = cap.get(cv2.CAP_PROP_FRAME_WIDTH)
print('width:',width)
height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
print('height:',height)
# remove leftover data before processing
remove_glob(video_2_jpg_path+'/**')
remove_glob(img_dtct+'/**')
remove_glob(dtct_lbl+'/**')
# frame -> jpg -> png/csv
stp = 0.5 # capture one frame every stp seconds
nomax ='{0:04d}'.format(int(len(np.arange(0,video_len_sec//1+stp,stp)))-1)
for i,sec in enumerate(np.arange(0,video_len_sec//1+stp,stp)): # playback length in seconds, rounded up
no = '{0:04d}'.format(i)
save_frame_sec(video_path, sec, video_2_jpg_path+'/'+no+'.jpg')
dtc_grph_label(video_2_jpg_path+'/'+no+'.jpg',img_dtct,dtct_lbl,i)
print(no,'/',nomax)
remove_glob(video_2_jpg_path+'/**')
# collect the png files that make up the gif
files = sorted(glob.glob(img_dtct+'/*.png'))
images = list(map(lambda file: Image.open(file), files))
# delete old gif files
remove_glob('./graph/**')
# create the gif file
filepath = "./graph/" + datetime.now().strftime("%Y%m%d%H%M%S_") + "out.gif"
print(filepath)
images[0].save(filepath, save_all=True, append_images=images[1:], duration=400, loop=0)
# extract and merge the label files
df = pd.DataFrame()
for file_path in pathlib.Path(dtct_lbl).glob('*.csv'):
f_path = pathlib.Path(file_path)
file_name = f_path.name
df_tmp = pd.read_csv(dtct_lbl+'/'+file_name)
df = pd.concat([df, df_tmp], axis=0)
from unittest import TestCase
from unittest.mock import (
ANY,
Mock,
patch,
)
import numpy as np
import pandas as pd
from pandas.testing import assert_frame_equal
from pypika import Order
from fireant.queries.pagination import paginate
from fireant.tests.dataset.mocks import (
dimx2_date_bool_df,
dimx2_date_str_df,
dimx2_str_num_df,
dimx3_date_str_str_df,
)
TS = "$timestamp"
mock_table_widget = Mock()
mock_table_widget.group_pagination = False
mock_chart_widget = Mock()
mock_chart_widget.group_pagination = True
mock_dimension_definition = Mock()
mock_dimension_definition.alias = "$political_party"
mock_metric_definition = Mock()
mock_metric_definition.alias = "$votes"
class SimplePaginationTests(TestCase):
@patch("fireant.queries.pagination._simple_paginate")
def test_that_with_no_widgets_using_group_pagination_that_simple_pagination_is_applied(self, mock_paginate):
paginate(dimx2_date_str_df, [mock_table_widget])
mock_paginate.assert_called_once_with(ANY, ANY, ANY, ANY)
@patch("fireant.queries.pagination._simple_paginate")
def test_that_with_group_pagination_and_one_dimension_that_simple_pagination_is_applied(self, mock_paginate):
paginate(dimx2_str_num_df, [mock_table_widget])
mock_paginate.assert_called_once_with(ANY, ANY, ANY, ANY)
def test_paginate_with_limit_slice_data_frame_to_limit(self):
paginated = paginate(dimx2_date_str_df, [mock_table_widget], limit=5)
expected = dimx2_date_str_df[:5]
assert_frame_equal(expected, paginated)
def test_paginate_with_offset_slice_data_frame_from_offset(self):
paginated = paginate(dimx2_date_str_df, [mock_table_widget], offset=5)
expected = dimx2_date_str_df[5:]
assert_frame_equal(expected, paginated)
def test_paginate_with_limit_and_offset_slice_data_frame_from_offset_to_offset_plus_limit(
self,
):
paginated = paginate(dimx2_date_str_df, [mock_table_widget], limit=5, offset=5)
expected = dimx2_date_str_df[5:10]
assert_frame_equal(expected, paginated)
def test_apply_sort_with_one_order_dimension_asc(self):
paginated = paginate(
dimx2_date_str_df,
[mock_table_widget],
orders=[(mock_dimension_definition, Order.asc)],
)
expected = dimx2_date_str_df.sort_values(by=[mock_dimension_definition.alias], ascending=True)
assert_frame_equal(expected, paginated)
def test_apply_sort_with_one_order_dimension_desc(self):
paginated = paginate(
dimx2_date_str_df,
[mock_table_widget],
orders=[(mock_dimension_definition, Order.desc)],
)
expected = dimx2_date_str_df.sort_values(by=[mock_dimension_definition.alias], ascending=False)
assert_frame_equal(expected, paginated)
def test_apply_sort_with_one_order_metric_asc(self):
paginated = paginate(
dimx2_date_str_df,
[mock_table_widget],
orders=[(mock_metric_definition, Order.asc)],
)
expected = dimx2_date_str_df.sort_values(by=[mock_metric_definition.alias], ascending=True)
assert_frame_equal(expected, paginated)
def test_apply_sort_with_one_order_metric_desc(self):
paginated = paginate(
dimx2_date_str_df,
[mock_table_widget],
orders=[(mock_metric_definition, Order.desc)],
)
expected = dimx2_date_str_df.sort_values(by=[mock_metric_definition.alias], ascending=False)
assert_frame_equal(expected, paginated)
def test_apply_sort_with_multiple_orders(self):
paginated = paginate(
dimx2_date_str_df,
[mock_table_widget],
orders=[
(mock_dimension_definition, Order.asc),
(mock_metric_definition, Order.desc),
],
)
expected = dimx2_date_str_df.sort_values(
by=[mock_dimension_definition.alias, mock_metric_definition.alias],
ascending=[True, False],
)
assert_frame_equal(expected, paginated)
def test_apply_sort_before_slice(self):
paginated = paginate(
dimx2_date_str_df,
[mock_table_widget],
orders=[(mock_metric_definition, Order.asc)],
limit=5,
offset=5,
)
expected = dimx2_date_str_df.sort_values(by=[mock_metric_definition.alias], ascending=True)[5:10]
assert_frame_equal(expected, paginated)
class GroupPaginationTests(TestCase):
@patch("fireant.queries.pagination._group_paginate")
def test_with_one_widget_using_group_pagination_that_group_pagination_is_applied(self, mock_paginate):
paginate(dimx2_date_str_df, [mock_chart_widget, mock_table_widget])
mock_paginate.assert_called_once_with(ANY, ANY, ANY, ANY)
def test_paginate_with_limit_slice_data_frame_to_limit_in_each_group(self):
paginated = paginate(dimx2_date_str_df, [mock_chart_widget], limit=2)
index = dimx2_date_str_df.index
reindex = pd.MultiIndex.from_product([index.levels[0], index.levels[1][:2]], names=index.names)
expected = dimx2_date_str_df.reindex(reindex).dropna().astype(np.int64)
assert_frame_equal(expected, paginated)
def test_paginate_with_offset_slice_data_frame_from_offset_in_each_group(self):
paginated = paginate(dimx2_date_str_df, [mock_chart_widget], offset=2)
index = dimx2_date_str_df.index
reindex = pd.MultiIndex.from_product([index.levels[0], index.levels[1][2:]], names=index.names)
expected = dimx2_date_str_df.reindex(reindex)
assert_frame_equal(expected, paginated)
def test_paginate_with_limit_and_offset_slice_data_frame_from_offset_to_offset_plus_limit_in_each_group(
self,
):
paginated = paginate(dimx2_date_str_df, [mock_chart_widget], limit=1, offset=1)
index = dimx2_date_str_df.index
reindex = pd.MultiIndex.from_product([index.levels[0], index.levels[1][1:2]], names=index.names)
expected = dimx2_date_str_df.reindex(reindex).dropna().astype(np.int64)
assert_frame_equal(expected, paginated)
def test_apply_sort_with_one_order_dimension_asc(self):
paginated = paginate(
dimx2_date_str_df,
[mock_chart_widget],
orders=[(mock_dimension_definition, Order.asc)],
)
expected = dimx2_date_str_df.sort_values(by=[TS, mock_dimension_definition.alias], ascending=True)
assert_frame_equal(expected, paginated)
def test_apply_sort_with_one_order_dimension_desc(self):
paginated = paginate(
dimx2_date_str_df,
[mock_chart_widget],
orders=[(mock_dimension_definition, Order.desc)],
)
expected = dimx2_date_str_df.sort_values(by=[TS, mock_dimension_definition.alias], ascending=(True, False))
assert_frame_equal(expected, paginated)
def test_apply_sort_with_one_order_metric_asc(self):
paginated = paginate(
dimx2_date_str_df,
[mock_chart_widget],
orders=[(mock_metric_definition, Order.asc)],
)
expected = dimx2_date_str_df.iloc[[1, 0, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]]
assert_frame_equal(expected, paginated)
def test_apply_sort_with_one_order_metric_desc(self):
paginated = paginate(
dimx2_date_str_df,
[mock_chart_widget],
orders=[(mock_metric_definition, Order.desc)],
)
expected = dimx2_date_str_df.iloc[[2, 0, 1, 4, 3, 6, 5, 8, 7, 10, 9, 12, 11]]
assert_frame_equal(expected, paginated)
def test_apply_sort_multiple_levels_df(self):
paginated = paginate(
dimx3_date_str_str_df,
[mock_chart_widget],
orders=[(mock_metric_definition, Order.asc)],
)
sorted_groups = dimx3_date_str_str_df.groupby(level=[1, 2]).sum().sort_values(by="$votes", ascending=True).index
expected = (
dimx3_date_str_str_df.groupby(level=0)
.apply(lambda df: df.reset_index(level=0, drop=True).reindex(sorted_groups))
.dropna()
)
metrics = ["$votes", "$wins", "$wins_with_style", "$turnout"]
expected[metrics] = expected[metrics].astype(np.int64)
assert_frame_equal(expected, paginated)
def test_apply_sort_with_multiple_orders(self):
paginated = paginate(
dimx2_date_str_df,
[mock_chart_widget],
orders=[
(mock_dimension_definition, Order.asc),
(mock_metric_definition, Order.desc),
],
)
expected = dimx2_date_str_df.sort_values(
by=[TS, mock_dimension_definition.alias, mock_metric_definition.alias],
ascending=[True, True, False],
)
assert_frame_equal(expected, paginated)
def test_apply_sort_before_slice(self):
paginated = paginate(
dimx2_date_str_df,
[mock_chart_widget],
limit=1,
offset=1,
orders=[(mock_metric_definition, Order.asc)],
)
expected = dimx2_date_str_df.iloc[[0, 3, 5, 7, 9, 11]]
assert_frame_equal(expected, paginated)
def test_group_paginate_with_bool_dims__no_pagination(self):
# This test does not apply any pagination but checks that none of the dimension values get lost
expected = dimx2_date_bool_df
paginated = paginate(dimx2_date_bool_df, [mock_chart_widget])
assert_frame_equal(expected, paginated)
"""This module contains PlainFrame and PlainColumn tests.
"""
import collections
import datetime
import pytest
import numpy as np
import pandas as pd
from numpy.testing import assert_equal as np_assert_equal
from pywrangler.util.testing.plainframe import (
NULL,
ConverterFromPandas,
NaN,
PlainColumn,
PlainFrame
)
@pytest.fixture
def plainframe_standard():
cols = ["int", "float", "bool", "str", "datetime"]
data = [[1, 1.1, True, "string", "2019-01-01 10:00:00"],
[2, 2, False, "string2", "2019-02-01 10:00:00"]]
return PlainFrame.from_plain(data=data, dtypes=cols, columns=cols)
@pytest.fixture
def plainframe_missings():
cols = ["int", "float", "bool", "str", "datetime"]
data = [[1, 1.1, True, "string", "2019-01-01 10:00:00"],
[2, NaN, False, "string2", "2019-02-01 10:00:00"],
[NULL, NULL, NULL, NULL, NULL]]
return PlainFrame.from_plain(data=data, dtypes=cols, columns=cols)
@pytest.fixture
def df_from_pandas():
df = pd.DataFrame(
{"int": [1, 2],
"int_na": [1, np.NaN],
"bool": [True, False],
"bool_na": [True, np.NaN],
"float": [1.2, 1.3],
"float_na": [1.2, np.NaN],
"str": ["foo", "bar"],
"str_na": ["foo", np.NaN],
"datetime": [pd.Timestamp("2019-01-01"), pd.Timestamp("2019-01-02")],
"datetime_na": [pd.Timestamp("2019-01-01"), pd.NaT]})
return df
@pytest.fixture
def df_from_spark(spark):
from pyspark.sql import types
values = collections.OrderedDict(
{"int": [1, 2, None],
"smallint": [1, 2, None],
"bigint": [1, 2, None],
"bool": [True, False, None],
"single": [1.0, NaN, None],
"double": [1.0, NaN, None],
"str": ["foo", "bar", None],
"datetime": [datetime.datetime(2019, 1, 1),
datetime.datetime(2019, 1, 2),
None],
"date": [datetime.date(2019, 1, 1),
datetime.date(2019, 1, 2),
None],
"map": [{"foo": "bar"}, {"bar": "foo"}, None],
"array": [[1, 2, 3], [3, 4, 5], None]}
)
data = list(zip(*values.values()))
c = types.StructField
columns = [c("int", types.IntegerType()),
c("smallint", types.ShortType()),
c("bigint", types.LongType()),
c("bool", types.BooleanType()),
c("single", types.FloatType()),
c("double", types.DoubleType()),
c("str", types.StringType()),
c("datetime", | types.TimestampType() | pandas.api.types.TimestampType |
import logging
import logging.config
import multiprocessing
import os
from abc import ABC, abstractmethod
from collections import defaultdict
from typing import Union, Dict, Set, List
from gensim.models import Doc2Vec
from joblib import Parallel, delayed
from scipy.stats import stats
from sklearn import metrics
from statsmodels.sandbox.stats.multicomp import TukeyHSDResults
from statsmodels.stats.multicomp import pairwise_tukeyhsd
from tqdm import tqdm
from lib2vec.doc2vec_structures import DocumentKeyedVectors
from lib2vec.corpus_structure import Corpus, Utils, ConfigLoader
from lib2vec.document_segments import chunk_documents
from lib2vec.vectorization import Vectorizer
import random
import pandas as pd
import numpy as np
from lib2vec.vectorization_utils import Vectorization
from extensions.word_movers_distance import WordMoversDistance
class EvaluationMath:
@staticmethod
def mean(results: Union[np.ndarray, List[Union[float, int]]], std: bool = True):
if not isinstance(results, np.ndarray):
results = np.array(results)
# assert np.mean(results) == sum(results) / len(results)
if std:
return np.mean(results), np.std(results)
else:
return np.mean(results)
@staticmethod
def median(results: Union[np.ndarray, List[Union[float, int]]], std: bool = True):
if not isinstance(results, np.ndarray):
results = np.array(results)
if std:
return np.median(results), stats.iqr(results)
else:
return np.median(results)
@staticmethod
def one_way_anova(list_results: Dict[str, np.ndarray]):
def replace_sig_indicator(inp: str):
if len(inp) > 0:
inp = f'{",".join([str(i) for i in sorted([int(s) for s in inp.split(",")])])}'
return inp
vals = [values for values in list_results.values()]
f, p = stats.f_oneway(*vals)
significance_dict = defaultdict(str)
tuples = []
for group, values in list_results.items():
for value in values:
tuples.append((group, value))
# print(group, Evaluation.mean(values))
# print(tuples)
df = pd.DataFrame(tuples, columns=['Group', 'Value'])
try:
m_comp: TukeyHSDResults = pairwise_tukeyhsd(endog=df['Value'], groups=df['Group'], alpha=0.05)
except ValueError:
list_results.keys()
return {key: "" for key in list_results.keys()}
m_comp_data = m_comp.summary().data
mcomp_df = pd.DataFrame(m_comp_data[1:], columns=m_comp_data[0])
group_id_lookup = {key: i + 1 for i, key in enumerate(list_results.keys())}
for i, row in mcomp_df.iterrows():
if row['reject'] and p < 0.05:
g1_commata = ''
g2_commata = ''
if len(significance_dict[row['group1']]) > 0:
g1_commata = ','
if len(significance_dict[row['group2']]) > 0:
g2_commata = ','
significance_dict[row['group1']] += f"{g1_commata}{group_id_lookup[row['group2']]}"
significance_dict[row['group2']] += f"{g2_commata}{group_id_lookup[row['group1']]}"
else:
significance_dict[row['group1']] += ""
significance_dict[row['group2']] += ""
# print(f, p)
significance_dict = {key: replace_sig_indicator(value) for key, value in significance_dict.items()}
return significance_dict
@staticmethod
def t_test(list_results: Dict[str, np.ndarray]):
outer_dict = {}
for algorithm_1, values_1 in list_results.items():
inner_dict = {}
for algorithm_2, values_2 in list_results.items():
t, p = stats.ttest_ind(values_1, values_2)
# print(algorithm_1, algorithm_2, t, p)
if values_1.mean() > values_2.mean():
ba = ">"
elif values_1.mean() == values_2.mean():
ba = "="
else:
ba = "<"
if p < 0.05:
inner_dict[algorithm_2] = f"s{ba}"
else:
inner_dict[algorithm_2] = f"n{ba}"
outer_dict[algorithm_1] = inner_dict
return outer_dict
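# Added usage sketch (not in the original module): the score values are illustrative.
def _example_evaluation_math():
    scores = np.array([0.2, 0.4, 0.4, 0.9])
    mean, std = EvaluationMath.mean(scores)
    median, iqr = EvaluationMath.median(scores)
    return mean, std, median, iqr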
class EvaluationTask(ABC):
def __init__(self, reverted: Dict[str, str], corpus: Corpus, topn):
self.reverted = reverted
self.corpus = corpus
self.topn = topn - 1
self.correct = []
self.uncorrect = []
self.truth = {}
@abstractmethod
def has_passed(self, doc_id: str, sim_doc_id: str):
pass
@abstractmethod
def ground_truth(self, doc_id: str):
pass
@abstractmethod
def nr_of_possible_matches(self, doc_id: str):
pass
def __str__(self):
return self.__class__.__name__
__repr__ = __str__
def store_passed_results(self, passed, doc_id, sim_doc_id):
if passed:
self.correct.append((doc_id, sim_doc_id))
else:
self.uncorrect.append((doc_id, sim_doc_id))
if doc_id not in self.truth:
self.truth = self.ground_truth(doc_id)
@staticmethod
def create_from_name(task_name: str, reverted: Dict[str, str], corpus: Corpus, topn: int):
if task_name.lower() == "seriestask" or task_name.lower() == "series_task" or task_name.lower() == "series":
# if corpus.series_dict is None:
# raise UserWarning("No series dictionary found for corpus!")
return SeriesTask(reverted, corpus, topn)
elif task_name.lower() == "authortask" or task_name.lower() == "author_task" or task_name.lower() == "author":
return AuthorTask(reverted, corpus, topn)
elif task_name.lower() == "genretask" or task_name.lower() == "genre_task" or task_name.lower() == "genre":
return GenreTask(reverted, corpus, topn)
else:
raise UserWarning(f"{task_name} is not defined as task")
class SeriesTask(EvaluationTask):
def ground_truth(self, doc_id):
return self.corpus.series_dict[self.reverted[doc_id]]
def has_passed(self, doc_id: str, sim_doc_id: str):
try:
passed = self.reverted[doc_id] == self.reverted[sim_doc_id]
self.store_passed_results(passed, doc_id, sim_doc_id)
return passed
except KeyError:
if sim_doc_id not in self.reverted:
return False
else:
print(doc_id, sim_doc_id, self.reverted)
raise UserWarning("No proper series handling")
# return True
def nr_of_possible_matches(self, doc_id: str):
try:
real_matches = len(self.ground_truth(doc_id)) - 1
if real_matches > self.topn:
return self.topn
return real_matches
except KeyError:
raise UserWarning("No proper series handling")
# return 0
class AuthorTask(EvaluationTask):
def ground_truth(self, doc_id):
return self.corpus.get_other_doc_ids_by_same_author(doc_id)
def has_passed(self, doc_id: str, sim_doc_id: str):
passed = self.corpus.documents[doc_id].authors == self.corpus.documents[sim_doc_id].authors
self.store_passed_results(passed, doc_id, sim_doc_id)
return passed
def nr_of_possible_matches(self, doc_id: str):
real_matches = len(self.ground_truth(doc_id))
# print(real_matches, doc_id, self.corpus.get_other_doc_ids_by_same_author(doc_id))
if real_matches > self.topn:
return self.topn
return real_matches
class GenreTask(EvaluationTask):
def ground_truth(self, doc_id):
return self.corpus.get_other_doc_ids_by_same_genres(doc_id)
def has_passed(self, doc_id: str, sim_doc_id: str):
passed = self.corpus.documents[doc_id].genres == self.corpus.documents[sim_doc_id].genres
self.store_passed_results(passed, doc_id, sim_doc_id)
return passed
def nr_of_possible_matches(self, doc_id: str):
real_matches = len(self.ground_truth(doc_id))
# real_matches = len(self.corpus.get_other_doc_ids_by_same_genres(doc_id))
if real_matches > self.topn:
return self.topn
return real_matches
class EvaluationMetric:
@staticmethod
def precision(sim_documents, doc_id: str, task: EvaluationTask,
ignore_same: bool = False, k: int = None):
# how many selected items are relevant?
hard_correct = 0
# soft_correct = 0
# print(reverted)
if k is None:
k = len(sim_documents)
if doc_id[-1].isalpha():
doc_id = '_'.join(doc_id.split('_')[:-1])
for c, (sim_doc_id, _) in enumerate(sim_documents):
if c == k + 1:
break
if sim_doc_id[-1].isalpha():
sim_doc_id = '_'.join(sim_doc_id.split('_')[:-1])
if not ignore_same or doc_id != sim_doc_id:
if task.has_passed(doc_id, sim_doc_id):
hard_correct += 1
# print(task, c, k, hard_correct, doc_id, sim_doc_id)
hard_correct = hard_correct / k
return hard_correct
@staticmethod
def length_metric(sim_documents, doc_id: str, task: EvaluationTask,
ignore_same: bool = False, k: int = None):
differences = []
if doc_id[-1].isalpha():
doc_id = '_'.join(doc_id.split('_')[:-1])
# doc_len = len(task.corpus.documents[doc_id].get_flat_document_tokens())
doc_len = task.corpus.documents[doc_id].length # len(task.corpus.documents[doc_id].get_flat_tokens_from_disk())
for c, (sim_doc_id, _) in enumerate(sim_documents):
if sim_doc_id[-1].isalpha():
sim_doc_id = '_'.join(sim_doc_id.split('_')[:-1])
if not ignore_same or doc_id != sim_doc_id:
# sim_doc_len = len(task.corpus.documents[sim_doc_id].get_flat_document_tokens())
sim_doc_len = task.corpus.documents[sim_doc_id].length
differences.append(abs(doc_len - sim_doc_len) / doc_len)
# print(task, c, k, hard_correct, doc_id, sim_doc_id)
mape = sum(differences) / len(differences) * 100
return mape
@staticmethod
def fair_precision(sim_documents, doc_id: str, task: EvaluationTask,
ignore_same: bool = False, k: int = None):
# how many selected items are relevant?
if task.nr_of_possible_matches(doc_id) == 0:
# print('zero devision fix at fair_precision')
return None
hard_correct = 0
# soft_correct = 0
# print(reverted)
if k is None:
k = len(sim_documents)
if doc_id[-1].isalpha():
doc_id = '_'.join(doc_id.split('_')[:-1])
for c, (sim_doc_id, _) in enumerate(sim_documents):
if c == k + 1:
break
if sim_doc_id[-1].isalpha():
sim_doc_id = '_'.join(sim_doc_id.split('_')[:-1])
if not ignore_same or doc_id != sim_doc_id:
if task.has_passed(doc_id, sim_doc_id):
hard_correct += 1
# if corpus.documents[doc_id].authors == corpus.documents[sim_doc_id].authors:
# soft_correct += 1
if k > task.nr_of_possible_matches(doc_id):
k = task.nr_of_possible_matches(doc_id)
hard_correct = hard_correct / k
# soft_correct = soft_correct / len(sim_documents)
return hard_correct
@staticmethod
def recall(sim_documents, doc_id: str, task: EvaluationTask,
ignore_same: bool = False, k: int = None):
# how many relevant items are selected?
if task.nr_of_possible_matches(doc_id) == 0:
# print('zero division fix at recall')
return None
hard_correct = 0
# soft_correct = 0
# print(reverted)
if k is None:
k = len(sim_documents)
if doc_id[-1].isalpha():
doc_id = '_'.join(doc_id.split('_')[:-1])
for c, (sim_doc_id, _) in enumerate(sim_documents):
if c == k + 1:
break
if sim_doc_id[-1].isalpha():
sim_doc_id = '_'.join(sim_doc_id.split('_')[:-1])
if not ignore_same or doc_id != sim_doc_id:
if task.has_passed(doc_id, sim_doc_id):
hard_correct += 1
# if corpus.documents[doc_id].authors == corpus.documents[sim_doc_id].authors:
# soft_correct += 1
hard_correct = hard_correct / task.nr_of_possible_matches(doc_id)
# soft_correct = soft_correct / len(sim_documents)
return hard_correct
@staticmethod
def fair_recall(sim_documents, doc_id: str, task: EvaluationTask,
ignore_same: bool = False, k: int = None):
# how many relevant items are selected?
if task.nr_of_possible_matches(doc_id) == 0:
# print('zero division fix at recall')
return None
hard_correct = 0
# soft_correct = 0
# print(reverted)
if k is None:
k = len(sim_documents)
if doc_id[-1].isalpha():
doc_id = '_'.join(doc_id.split('_')[:-1])
for c, (sim_doc_id, _) in enumerate(sim_documents):
if c == k + 1:
break
if sim_doc_id[-1].isalpha():
sim_doc_id = '_'.join(sim_doc_id.split('_')[:-1])
if not ignore_same or doc_id != sim_doc_id:
if task.has_passed(doc_id, sim_doc_id):
hard_correct += 1
# if corpus.documents[doc_id].authors == corpus.documents[sim_doc_id].authors:
# soft_correct += 1
relevant_items = task.nr_of_possible_matches(doc_id)
if k is None:
k = 100
if relevant_items > k:
relevant_items = k
hard_correct = hard_correct / relevant_items
# soft_correct = soft_correct / len(sim_documents)
return hard_correct
@staticmethod
def ap(sim_documents, doc_id: str, task: EvaluationTask,
ignore_same: bool = False, k: int = None):
# print(reverted)
k = 1
prec_values_at_k = []
correct_ones = []
if doc_id[-1].isalpha():
doc_id = '_'.join(doc_id.split('_')[:-1])
for sim_doc_id, _ in sim_documents:
if sim_doc_id[-1].isalpha():
sim_doc_id = '_'.join(sim_doc_id.split('_')[:-1])
if not ignore_same or doc_id != sim_doc_id:
if task.has_passed(doc_id, sim_doc_id):
correct_ones.append(k)
prec_values_at_k.append(len(correct_ones) / k)
k += 1
if len(prec_values_at_k) > 0:
ap = sum(prec_values_at_k) / len(prec_values_at_k)
else:
ap = 0
return ap
@staticmethod
def mrr(sim_documents, doc_id: str, task: EvaluationTask,
ignore_same: bool = False, k: int = None):
c = 1
if doc_id[-1].isalpha():
doc_id = '_'.join(doc_id.split('_')[:-1])
for sim_doc_id, _ in sim_documents:
if sim_doc_id[-1].isalpha():
sim_doc_id = '_'.join(sim_doc_id.split('_')[:-1])
if not ignore_same or doc_id != sim_doc_id:
if task.has_passed(doc_id, sim_doc_id):
return 1 / c
c += 1
return 0
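    # Added illustration (not in the original class): mrr returns the reciprocal rank of
    # the first retrieved document that passes the task, counted 1-based. If the first
    # relevant hit sits at position 3, the score is 1 / 3 ≈ 0.333; if nothing passes,
    # the method falls through to 0.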
@staticmethod
def ndcg(sim_documents, doc_id: str, task: EvaluationTask,
ignore_same: bool = False, k: int = None):
# print(task, doc_id, task.corpus.get_other_doc_ids_by_same_author(doc_id), task.nr_of_possible_matches(doc_id))
if task.nr_of_possible_matches(doc_id) == 0:
# print('zero division fix at ndcg')
return None, {}, {}
# print(reverted)
ground_truth_values = []
predicted_values = []
replaced_doc_id = doc_id
id_annontation = defaultdict(list)
if doc_id[-1].isalpha():
replaced_doc_id = '_'.join(doc_id.split('_')[:-1])
for c, (sim_doc_id, sim) in enumerate(sim_documents):
replaced_sim_doc_id = sim_doc_id
if sim_doc_id[-1].isalpha():
replaced_sim_doc_id = '_'.join(sim_doc_id.split('_')[:-1])
# print(doc_id, sim_doc_id, replaced_doc_id, replaced_sim_doc_id, sim)
if not ignore_same or replaced_doc_id != replaced_sim_doc_id:
# print('matches:', task.nr_of_possible_matches(doc_id))
# if c <= task.nr_of_possible_matches(doc_id):
if sum(ground_truth_values) < task.nr_of_possible_matches(doc_id):
ground_truth_values.append(1)
else:
ground_truth_values.append(0)
if task.has_passed(replaced_doc_id, replaced_sim_doc_id):
predicted_values.append(1)
id_annontation[replaced_doc_id].append((replaced_sim_doc_id, 1))
else:
predicted_values.append(0)
id_annontation[replaced_doc_id].append((replaced_sim_doc_id, 0))
else:
if c != 0:
print(f'First match ({c}) is not lookup document {doc_id} but {sim_doc_id}!')
# print(task, doc_id, sim_doc_id, predicted_values, ground_truth_values,
# task.nr_of_possible_matches(doc_id),
# task.corpus.get_other_doc_ids_by_same_author(doc_id),
# task.corpus.series_dict[task.reverted[doc_id]])
# print(ground_truth_values, predicted_values)
# print(doc_id, sim_documents, sum(ground_truth_values), task.nr_of_possible_matches(doc_id))
found_ids = set([found_id[0] for found_id in id_annontation[doc_id]])
print(doc_id, len(set(task.ground_truth(doc_id))), set(task.ground_truth(doc_id)))
print(doc_id, len(found_ids), found_ids)
print(doc_id, len(set(task.ground_truth(doc_id)).difference(found_ids)), set(task.ground_truth(doc_id)).difference(found_ids))
print()
missed = {doc_id: list(set(task.ground_truth(doc_id)).difference(found_ids))}
assert sum(ground_truth_values) == task.nr_of_possible_matches(doc_id)
# print(doc_id, ground_truth_values, predicted_values)
ndcg = metrics.ndcg_score(np.array([ground_truth_values]),
np.array([predicted_values]))
return ndcg, id_annontation, missed
@staticmethod
def f1(sim_documents, doc_id: str, task: EvaluationTask,
ignore_same: bool = False, k: int = None):
precision = EvaluationMetric.precision(sim_documents, doc_id,
task, ignore_same, k=k)
recall = EvaluationMetric.recall(sim_documents, doc_id,
task, ignore_same, k=k)
if precision and recall:
f_sum = (precision + recall)
else:
return None
if f_sum == 0:
f_sum = 1
return 2 * (precision * recall) / f_sum
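    # Added worked example (not in the original class): f1 is the harmonic mean of
    # precision and recall, e.g. precision = 0.5 and recall = 0.25 give
    # 2 * (0.5 * 0.25) / (0.5 + 0.25) = 0.25 / 0.75 ≈ 0.333. Note that the guard above
    # returns None whenever either component is None or 0, because
    # `if precision and recall` treats 0.0 as falsy.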
@staticmethod
def fair_f1(sim_documents, doc_id: str, task: EvaluationTask,
ignore_same: bool = False, k: int = None):
precision = EvaluationMetric.fair_precision(sim_documents, doc_id,
task, ignore_same, k=k)
recall = EvaluationMetric.fair_recall(sim_documents, doc_id,
task, ignore_same, k=k)
if precision and recall:
f_sum = (precision + recall)
else:
return None
if f_sum == 0:
f_sum = 1
# print(precision, recall, 2 * (precision * recall) / f_sum)
return 2 * (precision * recall) / f_sum
@staticmethod
def multi_metric(sim_documents, doc_id: str, task: EvaluationTask,
ignore_same: bool = False, k: int = None):
ndcg, doc_id_dict, missed = EvaluationMetric.ndcg(sim_documents, doc_id,
task, ignore_same)
metric_dict = {
"prec": EvaluationMetric.precision(sim_documents, doc_id,
task, ignore_same),
"prec01": EvaluationMetric.precision(sim_documents, doc_id,
task, ignore_same, k=1),
"prec03": EvaluationMetric.precision(sim_documents, doc_id,
task, ignore_same, k=3),
"prec05": EvaluationMetric.precision(sim_documents, doc_id,
task, ignore_same, k=5),
"prec10": EvaluationMetric.precision(sim_documents, doc_id,
task, ignore_same, k=10),
"f_prec": EvaluationMetric.fair_precision(sim_documents, doc_id,
task, ignore_same),
"f_prec01": EvaluationMetric.fair_precision(sim_documents, doc_id,
task, ignore_same, k=1),
"f_prec03": EvaluationMetric.fair_precision(sim_documents, doc_id,
task, ignore_same, k=3),
"f_prec05": EvaluationMetric.fair_precision(sim_documents, doc_id,
task, ignore_same, k=5),
"f_prec10": EvaluationMetric.fair_precision(sim_documents, doc_id,
task, ignore_same, k=10),
"rec": EvaluationMetric.recall(sim_documents, doc_id,
task, ignore_same),
"rec01": EvaluationMetric.recall(sim_documents, doc_id,
task, ignore_same, k=1),
"rec03": EvaluationMetric.recall(sim_documents, doc_id,
task, ignore_same, k=3),
"rec05": EvaluationMetric.recall(sim_documents, doc_id,
task, ignore_same, k=5),
"rec10": EvaluationMetric.recall(sim_documents, doc_id,
task, ignore_same, k=10),
"f_rec": EvaluationMetric.fair_recall(sim_documents, doc_id,
task, ignore_same),
"f_rec01": EvaluationMetric.fair_recall(sim_documents, doc_id,
task, ignore_same, k=1),
"f_rec03": EvaluationMetric.fair_recall(sim_documents, doc_id,
task, ignore_same, k=3),
"f_rec05": EvaluationMetric.fair_recall(sim_documents, doc_id,
task, ignore_same, k=5),
"f_rec10": EvaluationMetric.fair_recall(sim_documents, doc_id,
task, ignore_same, k=10),
"f1": EvaluationMetric.f1(sim_documents, doc_id,
task, ignore_same),
"f101": EvaluationMetric.f1(sim_documents, doc_id,
task, ignore_same, k=1),
"f103": EvaluationMetric.f1(sim_documents, doc_id,
task, ignore_same, k=3),
"f105": EvaluationMetric.f1(sim_documents, doc_id,
task, ignore_same, k=5),
"f110": EvaluationMetric.f1(sim_documents, doc_id,
task, ignore_same, k=10),
"f_f1": EvaluationMetric.fair_f1(sim_documents, doc_id,
task, ignore_same),
"f_f101": EvaluationMetric.fair_f1(sim_documents, doc_id,
task, ignore_same, k=1),
"f_f103": EvaluationMetric.fair_f1(sim_documents, doc_id,
task, ignore_same, k=3),
"f_f105": EvaluationMetric.fair_f1(sim_documents, doc_id,
task, ignore_same, k=5),
"f_f110": EvaluationMetric.fair_f1(sim_documents, doc_id,
task, ignore_same, k=10),
"ndcg": ndcg,
"mrr": EvaluationMetric.mrr(sim_documents, doc_id,
task, ignore_same),
"ap": EvaluationMetric.ap(sim_documents, doc_id,
task, ignore_same),
"length_metric": EvaluationMetric.length_metric(sim_documents, doc_id,
task, ignore_same)
}
return metric_dict, doc_id_dict, missed
class Evaluation:
evaluation_metric = EvaluationMetric.precision
@staticmethod
def sample_fun(doc_ids: Set[str], sample_size: int, series_dict: Dict[str, List[str]] = None,
series_sample: bool = False, seed: int = None):
if seed:
random.seed(seed)
if series_sample:
if series_dict is None:
raise UserWarning("No series dict defined!")
series_ids = series_dict.keys()
sampled_series_ids = random.sample(series_ids, sample_size)
return [doc_id for series_id in sampled_series_ids for doc_id in series_dict[series_id]]
else:
return random.sample(doc_ids, sample_size)
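    # Added illustration (not in the original class): with series_sample=True the sample
    # is drawn over whole series rather than single documents, e.g. sampling one series
    # from {'series_1': ['doc_1', 'doc_2']} returns ['doc_1', 'doc_2']; the optional seed
    # only makes the draw reproducible.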
@classmethod
def similar_docs_sample_results(cls, vectors, corpus: Corpus, reverted: Dict[str, str],
sample: List[str], topn: int):
results = []
for doc_id in sample:
sim_documents = Vectorization.most_similar_documents(vectors, corpus,
positives=[doc_id],
feature_to_use="NF",
topn=topn,
print_results=False,
series=True)
task = SeriesTask(reverted=reverted, corpus=corpus, topn=topn)
# print(task.nr_of_possible_matches(doc_id))
# print(doc_id, sim_documents)
hard_correct = cls.evaluation_metric(sim_documents, doc_id, task=task)
results.append(hard_correct)
return results
# @staticmethod
# def similar_docs_avg(vectors, corpus: Corpus, reverted: Dict[str, str],
# sample: List[str], topn: int):
# results = []
# soft_results = []
# for doc_id in sample:
# hard_it_results = []
# soft_it_results = []
# for i in range(1, topn+1):
# sim_documents = Vectorization.most_similar_documents(vectors, corpus,
# positives=[doc_id],
# feature_to_use="NF",
# topn=topn,
# print_results=False)
# hard_correct = 0
# soft_correct = 0
# # print(reverted)
# for sim_doc_id, sim in sim_documents:
# if reverted[doc_id] == reverted[sim_doc_id]:
# hard_correct += 1
#
# if corpus.documents[doc_id].authors == corpus.documents[sim_doc_id].authors:
# soft_correct += 1
# hard_correct = hard_correct / len(sim_documents)
# soft_correct = soft_correct / len(sim_documents)
# hard_it_results.append(hard_correct)
# soft_it_results.append(soft_correct)
# # print(doc_id, hard_it_results)
# results.append(EvaluationMath.mean(hard_it_results, std=False))
# soft_results.append(EvaluationMath.mean(soft_it_results, std=False))
# # print('>', len(results))
# return results, soft_results
@staticmethod
def series_eval(vectors: Union[Doc2Vec, DocumentKeyedVectors],
series_dictionary: Dict[str, list],
corpus: Corpus,
sample_size: int = 50,
seed: int = 42,
topn: int = 10):
series_sample = True
reverted = Utils.revert_dictionaried_list(series_dictionary)
doctags = vectors.docvecs.doctags.keys()
doctags = set([doctag for doctag in doctags if doctag[-1].isdigit() or doctag.endswith('_sum')])
# print(doctags)
sample = Evaluation.sample_fun(doctags, sample_size=sample_size, series_dict=series_dictionary, seed=seed,
series_sample=series_sample)
results = Evaluation.similar_docs_sample_results(vectors, corpus, reverted, sample, topn)
# results2, _ = Evaluation.similar_docs_avg(vectors, corpus, reverted, sample, topn)
# print(results)
# print(results2)
# mean = sum(results) / len(results)
# soft_score = sum(soft_results) / len(results)
# print(f'Scores (h|s){mean} | {soft_score}')
return np.array(results)
@staticmethod
def series_eval_bootstrap(vectors: Union[Doc2Vec, DocumentKeyedVectors],
series_dictionary: Dict[str, list],
corpus: Corpus,
sample_size: int = 50,
nr_bootstraps: int = 10,
topn: int = 10,
series_sample: bool = True
):
random.seed(42)
seeds = random.sample([i for i in range(0, nr_bootstraps * 10)], nr_bootstraps)
if nr_bootstraps == 1:
return Evaluation.series_eval(vectors, series_dictionary, corpus, sample_size, seeds[0], topn)
reverted = Utils.revert_dictionaried_list(series_dictionary)
doctags = set([doctag for doctag in vectors.docvecs.doctags.keys() if doctag[-1].isdigit()])
# print(doctags)
# print(seeds)
bootstrap_results = []
for seed in seeds:
sample = Evaluation.sample_fun(doctags, sample_size=sample_size, series_dict=series_dictionary, seed=seed,
series_sample=series_sample)
results_fast = Evaluation.similar_docs_sample_results(vectors, corpus, reverted, sample, topn)
# results_avg, _ = Evaluation.similar_docs_avg(vectors, corpus, reverted, sample, topn)
# print(seed, Evaluation.mean(results_avg))
if not series_sample:
assert len(results_fast) == sample_size == len(sample)
# print('>>', len(results_fast))
# fix for results_avg?
bootstrap_results.append(EvaluationMath.mean(results_fast, std=False))
# print(results_avg)
# print(bootstrap_results)
# print(results2)
assert len(bootstrap_results) == nr_bootstraps
# print(bootstrap_results)
# mean = sum(bootstrap_results) / len(bootstrap_results)
# soft_score = sum(soft_results) / len(bootstrap_results)
# print(f'Scores (h|s){mean} | {soft_score}')
return np.array(bootstrap_results)
@staticmethod
def series_eval_full_data(vectors: Union[Doc2Vec, DocumentKeyedVectors],
series_dictionary: Dict[str, list],
corpus: Corpus,
topn: int = 10):
reverted = Utils.revert_dictionaried_list(series_dictionary)
doctags = vectors.docvecs.doctags.keys()
doctags = [doctag for doctag in doctags if doctag[-1].isdigit()]
logging.info(f'{len(doctags)} document ids found')
results_fast = Evaluation.similar_docs_sample_results(vectors, corpus, reverted, doctags, topn)
# results_avg, _ = Evaluation.similar_docs_avg(vectors, corpus, reverted, doctags, topn)
# print(seed, Evaluation.mean(results_avg))
return np.array(results_fast)
class EvaluationUtils:
@staticmethod
def build_paper_table(cache_df: pd.DataFrame, out_path: str) -> pd.DataFrame:
if isinstance(cache_df, str):
cache_df = | pd.read_csv(cache_df) | pandas.read_csv |
# Extra evaluation functions for additional ML tasks
import pandas as pd
import numpy as np
import pickle
import os
from pathlib import Path
from cvss import CVSS2
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, precision_recall_curve, auc, matthews_corrcoef
from xgboost import XGBClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
from lightgbm import LGBMClassifier
from scipy.sparse import hstack, vstack, coo_matrix
# Settings
time_partition = True # Use time-based folds
# Manual features
man_features = ['STARS', 'COMMITS', 'NS', 'LA', 'LD', 'LT', 'NDEV', 'AGE', 'NUC', 'EXP', 'NRF', 'abstract', 'assert', 'boolean', 'break', 'byte', 'case', 'catch', 'char', 'class', 'continue', 'const', 'default', 'do', 'double', 'else', 'enum', 'exports', 'extends', 'false', 'final', 'finally', 'float', 'for', 'goto', 'if', 'implements', 'import', 'instanceof', 'int', 'interface', 'long', 'module', 'native', 'new', 'null', 'package', 'private', 'protected', 'public', 'requires', 'return', 'short', 'static', 'strictfp', 'super', 'switch', 'synchronized', 'this', 'throw', 'throws', 'transient', 'true', 'try', 'var', 'void', 'volatile', 'while']
# Classift CVSS score severity
def severity(score):
if 0 <= score < 4: return('LOW')
elif 4 <= score < 7: return('Medium')
elif 7 <= score <= 10: return('High')
else: return 'Eh?'
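# Added sanity check (illustrative, not part of the original script): the CVSS v2
# qualitative buckets assumed by severity() above. Not called anywhere; run it manually
# if you want to verify the boundaries.
def _severity_sanity_check():
    assert severity(0.0) == 'LOW'      # [0, 4) -> LOW
    assert severity(3.9) == 'LOW'
    assert severity(4.0) == 'Medium'   # [4, 7) -> Medium
    assert severity(6.9) == 'Medium'
    assert severity(7.0) == 'High'     # [7, 10] -> High
    assert severity(10.0) == 'High'
    assert severity(11.0) == 'Eh?'     # out-of-range scores fall through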
# Get the optimal classifier from the holdout validation process. Return the hyperparameters
def get_best_classifier(problem, feature_type, feature_scope, token, alg, fold, sampling):
# Load the validation results
if problem in ['multiclass', 'binary']: results = pd.read_csv("ml_results/validate.csv")
elif problem == 'combined': results = | pd.read_csv("ml_results/combined_validate.csv") | pandas.read_csv |
"""
data hash pandas / numpy objects
"""
import itertools
from typing import Optional
import numpy as np
from pandas._libs import Timestamp
import pandas._libs.hashing as hashing
from pandas.core.dtypes.cast import infer_dtype_from_scalar
from pandas.core.dtypes.common import (
is_categorical_dtype,
is_extension_array_dtype,
is_list_like,
)
from pandas.core.dtypes.generic import (
ABCDataFrame,
ABCIndexClass,
ABCMultiIndex,
ABCSeries,
)
from pandas.core.dtypes.missing import isna
# 16 byte long hashing key
_default_hash_key = "0123456789123456"
def _combine_hash_arrays(arrays, num_items: int):
"""
Parameters
----------
arrays : generator
num_items : int
Should be the same as CPython's tupleobject.c
"""
try:
first = next(arrays)
except StopIteration:
return np.array([], dtype=np.uint64)
arrays = itertools.chain([first], arrays)
mult = np.uint64(1000003)
out = np.zeros_like(first) + np.uint64(0x345678)
for i, a in enumerate(arrays):
inverse_i = num_items - i
out ^= a
out *= mult
mult += np.uint64(82520 + inverse_i + inverse_i)
assert i + 1 == num_items, "Fed in wrong num_items"
out += np.uint64(97531)
return out
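# Added example (illustrative, not part of the original module): _combine_hash_arrays
# consumes a generator of equal-length uint64 arrays plus the number of arrays it will
# yield, and folds them into a single uint64 array (one combined hash per position).
# The helper below is never called; it only documents the expected calling convention.
def _example_combine_hash_arrays():
    a = np.array([1, 2, 3], dtype=np.uint64)
    b = np.array([4, 5, 6], dtype=np.uint64)
    combined = _combine_hash_arrays(iter([a, b]), num_items=2)
    assert combined.dtype == np.uint64 and combined.shape == a.shape
    return combined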
def hash_pandas_object(
obj,
index: bool = True,
encoding: str = "utf8",
hash_key: Optional[str] = _default_hash_key,
categorize: bool = True,
):
"""
Return a data hash of the Index/Series/DataFrame.
Parameters
----------
index : bool, default True
Include the index in the hash (if Series/DataFrame).
encoding : str, default 'utf8'
Encoding for data & key when strings.
hash_key : str, default _default_hash_key
Hash_key for string key to encode.
categorize : bool, default True
Whether to first categorize object arrays before hashing. This is more
efficient when the array contains duplicate values.
Returns
-------
Series of uint64, same length as the object
"""
from pandas import Series
if hash_key is None:
hash_key = _default_hash_key
if isinstance(obj, ABCMultiIndex):
return Series(hash_tuples(obj, encoding, hash_key), dtype="uint64", copy=False)
elif isinstance(obj, ABCIndexClass):
h = hash_array(obj.values, encoding, hash_key, categorize).astype(
"uint64", copy=False
)
h = Series(h, index=obj, dtype="uint64", copy=False)
elif isinstance(obj, ABCSeries):
h = hash_array(obj.values, encoding, hash_key, categorize).astype(
"uint64", copy=False
)
if index:
index_iter = (
hash_pandas_object(
obj.index,
index=False,
encoding=encoding,
hash_key=hash_key,
categorize=categorize,
).values
for _ in [None]
)
arrays = itertools.chain([h], index_iter)
h = _combine_hash_arrays(arrays, 2)
h = Series(h, index=obj.index, dtype="uint64", copy=False)
elif isinstance(obj, ABCDataFrame):
hashes = (hash_array(series.values) for _, series in obj.items())
num_items = len(obj.columns)
if index:
index_hash_generator = (
hash_pandas_object(
obj.index,
index=False,
encoding=encoding,
hash_key=hash_key,
categorize=categorize,
).values # noqa
for _ in [None]
)
num_items += 1
# keep `hashes` specifically a generator to keep mypy happy
_hashes = itertools.chain(hashes, index_hash_generator)
hashes = (x for x in _hashes)
h = _combine_hash_arrays(hashes, num_items)
h = Series(h, index=obj.index, dtype="uint64", copy=False)
else:
raise TypeError(f"Unexpected type for hashing {type(obj)}")
return h
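# Added example (illustrative, not part of the original module): hash_pandas_object
# returns one uint64 per row and is deterministic for identical input. pandas is
# imported locally here, mirroring the lazy imports used elsewhere in this module.
def _example_hash_pandas_object():
    import pandas as pd
    df = pd.DataFrame({"a": [1, 2, 3], "b": ["x", "y", "z"]})
    row_hashes = hash_pandas_object(df)   # Series of uint64, index preserved
    assert (row_hashes == hash_pandas_object(df)).all()
    return row_hashes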
def hash_tuples(vals, encoding="utf8", hash_key: str = _default_hash_key):
"""
Hash an MultiIndex / list-of-tuples efficiently
Parameters
----------
vals : MultiIndex, list-of-tuples, or single tuple
encoding : str, default 'utf8'
hash_key : str, default _default_hash_key
Returns
-------
ndarray of hashed values array
"""
is_tuple = False
if isinstance(vals, tuple):
vals = [vals]
is_tuple = True
elif not is_list_like(vals):
raise TypeError("must be convertible to a list-of-tuples")
from pandas import Categorical, MultiIndex
if not isinstance(vals, ABCMultiIndex):
vals = | MultiIndex.from_tuples(vals) | pandas.MultiIndex.from_tuples |
import csv
from io import StringIO
import os
import numpy as np
import pytest
from pandas.errors import ParserError
import pandas as pd
from pandas import (
DataFrame,
Index,
MultiIndex,
NaT,
Series,
Timestamp,
date_range,
read_csv,
to_datetime,
)
import pandas._testing as tm
import pandas.core.common as com
from pandas.io.common import get_handle
MIXED_FLOAT_DTYPES = ["float16", "float32", "float64"]
MIXED_INT_DTYPES = [
"uint8",
"uint16",
"uint32",
"uint64",
"int8",
"int16",
"int32",
"int64",
]
class TestDataFrameToCSV:
def read_csv(self, path, **kwargs):
params = {"index_col": 0, "parse_dates": True}
params.update(**kwargs)
return read_csv(path, **params)
def test_to_csv_from_csv1(self, float_frame, datetime_frame):
with tm.ensure_clean("__tmp_to_csv_from_csv1__") as path:
float_frame["A"][:5] = np.nan
float_frame.to_csv(path)
float_frame.to_csv(path, columns=["A", "B"])
float_frame.to_csv(path, header=False)
float_frame.to_csv(path, index=False)
# test roundtrip
# freq does not roundtrip
datetime_frame.index = datetime_frame.index._with_freq(None)
datetime_frame.to_csv(path)
recons = self.read_csv(path)
tm.assert_frame_equal(datetime_frame, recons)
datetime_frame.to_csv(path, index_label="index")
recons = self.read_csv(path, index_col=None)
assert len(recons.columns) == len(datetime_frame.columns) + 1
# no index
datetime_frame.to_csv(path, index=False)
recons = self.read_csv(path, index_col=None)
tm.assert_almost_equal(datetime_frame.values, recons.values)
# corner case
dm = DataFrame(
{
"s1": Series(range(3), index=np.arange(3)),
"s2": Series(range(2), index=np.arange(2)),
}
)
dm.to_csv(path)
recons = self.read_csv(path)
tm.assert_frame_equal(dm, recons)
def test_to_csv_from_csv2(self, float_frame):
with tm.ensure_clean("__tmp_to_csv_from_csv2__") as path:
# duplicate index
df = DataFrame(
np.random.randn(3, 3), index=["a", "a", "b"], columns=["x", "y", "z"]
)
df.to_csv(path)
result = self.read_csv(path)
tm.assert_frame_equal(result, df)
midx = MultiIndex.from_tuples([("A", 1, 2), ("A", 1, 2), ("B", 1, 2)])
df = DataFrame(np.random.randn(3, 3), index=midx, columns=["x", "y", "z"])
df.to_csv(path)
result = self.read_csv(path, index_col=[0, 1, 2], parse_dates=False)
tm.assert_frame_equal(result, df, check_names=False)
# column aliases
col_aliases = Index(["AA", "X", "Y", "Z"])
float_frame.to_csv(path, header=col_aliases)
rs = self.read_csv(path)
xp = float_frame.copy()
xp.columns = col_aliases
tm.assert_frame_equal(xp, rs)
msg = "Writing 4 cols but got 2 aliases"
with pytest.raises(ValueError, match=msg):
float_frame.to_csv(path, header=["AA", "X"])
def test_to_csv_from_csv3(self):
with tm.ensure_clean("__tmp_to_csv_from_csv3__") as path:
df1 = DataFrame(np.random.randn(3, 1))
df2 = DataFrame(np.random.randn(3, 1))
df1.to_csv(path)
df2.to_csv(path, mode="a", header=False)
xp = pd.concat([df1, df2])
rs = read_csv(path, index_col=0)
rs.columns = [int(label) for label in rs.columns]
xp.columns = [int(label) for label in xp.columns]
tm.assert_frame_equal(xp, rs)
def test_to_csv_from_csv4(self):
with tm.ensure_clean("__tmp_to_csv_from_csv4__") as path:
# GH 10833 (TimedeltaIndex formatting)
dt = pd.Timedelta(seconds=1)
df = DataFrame(
{"dt_data": [i * dt for i in range(3)]},
index=Index([i * dt for i in range(3)], name="dt_index"),
)
df.to_csv(path)
result = read_csv(path, index_col="dt_index")
result.index = pd.to_timedelta(result.index)
result["dt_data"] = pd.to_timedelta(result["dt_data"])
tm.assert_frame_equal(df, result, check_index_type=True)
def test_to_csv_from_csv5(self, timezone_frame):
# tz, 8260
with tm.ensure_clean("__tmp_to_csv_from_csv5__") as path:
timezone_frame.to_csv(path)
result = read_csv(path, index_col=0, parse_dates=["A"])
converter = (
lambda c: to_datetime(result[c])
.dt.tz_convert("UTC")
.dt.tz_convert(timezone_frame[c].dt.tz)
)
result["B"] = converter("B")
result["C"] = converter("C")
tm.assert_frame_equal(result, timezone_frame)
def test_to_csv_cols_reordering(self):
# GH3454
chunksize = 5
N = int(chunksize * 2.5)
df = tm.makeCustomDataframe(N, 3)
cs = df.columns
cols = [cs[2], cs[0]]
with tm.ensure_clean() as path:
df.to_csv(path, columns=cols, chunksize=chunksize)
rs_c = read_csv(path, index_col=0)
tm.assert_frame_equal(df[cols], rs_c, check_names=False)
def test_to_csv_new_dupe_cols(self):
def _check_df(df, cols=None):
with tm.ensure_clean() as path:
df.to_csv(path, columns=cols, chunksize=chunksize)
rs_c = read_csv(path, index_col=0)
# we wrote them in a different order
# so compare them in that order
if cols is not None:
if df.columns.is_unique:
rs_c.columns = cols
else:
indexer, missing = df.columns.get_indexer_non_unique(cols)
rs_c.columns = df.columns.take(indexer)
for c in cols:
obj_df = df[c]
obj_rs = rs_c[c]
if isinstance(obj_df, Series):
tm.assert_series_equal(obj_df, obj_rs)
else:
tm.assert_frame_equal(obj_df, obj_rs, check_names=False)
# wrote in the same order
else:
rs_c.columns = df.columns
tm.assert_frame_equal(df, rs_c, check_names=False)
chunksize = 5
N = int(chunksize * 2.5)
# dupe cols
df = tm.makeCustomDataframe(N, 3)
df.columns = ["a", "a", "b"]
_check_df(df, None)
# dupe cols with selection
cols = ["b", "a"]
_check_df(df, cols)
@pytest.mark.slow
def test_to_csv_dtnat(self):
# GH3437
def make_dtnat_arr(n, nnat=None):
if nnat is None:
nnat = int(n * 0.1) # 10%
s = list( | date_range("2000", freq="5min", periods=n) | pandas.date_range |
from __future__ import print_function
import unittest
import sqlite3
import csv
import os
import nose
import numpy as np
from pandas import DataFrame, Series
from pandas.compat import range, lrange, iteritems
#from pandas.core.datetools import format as date_format
import pandas.io.sql as sql
import pandas.util.testing as tm
try:
import sqlalchemy
SQLALCHEMY_INSTALLED = True
except ImportError:
SQLALCHEMY_INSTALLED = False
SQL_STRINGS = {
'create_iris': {
'sqlite': """CREATE TABLE iris (
`SepalLength` REAL,
`SepalWidth` REAL,
`PetalLength` REAL,
`PetalWidth` REAL,
`Name` TEXT
)""",
'mysql': """CREATE TABLE iris (
`SepalLength` DOUBLE,
`SepalWidth` DOUBLE,
`PetalLength` DOUBLE,
`PetalWidth` DOUBLE,
`Name` VARCHAR(200)
)""",
'postgresql': """CREATE TABLE iris (
"SepalLength" DOUBLE PRECISION,
"SepalWidth" DOUBLE PRECISION,
"PetalLength" DOUBLE PRECISION,
"PetalWidth" DOUBLE PRECISION,
"Name" VARCHAR(200)
)"""
},
'insert_iris': {
'sqlite': """INSERT INTO iris VALUES(?, ?, ?, ?, ?)""",
'mysql': """INSERT INTO iris VALUES(%s, %s, %s, %s, "%s");""",
'postgresql': """INSERT INTO iris VALUES(%s, %s, %s, %s, %s);"""
},
'create_test_types': {
'sqlite': """CREATE TABLE types_test_data (
`TextCol` TEXT,
`DateCol` TEXT,
`IntDateCol` INTEGER,
`FloatCol` REAL,
`IntCol` INTEGER,
`BoolCol` INTEGER,
`IntColWithNull` INTEGER,
`BoolColWithNull` INTEGER
)""",
'mysql': """CREATE TABLE types_test_data (
`TextCol` TEXT,
`DateCol` DATETIME,
`IntDateCol` INTEGER,
`FloatCol` DOUBLE,
`IntCol` INTEGER,
`BoolCol` BOOLEAN,
`IntColWithNull` INTEGER,
`BoolColWithNull` BOOLEAN
)""",
'postgresql': """CREATE TABLE types_test_data (
"TextCol" TEXT,
"DateCol" TIMESTAMP,
"IntDateCol" INTEGER,
"FloatCol" DOUBLE PRECISION,
"IntCol" INTEGER,
"BoolCol" BOOLEAN,
"IntColWithNull" INTEGER,
"BoolColWithNull" BOOLEAN
)"""
},
'insert_test_types': {
'sqlite': """
INSERT INTO types_test_data
VALUES(?, ?, ?, ?, ?, ?, ?, ?)
""",
'mysql': """
INSERT INTO types_test_data
VALUES("%s", %s, %s, %s, %s, %s, %s, %s)
""",
'postgresql': """
INSERT INTO types_test_data
VALUES(%s, %s, %s, %s, %s, %s, %s, %s)
"""
}
}
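# Added note (illustrative, not part of the original test module): the templates above are
# keyed first by statement and then by flavor, e.g. SQL_STRINGS['create_iris']['sqlite'] is
# the CREATE TABLE statement that _load_iris_data() below executes for the sqlite backend.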
class PandasSQLTest(unittest.TestCase):
"""Base class with common private methods for
SQLAlchemy and fallback cases.
"""
def drop_table(self, table_name):
self._get_exec().execute("DROP TABLE IF EXISTS %s" % table_name)
def _get_exec(self):
if hasattr(self.conn, 'execute'):
return self.conn
else:
return self.conn.cursor()
def _load_iris_data(self):
iris_csv_file = os.path.join(tm.get_data_path(), 'iris.csv')
self.drop_table('iris')
self._get_exec().execute(SQL_STRINGS['create_iris'][self.flavor])
with open(iris_csv_file, 'rU') as iris_csv:
r = csv.reader(iris_csv)
next(r) # skip header row
ins = SQL_STRINGS['insert_iris'][self.flavor]
for row in r:
self._get_exec().execute(ins, row)
def _check_iris_loaded_frame(self, iris_frame):
pytype = iris_frame.dtypes[0].type
row = iris_frame.iloc[0]
self.assertTrue(
issubclass(pytype, np.floating), 'Loaded frame has incorrect type')
tm.equalContents(row.values, [5.1, 3.5, 1.4, 0.2, 'Iris-setosa'])
def _load_test1_data(self):
columns = ['index', 'A', 'B', 'C', 'D']
data = [(
'2000-01-03 00:00:00', 0.980268513777, 3.68573087906, -0.364216805298, -1.15973806169),
('2000-01-04 00:00:00', 1.04791624281, -
0.0412318367011, -0.16181208307, 0.212549316967),
('2000-01-05 00:00:00', 0.498580885705,
0.731167677815, -0.537677223318, 1.34627041952),
('2000-01-06 00:00:00', 1.12020151869, 1.56762092543, 0.00364077397681, 0.67525259227)]
self.test_frame1 = DataFrame(data, columns=columns)
def _load_raw_sql(self):
self.drop_table('types_test_data')
self._get_exec().execute(SQL_STRINGS['create_test_types'][self.flavor])
ins = SQL_STRINGS['insert_test_types'][self.flavor]
data = [(
'first', '2000-01-03 00:00:00', 535852800, 10.10, 1, False, 1, False),
('first', '2000-01-04 00:00:00', 1356998400, 10.10, 1, False, None, None)]
for d in data:
self._get_exec().execute(ins, d)
def _count_rows(self, table_name):
result = self._get_exec().execute(
"SELECT count(*) AS count_1 FROM %s" % table_name).fetchone()
return result[0]
def _read_sql_iris(self):
iris_frame = self.pandasSQL.read_sql("SELECT * FROM iris")
self._check_iris_loaded_frame(iris_frame)
def _to_sql(self):
self.drop_table('test_frame1')
self.pandasSQL.to_sql(self.test_frame1, 'test_frame1')
self.assertTrue(self.pandasSQL.has_table(
'test_frame1'), 'Table not written to DB')
# Nuke table
self.drop_table('test_frame1')
def _to_sql_fail(self):
self.drop_table('test_frame1')
self.pandasSQL.to_sql(
self.test_frame1, 'test_frame1', if_exists='fail')
self.assertTrue(self.pandasSQL.has_table(
'test_frame1'), 'Table not written to DB')
self.assertRaises(ValueError, self.pandasSQL.to_sql,
self.test_frame1, 'test_frame1', if_exists='fail')
self.drop_table('test_frame1')
def _to_sql_replace(self):
self.drop_table('test_frame1')
self.pandasSQL.to_sql(
self.test_frame1, 'test_frame1', if_exists='fail')
# Add to table again
self.pandasSQL.to_sql(
self.test_frame1, 'test_frame1', if_exists='replace')
self.assertTrue(self.pandasSQL.has_table(
'test_frame1'), 'Table not written to DB')
num_entries = len(self.test_frame1)
num_rows = self._count_rows('test_frame1')
self.assertEqual(
num_rows, num_entries, "not the same number of rows as entries")
self.drop_table('test_frame1')
def _to_sql_append(self):
# Nuke table just in case
self.drop_table('test_frame1')
self.pandasSQL.to_sql(
self.test_frame1, 'test_frame1', if_exists='fail')
# Add to table again
self.pandasSQL.to_sql(
self.test_frame1, 'test_frame1', if_exists='append')
self.assertTrue(self.pandasSQL.has_table(
'test_frame1'), 'Table not written to DB')
num_entries = 2 * len(self.test_frame1)
num_rows = self._count_rows('test_frame1')
self.assertEqual(
num_rows, num_entries, "not the same number of rows as entries")
self.drop_table('test_frame1')
def _roundtrip(self):
self.drop_table('test_frame_roundtrip')
self.pandasSQL.to_sql(self.test_frame1, 'test_frame_roundtrip')
result = self.pandasSQL.read_sql('SELECT * FROM test_frame_roundtrip')
result.set_index('pandas_index', inplace=True)
# result.index.astype(int)
result.index.name = None
tm.assert_frame_equal(result, self.test_frame1)
def _execute_sql(self):
# drop_sql = "DROP TABLE IF EXISTS test" # should already be done
iris_results = self.pandasSQL.execute("SELECT * FROM iris")
row = iris_results.fetchone()
tm.equalContents(row, [5.1, 3.5, 1.4, 0.2, 'Iris-setosa'])
def _tquery(self):
iris_results = self.pandasSQL.tquery("SELECT * FROM iris")
row = iris_results[0]
tm.equalContents(row, [5.1, 3.5, 1.4, 0.2, 'Iris-setosa'])
class TestSQLApi(PandasSQLTest):
"""Test the public API as it would be used
directly, including legacy names
Notes:
flavor can always be passed even in SQLAlchemy mode,
should be correctly ignored.
We don't use drop_table because that isn't part of the public API.
"""
flavor = 'sqlite'
def connect(self):
if SQLALCHEMY_INSTALLED:
return sqlalchemy.create_engine('sqlite:///:memory:')
else:
return sqlite3.connect(':memory:')
def setUp(self):
self.conn = self.connect()
self._load_iris_data()
self._load_test1_data()
self._load_raw_sql()
def test_read_sql_iris(self):
iris_frame = sql.read_sql(
"SELECT * FROM iris", self.conn, flavor='sqlite')
self._check_iris_loaded_frame(iris_frame)
def test_legacy_read_frame(self):
"""Test legacy name read_frame"""
iris_frame = sql.read_frame(
"SELECT * FROM iris", self.conn, flavor='sqlite')
self._check_iris_loaded_frame(iris_frame)
def test_to_sql(self):
sql.to_sql(self.test_frame1, 'test_frame1', self.conn, flavor='sqlite')
self.assertTrue(
sql.has_table('test_frame1', self.conn, flavor='sqlite'), 'Table not written to DB')
def test_to_sql_fail(self):
sql.to_sql(self.test_frame1, 'test_frame2',
self.conn, flavor='sqlite', if_exists='fail')
self.assertTrue(
sql.has_table('test_frame2', self.conn, flavor='sqlite'), 'Table not written to DB')
self.assertRaises(ValueError, sql.to_sql, self.test_frame1,
'test_frame2', self.conn, flavor='sqlite', if_exists='fail')
def test_to_sql_replace(self):
sql.to_sql(self.test_frame1, 'test_frame3',
self.conn, flavor='sqlite', if_exists='fail')
# Add to table again
sql.to_sql(self.test_frame1, 'test_frame3',
self.conn, flavor='sqlite', if_exists='replace')
self.assertTrue(
sql.has_table('test_frame3', self.conn, flavor='sqlite'), 'Table not written to DB')
num_entries = len(self.test_frame1)
num_rows = self._count_rows('test_frame3')
self.assertEqual(
num_rows, num_entries, "not the same number of rows as entries")
def test_to_sql_append(self):
sql.to_sql(self.test_frame1, 'test_frame4',
self.conn, flavor='sqlite', if_exists='fail')
# Add to table again
sql.to_sql(self.test_frame1, 'test_frame4',
self.conn, flavor='sqlite', if_exists='append')
self.assertTrue(
sql.has_table('test_frame4', self.conn, flavor='sqlite'), 'Table not written to DB')
num_entries = 2 * len(self.test_frame1)
num_rows = self._count_rows('test_frame4')
self.assertEqual(
num_rows, num_entries, "not the same number of rows as entries")
def test_legacy_write_frame(self):
"""Test legacy write frame name.
Assume that functionality is already tested above, so just do a quick check that it basically works."""
sql.write_frame(
self.test_frame1, 'test_frame_legacy', self.conn, flavor='sqlite')
self.assertTrue(
| sql.has_table('test_frame_legacy', self.conn, flavor='sqlite') | pandas.io.sql.has_table |
from absl import logging
from collections import defaultdict
from credoai.utils.common import to_array, NotRunError, ValidationError
from credoai.metrics import Metric, find_metrics, MODEL_METRIC_CATEGORIES
from credoai.modules.credo_module import CredoModule
from fairlearn.metrics import MetricFrame
from scipy.stats import norm
from sklearn.utils import check_consistent_length
from typing import List, Union
import pandas as pd
class PerformanceModule(CredoModule):
"""
Performance module for Credo AI. Handles any metric that can be
calculated on a set of ground truth labels and predictions,
e.g., binary classification, multiclass classification, regression.
This module takes in a set of metrics and provides functionality to:
- calculate the metrics
- create disaggregated metrics
Parameters
----------
metrics : List-like
list of metric names as string or list of Metrics (credoai.metrics.Metric).
Metric strings should in list returned by credoai.metrics.list_metrics.
Note for performance parity metrics like
"false negative rate parity" just list "false negative rate". Parity metrics
are calculated automatically if the performance metric is supplied
y_true : (List, pandas.Series, numpy.ndarray)
The ground-truth labels (for classification) or target values (for regression).
y_pred : (List, pandas.Series, numpy.ndarray)
The predicted labels for classification
y_prob : (List, pandas.Series, numpy.ndarray), optional
The unthresholded predictions, confidence values or probabilities.
sensitive_features : pandas.DataFrame
The segmentation feature(s) which should be used to create subgroups to analyze.
"""
def __init__(self,
metrics,
y_true,
y_pred,
y_prob=None,
sensitive_features=None
):
super().__init__()
# data variables
self.y_true = to_array(y_true)
self.y_pred = to_array(y_pred)
self.y_prob = to_array(y_prob) if y_prob is not None else None
self.perform_disaggregation = True
if sensitive_features is None:
self.perform_disaggregation = False
# only set to use metric frame
sensitive_features = pd.DataFrame({'NA': ['NA'] * len(self.y_true)})
self.sensitive_features = sensitive_features
self._validate_inputs()
# assign variables
self.metrics = metrics
self.metric_frames = {}
self.performance_metrics = None
self.prob_metrics = None
self.failed_metrics = None
self.update_metrics(metrics)
def run(self):
"""
Run performance base module
Returns
-------
self
"""
self.results = {'overall_performance': self.get_overall_metrics()}
if self.perform_disaggregation:
self.results.update(self.get_disaggregated_performance())
return self
def prepare_results(self, filter=None):
"""Prepares results for Credo AI's governance platform
Structures results for export as a dataframe with appropriate structure
for exporting. See credoai.modules.credo_module.
Parameters
----------
filter : str, optional
Regex string to filter performance results if only a subset are desired.
Passed as a regex argument to pandas `filter` function applied to the
concatenated overall and disaggregated performance results, by default None
Returns
-------
pd.DataFrame
Raises
------
NotRunError
Occurs if self.run is not called yet to generate the raw assessment results
"""
if self.results is not None:
if 'overall_performance' in self.results:
results = self.results['overall_performance']
else:
results = pd.DataFrame()
if self.perform_disaggregation:
for sf_name in self.sensitive_features:
disaggregated_df = self.results[f'{sf_name}-disaggregated_performance'].copy()
disaggregated_df = disaggregated_df.reset_index().melt(
id_vars=[disaggregated_df.index.name, 'subtype'], var_name='metric_type'
).set_index('metric_type')
disaggregated_df['sensitive_feature'] = sf_name
results = pd.concat([results, disaggregated_df])
if filter:
results = results.filter(regex=filter)
return results
else:
raise NotRunError(
"Results not created yet. Call 'run' with appropriate arguments before preparing results"
)
def update_metrics(self, metrics, replace=True):
"""replace metrics
Parameters
----------
metrics : List-like
list of metric names as string or list of Metrics (credoai.metrics.Metric).
Metric strings should in list returned by credoai.metrics.list_metrics.
Note for performance parity metrics like
"false negative rate parity" just list "false negative rate". Parity metrics
are calculated automatically if the performance metric is supplied
"""
if replace:
self.metrics = metrics
else:
self.metrics += metrics
(self.performance_metrics,
self.prob_metrics,
self.failed_metrics) = self._process_metrics(self.metrics)
self._setup_metric_frames()
def get_df(self):
"""Return dataframe of input arrays
Returns
-------
pandas.DataFrame
Dataframe containing the input arrays
"""
df = pd.DataFrame({'true': self.y_true,
'pred': self.y_pred}).reset_index(drop=True)
df = df.join(self.get_sensitive_features())
if self.y_prob is not None:
y_prob_df = | pd.DataFrame(self.y_prob) | pandas.DataFrame |
from datetime import (
datetime,
timedelta,
timezone,
)
import numpy as np
import pytest
import pytz
from pandas import (
Categorical,
DataFrame,
DatetimeIndex,
NaT,
Period,
Series,
Timedelta,
Timestamp,
date_range,
isna,
)
import pandas._testing as tm
class TestSeriesFillNA:
def test_fillna_nat(self):
series = Series([0, 1, 2, NaT.value], dtype="M8[ns]")
filled = series.fillna(method="pad")
filled2 = series.fillna(value=series.values[2])
expected = series.copy()
expected.values[3] = expected.values[2]
tm.assert_series_equal(filled, expected)
tm.assert_series_equal(filled2, expected)
df = DataFrame({"A": series})
filled = df.fillna(method="pad")
filled2 = df.fillna(value=series.values[2])
expected = DataFrame({"A": expected})
tm.assert_frame_equal(filled, expected)
tm.assert_frame_equal(filled2, expected)
series = Series([NaT.value, 0, 1, 2], dtype="M8[ns]")
filled = series.fillna(method="bfill")
filled2 = series.fillna(value=series[1])
expected = series.copy()
expected[0] = expected[1]
tm.assert_series_equal(filled, expected)
tm.assert_series_equal(filled2, expected)
df = DataFrame({"A": series})
filled = df.fillna(method="bfill")
filled2 = df.fillna(value=series[1])
expected = DataFrame({"A": expected})
tm.assert_frame_equal(filled, expected)
tm.assert_frame_equal(filled2, expected)
def test_fillna_value_or_method(self, datetime_series):
msg = "Cannot specify both 'value' and 'method'"
with pytest.raises(ValueError, match=msg):
datetime_series.fillna(value=0, method="ffill")
def test_fillna(self):
ts = Series([0.0, 1.0, 2.0, 3.0, 4.0], index=tm.makeDateIndex(5))
tm.assert_series_equal(ts, ts.fillna(method="ffill"))
ts[2] = np.NaN
exp = Series([0.0, 1.0, 1.0, 3.0, 4.0], index=ts.index)
tm.assert_series_equal(ts.fillna(method="ffill"), exp)
exp = Series([0.0, 1.0, 3.0, 3.0, 4.0], index=ts.index)
tm.assert_series_equal(ts.fillna(method="backfill"), exp)
exp = Series([0.0, 1.0, 5.0, 3.0, 4.0], index=ts.index)
tm.assert_series_equal(ts.fillna(value=5), exp)
msg = "Must specify a fill 'value' or 'method'"
with pytest.raises(ValueError, match=msg):
ts.fillna()
def test_fillna_nonscalar(self):
# GH#5703
s1 = Series([np.nan])
s2 = Series([1])
result = s1.fillna(s2)
expected = Series([1.0])
tm.assert_series_equal(result, expected)
result = s1.fillna({})
tm.assert_series_equal(result, s1)
result = s1.fillna(Series((), dtype=object))
tm.assert_series_equal(result, s1)
result = s2.fillna(s1)
tm.assert_series_equal(result, s2)
result = s1.fillna({0: 1})
tm.assert_series_equal(result, expected)
result = s1.fillna({1: 1})
tm.assert_series_equal(result, Series([np.nan]))
result = s1.fillna({0: 1, 1: 1})
tm.assert_series_equal(result, expected)
result = s1.fillna(Series({0: 1, 1: 1}))
tm.assert_series_equal(result, expected)
result = s1.fillna(Series({0: 1, 1: 1}, index=[4, 5]))
tm.assert_series_equal(result, s1)
def test_fillna_aligns(self):
s1 = Series([0, 1, 2], list("abc"))
s2 = Series([0, np.nan, 2], list("bac"))
result = s2.fillna(s1)
expected = Series([0, 0, 2.0], list("bac"))
tm.assert_series_equal(result, expected)
def test_fillna_limit(self):
ser = Series(np.nan, index=[0, 1, 2])
result = ser.fillna(999, limit=1)
expected = Series([999, np.nan, np.nan], index=[0, 1, 2])
tm.assert_series_equal(result, expected)
result = ser.fillna(999, limit=2)
expected = Series([999, 999, np.nan], index=[0, 1, 2])
tm.assert_series_equal(result, expected)
def test_fillna_dont_cast_strings(self):
# GH#9043
# make sure a string representation of int/float values can be filled
# correctly without raising errors or being converted
vals = ["0", "1.5", "-0.3"]
for val in vals:
ser = Series([0, 1, np.nan, np.nan, 4], dtype="float64")
result = ser.fillna(val)
expected = Series([0, 1, val, val, 4], dtype="object")
tm.assert_series_equal(result, expected)
def test_fillna_consistency(self):
# GH#16402
# fillna with a tz aware to a tz-naive, should result in object
ser = Series([Timestamp("20130101"), NaT])
result = ser.fillna(Timestamp("20130101", tz="US/Eastern"))
expected = Series(
[Timestamp("20130101"), Timestamp("2013-01-01", tz="US/Eastern")],
dtype="object",
)
tm.assert_series_equal(result, expected)
msg = "The 'errors' keyword in "
with tm.assert_produces_warning(FutureWarning, match=msg):
# where (we ignore the errors=)
result = ser.where(
[True, False], Timestamp("20130101", tz="US/Eastern"), errors="ignore"
)
tm.assert_series_equal(result, expected)
with tm.assert_produces_warning(FutureWarning, match=msg):
result = ser.where(
[True, False], Timestamp("20130101", tz="US/Eastern"), errors="ignore"
)
tm.assert_series_equal(result, expected)
# with a non-datetime
result = ser.fillna("foo")
expected = Series([Timestamp("20130101"), "foo"])
tm.assert_series_equal(result, expected)
# assignment
ser2 = ser.copy()
ser2[1] = "foo"
tm.assert_series_equal(ser2, expected)
def test_fillna_downcast(self):
# GH#15277
# infer int64 from float64
ser = Series([1.0, np.nan])
result = ser.fillna(0, downcast="infer")
expected = Series([1, 0])
tm.assert_series_equal(result, expected)
# infer int64 from float64 when fillna value is a dict
ser = Series([1.0, np.nan])
result = ser.fillna({1: 0}, downcast="infer")
expected = Series([1, 0])
tm.assert_series_equal(result, expected)
def test_timedelta_fillna(self, frame_or_series):
# GH#3371
ser = Series(
[
Timestamp("20130101"),
Timestamp("20130101"),
Timestamp("20130102"),
Timestamp("20130103 9:01:01"),
]
)
td = ser.diff()
obj = frame_or_series(td)
# reg fillna
result = obj.fillna(Timedelta(seconds=0))
expected = Series(
[
timedelta(0),
timedelta(0),
timedelta(1),
timedelta(days=1, seconds=9 * 3600 + 60 + 1),
]
)
expected = frame_or_series(expected)
tm.assert_equal(result, expected)
# interpreted as seconds, no longer supported
msg = "value should be a 'Timedelta', 'NaT', or array of those. Got 'int'"
with pytest.raises(TypeError, match=msg):
obj.fillna(1)
result = obj.fillna(Timedelta(seconds=1))
expected = Series(
[
timedelta(seconds=1),
timedelta(0),
timedelta(1),
timedelta(days=1, seconds=9 * 3600 + 60 + 1),
]
)
expected = frame_or_series(expected)
tm.assert_equal(result, expected)
result = obj.fillna(timedelta(days=1, seconds=1))
expected = Series(
[
timedelta(days=1, seconds=1),
timedelta(0),
timedelta(1),
timedelta(days=1, seconds=9 * 3600 + 60 + 1),
]
)
expected = frame_or_series(expected)
tm.assert_equal(result, expected)
result = obj.fillna(np.timedelta64(10 ** 9))
expected = Series(
[
timedelta(seconds=1),
timedelta(0),
timedelta(1),
timedelta(days=1, seconds=9 * 3600 + 60 + 1),
]
)
expected = frame_or_series(expected)
tm.assert_equal(result, expected)
result = obj.fillna(NaT)
expected = Series(
[
NaT,
timedelta(0),
timedelta(1),
timedelta(days=1, seconds=9 * 3600 + 60 + 1),
],
dtype="m8[ns]",
)
expected = frame_or_series(expected)
tm.assert_equal(result, expected)
# ffill
td[2] = np.nan
obj = frame_or_series(td)
result = obj.ffill()
expected = td.fillna(Timedelta(seconds=0))
expected[0] = np.nan
expected = frame_or_series(expected)
tm.assert_equal(result, expected)
# bfill
td[2] = np.nan
obj = frame_or_series(td)
result = obj.bfill()
expected = td.fillna(Timedelta(seconds=0))
expected[2] = timedelta(days=1, seconds=9 * 3600 + 60 + 1)
expected = frame_or_series(expected)
tm.assert_equal(result, expected)
def test_datetime64_fillna(self):
ser = Series(
[
Timestamp("20130101"),
Timestamp("20130101"),
Timestamp("20130102"),
Timestamp("20130103 9:01:01"),
]
)
ser[2] = np.nan
# ffill
result = ser.ffill()
expected = Series(
[
Timestamp("20130101"),
Timestamp("20130101"),
Timestamp("20130101"),
Timestamp("20130103 9:01:01"),
]
)
tm.assert_series_equal(result, expected)
# bfill
result = ser.bfill()
expected = Series(
[
Timestamp("20130101"),
Timestamp("20130101"),
Timestamp("20130103 9:01:01"),
Timestamp("20130103 9:01:01"),
]
)
tm.assert_series_equal(result, expected)
def test_datetime64_fillna_backfill(self):
# GH#6587
# make sure that we are treating as integer when filling
msg = "containing strings is deprecated"
with tm.assert_produces_warning(FutureWarning, match=msg):
# this also tests inference of a datetime-like with NaT's
ser = Series([NaT, NaT, "2013-08-05 15:30:00.000001"])
expected = Series(
[
"2013-08-05 15:30:00.000001",
"2013-08-05 15:30:00.000001",
"2013-08-05 15:30:00.000001",
],
dtype="M8[ns]",
)
result = ser.fillna(method="backfill")
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("tz", ["US/Eastern", "Asia/Tokyo"])
def test_datetime64_tz_fillna(self, tz):
# DatetimeLikeBlock
ser = Series(
[
Timestamp("2011-01-01 10:00"),
NaT,
Timestamp("2011-01-03 10:00"),
NaT,
]
)
null_loc = Series([False, True, False, True])
result = ser.fillna(Timestamp("2011-01-02 10:00"))
expected = Series(
[
Timestamp("2011-01-01 10:00"),
Timestamp("2011-01-02 10:00"),
Timestamp("2011-01-03 10:00"),
Timestamp("2011-01-02 10:00"),
]
)
tm.assert_series_equal(expected, result)
# check s is not changed
tm.assert_series_equal(isna(ser), null_loc)
result = ser.fillna(Timestamp("2011-01-02 10:00", tz=tz))
expected = Series(
[
Timestamp("2011-01-01 10:00"),
Timestamp("2011-01-02 10:00", tz=tz),
Timestamp("2011-01-03 10:00"),
Timestamp("2011-01-02 10:00", tz=tz),
]
)
tm.assert_series_equal(expected, result)
tm.assert_series_equal(isna(ser), null_loc)
result = ser.fillna("AAA")
expected = Series(
[
Timestamp("2011-01-01 10:00"),
"AAA",
Timestamp("2011-01-03 10:00"),
"AAA",
],
dtype=object,
)
tm.assert_series_equal(expected, result)
tm.assert_series_equal(isna(ser), null_loc)
result = ser.fillna(
{
1: Timestamp("2011-01-02 10:00", tz=tz),
3: Timestamp("2011-01-04 10:00"),
}
)
expected = Series(
[
Timestamp("2011-01-01 10:00"),
Timestamp("2011-01-02 10:00", tz=tz),
Timestamp("2011-01-03 10:00"),
Timestamp("2011-01-04 10:00"),
]
)
tm.assert_series_equal(expected, result)
tm.assert_series_equal(isna(ser), null_loc)
result = ser.fillna(
{1: Timestamp("2011-01-02 10:00"), 3: Timestamp("2011-01-04 10:00")}
)
expected = Series(
[
Timestamp("2011-01-01 10:00"),
Timestamp("2011-01-02 10:00"),
Timestamp("2011-01-03 10:00"),
Timestamp("2011-01-04 10:00"),
]
)
tm.assert_series_equal(expected, result)
tm.assert_series_equal(isna(ser), null_loc)
# DatetimeTZBlock
idx = DatetimeIndex(["2011-01-01 10:00", NaT, "2011-01-03 10:00", NaT], tz=tz)
ser = Series(idx)
assert ser.dtype == f"datetime64[ns, {tz}]"
tm.assert_series_equal(isna(ser), null_loc)
result = ser.fillna(Timestamp("2011-01-02 10:00"))
expected = Series(
[
| Timestamp("2011-01-01 10:00", tz=tz) | pandas.Timestamp |
import os
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
def plot(file_path='./', output_name='result', xlabel=None, ylabel=None):
files= os.listdir(file_path)
datas = []
for file in files:
if os.path.splitext(file)[1] != '.csv': continue
data = pd.read_csv(os.path.join(file_path, file))
datas.append(data)
minX = min([len(data) for data in datas])
data = | pd.concat(datas) | pandas.concat |
# -*- coding: utf-8 -*-
from __future__ import print_function
from distutils.version import LooseVersion
from numpy import nan, random
import numpy as np
from pandas.compat import lrange
from pandas import (DataFrame, Series, Timestamp,
date_range)
import pandas as pd
from pandas.util.testing import (assert_series_equal,
assert_frame_equal,
assertRaisesRegexp)
import pandas.util.testing as tm
from pandas.tests.frame.common import TestData, _check_mixed_float
def _skip_if_no_pchip():
try:
from scipy.interpolate import pchip_interpolate # noqa
except ImportError:
import nose
raise nose.SkipTest('scipy.interpolate.pchip missing')
class TestDataFrameMissingData(tm.TestCase, TestData):
_multiprocess_can_split_ = True
def test_dropEmptyRows(self):
N = len(self.frame.index)
mat = random.randn(N)
mat[:5] = nan
frame = DataFrame({'foo': mat}, index=self.frame.index)
original = Series(mat, index=self.frame.index, name='foo')
expected = original.dropna()
inplace_frame1, inplace_frame2 = frame.copy(), frame.copy()
smaller_frame = frame.dropna(how='all')
# check that original was preserved
assert_series_equal(frame['foo'], original)
inplace_frame1.dropna(how='all', inplace=True)
assert_series_equal(smaller_frame['foo'], expected)
assert_series_equal(inplace_frame1['foo'], expected)
smaller_frame = frame.dropna(how='all', subset=['foo'])
inplace_frame2.dropna(how='all', subset=['foo'], inplace=True)
assert_series_equal(smaller_frame['foo'], expected)
assert_series_equal(inplace_frame2['foo'], expected)
def test_dropIncompleteRows(self):
N = len(self.frame.index)
mat = random.randn(N)
mat[:5] = nan
frame = DataFrame({'foo': mat}, index=self.frame.index)
frame['bar'] = 5
original = Series(mat, index=self.frame.index, name='foo')
inp_frame1, inp_frame2 = frame.copy(), frame.copy()
smaller_frame = frame.dropna()
assert_series_equal(frame['foo'], original)
inp_frame1.dropna(inplace=True)
exp = Series(mat[5:], index=self.frame.index[5:], name='foo')
tm.assert_series_equal(smaller_frame['foo'], exp)
tm.assert_series_equal(inp_frame1['foo'], exp)
samesize_frame = frame.dropna(subset=['bar'])
assert_series_equal(frame['foo'], original)
self.assertTrue((frame['bar'] == 5).all())
inp_frame2.dropna(subset=['bar'], inplace=True)
self.assert_index_equal(samesize_frame.index, self.frame.index)
self.assert_index_equal(inp_frame2.index, self.frame.index)
def test_dropna(self):
df = DataFrame(np.random.randn(6, 4))
df[2][:2] = nan
dropped = df.dropna(axis=1)
expected = df.ix[:, [0, 1, 3]]
inp = df.copy()
inp.dropna(axis=1, inplace=True)
assert_frame_equal(dropped, expected)
assert_frame_equal(inp, expected)
dropped = df.dropna(axis=0)
expected = df.ix[lrange(2, 6)]
inp = df.copy()
inp.dropna(axis=0, inplace=True)
assert_frame_equal(dropped, expected)
assert_frame_equal(inp, expected)
# threshold
dropped = df.dropna(axis=1, thresh=5)
expected = df.ix[:, [0, 1, 3]]
inp = df.copy()
inp.dropna(axis=1, thresh=5, inplace=True)
assert_frame_equal(dropped, expected)
assert_frame_equal(inp, expected)
dropped = df.dropna(axis=0, thresh=4)
expected = df.ix[lrange(2, 6)]
inp = df.copy()
inp.dropna(axis=0, thresh=4, inplace=True)
| assert_frame_equal(dropped, expected) | pandas.util.testing.assert_frame_equal |
from datetime import datetime
from itertools import islice
from time import sleep
import openpyxl
import pandas as pd
from django.conf import settings
from django.core.mail import EmailMultiAlternatives
from django.db import models
from django.db.models import CharField, EmailField, ManyToManyField
from medusa_website.utils.general import chunks, get_pretty_logger
logger = get_pretty_logger(__name__)
class MemberRecord(models.Model):
email = EmailField(primary_key=True, unique=True)
name = CharField(blank=True, null=True, max_length=256)
end_date = models.DateField("End date")
import_date = models.DateField(auto_now_add=True)
is_welcome_email_sent = models.BooleanField(default=False)
date_welcome_email_sent = models.DateField(null=True, blank=True)
def save(self, force_insert=False, force_update=False, using=None, update_fields=None):
self.email = self.email.lower()
super(MemberRecord, self).save()
def send_welcome_email(self):
if self.is_welcome_email_sent is True:
raise Warning("Welcome email already sent!")
msg = self.gen_welcome_email()
msg.send()
self.marked_welcome_email_sent()
def marked_welcome_email_sent(self):
self.is_welcome_email_sent = True
self.date_welcome_email_sent = datetime.today().date()
self.save()
def gen_welcome_email(self):
message = (
f"Dear {self.name},\n\n"
f"Thank you for signing up for MeDUSA.\n\n"
f"Please complete your registration by signing up for the MeDUSA Website here: "
f"https://www.medusa.org.au/accounts/signup/\n\n"
f"Registering will give you access to the MCQ Bank, OSCE Bank and generate your MeDUSA ID number "
f"which will give you MeDUSA discounts for event tickets.\n\n"
f"Additionally our 1st year Survival Guide can be found here: https://www.medusa.org.au/publications/\n\n"
f"Kind regards,\n\n"
f"The MeDUSA team."
)
msg = EmailMultiAlternatives(
subject="Welcome to MeDUSA",
body=message,
from_email=settings.DEFAULT_FROM_EMAIL,
to=[self.email],
bcc=None,
reply_to=["<EMAIL>"],
)
return msg
@property
def is_expired(self) -> bool:
return datetime.today().date() >= self.end_date
@property
def has_members_record_import(self):
return True if self.member_record_imports.all().count() > 0 else False
@property
def exists_as_user(self) -> bool:
from medusa_website.users.models import User
try:
User.objects.get(email=self.email)
return True
except User.DoesNotExist:
return False
@classmethod
def send_welcome_emails_to_all_required(cls):
welcome_emails_and_members = [] # Will be a list of tuples of (Member, EmailMsg)
unsent_members = cls.objects.filter(is_welcome_email_sent=False)
for member in unsent_members:
if not member.exists_as_user and not member.is_expired:
welcome_emails_and_members.append((member, member.gen_welcome_email()))
if len(welcome_emails_and_members) > 0:
from django.core import mail
logger.info(f"Sending {len(welcome_emails_and_members)} Welcome emails")
connection = mail.get_connection() # Use default email connection
emails_sent = ""
for batch in chunks(welcome_emails_and_members, settings.EMAIL_API_MAX_BATCH_SIZE):
emails_to_send = [msg for (member, msg) in batch]
members_emailed = [member for (member, msg) in batch]
emails_sent += "\n".join([msg.to[0] for msg in emails_to_send])
if settings.DEBUG:
logger.warning(
f"DID NOT SEND MESSAGES OR MARK AS SENT, msgs would be sent to"
+ "\n".join([msg.to[0] for msg in emails_to_send])
)
else:
connection.send_messages(emails_to_send)
for member in members_emailed:
member.marked_welcome_email_sent()
sleep(1)
notification_body = f"Sent {len(welcome_emails_and_members)} emails to:\n" f"{emails_sent}"
notification_msg = EmailMultiAlternatives(
subject="Welcome Email Notification",
body=notification_body,
from_email=settings.DEFAULT_FROM_EMAIL,
to=["<EMAIL>"],
bcc=None,
reply_to=["<EMAIL>"],
)
notification_msg.send()
logger.info(f"Sending emails successfully")
else:
logger.info(f"No welcome emails to send!")
class MemberRecordsImport(models.Model):
"""Represents a membership record from the DUSA exports"""
members = ManyToManyField(MemberRecord, blank=True, related_name="member_record_imports")
import_dt = models.DateTimeField("Import time", auto_now_add=True)
report_date = models.DateField(help_text="Date the report was generated", unique=True)
file = models.FileField(upload_to="dusa_reports/%Y", null=True, blank=True)
def import_memberlist(self) -> pd.DataFrame:
logger.info(f"Starting import of memberlist...")
memberlist_excel = openpyxl.load_workbook(self.file.path)
memberlist_sheet = memberlist_excel["Report"]
data = memberlist_sheet.values
cols = next(data)[0:]
data = list(data)
data = (islice(r, 0, None) for r in data)
df = | pd.DataFrame(data, index=None, columns=cols) | pandas.DataFrame |
#basic imports
import dash
import dash_core_components as dcc
import dash_html_components as html
import pandas as pd
import plotly.offline as po
import plotly.graph_objects as go
MAPBOX_TOKEN = '<KEY>'
df = | pd.read_csv("https://raw.githubusercontent.com/datasets/covid-19/master/data/time-series-19-covid-combined.csv") | pandas.read_csv |
import os
import random
import matplotlib.pyplot as plt
import torch
import json
import argparse
import pandas as pd
from tqdm import tqdm
from pprint import pprint
from asteroid.metrics import F1Tracker
from asteroid.binarize import Binarize
from asteroid.models.conv_tasnet import VADNet
from asteroid.data.vad_dataset import LibriVADDataset
parser = argparse.ArgumentParser()
parser.add_argument(
"--md_path", type=str, required=True, help="Test directory including the csv files"
)
parser.add_argument(
"--out_dir",
type=str,
required=True,
help="Directory in exp_dir where the eval results will be stored",
)
parser.add_argument(
"--exp_dir",
type=str,
required=True,
help="Directory of the exp",
)
parser.add_argument(
"--n_save_ex", type=int, default=10, help="Number of audio examples to save, -1 means all"
)
parser.add_argument("--threshold", type=float, default=0.5)
compute_metrics = ["accuracy", "precision", "recall", "f1_score"]
def main(conf):
test_set = LibriVADDataset(md_file_path=conf["md_path"], segment=None)
model = VADNet.from_pretrained(os.path.join(conf["exp_dir"], "best_model.pth"))
# Used to reorder sources only
# Randomly choose the indexes of sentences to save.
eval_save_dir = os.path.join(conf["exp_dir"], conf["out_dir"])
ex_save_dir = os.path.join(eval_save_dir, "examples/")
if conf["n_save_ex"] == -1:
conf["n_save_ex"] = len(test_set)
save_idx = random.sample(range(len(test_set)), conf["n_save_ex"])
series_list = []
torch.no_grad().__enter__()
tracker = F1Tracker()
binarizer = Binarize(threshold=conf["threshold"], stability=0.05)
for idx in tqdm(range(len(test_set))):
# Forward the network on the mixture.
mix, labels = test_set[idx]
est = model(mix.unsqueeze(0))
binarized = binarizer(est)
utt_metrics = tracker(binarized, labels)
utt_metrics["source_path"] = test_set.source_path
series_list.append( | pd.Series(utt_metrics) | pandas.Series |
import numpy as np
import pandas as pd
import pytest
from featuretools.primitives import CityblockDistance, GeoMidpoint, IsInGeoBox
def test_cityblock():
primitive_instance = CityblockDistance()
latlong_1 = pd.Series([(i, i) for i in range(3)])
latlong_2 = pd.Series([(i, i) for i in range(3, 6)])
primitive_func = primitive_instance.get_function()
answer = pd.Series([414.56051391, 414.52893691, 414.43421555])
given_answer = primitive_func(latlong_1, latlong_2)
np.testing.assert_allclose(given_answer, answer, rtol=1e-09)
primitive_instance = CityblockDistance(unit='kilometers')
primitive_func = primitive_instance.get_function()
answer = primitive_func(latlong_1, latlong_2)
given_answer = pd.Series([667.1704814, 667.11966315, 666.96722389])
np.testing.assert_allclose(given_answer, answer, rtol=1e-09)
def test_cityblock_nans():
primitive_instance = CityblockDistance()
lats_longs_1 = [(i, i) for i in range(2)]
lats_longs_2 = [(i, i) for i in range(2, 4)]
lats_longs_1 += [(1, 1), (np.nan, 3), (4, np.nan), (np.nan, np.nan)]
lats_longs_2 += [(np.nan, np.nan), (np.nan, 5), (6, np.nan), (np.nan,
np.nan)]
primitive_func = primitive_instance.get_function()
given_answer = pd.Series(list([276.37367594, 276.35262728] +
[np.nan] * 4))
answer = primitive_func(lats_longs_1, lats_longs_2)
np.testing.assert_allclose(given_answer, answer, rtol=1e-09)
def test_cityblock_error():
error_text = 'Invalid unit given'
with pytest.raises(ValueError, match=error_text):
CityblockDistance(unit='invalid')
def test_midpoint():
latlong1 = pd.Series([(-90, -180), (90, 180)])
latlong2 = pd.Series([(+90, +180), (-90, -180)])
function = GeoMidpoint().get_function()
answer = function(latlong1, latlong2)
for lat, longi in answer:
assert lat == 0.0
assert longi == 0.0
def test_midpoint_floating():
latlong1 = pd.Series([(-45.5, -100.5), (45.5, 100.5)])
latlong2 = | pd.Series([(+45.5, +100.5), (-45.5, -100.5)]) | pandas.Series |
# -*- coding: utf-8 -*-
import os
import numpy as np
import statsmodels.api as sm # recommended import according to the docs
import matplotlib.pyplot as plt
import pandas as pd
import scipy.stats.mstats as mstats
from common import globals as glob
from datetime import datetime, timedelta
import seaborn as sb
sb.set_style('darkgrid')
from statsmodels.tsa.seasonal import seasonal_decompose
from statsmodels.tsa.arima_model import ARIMA
from dateutil.relativedelta import relativedelta
import plotly.plotly as py
from plotly.graph_objs import *
THRESHOLD_FOR_TOTAL_NUMBER_OF_STORES = 25
def plot_sparklines_for_countries_with_greatest_increase(countries, df):
data = []
i = 1
for c in countries:
dft, dates, overall_change, overall_change_in_percentage = get_timeseries(df, None, 'countries', None, [c], create_csv=False)
#glob.log.info(dft.columns)
xa='x' + str(i)
ya='y' + str(i)
print('country %s' %(c))
i += 1
trace = Scatter(
x=dates,
y=dft['count'],
fill='tozeroy',
line=Line(
shape='spline',
smoothing=1.3,
width=0.5
),
mode='lines',
name=c,
visible=True,
xaxis=xa,
yaxis=ya,
)
data.append(trace)
layout = Layout(
autosize=False,
height=1000,
showlegend=False,
title='<b>Timeseries for number Starbucks stores 2013-2016</b><br>Countries with the maximum percentage increase in number Starbucks stores. <br><i>Only includes countries with at least 25 stores as of November 2016.</i>',
width=800)
i = 1
    #xdomain and ydomain are the grid divisions in which the plots will be displayed;
    #we are aiming for a 3x5 layout
xdomain = [[0, 0.25], [0.33, 0.6], [0.7, 1.0]]
ydomain = [[0.8, 0.95], [0.6, 0.75], [0.4, 0.55], [0.2, 0.35], [0.0, 0.15]]
    #we would like to replace the country code with its name; this mapping is available in the WDI dataset
fname = os.path.join(glob.OUTPUT_DIR_NAME, glob.WDI_CSV_FILE_AFTER_CLEANING)
df_WB=pd.read_csv(fname)
df_WB = df_WB.set_index('country_code')
for c in countries:
xa = 'xaxis' + str(i)
ya = 'yaxis' + str(i)
layout[xa] = dict(XAxis(
anchor='y' + str(i),
autorange=True,
domain=xdomain[(i%3) - 1],
mirror=False,
showgrid=False,
showline=False,
showticklabels=False,
showticksuffix='none',
title=df_WB.ix[c]['name'],
titlefont=dict(
#family='Courier New, monospace',
size=12,
#color='#7f7f7f'
),
zeroline=False
))
layout[ya] = dict(YAxis(
#autorange=False,
#range=[0,3000],
autorange=True,
anchor='x' + str(i),
domain=ydomain[(i%5) - 1],
mirror=False,
showgrid=False,
showline=False,
showticklabels=True,
showticksuffix='last',
title='',
type='linear',
zeroline=False
))
#move to the next
i += 1
fig = Figure(data=data, layout=layout)
plot_url = py.plot(fig, filename='sparklines')
fname = os.path.join(glob.OUTPUT_DIR_NAME, glob.TSA_DIR, 'countries_w_greatest_increase_in_starbucks_stores.png')
py.image.save_as(fig, filename=fname)
def plot_bar_graph_for_abs_increase(df):
import plotly.plotly as py
import plotly.graph_objs as go
df1 = df.sort_values(by='overall_change', ascending=False)
trace1 = go.Bar(
x=df1['country'],
y=df1['overall_change'],
name='Overall change in number of stores',
marker=dict(
#color='rgb(55, 83, 109)'
color='rgb(49,130,189)'
)
)
data = [trace1]
layout = go.Layout(
title='Overall increase in number of Starbucks stores from 2013 to 2016',
xaxis=dict(
tickfont=dict(
size=14,
color='rgb(107, 107, 107)'
)
),
yaxis=dict(
title='Increase in number of Starbucks stores',
titlefont=dict(
size=16,
color='rgb(107, 107, 107)'
),
tickfont=dict(
size=14,
color='rgb(107, 107, 107)'
)
),
legend=dict(
x=0,
y=1.0,
bgcolor='rgba(255, 255, 255, 0)',
bordercolor='rgba(255, 255, 255, 0)'
),
barmode='group',
bargap=0.15,
bargroupgap=0.1
)
fig = go.Figure(data=data, layout=layout)
py.plot(fig, filename='style-bar')
fname = os.path.join(glob.OUTPUT_DIR_NAME, glob.TSA_DIR, 'bar_graph_for_increase_in_number_of_starbucks_stores.png')
py.image.save_as(fig, filename=fname)
def get_next_month_and_year(prev_date):
next_month = prev_date.month + 1
next_year = prev_date.year
if next_month == 13:
next_month = 1
next_year += 1
return next_year, next_month
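# Example (illustrative): get_next_month_and_year(datetime(2016, 12, 15)) returns (2017, 1),
# rolling the year over, while get_next_month_and_year(datetime(2016, 3, 1)) returns (2016, 4).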
def get_timeseries(df, dir_name, scope, scope_label, scope_list, create_csv=True):
#countries = ['US', 'CN', 'CA', 'IN', 'GB', 'JP', 'FR']
#create a new df based on scope
if scope == 'all':
glob.log.info('scope is all so no filtering needed....')
elif scope == 'continent':
glob.log.info('scope is continent...')
glob.log.info(scope_list)
df = df[df['continent'].isin(scope_list)]
elif scope == 'countries':
glob.log.info('scope is countries...')
glob.log.info(scope_list)
df = df[df['country'].isin(scope_list)]
elif scope == 'US_states':
glob.log.info('scope is US states...')
glob.log.info(scope_list)
df = df[(df['country'] == 'US') & (df['country_subdivision'].isin(scope_list))]
else:
glob.log.info('unknown scope -> %s, defaulting to scope=all' %(scope))
    #add a new datetime field which holds the DateTime version of the first_seen field
df['first_seen_as_dt'] = pd.to_datetime(df['first_seen'])
start_date = min(df['first_seen_as_dt'])
final_date = max(df['first_seen_as_dt'])
final_year = final_date.year
final_month = final_date.month
glob.log.info('start date: %s, final date %s' %(str(start_date), str(final_date)))
#create a new dataframe to hold the timeseries data
dft = pd.DataFrame(columns=['date', 'count'])
dates = []
counts = []
#add the first element
count = len(df[df['first_seen_as_dt'] == start_date])
dates.append(start_date)
counts.append(float(count))
prev_date = start_date
while True:
next_year, next_month = get_next_month_and_year(prev_date)
if (next_year > final_year) or (next_year == final_year and next_month > (final_month + 1)):
glob.log.info('reached end of timeseries data at year=%d, month=%d' %(next_year, next_month))
break
next_date = datetime(next_year, next_month, 1)
count += len(df[(df['first_seen_as_dt'] > prev_date) & (df['first_seen_as_dt'] <= next_date)])
#glob.log.info('date %s, count %d' %(next_date, count))
dates.append(next_date)
counts.append(float(count))
#move to the next date
prev_date = next_date
dft['date'] = dates
dft['count'] = counts
#add a rate parameter as well to see what is the rate of increase (or decrease) with time
dft['change'] = dft['count'] - dft['count'].shift()
overall_change = sum(dft['change'].dropna())
#2nd order differences, to see if there is an increase in the differences themselves
dft['change_in_percentage'] = 100*((dft['count'] - dft['count'].shift())/(dft['count']))
overall_change_in_percentage = sum(dft['change_in_percentage'].dropna())
if create_csv == True:
fname = os.path.join(dir_name, scope_label + '_timeseries.csv')
dft.to_csv(fname, index=False)
dft = dft.set_index('date')
return dft, dates, overall_change, overall_change_in_percentage
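# Usage sketch (assumes 'US' appears in df['country'] and that out_dir exists; not from the original):
#     dft, dates, change, pct_change = get_timeseries(df_sb, out_dir, 'countries', 'US', ['US'])
# which yields a date-indexed frame of cumulative store counts along with the first-order
# 'change' and 'change_in_percentage' columns computed above.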
def explore_timeseries(df, scope, scope_label, scope_list, order=(2, 1, 2)):
#create subdir for the scope so that all plots can be kept in that directory
dir_name = os.path.join(glob.OUTPUT_DIR_NAME, glob.TSA_DIR, scope_label)
os.makedirs(dir_name, exist_ok = True)
    #get the series to be analyzed
dft, dates, overall_change, overall_change_in_percentage = get_timeseries(df, dir_name, scope, scope_label, scope_list)
#plot it
print(dft['count'])
dft['count'].plot(figsize=(16, 12))
fname = os.path.join(dir_name, 'num_stores.png')
plt.savefig(fname)
decomposition = seasonal_decompose(dft['count'], model='additive', freq=5)
fig = plt.figure()
fig = decomposition.plot()
fname = os.path.join(dir_name, 'decomposition.png')
plt.savefig(fname)
#store the df column as a time series, for easier processing
ts = dft['count']
#take a log of the series and then a difference of the logs, this is needed
#to make the series stationary
ts_log = np.log(dft['count'])
ts_log_diff = ts_log - ts_log.shift()
#we choose the ARIMA model on the log of the series
model = ARIMA(ts_log, order=order)
results_ARIMA = model.fit(disp=-1)
#plot the differences and overlay the fitted values to get a sense
#of how good the model is
fig = plt.figure()
plt.plot(ts_log_diff)
plt.plot(results_ARIMA.fittedvalues, color='red')
plt.title('RSS: %.4f'% sum((results_ARIMA.fittedvalues-ts_log_diff[1:])**2))
fname = os.path.join(dir_name, 'log_diff_and_fitted_values.png')
plt.savefig(fname)
#now begin converting the fitted values into the original scale
predictions_ARIMA_diff = pd.Series(results_ARIMA.fittedvalues, copy=True)
predictions_ARIMA_diff_cumsum = predictions_ARIMA_diff.cumsum()
#retrieve the log of the predicted values by adding the cumulative sum to the original
#starting value
predictions_ARIMA_log = pd.Series(ts_log.ix[0], index=ts_log.index)
predictions_ARIMA_log = predictions_ARIMA_log.add(predictions_ARIMA_diff_cumsum,fill_value=0)
#all done, now recreate the entire series to the original scale
fig = plt.figure()
predictions_ARIMA = np.exp(predictions_ARIMA_log)
ax=plt.gca()
plt.plot(ts, label='Actual')
plt.ylabel('Number of stores')
ax = predictions_ARIMA.plot(ax=ax, style='r--', label='Predicted');
plt.title('RMSE: %.4f'% np.sqrt(sum((predictions_ARIMA-ts)**2)/len(ts)))
ax.legend()
fname = os.path.join(dir_name, 'orig_with_fitted_values.png')
plt.savefig(fname)
#create dates for the next one year
next_year, next_month = get_next_month_and_year(dates[-1])
start = datetime(next_year, next_month, 1)
date_list = [start + relativedelta(months=x) for x in range(0,12)]
future = pd.DataFrame(index=date_list, columns= dft.columns)
original_len_of_ts = len(dft)
dft = pd.concat([dft, future])
#for some reason we have to provide the start and end as integers
#and only then it works...dates as strings do not work, so we do this
#roundabout thing of providing integers as index and then changing the
#index to date strings once we have the predicted values..
#we predict next 12 months of data
predict_counts = results_ARIMA.predict(start=original_len_of_ts-1, end=original_len_of_ts+10, dynamic=True)
predict_counts.index = date_list
predict_counts = results_ARIMA.fittedvalues.append(predict_counts)
predictions_ARIMA_diff = pd.Series(predict_counts, copy=True)
predictions_ARIMA_diff_cumsum = predictions_ARIMA_diff.cumsum()
ts_log = np.log(dft['count'])
predictions_ARIMA_log = pd.Series(ts_log.ix[0], index=ts_log.index)
predictions_ARIMA_log = predictions_ARIMA_log.add(predictions_ARIMA_diff_cumsum,fill_value=0)
predictions_ARIMA = np.exp(predictions_ARIMA_log)
fig = plt.figure()
ax=plt.gca()
plt.plot(ts, label='Actual')
plt.ylabel('Number of stores')
ax = predictions_ARIMA[:original_len_of_ts].plot(ax=ax, style='r--', label='Fitted');
ax = predictions_ARIMA[original_len_of_ts-1:].plot(ax=ax, style='g--', label='Predicted');
plt.title('Forecasted number of stores')
ax.legend()
fname = os.path.join(dir_name, 'orig_with_predicted_values.png')
plt.savefig(fname)
plt.close('all')
fname = os.path.join(dir_name, scope_label + '_timeseries_w_predictions.csv')
predictions_ARIMA.to_csv(fname, index_label='date', header=True)
def run():
glob.log.info('about to begin additional analysis...')
plt.close('all')
fname = os.path.join(glob.OUTPUT_DIR_NAME, glob.SB_CSV_FILE_W_FEATURES)
df_sb = pd.read_csv(fname)
#explore the following combinations
explore_timeseries(df_sb, 'all', 'World', None, (2, 1, 1)) #the entire world
#found by trial and error -> 2,1,1 works better
explore_timeseries(df_sb, 'continent', 'Africa', ['Africa']) #the continent of Africa
explore_timeseries(df_sb, 'US_states', 'US_NY', ['NY']) #the state of New York
explore_timeseries(df_sb, 'countries', 'IN', ['IN']) #India
explore_timeseries(df_sb, 'countries', 'GB', ['GB']) #the U.K.
explore_timeseries(df_sb, 'countries', 'US_CN', ['US', 'CN'],(1, 1, 0)) #the U.S. and China combined
#do another analysis, find the countries with highest rate of increase
dir_name = os.path.join(glob.OUTPUT_DIR_NAME, glob.TSA_DIR, 'all')
os.makedirs(dir_name, exist_ok = True)
rate_of_growth = { 'country':[], 'total': [], 'overall_change':[], 'overall_change_in_percentage':[]}
for c in df_sb['country'].unique():
dft, dates, overall_change, overall_change_in_percentage = get_timeseries(df_sb, None, 'countries', None, [c], create_csv=False)
fname = os.path.join(glob.OUTPUT_DIR_NAME, glob.TSA_DIR, 'all', c + '_timeseries.csv')
dft.to_csv(fname)
rate_of_growth['country'].append(c)
rate_of_growth['total'].append(dft['count'][dft.index[-1]])
rate_of_growth['overall_change'].append(overall_change)
rate_of_growth['overall_change_in_percentage'].append(overall_change_in_percentage)
glob.log.info('overall change in number of stores in %s is %d' %(c, overall_change))
df1 = | pd.DataFrame(rate_of_growth) | pandas.DataFrame |
# coding=utf-8
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta
import operator
from itertools import product, starmap
from numpy import nan, inf
import numpy as np
import pandas as pd
from pandas import (Index, Series, DataFrame, isnull, bdate_range,
NaT, date_range, timedelta_range,
_np_version_under1p8)
from pandas.tseries.index import Timestamp
from pandas.tseries.tdi import Timedelta
import pandas.core.nanops as nanops
from pandas.compat import range, zip
from pandas import compat
from pandas.util.testing import assert_series_equal, assert_almost_equal
import pandas.util.testing as tm
from .common import TestData
class TestSeriesOperators(TestData, tm.TestCase):
_multiprocess_can_split_ = True
def test_comparisons(self):
left = np.random.randn(10)
right = np.random.randn(10)
left[:3] = np.nan
result = nanops.nangt(left, right)
with np.errstate(invalid='ignore'):
expected = (left > right).astype('O')
expected[:3] = np.nan
assert_almost_equal(result, expected)
s = Series(['a', 'b', 'c'])
s2 = Series([False, True, False])
# it works!
exp = Series([False, False, False])
tm.assert_series_equal(s == s2, exp)
tm.assert_series_equal(s2 == s, exp)
def test_op_method(self):
def check(series, other, check_reverse=False):
simple_ops = ['add', 'sub', 'mul', 'floordiv', 'truediv', 'pow']
if not compat.PY3:
simple_ops.append('div')
for opname in simple_ops:
op = getattr(Series, opname)
                if opname == 'div':
alt = operator.truediv
else:
alt = getattr(operator, opname)
result = op(series, other)
expected = alt(series, other)
tm.assert_almost_equal(result, expected)
if check_reverse:
rop = getattr(Series, "r" + opname)
result = rop(series, other)
expected = alt(other, series)
tm.assert_almost_equal(result, expected)
check(self.ts, self.ts * 2)
check(self.ts, self.ts[::2])
check(self.ts, 5, check_reverse=True)
check(tm.makeFloatSeries(), tm.makeFloatSeries(), check_reverse=True)
def test_neg(self):
assert_series_equal(-self.series, -1 * self.series)
def test_invert(self):
assert_series_equal(-(self.series < 0), ~(self.series < 0))
def test_div(self):
with np.errstate(all='ignore'):
# no longer do integer div for any ops, but deal with the 0's
p = DataFrame({'first': [3, 4, 5, 8], 'second': [0, 0, 0, 3]})
result = p['first'] / p['second']
expected = Series(
p['first'].values.astype(float) / p['second'].values,
dtype='float64')
expected.iloc[0:3] = np.inf
assert_series_equal(result, expected)
result = p['first'] / 0
expected = Series(np.inf, index=p.index, name='first')
assert_series_equal(result, expected)
p = p.astype('float64')
result = p['first'] / p['second']
expected = Series(p['first'].values / p['second'].values)
assert_series_equal(result, expected)
p = DataFrame({'first': [3, 4, 5, 8], 'second': [1, 1, 1, 1]})
result = p['first'] / p['second']
assert_series_equal(result, p['first'].astype('float64'),
check_names=False)
self.assertTrue(result.name is None)
self.assertFalse(np.array_equal(result, p['second'] / p['first']))
# inf signing
s = Series([np.nan, 1., -1.])
result = s / 0
expected = Series([np.nan, np.inf, -np.inf])
assert_series_equal(result, expected)
# float/integer issue
# GH 7785
p = DataFrame({'first': (1, 0), 'second': (-0.01, -0.02)})
expected = Series([-0.01, -np.inf])
result = p['second'].div(p['first'])
assert_series_equal(result, expected, check_names=False)
result = p['second'] / p['first']
assert_series_equal(result, expected)
# GH 9144
s = Series([-1, 0, 1])
result = 0 / s
expected = Series([0.0, nan, 0.0])
assert_series_equal(result, expected)
result = s / 0
expected = Series([-inf, nan, inf])
assert_series_equal(result, expected)
result = s // 0
expected = Series([-inf, nan, inf])
assert_series_equal(result, expected)
def test_operators(self):
def _check_op(series, other, op, pos_only=False,
check_dtype=True):
left = np.abs(series) if pos_only else series
right = np.abs(other) if pos_only else other
cython_or_numpy = op(left, right)
python = left.combine(right, op)
tm.assert_series_equal(cython_or_numpy, python,
check_dtype=check_dtype)
def check(series, other):
simple_ops = ['add', 'sub', 'mul', 'truediv', 'floordiv', 'mod']
for opname in simple_ops:
_check_op(series, other, getattr(operator, opname))
_check_op(series, other, operator.pow, pos_only=True)
_check_op(series, other, lambda x, y: operator.add(y, x))
_check_op(series, other, lambda x, y: operator.sub(y, x))
_check_op(series, other, lambda x, y: operator.truediv(y, x))
_check_op(series, other, lambda x, y: operator.floordiv(y, x))
_check_op(series, other, lambda x, y: operator.mul(y, x))
_check_op(series, other, lambda x, y: operator.pow(y, x),
pos_only=True)
_check_op(series, other, lambda x, y: operator.mod(y, x))
check(self.ts, self.ts * 2)
check(self.ts, self.ts * 0)
check(self.ts, self.ts[::2])
check(self.ts, 5)
def check_comparators(series, other, check_dtype=True):
_check_op(series, other, operator.gt, check_dtype=check_dtype)
_check_op(series, other, operator.ge, check_dtype=check_dtype)
_check_op(series, other, operator.eq, check_dtype=check_dtype)
_check_op(series, other, operator.lt, check_dtype=check_dtype)
_check_op(series, other, operator.le, check_dtype=check_dtype)
check_comparators(self.ts, 5)
check_comparators(self.ts, self.ts + 1, check_dtype=False)
def test_operators_empty_int_corner(self):
s1 = Series([], [], dtype=np.int32)
s2 = Series({'x': 0.})
tm.assert_series_equal(s1 * s2, Series([np.nan], index=['x']))
def test_operators_timedelta64(self):
# invalid ops
self.assertRaises(Exception, self.objSeries.__add__, 1)
self.assertRaises(Exception, self.objSeries.__add__,
np.array(1, dtype=np.int64))
self.assertRaises(Exception, self.objSeries.__sub__, 1)
self.assertRaises(Exception, self.objSeries.__sub__,
np.array(1, dtype=np.int64))
        # series ops
v1 = date_range('2012-1-1', periods=3, freq='D')
v2 = date_range('2012-1-2', periods=3, freq='D')
rs = Series(v2) - Series(v1)
xp = Series(1e9 * 3600 * 24,
rs.index).astype('int64').astype('timedelta64[ns]')
assert_series_equal(rs, xp)
self.assertEqual(rs.dtype, 'timedelta64[ns]')
df = DataFrame(dict(A=v1))
td = Series([timedelta(days=i) for i in | range(3) | pandas.compat.range |
"""Tests for static validation."""
from datetime import datetime
import numpy as np
import pandas as pd
from delphi_validator.datafetcher import FILENAME_REGEX
from delphi_validator.report import ValidationReport
from delphi_validator.static import StaticValidator
class TestCheckMissingDates:
def test_empty_filelist(self):
params = {"data_source": "", "span_length": 8,
"end_date": "2020-09-09", "expected_lag": {}}
validator = StaticValidator(params)
report = ValidationReport([])
filenames = list()
validator.check_missing_date_files(filenames, report)
assert len(report.raised_errors) == 1
assert "check_missing_date_files" in [
err.check_data_id[0] for err in report.raised_errors]
assert len(report.raised_errors[0].expression) == 9
def test_same_day(self):
params = {"data_source": "", "span_length": 0,
"end_date": "2020-09-01", "expected_lag": {}}
validator = StaticValidator(params)
report = ValidationReport([])
filenames = [("20200901_county_signal_signal.csv", "match_obj")]
validator.check_missing_date_files(filenames, report)
assert len(report.raised_errors) == 0
assert "check_missing_date_files" not in [
err.check_data_id[0] for err in report.raised_errors]
def test_duplicate_dates(self):
params = {"data_source": "", "span_length": 1,
"end_date": "2020-09-02", "expected_lag": {}}
validator = StaticValidator(params)
report = ValidationReport([])
filenames = [("20200901_county_signal_signal.csv", "match_obj"),
("20200903_county_signal_signal.csv", "match_obj"),
("20200903_usa_signal_signal.csv", "match_obj"),
("20200903_usa_signal_signal.csv", "match_obj")]
validator.check_missing_date_files(filenames, report)
assert len(report.raised_errors) == 1
assert "check_missing_date_files" in [
err.check_data_id[0] for err in report.raised_errors]
assert len([err.expression[0] for
err in report.raised_errors if err.check_data_id[0] ==
"check_missing_date_files"]) == 1
assert [err.expression[0] for
err in report.raised_errors if err.check_data_id[0] ==
"check_missing_date_files"][0] == datetime.strptime("20200902", "%Y%m%d").date()
class TestNameFormat:
def test_match_existence(self):
pattern_found = FILENAME_REGEX.match("20200903_usa_signal_signal.csv")
assert pattern_found
pattern_found = FILENAME_REGEX.match("2020090_usa_signal_signal.csv")
assert not pattern_found
pattern_found = FILENAME_REGEX.match("20200903_usa_signal_signal.pdf")
assert not pattern_found
pattern_found = FILENAME_REGEX.match("20200903_usa_.csv")
assert not pattern_found
def test_expected_groups(self):
pattern_found = FILENAME_REGEX.match(
"20200903_usa_signal_signal.csv").groupdict()
assert pattern_found["date"] == "20200903"
assert pattern_found["geo_type"] == "usa"
assert pattern_found["signal"] == "signal_signal"
class TestCheckBadGeoIdFormat:
params = {"data_source": "", "span_length": 0,
"end_date": "2020-09-02", "expected_lag": {}}
def test_empty_df(self):
validator = StaticValidator(self.params)
report = ValidationReport([])
empty_df = pd.DataFrame(columns=["geo_id"], dtype=str)
validator.check_bad_geo_id_format(empty_df, "name", "county", report)
assert len(report.raised_errors) == 0
def test_invalid_geo_type(self):
validator = StaticValidator(self.params)
report = ValidationReport([])
empty_df = pd.DataFrame(columns=["geo_id"], dtype=str)
validator.check_bad_geo_id_format(empty_df, "name", "hello", report)
assert len(report.raised_errors) == 1
assert "check_geo_type" in [
err.check_data_id[0] for err in report.raised_errors]
assert [err.expression for
err in report.raised_errors if err.check_data_id[0] ==
"check_geo_type"][0] == "hello"
def test_invalid_geo_id_county(self):
validator = StaticValidator(self.params)
report = ValidationReport([])
df = pd.DataFrame(["0", "54321", "123", ".0000",
"abc12"], columns=["geo_id"])
validator.check_bad_geo_id_format(df, "name", "county", report)
assert len(report.raised_errors) == 1
assert "check_geo_id_format" in report.raised_errors[0].check_data_id
assert len(report.raised_errors[0].expression) == 2
assert "54321" not in report.raised_errors[0].expression
def test_invalid_geo_id_msa(self):
validator = StaticValidator(self.params)
report = ValidationReport([])
df = pd.DataFrame(["0", "54321", "123", ".0000",
"abc12"], columns=["geo_id"])
validator.check_bad_geo_id_format(df, "name", "msa", report)
assert len(report.raised_errors) == 1
assert "check_geo_id_format" in report.raised_errors[0].check_data_id
assert len(report.raised_errors[0].expression) == 2
assert "54321" not in report.raised_errors[0].expression
def test_invalid_geo_id_hrr(self):
validator = StaticValidator(self.params)
report = ValidationReport([])
df = pd.DataFrame(["1", "12", "123", "1234", "12345",
"a", ".", "ab1"], columns=["geo_id"])
validator.check_bad_geo_id_format(df, "name", "hrr", report)
assert len(report.raised_errors) == 1
assert "check_geo_id_format" in report.raised_errors[0].check_data_id
assert len(report.raised_errors[0].expression) == 5
assert "1" not in report.raised_errors[0].expression
assert "12" not in report.raised_errors[0].expression
assert "123" not in report.raised_errors[0].expression
def test_invalid_geo_id_state(self):
validator = StaticValidator(self.params)
report = ValidationReport([])
df = pd.DataFrame(["aa", "hi", "HI", "hawaii",
"Hawaii", "a", "H.I."], columns=["geo_id"])
validator.check_bad_geo_id_format(df, "name", "state", report)
assert len(report.raised_errors) == 1
assert "check_geo_id_format" in report.raised_errors[0].check_data_id
assert len(report.raised_errors[0].expression) == 4
assert "aa" not in report.raised_errors[0].expression
assert "hi" not in report.raised_errors[0].expression
assert "HI" not in report.raised_errors[0].expression
def test_invalid_geo_id_national(self):
validator = StaticValidator(self.params)
report = ValidationReport([])
df = pd.DataFrame(["usa", "SP", " us", "us",
"usausa", "US"], columns=["geo_id"])
validator.check_bad_geo_id_format(df, "name", "national", report)
assert len(report.raised_errors) == 1
assert "check_geo_id_format" in report.raised_errors[0].check_data_id
assert len(report.raised_errors[0].expression) == 3
assert "us" not in report.raised_errors[0].expression
assert "US" not in report.raised_errors[0].expression
assert "SP" not in report.raised_errors[0].expression
class TestDuplicatedRows:
params = {"data_source": "", "span_length": 1,
"end_date": "2020-09-02", "expected_lag": {}}
def test_no_duplicates(self):
validator = StaticValidator(self.params)
report = ValidationReport([])
df = pd.DataFrame([["a", "1"], ["b", "2"], ["c", "3"]])
validator.check_duplicate_rows(df, "file", report)
assert len(report.raised_warnings) == 0
def test_single_column_duplicates_but_not_row(self):
validator = StaticValidator(self.params)
report = ValidationReport([])
df = pd.DataFrame([["a", "1"], ["a", "2"], ["b", "2"]])
validator.check_duplicate_rows(df, "file", report)
assert len(report.raised_warnings) == 0
def test_non_consecutive_duplicates(self):
validator = StaticValidator(self.params)
report = ValidationReport([])
df = pd.DataFrame([["a", "1"], ["b", "2"], ["a", "1"]])
validator.check_duplicate_rows(df, "file", report)
assert len(report.raised_warnings) == 1
assert report.raised_warnings[0].expression == [2]
assert report.raised_warnings[0].check_data_id[1] == "file"
def test_multiple_distinct_duplicates(self):
validator = StaticValidator(self.params)
report = ValidationReport([])
df = pd.DataFrame([["a", "1"], ["b", "2"], ["a", "1"], ["b", "2"]])
validator.check_duplicate_rows(df, "file", report)
assert len(report.raised_warnings) == 1
assert report.raised_warnings[0].expression == [2, 3]
def test_more_than_two_copies(self):
validator = StaticValidator(self.params)
report = ValidationReport([])
df = pd.DataFrame([["a", "1"], ["b", "2"], ["b", "2"], ["b", "2"]])
validator.check_duplicate_rows(df, "file", report)
assert len(report.raised_warnings) == 1
assert report.raised_warnings[0].expression == [2, 3]
class TestCheckBadGeoIdValue:
params = {"data_source": "", "span_length": 0,
"end_date": "2020-09-02", "expected_lag": {},
"validator_static_file_dir": "../static"}
def test_empty_df(self):
validator = StaticValidator(self.params)
report = ValidationReport([])
empty_df = pd.DataFrame(columns=["geo_id"], dtype=str)
validator.check_bad_geo_id_value(empty_df, "name", "county", report)
assert len(report.raised_errors) == 0
def test_invalid_geo_id_county(self):
validator = StaticValidator(self.params)
report = ValidationReport([])
df = pd.DataFrame(["01001", "88888", "99999"], columns=["geo_id"])
validator.check_bad_geo_id_value(df, "name", "county", report)
assert len(report.raised_errors) == 1
assert "check_bad_geo_id_value" in report.raised_errors[0].check_data_id
assert len(report.raised_errors[0].expression) == 2
assert "01001" not in report.raised_errors[0].expression
assert "88888" in report.raised_errors[0].expression
assert "99999" in report.raised_errors[0].expression
def test_invalid_geo_id_msa(self):
validator = StaticValidator(self.params)
report = ValidationReport([])
df = pd.DataFrame(["10180", "88888", "99999"], columns=["geo_id"])
validator.check_bad_geo_id_value(df, "name", "msa", report)
assert len(report.raised_errors) == 1
assert "check_bad_geo_id_value" in report.raised_errors[0].check_data_id
assert len(report.raised_errors[0].expression) == 2
assert "10180" not in report.raised_errors[0].expression
assert "88888" in report.raised_errors[0].expression
assert "99999" in report.raised_errors[0].expression
def test_invalid_geo_id_hrr(self):
validator = StaticValidator(self.params)
report = ValidationReport([])
df = pd.DataFrame(["1", "11", "111", "8", "88",
"888"], columns=["geo_id"])
validator.check_bad_geo_id_value(df, "name", "hrr", report)
assert len(report.raised_errors) == 1
assert "check_bad_geo_id_value" in report.raised_errors[0].check_data_id
assert len(report.raised_errors[0].expression) == 3
assert "1" not in report.raised_errors[0].expression
assert "11" not in report.raised_errors[0].expression
assert "111" not in report.raised_errors[0].expression
assert "8" in report.raised_errors[0].expression
assert "88" in report.raised_errors[0].expression
assert "888" in report.raised_errors[0].expression
def test_invalid_geo_id_state(self):
validator = StaticValidator(self.params)
report = ValidationReport([])
df = pd.DataFrame(["aa", "ak"], columns=["geo_id"])
validator.check_bad_geo_id_value(df, "name", "state", report)
assert len(report.raised_errors) == 1
assert "check_bad_geo_id_value" in report.raised_errors[0].check_data_id
assert len(report.raised_errors[0].expression) == 1
assert "ak" not in report.raised_errors[0].expression
assert "aa" in report.raised_errors[0].expression
def test_uppercase_geo_id(self):
validator = StaticValidator(self.params)
report = ValidationReport([])
df = pd.DataFrame(["ak", "AK"], columns=["geo_id"])
validator.check_bad_geo_id_value(df, "name", "state", report)
assert len(report.raised_errors) == 0
assert len(report.raised_warnings) == 1
assert "check_geo_id_lowercase" in report.raised_warnings[0].check_data_id
assert "AK" in report.raised_warnings[0].expression
def test_invalid_geo_id_national(self):
validator = StaticValidator(self.params)
report = ValidationReport([])
df = pd.DataFrame(["us", "zz"], columns=["geo_id"])
validator.check_bad_geo_id_value(df, "name", "national", report)
assert len(report.raised_errors) == 1
assert "check_bad_geo_id_value" in report.raised_errors[0].check_data_id
assert len(report.raised_errors[0].expression) == 1
assert "us" not in report.raised_errors[0].expression
assert "zz" in report.raised_errors[0].expression
class TestCheckBadVal:
params = {"data_source": "", "span_length": 1,
"end_date": "2020-09-02", "expected_lag": {}}
def test_empty_df(self):
validator = StaticValidator(self.params)
report = ValidationReport([])
empty_df = pd.DataFrame(columns=["val"])
validator.check_bad_val(empty_df, "", "", report)
validator.check_bad_val(empty_df, "", "prop", report)
validator.check_bad_val(empty_df, "", "pct", report)
assert len(report.raised_errors) == 0
def test_missing(self):
validator = StaticValidator(self.params)
report = ValidationReport([])
df = pd.DataFrame([np.nan], columns=["val"])
validator.check_bad_val(df, "name", "signal", report)
assert len(report.raised_errors) == 1
assert "check_val_missing" in report.raised_errors[0].check_data_id
def test_lt_0(self):
validator = StaticValidator(self.params)
report = ValidationReport([])
df = pd.DataFrame([-5], columns=["val"])
validator.check_bad_val(df, "name", "signal", report)
assert len(report.raised_errors) == 1
assert "check_val_lt_0" in report.raised_errors[0].check_data_id
def test_gt_max_pct(self):
validator = StaticValidator(self.params)
report = ValidationReport([])
df = | pd.DataFrame([1e7], columns=["val"]) | pandas.DataFrame |
#################################
## Preamble
# import necessary modules/tools
import math
import numpy as np
import os
import pandas as pd
import scipy as sc
import sympy as sp
import sys
from types import FunctionType
# # # # # # # # #
#################################
## Universal Variables/Methods/Classes
# common functions
def diagonality(matrix):
"""Determines if matrix is strictly, diagonally dominant.
Parameters
----------
matrix : array
Input matrix to be tested.
Returns
-------
is_strict_diagonal_matrix : boolean
Truth value whether matrix is strictly, diagonally dominant.
Raises
------
IndexError
Matrix of interest must be square.
Warnings
--------
Will print to console either if strictly, diagonally dominant, or if matrix, `A` is not strictly, diagonally dominant which could lead to poor solution of 'Ax = b'.
"""
matrix_name, A = "A", np.array(matrix)
if not(np.sum(np.shape(A)) - np.shape(A)[0] == np.shape(A)[0]):
raise IndexError(f"ERROR! Matrix, {matrix_name} must be square!")
    # strict diagonal dominance requires |a_ii| > sum_{j != i} |a_ij| for every row
    i, is_strict_diagonal_matrix = 0, True
    while i < len(A):
        row_off_diagonal = np.sum(np.abs(A[i])) - np.abs(A[i][i])
        if np.abs(A[i][i]) <= row_off_diagonal: is_strict_diagonal_matrix = False
        i += 1
    if is_strict_diagonal_matrix:
        print(f"Information: Matrix, {matrix_name} is strictly, diagonally dominant.")
    else:
        print(f"Warning! Matrix, {matrix_name} is not strictly, diagonally dominant. Solution may be inaccurate.")
return is_strict_diagonal_matrix
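# Example (illustrative): for A = [[4, 1, 0], [1, 5, 2], [0, 2, 6]] every diagonal magnitude
# exceeds the sum of the off-diagonal magnitudes in its row, so diagonality(A) returns True.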
def eigen_values(matrix):
"""Directly finds eigenvalues of matrix by its determinant. Not recommended for large, sparse matrices.
Parameters
----------
matrix : array
Matrix of interest.
Returns
-------
lambdas : array
        List of eigenvalues (roots of the characteristic polynomial).
Raises
------
IndexError
Matrix of interest must be square.
"""
matrix_name, A = "A", np.array(matrix)
if not(np.sum(np.shape(A)) - np.shape(A)[0] == np.shape(A)[0]):
raise IndexError(f"ERROR! Matrix, {matrix_name} must be square!")
sym_r = sp.Symbol("r")
i, identityA = 0, np.zeros_like(A)
while i < len(A):
j = 0
while j < len(A[0]):
if i == j: identityA[i][j] = 1
j += 1
i += 1
lambda_identity = identityA*sym_r
determinant = sp.det(sp.Matrix(A - lambda_identity))
roots = sp.solve(determinant)
lambdas = []
for r in roots:
r = complex(r)
if np.imag(r) == 0: r = np.real(r)
lambdas.append(r)
return lambdas
# preceded by eigen_values
def spectral_radius(matrix):
"""Finds the spectral radius of matrix.
Parameters
----------
matrix : array
Matrix of interest.
Returns
-------
rho : float
Spectral radius.
Raises
------
IndexError
Matrix of interest must be square.
See Also
--------
eigen_values() : Function to find eigenvector of A.
"""
matrix_name, A = "A", np.array(matrix)
if not(np.sum(np.shape(A)) - np.shape(A)[0] == np.shape(A)[0]):
raise IndexError(f"ERROR! Matrix, {matrix_name} must be square!")
rho = np.max(np.abs(eigen_values(A)))
return rho
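# Example (illustrative): spectral_radius([[2, 0], [0, -3]]) finds the eigenvalues {2, -3}
# and returns 3.0, the largest magnitude.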
# preceded by spectral_radius
class norms:
def __init__(self, x, x0=[]):
"""
Parameters
----------
x : array
Newly approximated array.
x0 : array, optional
Previously approximated array.
Yields
------
self.vec_name : string
Connote symbol name as 'x'.
self.x : array
Newly approximated array.
self.old_vec_name : string
Connote symbol name as 'x0'.
self.x0 : array
Previously approximated array.
Raises
------
IndexError
If the input vectors are not the same length.
"""
self.vec_name, self.x = "x", np.array(x)
self.old_vec_name, self.x0 = "x0", np.array(x0)
if not(self.x0.shape[0] == 0 or len(x) == len(x0)):
raise IndexError(f"ERROR! {self.vec_name}, and {self.old_vec_name} must be the same size!")
def l_infinity(self):
"""Maximum difference between absolute sum of i'th rows.
Returns
-------
norm : float
Scalar value.
Yields
------
self.norm : float
Scalar value.
Raises
------
IndexError
If the input vectors are not the same length.
Notes
-----
Best thought as "actual" distance between vectors.
Also calculates infinity norm of matrix(ces).
Examples
--------
[x0] = (1, 1, 1)^(t)
[x] = (1.2001, 0.99991, 0.92538)^(t)
||x0 - x|| = max{|1 - 1.2001|, |1 - 0.99991|, |1 - 0.92538|}
||x0 - x|| = 0.2001
"""
vec_name, x = self.vec_name, self.x
old_vec_name, x0 = self.old_vec_name, self.x0
# initialize loop
norm_i = np.zeros_like(x)
if x0.shape[0] == 0:
if np.sum(x.shape) == x.shape[0]:
for i in range(x.shape[0]):
# evaluate and store norm, ||.||
norm_i[i] = abs(x[i])
elif np.sum(x.shape) > x.shape[0]:
norm_ij = np.zeros_like(x)
for i in range(x.shape[0]):
for j in range(x.shape[1]):
# evaluate and store norm, ||.||
norm_ij[i][j] = abs(x[i][j])
norm_i[i] = np.sum(norm_ij[i][:])
elif len(x) == len(x0):
if np.sum(x0.shape) == x0.shape[0]:
for i in range(x0.shape[0]):
norm_i[i] = abs(x[i] - x0[i])
elif np.sum(x0.shape) > x0.shape[0]:
if np.sum(x.shape) == np.sum(x0.shape):
for i in range(x0.shape[0]):
for j in range(x0.shape[1]):
norm_ij = np.zeros_like(x)
# evaluate and store norm, ||.||
norm_ij[i][j] = abs(x[i][j] - x0[i][j])
norm_i[i] = np.sum(norm_ij[i][:])
elif np.sum(x.shape) == np.sum(x0.shape):
for i in range(x0.shape[0]):
# evaluate and store norm, ||.||
norm_i[i] = abs(x[i] - x0[i])
else:
raise IndexError(f"ERROR! {vec_name}, and {old_vec_name} must be the same size!")
# if no errors, then evaluate norm
self.norm = np.amax(norm_i)
# return the l_infinity norm
return self.norm
def l_two(self):
"""Square root of sum of differences squared along i'th row.
Returns
-------
norm : float
Scalar value.
Yields
------
self.norm : float
Scalar value.
Raises
------
IndexError
If the input vectors are not the same length.
See Also
--------
spectral_radius() : Function to find the spectral radius of vector.
Examples
--------
[x0] = (1, 1, 1)^(t)
[x] = (1.2001, 0.99991, 0.92538)^(t)
||x0 - x|| = sqrt[ (1 - 1.2001)^2 \
+ (1 - 0.99991)^2 + (1 - 0.92538)^2 ]
||x0 - x|| = 0.21356
"""
        vec_name, x = self.vec_name, self.x
        old_vec_name, x0 = self.old_vec_name, self.x0
        if x0.shape[0] == 0:
            if np.sum(x.shape) == x.shape[0]:
                # vector: square root of the sum of squared components
                norm = math.sqrt(np.sum(np.array(x, dtype=float)**2))
            else:
                # matrix: square root of the spectral radius of (A^T)(A)
                norm = math.sqrt(spectral_radius(np.matmul(np.transpose(x), x)))
        elif len(x) == len(x0):
            # difference between the new and old approximations
            diff = np.array(x, dtype=float) - np.array(x0, dtype=float)
            if np.sum(x0.shape) > x0.shape[0]:
                # matrix difference: square root of the spectral radius of (D^T)(D)
                norm = math.sqrt(spectral_radius(np.matmul(np.transpose(diff), diff)))
            else:
                # vector difference: Euclidean length, sqrt(sum((x - x0)**2))
                norm = math.sqrt(np.sum(diff**2))
        else:
            raise IndexError(f"ERROR! {vec_name}, and {old_vec_name} must be the same size!")
        self.norm = norm
        return norm
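# Usage sketch mirroring the docstring values (illustrative, not from the original source):
#     x, x0 = [1.2001, 0.99991, 0.92538], [1, 1, 1]
#     norms(x, x0).l_infinity()   # ~0.2001
#     norms(x, x0).l_two()        # ~0.21356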
# preceded by norms().l_infinity() and norms().l_two()
def condition_number(matrix, norm_type="l_two"):
"""Find the condition number of a given matrix and norm type.
Parameters
----------
matrix : array
Input matrix for analysis.
norm_type : string, optional
Selects norm comparison which is 'l_two' by default.
Returns
-------
k : float
Condition number of matrix, A.
Warnings
--------
Will output evaluation of condition number and show in console.
See Also
--------
norms().l_two() : Method that yields the l_2 norm.
norms().l_infinity() : Method that yields the l_infinity norm.
"""
matrix_name, A = "A", np.array(matrix)
    # K(A) = ||A|| * ||A^(-1)|| requires the true matrix inverse, not elementwise reciprocals
    A_inv = np.linalg.inv(np.array(A, dtype=float))
if norm_type == "l_infinity":
norm, abnorm = norms(A).l_infinity(), norms(A_inv).l_infinity()
elif norm_type == "l_two":
norm, abnorm = norms(A).l_two(), norms(A_inv).l_two()
k = norm*abnorm
print(f"Information: Condition Number K({matrix_name}) = {k}")
return k
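# Example (illustrative): a well-conditioned matrix has K(A) close to 1, e.g.
#     condition_number([[1., 0.], [0., 1.]], norm_type="l_infinity")   # K(A) = 1.0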
def make_array(domain, function, variable=sp.Symbol("x")):
"""Maps domain to range.
Parameters
----------
domain : array
        Collection of input data.
function : expression
Function that maps the domain to range.
variable : string, optional
Sympy symbol or string representation of variable to respect in function.
Returns
-------
g : tuple
Mapped range from function.
Warnings
--------
Prints to console the input expression, and that the expression was in fact used.
"""
if isinstance(function, (FunctionType, sp.Expr)):
sym_function = sp.N(sp.sympify(function(variable)))
function = sp.lambdify(variable, sym_function)
print(f"Information: Input expression, {sym_function} used.")
i, X, g = 0, np.array(domain), np.zeros_like(domain)
while i < len(X):
j = 0
if np.sum(X.shape) > np.sum(X.shape[0]):
while j < len(X[0]):
g[i][j] = (function(X[i][j]))
j += 1
else: g[i] = function(X[i])
i += 1
return tuple(g)
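# Example (illustrative): mapping a small domain through a lambda,
#     make_array([0., 1., 2.], lambda x: x**2)   # -> (0.0, 1.0, 4.0)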
def symmetry(matrix):
"""Determines boolean truth value whether given matrix is symmetric.
Parameters
----------
matrix : array
Matrix of interest.
Returns
-------
is_symmetric : bool
True if symmetric, else False.
Raises
------
IndexError
Matrix of interest must be square.
Warnings
--------
Console print that A is either symmetric or asymmetric.
"""
matrix_name, A = "A", np.array(matrix)
if not(np.sum(np.shape(A)) - np.shape(A)[0] == np.shape(A)[0]):
raise IndexError(f"ERROR! Matrix, {matrix_name} must be square!")
i, At, is_symmetric = 0, np.transpose(A), False
for ai in A:
j = 0
for aj in ai:
if aj == At[i][j]: is_symmetric = True
else:
is_symmetric = False
print(f"Warning! Matrix, {matrix_name} is not symmetric.")
return is_symmetric
j += 1
i += 1
if is_symmetric: print(f"Information: Matrix, {matrix_name} is symmetric.")
return is_symmetric
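# Example (illustrative): symmetry([[1, 7], [7, 1]]) returns True, while
# symmetry([[1, 7], [0, 1]]) warns and returns False.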
def tridiagonality(matrix):
"""Determine boolean truth value whether given matrix is tridiagonal.
Parameters
----------
matrix : array
Matrix of interest.
Returns
-------
is_tridiagonal : bool
True if tridiagonal, else False.
Raises
------
IndexError
Matrix of interest must be square.
Warnings
--------
Prints to console that matrix is either tridiagonal or not.
"""
matrix_name, A = "A", np.array(matrix)
if not(np.sum(np.shape(A)) - np.shape(A)[0] == np.shape(A)[0]):
raise IndexError(f"ERROR! Matrix, {matrix_name} must be square!")
diagonals = np.diagflat(np.diag(A))
above = np.diagflat(np.diag(A, k=1), k=1)
below = np.diagflat(np.diag(A, k=-1), k=-1)
non_A = A - (diagonals + above + below)
if np.sum(non_A) != 0:
is_tridiagonal = False
print(f"Warning! Matrix, {matrix_name} is not tridiagonal.")
else:
is_tridiagonal = True
print(f"Information: Matrix, {matrix_name} is tridiagonal.")
return is_tridiagonal
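# Example (illustrative): a matrix with entries only on the main, super-, and sub-diagonals,
#     tridiagonality(np.diag([2., 2., 2.]) + np.diag([-1., -1.], k=1) + np.diag([-1., -1.], k=-1))
# returns True, whereas a dense 3x3 matrix of ones returns False.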
# # # # # # # # #
#################################
## Specific Functions
# --------------------
# eigenvalue solvers
class DirectSolver:
def __init__(self, A, power, max_iter=100):
"""
Parameters
----------
A : tuple
Characteristic matrix.
power : int
Signed power to which function error must be within.
max_iter : int, optional
Maximum iterations for which function may loop.
Yields
------
self.A : tuple
Either input functions or matrix of characteristic values.
self.tol : float
Specified tolerance to which method terminates.
self.max_iter : int
Maximum iterations allowed for method.
self.is_diagonal : bool
Truth value of whether matrix is diagonal.
self.eigenvalues : tuple
Eigenvalues of characteristic matrix, A.
self.spectral_radius : float
Spectral radius of characteristic matrix, A.
self.condition_number : float
Condition number of characteristic matrix, A.
Raises
------
IndexError
Matrix of interest must be square.
ValueError
If iterations constraint is not an integer.
Warnings
--------
Not recommended to use eigen_values() to find eigenvalues of characteristic matrix, A; therefore, do not use eigen_values() if matrix, A is a large, sparse matrix if desiring quick calculations.
See Also
--------
eigen_values() : Function to find eigenvalues of A.
spectral_radius() : Function that finds the spectral radius of characteristic matrix, A.
Notes
-----
Specified tolerance evaluated by `10**power`.
`norm_type` may be either `'l_infinity'` or `'l_two'` but is 'l_infinity' by default.
If `self.is_diagonal` is True, then matrix is diagonal. Else, not diagonal.
"""
matrix_name, A = "A", np.array(A)
if np.sum(A.shape[0]) != np.sum(A.shape[1]): raise IndexError(f"ERROR! Matrix, {matrix_name} must be square!")
if max_iter <= 0 or not isinstance(max_iter, (int, float)): raise ValueError(f"ERROR! Maximum iterations, N must be an integer greater than zero. {max_iter} was given and not understood.")
self.A = A
self.tol = float(10**power)
self.max_iter = int(max_iter)
self.is_diagonal = diagonality(A)
self.is_tridiagonal = tridiagonality(A)
# self.eigen_values = eigen_values(A)
# self.spectral_radius = spectral_radius(A)
# self.condition_number = condition_number(A, norm_type)
def power_method(self, x):
"""Approximate the dominant eigenvalue and associated eigenvector of matrix, A given some non-zero vector, x.
Parameters
----------
x : array
Numpy array.
Returns
-------
pandas.DataFrame : dataframe
Summarized dataframe from iterations.
Yields
------
self.x : tuple
Initial guess at eigenvector.
self.iterations : tuple
Collection of iterations through method.
self.mu : tuple
Collection of approximately largest eigenvalue.
self.lambdas : tuple
Collection of approximate eigenvectors.
self.errors : tuple
Collection of yielded norms.
Raises
------
IndexError
If x is neither n x 1 nor 1 x n array.
"""
A, tol, N = self.A, self.tol, self.max_iter
vec_name, x = "x", np.array(x)
if np.sum(x.shape) - np.sum(x.shape[0]) > 1: raise IndexError(f"Systems vector, {vec_name} must be n x 1 or 1 x n array!")
self.x = np.reshape(x,(len(x),1))
mu = [norms(x).l_infinity()]
x = x/mu[-1]
k, eigenvectors, errors = 1, [x], [1]
while errors[-1] > tol and k <= N:
y = np.matmul(A, x)
for yi in y:
if np.abs(yi) == norms(y).l_infinity():
yp = float(yi)
mu.append(yp)
eigenvectors.append(y/yp)
errors.append(norms(x, eigenvectors[-1]).l_infinity())
x = eigenvectors[-1]
k += 1
self.iterations = tuple(range(k))
self.mu = tuple(mu)
self.lambdas = tuple(eigenvectors)
self.errors = tuple(errors)
return pd.DataFrame(data={"Iterations": self.iterations, "Mu": self.mu, "Lambdas": self.lambdas, "Errors": self.errors})
def inverse_power_method(self, x, q):
"""Approximate eigenvalue closest to target, q and associated eigenvector of matrix, A given some non-zero vector, x.
Parameters
----------
x : array
Numpy array.
q : float
Target to which the closest eigenvalue of matrix will be found.
Returns
-------
pandas.DataFrame : dataframe
Summarized dataframe from iterations.
Yields
------
self.x : tuple
Initial guess at eigenvector.
self.iterations : tuple
Collection of iterations through method.
self.mu : tuple
Collection of approximately largest eigenvalue.
self.lambdas : tuple
Collection of approximate eigenvectors.
self.errors : tuple
Collection of yielded norms.
Raises
------
IndexError
If x is neither n x 1 nor 1 x n array.
"""
A, tol, N = self.A, self.tol, self.max_iter
vec_name, x = "x", np.array(x)
if np.sum(x.shape) - np.sum(x.shape[0]) > 1: raise IndexError(f"Systems vector, {vec_name} must be n x 1 or 1 x n array!")
self.x = np.reshape(x,(len(x),1))
self.q = float(q)
A = np.linalg.inv(A-q*np.identity(len(A)))
mu = [1/norms(x).l_infinity() + q]
k, eigenvectors, errors = 1, [x], [1]
while errors[-1] > tol and k <= N:
y = np.matmul(A, x)
for yi in y:
if np.abs(yi) == norms(y).l_infinity():
yp = float(yi)
mu.append(1/yp + q)
eigenvectors.append(y/yp)
errors.append(norms(x, x0=eigenvectors[-1]).l_infinity())
x = eigenvectors[-1]
k += 1
self.iterations = tuple(range(k))
self.mu = tuple(mu)
self.lambdas = tuple(eigenvectors)
self.errors = tuple(errors)
return pd.DataFrame(data={"Iterations": self.iterations, "Mu": self.mu, "Lambdas": self.lambdas, "Errors": self.errors})
def qr_algorithm(self):
"""Approximate dominant eigenvalue and associated eigenvector of matrix, A.
Source: https://www.youtube.com/watch?v=FAnNBw7d0vg
Returns
-------
pandas.DataFrame : dataframe
Summarized dataframe from iterations.
Yields
------
self.iterations : tuple
Collection of iterations through method.
self.lambdas : tuple
Collection of approximate eigenvectors.
self.errors : tuple
Collection of yielded norms.
"""
A, tol, N = self.A, self.tol, self.max_iter
k, eigenvectors, errors = 1, [np.diag(A)], [1]
while errors[-1] > tol and k <= N:
Q = np.zeros_like(A, dtype=float)
R = np.zeros_like(A, dtype=float)
QI = []
for j in range(len(A[0])):
ai = np.array(np.zeros(len(A)))
for i in range(len(A)):
ai[i] = A[i][j]
ai_perp = 0
for i in range(j):
R[i][j] = np.dot(ai, QI[i])
ai_perp += R[i][j]*QI[i]
ai -= ai_perp
R[j][j] = np.sqrt(np.sum(ai**2))
qi = ai/R[j][j]
QI.append(qi)
i = 0
for q in qi:
Q[i][j] = q
i += 1
A = np.matmul(R, Q)
eigenvectors.append(np.diag(A))
err = np.average([norms(np.diag(A, k=-1)).l_infinity(), norms(np.diag(A, k=1)).l_infinity()])
errors.append(err)
k += 1
self.iterations = tuple(range(k))
self.lambdas = tuple(eigenvectors)
self.errors = tuple(errors)
return pd.DataFrame(data={"Iterations": self.iterations, "Lambdas": self.lambdas, "Errors": self.errors})
def steepest_descent(self, x, b):
"""Approximate solution vector, x given matrix, A initial guess vector, x, and vector, b.
Parameters
----------
x : array
Numpy array.
b : array
Input numpy array.
Returns
-------
pandas.DataFrame : dataframe
Summarized dataframe from iterations.
Yields
------
self.x : tuple
Initial guess at eigenvector.
self.b : tuple
Input numpy array.
self.iterations : tuple
Collection of iterations through method.
self.lambdas : tuple
Collection of approximate eigenvectors.
self.errors : tuple
Collection of yielded norms.
Raises
------
IndexError
If x is neither n x 1 nor 1 x n array.
IndexError
If b is neither n x 1 nor 1 x n array.
"""
A, tol, N = self.A, self.tol, self.max_iter
vec_name, x = "x", np.array(x)
if np.sum(x.shape) - np.sum(x.shape[0]) > 1: raise IndexError(f"Systems vector, {vec_name} must be n x 1 or 1 x n array!")
self.x = np.reshape(x,(len(x),1))
vec_name, b = "b", np.array(b)
if np.sum(b.shape) - np.sum(b.shape[0]) > 1: raise IndexError(f"Systems vector, {vec_name} must be n x 1 or 1 x n array!")
self.b = np.reshape(b,(len(b),1))
k, eigenvectors, errors = 1, [x], [1]
while errors[-1] > tol and k <= N:
r = b - np.matmul(A, x)
alpha = float(np.matmul(r.T, r)[0]/np.matmul(np.matmul(r.T, A), r)[0])
x1 = x + alpha*r
eigenvectors.append(x1)
errors.append(norms(x1, x).l_infinity())
x = x1
k += 1
self.iterations = tuple(range(k))
self.lambdas = tuple(eigenvectors)
self.errors = tuple(errors)
return pd.DataFrame(data={"Iterations": self.iterations, "Lambdas": self.lambdas, "Errors": self.errors})
def conjugate_gradient(self, x, b, C=None):
"""Approximate solution vector given matrix, A, initial guess vector, x, and vector, b.
Parameters
----------
x : array
Numpy array.
b : vector
Input numpy array.
C : None or matrix, optional
Preconditioning matrix.
Returns
-------
pandas.DataFrame : dataframe
Summarized dataframe from iterations.
Yields
------
self.x : tuple
Initial guess at eigenvector.
self.b : tuple
Input numpy array.
self.iterations : tuple
Collection of iterations through method.
self.lambdas : tuple
Collection of approximate eigenvectors.
self.errors : tuple
Collection of yielded norms.
Raises
------
IndexError
If x is neither n x 1 nor 1 x n array.
IndexError
If b is neither n x 1 nor 1 x n array.
"""
A, tol, N = self.A, self.tol, self.max_iter
vec_name, x = "x", np.array(x)
if np.sum(x.shape) - np.sum(x.shape[0]) > 1: raise IndexError(f"Systems vector, {vec_name} must be n x 1 or 1 x n array!")
self.x = np.reshape(x,(len(x),1))
vec_name, b = "b", np.array(b)
if np.sum(b.shape) - np.sum(b.shape[0]) > 1: raise IndexError(f"Systems vector, {vec_name} must be n x 1 or 1 x n array!")
self.b = np.reshape(b,(len(b),1))
self.C = C
r0 = b - np.matmul(A, x)
if type(C) == type(None):
do_precondition = True
v0 = r0
else:
do_precondition = False
Minv = np.linalg.inv(C*np.transpose(C))
v0 = np.matmul(Minv, r0)
k, eigenvectors, errors = 1, [x], [1]
while errors[-1] > tol and k <= N:
if do_precondition:
alpha = float(np.matmul(r0.T, r0)[0]/np.matmul(np.matmul(v0.T, A)[0], v0)[0])
else:
alpha = float(np.matmul(np.matmul(r0.T, Minv), r0)[0]/np.matmul(np.matmul(v0.T, A), v0)[0])
x1 = x + alpha*v0
eigenvectors.append(x1)
errors.append(norms(x1, x).l_infinity())
r1 = r0 - alpha*np.matmul(A, v0)
if do_precondition:
s1 = float(np.matmul(r1.T, r1)[0]/np.matmul(r0.T, r0)[0])
else: s1 = float(np.matmul(np.matmul(r1.T, Minv)[0], r1)[0]/np.matmul(np.matmul(r0.T, Minv)[0], r0)[0])
x, r0 = x1, r1
if do_precondition: v0 = r1 + s1*v0
else: v0 = np.matmul(Minv, r1) + s1*v0
k += 1
self.iterations = tuple(range(k))
self.eigenvectors = tuple(eigenvectors)
self.errors = tuple(errors)
return pd.DataFrame(data={"Iterations": self.iterations, "Lambdas": self.eigenvectors, "Errors": self.errors})
# --------------------
# --------------------
# iterative techniques
class SingleVariableIteration:
def __init__(self, function, a, b, power=-6, variable=sp.Symbol("x"), iter_guess=True, k=0):
"""
Parameters
----------
function : expression
Input function.
a : float
Left-hand bound of interval.
b : float
Right-hand bound of interval.
power : float, optional
Signed, specified power of tolerance until satisfying method.
variable : symbol, optional
Respected variable in derivative. Assumed to be 'x' if not stated.
iter_guess : bool or integer, optional
Boolean value of `True` by default. If integer, iterate for that integer.
k : float, optional
Absolute maximum slope of function.
Yields
------
self.function : expression
Input function.
self.a : float
Left-hand bound of interval.
self.b : float
Right-hand bound of interval.
self.tol : float
Tolerance to satisfy method.
self.variable : symbol, optional
Respected variable in derivative. Assumed to be `'x'` if not stated.
self.iter_guess : bool or integer, optional
Boolean value of `True` by default. If integer, iterate for that integer.
self.k : float, optional
Absolute maximum slope of function. Assumed 0 if not defined.
Raises
------
TypeError
If input expression cannot be understood as lambda or sympy expression nor as string.
Notes
-----
self.tol evaluated by: `10**power`.
"""
if isinstance(function, (FunctionType, sp.Expr)):
sym_function = sp.N(sp.sympify(function(variable)))
function = sp.lambdify(variable, sym_function)
print(f"Information: Input expression, {sym_function} used.")
# elif isinstance(f, (sp.Expr)):
# f = sp.lambdify(variable, f)
# self.function = f
# print("sympy expression converted to lambda function.")
elif isinstance(function, (str)):
g = lambda x: eval(function)
function = sp.lambdify(variable, g(variable))
print("String expression converted to lambda function.")
else: raise TypeError("Unknown input.")
self.function, self.variable = function, variable
self.a, self.b, self.tol = float(a), float(b), float(10**power)
self.iter_guess, self.k = iter_guess, k
def find_k(self):
"""Find greatest integer for maximum iterations for tolerance.
Returns
-------
k : float
Maximum possible slope of input function.
Yields
------
self.k : float
Maximum possible slope of input function.
Warnings
--------
Prints to console the input expression, and that the expression was in fact used.
"""
a, b, variable = self.a, self.b, self.variable
sym_function = sp.N(sp.sympify(self.function(variable)))
function = sp.lambdify(variable, sym_function)
print(f"Information: Input expression, {sym_function} used.")
k = self.k
# determine form of derivative
df = sp.lambdify(variable, sp.diff(sym_function))
for alpha in np.linspace(a, b, 1000):
df_alpha = abs(df(alpha))
if df_alpha > k: k = df_alpha
self.k = k
return k
def max_iterations(self, method, p0=0):
"""Find greatest integer for maximum iterations for tolerance.
Parameters
----------
method : string
Selection of iterative method for iterations are needed.
p0 : float, optional
Initial guess for function solution.
Returns
-------
max_iter : integer
Maximum number of iterations required for specified tolerance.
Yields
------
self.max_iter : integer
Maximum number of iterations required for specified tolerance.
Raises
------
ValueError
Prescribed method is not an available option.
Warnings
--------
Informs user the maximum number of iterations for method.
Notes
-----
Will round away from zero to higher integers.
Examples
--------
If `method == 'bisection'` with a=1, b=2, and tol=10**(-3), then:
`max_iter` >= -log(`tol`/(`b` - `a`))/log(2)
`max_iter` >= -log(10**(-3)/(2 - 1))/log(2)
`max_iter` >= 9.96
`max_iter` = 10
Else, if a=1, b=2, tol=10**(-3), p0=1.5, and k=0.9, then:
`max_iter` >= log(`tol`/max('p0' - `a`, `b` - `p0`))/log(k)
`max_iter` >= log(10**(-3)/max(1.5 - 1, 2 - 1.5))/log(0.9)
`max_iter` >= log(10**(-3)/0.5)/log(0.9)
`max_iter` >= 58.98
`max_iter` = 59
"""
a, b, tol, k = self.a, self.b, self.tol, self.k
p0 = float(p0)
if method == "bisection":
max_iter = math.ceil(-math.log(tol/(b - a))/math.log(2))
elif method in ("fixed_point", "newton_raphson", "secant_method", "false_position"):
max_iter = math.ceil(math.log(tol/max(p0 - a, b - p0))/math.log(k))
else: raise ValueError(f"ERROR! I am sorry. The desired method must be: 'bisection', 'fixed_point', 'newton_raphson', 'secant_method', or 'false_position'.")
self.max_iter = max_iter
print(f"Information: With the inputs, I will terminate the technique after so many iterations, N = {max_iter}")
return max_iter
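# Worked check of the bisection bound above (illustrative numbers): with a=1, b=2 and tol=10**(-3),
# max_iter >= -log(10**(-3)/(2 - 1))/log(2) ~ 9.97, so max_iterations("bisection") returns 10.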
# next 5 functions preceded by find_k & max_iterations
def bisection(self):
"""Given f(x) in [a, b] find x within tolerance. Is a root-finding method: f(x) = 0.
Returns
-------
pandas.DataFrame : dataframe
Summarized dataframe from iterations.
Yields
------
self.iterations : tuple
Collection of iterations through method.
self.approximations : tuple
Collection of evaluated points, p.
self.errors : tuple
Collection of propagated error through method.
Raises
------
ValueError
If input for desired iterations was assigned not an integer.
ValueError
If initial guesses did not evaluate to have opposite signs.
TypeError
If input expression cannot be understood as lambda or sympy expression nor as string.
Warnings
--------
Print to console if solution was found, or state that solution did not converge with given guess or prescribed tolerance.
Notes
-----
Relying on the Intermediate Value Theorem, this is a bracketed, root-finding method. Generates a sequence {p_n}^{inf}_{n=1} to approximate a zero of f(x), p and converges by O(1 / (2**N)).
Examples
--------
If f(x) = x**3 + 4*x**2 = 10
=> f(x) = x**3 + 4*x**2 - 10 = 0
"""
f, a, b, tol = self.function, self.a, self.b, self.tol
iter_guess = self.iter_guess
# calculate if expression
if isinstance(f, (FunctionType, sp.Expr)):
sym_function = sp.N(sp.sympify(f(self.variable)))
f = sp.lambdify(self.variable, sym_function)
# check if f(a) and f(b) are opposite signs
if f(a)*f(b) < 0:
if iter_guess == True:
# if left unassigned, guess
N = self.max_iterations("bisection")
elif isinstance(iter_guess, (int, float)):
# if defined as integer, use
N = int(iter_guess)
# else, break for bad assignment
else: raise ValueError(f"ERROR! Maximum iterations, N must be an integer greater than zero. {iter_guess} was given and not understood.")
# initialize
k, approximations, errors = 0, [f(a)], [1]
# exit by whichever condition is TRUE first
while errors[-1] >= tol and k <= N:
x = (b - a)/2
p = a + x # new value, p
approximations.append(p)
if f(a)*f(p) > 0: a = p # adjust next bounds
else: b = p
errors.append(abs(x)) # error of new value, p
k += 1 # iterate to k + 1
if k <= N: print("Congratulations! Solution found!")
else: print("Warning! Solution could not be found with initial guess or tolerance.")
self.iterations = tuple(range(k+1))
self.approximations = tuple(approximations)
self.errors = tuple(errors)
return pd.DataFrame(data={"Iterations": self.iterations, "Approximations": self.approximations, "Errors": self.errors})
# abort if f(a) is not opposite f(b)
else: raise ValueError(f"ERROR! Interval bounds, [a, b] = [{a}, {b}] must yield opposite signs in function, {sym_function}.")
# abort if not expression
else: raise TypeError("ERROR! The input function must be an expression.")
def false_position(self, p0, p1):
"""Given f(x) and initial guesses, p0 and p1 in [a, b] find x within tolerance.
Root-finding problem: f(x) = 0.
!!! Use lowest k !!!
Parameters
----------
p0 : float
First initial guess.
p1 : float
Second initial guess.
Returns
-------
pandas.DataFrame : dataframe
Summarized dataframe from iterations.
Yields
------
self.iterations : tuple
Collection of iterations through method.
self.approximations : tuple
Collection of evaluated points, p.
self.errors : tuple
Collection of propagated error through method.
Raises
------
ValueError
If input for desired iterations was assigned not an integer.
ValueError
If initial guesses did not evaluate to have opposite signs.
TypeError
If input expression cannot be understood as lambda or sympy expression nor as string.
Warnings
--------
Print to console if solution was found, or state that solution did not converge with given guess or prescribed tolerance.
Notes
-----
Bracketed root-finding method: like the secant method, each new approximation is the x-intercept of the secant line through (p0, f(p0)) and (p1, f(p1)), but the retained endpoints are chosen so that the root stays enclosed, which guarantees convergence (generally at a linear rate).
Examples
--------
If f(x) = x**3 + 4*x**2 - 10, then p0 = 1 and p1 = 2 bracket the root near x = 1.365.
"""
f, a, b, tol = self.function, self.a, self.b, self.tol
iter_guess, k = self.iter_guess, self.k
p0, p1 = float(p0), float(p1)
self.p0, self.p1 = p0, p1
# calculate if expression
if isinstance(f, (FunctionType, sp.Expr)):
sym_function = sp.N(sp.sympify(f(self.variable)))
f = sp.lambdify(self.variable, sym_function)
# check if f(a) and f(b) are opposites signs
if f(p0)*f(p1) < 0:
if iter_guess == True and k == 0:
# if left unassigned, guess
N = self.max_iterations("false position", p0=p0)
elif iter_guess == True and k != 0:
# if left unassigned, guess
N = self.max_iterations("false position", k=k, p0=p0)
elif isinstance(iter_guess, (int, float)):
# if defined as integer, use
N = int(iter_guess)
# else, break for bad assignment
else: raise ValueError(f"ERROR! Maximum iterations, N must be an integer greater than zero. {iter_guess} was given and not understood.")
# initialize
k, approximations, errors = 0, [f(a)], [1]
# exit by whichever condition is TRUE first
while errors[-1] >= tol and k <= N:
q0, q1 = f(p0), f(p1)
p = p1 - q1*(p1 - p0)/(q1 - q0) # new value, p
approximations.append(p)
errors.append(abs(p - p0)) # error of new value, p
if f(p)*q1 < 0: p0 = p1 # adjust next bounds
p1 = p
k += 1 # iterate to k + 1
if k <= N: print("Congratulations! Solution found!")
else: print("Warning! Solution could not be found with initial guess or tolerance.")
self.iterations = tuple(range(k+1))
self.approximations = tuple(approximations)
self.errors = tuple(errors)
return pd.DataFrame(data={"Iterations": self.iterations, "Approximations": self.approximations, "Errors": self.errors})
# abort if f(a) is not opposite f(b)
else: raise ValueError(f"ERROR! Interval bounds, [a, b] = [{a}, {b}] must yield opposite signs in function, {sym_function}.")
# abort if not expression
else: raise TypeError("ERROR! The input function must be an expression.")
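# Minimal usage sketch for false_position (illustrative; an integer iter_guess skips the automatic
# iteration estimate, which would otherwise need self.k to be set, e.g. via find_k()):
# obj = SingleVariableIteration(lambda x: x**3 + 4*x**2 - 10, a=1, b=2, power=-3, iter_guess=25)
# df = obj.false_position(1, 2)  # p0=1 and p1=2 bracket the root near x = 1.365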
def fixed_point(self, p0):
"""Given f(x) and initial guess, p0 in [a, b] find x within tolerance.
Root-finding problem: f(x) = 0.
!!! Use lowest k !!!
Parameters
----------
p0 : float
Initial guess.
Returns
-------
pandas.DataFrame : dataframe
Summarized dataframe from iterations.
Yields
------
self.iterations : tuple
Collection of iterations through method.
self.approximations : tuple
Collection of evaluated points, p.
self.errors : tuple
Collection of propagated error through method.
Raises
------
ValueError
If input for desired iterations was assigned not an integer.
ValueError
If initial guesses did not evaluate to have opposite signs.
TypeError
If input expression cannot be understood as lambda or sympy expression nor as string.
Warnings
--------
Print to console if solution was found, or state that solution did not converge with given guess or prescribed tolerance.
Notes
-----
Check that |g'(x)| <= (leading coefficient of g'(x)) for all x in [a, b].
Theorem:
1) Existence of a fixed-point:
If g in C[a, b] and g(x) in C[a, b] for all x in [a, b], then function, g has a fixed point in [a, b].
2) Uniqueness of a fixed point:
If g'(x) exists on [a, b] and a positive constant, k < 1 exist with {|g'(x)| <= k | x in (a, b)}, then there is exactly one fixed-point, `p` in [a, b].
Converges by O(linear) if g'(p) != 0, and O(quadratic) if g'(p) = 0 and g''(p) < M, where M = g''(xi) that is the error function.
Examples
--------
If g(x) = x**2 - 2
Then p = g(p) = p**2 - 2
=> p**2 - p - 2 = 0
"""
f, a, b, tol = self.function, self.a, self.b, self.tol
iter_guess, k = self.iter_guess, self.k
p0 = float(p0)
self.p0 = p0
# calculate if expression
if isinstance(f, (FunctionType, sp.Expr)):
sym_function = sp.N(sp.sympify(f(self.variable)))
f = sp.lambdify(self.variable, sym_function)
if iter_guess == True and k == 0:
# if left unassigned, guess
N = self.max_iterations("fixed point", p0=p0)
elif iter_guess == True and k != 0:
# if left unassigned, guess
N = self.max_iterations("fixed point", k=k, p0=p0)
elif isinstance(iter_guess, (int, float)):
# if defined as integer, use
N = int(iter_guess)
# else, break for bad assignment
else: raise ValueError(f"ERROR! Maximum iterations, N must be an integer greater than zero. {iter_guess} was given and not understood.")
# initialize
k, approximations, errors = 0, [f(a)], [1]
# exit by whichever condition is TRUE first
while errors[-1] >= tol and k <= N:
p = f(p0) # new value, p
approximations.append(p)
errors.append(abs((p - p0)/p0)) # error of new value, p
p0 = p # set future previous value
k += 1 # iterate to k + 1
if k <= N: print("Congratulations! Solution found!")
else: print("Warning! Solution could not be found with initial guess or tolerance.")
self.iterations = tuple(range(k+1))
self.approximations = tuple(approximations)
self.errors = tuple(errors)
return pd.DataFrame(data={"Iterations": self.iterations, "Approximations": self.approximations, "Errors": self.errors})
# abort if not expression
else: raise TypeError("ERROR! The input function must be an expression.")
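# Minimal usage sketch for fixed_point (rewrites x**3 + 4*x**2 - 10 = 0 as x = sqrt(10/(x + 4));
# values are illustrative and an integer iter_guess bypasses max_iterations):
# obj = SingleVariableIteration(lambda x: (10/(x + 4))**0.5, a=1, b=2, power=-3, iter_guess=15)
# df = obj.fixed_point(1.5)  # iterates p = g(p) toward the fixed point near x = 1.365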
def newton_raphson(self, p0):
"""Given f(x) and initial guess, p0 in [a, b], find x within tolerance.
Root-finding problem: f(x) = 0.
!!! Use lowest k !!!
Parameters
----------
p0 : float
Initial guess.
Returns
-------
pandas.DataFrame : dataframe
Summarized dataframe from iterations.
Yields
------
self.iterations : tuple
Collection of iterations through method.
self.approximations : tuple
Collection of evaluated points, p.
self.errors : tuple
Collection of propagated error through method.
Raises
------
ValueError
If input for desired iterations was assigned not an integer.
ValueError
If initial guesses did not evaluate to have opposite signs.
TypeError
If input expression cannot be understood as lambda or sympy expression nor as string.
Warnings
--------
Print to console if solution was found, or state that solution did not converge with given guess or prescribed tolerance.
Notes
-----
f'(x) != 0.
Not root-bracketed.
Initial guess must be close to real solution; else, will converge to different root or oscillate (if symmetric).
Check that |g'(x)| <= (leading coefficient of g'(x)) for all x in [a, b].
Technique based on first Taylor polynomial expansion of f about p0 and evaluated at x = p. |p - p0| is assumed small; therefore, 2nd order Taylor term, the error, is small.
Newton-Raphson has quickest convergence rate.
This method can be viewed as fixed-point iteration.
Theorem:
1) Existence of a fixed-point:
If g in C[a, b] and g(x) in C[a, b] for all x in [a, b], then function, g has a fixed point in [a, b].
2) Uniqueness of a fixed point:
If g'(x) exists on [a, b] and a positive constant, `k` < 1 exist with {|g'(x)| <= k | x in (a, b)}, then there is exactly one fixed-point, `p` in [a, b].
Converges by O(linear) if g'(p) != 0, and O(quadratic) if g'(p) = 0 and g''(p) < M, where M = g''(xi) that is the error function.
Examples
--------
If g(x) = x**2 - 2
Then p = g(p) = p**2 - 2
=> p**2 - p - 2 = 0
"""
f, a, b, tol = self.function, self.a, self.b, self.tol
iter_guess, k = self.iter_guess, self.k
p0 = float(p0)
self.p0 = p0
# calculate if expression
if isinstance(f,(FunctionType, sp.Expr)):
sym_function = sp.N(sp.sympify(f(self.variable)))
f = sp.lambdify(self.variable, sym_function)
# determine form of derivative
df = sp.lambdify(self.variable, sp.diff(sym_function))
if iter_guess == True and k == 0:
# if left unassigned, guess
N = self.max_iterations("<NAME>", p0=p0)
elif iter_guess == True and k != 0:
# if left unassigned, guess
N = self.max_iterations("<NAME>", k=k, p0=p0)
elif isinstance(iter_guess, int):
# if defined as integer, use
N = iter_guess
# else, break for bad assignment
else: raise ValueError(f"ERROR! Maximum iterations, N must be an integer greater than zero. {iter_guess} was given and not understood.")
# initialize
k, approximations, errors = 0, [f(a)], [1]
# exit by whichever condition is TRUE first
while errors[-1] >= tol and k <= N:
fp0 = f(p0)
dfp0 = df(p0)
p = p0 - (fp0/dfp0) # new value, p
approximations.append(p)
errors.append(abs(p - p0)) # error of new value, p
p0 = p # set future previous value
k += 1 # iterate to k + 1
if k <= N: print("Congratulations! Solution found!")
else: print("Warning! Solution could not be found with initial guess or tolerance.")
self.iterations = tuple(range(k+1))
self.approximations = tuple(approximations)
self.errors = tuple(errors)
return | pd.DataFrame(data={"Iterations": self.iterations, "Approximations": self.approximations, "Errors": self.errors}) | pandas.DataFrame |
import numpy as np
import pandas as pd
import cv2, os
from keras.models import load_model
import utils
# Constant
os.environ["CUDA_VISIBLE_DEVICES"] = "3"
BATCH_SIZE = 12
# load data
Dir = './imgs'
fo = | pd.read_csv('./imgs/labels.csv') | pandas.read_csv |
import re
import numpy as np
import pytest
import pandas as pd
import pandas._testing as tm
from pandas.core.arrays import IntervalArray
class TestSeriesReplace:
def test_replace_explicit_none(self):
# GH#36984 if the user explicitly passes value=None, give it to them
ser = pd.Series([0, 0, ""], dtype=object)
result = ser.replace("", None)
expected = pd.Series([0, 0, None], dtype=object)
tm.assert_series_equal(result, expected)
df = pd.DataFrame(np.zeros((3, 3)))
df.iloc[2, 2] = ""
result = df.replace("", None)
expected = pd.DataFrame(
{
0: np.zeros(3),
1: np.zeros(3),
2: np.array([0.0, 0.0, None], dtype=object),
}
)
assert expected.iloc[2, 2] is None
tm.assert_frame_equal(result, expected)
# GH#19998 same thing with object dtype
ser = pd.Series([10, 20, 30, "a", "a", "b", "a"])
result = ser.replace("a", None)
expected = pd.Series([10, 20, 30, None, None, "b", None])
assert expected.iloc[-1] is None
tm.assert_series_equal(result, expected)
def test_replace_noop_doesnt_downcast(self):
# GH#44498
ser = pd.Series([None, None, pd.Timestamp("2021-12-16 17:31")], dtype=object)
res = ser.replace({np.nan: None}) # should be a no-op
tm.assert_series_equal(res, ser)
assert res.dtype == object
# same thing but different calling convention
res = ser.replace(np.nan, None)
tm.assert_series_equal(res, ser)
assert res.dtype == object
def test_replace(self):
N = 100
ser = pd.Series(np.random.randn(N))
ser[0:4] = np.nan
ser[6:10] = 0
# replace list with a single value
return_value = ser.replace([np.nan], -1, inplace=True)
assert return_value is None
exp = ser.fillna(-1)
tm.assert_series_equal(ser, exp)
rs = ser.replace(0.0, np.nan)
ser[ser == 0.0] = np.nan
tm.assert_series_equal(rs, ser)
ser = pd.Series(np.fabs(np.random.randn(N)), tm.makeDateIndex(N), dtype=object)
ser[:5] = np.nan
ser[6:10] = "foo"
ser[20:30] = "bar"
# replace list with a single value
rs = ser.replace([np.nan, "foo", "bar"], -1)
assert (rs[:5] == -1).all()
assert (rs[6:10] == -1).all()
assert (rs[20:30] == -1).all()
assert (pd.isna(ser[:5])).all()
# replace with different values
rs = ser.replace({np.nan: -1, "foo": -2, "bar": -3})
assert (rs[:5] == -1).all()
assert (rs[6:10] == -2).all()
assert (rs[20:30] == -3).all()
assert (pd.isna(ser[:5])).all()
# replace with different values with 2 lists
rs2 = ser.replace([np.nan, "foo", "bar"], [-1, -2, -3])
tm.assert_series_equal(rs, rs2)
# replace inplace
return_value = ser.replace([np.nan, "foo", "bar"], -1, inplace=True)
assert return_value is None
assert (ser[:5] == -1).all()
assert (ser[6:10] == -1).all()
assert (ser[20:30] == -1).all()
def test_replace_nan_with_inf(self):
ser = pd.Series([np.nan, 0, np.inf])
tm.assert_series_equal(ser.replace(np.nan, 0), ser.fillna(0))
ser = pd.Series([np.nan, 0, "foo", "bar", np.inf, None, pd.NaT])
tm.assert_series_equal(ser.replace(np.nan, 0), ser.fillna(0))
filled = ser.copy()
filled[4] = 0
tm.assert_series_equal(ser.replace(np.inf, 0), filled)
def test_replace_listlike_value_listlike_target(self, datetime_series):
ser = pd.Series(datetime_series.index)
tm.assert_series_equal(ser.replace(np.nan, 0), ser.fillna(0))
# malformed
msg = r"Replacement lists must match in length\. Expecting 3 got 2"
with pytest.raises(ValueError, match=msg):
ser.replace([1, 2, 3], [np.nan, 0])
# ser is dt64 so can't hold 1 or 2, so this replace is a no-op
result = ser.replace([1, 2], [np.nan, 0])
tm.assert_series_equal(result, ser)
ser = pd.Series([0, 1, 2, 3, 4])
result = ser.replace([0, 1, 2, 3, 4], [4, 3, 2, 1, 0])
tm.assert_series_equal(result, pd.Series([4, 3, 2, 1, 0]))
def test_replace_gh5319(self):
# API change from 0.12?
# GH 5319
ser = pd.Series([0, np.nan, 2, 3, 4])
expected = ser.ffill()
result = ser.replace([np.nan])
tm.assert_series_equal(result, expected)
ser = pd.Series([0, np.nan, 2, 3, 4])
expected = ser.ffill()
result = ser.replace(np.nan)
tm.assert_series_equal(result, expected)
def test_replace_datetime64(self):
# GH 5797
ser = pd.Series(pd.date_range("20130101", periods=5))
expected = ser.copy()
expected.loc[2] = pd.Timestamp("20120101")
result = ser.replace({pd.Timestamp("20130103"): pd.Timestamp("20120101")})
tm.assert_series_equal(result, expected)
result = ser.replace(pd.Timestamp("20130103"), pd.Timestamp("20120101"))
tm.assert_series_equal(result, expected)
def test_replace_nat_with_tz(self):
# GH 11792: Test with replacing NaT in a list with tz data
ts = pd.Timestamp("2015/01/01", tz="UTC")
s = pd.Series([pd.NaT, pd.Timestamp("2015/01/01", tz="UTC")])
result = s.replace([np.nan, pd.NaT], pd.Timestamp.min)
expected = pd.Series([pd.Timestamp.min, ts], dtype=object)
tm.assert_series_equal(expected, result)
def test_replace_timedelta_td64(self):
tdi = pd.timedelta_range(0, periods=5)
ser = pd.Series(tdi)
# Using a single dict argument means we go through replace_list
result = ser.replace({ser[1]: ser[3]})
expected = pd.Series([ser[0], ser[3], ser[2], ser[3], ser[4]])
tm.assert_series_equal(result, expected)
def test_replace_with_single_list(self):
ser = pd.Series([0, 1, 2, 3, 4])
result = ser.replace([1, 2, 3])
tm.assert_series_equal(result, pd.Series([0, 0, 0, 0, 4]))
s = ser.copy()
return_value = s.replace([1, 2, 3], inplace=True)
assert return_value is None
tm.assert_series_equal(s, pd.Series([0, 0, 0, 0, 4]))
# make sure things don't get corrupted when fillna call fails
s = ser.copy()
msg = (
r"Invalid fill method\. Expecting pad \(ffill\) or backfill "
r"\(bfill\)\. Got crash_cymbal"
)
with pytest.raises(ValueError, match=msg):
return_value = s.replace([1, 2, 3], inplace=True, method="crash_cymbal")
assert return_value is None
tm.assert_series_equal(s, ser)
def test_replace_mixed_types(self):
ser = pd.Series(np.arange(5), dtype="int64")
def check_replace(to_rep, val, expected):
sc = ser.copy()
result = ser.replace(to_rep, val)
return_value = sc.replace(to_rep, val, inplace=True)
assert return_value is None
tm.assert_series_equal(expected, result)
tm.assert_series_equal(expected, sc)
# 3.0 can still be held in our int64 series, so we do not upcast GH#44940
tr, v = [3], [3.0]
check_replace(tr, v, ser)
# Note this matches what we get with the scalars 3 and 3.0
check_replace(tr[0], v[0], ser)
# MUST upcast to float
e = pd.Series([0, 1, 2, 3.5, 4])
tr, v = [3], [3.5]
check_replace(tr, v, e)
# casts to object
e = pd.Series([0, 1, 2, 3.5, "a"])
tr, v = [3, 4], [3.5, "a"]
check_replace(tr, v, e)
# again casts to object
e = pd.Series([0, 1, 2, 3.5, pd.Timestamp("20130101")])
tr, v = [3, 4], [3.5, pd.Timestamp("20130101")]
check_replace(tr, v, e)
# casts to object
e = pd.Series([0, 1, 2, 3.5, True], dtype="object")
tr, v = [3, 4], [3.5, True]
check_replace(tr, v, e)
# test an object with dates + floats + integers + strings
dr = pd.Series(pd.date_range("1/1/2001", "1/10/2001", freq="D"))
result = dr.astype(object).replace([dr[0], dr[1], dr[2]], [1.0, 2, "a"])
expected = pd.Series([1.0, 2, "a"] + dr[3:].tolist(), dtype=object)
tm.assert_series_equal(result, expected)
def test_replace_bool_with_string_no_op(self):
s = pd.Series([True, False, True])
result = s.replace("fun", "in-the-sun")
tm.assert_series_equal(s, result)
def test_replace_bool_with_string(self):
# nonexistent elements
s = pd.Series([True, False, True])
result = s.replace(True, "2u")
expected = pd.Series(["2u", False, "2u"])
tm.assert_series_equal(expected, result)
def test_replace_bool_with_bool(self):
s = pd.Series([True, False, True])
result = s.replace(True, False)
expected = pd.Series([False] * len(s))
tm.assert_series_equal(expected, result)
def test_replace_with_dict_with_bool_keys(self):
s = pd.Series([True, False, True])
result = s.replace({"asdf": "asdb", True: "yes"})
expected = pd.Series(["yes", False, "yes"])
tm.assert_series_equal(result, expected)
def test_replace_Int_with_na(self, any_int_ea_dtype):
# GH 38267
result = pd.Series([0, None], dtype=any_int_ea_dtype).replace(0, pd.NA)
expected = pd.Series([pd.NA, pd.NA], dtype=any_int_ea_dtype)
tm.assert_series_equal(result, expected)
result = pd.Series([0, 1], dtype=any_int_ea_dtype).replace(0, pd.NA)
result.replace(1, pd.NA, inplace=True)
tm.assert_series_equal(result, expected)
def test_replace2(self):
N = 100
ser = pd.Series(np.fabs(np.random.randn(N)), tm.makeDateIndex(N), dtype=object)
ser[:5] = np.nan
ser[6:10] = "foo"
ser[20:30] = "bar"
# replace list with a single value
rs = ser.replace([np.nan, "foo", "bar"], -1)
assert (rs[:5] == -1).all()
assert (rs[6:10] == -1).all()
assert (rs[20:30] == -1).all()
assert (pd.isna(ser[:5])).all()
# replace with different values
rs = ser.replace({np.nan: -1, "foo": -2, "bar": -3})
assert (rs[:5] == -1).all()
assert (rs[6:10] == -2).all()
assert (rs[20:30] == -3).all()
assert (pd.isna(ser[:5])).all()
# replace with different values with 2 lists
rs2 = ser.replace([np.nan, "foo", "bar"], [-1, -2, -3])
tm.assert_series_equal(rs, rs2)
# replace inplace
return_value = ser.replace([np.nan, "foo", "bar"], -1, inplace=True)
assert return_value is None
assert (ser[:5] == -1).all()
assert (ser[6:10] == -1).all()
assert (ser[20:30] == -1).all()
def test_replace_with_dictlike_and_string_dtype(self, nullable_string_dtype):
# GH 32621, GH#44940
ser = pd.Series(["one", "two", np.nan], dtype=nullable_string_dtype)
expected = pd.Series(["1", "2", np.nan], dtype=nullable_string_dtype)
result = ser.replace({"one": "1", "two": "2"})
tm.assert_series_equal(expected, result)
def test_replace_with_empty_dictlike(self):
# GH 15289
s = pd.Series(list("abcd"))
tm.assert_series_equal(s, s.replace({}))
with tm.assert_produces_warning(FutureWarning):
empty_series = pd.Series([])
tm.assert_series_equal(s, s.replace(empty_series))
def test_replace_string_with_number(self):
# GH 15743
s = pd.Series([1, 2, 3])
result = s.replace("2", np.nan)
expected = pd.Series([1, 2, 3])
tm.assert_series_equal(expected, result)
def test_replace_replacer_equals_replacement(self):
# GH 20656
# make sure all replacers are matching against original values
s = pd.Series(["a", "b"])
expected = pd.Series(["b", "a"])
result = s.replace({"a": "b", "b": "a"})
tm.assert_series_equal(expected, result)
def test_replace_unicode_with_number(self):
# GH 15743
s = pd.Series([1, 2, 3])
result = s.replace("2", np.nan)
expected = pd.Series([1, 2, 3])
tm.assert_series_equal(expected, result)
def test_replace_mixed_types_with_string(self):
# Testing mixed
s = pd.Series([1, 2, 3, "4", 4, 5])
result = s.replace([2, "4"], np.nan)
expected = pd.Series([1, np.nan, 3, np.nan, 4, 5])
tm.assert_series_equal(expected, result)
@pytest.mark.parametrize(
"categorical, numeric",
[
(pd.Categorical(["A"], categories=["A", "B"]), [1]),
(pd.Categorical(["A", "B"], categories=["A", "B"]), [1, 2]),
],
)
def test_replace_categorical(self, categorical, numeric):
# GH 24971, GH#23305
ser = pd.Series(categorical)
result = ser.replace({"A": 1, "B": 2})
expected = | pd.Series(numeric) | pandas.Series |
import collections
import fnmatch
import os
from typing import Union
import tarfile
import pandas as pd
import numpy as np
from pandas.core.dtypes.common import is_string_dtype, is_numeric_dtype
from hydrodataset.data.data_base import DataSourceBase
from hydrodataset.data.stat import cal_fdc
from hydrodataset.utils import hydro_utils
from hydrodataset.utils.hydro_utils import download_one_zip, unzip_nested_zip
CAMELS_NO_DATASET_ERROR_LOG = (
"We cannot read this dataset now. Please check if you choose the correct dataset:\n"
' ["AUS", "BR", "CA", "CL", "GB", "US", "YR"]'
)
def time_intersect_dynamic_data(obs: np.array, date: np.array, t_range: list):
"""
chose data from obs in the t_range
Parameters
----------
obs
a np array
date
all periods for obs
t_range
the time range we need, such as ["1990-01-01","2000-01-01"]
Returns
-------
np.array
the chosen data
"""
t_lst = hydro_utils.t_range_days(t_range)
nt = t_lst.shape[0]
if len(obs) != nt:
out = np.full([nt], np.nan)
[c, ind1, ind2] = np.intersect1d(date, t_lst, return_indices=True)
out[ind2] = obs[ind1]
else:
out = obs
return out
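# Illustrative behaviour (hypothetical values): if obs only covers 1995-2000 but t_range is
# ["1990-01-01", "2000-01-01"], the returned array has one entry per day of the requested range,
# with NaN wherever no observation date matches.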
class Camels(DataSourceBase):
def __init__(self, data_path, download=False, region: str = "US"):
"""
Initialization for CAMELS series dataset
Parameters
----------
data_path
where we put the dataset
download
if true, download
region
the default is CAMELS(-US), since it's the first CAMELS dataset.
Others now include: AUS, BR, CL, GB, YR
"""
super().__init__(data_path)
region_lst = ["AUS", "BR", "CA", "CE", "CL", "GB", "US", "YR"]
assert region in region_lst
self.region = region
self.data_source_description = self.set_data_source_describe()
if download:
self.download_data_source()
self.camels_sites = self.read_site_info()
def get_name(self):
return "CAMELS_" + self.region
def set_data_source_describe(self) -> collections.OrderedDict:
"""
Introduce the files in the dataset and list their location in the file system
Returns
-------
collections.OrderedDict
the description for a CAMELS dataset
"""
camels_db = self.data_source_dir
if self.region == "US":
# shp file of basins
camels_shp_file = os.path.join(
camels_db, "basin_set_full_res", "HCDN_nhru_final_671.shp"
)
# config of flow data
flow_dir = os.path.join(
camels_db,
"basin_timeseries_v1p2_metForcing_obsFlow",
"basin_dataset_public_v1p2",
"usgs_streamflow",
)
# forcing
forcing_dir = os.path.join(
camels_db,
"basin_timeseries_v1p2_metForcing_obsFlow",
"basin_dataset_public_v1p2",
"basin_mean_forcing",
)
forcing_types = ["daymet", "maurer", "nldas"]
# attr
attr_dir = os.path.join(
camels_db, "camels_attributes_v2.0", "camels_attributes_v2.0"
)
gauge_id_file = os.path.join(attr_dir, "camels_name.txt")
attr_key_lst = ["topo", "clim", "hydro", "vege", "soil", "geol"]
download_url_lst = [
"https://ral.ucar.edu/sites/default/files/public/product-tool/camels-catchment-attributes-and-meteorology-for-large-sample-studies-dataset-downloads/camels_attributes_v2.0.zip",
"https://ral.ucar.edu/sites/default/files/public/product-tool/camels-catchment-attributes-and-meteorology-for-large-sample-studies-dataset-downloads/basin_set_full_res.zip",
"https://ral.ucar.edu/sites/default/files/public/product-tool/camels-catchment-attributes-and-meteorology-for-large-sample-studies-dataset-downloads/basin_timeseries_v1p2_metForcing_obsFlow.zip",
]
return collections.OrderedDict(
CAMELS_DIR=camels_db,
CAMELS_FLOW_DIR=flow_dir,
CAMELS_FORCING_DIR=forcing_dir,
CAMELS_FORCING_TYPE=forcing_types,
CAMELS_ATTR_DIR=attr_dir,
CAMELS_ATTR_KEY_LST=attr_key_lst,
CAMELS_GAUGE_FILE=gauge_id_file,
CAMELS_BASINS_SHP_FILE=camels_shp_file,
CAMELS_DOWNLOAD_URL_LST=download_url_lst,
)
elif self.region == "AUS":
# id and name
gauge_id_file = os.path.join(
camels_db,
"01_id_name_metadata",
"01_id_name_metadata",
"id_name_metadata.csv",
)
# shp file of basins
camels_shp_file = os.path.join(
camels_db,
"02_location_boundary_area",
"02_location_boundary_area",
"shp",
"CAMELS_AUS_BasinOutlets_adopted.shp",
)
# config of flow data
flow_dir = os.path.join(camels_db, "03_streamflow", "03_streamflow")
# attr
attr_dir = os.path.join(camels_db, "04_attributes", "04_attributes")
# forcing
forcing_dir = os.path.join(
camels_db, "05_hydrometeorology", "05_hydrometeorology"
)
return collections.OrderedDict(
CAMELS_DIR=camels_db,
CAMELS_FLOW_DIR=flow_dir,
CAMELS_FORCING_DIR=forcing_dir,
CAMELS_ATTR_DIR=attr_dir,
CAMELS_GAUGE_FILE=gauge_id_file,
CAMELS_BASINS_SHP_FILE=camels_shp_file,
)
elif self.region == "BR":
# attr
attr_dir = os.path.join(
camels_db, "01_CAMELS_BR_attributes", "01_CAMELS_BR_attributes"
)
# we don't need the location attr file
attr_key_lst = [
"climate",
"geology",
"human_intervention",
"hydrology",
"land_cover",
"quality_check",
"soil",
"topography",
]
# id and name, there are two types stations in CAMELS_BR, and we only chose the 897-stations version
gauge_id_file = os.path.join(attr_dir, "camels_br_topography.txt")
# shp file of basins
camels_shp_file = os.path.join(
camels_db,
"14_CAMELS_BR_catchment_boundaries",
"14_CAMELS_BR_catchment_boundaries",
"camels_br_catchments.shp",
)
# config of flow data
flow_dir_m3s = os.path.join(
camels_db, "02_CAMELS_BR_streamflow_m3s", "02_CAMELS_BR_streamflow_m3s"
)
flow_dir_mm_selected_catchments = os.path.join(
camels_db,
"03_CAMELS_BR_streamflow_mm_selected_catchments",
"03_CAMELS_BR_streamflow_mm_selected_catchments",
)
flow_dir_simulated = os.path.join(
camels_db,
"04_CAMELS_BR_streamflow_simulated",
"04_CAMELS_BR_streamflow_simulated",
)
# forcing
forcing_dir_precipitation_chirps = os.path.join(
camels_db,
"05_CAMELS_BR_precipitation_chirps",
"05_CAMELS_BR_precipitation_chirps",
)
forcing_dir_precipitation_mswep = os.path.join(
camels_db,
"06_CAMELS_BR_precipitation_mswep",
"06_CAMELS_BR_precipitation_mswep",
)
forcing_dir_precipitation_cpc = os.path.join(
camels_db,
"07_CAMELS_BR_precipitation_cpc",
"07_CAMELS_BR_precipitation_cpc",
)
forcing_dir_evapotransp_gleam = os.path.join(
camels_db,
"08_CAMELS_BR_evapotransp_gleam",
"08_CAMELS_BR_evapotransp_gleam",
)
forcing_dir_evapotransp_mgb = os.path.join(
camels_db,
"09_CAMELS_BR_evapotransp_mgb",
"09_CAMELS_BR_evapotransp_mgb",
)
forcing_dir_potential_evapotransp_gleam = os.path.join(
camels_db,
"10_CAMELS_BR_potential_evapotransp_gleam",
"10_CAMELS_BR_potential_evapotransp_gleam",
)
forcing_dir_temperature_min_cpc = os.path.join(
camels_db,
"11_CAMELS_BR_temperature_min_cpc",
"11_CAMELS_BR_temperature_min_cpc",
)
forcing_dir_temperature_mean_cpc = os.path.join(
camels_db,
"12_CAMELS_BR_temperature_mean_cpc",
"12_CAMELS_BR_temperature_mean_cpc",
)
forcing_dir_temperature_max_cpc = os.path.join(
camels_db,
"13_CAMELS_BR_temperature_max_cpc",
"13_CAMELS_BR_temperature_max_cpc",
)
return collections.OrderedDict(
CAMELS_DIR=camels_db,
CAMELS_FLOW_DIR=[
flow_dir_m3s,
flow_dir_mm_selected_catchments,
flow_dir_simulated,
],
CAMELS_FORCING_DIR=[
forcing_dir_precipitation_chirps,
forcing_dir_precipitation_mswep,
forcing_dir_precipitation_cpc,
forcing_dir_evapotransp_gleam,
forcing_dir_evapotransp_mgb,
forcing_dir_potential_evapotransp_gleam,
forcing_dir_temperature_min_cpc,
forcing_dir_temperature_mean_cpc,
forcing_dir_temperature_max_cpc,
],
CAMELS_ATTR_DIR=attr_dir,
CAMELS_ATTR_KEY_LST=attr_key_lst,
CAMELS_GAUGE_FILE=gauge_id_file,
CAMELS_BASINS_SHP_FILE=camels_shp_file,
)
elif self.region == "CL":
# attr
attr_dir = os.path.join(camels_db, "1_CAMELScl_attributes")
attr_file = os.path.join(attr_dir, "1_CAMELScl_attributes.txt")
# shp file of basins
camels_shp_file = os.path.join(
camels_db,
"CAMELScl_catchment_boundaries",
"catchments_camels_cl_v1.3.shp",
)
# config of flow data
flow_dir_m3s = os.path.join(camels_db, "2_CAMELScl_streamflow_m3s")
flow_dir_mm = os.path.join(camels_db, "3_CAMELScl_streamflow_mm")
# forcing
forcing_dir_precip_cr2met = os.path.join(
camels_db, "4_CAMELScl_precip_cr2met"
)
forcing_dir_precip_chirps = os.path.join(
camels_db, "5_CAMELScl_precip_chirps"
)
forcing_dir_precip_mswep = os.path.join(
camels_db, "6_CAMELScl_precip_mswep"
)
forcing_dir_precip_tmpa = os.path.join(camels_db, "7_CAMELScl_precip_tmpa")
forcing_dir_tmin_cr2met = os.path.join(camels_db, "8_CAMELScl_tmin_cr2met")
forcing_dir_tmax_cr2met = os.path.join(camels_db, "9_CAMELScl_tmax_cr2met")
forcing_dir_tmean_cr2met = os.path.join(
camels_db, "10_CAMELScl_tmean_cr2met"
)
forcing_dir_pet_8d_modis = os.path.join(
camels_db, "11_CAMELScl_pet_8d_modis"
)
forcing_dir_pet_hargreaves = os.path.join(
camels_db,
"12_CAMELScl_pet_hargreaves",
)
forcing_dir_swe = os.path.join(camels_db, "13_CAMELScl_swe")
return collections.OrderedDict(
CAMELS_DIR=camels_db,
CAMELS_FLOW_DIR=[flow_dir_m3s, flow_dir_mm],
CAMELS_FORCING_DIR=[
forcing_dir_precip_cr2met,
forcing_dir_precip_chirps,
forcing_dir_precip_mswep,
forcing_dir_precip_tmpa,
forcing_dir_tmin_cr2met,
forcing_dir_tmax_cr2met,
forcing_dir_tmean_cr2met,
forcing_dir_pet_8d_modis,
forcing_dir_pet_hargreaves,
forcing_dir_swe,
],
CAMELS_ATTR_DIR=attr_dir,
CAMELS_GAUGE_FILE=attr_file,
CAMELS_BASINS_SHP_FILE=camels_shp_file,
)
elif self.region == "GB":
# shp file of basins
camels_shp_file = os.path.join(
camels_db,
"8344e4f3-d2ea-44f5-8afa-86d2987543a9",
"8344e4f3-d2ea-44f5-8afa-86d2987543a9",
"data",
"CAMELS_GB_catchment_boundaries",
"CAMELS_GB_catchment_boundaries.shp",
)
# flow and forcing data are in a same file
flow_dir = os.path.join(
camels_db,
"8344e4f3-d2ea-44f5-8afa-86d2987543a9",
"8344e4f3-d2ea-44f5-8afa-86d2987543a9",
"data",
"timeseries",
)
forcing_dir = flow_dir
# attr
attr_dir = os.path.join(
camels_db,
"8344e4f3-d2ea-44f5-8afa-86d2987543a9",
"8344e4f3-d2ea-44f5-8afa-86d2987543a9",
"data",
)
gauge_id_file = os.path.join(
attr_dir, "CAMELS_GB_hydrometry_attributes.csv"
)
attr_key_lst = [
"climatic",
"humaninfluence",
"hydrogeology",
"hydrologic",
"hydrometry",
"landcover",
"soil",
"topographic",
]
return collections.OrderedDict(
CAMELS_DIR=camels_db,
CAMELS_FLOW_DIR=flow_dir,
CAMELS_FORCING_DIR=forcing_dir,
CAMELS_ATTR_DIR=attr_dir,
CAMELS_ATTR_KEY_LST=attr_key_lst,
CAMELS_GAUGE_FILE=gauge_id_file,
CAMELS_BASINS_SHP_FILE=camels_shp_file,
)
elif self.region == "YR":
# shp files of basins
camels_shp_files_dir = os.path.join(
camels_db, "9_Normal_Camels_YR", "Normal_Camels_YR_basin_boundary"
)
# attr, flow and forcing data are all in the same dir. each basin has one dir.
flow_dir = os.path.join(
camels_db, "9_Normal_Camels_YR", "1_Normal_Camels_YR_basin_data"
)
forcing_dir = flow_dir
attr_dir = flow_dir
# no gauge id file for CAMELS_YR; natural_watersheds.txt showed unregulated basins in CAMELS_YR
gauge_id_file = os.path.join(
camels_db, "9_Normal_Camels_YR", "natural_watersheds.txt"
)
return collections.OrderedDict(
CAMELS_DIR=camels_db,
CAMELS_FLOW_DIR=flow_dir,
CAMELS_FORCING_DIR=forcing_dir,
CAMELS_ATTR_DIR=attr_dir,
CAMELS_GAUGE_FILE=gauge_id_file,
CAMELS_BASINS_SHP_DIR=camels_shp_files_dir,
)
elif self.region == "CA":
# shp file of basins
camels_shp_files_dir = os.path.join(camels_db, "CANOPEX_BOUNDARIES")
# config of flow data
flow_dir = os.path.join(
camels_db, "CANOPEX_NRCAN_ASCII", "CANOPEX_NRCAN_ASCII"
)
forcing_dir = flow_dir
# There is no attr data in CANOPEX, hence we use attr from HYSET -- https://osf.io/7fn4c/
attr_dir = camels_db
gauge_id_file = os.path.join(camels_db, "STATION_METADATA.xlsx")
return collections.OrderedDict(
CAMELS_DIR=camels_db,
CAMELS_FLOW_DIR=flow_dir,
CAMELS_FORCING_DIR=forcing_dir,
CAMELS_ATTR_DIR=attr_dir,
CAMELS_GAUGE_FILE=gauge_id_file,
CAMELS_BASINS_SHP_DIR=camels_shp_files_dir,
)
elif self.region == "CE":
# We use A_basins_total_upstrm
# shp file of basins
camels_shp_file = os.path.join(
camels_db,
"2_LamaH-CE_daily",
"A_basins_total_upstrm",
"3_shapefiles",
"Basins_A.shp",
)
# config of flow data
flow_dir = os.path.join(
camels_db, "2_LamaH-CE_daily", "D_gauges", "2_timeseries", "daily"
)
forcing_dir = os.path.join(
camels_db,
"2_LamaH-CE_daily",
"A_basins_total_upstrm",
"2_timeseries",
"daily",
)
attr_dir = os.path.join(
camels_db, "2_LamaH-CE_daily", "A_basins_total_upstrm", "1_attributes"
)
gauge_id_file = os.path.join(
camels_db,
"2_LamaH-CE_daily",
"D_gauges",
"1_attributes",
"Gauge_attributes.csv",
)
return collections.OrderedDict(
CAMELS_DIR=camels_db,
CAMELS_FLOW_DIR=flow_dir,
CAMELS_FORCING_DIR=forcing_dir,
CAMELS_ATTR_DIR=attr_dir,
CAMELS_GAUGE_FILE=gauge_id_file,
CAMELS_BASINS_SHP_FILE=camels_shp_file,
)
else:
raise NotImplementedError(CAMELS_NO_DATASET_ERROR_LOG)
def download_data_source(self) -> None:
"""
Download CAMELS dataset.
Now we only support CAMELS-US's downloading.
For others, please download it manually and put all files of a CAMELS dataset in one directory.
For example, all files of CAMELS_AUS should be put in "camels_aus" directory
Returns
-------
None
"""
camels_config = self.data_source_description
if self.region == "US":
if not os.path.isdir(camels_config["CAMELS_DIR"]):
os.makedirs(camels_config["CAMELS_DIR"])
[
download_one_zip(attr_url, camels_config["CAMELS_DIR"])
for attr_url in camels_config["CAMELS_DOWNLOAD_URL_LST"]
if not os.path.isfile(
os.path.join(camels_config["CAMELS_DIR"], attr_url.split("/")[-1])
)
]
print("The CAMELS_US data have been downloaded!")
print(
"Please download it manually and put all files of a CAMELS dataset in the CAMELS_DIR directory."
)
print("We unzip all files now.")
if self.region == "CE":
# We only use CE's dauly files now and it is tar.gz formatting
file = tarfile.open(
os.path.join(camels_config["CAMELS_DIR"], "2_LamaH-CE_daily.tar.gz")
)
# extracting file
file.extractall(
os.path.join(camels_config["CAMELS_DIR"], "2_LamaH-CE_daily")
)
file.close()
for f_name in os.listdir(camels_config["CAMELS_DIR"]):
if fnmatch.fnmatch(f_name, "*.zip"):
unzip_dir = os.path.join(camels_config["CAMELS_DIR"], f_name[0:-4])
file_name = os.path.join(camels_config["CAMELS_DIR"], f_name)
unzip_nested_zip(file_name, unzip_dir)
def read_site_info(self) -> pd.DataFrame:
"""
Read the basic information of gages in a CAMELS dataset
Returns
-------
pd.DataFrame
basic info of gages
"""
camels_file = self.data_source_description["CAMELS_GAUGE_FILE"]
if self.region == "US":
data = pd.read_csv(
camels_file, sep=";", dtype={"gauge_id": str, "huc_02": str}
)
elif self.region == "AUS":
data = pd.read_csv(camels_file, sep=",", dtype={"station_id": str})
elif self.region == "BR":
data = pd.read_csv(camels_file, sep="\s+", dtype={"gauge_id": str})
elif self.region == "CL":
data = pd.read_csv(camels_file, sep="\t", index_col=0)
elif self.region == "GB":
data = pd.read_csv(camels_file, sep=",", dtype={"gauge_id": str})
elif self.region == "YR":
dirs_ = os.listdir(self.data_source_description["CAMELS_ATTR_DIR"])
data = pd.DataFrame({"gauge_id": dirs_})
elif self.region == "CA":
data = pd.read_excel(camels_file)
elif self.region == "CE":
data = pd.read_csv(camels_file, sep=";")
else:
raise NotImplementedError(CAMELS_NO_DATASET_ERROR_LOG)
return data
def get_constant_cols(self) -> np.array:
"""
all readable attrs in CAMELS
Returns
-------
np.array
attribute types
"""
data_folder = self.data_source_description["CAMELS_ATTR_DIR"]
if self.region == "US":
var_dict = dict()
var_lst = list()
key_lst = self.data_source_description["CAMELS_ATTR_KEY_LST"]
for key in key_lst:
data_file = os.path.join(data_folder, "camels_" + key + ".txt")
data_temp = pd.read_csv(data_file, sep=";")
var_lst_temp = list(data_temp.columns[1:])
var_dict[key] = var_lst_temp
var_lst.extend(var_lst_temp)
return np.array(var_lst)
elif self.region == "AUS":
attr_all_file = os.path.join(
self.data_source_description["CAMELS_DIR"],
"CAMELS_AUS_Attributes-Indices_MasterTable.csv",
)
camels_aus_attr_indices_data = pd.read_csv(attr_all_file, sep=",")
# exclude station id
return camels_aus_attr_indices_data.columns.values[1:]
elif self.region == "BR":
var_dict = dict()
var_lst = list()
key_lst = self.data_source_description["CAMELS_ATTR_KEY_LST"]
for key in key_lst:
data_file = os.path.join(data_folder, "camels_br_" + key + ".txt")
data_temp = pd.read_csv(data_file, sep="\s+")
var_lst_temp = list(data_temp.columns[1:])
var_dict[key] = var_lst_temp
var_lst.extend(var_lst_temp)
return np.array(var_lst)
elif self.region == "CL":
camels_cl_attr_data = self.camels_sites
# exclude station id
return camels_cl_attr_data.index.values
elif self.region == "GB":
var_dict = dict()
var_lst = list()
key_lst = self.data_source_description["CAMELS_ATTR_KEY_LST"]
for key in key_lst:
data_file = os.path.join(
data_folder, "CAMELS_GB_" + key + "_attributes.csv"
)
data_temp = pd.read_csv(data_file, sep=",")
var_lst_temp = list(data_temp.columns[1:])
var_dict[key] = var_lst_temp
var_lst.extend(var_lst_temp)
return np.array(var_lst)
elif self.region == "YR":
attr_json_file = os.path.join(
self.data_source_description["CAMELS_ATTR_DIR"],
"0000",
"attributes.json",
)
attr_json = hydro_utils.unserialize_json_ordered(attr_json_file)
return np.array(list(attr_json.keys()))
elif self.region == "CA":
attr_all_file = os.path.join(
self.data_source_description["CAMELS_DIR"],
"HYSETS_watershed_properties.txt",
)
canopex_attr_indices_data = pd.read_csv(attr_all_file, sep=";")
# exclude HYSETS watershed id
return canopex_attr_indices_data.columns.values[1:]
elif self.region == "CE":
attr_all_file = os.path.join(
self.data_source_description["CAMELS_ATTR_DIR"],
"Catchment_attributes.csv",
)
lamah_ce_attr_indices_data = pd.read_csv(attr_all_file, sep=";")
return lamah_ce_attr_indices_data.columns.values[1:]
else:
raise NotImplementedError(CAMELS_NO_DATASET_ERROR_LOG)
def get_relevant_cols(self) -> np.array:
"""
all readable forcing types
Returns
-------
np.array
forcing types
"""
if self.region == "US":
return np.array(["dayl", "prcp", "srad", "swe", "tmax", "tmin", "vp"])
elif self.region == "AUS":
forcing_types = []
for root, dirs, files in os.walk(
self.data_source_description["CAMELS_FORCING_DIR"]
):
if root == self.data_source_description["CAMELS_FORCING_DIR"]:
continue
for file in files:
forcing_types.append(file[:-4])
return np.array(forcing_types)
elif self.region == "BR":
return np.array(
[
forcing_dir.split(os.sep)[-1][13:]
for forcing_dir in self.data_source_description[
"CAMELS_FORCING_DIR"
]
]
)
elif self.region == "CL":
return np.array(
[
"_".join(forcing_dir.split(os.sep)[-1].split("_")[2:])
for forcing_dir in self.data_source_description[
"CAMELS_FORCING_DIR"
]
]
)
elif self.region == "GB":
return np.array(
[
"precipitation",
"pet",
"temperature",
"peti",
"humidity",
"shortwave_rad",
"longwave_rad",
"windspeed",
]
)
elif self.region == "YR":
return np.array(
[
"pre",
"evp",
"gst_mean",
"prs_mean",
"tem_mean",
"rhu",
"win_mean",
"gst_min",
"prs_min",
"tem_min",
"gst_max",
"prs_max",
"tem_max",
"ssd",
"win_max",
]
)
elif self.region == "CA":
# Although there is climatic potential evaporation item, CANOPEX does not have any PET data
return np.array(["prcp", "tmax", "tmin"])
elif self.region == "CE":
# daily meteorological forcing variables provided by LamaH-CE
return np.array(
[
"2m_temp_max",
"2m_temp_mean",
"2m_temp_min",
"2m_dp_temp_max",
"2m_dp_temp_mean",
"2m_dp_temp_min",
"10m_wind_u",
"10m_wind_v",
"fcst_alb",
"lai_high_veg",
"lai_low_veg",
"swe",
"surf_net_solar_rad_max",
"surf_net_solar_rad_mean",
"surf_net_therm_rad_max",
"surf_net_therm_rad_mean",
"surf_press",
"total_et",
"prec",
"volsw_123",
"volsw_4",
]
)
else:
raise NotImplementedError(CAMELS_NO_DATASET_ERROR_LOG)
def get_target_cols(self) -> np.array:
"""
For CAMELS, the target vars are streamflows
Returns
-------
np.array
streamflow types
"""
if self.region == "US":
return np.array(["usgsFlow"])
elif self.region == "AUS":
# QualityCodes are not streamflow data.
# MLd means "1 Megaliters Per Day"; 1 MLd = 0.011574074074074 cubic-meters-per-second
# mmd means "mm/day"
return np.array(
[
"streamflow_MLd",
"streamflow_MLd_inclInfilled",
"streamflow_mmd",
"streamflow_QualityCodes",
]
)
elif self.region == "BR":
return np.array(
[
flow_dir.split(os.sep)[-1][13:]
for flow_dir in self.data_source_description["CAMELS_FLOW_DIR"]
]
)
elif self.region == "CL":
return np.array(
[
flow_dir.split(os.sep)[-1][11:]
for flow_dir in self.data_source_description["CAMELS_FLOW_DIR"]
]
)
elif self.region == "GB":
return np.array(["discharge_spec", "discharge_vol"])
elif self.region == "YR":
return np.array(["normalized_q"])
elif self.region == "CA":
return np.array(["discharge"])
elif self.region == "CE":
return np.array(["qobs"])
else:
raise NotImplementedError(CAMELS_NO_DATASET_ERROR_LOG)
def get_other_cols(self) -> dict:
return {
"FDC": {"time_range": ["1980-01-01", "2000-01-01"], "quantile_num": 100}
}
def read_object_ids(self, **kwargs) -> np.array:
"""
read station ids
Parameters
----------
**kwargs
optional params if needed
Returns
-------
np.array
gage/station ids
"""
if self.region in ["BR", "GB", "US", "YR"]:
return self.camels_sites["gauge_id"].values
elif self.region == "AUS":
return self.camels_sites["station_id"].values
elif self.region == "CL":
station_ids = self.camels_sites.columns.values
# for 7-digit id, replace the space with 0 to get a 8-digit id
cl_station_ids = [
station_id.split(" ")[-1].zfill(8) for station_id in station_ids
]
return np.array(cl_station_ids)
elif self.region == "CA":
ids = self.camels_sites["STATION_ID"].values
id_strs = [id_.split("'")[1] for id_ in ids]
# although there are 698 sites, there are only 611 sites with attributes data.
# Hence we only use 611 sites now
attr_all_file = os.path.join(
self.data_source_description["CAMELS_DIR"],
"HYSETS_watershed_properties.txt",
)
if not os.path.isfile(attr_all_file):
raise FileNotFoundError(
"Please download HYSETS_watershed_properties.txt from https://osf.io/7fn4c/ and put it in the "
"root directory of CANOPEX"
)
canopex_attr_data = pd.read_csv(attr_all_file, sep=";")
return np.intersect1d(id_strs, canopex_attr_data["Official_ID"].values)
elif self.region == "CE":
# Not all basins have attributes, so we just chose those with attrs
ids = self.camels_sites["ID"].values
attr_all_file = os.path.join(
self.data_source_description["CAMELS_ATTR_DIR"],
"Catchment_attributes.csv",
)
attr_data = pd.read_csv(attr_all_file, sep=";")
return np.intersect1d(ids, attr_data["ID"].values)
else:
raise NotImplementedError(CAMELS_NO_DATASET_ERROR_LOG)
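# Hedged usage sketch (assumes a local CAMELS-US copy under "./camels_us"):
# camels_us = Camels("./camels_us", download=False, region="US")
# gage_ids = camels_us.read_object_ids()  # numpy array of gauge id strings from camels_name.txt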
def read_usgs_gage(self, usgs_id, t_range):
"""
read streamflow data of a station from CAMELS-US
Parameters
----------
usgs_id
the station id
t_range
the time range, for example, ["1990-01-01", "2000-01-01"]
Returns
-------
np.array
streamflow data of one station for a given time range
"""
print("reading %s streamflow data", usgs_id)
gage_id_df = self.camels_sites
huc = gage_id_df[gage_id_df["gauge_id"] == usgs_id]["huc_02"].values[0]
usgs_file = os.path.join(
self.data_source_description["CAMELS_FLOW_DIR"],
huc,
usgs_id + "_streamflow_qc.txt",
)
data_temp = pd.read_csv(usgs_file, sep=r"\s+", header=None)
obs = data_temp[4].values
obs[obs < 0] = np.nan
t_lst = hydro_utils.t_range_days(t_range)
nt = t_lst.shape[0]
if len(obs) != nt:
out = np.full([nt], np.nan)
df_date = data_temp[[1, 2, 3]]
df_date.columns = ["year", "month", "day"]
date = pd.to_datetime(df_date).values.astype("datetime64[D]")
[C, ind1, ind2] = np.intersect1d(date, t_lst, return_indices=True)
out[ind2] = obs[ind1]
else:
out = obs
return out
def read_br_gage_flow(self, gage_id, t_range, flow_type):
"""
Read gage's streamflow from CAMELS-BR
Parameters
----------
gage_id
the station id
t_range
the time range, for example, ["1990-01-01", "2000-01-01"]
flow_type
"streamflow_m3s" or "streamflow_mm_selected_catchments" or "streamflow_simulated"
Returns
-------
np.array
streamflow data of one station for a given time range
"""
dir_ = [
flow_dir
for flow_dir in self.data_source_description["CAMELS_FLOW_DIR"]
if flow_type in flow_dir
][0]
if flow_type == "streamflow_mm_selected_catchments":
flow_type = "streamflow_mm"
elif flow_type == "streamflow_simulated":
flow_type = "simulated_streamflow"
gage_file = os.path.join(dir_, gage_id + "_" + flow_type + ".txt")
data_temp = pd.read_csv(gage_file, sep=r"\s+")
obs = data_temp.iloc[:, 3].values
obs[obs < 0] = np.nan
df_date = data_temp[["year", "month", "day"]]
date = pd.to_datetime(df_date).values.astype("datetime64[D]")
out = time_intersect_dynamic_data(obs, date, t_range)
return out
def read_gb_gage_flow_forcing(self, gage_id, t_range, var_type):
"""
Read gage's streamflow or forcing from CAMELS-GB
Parameters
----------
gage_id
the station id
t_range
the time range, for example, ["1990-01-01", "2000-01-01"]
var_type
flow type: "discharge_spec" or "discharge_vol"
forcing type: "precipitation", "pet", "temperature", "peti", "humidity", "shortwave_rad", "longwave_rad",
"windspeed"
Returns
-------
np.array
streamflow or forcing data of one station for a given time range
"""
gage_file = os.path.join(
self.data_source_description["CAMELS_FLOW_DIR"],
"CAMELS_GB_hydromet_timeseries_" + gage_id + "_19701001-20150930.csv",
)
data_temp = pd.read_csv(gage_file, sep=",")
obs = data_temp[var_type].values
if var_type in ["discharge_spec", "discharge_vol"]:
obs[obs < 0] = np.nan
date = pd.to_datetime(data_temp["date"]).values.astype("datetime64[D]")
out = time_intersect_dynamic_data(obs, date, t_range)
return out
def read_target_cols(
self,
gage_id_lst: Union[list, np.array] = None,
t_range: list = None,
target_cols: Union[list, np.array] = None,
**kwargs
) -> np.array:
"""
read target values; for CAMELS, they are streamflows
default target_cols is an one-value list
Parameters
----------
gage_id_lst
station ids
t_range
the time range, for example, ["1990-01-01", "2000-01-01"]
target_cols
the default is None, but we need at least one default target.
For CAMELS-US, it is ["usgsFlow"];
for CAMELS-AUS, it's ["streamflow_mmd"]
for CAMELS-AUS, it's ["streamflow_m3s"]
kwargs
some other params if needed
Returns
-------
np.array
streamflow data, 3-dim [station, time, streamflow]
"""
if target_cols is None:
return np.array([])
else:
nf = len(target_cols)
t_range_list = hydro_utils.t_range_days(t_range)
nt = t_range_list.shape[0]
y = np.empty([len(gage_id_lst), nt, nf])
if self.region == "US":
for k in range(len(gage_id_lst)):
data_obs = self.read_usgs_gage(gage_id_lst[k], t_range)
# For CAMELS-US, only ["usgsFlow"]
y[k, :, 0] = data_obs
elif self.region == "AUS":
for k in range(len(target_cols)):
flow_data = pd.read_csv(
os.path.join(
self.data_source_description["CAMELS_FLOW_DIR"],
target_cols[k] + ".csv",
)
)
df_date = flow_data[["year", "month", "day"]]
date = pd.to_datetime(df_date).values.astype("datetime64[D]")
[c, ind1, ind2] = np.intersect1d(
date, t_range_list, return_indices=True
)
chosen_data = flow_data[gage_id_lst].values[ind1, :]
chosen_data[chosen_data < 0] = np.nan
y[:, ind2, k] = chosen_data.T
elif self.region == "BR":
for j in range(len(target_cols)):
for k in range(len(gage_id_lst)):
data_obs = self.read_br_gage_flow(
gage_id_lst[k], t_range, target_cols[j]
)
y[k, :, j] = data_obs
elif self.region == "CL":
for k in range(len(target_cols)):
if target_cols[k] == "streamflow_m3s":
flow_data = pd.read_csv(
os.path.join(
self.data_source_description["CAMELS_FLOW_DIR"][0],
"2_CAMELScl_streamflow_m3s.txt",
),
sep="\t",
index_col=0,
)
elif target_cols[k] == "streamflow_mm":
flow_data = pd.read_csv(
os.path.join(
self.data_source_description["CAMELS_FLOW_DIR"][1],
"3_CAMELScl_streamflow_mm.txt",
),
sep="\t",
index_col=0,
)
else:
raise NotImplementedError(CAMELS_NO_DATASET_ERROR_LOG)
date = pd.to_datetime(flow_data.index.values).values.astype(
"datetime64[D]"
)
[c, ind1, ind2] = np.intersect1d(
date, t_range_list, return_indices=True
)
station_ids = self.read_object_ids()
assert all(x < y for x, y in zip(station_ids, station_ids[1:]))
[s, ind3, ind4] = np.intersect1d(
station_ids, gage_id_lst, return_indices=True
)
chosen_data = flow_data.iloc[ind1, ind3].replace(
"\s+", np.nan, regex=True
)
chosen_data = chosen_data.astype(float)
chosen_data[chosen_data < 0] = np.nan
y[:, ind2, k] = chosen_data.values.T
elif self.region == "GB":
for j in range(len(target_cols)):
for k in range(len(gage_id_lst)):
data_obs = self.read_gb_gage_flow_forcing(
gage_id_lst[k], t_range, target_cols[j]
)
y[k, :, j] = data_obs
elif self.region == "YR":
for k in range(len(gage_id_lst)):
# only one streamflow type: normalized_q
flow_file = os.path.join(
self.data_source_description["CAMELS_FLOW_DIR"],
gage_id_lst[k],
target_cols[0] + ".csv",
)
flow_data = pd.read_csv(flow_file, sep=",")
date = pd.to_datetime(flow_data["date"]).values.astype("datetime64[D]")
[c, ind1, ind2] = np.intersect1d(
date, t_range_list, return_indices=True
)
# flow data has been normalized, so we don't set negative values NaN
y[k, ind2, 0] = flow_data["q"].values[ind1]
elif self.region == "CA":
for k in range(len(gage_id_lst)):
# only one streamflow type: discharge
canopex_id = self.camels_sites[
self.camels_sites["STATION_ID"] == "'" + gage_id_lst[k] + "'"
]["CANOPEX_ID"].values[0]
flow_file = os.path.join(
self.data_source_description["CAMELS_FLOW_DIR"],
str(canopex_id) + ".dly",
)
read_flow_file = pd.read_csv(flow_file, header=None).values.tolist()
flow_data = []
flow_date = []
for one_site in read_flow_file:
flow_date.append(
hydro_utils.t2dt(int(one_site[0][:8].replace(" ", "0")))
)
all_data = one_site[0].split(" ")
real_data = [one_data for one_data in all_data if one_data != ""]
flow_data.append(float(real_data[-3]))
date = pd.to_datetime(flow_date).values.astype("datetime64[D]")
[c, ind1, ind2] = np.intersect1d(
date, t_range_list, return_indices=True
)
obs = np.array(flow_data)
obs[obs < 0] = np.nan
y[k, ind2, 0] = obs[ind1]
elif self.region == "CE":
for k in range(len(gage_id_lst)):
flow_file = os.path.join(
self.data_source_description["CAMELS_FLOW_DIR"],
"ID_" + str(gage_id_lst[k]) + ".csv",
)
flow_data = pd.read_csv(flow_file, sep=";")
df_date = flow_data[["YYYY", "MM", "DD"]]
df_date.columns = ["year", "month", "day"]
date = pd.to_datetime(df_date).values.astype("datetime64[D]")
[c, ind1, ind2] = np.intersect1d(
date, t_range_list, return_indices=True
)
obs = flow_data["qobs"].values
obs[obs < 0] = np.nan
y[k, ind2, 0] = obs[ind1]
else:
raise NotImplementedError(CAMELS_NO_DATASET_ERROR_LOG)
return y
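    # A minimal usage sketch (hypothetical, not from the original source; the method
    # above is assumed to be named `read_target_cols`): with `camels` an instance of
    # this class configured for the US region, the call returns a 3-D array shaped
    # [station, time, streamflow]:
    #
    #   y = camels.read_target_cols(
    #       gage_id_lst=["01013500", "01022500"],
    #       t_range=["1990-01-01", "2000-01-01"],
    #       target_cols=["usgsFlow"],
    #   )
    #   y.shape  # -> (2, n_days_in_t_range, 1)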
def read_forcing_gage(self, usgs_id, var_lst, t_range_list, forcing_type="daymet"):
# data_source = daymet or maurer or nldas
print("reading %s forcing data", usgs_id)
gage_id_df = self.camels_sites
huc = gage_id_df[gage_id_df["gauge_id"] == usgs_id]["huc_02"].values[0]
data_folder = self.data_source_description["CAMELS_FORCING_DIR"]
if forcing_type == "daymet":
temp_s = "cida"
else:
temp_s = forcing_type
data_file = os.path.join(
data_folder,
forcing_type,
huc,
"%s_lump_%s_forcing_leap.txt" % (usgs_id, temp_s),
)
data_temp = pd.read_csv(data_file, sep=r"\s+", header=None, skiprows=4)
forcing_lst = [
"Year",
"Mnth",
"Day",
"Hr",
"dayl",
"prcp",
"srad",
"swe",
"tmax",
"tmin",
"vp",
]
df_date = data_temp[[0, 1, 2]]
df_date.columns = ["year", "month", "day"]
        date = pd.to_datetime(df_date)
import sys
import numpy
import json
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from itertools import permutations
def is_significant(X, delta):
X = numpy.array(X)
assessment = X > delta
return assessment
def is_relevant_to_conformation(X_a, X_b, delta):
"""Assess relevance of interactions relative to a conformation in contrast to another above some threshold"""
X_a = numpy.array(X_a)
X_b = numpy.array(X_b)
try:
dX = X_a - X_b
assessment = dX > delta
return assessment
except:
print("Check input vectors")
def is_robust_after_md(X_init, X_final, delta_2):
"""Assess probability change between different simulation stages up to some threshold"""
X_init = numpy.array(X_init)
X_final = numpy.array(X_final)
dX = X_final - X_init
bol_assessment = abs(dX) < delta_2
return bol_assessment
def is_relevant_and_robust(P, conformations, md_stages, Deltas):
"""Assess whethere relevance of iteractions per conformations is robust under MD stages"""
delta_0 = Deltas[0]
delta_1 = Deltas[1]
delta_2 = Deltas[2]
stage_init = md_stages[0]; stage_final = md_stages[-1]
Z = []
for C_ab in list(permutations(conformations,2)):
C_a = C_ab[0]; C_b = C_ab[1];
X_a_init = P[C_a][stage_init] ; X_a_final = P[C_a][stage_final]
X_b_init = P[C_b][stage_init] ; X_b_final = P[C_b][stage_final]
############################################
test0_a_init = is_significant(X_a_init, delta_0)
test1_a_init = is_relevant_to_conformation(X_a_init, X_b_init, delta_1)
test1_a_final = is_relevant_to_conformation(X_a_final, X_b_final, delta_1)
test2_a = is_robust_after_md(X_a_final - X_a_init, X_b_final - X_b_init, delta_2)
############################################
overall = numpy.array([
test0_a_init,
test1_a_init*test1_a_final,
test2_a
]).T
bol_assessment = [all(x) for x in overall]
Z.append(list(map(int,bol_assessment)))
return Z
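# Minimal illustration (not part of the original analysis; the numbers are made up):
# two toy conformations with interaction probabilities at the 'docked' and 'prmd'
# stages, run through the same assessment used in the main block below.
def _toy_assessment_example():
    P_toy = {
        'Conformation0': {'docked': numpy.array([0.80, 0.05]),
                          'prmd': numpy.array([0.75, 0.05])},
        'Conformation1': {'docked': numpy.array([0.20, 0.50]),
                          'prmd': numpy.array([0.25, 0.55])},
    }
    # Deltas = [significance, conformation-relevance, MD-robustness] thresholds
    return is_relevant_and_robust(P_toy, ['Conformation0', 'Conformation1'],
                                  ['docked', 'prmd'], [0.1, 0.1, 0.4])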
########################################################
if __name__ == "__main__" :
df_name = sys.argv[1] # JSON DF input
outfile = sys.argv[2] # JSON output
#########################################################
# LOAD DataFrame
#########################################################
workdir = '/projects/s21/ba13026/Wza_Modeling/L-structures/rosetta/bg_test/md_relax/md_100ns_dbs/'
df_path = workdir+df_name
with open(df_path,'r') as fp:
df_test = pd.DataFrame(json.load(fp))
#########################################################
# INTERACTION ASSESSMENT
#########################################################
## Set up values
md_stages = ['docked', 'prmd']
mutant_names = ['cWza','cWza-K375C','cWza-S355C','cWza-Y373C']
CONFORMATIONS = {
'cWza' : ['Conformation0', 'Conformation1'],
'cWza-K375C' : ['Conformation0', 'Conformation1'],
'cWza-S355C' : ['Conformation0', 'Conformation1'],
'cWza-Y373C' : ['Conformation1']
}
## Threshold values
Deltas = [0.1, 0.1, 0.4]
dc = 0
ASSESSMENT = {}
for i in range(len(mutant_names)):
mutant = mutant_names[i]
conformations = CONFORMATIONS[mutant]
if mutant != 'cWza-Y373C':
#########################################################
# Extract interaction data for mutant conformations and MD stages
#########################################################
if mutant != 'cWza-K375C':
df_columns = [str(0+dc),str(14+dc),str(1+dc),str(15+dc)] # Docked and PRMD data, per conformations
else:
df_columns = [str(0+dc),str(15+dc),str(1+dc),str(14+dc)] # Docked and PRMD data, per conformations
df_mutant = df_test[df_columns].fillna(0) # Replace NaN entries with zeros
## Conformations and MD stages to compare
P = {
'Conformation0' : {
'docked': numpy.array(df_mutant[df_columns[0]]),
'prmd' : numpy.array(df_mutant[df_columns[1]])
},
'Conformation1' : {
'docked': numpy.array(df_mutant[df_columns[2]]),
'prmd' : numpy.array(df_mutant[df_columns[3]])
}
}
#########################################################
# Interaction assessment
#########################################################
assessment = is_relevant_and_robust(P, conformations, md_stages, Deltas)
ASSESSMENT[mutant] = assessment
else:
#########################################################
# Extract interaction data for mutant single conformation and MD stages
#########################################################
df_columns = [str(0+dc),str(14+dc)] # Docked and PRMD data, per conformations
df_mutant = df_test[df_columns].fillna(0) # Replace NaN entries with zeros
## Conformations and MD stages to compare
P = {
'Conformation1' : {
'docked': numpy.array(df_mutant[df_columns[0]]),
'prmd' : numpy.array(df_mutant[df_columns[1]])
}
}
#########################################################
# Interaction assessment
#########################################################
delta_0 = Deltas[0]; delta_2 = Deltas[2]
C = 'Conformation1'
X_init = P[C]['docked']; X_final = P[C]['prmd'];
test_0 = is_significant(X_init, delta_0)
test_1 = is_robust_after_md(X_init, X_final, delta_2)
overall = numpy.array([
test_0,
test_1
]).T
assessment = list(map(int,[all(x) for x in overall]))
#########################################################
ASSESSMENT[mutant] = assessment
dc += 2
#########################################################
# Save data in DF as JSON file
#########################################################
# DF Columns
Columns = [
'cWza:C0',
'cWza:C1',
'cWza-K375C:C0',
'cWza-K375C:C1',
'cWza-S355C:C0',
'cWza-S355C:C1',
'cWza-Y373C:C'
]
# Get data for heatmap
inter_dict = ASSESSMENT
Data = numpy.array([
inter_dict['cWza'][0],
inter_dict['cWza'][1],
inter_dict['cWza-K375C'][0],
inter_dict['cWza-K375C'][1],
inter_dict['cWza-S355C'][0],
inter_dict['cWza-S355C'][1],
inter_dict['cWza-Y373C']
]).T
# Define DF and filter out
    df_out = pd.DataFrame(Data, columns=Columns)
# coding: utf-8
# # Um mês depois do primeiro mutirão
#
# https://datasciencebr.com/um-m%C3%AAs-depois-do-primeiro-mutir%C3%A3o-369975af4bb5
# In[1]:
import numpy as np
import pandas as pd
from serenata_toolbox.datasets import fetch
fetch('2016-12-06-reimbursements.xz', '../data')
reimbursements = pd.read_csv('../data/2016-12-06-reimbursements.xz',
dtype={'document_number': np.str, 'year': np.str},
low_memory=False)
# In[2]:
import os.path
import urllib.request
import zipfile
inbox_url = 'https://github.com/datasciencebr/serenata-de-amor-inbox/archive/master.zip'
inbox_filepath = '/tmp/master.zip'
if not os.path.exists(inbox_filepath):
urllib.request.urlretrieve(inbox_url, inbox_filepath)
if not os.path.exists('/tmp/serenata-de-amor-inbox'):
zip_ref = zipfile.ZipFile(inbox_filepath, 'r')
zip_ref.extractall('/tmp')
zip_ref.close()
# In[3]:
emails = sc.wholeTextFiles('/tmp/serenata-de-amor-inbox-master/Pedido de Acesso a Informacao/**/message.txt')
emails.count()
# In[4]:
import os
import re
emails = sc.wholeTextFiles('/tmp/serenata-de-amor-inbox-master/Resposta da Camara/**/message.txt')
emails = emails.filter(lambda txt: 'Discussion Thread' in txt[1])
messages = (emails
    .map(lambda txt: txt[1].split('\n--------------'))
    .map(lambda txt: next(x for x in txt if 'Resposta By E-mail' in x))
    .map(lambda txt: re.sub(r'(?:\-){2,}', '', txt)))
print(messages.count())
# In[5]:
emails_with_return = messages.filter(lambda txt: 'devolução' in txt)
emails_lines_with_return = emails_with_return.map(lambda txt: txt.split('\n')).cache()
regex = r'R\$ ((?:\d+)(?:,\d+)?)'
def get_report_id(line):
return re.search(r'Question Reference.+(\d{6}\-\d{6})', line).groups()[0]
def returned_amount(string):
match = re.search(regex, string)
value = match.group(1) if match else ''
return float(value.replace(',', '.'))
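# Quick illustration of the helpers above (hypothetical strings, not real emails):
#   returned_amount('... devolução de R$ 123,45 ...')       -> 123.45
#   get_report_id('Question Reference #170101-000123 ...')  -> '170101-000123'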
report_ids_with_return = (emails_lines_with_return
    .map(lambda lines: [line for line in lines if 'Question Reference' in line])
    .map(lambda lines: get_report_id(lines[0]))
    .collect())
values = (emails_lines_with_return
    .map(lambda txt: next(x for x in txt if 'devolução' in x))
    .map(returned_amount))
values.count(), values.sum()
# In[6]:
returned_values = pd.DataFrame([
pd.Series(report_ids_with_return, name='report_id'),
pd.Series(values.collect(), name='returned_value'),
]).T
# In[7]:
def get_document_number(line):
return re.search(r'numAno=(\d{4}).+idDocumento=(\d+)', line).groups()[0:2]
def get_investigator(line):
return re.search(r'\((.+)\) \(', line).groups()[0]
email_lines = messages.map(lambda txt: txt.split('\n')).cache()
document_numbers = (email_lines
    .map(lambda lines: [line for line in lines if 'idDocumento=' in line])
    .map(lambda lines: [get_document_number(line) for line in lines])
    .collect())
report_ids = (email_lines
    .map(lambda lines: [line for line in lines if 'Question Reference' in line])
    .map(lambda lines: get_report_id(lines[0]))
    .collect())
import itertools
reports = pd.DataFrame([
pd.Series(document_numbers, name='document'),
pd.Series(report_ids, name='report_id'),
]).T
documents = list(itertools.chain(*reports['document'].values))
report_docs = pd.DataFrame(documents,
columns=('year', 'document_number'))
report_docs = pd.merge(report_docs,
reimbursements,
how='left')
def matching_id(values):
does_match = (report_docs['year'] == values[0]) & (report_docs['document_number'] == values[1])
return report_docs.loc[does_match, 'document_id'].iloc[0]
reports = pd.merge(reports, returned_values, how='left')
reports['document_id'] = reports['document'].apply(lambda row: [matching_id(x) for x in row])
new_reports = reports.apply(lambda x: pd.Series(x['document_id']), axis=1)
""" OOI Object """
import datetime
import logging
import os
import re
import threading
import time
import warnings
from io import StringIO
from queue import Queue
import gevent
import pandas as pd
import pytz
import requests
import s3fs
import urllib3
import xarray as xr
from dateutil import parser
from lxml.html import fromstring as html_parser
from yodapy.datasources.ooi.CAVA import CAVA
from yodapy.datasources.ooi.helpers import set_thread
from yodapy.utils.conn import (
download_url,
fetch_url,
fetch_xr,
get_download_urls,
instrument_to_query,
perform_ek60_download,
perform_ek60_processing,
)
from yodapy.utils.files import CREDENTIALS_FILE
from yodapy.utils.parser import (
get_instrument_list,
get_nc_urls,
parse_annotations_json,
parse_deployments_json,
parse_global_range_dataframe,
parse_parameter_streams_dataframe,
parse_raw_data_catalog,
parse_streams_dataframe,
parse_toc_instruments,
unix_time_millis,
)
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
logging.basicConfig(
level=logging.INFO, format="(%(threadName)-10s) %(message)s"
)
logger = logging.getLogger(__name__)
print_lock = threading.Lock()
DATA_TEAM_GITHUB_INFRASTRUCTURE = "https://raw.githubusercontent.com/ooi-data-review/datateam-portal-backend/master/infrastructure"
FILE_SYSTEM = s3fs.S3FileSystem(anon=True)
BUCKET_DATA = "io2data/data"
class OOI(CAVA):
"""OOI Object for Ocean Observatories Initiative Data Retrieval.
Attributes:
        ooi_username (str): Username for OOI API Data Access.
ooi_token (str): Token for OOI API Data Access.
source_name (str): Data source name.
regions (pandas.DataFrame): Table of OOI regions.
sites (pandas.DataFrame): Table of OOI sites.
instruments (pandas.DataFrame): Table of available instrument streams.
global_ranges (pandas.DataFrame): Table of global ranges for each instrument streams.
deployments (pandas.DataFrame): Table of deployments for filtered instrument streams.
annotations (pandas.DataFrame): Table of annotations for filtered instrument streams.
start_date (list): List of start dates requested.
end_date (list): List of end dates requested.
last_request (list): List of requested urls and parameters.
last_m2m_urls (list): List of requested M2M urls.
cava_arrays (pandas.DataFrame): Cabled array team Arrays vocab table.
cava_sites (pandas.DataFrame): Cabled array team Sites vocab table.
cava_infrastructures (pandas.DataFrame): Cabled array team Infrastructures vocab table.
cava_instruments (pandas.DataFrame): Cabled array team Instruments vocab table.
cava_parameters (pandas.DataFrame): Cabled array team Parameters vocab table.
"""
def __init__(
self, ooi_username=None, ooi_token=None, cloud_source=False, **kwargs
):
super().__init__()
self._source_name = "OOI"
self._start_date = None
self._end_date = None
# Private global variables
self._OOI_M2M_VOCAB = (
"https://ooinet.oceanobservatories.org/api/m2m/12586/vocab"
)
self._OOI_M2M_TOC = "https://ooinet.oceanobservatories.org/api/m2m/12576/sensor/inv/toc"
self._OOI_M2M_STREAMS = (
"https://ooinet.oceanobservatories.org/api/m2m/12575/stream"
)
self._OOI_DATA_URL = (
"https://ooinet.oceanobservatories.org/api/m2m/12576/sensor/inv"
)
self._OOI_M2M_ANNOTATIONS = (
"https://ooinet.oceanobservatories.org/api/m2m/12580/anno/find"
)
self._OOI_M2M_DEPLOYMENT_QUERY = "https://ooinet.oceanobservatories.org/api/m2m/12587/events/deployment/query"
# From visualocean
self._OOI_VISUALOCEAN_M_STATS = (
"https://ooi-visualocean.whoi.edu/instruments/stats-monthly"
)
self._OOI_GLOBAL_RANGE = "https://raw.githubusercontent.com/ooi-integration/qc-lookup/master/data_qc_global_range_values.csv"
# From GitHub
self._OOI_PORTAL_REGIONS = (
f"{DATA_TEAM_GITHUB_INFRASTRUCTURE}/regions.csv"
)
self._OOI_PORTAL_SITES = f"{DATA_TEAM_GITHUB_INFRASTRUCTURE}/sites.csv"
# Not used
# self._OOI_VOCAB = 'https://raw.githubusercontent.com/ooi-integration/asset-management/master/vocab/vocab.csv'
self._regions = None
self._sites = None
# User inputs
self.ooi_username = ooi_username
self.ooi_token = ooi_token
# Private cache variables
self._rvocab = None
self._rglobal_range = None
self._rstreams = None
self._rtoc = None
self._raw_datadf = None
self._raw_data_url = None
# For bio-acoustic sonar
self._zplsc_data_catalog = None
self._raw_file_dict = None
self._data_type = None
self._current_data_catalog = None
self._filtered_data_catalog = None
self._q = None
self._raw_data = []
self._dataset_list = []
self._netcdf_urls = []
# Cloud copy
self._s3content = None
self._cloud_source = cloud_source
# ----------- Session Configs ---------------------
self._session = requests.Session()
self._pool_connections = kwargs.get("pool_connections", 100)
self._pool_maxsize = kwargs.get("pool_maxsize", 100)
self._adapter = requests.adapters.HTTPAdapter(
pool_connections=self._pool_connections,
pool_maxsize=self._pool_maxsize,
)
self._session.mount("https://", self._adapter)
self._session.verify = False
# --------------------------------------------------
self._request_urls = None
self._last_m2m_urls = []
self._last_download_list = None
self._last_downloaded_netcdfs = None
self._thread_list = []
self._setup()
@property
def regions(self):
""" Returns the OOI regions """
if not isinstance(self._regions, pd.DataFrame):
try:
self._regions = pd.read_csv(self._OOI_PORTAL_REGIONS).rename(
{
"reference_designator": "array_rd",
"name": "region_name",
},
axis="columns",
)
except Exception as e:
logger.error(e)
return self._regions
@property
def sites(self):
""" Returns the OOI sites """
if not isinstance(self._sites, pd.DataFrame):
try:
self._sites = (
pd.read_csv(self._OOI_PORTAL_SITES)
.dropna(subset=["longitude", "latitude"]) # noqa
.rename(
{
"reference_designator": "site_rd",
"name": "site_name",
},
axis="columns",
)
)
except Exception as e:
logger.error(e)
return self._sites
@property
def instruments(self):
def threads_alive(t):
return not t.is_alive()
if all(list(map(threads_alive, self._thread_list))):
""" Returns instruments dataframe """
if isinstance(self._filtered_data_catalog, pd.DataFrame):
return get_instrument_list(self._filtered_data_catalog)
if isinstance(self._current_data_catalog, pd.DataFrame):
return get_instrument_list(self._current_data_catalog)
else:
message = "Please wait while we fetch the metadata ..."
logger.info(message)
@property
def deployments(self):
""" Return instruments deployments """
instrument_list = self._current_data_catalog
if isinstance(self._filtered_data_catalog, pd.DataFrame):
instrument_list = self._filtered_data_catalog
if len(instrument_list) <= 50:
text = f"Fetching deployments from {len(instrument_list)} unique instrument streams..." # noqa
print(text) # noqa
logger.info(text)
dflist = [
self._get_deployments(inst)
for idx, inst in instrument_list.iterrows()
] # noqa
return pd.concat(dflist).reset_index(drop="index")
else:
raise Exception(
f"You have {len(instrument_list)} unique streams; too many to fetch deployments. Please filter by performing search."
) # noqa
@property
def annotations(self):
""" Return instruments annotations """
instrument_list = self._current_data_catalog
if isinstance(self._filtered_data_catalog, pd.DataFrame):
instrument_list = self._filtered_data_catalog
if len(instrument_list) <= 20:
text = f"Fetching annotations from {len(instrument_list)} unique instrument streams..." # noqa
print(text) # noqa
logger.info(text)
dflist = [
self._get_annotations(inst)
for idx, inst in instrument_list.iterrows()
] # noqa
return pd.concat(dflist).reset_index(drop="index")
else:
raise Exception(
f"You have {len(instrument_list)} unique streams; too many to fetch annotations. Please filter by performing search."
) # noqa
@property
def start_date(self):
""" Return requested start date(s) """
if isinstance(self._start_date, pd.Series):
return self._start_date
return "Start date(s) can't be found."
@property
def end_date(self):
""" Return requested end date(s) """
if isinstance(self._end_date, pd.Series):
return self._end_date
return "End date(s) can't be found."
@property
def source_name(self):
""" Return data source name """
return self._source_name
@property
def last_requests(self):
""" Return last request url and parameters """
if self._request_urls:
return self._request_urls
return "Data request has not been made."
@property
def last_m2m_urls(self):
""" Return last request m2m urls """
if self._last_m2m_urls:
return self._last_m2m_urls
return "Data request has not been made."
@property
def global_ranges(self):
""" Return global ranges """
return self._get_global_ranges()
def view_instruments(self):
"""
**DEPRECATED.**
Shows the current instruments requested.
Use OOI.instruments attribute instead.
Returns:
DataFrame: Pandas dataframe of the instruments.
"""
warnings.warn(
"The function view_instruments is deprecated. Please use OOI.instruments attribute instead.",
DeprecationWarning,
stacklevel=2,
)
return self.instruments
def view_regions(self):
"""
**DEPRECATED.**
Shows the regions within OOI.
Use OOI.regions attribute instead.
Returns:
DataFrame: Pandas dataframe of the regions.
"""
warnings.warn(
"The function view_regions is deprecated. Please use OOI.regions attribute instead.",
DeprecationWarning,
stacklevel=2,
)
return self.regions
def view_sites(self):
"""
**DEPRECATED.**
Shows the sites within OOI.
Use OOI.sites attribute instead.
Returns:
DataFrame: Pandas dataframe of the sites.
"""
warnings.warn(
"The function view_sites is deprecated. Please use OOI.sites attribute instead.",
DeprecationWarning,
stacklevel=2,
)
return self.sites
def __repr__(self):
""" Prints out the representation of the OOI object """
inst_text = "Instrument Stream"
if isinstance(self._current_data_catalog, pd.DataFrame):
data_length = len(
self._current_data_catalog.drop_duplicates(
subset=[
"reference_designator",
"stream_method",
"stream_rd",
]
)
)
else:
data_length = 0
if isinstance(self._filtered_data_catalog, pd.DataFrame):
data_length = len(
self._filtered_data_catalog.drop_duplicates(
subset=[
"reference_designator",
"stream_method",
"stream_rd",
]
)
)
if data_length > 1:
inst_text = inst_text + "s"
return (
f"<Data Source: {self._source_name} ({data_length} {inst_text})>"
) # noqa
def __len__(self):
""" Prints the length of the object """
if isinstance(self._filtered_data_catalog, pd.DataFrame):
return len(
self._filtered_data_catalog.drop_duplicates(
subset=[
"reference_designator",
"stream_method",
"stream_rd",
]
)
)
else:
return 0
def _setup(self):
""" Setup the OOI Instance by fetching data catalog ahead of time """
logger.debug("Setting UFrame credentials.")
if not self.ooi_username or not self.ooi_token:
self._use_existing_credentials()
# Check if ooinet is available
try:
req = requests.get("https://ooinet.oceanobservatories.org")
if req.status_code == 200:
threads = [
("get-data-catalog", self._get_data_catalog),
("get-global-ranges", self._get_global_ranges),
("get-rawdata-filelist", self._get_rawdata_filelist),
] # noqa
for t in threads:
ft = set_thread(*t)
self._thread_list.append(ft)
else:
logger.warning(
f"Server not available, please try again later: {req.status_code}"
)
except Exception as e:
logger.error(f"Server not available, please try again later: {e}")
# Retrieve datasets info in the s3 bucket.
try:
self._s3content = [
os.path.basename(rd) for rd in FILE_SYSTEM.ls(BUCKET_DATA)
]
except Exception as e:
logger.error(e)
def request_data(
self, begin_date, end_date, data_type="netcdf", limit=-1, **kwargs
):
"""
Request data for filtered instruments.
Args:
begin_date (str): Begin date of desired data in ISO-8601 Format.
end_date (str): End date of desired data in ISO-8601 Format.
data_type (str): Desired data type. Either 'netcdf' or 'json'.
limit (int, optional): Desired data points. Required for 'json' ``data_type``. Max is 20000.
**kwargs: Optional Keyword arguments. \n
**time_check** - set to true (default) to ensure the request times fall within the stream data availability \n
**exec_dpa** - boolean value specifying whether to execute all data product algorithms to return L1/L2 parameters (Default is True) \n
**provenance** - boolean value specifying whether provenance information should be included in the data set (Default is True) \n
**email** - provide email.
Returns:
self: Modified OOI Object. Use ``raw()`` to see either data url for netcdf or json result for json.
"""
self._data_type = data_type
begin_dates = list(map(lambda x: x.strip(" "), begin_date.split(",")))
end_dates = list(map(lambda x: x.strip(" "), end_date.split(",")))
data_catalog_copy = self._filtered_data_catalog.copy()
self._q = Queue()
# Limit the number of request
if len(data_catalog_copy) > 6:
text = f"Too many instruments to request data for! Max is 6, you have {len(data_catalog_copy)}" # noqa
logger.error(text)
raise Exception(text)
if len(begin_dates) == 1 and len(end_dates) == 1:
begin_dates = begin_dates[0]
end_dates = end_dates[0]
elif len(begin_dates) != len(end_dates):
logger.warning(
"Please provide the same number of begin and end dates"
)
raise ValueError(
"Please provide the same number of begin and end dates"
)
else:
begin_dates = pd.Series(begin_dates)
end_dates = pd.Series(end_dates)
        self._start_date = begin_dates
self._end_date = end_dates
request_urls = []
if self._cloud_source:
data_catalog_copy.loc[:, "user_begin"] = pd.to_datetime(
begin_dates
)
data_catalog_copy.loc[:, "user_end"] = pd.to_datetime(end_dates)
data_catalog_copy.loc[:, "full_rd"] = data_catalog_copy.apply(
lambda row: "-".join(
[
row["reference_designator"],
row["stream_method"],
row["stream_rd"],
]
),
axis=1,
)
data_catalog_copy.loc[:, "rd_path"] = data_catalog_copy[
"full_rd"
].apply(lambda row: "/".join([BUCKET_DATA, row]))
request_urls = data_catalog_copy["rd_path"].values.tolist()
for idx, row in data_catalog_copy.iterrows():
tempdf = pd.DataFrame(
FILE_SYSTEM.ls(row["rd_path"]), columns=["uri"]
)
tempdf.loc[:, "time"] = tempdf.apply(
lambda r: pd.to_datetime(os.path.basename(r["uri"])),
axis=1,
)
selected = tempdf[
(tempdf.time >= row["user_begin"])
& (tempdf.time <= row["user_end"])
]
if len(selected) > 0:
self._q.put([selected, row["user_begin"], row["user_end"]])
else:
data_catalog_copy["user_begin"] = begin_dates
data_catalog_copy["user_end"] = end_dates
# For bio-acoustic sonar only
self._zplsc_data_catalog = data_catalog_copy[
data_catalog_copy.instrument_name.str.contains(
"bio-acoustic sonar", case=False
)
]
data_catalog_copy = data_catalog_copy[
~data_catalog_copy.instrument_name.str.contains(
"bio-acoustic sonar", case=False
)
]
if len(data_catalog_copy) > 0:
request_urls = [
instrument_to_query(
ooi_url=self._OOI_DATA_URL,
site_rd=row.site_rd,
infrastructure_rd=row.infrastructure_rd,
instrument_rd=row.instrument_rd,
stream_method=row.stream_method,
stream_rd=row.stream_rd,
begin_ts=row.user_begin,
end_ts=row.user_end,
stream_start=row.begin_date,
stream_end=row.end_date,
application_type=data_type,
limit=limit,
**kwargs,
)
for idx, row in data_catalog_copy.iterrows()
]
prepared_requests = [
requests.Request(
"GET",
data_url,
auth=(self.ooi_username, self.ooi_token),
params=params,
)
for data_url, params in request_urls
] # noqa
for job in prepared_requests:
prepped = job.prepare()
self._last_m2m_urls.append(prepped.url)
self._q.put(prepped)
if len(self._raw_data) > 0:
self._raw_data = []
self._process_request()
# block until all tasks are done
self._q.join()
if isinstance(self._zplsc_data_catalog, pd.DataFrame):
if len(self._zplsc_data_catalog) > 0:
self._zplsc_data_catalog.loc[
:, "ref"
] = self._zplsc_data_catalog.reference_designator.apply(
lambda rd: rd[:14]
)
filtered_datadf = {}
for idx, row in self._zplsc_data_catalog.iterrows():
filtered_datadf[row["ref"]] = self._raw_datadf[row["ref"]][
row["user_begin"] : row["user_end"]
].copy()
filtered_rawdata = filtered_datadf[row["ref"]]
filtered_rawdata.loc[
:, "urls"
] = filtered_rawdata.filename.apply(
lambda f: "/".join([self._raw_data_url[row["ref"]], f])
)
raw_file_dict = perform_ek60_download(filtered_datadf)
self._raw_file_dict = raw_file_dict
self._raw_data.append(raw_file_dict)
self._request_urls = request_urls
return self
def search(
self,
region=None,
site=None,
node=None,
instrument=None,
stream_type="Science",
stream_method=None,
stream=None,
parameter=None,
):
"""
Perform a search, and filters data catalog
Args:
region (str): Region name. If multiple use comma separated.
site (str): Site name. If multiple use comma separated.
node (str): Node name. If multiple use comma separated.
instrument (str): Instrument name. If multiple use comma separated.
stream_type (str): Stream type. Either 'Science' or 'Engineering'. If multiple use comma separated.
stream_method (str): Stream method. If multiple use comma separated.
stream (str): Stream name. If multiple use comma separated.
parameter (str): Parameter name. If multiple use comma separated.
Returns:
self: Modified OOI Object
"""
if isinstance(self._current_data_catalog, pd.DataFrame):
current_dcat = self._current_data_catalog
else:
current_dcat = self._get_data_catalog()
self._current_data_catalog = current_dcat
if self._cloud_source:
current_dcat = current_dcat[
current_dcat.apply(
lambda row: "-".join(
[
row["reference_designator"],
row["stream_method"],
row["stream_rd"],
]
)
in self._s3content,
axis=1,
)
].reset_index(drop="index")
if region:
region_search = list(
map(lambda x: x.strip(" "), region.split(","))
) # noqa
current_dcat = current_dcat[
current_dcat.array_name.astype(str).str.contains(
"|".join(region_search), flags=re.IGNORECASE
)
| current_dcat.site_rd.astype(str).str.contains(
"|".join(region_search), flags=re.IGNORECASE
)
| current_dcat.reference_designator.astype(str).str.contains(
"|".join(region_search), flags=re.IGNORECASE
)
] # noqa
if site:
site_search = list(
map(lambda x: x.strip(" "), site.split(","))
) # noqa
current_dcat = current_dcat[
current_dcat.site_name.astype(str).str.contains(
"|".join(site_search), flags=re.IGNORECASE
)
| current_dcat.site_rd.astype(str).str.contains(
"|".join(site_search), flags=re.IGNORECASE
)
| current_dcat.reference_designator.astype(str).str.contains(
"|".join(site_search), flags=re.IGNORECASE
)
] # noqa
if node:
node_search = list(
map(lambda x: x.strip(" "), node.split(","))
) # noqa
current_dcat = current_dcat[
current_dcat.infrastructure_name.astype(str).str.contains(
"|".join(node_search), flags=re.IGNORECASE
)
| current_dcat.infrastructure_rd.astype(str).str.contains(
"|".join(node_search), flags=re.IGNORECASE
)
| current_dcat.reference_designator.astype(str).str.contains(
"|".join(node_search), flags=re.IGNORECASE
)
] # noqa
if instrument:
instrument_search = list(
map(lambda x: x.strip(" "), instrument.split(","))
) # noqa
current_dcat = current_dcat[
current_dcat.instrument_name.astype(str).str.contains(
"|".join(instrument_search), flags=re.IGNORECASE
)
| current_dcat.instrument_rd.astype(str).str.contains(
"|".join(instrument_search), flags=re.IGNORECASE
)
| current_dcat.reference_designator.astype(str).str.contains(
"|".join(instrument_search), flags=re.IGNORECASE
)
] # noqa
if parameter:
parameter_search = list(
map(lambda x: x.strip(" "), parameter.split(","))
) # noqa
current_dcat = current_dcat[
current_dcat.display_name.astype(str).str.contains(
"|".join(parameter_search), flags=re.IGNORECASE
)
| current_dcat.parameter_rd.astype(str).str.contains(
"|".join(parameter_search), flags=re.IGNORECASE
)
] # noqa
if stream_type:
stream_type_search = list(
map(lambda x: x.strip(" "), stream_type.split(","))
) # noqa
current_dcat = current_dcat[
current_dcat.stream_type.astype(str).str.contains(
"|".join(stream_type_search), flags=re.IGNORECASE
)
] # noqa
if stream_method:
stream_method_search = list(
map(lambda x: x.strip(" "), stream_method.split(","))
) # noqa
current_dcat = current_dcat[
current_dcat.stream_method.astype(str).str.contains(
"|".join(stream_method_search), flags=re.IGNORECASE
)
] # noqa
if stream:
stream_search = list(
map(lambda x: x.strip(" "), stream.split(","))
) # noqa
current_dcat = current_dcat[
current_dcat.stream_rd.astype(str).str.contains(
"|".join(stream_search), flags=re.IGNORECASE
)
] # noqa
self._filtered_data_catalog = current_dcat.drop_duplicates(
subset=["reference_designator", "stream_method", "stream_rd"]
)[
[
"array_name",
"site_name",
"infrastructure_name",
"instrument_name",
"site_rd",
"infrastructure_rd",
"instrument_rd",
"reference_designator",
"stream_method",
"stream_type",
"stream_rd",
"begin_date",
"end_date",
]
].reset_index(
drop="index"
)
return self
def clear(self):
"""
Clears the search filter.
Returns:
self: Modified OOI Object
"""
if isinstance(self._filtered_data_catalog, pd.DataFrame):
self._filtered_data_catalog = None
return self
def raw(self):
""" Returns the raw result from data request in json format """
return self._raw_data
def download_netcdfs(self, destination=os.path.curdir, timeout=3600):
"""
Download netcdf files from the catalog created from data request.
Args:
destination (str, optional): Location to save netcdf file. Default will save in current directory.
            timeout (int, optional): Expected download time before timing out in seconds. Defaults to 3600 s (1 hour).
Returns:
list: List of exported netcdf.
"""
if not isinstance(timeout, int):
raise TypeError(f"Expected int; {type(int)} given.")
download_list = self._prepare_download()
logger.info("Downloading netcdfs ...")
jobs = [
gevent.spawn(download_url, url, destination, self._session)
for url in download_list
]
gevent.joinall(jobs, timeout=timeout)
finished_netcdfs = [job.value for job in jobs]
if finished_netcdfs:
self._last_downloaded_netcdfs = [
os.path.join(os.path.abspath(destination), nc)
for nc in finished_netcdfs
] # noqa
return self._last_downloaded_netcdfs
def to_xarray(self, **kwargs):
"""
Retrieve the OOI streams data and export to Xarray Datasets, saving in memory.
Args:
**kwargs: Keyword arguments for xarray open_mfdataset.
Returns:
list: List of xarray datasets
"""
ref_degs = self._filtered_data_catalog["reference_designator"].values
dataset_list = []
if self._data_type == "netcdf":
if not self._cloud_source:
if self._raw_file_dict:
mvbsnc_list = perform_ek60_processing(self._raw_file_dict)
for k, v in mvbsnc_list.items():
resdf = xr.open_mfdataset(
v,
concat_dim=["ping_time"],
combine="nested",
**kwargs,
)
dataset_list.append(resdf)
turls = self._perform_check()
if len(turls) > 0:
self._netcdf_urls = [get_nc_urls(turl) for turl in turls]
logger.info("Acquiring data from opendap urls ...")
jobs = [
gevent.spawn(fetch_xr, (url, ref_degs), **kwargs)
for url in turls
]
gevent.joinall(jobs, timeout=300)
for job in jobs:
dataset_list.append(job.value)
else:
self._logger.warning(
f"{self._data_type} cannot be converted to xarray dataset"
) # noqa
if dataset_list:
self._dataset_list = dataset_list
return self._dataset_list
def check_status(self):
""" Function for user to manually check the status of the data """
if not self._q.empty():
return None
turls = []
filtered_data_urls = list(filter(lambda x: "allURLs" in x, self.raw()))
for durl in filtered_data_urls:
turl = self._check_data_status(durl)
if turl:
turls.append(turl)
if len(turls) == len(filtered_data_urls):
return turls
return None
def data_availability(self):
"""
Plots data availability of desired instruments.
Returns:
pandas.DataFrame: Instrument Stream legend
"""
import matplotlib.pyplot as plt
import seaborn as sns
plt.clf()
plt.close("all")
inst = self._filtered_data_catalog.copy()
if isinstance(inst, pd.DataFrame):
if len(inst) > 0:
da_list = []
for idx, i in inst.iterrows():
if i.instrument_name not in [
"Bio-acoustic Sonar (Coastal)"
]:
da_list.append(self._fetch_monthly_stats(i))
else:
print(
f"{i.reference_designator} not available for data availability"
)
if len(da_list) > 0:
dadf = pd.concat(da_list)
dadf.loc[:, "unique_rd"] = dadf.apply(
lambda row: "-".join(
[
row.reference_designator,
row.stream_method,
row.stream_rd,
]
),
axis=1,
)
inst.loc[:, "unique_rd"] = inst.apply(
lambda row: "-".join(
[
row.reference_designator,
row.stream_method,
row.stream_rd,
]
),
axis=1,
)
name_df = inst[
[
"array_name",
"site_name",
"infrastructure_name",
"instrument_name",
"unique_rd",
]
]
raw_plotdf = pd.merge(dadf, name_df)
plotdf = raw_plotdf.pivot_table(
index="unique_rd", columns="month", values="percentage"
)
sns.set(style="white")
_, ax = plt.subplots(figsize=(20, 10))
ax.set_title("OOI Data Availability")
sns.heatmap(
plotdf,
annot=False,
fmt=".2f",
linewidths=1,
ax=ax,
square=True,
cmap=sns.light_palette("green"),
cbar_kws={
"orientation": "horizontal",
"shrink": 0.7,
"pad": 0.3,
"aspect": 30,
},
)
plt.ylabel("Instruments", rotation=0, labelpad=60)
plt.xlabel("Months", labelpad=30)
plt.yticks(rotation=0)
plt.tight_layout()
legend = raw_plotdf[
(
list(raw_plotdf.columns.values[-5:])
+ ["stream_method", "stream_rd"]
)
].drop_duplicates(subset="unique_rd")
legend.loc[:, "labels"] = legend.apply(
lambda row: [
row.array_name,
row.site_name,
row.infrastructure_name, # noqa
row.instrument_name,
row.stream_method,
row.stream_rd,
],
axis=1,
)
ldct = {}
for idx, row in legend.iterrows():
ldct[row.unique_rd] = row.labels
                    return pd.DataFrame.from_dict(ldct)
# coding=utf-8
"""
This module contains functions for debugging decision tree matcher.
"""
import logging
import subprocess
import pandas as pd
import six
from py_entitymatching.utils.validation_helper import validate_object_type
from sklearn.tree import export_graphviz
from py_entitymatching.feature.extractfeatures import apply_feat_fns
from py_entitymatching.matcher.dtmatcher import DTMatcher
logger = logging.getLogger(__name__)
def visualize_tree(decision_tree, table_columns, exclude_attrs=None):
"""
This function is used to visualize the decision tree learned from the
training data using the 'fit' method.
    Note that this function does not pop up a visualization of a decision tree.
    It creates a png file in the local directory and the user has to
    explicitly open the file to view the tree. Moreover, this function uses the
    'dot' command and graphviz to create the
    visualization. It is assumed that the 'dot' command is present and
    graphviz is installed on the local machine on which this function is executed.
Args:
decision_tree (DTMatcher or DecisionTreeClassifier): The decision tree
matcher for which the visualization needs to be generated.
        table_columns (list): Attributes from the input table that was used
            to train the decision tree.
exclude_attrs (list): Attributes that should be removed from the
table columns to get the actual feature vectors (defaults to None).
"""
# Validate input parameters
# # We expect the input decision tree to be of type DTMatcher. If so get
# the classifier out of it.
if isinstance(decision_tree, DTMatcher):
tree = decision_tree.clf
else:
tree = decision_tree
# If the exclude attribute is nothing, then all the given columns are
# feature vectors.
if exclude_attrs is None:
feature_names = table_columns
else:
# Else pick out the feature vector columns based on the exclude
# attributes.
columns = [c not in exclude_attrs for c in table_columns]
feature_names = table_columns[columns]
# Create a file (as of now hardcoded) and write the tree into that file.
with open("dt_.dot", 'w') as f:
export_graphviz(tree, out_file=f,
feature_names=feature_names)
# Create a png file from the dot file and store it in the same directory
command = ["dot", "-Tpng", "dt_.dot", "-o", "dt_.png"]
# noinspection PyBroadException
try:
subprocess.check_call(command)
except:
logger.error("Could not run dot, ie graphviz, to "
"produce visualization")
return
# Finally, print a help information on how to display the visualization
# from the ipython console.
print("Execute the following command in IPython command prompt:")
print("")
print("from IPython.display import Image")
print("Image(filename='dt_.png') ")
def _get_code(tree, feature_names, target_names,
spacer_base=" "):
"""
    Produce pseudo-code for the decision tree.
This is based on http://stackoverflow.com/a/30104792.
"""
# Get the left, right trees and the threshold from the tree
left = tree.tree_.children_left
right = tree.tree_.children_right
threshold = tree.tree_.threshold
# Get the features from the tree
features = [feature_names[i] for i in tree.tree_.feature]
value = tree.tree_.value
code_list = []
# Now recursively build the tree by going through each node.
def recurse(left, right, threshold, features, node, depth):
"""
Recurse function to encode the debug logic at each node.
"""
spacer = spacer_base * depth
# For each of the threshold conditions, add appropriate code that
# should be executed.
if threshold[node] != -2:
code_str = spacer + "if ( " + features[node] + " <= " + \
str(threshold[node]) + " ):"
code_list.append(code_str)
code_str = spacer + spacer_base + "print( \'" + spacer_base + "" + \
features[
node] + " <= " + str(
threshold[node]) + \
" is True " + "( value : \' + str(" + str(
features[node]) + ") + \')\')"
code_list.append(code_str)
if left[node] != -1:
recurse(left, right, threshold, features,
left[node], depth + 1)
code_str = spacer + "else:"
code_list.append(code_str)
code_str = spacer + spacer_base + "print( \'" + spacer_base + "" + \
features[
node] + " <= " + str(
threshold[node]) + \
" is False " + "( value : \' + str(" + str(
features[node]) + ") + \')\')"
code_list.append(code_str)
if right[node] != -1:
recurse(left, right, threshold, features,
right[node], depth + 1)
else:
target = value[node]
winning_target_name = None
winning_target_count = None
for i, v in zip(pd.np.nonzero(target)[1],
                            target[pd.np.nonzero(target)]):
import vectorbt as vbt
import numpy as np
import pandas as pd
from numba import njit
from datetime import datetime
import pytest
from vectorbt.generic import nb as generic_nb
from vectorbt.generic.enums import range_dt
from tests.utils import record_arrays_close
seed = 42
day_dt = np.timedelta64(86400000000000)
mask = pd.DataFrame([
[True, False, False],
[False, True, False],
[False, False, True],
[True, False, False],
[False, True, False]
], index=pd.Index([
datetime(2020, 1, 1),
datetime(2020, 1, 2),
datetime(2020, 1, 3),
datetime(2020, 1, 4),
datetime(2020, 1, 5)
]), columns=['a', 'b', 'c'])
ts = pd.Series([1., 2., 3., 2., 1.], index=mask.index)
price = pd.DataFrame({
'open': [10, 11, 12, 11, 10],
'high': [11, 12, 13, 12, 11],
'low': [9, 10, 11, 10, 9],
'close': [11, 12, 11, 10, 9]
})
group_by = pd.Index(['g1', 'g1', 'g2'])
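# Shared fixtures for the tests below: `mask` is a 5-day boolean signal frame with
# columns 'a', 'b', 'c'; `ts` is a matching numeric series; `price` is a small OHLC
# frame; and `group_by` groups columns 'a'/'b' into 'g1' and 'c' into 'g2'.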
# ############# Global ############# #
def setup_module():
vbt.settings.numba['check_func_suffix'] = True
vbt.settings.caching.enabled = False
vbt.settings.caching.whitelist = []
vbt.settings.caching.blacklist = []
def teardown_module():
vbt.settings.reset()
# ############# accessors.py ############# #
class TestAccessors:
def test_indexing(self):
assert mask.vbt.signals['a'].total() == mask['a'].vbt.signals.total()
def test_freq(self):
assert mask.vbt.signals.wrapper.freq == day_dt
assert mask['a'].vbt.signals.wrapper.freq == day_dt
assert mask.vbt.signals(freq='2D').wrapper.freq == day_dt * 2
assert mask['a'].vbt.signals(freq='2D').wrapper.freq == day_dt * 2
assert pd.Series([False, True]).vbt.signals.wrapper.freq is None
assert pd.Series([False, True]).vbt.signals(freq='3D').wrapper.freq == day_dt * 3
assert pd.Series([False, True]).vbt.signals(freq=np.timedelta64(4, 'D')).wrapper.freq == day_dt * 4
@pytest.mark.parametrize(
"test_n",
[1, 2, 3, 4, 5],
)
def test_fshift(self, test_n):
pd.testing.assert_series_equal(mask['a'].vbt.signals.fshift(test_n), mask['a'].shift(test_n, fill_value=False))
np.testing.assert_array_equal(
mask['a'].vbt.signals.fshift(test_n).values,
generic_nb.fshift_1d_nb(mask['a'].values, test_n, fill_value=False)
)
pd.testing.assert_frame_equal(mask.vbt.signals.fshift(test_n), mask.shift(test_n, fill_value=False))
@pytest.mark.parametrize(
"test_n",
[1, 2, 3, 4, 5],
)
def test_bshift(self, test_n):
pd.testing.assert_series_equal(
mask['a'].vbt.signals.bshift(test_n),
mask['a'].shift(-test_n, fill_value=False))
np.testing.assert_array_equal(
mask['a'].vbt.signals.bshift(test_n).values,
generic_nb.bshift_1d_nb(mask['a'].values, test_n, fill_value=False)
)
pd.testing.assert_frame_equal(mask.vbt.signals.bshift(test_n), mask.shift(-test_n, fill_value=False))
def test_empty(self):
pd.testing.assert_series_equal(
pd.Series.vbt.signals.empty(5, index=np.arange(10, 15), name='a'),
pd.Series(np.full(5, False), index=np.arange(10, 15), name='a')
)
pd.testing.assert_frame_equal(
pd.DataFrame.vbt.signals.empty((5, 3), index=np.arange(10, 15), columns=['a', 'b', 'c']),
pd.DataFrame(np.full((5, 3), False), index=np.arange(10, 15), columns=['a', 'b', 'c'])
)
pd.testing.assert_series_equal(
pd.Series.vbt.signals.empty_like(mask['a']),
pd.Series(np.full(mask['a'].shape, False), index=mask['a'].index, name=mask['a'].name)
)
pd.testing.assert_frame_equal(
pd.DataFrame.vbt.signals.empty_like(mask),
pd.DataFrame(np.full(mask.shape, False), index=mask.index, columns=mask.columns)
)
def test_generate(self):
@njit
def choice_func_nb(from_i, to_i, col, n):
if col == 0:
return np.arange(from_i, to_i)
elif col == 1:
return np.full(1, from_i)
else:
return np.full(1, to_i - n)
pd.testing.assert_series_equal(
pd.Series.vbt.signals.generate(5, choice_func_nb, 1, index=mask['a'].index, name=mask['a'].name),
pd.Series(
np.array([True, True, True, True, True]),
index=mask['a'].index,
name=mask['a'].name
)
)
with pytest.raises(Exception):
_ = pd.Series.vbt.signals.generate((5, 2), choice_func_nb, 1)
pd.testing.assert_frame_equal(
pd.DataFrame.vbt.signals.generate(
(5, 3), choice_func_nb, 1, index=mask.index, columns=mask.columns),
pd.DataFrame(
np.array([
[True, True, False],
[True, False, False],
[True, False, False],
[True, False, False],
[True, False, True]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
pd.DataFrame.vbt.signals.generate(
(5, 3), choice_func_nb, 1, pick_first=True, index=mask.index, columns=mask.columns),
pd.DataFrame(
np.array([
[True, True, False],
[False, False, False],
[False, False, False],
[False, False, False],
[False, False, True]
]),
index=mask.index,
columns=mask.columns
)
)
def test_generate_both(self):
@njit
def entry_func_nb(from_i, to_i, col, temp_int):
temp_int[0] = from_i
return temp_int[:1]
@njit
def exit_func_nb(from_i, to_i, col, temp_int):
temp_int[0] = from_i
return temp_int[:1]
temp_int = np.empty((mask.shape[0],), dtype=np.int_)
en, ex = pd.Series.vbt.signals.generate_both(
5, entry_func_nb, (temp_int,), exit_func_nb, (temp_int,),
index=mask['a'].index, name=mask['a'].name)
pd.testing.assert_series_equal(
en,
pd.Series(
np.array([True, False, True, False, True]),
index=mask['a'].index,
name=mask['a'].name
)
)
pd.testing.assert_series_equal(
ex,
pd.Series(
np.array([False, True, False, True, False]),
index=mask['a'].index,
name=mask['a'].name
)
)
en, ex = pd.DataFrame.vbt.signals.generate_both(
(5, 3), entry_func_nb, (temp_int,), exit_func_nb, (temp_int,),
index=mask.index, columns=mask.columns)
pd.testing.assert_frame_equal(
en,
pd.DataFrame(
np.array([
[True, True, True],
[False, False, False],
[True, True, True],
[False, False, False],
[True, True, True]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(
np.array([
[False, False, False],
[True, True, True],
[False, False, False],
[True, True, True],
[False, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
en, ex = pd.Series.vbt.signals.generate_both(
(5,), entry_func_nb, (temp_int,), exit_func_nb, (temp_int,),
index=mask['a'].index, name=mask['a'].name, entry_wait=1, exit_wait=0)
pd.testing.assert_series_equal(
en,
pd.Series(
np.array([True, True, True, True, True]),
index=mask['a'].index,
name=mask['a'].name
)
)
pd.testing.assert_series_equal(
ex,
pd.Series(
np.array([True, True, True, True, True]),
index=mask['a'].index,
name=mask['a'].name
)
)
en, ex = pd.Series.vbt.signals.generate_both(
(5,), entry_func_nb, (temp_int,), exit_func_nb, (temp_int,),
index=mask['a'].index, name=mask['a'].name, entry_wait=0, exit_wait=1)
pd.testing.assert_series_equal(
en,
pd.Series(
np.array([True, True, True, True, True]),
index=mask['a'].index,
name=mask['a'].name
)
)
pd.testing.assert_series_equal(
ex,
pd.Series(
np.array([False, True, True, True, True]),
index=mask['a'].index,
name=mask['a'].name
)
)
@njit
def entry_func2_nb(from_i, to_i, col, temp_int):
temp_int[0] = from_i
if from_i + 1 < to_i:
temp_int[1] = from_i + 1
return temp_int[:2]
return temp_int[:1]
@njit
def exit_func2_nb(from_i, to_i, col, temp_int):
temp_int[0] = from_i
if from_i + 1 < to_i:
temp_int[1] = from_i + 1
return temp_int[:2]
return temp_int[:1]
en, ex = pd.DataFrame.vbt.signals.generate_both(
(5, 3), entry_func2_nb, (temp_int,), exit_func2_nb, (temp_int,),
entry_pick_first=False, exit_pick_first=False,
index=mask.index, columns=mask.columns)
pd.testing.assert_frame_equal(
en,
pd.DataFrame(
np.array([
[True, True, True],
[True, True, True],
[False, False, False],
[False, False, False],
[True, True, True]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(
np.array([
[False, False, False],
[False, False, False],
[True, True, True],
[True, True, True],
[False, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
def test_generate_exits(self):
@njit
def choice_func_nb(from_i, to_i, col, temp_int):
temp_int[0] = from_i
return temp_int[:1]
temp_int = np.empty((mask.shape[0],), dtype=np.int_)
pd.testing.assert_series_equal(
mask['a'].vbt.signals.generate_exits(choice_func_nb, temp_int),
pd.Series(
np.array([False, True, False, False, True]),
index=mask['a'].index,
name=mask['a'].name
)
)
pd.testing.assert_frame_equal(
mask.vbt.signals.generate_exits(choice_func_nb, temp_int),
pd.DataFrame(
np.array([
[False, False, False],
[True, False, False],
[False, True, False],
[False, False, True],
[True, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
mask.vbt.signals.generate_exits(choice_func_nb, temp_int, wait=0),
pd.DataFrame(
np.array([
[True, False, False],
[False, True, False],
[False, False, True],
[True, False, False],
[False, True, False]
]),
index=mask.index,
columns=mask.columns
)
)
@njit
def choice_func2_nb(from_i, to_i, col, temp_int):
for i in range(from_i, to_i):
temp_int[i - from_i] = i
return temp_int[:to_i - from_i]
pd.testing.assert_frame_equal(
mask.vbt.signals.generate_exits(choice_func2_nb, temp_int, until_next=False, pick_first=False),
pd.DataFrame(
np.array([
[False, False, False],
[True, False, False],
[True, True, False],
[True, True, True],
[True, True, True]
]),
index=mask.index,
columns=mask.columns
)
)
mask2 = pd.Series([True, True, True, True, True], index=mask.index)
pd.testing.assert_series_equal(
mask2.vbt.signals.generate_exits(choice_func_nb, temp_int, until_next=False, skip_until_exit=True),
pd.Series(
np.array([False, True, False, True, False]),
index=mask.index
)
)
def test_clean(self):
entries = pd.DataFrame([
[True, False, True],
[True, False, False],
[True, True, True],
[False, True, False],
[False, True, True]
], index=mask.index, columns=mask.columns)
exits = pd.Series([True, False, True, False, True], index=mask.index)
pd.testing.assert_frame_equal(
entries.vbt.signals.clean(),
pd.DataFrame(
np.array([
[True, False, True],
[False, False, False],
[False, True, True],
[False, False, False],
[False, False, True]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
pd.DataFrame.vbt.signals.clean(entries),
pd.DataFrame(
np.array([
[True, False, True],
[False, False, False],
[False, True, True],
[False, False, False],
[False, False, True]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
entries.vbt.signals.clean(exits)[0],
pd.DataFrame(
np.array([
[False, False, False],
[True, False, False],
[False, False, False],
[False, True, False],
[False, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
entries.vbt.signals.clean(exits)[1],
pd.DataFrame(
np.array([
[False, False, False],
[False, False, False],
[False, False, False],
[False, False, False],
[True, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
entries.vbt.signals.clean(exits, entry_first=False)[0],
pd.DataFrame(
np.array([
[False, False, False],
[True, False, False],
[False, False, False],
[False, True, False],
[False, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
entries.vbt.signals.clean(exits, entry_first=False)[1],
pd.DataFrame(
np.array([
[False, True, False],
[False, False, False],
[False, False, False],
[False, False, False],
[True, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
pd.DataFrame.vbt.signals.clean(entries, exits)[0],
pd.DataFrame(
np.array([
[False, False, False],
[True, False, False],
[False, False, False],
[False, True, False],
[False, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
pd.DataFrame.vbt.signals.clean(entries, exits)[1],
pd.DataFrame(
np.array([
[False, False, False],
[False, False, False],
[False, False, False],
[False, False, False],
[True, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
with pytest.raises(Exception):
_ = pd.Series.vbt.signals.clean(entries, entries, entries)
def test_generate_random(self):
pd.testing.assert_series_equal(
pd.Series.vbt.signals.generate_random(
5, n=3, seed=seed, index=mask['a'].index, name=mask['a'].name),
pd.Series(
np.array([False, True, True, False, True]),
index=mask['a'].index,
name=mask['a'].name
)
)
with pytest.raises(Exception):
_ = pd.Series.vbt.signals.generate_random((5, 2), n=3)
pd.testing.assert_frame_equal(
pd.DataFrame.vbt.signals.generate_random(
(5, 3), n=3, seed=seed, index=mask.index, columns=mask.columns),
pd.DataFrame(
np.array([
[False, False, True],
[True, True, True],
[True, True, False],
[False, True, True],
[True, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
pd.DataFrame.vbt.signals.generate_random(
(5, 3), n=[0, 1, 2], seed=seed, index=mask.index, columns=mask.columns),
pd.DataFrame(
np.array([
[False, False, True],
[False, False, True],
[False, False, False],
[False, True, False],
[False, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_series_equal(
pd.Series.vbt.signals.generate_random(
5, prob=0.5, seed=seed, index=mask['a'].index, name=mask['a'].name),
pd.Series(
np.array([True, False, False, False, True]),
index=mask['a'].index,
name=mask['a'].name
)
)
with pytest.raises(Exception):
_ = pd.Series.vbt.signals.generate_random((5, 2), prob=3)
pd.testing.assert_frame_equal(
pd.DataFrame.vbt.signals.generate_random(
(5, 3), prob=0.5, seed=seed, index=mask.index, columns=mask.columns),
pd.DataFrame(
np.array([
[True, True, True],
[False, True, False],
[False, False, False],
[False, False, True],
[True, False, True]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
pd.DataFrame.vbt.signals.generate_random(
(5, 3), prob=[0., 0.5, 1.], seed=seed, index=mask.index, columns=mask.columns),
pd.DataFrame(
np.array([
[False, True, True],
[False, True, True],
[False, False, True],
[False, False, True],
[False, False, True]
]),
index=mask.index,
columns=mask.columns
)
)
with pytest.raises(Exception):
pd.DataFrame.vbt.signals.generate_random((5, 3))
pd.testing.assert_frame_equal(
pd.DataFrame.vbt.signals.generate_random(
(5, 3), prob=[0., 0.5, 1.], pick_first=True, seed=seed, index=mask.index, columns=mask.columns),
pd.DataFrame(
np.array([
[False, True, True],
[False, False, False],
[False, False, False],
[False, False, False],
[False, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
def test_generate_random_both(self):
# n
en, ex = pd.Series.vbt.signals.generate_random_both(
5, n=2, seed=seed, index=mask['a'].index, name=mask['a'].name)
pd.testing.assert_series_equal(
en,
pd.Series(
np.array([True, False, True, False, False]),
index=mask['a'].index,
name=mask['a'].name
)
)
pd.testing.assert_series_equal(
ex,
pd.Series(
np.array([False, True, False, False, True]),
index=mask['a'].index,
name=mask['a'].name
)
)
en, ex = pd.DataFrame.vbt.signals.generate_random_both(
(5, 3), n=2, seed=seed, index=mask.index, columns=mask.columns)
pd.testing.assert_frame_equal(
en,
pd.DataFrame(
np.array([
[True, True, True],
[False, False, False],
[True, True, False],
[False, False, True],
[False, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(
np.array([
[False, False, False],
[True, True, True],
[False, False, False],
[False, True, False],
[True, False, True]
]),
index=mask.index,
columns=mask.columns
)
)
en, ex = pd.DataFrame.vbt.signals.generate_random_both(
(5, 3), n=[0, 1, 2], seed=seed, index=mask.index, columns=mask.columns)
pd.testing.assert_frame_equal(
en,
pd.DataFrame(
np.array([
[False, False, True],
[False, True, False],
[False, False, False],
[False, False, True],
[False, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(
np.array([
[False, False, False],
[False, False, True],
[False, False, False],
[False, True, False],
[False, False, True]
]),
index=mask.index,
columns=mask.columns
)
)
en, ex = pd.DataFrame.vbt.signals.generate_random_both((2, 3), n=2, seed=seed, entry_wait=1, exit_wait=0)
pd.testing.assert_frame_equal(
en,
pd.DataFrame(
np.array([
[True, True, True],
[True, True, True],
])
)
)
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(
np.array([
[True, True, True],
[True, True, True]
])
)
)
en, ex = pd.DataFrame.vbt.signals.generate_random_both((3, 3), n=2, seed=seed, entry_wait=0, exit_wait=1)
pd.testing.assert_frame_equal(
en,
pd.DataFrame(
np.array([
[True, True, True],
[True, True, True],
[False, False, False]
])
)
)
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(
np.array([
[False, False, False],
[True, True, True],
[True, True, True],
])
)
)
en, ex = pd.DataFrame.vbt.signals.generate_random_both((7, 3), n=2, seed=seed, entry_wait=2, exit_wait=2)
pd.testing.assert_frame_equal(
en,
pd.DataFrame(
np.array([
[True, True, True],
[False, False, False],
[False, False, False],
[False, False, False],
[True, True, True],
[False, False, False],
[False, False, False]
])
)
)
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(
np.array([
[False, False, False],
[False, False, False],
[True, True, True],
[False, False, False],
[False, False, False],
[False, False, False],
[True, True, True]
])
)
)
n = 10
a = np.full(n * 2, 0.)
for i in range(10000):
en, ex = pd.Series.vbt.signals.generate_random_both(1000, n, entry_wait=2, exit_wait=2)
_a = np.empty((n * 2,), dtype=np.int_)
_a[0::2] = np.flatnonzero(en)
_a[1::2] = np.flatnonzero(ex)
a += _a
greater = a > 10000000 / (2 * n + 1) * np.arange(0, 2 * n)
less = a < 10000000 / (2 * n + 1) * np.arange(2, 2 * n + 2)
assert np.all(greater & less)
# probs
en, ex = pd.Series.vbt.signals.generate_random_both(
5, entry_prob=0.5, exit_prob=1., seed=seed, index=mask['a'].index, name=mask['a'].name)
pd.testing.assert_series_equal(
en,
pd.Series(
np.array([True, False, False, False, True]),
index=mask['a'].index,
name=mask['a'].name
)
)
pd.testing.assert_series_equal(
ex,
pd.Series(
np.array([False, True, False, False, False]),
index=mask['a'].index,
name=mask['a'].name
)
)
en, ex = pd.DataFrame.vbt.signals.generate_random_both(
(5, 3), entry_prob=0.5, exit_prob=1., seed=seed, index=mask.index, columns=mask.columns)
pd.testing.assert_frame_equal(
en,
pd.DataFrame(
np.array([
[True, True, True],
[False, False, False],
[False, False, False],
[False, False, True],
[True, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(
np.array([
[False, False, False],
[True, True, True],
[False, False, False],
[False, False, False],
[False, False, True]
]),
index=mask.index,
columns=mask.columns
)
)
en, ex = pd.DataFrame.vbt.signals.generate_random_both(
(5, 3), entry_prob=[0., 0.5, 1.], exit_prob=[0., 0.5, 1.],
seed=seed, index=mask.index, columns=mask.columns)
pd.testing.assert_frame_equal(
en,
pd.DataFrame(
np.array([
[False, True, True],
[False, False, False],
[False, False, True],
[False, False, False],
[False, False, True]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(
np.array([
[False, False, False],
[False, True, True],
[False, False, False],
[False, False, True],
[False, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
en, ex = pd.DataFrame.vbt.signals.generate_random_both(
(5, 3), entry_prob=1., exit_prob=1., exit_wait=0,
seed=seed, index=mask.index, columns=mask.columns)
pd.testing.assert_frame_equal(
en,
pd.DataFrame(
np.array([
[True, True, True],
[True, True, True],
[True, True, True],
[True, True, True],
[True, True, True]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(
np.array([
[True, True, True],
[True, True, True],
[True, True, True],
[True, True, True],
[True, True, True]
]),
index=mask.index,
columns=mask.columns
)
)
en, ex = pd.DataFrame.vbt.signals.generate_random_both(
(5, 3), entry_prob=1., exit_prob=1., entry_pick_first=False, exit_pick_first=True,
seed=seed, index=mask.index, columns=mask.columns)
pd.testing.assert_frame_equal(
en,
pd.DataFrame(
np.array([
[True, True, True],
[True, True, True],
[True, True, True],
[True, True, True],
[True, True, True]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(
np.array([
[False, False, False],
[False, False, False],
[False, False, False],
[False, False, False],
[False, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
en, ex = pd.DataFrame.vbt.signals.generate_random_both(
(5, 3), entry_prob=1., exit_prob=1., entry_pick_first=True, exit_pick_first=False,
seed=seed, index=mask.index, columns=mask.columns)
pd.testing.assert_frame_equal(
en,
pd.DataFrame(
np.array([
[True, True, True],
[False, False, False],
[False, False, False],
[False, False, False],
[False, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(
np.array([
[False, False, False],
[True, True, True],
[True, True, True],
[True, True, True],
[True, True, True]
]),
index=mask.index,
columns=mask.columns
)
)
# none
with pytest.raises(Exception):
pd.DataFrame.vbt.signals.generate_random((5, 3))
def test_generate_random_exits(self):
pd.testing.assert_series_equal(
mask['a'].vbt.signals.generate_random_exits(seed=seed),
pd.Series(
np.array([False, False, True, False, True]),
index=mask['a'].index,
name=mask['a'].name
)
)
pd.testing.assert_frame_equal(
mask.vbt.signals.generate_random_exits(seed=seed),
pd.DataFrame(
np.array([
[False, False, False],
[False, False, False],
[True, True, False],
[False, False, False],
[True, False, True]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
mask.vbt.signals.generate_random_exits(seed=seed, wait=0),
pd.DataFrame(
np.array([
[True, False, False],
[False, False, False],
[False, True, False],
[False, False, True],
[True, True, False]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_series_equal(
mask['a'].vbt.signals.generate_random_exits(prob=1., seed=seed),
pd.Series(
np.array([False, True, False, False, True]),
index=mask['a'].index,
name=mask['a'].name
)
)
pd.testing.assert_frame_equal(
mask.vbt.signals.generate_random_exits(prob=1., seed=seed),
pd.DataFrame(
np.array([
[False, False, False],
[True, False, False],
[False, True, False],
[False, False, True],
[True, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
mask.vbt.signals.generate_random_exits(prob=[0., 0.5, 1.], seed=seed),
pd.DataFrame(
np.array([
[False, False, False],
[False, False, False],
[False, False, False],
[False, True, True],
[False, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
mask.vbt.signals.generate_random_exits(prob=1., wait=0, seed=seed),
pd.DataFrame(
np.array([
[True, False, False],
[False, True, False],
[False, False, True],
[True, False, False],
[False, True, False]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
mask.vbt.signals.generate_random_exits(prob=1., until_next=False, seed=seed),
pd.DataFrame(
np.array([
[False, False, False],
[True, False, False],
[False, True, False],
[False, False, True],
[True, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
def test_generate_stop_exits(self):
e = pd.Series([True, False, False, False, False, False])
t = pd.Series([2, 3, 4, 3, 2, 1]).astype(np.float64)
# stop loss
pd.testing.assert_series_equal(
e.vbt.signals.generate_stop_exits(t, -0.1),
pd.Series(np.array([False, False, False, False, False, True]))
)
pd.testing.assert_series_equal(
e.vbt.signals.generate_stop_exits(t, -0.1, trailing=True),
pd.Series(np.array([False, False, False, True, False, False]))
)
pd.testing.assert_series_equal(
e.vbt.signals.generate_stop_exits(t, -0.1, trailing=True, pick_first=False),
pd.Series(np.array([False, False, False, True, True, True]))
)
pd.testing.assert_frame_equal(
e.vbt.signals.generate_stop_exits(t.vbt.tile(3), [np.nan, -0.5, -1.], trailing=True, pick_first=False),
pd.DataFrame(np.array([
[False, False, False],
[False, False, False],
[False, False, False],
[False, False, False],
[False, True, False],
[False, True, False]
]))
)
pd.testing.assert_series_equal(
e.vbt.signals.generate_stop_exits(t, -0.1, trailing=True, exit_wait=3),
pd.Series(np.array([False, False, False, False, True, False]))
)
# take profit
pd.testing.assert_series_equal(
e.vbt.signals.generate_stop_exits(4 - t, 0.1),
pd.Series(np.array([False, False, False, False, False, True]))
)
pd.testing.assert_series_equal(
e.vbt.signals.generate_stop_exits(4 - t, 0.1, trailing=True),
pd.Series(np.array([False, False, False, True, False, False]))
)
pd.testing.assert_series_equal(
e.vbt.signals.generate_stop_exits(4 - t, 0.1, trailing=True, pick_first=False),
pd.Series(np.array([False, False, False, True, True, True]))
)
pd.testing.assert_frame_equal(
e.vbt.signals.generate_stop_exits((4 - t).vbt.tile(3), [np.nan, 0.5, 1.], trailing=True, pick_first=False),
pd.DataFrame(np.array([
[False, False, False],
[False, False, False],
[False, False, False],
[False, True, True],
[False, True, True],
[False, True, True]
]))
)
pd.testing.assert_series_equal(
e.vbt.signals.generate_stop_exits(4 - t, 0.1, trailing=True, exit_wait=3),
pd.Series(np.array([False, False, False, False, True, False]))
)
# chain
e = pd.Series([True, True, True, True, True, True])
en, ex = e.vbt.signals.generate_stop_exits(t, -0.1, trailing=True, chain=True)
pd.testing.assert_series_equal(
en,
pd.Series(np.array([True, False, False, False, True, False]))
)
pd.testing.assert_series_equal(
ex,
pd.Series(np.array([False, False, False, True, False, True]))
)
en, ex = e.vbt.signals.generate_stop_exits(t, -0.1, trailing=True, entry_wait=2, chain=True)
pd.testing.assert_series_equal(
en,
pd.Series(np.array([True, False, False, False, False, True]))
)
pd.testing.assert_series_equal(
ex,
pd.Series(np.array([False, False, False, True, False, False]))
)
en, ex = e.vbt.signals.generate_stop_exits(t, -0.1, trailing=True, exit_wait=2, chain=True)
pd.testing.assert_series_equal(
en,
pd.Series(np.array([True, False, False, False, True, False]))
)
pd.testing.assert_series_equal(
ex,
pd.Series(np.array([False, False, False, True, False, False]))
)
# until_next and pick_first
e2 = pd.Series([True, True, True, True, True, True])
t2 = pd.Series([6, 5, 4, 3, 2, 1]).astype(np.float64)
ex = e2.vbt.signals.generate_stop_exits(t2, -0.1, until_next=False, pick_first=False)
pd.testing.assert_series_equal(
ex,
pd.Series(np.array([False, True, True, True, True, True]))
)
def test_generate_ohlc_stop_exits(self):
with pytest.raises(Exception):
_ = mask.vbt.signals.generate_ohlc_stop_exits(ts, sl_stop=-0.1)
with pytest.raises(Exception):
_ = mask.vbt.signals.generate_ohlc_stop_exits(ts, tp_stop=-0.1)
pd.testing.assert_frame_equal(
mask.vbt.signals.generate_stop_exits(ts, -0.1),
mask.vbt.signals.generate_ohlc_stop_exits(ts, sl_stop=0.1)
)
pd.testing.assert_frame_equal(
mask.vbt.signals.generate_stop_exits(ts, -0.1, trailing=True),
mask.vbt.signals.generate_ohlc_stop_exits(ts, sl_stop=0.1, sl_trail=True)
)
pd.testing.assert_frame_equal(
mask.vbt.signals.generate_stop_exits(ts, 0.1),
mask.vbt.signals.generate_ohlc_stop_exits(ts, tp_stop=0.1)
)
pd.testing.assert_frame_equal(
mask.vbt.signals.generate_stop_exits(ts, 0.1),
mask.vbt.signals.generate_ohlc_stop_exits(ts, sl_stop=0.1, reverse=True)
)
pd.testing.assert_frame_equal(
mask.vbt.signals.generate_stop_exits(ts, 0.1, trailing=True),
mask.vbt.signals.generate_ohlc_stop_exits(ts, sl_stop=0.1, sl_trail=True, reverse=True)
)
pd.testing.assert_frame_equal(
mask.vbt.signals.generate_stop_exits(ts, -0.1),
mask.vbt.signals.generate_ohlc_stop_exits(ts, tp_stop=0.1, reverse=True)
)
def _test_ohlc_stop_exits(**kwargs):
out_dict = {'stop_price': np.nan, 'stop_type': -1}
result = mask.vbt.signals.generate_ohlc_stop_exits(
price['open'], price['high'], price['low'], price['close'],
out_dict=out_dict, **kwargs
)
if isinstance(result, tuple):
_, ex = result
else:
ex = result
return result, out_dict['stop_price'], out_dict['stop_type']
ex, stop_price, stop_type = _test_ohlc_stop_exits()
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(np.array([
[False, False, False],
[False, False, False],
[False, False, False],
[False, False, False],
[False, False, False]
]), index=mask.index, columns=mask.columns)
)
pd.testing.assert_frame_equal(
stop_price,
pd.DataFrame(np.array([
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan]
]), index=mask.index, columns=mask.columns)
)
pd.testing.assert_frame_equal(
stop_type,
pd.DataFrame(np.array([
[-1, -1, -1],
[-1, -1, -1],
[-1, -1, -1],
[-1, -1, -1],
[-1, -1, -1]
]), index=mask.index, columns=mask.columns)
)
ex, stop_price, stop_type = _test_ohlc_stop_exits(sl_stop=0.1)
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(np.array([
[False, False, False],
[False, False, False],
[False, False, False],
[False, False, True],
[True, False, False]
]), index=mask.index, columns=mask.columns)
)
pd.testing.assert_frame_equal(
stop_price,
pd.DataFrame(np.array([
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, 10.8],
[9.9, np.nan, np.nan]
]), index=mask.index, columns=mask.columns)
)
pd.testing.assert_frame_equal(
stop_type,
pd.DataFrame(np.array([
[-1, -1, -1],
[-1, -1, -1],
[-1, -1, -1],
[-1, -1, 0],
[0, -1, -1]
]), index=mask.index, columns=mask.columns)
)
ex, stop_price, stop_type = _test_ohlc_stop_exits(sl_stop=0.1, sl_trail=True)
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(np.array([
[False, False, False],
[False, False, False],
[False, False, False],
[False, True, True],
[True, False, False]
]), index=mask.index, columns=mask.columns)
)
pd.testing.assert_frame_equal(
stop_price,
pd.DataFrame(np.array([
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, 11.7, 10.8],
[9.9, np.nan, np.nan]
]), index=mask.index, columns=mask.columns)
)
pd.testing.assert_frame_equal(
stop_type,
pd.DataFrame(np.array([
[-1, -1, -1],
[-1, -1, -1],
[-1, -1, -1],
[-1, 1, 1],
[1, -1, -1]
]), index=mask.index, columns=mask.columns)
)
ex, stop_price, stop_type = _test_ohlc_stop_exits(tp_stop=0.1)
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(np.array([
[False, False, False],
[True, False, False],
[False, True, False],
[False, False, False],
[False, False, False]
]), index=mask.index, columns=mask.columns)
)
pd.testing.assert_frame_equal(
stop_price,
pd.DataFrame(np.array([
[np.nan, np.nan, np.nan],
[11.0, np.nan, np.nan],
[np.nan, 12.1, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan]
]), index=mask.index, columns=mask.columns)
)
pd.testing.assert_frame_equal(
stop_type,
pd.DataFrame(np.array([
[-1, -1, -1],
[2, -1, -1],
[-1, 2, -1],
[-1, -1, -1],
[-1, -1, -1]
]), index=mask.index, columns=mask.columns)
)
ex, stop_price, stop_type = _test_ohlc_stop_exits(sl_stop=0.1, sl_trail=True, tp_stop=0.1)
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(np.array([
[False, False, False],
[True, False, False],
[False, True, False],
[False, False, True],
[True, False, False]
]), index=mask.index, columns=mask.columns)
)
pd.testing.assert_frame_equal(
stop_price,
pd.DataFrame(np.array([
[np.nan, np.nan, np.nan],
[11.0, np.nan, np.nan],
[np.nan, 12.1, np.nan],
[np.nan, np.nan, 10.8],
[9.9, np.nan, np.nan]
]), index=mask.index, columns=mask.columns)
)
pd.testing.assert_frame_equal(
stop_type,
pd.DataFrame(np.array([
[-1, -1, -1],
[2, -1, -1],
[-1, 2, -1],
[-1, -1, 1],
[1, -1, -1]
]), index=mask.index, columns=mask.columns)
)
ex, stop_price, stop_type = _test_ohlc_stop_exits(
sl_stop=[np.nan, 0.1, 0.2], sl_trail=True, tp_stop=[np.nan, 0.1, 0.2])
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(np.array([
[False, False, False],
[False, False, False],
[False, True, False],
[False, False, False],
[False, False, True]
]), index=mask.index, columns=mask.columns)
)
pd.testing.assert_frame_equal(
stop_price,
pd.DataFrame(np.array([
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, 12.1, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, 9.6]
]), index=mask.index, columns=mask.columns)
)
pd.testing.assert_frame_equal(
stop_type,
pd.DataFrame(np.array([
[-1, -1, -1],
[-1, -1, -1],
[-1, 2, -1],
[-1, -1, -1],
[-1, -1, 1]
]), index=mask.index, columns=mask.columns)
)
ex, stop_price, stop_type = _test_ohlc_stop_exits(sl_stop=0.1, sl_trail=True, tp_stop=0.1, exit_wait=0)
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(np.array([
[True, False, False],
[False, False, False],
[False, True, False],
[False, False, True],
[True, True, False]
]), index=mask.index, columns=mask.columns)
)
pd.testing.assert_frame_equal(
stop_price,
pd.DataFrame(np.array([
[9.0, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, 12.1, np.nan],
[np.nan, np.nan, 11.7],
[10.8, 9.0, np.nan]
]), index=mask.index, columns=mask.columns)
)
pd.testing.assert_frame_equal(
stop_type,
pd.DataFrame(np.array([
[1, -1, -1],
[-1, -1, -1],
[-1, 2, -1],
[-1, -1, 1],
[1, 1, -1]
]), index=mask.index, columns=mask.columns)
)
(en, ex), stop_price, stop_type = _test_ohlc_stop_exits(
sl_stop=0.1, sl_trail=True, tp_stop=0.1, chain=True)
pd.testing.assert_frame_equal(
en,
pd.DataFrame(np.array([
[True, False, False],
[False, True, False],
[False, False, True],
[True, False, False],
[False, True, False]
]), index=mask.index, columns=mask.columns)
)
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(np.array([
[False, False, False],
[True, False, False],
[False, True, False],
[False, False, True],
[True, False, False]
]), index=mask.index, columns=mask.columns)
)
pd.testing.assert_frame_equal(
stop_price,
pd.DataFrame(np.array([
[np.nan, np.nan, np.nan],
[11.0, np.nan, np.nan],
[np.nan, 12.1, np.nan],
[np.nan, np.nan, 10.8],
[9.9, np.nan, np.nan]
]), index=mask.index, columns=mask.columns)
)
pd.testing.assert_frame_equal(
stop_type,
pd.DataFrame(np.array([
[-1, -1, -1],
[2, -1, -1],
[-1, 2, -1],
[-1, -1, 1],
[1, -1, -1]
]), index=mask.index, columns=mask.columns)
)
def test_between_ranges(self):
ranges = mask.vbt.signals.between_ranges()
record_arrays_close(
ranges.values,
np.array([
(0, 0, 0, 3, 1), (1, 1, 1, 4, 1)
], dtype=range_dt)
)
assert ranges.wrapper == mask.vbt.wrapper
mask2 = pd.DataFrame([
[True, True, True],
[True, True, True],
[False, False, False],
[False, False, False],
[False, False, False]
], index=mask.index, columns=mask.columns)
other_mask = pd.DataFrame([
[False, False, False],
[True, False, False],
[True, True, False],
[False, True, True],
[False, False, True]
], index=mask.index, columns=mask.columns)
ranges = mask2.vbt.signals.between_ranges(other=other_mask)
record_arrays_close(
ranges.values,
np.array([
(0, 0, 0, 1, 1), (1, 0, 1, 1, 1), (2, 1, 0, 2, 1),
(3, 1, 1, 2, 1), (4, 2, 0, 3, 1), (5, 2, 1, 3, 1)
], dtype=range_dt)
)
assert ranges.wrapper == mask2.vbt.wrapper
ranges = mask2.vbt.signals.between_ranges(other=other_mask, from_other=True)
record_arrays_close(
ranges.values,
np.array([
(0, 0, 1, 1, 1), (1, 0, 1, 2, 1), (2, 1, 1, 2, 1),
(3, 1, 1, 3, 1), (4, 2, 1, 3, 1), (5, 2, 1, 4, 1)
], dtype=range_dt)
)
assert ranges.wrapper == mask2.vbt.wrapper
def test_partition_ranges(self):
mask2 = pd.DataFrame([
[False, False, False],
[True, False, False],
[True, True, False],
[False, True, True],
[True, False, True]
], index=mask.index, columns=mask.columns)
ranges = mask2.vbt.signals.partition_ranges()
record_arrays_close(
ranges.values,
np.array([
(0, 0, 1, 3, 1), (1, 0, 4, 4, 0), (2, 1, 2, 4, 1), (3, 2, 3, 4, 0)
], dtype=range_dt)
)
assert ranges.wrapper == mask2.vbt.wrapper
def test_between_partition_ranges(self):
mask2 = pd.DataFrame([
[True, False, False],
[True, True, False],
[False, True, True],
[True, False, True],
[False, True, False]
], index=mask.index, columns=mask.columns)
ranges = mask2.vbt.signals.between_partition_ranges()
record_arrays_close(
ranges.values,
np.array([
(0, 0, 1, 3, 1), (1, 1, 2, 4, 1)
], dtype=range_dt)
)
assert ranges.wrapper == mask2.vbt.wrapper
def test_pos_rank(self):
pd.testing.assert_series_equal(
(~mask['a']).vbt.signals.pos_rank(),
pd.Series([-1, 0, 1, -1, 0], index=mask['a'].index, name=mask['a'].name)
)
pd.testing.assert_frame_equal(
(~mask).vbt.signals.pos_rank(),
pd.DataFrame(
np.array([
[-1, 0, 0],
[0, -1, 1],
[1, 0, -1],
[-1, 1, 0],
[0, -1, 1]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
(~mask).vbt.signals.pos_rank(after_false=True),
pd.DataFrame(
np.array([
[-1, -1, -1],
[0, -1, -1],
[1, 0, -1],
[-1, 1, 0],
[0, -1, 1]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
(~mask).vbt.signals.pos_rank(allow_gaps=True),
pd.DataFrame(
np.array([
[-1, 0, 0],
[0, -1, 1],
[1, 1, -1],
[-1, 2, 2],
[2, -1, 3]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
(~mask).vbt.signals.pos_rank(reset_by=mask['a'], allow_gaps=True),
pd.DataFrame(
np.array([
[-1, 0, 0],
[0, -1, 1],
[1, 1, -1],
[-1, 0, 0],
[0, -1, 1]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
(~mask).vbt.signals.pos_rank(reset_by=mask, allow_gaps=True),
pd.DataFrame(
np.array([
[-1, 0, 0],
[0, -1, 1],
[1, 0, -1],
[-1, 1, 0],
[0, -1, 1]
]),
index=mask.index,
columns=mask.columns
)
)
def test_partition_pos_rank(self):
pd.testing.assert_series_equal(
(~mask['a']).vbt.signals.partition_pos_rank(),
pd.Series([-1, 0, 0, -1, 1], index=mask['a'].index, name=mask['a'].name)
)
pd.testing.assert_frame_equal(
(~mask).vbt.signals.partition_pos_rank(),
pd.DataFrame(
np.array([
[-1, 0, 0],
[0, -1, 0],
[0, 1, -1],
[-1, 1, 1],
[1, -1, 1]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
(~mask).vbt.signals.partition_pos_rank(after_false=True),
pd.DataFrame(
np.array([
[-1, -1, -1],
[0, -1, -1],
[0, 0, -1],
[-1, 0, 0],
[1, -1, 0]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
(~mask).vbt.signals.partition_pos_rank(reset_by=mask['a']),
pd.DataFrame(
np.array([
[-1, 0, 0],
[0, -1, 0],
[0, 1, -1],
[-1, 0, 0],
[0, -1, 0]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
(~mask).vbt.signals.partition_pos_rank(reset_by=mask),
pd.DataFrame(
np.array([
[-1, 0, 0],
[0, -1, 0],
[0, 0, -1],
[-1, 0, 0],
[0, -1, 0]
]),
index=mask.index,
columns=mask.columns
)
)
def test_pos_rank_fns(self):
pd.testing.assert_frame_equal(
(~mask).vbt.signals.first(),
pd.DataFrame(
np.array([
[False, True, True],
[True, False, False],
[False, True, False],
[False, False, True],
[True, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
(~mask).vbt.signals.nth(1),
pd.DataFrame(
np.array([
[False, False, False],
[False, False, True],
[True, False, False],
[False, True, False],
[False, False, True]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
(~mask).vbt.signals.nth(2),
pd.DataFrame(
np.array([
[False, False, False],
[False, False, False],
[False, False, False],
[False, False, False],
[False, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
(~mask).vbt.signals.from_nth(0),
pd.DataFrame(
np.array([
[False, True, True],
[True, False, True],
[True, True, False],
[False, True, True],
[True, False, True]
]),
index=mask.index,
columns=mask.columns
)
)
def test_pos_rank_mapped(self):
mask2 = pd.DataFrame([
[True, False, False],
[True, True, False],
[False, True, True],
[True, False, True],
[False, True, False]
], index=mask.index, columns=mask.columns)
mapped = mask2.vbt.signals.pos_rank_mapped()
np.testing.assert_array_equal(
mapped.values,
np.array([0, 1, 0, 0, 1, 0, 0, 1])
)
np.testing.assert_array_equal(
mapped.col_arr,
np.array([0, 0, 0, 1, 1, 1, 2, 2])
)
np.testing.assert_array_equal(
mapped.idx_arr,
np.array([0, 1, 3, 1, 2, 4, 2, 3])
)
assert mapped.wrapper == mask2.vbt.wrapper
def test_partition_pos_rank_mapped(self):
mask2 = pd.DataFrame([
[True, False, False],
[True, True, False],
[False, True, True],
[True, False, True],
[False, True, False]
], index=mask.index, columns=mask.columns)
mapped = mask2.vbt.signals.partition_pos_rank_mapped()
np.testing.assert_array_equal(
mapped.values,
np.array([0, 0, 1, 0, 0, 1, 0, 0])
)
np.testing.assert_array_equal(
mapped.col_arr,
np.array([0, 0, 0, 1, 1, 1, 2, 2])
)
np.testing.assert_array_equal(
mapped.idx_arr,
np.array([0, 1, 3, 1, 2, 4, 2, 3])
)
assert mapped.wrapper == mask2.vbt.wrapper
def test_nth_index(self):
assert mask['a'].vbt.signals.nth_index(0) == pd.Timestamp('2020-01-01 00:00:00')
pd.testing.assert_series_equal(
mask.vbt.signals.nth_index(0),
pd.Series([
pd.Timestamp('2020-01-01 00:00:00'),
pd.Timestamp('2020-01-02 00:00:00'),
pd.Timestamp('2020-01-03 00:00:00')
], index=mask.columns, name='nth_index', dtype='datetime64[ns]')
)
pd.testing.assert_series_equal(
mask.vbt.signals.nth_index(-1),
pd.Series([
pd.Timestamp('2020-01-04 00:00:00'),
pd.Timestamp('2020-01-05 00:00:00'),
pd.Timestamp('2020-01-03 00:00:00')
], index=mask.columns, name='nth_index', dtype='datetime64[ns]')
)
pd.testing.assert_series_equal(
mask.vbt.signals.nth_index(-2),
pd.Series([
pd.Timestamp('2020-01-01 00:00:00'),
pd.Timestamp('2020-01-02 00:00:00'),
np.nan
], index=mask.columns, name='nth_index', dtype='datetime64[ns]')
)
pd.testing.assert_series_equal(
mask.vbt.signals.nth_index(0, group_by=group_by),
pd.Series([
pd.Timestamp('2020-01-01 00:00:00'),
pd.Timestamp('2020-01-03 00:00:00')
], index=['g1', 'g2'], name='nth_index', dtype='datetime64[ns]')
)
pd.testing.assert_series_equal(
mask.vbt.signals.nth_index(-1, group_by=group_by),
pd.Series([
pd.Timestamp('2020-01-05 00:00:00'),
pd.Timestamp('2020-01-03 00:00:00')
], index=['g1', 'g2'], name='nth_index', dtype='datetime64[ns]')
)
def test_norm_avg_index(self):
assert mask['a'].vbt.signals.norm_avg_index() == -0.25
pd.testing.assert_series_equal(
mask.vbt.signals.norm_avg_index(),
pd.Series([-0.25, 0.25, 0.0], index=mask.columns, name='norm_avg_index')
)
pd.testing.assert_series_equal(
mask.vbt.signals.norm_avg_index(group_by=group_by),
pd.Series([0.0, 0.0], index=['g1', 'g2'], name='norm_avg_index')
)
def test_index_mapped(self):
mapped = mask.vbt.signals.index_mapped()
np.testing.assert_array_equal(
mapped.values,
np.array([0, 3, 1, 4, 2])
)
np.testing.assert_array_equal(
mapped.col_arr,
np.array([0, 0, 1, 1, 2])
)
np.testing.assert_array_equal(
mapped.idx_arr,
np.array([0, 3, 1, 4, 2])
)
assert mapped.wrapper == mask.vbt.wrapper
def test_total(self):
assert mask['a'].vbt.signals.total() == 2
pd.testing.assert_series_equal(
mask.vbt.signals.total(),
pd.Series([2, 2, 1], index=mask.columns, name='total')
)
pd.testing.assert_series_equal(
mask.vbt.signals.total(group_by=group_by),
pd.Series([4, 1], index=['g1', 'g2'], name='total')
)
def test_rate(self):
assert mask['a'].vbt.signals.rate() == 0.4
pd.testing.assert_series_equal(
mask.vbt.signals.rate(),
pd.Series([0.4, 0.4, 0.2], index=mask.columns, name='rate')
)
# -*- coding: utf-8 -*-
# Radproc - A GIS-compatible Python-Package for automated RADOLAN Composite Processing and Analysis.
# Copyright (c) 2018, <NAME>.
# DOI: https://doi.org/10.5281/zenodo.1313701
#
# Distributed under the MIT License (see LICENSE.txt for more information), complemented with the following provision:
# For the scientific transparency and verification of results obtained and communicated to the public after
# using a modified version of the work, You (as the recipient of the source code and author of this modified version,
# used to produce the published results in scientific communications) commit to make this modified source code available
# in a repository that is easily and freely accessible for a duration of five years after the communication of the obtained results.
"""
=====================
Raw Data Processing
=====================
Functions for raw data processing.
Unzip, import, clip and convert RADOLAN raw data and write DataFrames to HDF5.
.. autosummary::
:nosignatures:
:toctree: generated/
unzip_RW_binaries
unzip_YW_binaries
radolan_binaries_to_dataframe
radolan_binaries_to_hdf5
create_idraster_and_process_radolan_data
process_radolan_data
.. module:: radproc.raw
:platform: Windows
:synopsis: Python package radproc (Radar data processing), Module raw
.. moduleauthor:: <NAME>
"""
import numpy as np
import pandas as pd
import os, sys
import tarfile as _tarfile
import gzip as _gzip
import shutil as _shutil
from datetime import datetime
#from radproc.wradlib_io import read_RADOLAN_composite
#from radproc.sampledata import get_projection_file_path
import radproc.wradlib_io as _wrl_io
import radproc.sampledata as _sampledata
import warnings, tables
def unzip_RW_binaries(zipFolder, outFolder):
"""
Unzips RADOLAN RW binary data saved in monthly .tar or tar.gz archives (e.g. RWrea_200101.tar.gz, RWrea_200102.tar.gz).
If necessary, extracted binary files are zipped to .gz archives to save disk space.
Creates directory tree of style
*<outFolder>/<year>/<month>/<binaries with hourly data as .gz files>*
:Parameters:
------------
zipFolder : string
Path of directory containing RW data as monthly tar / tar.gz archives to be unzipped.
Archive names must contain year and month at end of basename: RWrea_200101.tar or RWrea_200101.tar.gz
outFolder : string
Path of output directory. Will be created if it doesn't exist, yet.
:Returns:
---------
No return value
"""
if not os.path.exists(outFolder):
os.mkdir(outFolder)
# create list of all tar files and identify years
tarFileList = os.listdir(zipFolder)
years = np.unique([f[-10:-6] if f.endswith(".tar") else f[-13:-9] for f in tarFileList])
for year in years:
# only select files of current year
tarFilesYear = [f for f in tarFileList if year in f]
# create new folder for current year
yearFolder = os.path.join(outFolder, year)
os.mkdir(yearFolder)
for monthTarFile in tarFilesYear:
# create month folder for every month archive
if monthTarFile.endswith('.tar.gz'):
month = str(int(monthTarFile[-9:-7]))
elif monthTarFile.endswith('.tar'):
month = str(int(monthTarFile[-6:-4]))
monthFolder = os.path.join(yearFolder, month)
os.mkdir(monthFolder)
# open tar archive and extract all files to month folder
with _tarfile.open(name = os.path.join(zipFolder,monthTarFile), mode = 'r') as tar_ref:
tar_ref.extractall(monthFolder)
binaryList = os.listdir(monthFolder)
# if extracted files are already .gz archives: skip, else: zip binary files to .gz archives and delete unzipped files
if not binaryList[0].endswith(".gz"):
for binaryName in binaryList:
binaryFile = os.path.join(monthFolder, binaryName)
with open(binaryFile, 'rb') as f_in, _gzip.open(os.path.join(monthFolder, binaryName + ".gz"), 'wb') as f_out:
_shutil.copyfileobj(f_in, f_out)
os.remove(binaryFile)
def unzip_YW_binaries(zipFolder, outFolder):
"""
Unzips RADOLAN YW binary data.
Data have to be saved in monthly .tar or tar.gz archives (e.g. YWrea_200101.tar.gz, YWrea_200102.tar.gz),
which contain daily archives with binary files.
If necessary, extracted binary files are zipped to .gz archives to save disk space.
Creates directory tree of style
*<outFolder>/<year>/<month>/<binaries with data in temporal resolution of 5 minutes as .gz files>*
:Parameters:
------------
zipFolder : string
Path of directory containing YW data as monthly tar / tar.gz archives to be unzipped.
Archive names must contain year and month at end of basename: YWrea_200101.tar or YWrea_200101.tar.gz
outFolder : string
Path of output directory. Will be created if it doesn't exist, yet.
:Returns:
---------
No return value
"""
if not os.path.exists(outFolder):
os.mkdir(outFolder)
# create list of all tar files
tarFileList = os.listdir(zipFolder)
years = np.unique([f[-10:-6] if f.endswith(".tar") else f[-13:-9] for f in tarFileList])
for year in years:
# only select files of current year
tarFilesYear = [f for f in tarFileList if year in f]
# create new folder for current year
yearFolder = os.path.join(outFolder, year)
os.mkdir(yearFolder)
# for every month...
for monthTarFile in tarFilesYear:
# create month folder for every month archive
if monthTarFile.endswith('.tar.gz'):
month = str(int(monthTarFile[-9:-7]))
elif monthTarFile.endswith('.tar'):
month = str(int(monthTarFile[-6:-4]))
monthFolder = os.path.join(yearFolder, month)
os.mkdir(monthFolder)
# open tar archive and extract all daily gz archives to month folder
with _tarfile.open(name = os.path.join(zipFolder,monthTarFile), mode = 'r') as tar_ref:
tar_ref.extractall(monthFolder)
# for every day...
dayTarFileList = os.listdir(monthFolder)
for dayTarFile in dayTarFileList:
with _tarfile.open(name = os.path.join(monthFolder, dayTarFile), mode = 'r') as tar_ref:
tar_ref.extractall(monthFolder)
os.remove(os.path.join(monthFolder, dayTarFile))
binaryList = os.listdir(monthFolder)
# if extracted files are already .gz archives: skip, else: zip binary files to .gz archives and delete unzipped files
if not binaryList[0].endswith(".gz"):
for binaryName in binaryList:
binaryFile = os.path.join(monthFolder, binaryName)
with open(binaryFile, 'rb') as f_in, _gzip.open(os.path.join(monthFolder, binaryName + ".gz"), 'wb') as f_out:
_shutil.copyfileobj(f_in, f_out)
os.remove(binaryFile)
def radolan_binaries_to_dataframe(inFolder, idArr=None):
"""
Import all RADOLAN binary files in a directory into a pandas DataFrame,
optionally clipping the data to the extent of an investigation area specified by an ID array.
:Parameters:
------------
inFolder : string
Path to the directory containing RADOLAN binary files.
All files ending with '-bin' or '-bin.gz' are read in.
The input folder path does not need to have any particular directory structure.
idArr : one-dimensional numpy array (optional, default: None)
containing ID values to select RADOLAN data of the cells located in the investigation area.
If no idArr is specified, the ID array is automatically generated from RADOLAN metadata
and RADOLAN precipitation data are not clipped to any investigation area.
:Returns:
---------
(df, metadata) : tuple with two elements:
df : pandas DataFrame containing...
- RADOLAN data of the cells located in the investigation area
- datetime row index with defined frequency depending on the RADOLAN product and time zone UTC
- ID values as column names
metadata : dictionary
containing metadata from the last imported RADOLAN binary file
In case any binary files could not be read in due to processing errors,
these are skipped and the respective intervals are filled with NoData (NaN) values.
A textfile with the names and error messages for the respective monthly input data folder is written for information.
For example, errors due to obviously corrupted file formats are known for the RADOLAN RW dataset
in July and August 2005 and May 2007.
:Format description and examples:
---------------------------------
Every row of the output DataFrame equals a precipitation raster of the investigation area at the specific date.
Every column equals a time series of the precipitation at a specific raster cell.
Data can be accessed and sliced with the following Syntax:
**df.loc[row_index, column_name]**
with row index as string in date format 'YYYY-MM-dd hh:mm' and column names as integer values
**Examples:**
>>> df.loc['2008-05-01 00:50',414773] #--> returns single float value of specified date and cell
>>> df.loc['2008-05-01 00:50', :] #--> returns entire row (= raster) of specified date as a pandas Series
>>> df.loc['2008-05-01', :] #--> returns DataFrame with all rows of specified day (because time of day is omitted)
>>> df.loc[:, 414773] #--> returns time series of the specified cell as Series
"""
try:
# List all files in directory
files = os.listdir(inFolder)
except:
print("Directory %s can not be found. Please check your input parameter!" % inFolder)
sys.exit()
ind = []
# Check file endings. Only keep files ending on -bin or -bin.gz which are the usual formats of RADOLAN binary files
files = [f for f in files if f.endswith('-bin') or f.endswith('-bin.gz')]
# Load first binary file to access header information
try:
data, metadata = _wrl_io.read_RADOLAN_composite(os.path.join(inFolder, files[0]))
del data
except:
# if file could not be read, try next file until metadata of one file could be accessed
got_metadata = False
i=0
while got_metadata == False:
print("Can not open %s to access metadata. Trying next file." % files[i])
i+=1
try:
data, metadata = _wrl_io.read_RADOLAN_composite(os.path.join(inFolder, files[i]))
del data
got_metadata = True
except:
got_metadata = False
# interrupt after first 100 files to avoid infinite loops
if i == 100:
print('Could not read the first 100 files in. Exit script. Please check your input files and parameters.')
raise
# different RADOLAN products have different grid sizes (e.g. 900*900 for the RADOLAN national grid,
# 1100*900 for the extended national grid used for RADKLIM)
gridSize = metadata['nrow'] * metadata['ncol']
# if no ID array is specified, generate it from metadata
if idArr is None:
idArr = np.arange(0, gridSize)
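# e.g. for the 900*900 national grid this yields cell IDs 0..809999; a user-supplied idArr
# would instead contain only the IDs of the cells inside the investigation area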
# Create two-dimensional array of dtype float32 filled with zeros. One row per file in inFolder, one column per ID in idArr.
dataArr = np.zeros((len(files), len(idArr)), dtype = np.float32)
skipped_files = []
error_messages = []
# For each file in directory...
for i in range(0, len(files)):
# Read data and header of RADOLAN binary file
try:
data, metadata = _wrl_io.read_RADOLAN_composite(os.path.join(inFolder, files[i]))
# append datetime object to index list. Pandas automatically interprets this list as timeseries.
ind.append(metadata['datetime'])
# binary data block starts in the lower left corner but ESRI Grids are created starting in the upper left corner by default
# [::-1] --> reverse row order of 2D-array so the first row ist located in the geographic north
# reshape(gridSize,) --> convert to one-dimensional array
data = data[::-1].reshape(gridSize,)
# Replace NoData values with NaN
data[data == metadata['nodataflag']] = np.nan
# Clip data to investigation area by selecting all values with a corresponding ID in idArr
# and insert data as row in the two-dimensional data array.
dataArr[i,:] = data[idArr]
except Exception as e:
skipped_files.append(files[i])
error_messages.append(str(e))
# extract datetime from filename instead of metadata
date_str = files[i].split("-")[2]
datetime_obj = datetime.strptime(date_str, '%y%m%d%H%M')
# some early RADOLAN intervals start at HH:45, although the file name says HH:50
if ind and ind[0].minute == 45:
datetime_obj = datetime_obj.replace(minute=45)
# append extracted date to index and insert NaN to all cells of the skipped interval
ind.append(datetime_obj)
dataArr[i,:] = np.nan
# Convert 2D data array to DataFrame, set timeseries index and column names and localize to time zone UTC
df = pd.DataFrame(dataArr, index = ind, columns = idArr)
df.columns.name = 'Cell-ID'
df.index.name = 'Date (UTC)'
df.index = df.index.tz_localize('UTC')
#df = df.tz_localize('UTC')
metadata['timezone'] = 'UTC'
metadata['idArr'] = idArr
# check for RADOLAN product type and set frequency of DataFrame index
# lists can be extended for other products...
if metadata['producttype'] in ["RW"]:
try:
# try to prevent dataframe copying by .asfreq(). this does not seem to work in all pandas versions --> try - except
df.index.freq = pd.tseries.offsets.Hour()
import math
from math import sqrt
from math import e as exp
import seaborn as sns
import statsmodels.api as sm
import random
from scipy import optimize
import pandas as pd
import numpy as np
from scipy.ndimage.filters import gaussian_filter, median_filter
class River:
def __init__(self):
self.error = 0
##### logit model for probability of amplification
probfunction = pd.DataFrame([[-2,.333],[-1, .875],[0,1],[1,1], [-10,0], [-3,0]], columns=['initial eDNA', 'probability of amplification'])
USAGE="""
Create baseyear controls for MTC Bay Area populationsim.
This script does the following:
1) Downloads the relevant Census tables to a local cache specified by CensusFetcher.LOCAL_CACHE_FOLDER,
one table per file in CSV format. These files are the raw tables at a census geography appropriate
for the control geographies in this script, although the column headers have additional variables
that are more descriptive of what the columns mean.
To re-download the data using the Census API, remove the cache file.
2) It then combines the columns in the Census tables to match the control definitions in the
CONTROLS structure in the script.
3) Finally, it transforms the control tables from the Census geographies to the desired control
geography using the MAZ_TAZ_DEF_FILE, which defines MAZs and TAZs as unions of Census blocks.
For controls derived from census data which is available at smaller geographies, this is a
simple aggregation.
However, for controls derived from census data which is not available at smaller geographies,
it is assumed that the smaller geography's total (e.g. households) are apportioned similarly
to it's census geography, and the controls are tallied that way.
4) Creates a simple file, output_[model_year]/maz_data_hh_pop.csv with 3 columns:
MAZ,hh,tot_pop for use in the maz_data.csv that will consistent with these controls, where
these "hh" include the 1-person group quarters households and the tot_pop includes both household
and group quarter persons.
5) It joins the MAZs and TAZs to the 2000 PUMAs (used in the 2007-2011 PUMS, which is
used by create_seed_population.py) and saves these crosswalks as well.
Outputs: households /data/[model_year]_[maz,taz,county]_controls.csv
households /data/geo_cross_walk.csv
group_quarters/data/[model_year]_maz_controls.csv
group_quarters/data/geo_cross_walk.csv
output_[model_year]/maz_data_hh_pop.csv
create_baseyear_controls_[model_year].log
"""
import argparse, collections, logging, os, sys
import census, us
import numpy, pandas, simpledbf
MAZ_TAZ_DEF_FILE = "M:\\Data\\GIS layers\\TM2_maz_taz_v2.2\\blocks_mazs_tazs.csv"
MAZ_TAZ_PUMA_FILE = "M:\\Data\\GIS layers\\TM2_maz_taz_v2.2\\mazs_TM2_v2_2_intersect_puma2000.dbf" # NOTE these are PUMA 2000
AGE_MAX = 130 # max person age
NKID_MAX = 10 # max number of kids
NPER_MAX = 10 # max number of persons
NWOR_MAX = 10 # max number of workers
HINC_MAX = 2000000
# COUNTY coding - census to our county code
COUNTY_RECODE = pandas.DataFrame([{"GEOID_county":"06001", "COUNTY":4, "county_name":"Alameda" , "REGION":1},
{"GEOID_county":"06013", "COUNTY":5, "county_name":"Contra Costa" , "REGION":1},
{"GEOID_county":"06041", "COUNTY":9, "county_name":"Marin" , "REGION":1},
{"GEOID_county":"06055", "COUNTY":7, "county_name":"Napa" , "REGION":1},
{"GEOID_county":"06075", "COUNTY":1, "county_name":"San Francisco", "REGION":1},
{"GEOID_county":"06081", "COUNTY":2, "county_name":"San Mateo" , "REGION":1},
{"GEOID_county":"06085", "COUNTY":3, "county_name":"Santa Clara" , "REGION":1},
{"GEOID_county":"06095", "COUNTY":6, "county_name":"Solano" , "REGION":1},
{"GEOID_county":"06097", "COUNTY":8, "county_name":"Sonoma" , "REGION":1}])
class CensusFetcher:
"""
Class to fetch the census data needed for these controls and cache them.
Uses the census python package (https://pypi.org/project/census/)
"""
# Location of the Census API key
API_KEY_FILE = "M:\\Data\\Census\\API\\api-key.txt"
# Store cache of census tables here
LOCAL_CACHE_FOLDER = "M:\\Data\\Census\\CachedTablesForPopulationSimControls"
CA_STATE_FIPS = "06"
BAY_AREA_COUNTY_FIPS = collections.OrderedDict([
("Alameda" ,"001"),
("Contra Costa" ,"013"),
("Marin" ,"041"),
("Napa" ,"055"),
("San Francisco","075"),
("San Mateo" ,"081"),
("Santa Clara" ,"085"),
("Solano" ,"095"),
("Sonoma" ,"097"),
])
# https://api.census.gov/data/2011/acs/acs5/variables.html
# https://api.census.gov/data/2012/acs5/variables.html
# https://api.census.gov/data/2010/sf1/variables.html
# https://api.census.gov/data/2015/acs5/variables.html
# https://api.census.gov/data/2015/acs1/variables.html
CENSUS_DEFINITIONS = {
"H13":[ # sf1, H13. Household Size [8]
# Universe: Occupied housing units
["variable","pers_min", "pers_max"],
["H0130001", 1, NPER_MAX], # Occupied housing units
["H0130002", 1, 1], # 1-person household
["H0130003", 2, 2], # 2-person household
["H0130004", 3, 3], # 3-person household
["H0130005", 4, 4], # 4-person household
["H0130006", 5, 5], # 5-person household
["H0130007", 6, 6], # 6-person household
["H0130008", 7, NPER_MAX], # 7-or-more-person household
],
"P16":[ # sf1, P16. POPULATION IN HOUSEHOLDS BY AGE
# Universe: Population in households
["variable", "age_min", "age_max"],
["P0160001", 0, AGE_MAX], # Population in households
["P0160002", 0, 17], # Under 18 years
["P0160003", 18, AGE_MAX], # 18 years and over
],
"P12":[ # sf1, P12. Sex By Age [49]
# Universe: Total population
["variable", "sex", "age_min", "age_max"],
["P0120001", "All", 0, AGE_MAX], # Total population
["P0120002", "Male", 0, AGE_MAX], # Male:
["P0120003", "Male", 0, 4], # Male: Under 5 years
["P0120004", "Male", 5, 9], # Male: 5 to 9 years
["P0120005", "Male", 10, 14], # Male: 10 to 14 years
["P0120006", "Male", 15, 17], # Male: 15 to 17 years
["P0120007", "Male", 18, 19], # Male: 18 and 19 years
["P0120008", "Male", 20, 20], # Male: 20 years
["P0120009", "Male", 21, 21], # Male: 21 years
["P0120010", "Male", 22, 24], # Male: 22 to 24 years
["P0120011", "Male", 25, 29], # Male: 25 to 29 years
["P0120012", "Male", 30, 34], # Male: 30 to 34 years
["P0120013", "Male", 35, 39], # Male: 35 to 39 years
["P0120014", "Male", 40, 44], # Male: 40 to 44 years
["P0120015", "Male", 45, 49], # Male: 45 to 49 years
["P0120016", "Male", 50, 54], # Male: 50 to 54 years
["P0120017", "Male", 55, 59], # Male: 55 to 59 years
["P0120018", "Male", 60, 61], # Male: 60 and 61 years
["P0120019", "Male", 62, 64], # Male: 62 to 64 years
["P0120020", "Male", 65, 66], # Male: 65 and 66 years
["P0120021", "Male", 67, 69], # Male: 67 to 69 years
["P0120022", "Male", 70, 74], # Male: 70 to 74 years",
["P0120023", "Male", 75, 79], # Male: 75 to 79 years",
["P0120024", "Male", 80, 84], # Male: 80 to 84 years",
["P0120025", "Male", 85, AGE_MAX], # Male: 85 years and over",
["P0120026", "Female", 0, AGE_MAX], # Female:
["P0120027", "Female", 0, 4], # Female: Under 5 years
["P0120028", "Female", 5, 9], # Female: 5 to 9 years
["P0120029", "Female", 10, 14], # Female: 10 to 14 years
["P0120030", "Female", 15, 17], # Female: 15 to 17 years
["P0120031", "Female", 18, 19], # Female: 18 and 19 years
["P0120032", "Female", 20, 20], # Female: 20 years
["P0120033", "Female", 21, 21], # Female: 21 years
["P0120034", "Female", 22, 24], # Female: 22 to 24 years
["P0120035", "Female", 25, 29], # Female: 25 to 29 years
["P0120036", "Female", 30, 34], # Female: 30 to 34 years
["P0120037", "Female", 35, 39], # Female: 35 to 39 years
["P0120038", "Female", 40, 44], # Female: 40 to 44 years
["P0120039", "Female", 45, 49], # Female: 45 to 49 years
["P0120040", "Female", 50, 54], # Female: 50 to 54 years
["P0120041", "Female", 55, 59], # Female: 55 to 59 years
["P0120042", "Female", 60, 61], # Female: 60 and 61 years
["P0120043", "Female", 62, 64], # Female: 62 to 64 years
["P0120044", "Female", 65, 66], # Female: 65 and 66 years
["P0120045", "Female", 67, 69], # Female: 67 to 69 years
["P0120046", "Female", 70, 74], # Female: 70 to 74 years",
["P0120047", "Female", 75, 79], # Female: 75 to 79 years",
["P0120048", "Female", 80, 84], # Female: 80 to 84 years",
["P0120049", "Female", 85, AGE_MAX], # Female: 85 years and over",
],
"B01001":[ # acs5, B01001. SEX BY AGE
# Universe: Total population
["variable", "sex", "age_min", "age_max"],
["B01001_001E", "All", 0, AGE_MAX], # Total population
["B01001_002E", "Male", 0, AGE_MAX], # Male
["B01001_003E", "Male", 0, 4], # Male Under 5 years
["B01001_004E", "Male", 5, 9], # Male 5 to 9 years
["B01001_005E", "Male", 10, 14], # Male 10 to 14 years
["B01001_006E", "Male", 15, 17], # Male 15 to 17 years
["B01001_007E", "Male", 18, 19], # Male 18 and 19 years
["B01001_008E", "Male", 20, 20], # Male 20 years
["B01001_009E", "Male", 21, 21], # Male 21 years
["B01001_010E", "Male", 22, 24], # Male 22 to 24 years
["B01001_011E", "Male", 25, 29], # Male 25 to 29 years
["B01001_012E", "Male", 30, 34], # Male 30 to 34 years
["B01001_013E", "Male", 35, 39], # Male 35 to 39 years
["B01001_014E", "Male", 40, 44], # Male 40 to 44 years
["B01001_015E", "Male", 45, 49], # Male 45 to 49 years
["B01001_016E", "Male", 50, 54], # Male 50 to 54 years
["B01001_017E", "Male", 55, 59], # Male 55 to 59 years
["B01001_018E", "Male", 60, 61], # Male 60 and 61 years
["B01001_019E", "Male", 62, 64], # Male 62 to 64 years
["B01001_020E", "Male", 65, 66], # Male 65 and 66 years
["B01001_021E", "Male", 67, 69], # Male 67 to 69 years
["B01001_022E", "Male", 70, 74], # Male 70 to 74 years
["B01001_023E", "Male", 75, 79], # Male 75 to 79 years
["B01001_024E", "Male", 80, 84], # Male 80 to 84 years
["B01001_025E", "Male", 85, AGE_MAX], # Male 85 years and over
["B01001_026E", "Female", 0, AGE_MAX], # Female
["B01001_027E", "Female", 0, 4], # Female Under 5 years
["B01001_028E", "Female", 5, 9], # Female 5 to 9 years
["B01001_029E", "Female", 10, 14], # Female 10 to 14 years
["B01001_030E", "Female", 15, 17], # Female 15 to 17 years
["B01001_031E", "Female", 18, 19], # Female 18 and 19 years
["B01001_032E", "Female", 20, 20], # Female 20 years
["B01001_033E", "Female", 21, 21], # Female 21 years
["B01001_034E", "Female", 22, 24], # Female 22 to 24 years
["B01001_035E", "Female", 25, 29], # Female 25 to 29 years
["B01001_036E", "Female", 30, 34], # Female 30 to 34 years
["B01001_037E", "Female", 35, 39], # Female 35 to 39 years
["B01001_038E", "Female", 40, 44], # Female 40 to 44 years
["B01001_039E", "Female", 45, 49], # Female 45 to 49 years
["B01001_040E", "Female", 50, 54], # Female 50 to 54 years
["B01001_041E", "Female", 55, 59], # Female 55 to 59 years
["B01001_042E", "Female", 60, 61], # Female 60 and 61 years
["B01001_043E", "Female", 62, 64], # Female 62 to 64 years
["B01001_044E", "Female", 65, 66], # Female 65 and 66 years
["B01001_045E", "Female", 67, 69], # Female 67 to 69 years
["B01001_046E", "Female", 70, 74], # Female 70 to 74 years
["B01001_047E", "Female", 75, 79], # Female 75 to 79 years
["B01001_048E", "Female", 80, 84], # Female 80 to 84 years
["B01001_049E", "Female", 85, AGE_MAX], # Female 85 years and over
],
"B11002":[ # acs5, B11002. HOUSEHOLD TYPE BY RELATIVES AND NONRELATIVES FOR POPULATION IN HOUSEHOLDS
# Universe: Population in households
["variable" ],
["B11002_001E"], # Estimate: Total
],
"B11005":[ # B11005. acs5, HOUSEHOLDS BY PRESENCE OF PEOPLE UNDER 18 YEARS BY HOUSEHOLD TYPE
# Universe: Households
["variable", "family", "famtype", "num_kids_min", "num_kids_max"],
["B11005_002E","All", "All", 1, NKID_MAX], # Households with one or more people under 18 years
["B11005_011E","All", "All", 0, 0], # Households with no people under 18 years
],
"P43":[ # sf1, P43. GROUP QUARTERS POPULATION BY SEX BY AGE BY GROUP QUARTERS TYPE [63]
# Universe: Population in group quarters
["variable", "sex", "age_min", "age_max", "inst","subcategory" ],
["P0430001", "All", 0, 130, "All", "All" ],
["P0430002", "Male", 0, 130, "All", "All" ],
["P0430003", "Male", 0, 17, "All", "All" ],
["P0430004", "Male", 0, 17, "Inst", "All" ], # Institutionalized population (101-106, 201-203, 301, 401-405):
["P0430005", "Male", 0, 17, "Inst", "Correctional"], # Institutionalized population (101-106, 201-203, 301, 401-405): - Correctional facilities for adults (101-106)
["P0430006", "Male", 0, 17, "Inst", "Juvenile" ], # Institutionalized population (101-106, 201-203, 301, 401-405): - Juvenile facilities (201-203)
["P0430007", "Male", 0, 17, "Inst", "Nursing" ], # Institutionalized population (101-106, 201-203, 301, 401-405): - Nursing facilities/Skilled-nursing facilities (301)
["P0430008", "Male", 0, 17, "Inst", "Other" ], # Institutionalized population (101-106, 201-203, 301, 401-405): - Other institutional facilities (401-405)
["P0430009", "Male", 0, 17, "Noninst", "All" ], # Noninstitutionalized population (501, 601-602, 701-702, 704, 706, 801-802, 900-901, 903-904):
["P0430010", "Male", 0, 17, "Noninst", "College" ], # Noninstitutionalized population (501, 601-602, 701-702, 704, 706, 801-802, 900-901, 903-904): - College/University student housing (501)
["P0430011", "Male", 0, 17, "Noninst", "Military" ], # Noninstitutionalized population (501, 601-602, 701-702, 704, 706, 801-802, 900-901, 903-904): - Military quarters (601-602)
["P0430012", "Male", 0, 17, "Noninst", "Other" ], # Noninstitutionalized population (501, 601-602, 701-702, 704, 706, 801-802, 900-901, 903-904): - Other noninstitutional facilities (701-702, 704, 706, 801-802, 900-901, 903-904)
["P0430013", "Male", 18, 64, "All", "All" ],
["P0430014", "Male", 18, 64, "Inst", "All" ], # Institutionalized population (101-106, 201-203, 301, 401-405):
["P0430015", "Male", 18, 64, "Inst", "Correctional"], # Institutionalized population (101-106, 201-203, 301, 401-405): - Correctional facilities for adults (101-106)
["P0430016", "Male", 18, 64, "Inst", "Juvenile" ], # Institutionalized population (101-106, 201-203, 301, 401-405): - Juvenile facilities (201-203)
["P0430017", "Male", 18, 64, "Inst", "Nursing" ], # Institutionalized population (101-106, 201-203, 301, 401-405): - Nursing facilities/Skilled-nursing facilities (301)
["P0430018", "Male", 18, 64, "Inst", "Other" ], # Institutionalized population (101-106, 201-203, 301, 401-405): - Other institutional facilities (401-405)
["P0430019", "Male", 18, 64, "Noninst", "All" ], # Noninstitutionalized population (501, 601-602, 701-702, 704, 706, 801-802, 900-901, 903-904):
["P0430020", "Male", 18, 64, "Noninst", "College" ], # Noninstitutionalized population (501, 601-602, 701-702, 704, 706, 801-802, 900-901, 903-904): - College/University student housing (5
["P0430021", "Male", 18, 64, "Noninst", "Military" ], # Noninstitutionalized population (501, 601-602, 701-702, 704, 706, 801-802, 900-901, 903-904): - Military quarters (601-602)
["P0430022", "Male", 18, 64, "Noninst", "Other" ], # Noninstitutionalized population (501, 601-602, 701-702, 704, 706, 801-802, 900-901, 903-904): - Other noninstitutional facilities (701-702, 704, 706, 801-802, 900-901, 903-904)
["P0430023", "Male", 65, 130, "All", "All" ],
["P0430024", "Male", 65, 130, "Inst", "All" ], # Institutionalized population (101-106, 201-203, 301, 401-405):
["P0430025", "Male", 65, 130, "Inst", "Correctional"], # Institutionalized population (101-106, 201-203, 301, 401-405): - Correctional facilities for adults (101-106)
["P0430026", "Male", 65, 130, "Inst", "Juvenile" ], # Institutionalized population (101-106, 201-203, 301, 401-405): - Juvenile facilities (201-203)
["P0430027", "Male", 65, 130, "Inst", "Nursing" ], # Institutionalized population (101-106, 201-203, 301, 401-405): - Nursing facilities/Skilled-nursing facilities (301)
["P0430028", "Male", 65, 130, "Inst", "Other" ], # Institutionalized population (101-106, 201-203, 301, 401-405): - Other institutional facilities (401-405)
["P0430029", "Male", 65, 130, "Noninst", "All" ], # Noninstitutionalized population (501, 601-602, 701-702, 704, 706, 801-802, 900-901, 903-904):
["P0430030", "Male", 65, 130, "Noninst", "College" ], # Noninstitutionalized population (501, 601-602, 701-702, 704, 706, 801-802, 900-901, 903-904): - College/University student housing (5
["P0430031", "Male", 65, 130, "Noninst", "Military" ], # Noninstitutionalized population (501, 601-602, 701-702, 704, 706, 801-802, 900-901, 903-904): - Military quarters (601-602)
["P0430032", "Male", 65, 130, "Noninst", "Other" ], # Noninstitutionalized population (501, 601-602, 701-702, 704, 706, 801-802, 900-901, 903-904): - Other noninstitutional facilities (701-702, 704, 706, 801-802, 900-901, 903-904)
["P0430033", "Male", 0, 130, "All", "All" ],
["P0430034", "Female", 0, 17, "All", "All" ],
["P0430035", "Female", 0, 17, "Inst", "All" ], # Institutionalized population (101-106, 201-203, 301, 401-405):
["P0430036", "Female", 0, 17, "Inst", "Correctional"], # Institutionalized population (101-106, 201-203, 301, 401-405): - Correctional facilities for adults (101-106)
["P0430037", "Female", 0, 17, "Inst", "Juvenile" ], # Institutionalized population (101-106, 201-203, 301, 401-405): - Juvenile facilities (201-203)
["P0430038", "Female", 0, 17, "Inst", "Nursing" ], # Institutionalized population (101-106, 201-203, 301, 401-405): - Nursing facilities/Skilled-nursing facilities (301)
["P0430039", "Female", 0, 17, "Inst", "Other" ], # Institutionalized population (101-106, 201-203, 301, 401-405): - Other institutional facilities (401-405)
["P0430040", "Female", 0, 17, "Noninst", "All" ], # Noninstitutionalized population (501, 601-602, 701-702, 704, 706, 801-802, 900-901, 903-904):
["P0430041", "Female", 0, 17, "Noninst", "College" ], # Noninstitutionalized population (501, 601-602, 701-702, 704, 706, 801-802, 900-901, 903-904): - College/University student housing (501)
["P0430042", "Female", 0, 17, "Noninst", "Military" ], # Noninstitutionalized population (501, 601-602, 701-702, 704, 706, 801-802, 900-901, 903-904): - Military quarters (601-602)
["P0430043", "Female", 0, 17, "Noninst", "Other" ], # Noninstitutionalized population (501, 601-602, 701-702, 704, 706, 801-802, 900-901, 903-904): - Other noninstitutional facilities (701-702, 704, 706, 801-802, 900-901, 903-904)
["P0430044", "Female", 18, 64, "All", "All" ],
["P0430045", "Female", 18, 64, "Inst", "All" ], # Institutionalized population (101-106, 201-203, 301, 401-405):
["P0430046", "Female", 18, 64, "Inst", "Correctional"], # Institutionalized population (101-106, 201-203, 301, 401-405): - Correctional facilities for adults (101-106)
["P0430047", "Female", 18, 64, "Inst", "Juvenile" ], # Institutionalized population (101-106, 201-203, 301, 401-405): - Juvenile facilities (201-203)
["P0430048", "Female", 18, 64, "Inst", "Nursing" ], # Institutionalized population (101-106, 201-203, 301, 401-405): - Nursing facilities/Skilled-nursing facilities (301)
["P0430049", "Female", 18, 64, "Inst", "Other" ], # Institutionalized population (101-106, 201-203, 301, 401-405): - Other institutional facilities (401-405)
["P0430050", "Female", 18, 64, "Noninst", "All" ], # Noninstitutionalized population (501, 601-602, 701-702, 704, 706, 801-802, 900-901, 903-904):
["P0430051", "Female", 18, 64, "Noninst", "College" ], # Noninstitutionalized population (501, 601-602, 701-702, 704, 706, 801-802, 900-901, 903-904): - College/University student housing (5
["P0430052", "Female", 18, 64, "Noninst", "Military" ], # Noninstitutionalized population (501, 601-602, 701-702, 704, 706, 801-802, 900-901, 903-904): - Military quarters (601-602)
["P0430053", "Female", 18, 64, "Noninst", "Other" ], # Noninstitutionalized population (501, 601-602, 701-702, 704, 706, 801-802, 900-901, 903-904): - Other noninstitutional facilities (701-702, 704, 706, 801-802, 900-901, 903-904)
["P0430054", "Female", 65, 130, "All", "All" ],
["P0430055", "Female", 65, 130, "Inst", "All" ], # Institutionalized population (101-106, 201-203, 301, 401-405):
["P0430056", "Female", 65, 130, "Inst", "Correctional"], # Institutionalized population (101-106, 201-203, 301, 401-405): - Correctional facilities for adults (101-106)
["P0430057", "Female", 65, 130, "Inst", "Juvenile" ], # Institutionalized population (101-106, 201-203, 301, 401-405): - Juvenile facilities (201-203)
["P0430058", "Female", 65, 130, "Inst", "Nursing" ], # Institutionalized population (101-106, 201-203, 301, 401-405): - Nursing facilities/Skilled-nursing facilities (301)
["P0430059", "Female", 65, 130, "Inst", "Other" ], # Institutionalized population (101-106, 201-203, 301, 401-405): - Other institutional facilities (401-405)
["P0430060", "Female", 65, 130, "Noninst", "All" ], # Noninstitutionalized population (501, 601-602, 701-702, 704, 706, 801-802, 900-901, 903-904):
["P0430061", "Female", 65, 130, "Noninst", "College" ], # Noninstitutionalized population (501, 601-602, 701-702, 704, 706, 801-802, 900-901, 903-904): - College/University student housing (5
["P0430062", "Female", 65, 130, "Noninst", "Military" ], # Noninstitutionalized population (501, 601-602, 701-702, 704, 706, 801-802, 900-901, 903-904): - Military quarters (601-602)
["P0430063", "Female", 65, 130, "Noninst", "Other" ], # Noninstitutionalized population (501, 601-602, 701-702, 704, 706, 801-802, 900-901, 903-904): - Other noninstitutional facilities (701-702, 704, 706, 801-802, 900-901, 903-904)
],
"B23025":[ # acs5, B23025. EMPLOYMENT STATUS FOR THE POPULATION 16 YEARS AND OVER
# Universe: Population 16 years and over
["variable", "inlaborforce", "type", "employed" ],
["B23025_001E", "All", "All", "All" ], # Total
["B23025_002E", "Yes", "All", "All" ], # In labor force
["B23025_003E", "Yes", "Civilian", "All" ], # In labor force, Civilian labor force
["B23025_004E", "Yes", "Civilian", "Employed" ], # In labor force, Civilian labor force, Employed
["B23025_005E", "Yes", "Civilian", "Unemployed"], # In labor force, Civilian labor force, Unemployed
["B23025_006E", "Yes", "Armed Forces", "Employed" ], # In labor force, Armed Forces
["B23025_007E", "No", "All", "All" ], # Not in labor force
],
"B26001":[ # acs5, B26001. GROUP QUARTERS POPULATION
# Universe: Population in group quarters
["variable" ],
["B26001_001E"], # Estimate: Total
],
"PCT16":[ # sf1, PCT16. HOUSEHOLD TYPE BY NUMBER OF PEOPLE UNDER 18 YEARS (EXCLUDING HOUSEHOLDERS, SPOUSES, AND UNMARRIED PARTNERS) [26]
# Universe: Households
["variable", "family", "famtype", "num_kids_min", "num_kids_max"],
["PCT0160001", "All", "All", 0, NKID_MAX], # Total
["PCT0160002", "Family", "All", 0, NKID_MAX], # Family households:
["PCT0160003", "Family", "HusWif", 0, NKID_MAX], # Family households: - Husband-wife family:
["PCT0160004", "Family", "HusWif", 0, 0], # Family households: - Husband-wife family: - With no children under 18 years
["PCT0160005", "Family", "HusWif", 1, 1], # Family households: - Husband-wife family: - With one child under 18 years
["PCT0160006", "Family", "HusWif", 2, 2], # Family households: - Husband-wife family: - With two children under 18 years
["PCT0160007", "Family", "HusWif", 3, 3], # Family households: - Husband-wife family: - With three children under 18 years
["PCT0160008", "Family", "HusWif", 4, NKID_MAX], # Family households: - Husband-wife family: - With four or more children under 18 years
["PCT0160009", "Family", "MaleH", 0, NKID_MAX], # Family households: - Male householder, no wife present:
["PCT0160010", "Family", "MaleH", 0, 0], # Family households: - Male householder, no wife present: - With no children under 18 years
["PCT0160011", "Family", "MaleH", 1, 1], # Family households: - Male householder, no wife present: - With one child under 18 years
["PCT0160012", "Family", "MaleH", 2, 2], # Family households: - Male householder, no wife present: - With two children under 18 years
["PCT0160013", "Family", "MaleH", 3, 3], # Family households: - Male householder, no wife present: - With three children under 18 years
["PCT0160014", "Family", "MaleH", 4, NKID_MAX], # Family households: - Male householder, no wife present: - With four or more children under 18 years
["PCT0160015", "Family", "FemaleH", 0, NKID_MAX], # Family households: - Female householder, no husband present:
["PCT0160016", "Family", "FemaleH", 0, 0], # Family households: - Female householder, no husband present: - With no children under 18 years
["PCT0160017", "Family", "FemaleH", 1, 1], # Family households: - Female householder, no husband present: - With one child under 18 years
["PCT0160018", "Family", "FemaleH", 2, 2], # Family households: - Female householder, no husband present: - With two children under 18 years
["PCT0160019", "Family", "FemaleH", 3, 3], # Family households: - Female householder, no husband present: - With three children under 18 years
["PCT0160020", "Family", "FemaleH", 4, NKID_MAX], # Family households: - Female householder, no husband present: - With four or more children under 18 years
["PCT0160021", "Nonfamily","All", 0, NKID_MAX], # Nonfamily households:
["PCT0160022", "Nonfamily","All", 0, 0], # Nonfamily households: - With no children under 18 years
["PCT0160023", "Nonfamily","All", 1, 1], # Nonfamily households: - With one child under 18 years
["PCT0160024", "Nonfamily","All", 2, 2], # Nonfamily households: - With two children under 18 years
["PCT0160025", "Nonfamily","All", 3, 3], # Nonfamily households: - With three children under 18 years
["PCT0160026", "Nonfamily","All", 4, NKID_MAX], # Nonfamily households: - With four or more children under 18 years
],
"B08202":[ # acs5, B08202. HOUSEHOLD SIZE BY NUMBER OF WORKERS IN HOUSEHOLD
# Universe: Households
["variable", "workers_min","workers_max","persons_min","persons_max"],
["B08202_001E", 0, NWOR_MAX, 0, NPER_MAX], # Total:
["B08202_002E", 0, 0, 0, NPER_MAX], # Total: - No workers
["B08202_003E", 1, 1, 0, NPER_MAX], # Total: - 1 worker
["B08202_004E", 2, 2, 0, NPER_MAX], # Total: - 2 workers
["B08202_005E", 3, NWOR_MAX, 0, NPER_MAX], # Total: - 3 or more workers
["B08202_006E", 0, NWOR_MAX, 1, 1], # Total: - 1-person household:
["B08202_007E", 0, 0, 1, 1], # Total: - 1-person household: - No workers
["B08202_008E", 1, 1, 1, 1], # Total: - 1-person household: - 1 worker
["B08202_009E", 0, NWOR_MAX, 2, 2], # Total: - 2-person household:
["B08202_010E", 0, 0, 2, 2], # Total: - 2-person household: - No workers
["B08202_011E", 1, 1, 2, 2], # Total: - 2-person household: - 1 worker
["B08202_012E", 2, 2, 2, 2], # Total: - 2-person household: - 2 workers
["B08202_013E", 0, NWOR_MAX, 3, 3], # Total: - 3-person household:
["B08202_014E", 0, 0, 3, 3], # Total: - 3-person household: - No workers
["B08202_015E", 1, 1, 3, 3], # Total: - 3-person household: - 1 worker
["B08202_016E", 2, 2, 3, 3], # Total: - 3-person household: - 2 workers
["B08202_017E", 3, 3, 3, 3], # Total: - 3-person household: - 3 workers
["B08202_018E", 0, NWOR_MAX, 4, NPER_MAX], # Total: - 4-or-more-person household:
["B08202_019E", 0, 0, 4, NPER_MAX], # Total: - 4-or-more-person household: - No workers
["B08202_020E", 1, 1, 4, NPER_MAX], # Total: - 4-or-more-person household: - 1 worker
["B08202_021E", 2, 2, 4, NPER_MAX], # Total: - 4-or-more-person household: - 2 workers
["B08202_022E", 3, NWOR_MAX, 4, NPER_MAX], # Total: - 4-or-more-person household: - 3 or more workers
],
"B11016":[ # acs5, B11016. HOUSEHOLD TYPE BY HOUSEHOLD SIZE
# Universe: Households
["variable", "family", "pers_min", "pers_max"],
["B11016_001E", "All", 0, NPER_MAX], # Total
["B11016_002E", "Family", 0, NPER_MAX], # Family households
["B11016_003E", "Family", 2, 2], # Family households, 2-person household
["B11016_004E", "Family", 3, 3], # Family households, 3-person household
["B11016_005E", "Family", 4, 4], # Family households, 4-person household
["B11016_006E", "Family", 5, 5], # Family households, 5-person household
["B11016_007E", "Family", 6, 6], # Family households, 6-person household
["B11016_008E", "Family", 7, NPER_MAX], # Family households, 7-or-more person household
["B11016_009E", "Nonfamily", 0, NPER_MAX], # Nonfamily households
["B11016_010E", "Nonfamily", 1, 1], # Nonfamily households, 1-person household
["B11016_011E", "Nonfamily", 2, 2], # Nonfamily households, 2-person household
["B11016_012E", "Nonfamily", 3, 3], # Nonfamily households, 3-person household
["B11016_013E", "Nonfamily", 4, 4], # Nonfamily households, 4-person household
["B11016_014E", "Nonfamily", 5, 5], # Nonfamily households, 5-person household
["B11016_015E", "Nonfamily", 6, 6], # Nonfamily households, 6-person household
["B11016_016E", "Nonfamily", 7, NPER_MAX], # Nonfamily households, 7-or-more person household
],
"B19001":[ # acs5, B19001. HOUSEHOLD INCOME IN THE PAST 12 MONTHS (IN 2010 INFLATION-ADJUSTED DOLLARS):
# Universe: Households
# USE acs 2006-2010 https://api.census.gov/data/2010/acs5/variables.html for 2010 dollars
["variable", "hhinc_min", "hhinc_max"],
["B19001_001E", 0, HINC_MAX], # Households
["B19001_002E", 0, 9999], # Households Less than $10,000
["B19001_003E", 10000, 14999], # Households $10,000 to $14,999
["B19001_004E", 15000, 19999], # Households $15,000 to $19,999
["B19001_005E", 20000, 24999], # Households $20,000 to $24,999
["B19001_006E", 25000, 29999], # Households $25,000 to $29,999
["B19001_007E", 30000, 34999], # Households $30,000 to $34,999
["B19001_008E", 35000, 39999], # Households $35,000 to $39,999
["B19001_009E", 40000, 44999], # Households $40,000 to $44,999
["B19001_010E", 45000, 49999], # Households $45,000 to $49,999
["B19001_011E", 50000, 59999], # Households $50,000 to $59,999
["B19001_012E", 60000, 74999], # Households $60,000 to $74,999
["B19001_013E", 75000, 99999], # Households $75,000 to $99,999
["B19001_014E", 100000, 124999], # Households $100,000 to $124,999
["B19001_015E", 125000, 149999], # Households $125,000 to $149,999
["B19001_016E", 150000, 199999], # Households $150,000 to $199,999
["B19001_017E", 200000, HINC_MAX], # Households $200,000 or more
],
"C24010":[ # acs5, C24010. SEX BY OCCUPATION FOR THE CIVILIAN EMPLOYED POPULATION 16 YEARS AND OVER
# Universe: Civilian employed population 16 years and over
["variable", "sex", "occ_cat1", "occ_cat2", "occ_cat3" ],
["C24010_001E", "All", "All", "All", "All" ],
["C24010_002E", "Male", "All", "All", "All" ],
["C24010_003E", "Male", "Management, business, science, and arts", "All", "All" ],
["C24010_004E", "Male", "Management, business, science, and arts", "Management, business, and financial", "All" ],
["C24010_005E", "Male", "Management, business, science, and arts", "Management, business, and financial", "Management" ],
["C24010_006E", "Male", "Management, business, science, and arts", "Management, business, and financial", "Business and financial operations" ],
["C24010_007E", "Male", "Management, business, science, and arts", "Computer, engineering, and science", "All" ],
["C24010_008E", "Male", "Management, business, science, and arts", "Computer, engineering, and science", "Computer and mathematical" ],
["C24010_009E", "Male", "Management, business, science, and arts", "Computer, engineering, and science", "Architecture and engineering" ],
["C24010_010E", "Male", "Management, business, science, and arts", "Computer, engineering, and science", "Life, physical, and social science" ],
["C24010_011E", "Male", "Management, business, science, and arts", "Education, legal, community service, arts, and media", "All" ],
["C24010_012E", "Male", "Management, business, science, and arts", "Education, legal, community service, arts, and media", "Community and social service" ],
["C24010_013E", "Male", "Management, business, science, and arts", "Education, legal, community service, arts, and media", "Legal" ],
["C24010_014E", "Male", "Management, business, science, and arts", "Education, legal, community service, arts, and media", "Education, training, and library" ],
["C24010_015E", "Male", "Management, business, science, and arts", "Education, legal, community service, arts, and media", "Arts, design, entertainment, sports, and media" ],
["C24010_016E", "Male", "Management, business, science, and arts", "Healthcare practitioners and technical", "All" ],
["C24010_017E", "Male", "Management, business, science, and arts", "Healthcare practitioners and technical", "Health diagnosing and treating practitioners and other technical" ],
["C24010_018E", "Male", "Management, business, science, and arts", "Healthcare practitioners and technical", "Health technologists and technicians" ],
["C24010_019E", "Male", "Service", "All", "All" ],
["C24010_020E", "Male", "Service", "Healthcare support", "All" ],
["C24010_021E", "Male", "Service", "Protective service", "All" ],
["C24010_022E", "Male", "Service", "Protective service", "Fire fighting and prevention, and other protective service workers"], # including supervisors
["C24010_023E", "Male", "Service", "Protective service", "Law enforcement workers" ], # including supervisors
["C24010_024E", "Male", "Service", "Food preparation and serving related", "All" ],
["C24010_025E", "Male", "Service", "Building and grounds cleaning and maintenance", "All" ],
["C24010_026E", "Male", "Service", "Personal care and service", "All" ],
["C24010_027E", "Male", "Sales and office", "All", "All" ],
["C24010_028E", "Male", "Sales and office", "Sales and related", "All" ],
["C24010_029E", "Male", "Sales and office", "Office and administrative support", "All" ],
["C24010_030E", "Male", "Natural resources, construction, and maintenance", "All", "All" ],
["C24010_031E", "Male", "Natural resources, construction, and maintenance", "Farming, fishing, and forestry", "All" ],
["C24010_032E", "Male", "Natural resources, construction, and maintenance", "Construction and extraction", "All" ],
["C24010_033E", "Male", "Natural resources, construction, and maintenance", "Installation, maintenance, and repair", "All" ],
["C24010_034E", "Male", "Production, transportation, and material moving", "All", "All" ],
["C24010_035E", "Male", "Production, transportation, and material moving", "Production", "All" ],
["C24010_036E", "Male", "Production, transportation, and material moving", "Transportation", "All" ],
["C24010_037E", "Male", "Production, transportation, and material moving", "Material moving", "All" ],
["C24010_038E", "Female", "All", "All", "All" ],
["C24010_039E", "Female", "Management, business, science, and arts", "All", "All" ],
["C24010_040E", "Female", "Management, business, science, and arts", "Management, business, and financial", "All" ],
["C24010_041E", "Female", "Management, business, science, and arts", "Management, business, and financial", "Management" ],
["C24010_042E", "Female", "Management, business, science, and arts", "Management, business, and financial", "Business and financial operations" ],
["C24010_043E", "Female", "Management, business, science, and arts", "Computer, engineering, and science", "All" ],
["C24010_044E", "Female", "Management, business, science, and arts", "Computer, engineering, and science", "Computer and mathematical" ],
["C24010_045E", "Female", "Management, business, science, and arts", "Computer, engineering, and science", "Architecture and engineering" ],
["C24010_046E", "Female", "Management, business, science, and arts", "Computer, engineering, and science", "Life, physical, and social science" ],
["C24010_047E", "Female", "Management, business, science, and arts", "Education, legal, community service, arts, and media", "All" ],
["C24010_048E", "Female", "Management, business, science, and arts", "Education, legal, community service, arts, and media", "Community and social service" ],
["C24010_049E", "Female", "Management, business, science, and arts", "Education, legal, community service, arts, and media", "Legal" ],
["C24010_050E", "Female", "Management, business, science, and arts", "Education, legal, community service, arts, and media", "Education, training, and library" ],
["C24010_051E", "Female", "Management, business, science, and arts", "Education, legal, community service, arts, and media", " Arts, design, entertainment, sports, and media" ],
["C24010_052E", "Female", "Management, business, science, and arts", "Healthcare practitioners and technical", "All" ],
["C24010_053E", "Female", "Management, business, science, and arts", "Healthcare practitioners and technical", "Health diagnosing and treating practitioners and other technical" ],
["C24010_054E", "Female", "Management, business, science, and arts", "Healthcare practitioners and technical", "Health technologists and technicians" ],
["C24010_055E", "Female", "Service", "All", "All" ],
["C24010_056E", "Female", "Service", "Healthcare support", "All" ],
["C24010_057E", "Female", "Service", "Protective service", "All" ],
["C24010_058E", "Female", "Service", "Protective service", "Fire fighting and prevention, and other protective service" ], # including supervisors
["C24010_059E", "Female", "Service", "Protective service", "Law enforcement workers" ], # including supervisors
["C24010_060E", "Female", "Service", "Food preparation and serving related", "All" ],
["C24010_061E", "Female", "Service", "Building and grounds cleaning and maintenance", "All" ],
["C24010_062E", "Female", "Service", "Personal care and service", "All" ],
["C24010_063E", "Female", "Sales and office", "All", "All" ],
["C24010_064E", "Female", "Sales and office", "Sales and related", "All" ],
["C24010_065E", "Female", "Sales and office", "Office and administrative support", "All" ],
["C24010_066E", "Female", "Natural resources, construction, and maintenance", "All", "All" ],
["C24010_067E", "Female", "Natural resources, construction, and maintenance", "Farming, fishing, and forestry", "All" ],
["C24010_068E", "Female", "Natural resources, construction, and maintenance", "Construction and extraction", "All" ],
["C24010_069E", "Female", "Natural resources, construction, and maintenance", "Installation, maintenance, and repair", "All" ],
["C24010_070E", "Female", "Production, transportation, and material moving", "All", "All" ],
["C24010_071E", "Female", "Production, transportation, and material moving", "Production", "All" ],
["C24010_072E", "Female", "Production, transportation, and material moving", "Transportation", "All" ],
["C24010_073E", "Female", "Production, transportation, and material moving", "Material moving", "All" ],
]
}
def __init__(self):
"""
Read the census api key and instantiate the census object.
"""
# read the census api key
with open(CensusFetcher.API_KEY_FILE) as f: self.CENSUS_API_KEY = f.read()
self.census = census.Census(self.CENSUS_API_KEY)
logging.debug("census object instantiated")
def get_census_data(self, dataset, year, table, geo):
"""
        Dataset is one of "sf1" or "acs5"
        Year is the data year for the table
Geo is one of "block", "block group", "tract", "county subdivision" or "county"
"""
if dataset not in ["sf1","acs5"]:
raise ValueError("get_census_data only supports datasets 'sf1' and 'acs5'")
if geo not in ["block", "block group", "tract", "county subdivision", "county"]:
raise ValueError("get_census_data received unsupported geo {0}".format(geo))
if table not in CensusFetcher.CENSUS_DEFINITIONS.keys():
raise ValueError("get_census_data received unsupported table {0}".format(table))
table_cache_file = os.path.join(CensusFetcher.LOCAL_CACHE_FOLDER, "{0}_{1}_{2}_{3}.csv".format(dataset,year,table,geo))
logging.info("Checking for table cache at {0}".format(table_cache_file))
# lookup table definition
table_def = CensusFetcher.CENSUS_DEFINITIONS[table]
# logging.debug(table_def)
table_cols = table_def[0] # e.g. ['variable', 'pers_min', 'pers_max']
if geo=="block":
geo_index = ["state","county","tract","block"]
elif geo=="block group":
geo_index = ["state","county","tract","block group"]
elif geo=="tract":
geo_index = ["state","county","tract"]
elif geo=="county subdivision":
geo_index = ["state","county","county subdivision"]
elif geo=="county":
geo_index = ["state","county"]
# lookup cache and return, if it exists
if os.path.exists(table_cache_file):
logging.info("Reading {0}".format(table_cache_file))
dtypes_dict = {k:object for k in geo_index}
# This version doesn't make the index columns into strings
# full_df_v1 = pandas.read_csv(table_cache_file,
# header=range(len(table_cols)),
# index_col=range(len(geo_index)), dtype=dtypes_dict)
# we want the index columns as strings
# https://github.com/pandas-dev/pandas/issues/9435
full_df = pandas.read_csv(table_cache_file, dtype=dtypes_dict, skiprows=len(table_cols)).set_index(geo_index)
full_df_cols = pandas.read_csv(table_cache_file,
header=range(len(table_cols)),
index_col=range(len(geo_index)),nrows=0).columns
full_df.columns = full_df_cols
return full_df
multi_col_def = [] # we'll build this
full_df = None # and this
for census_col in table_def[1:]:
# census_col looks like ['H0130001', 1, 10]
# fetch for one county at a time
df = pandas.DataFrame()
# loop through counties (unless getting at county level)
county_codes = CensusFetcher.BAY_AREA_COUNTY_FIPS.values()
if geo=="county": county_codes = ["do_once"]
for county_code in county_codes:
if geo == "county":
geo_dict = {'for':'{0}:*'.format(geo), 'in':'state:{0}'.format(CensusFetcher.CA_STATE_FIPS)}
else:
geo_dict = {'for':'{0}:*'.format(geo),
'in':'state:{0} county:{1}'.format(CensusFetcher.CA_STATE_FIPS, county_code)}
if dataset == "sf1":
county_df = pandas.DataFrame.from_records(self.census.sf1.get(census_col[0], geo_dict, year=year)).set_index(geo_index)
elif dataset == "acs5":
county_df = pandas.DataFrame.from_records(self.census.acs5.get(census_col[0], geo_dict, year=year)).set_index(geo_index)
# force the data column to be a float -- sometimes they're objects which won't work
county_df = county_df.astype(float)
df = df.append(county_df)
# join with existing full_df
if len(multi_col_def) == 0:
full_df = df
else:
full_df = full_df.merge(df, left_index=True, right_index=True)
# note column defs
multi_col_def.append(census_col)
if geo=="county":
# if we fetched for county then we have all counties -- restrict to just the counties we care about
county_tuples = [(CensusFetcher.CA_STATE_FIPS, x) for x in CensusFetcher.BAY_AREA_COUNTY_FIPS.values()]
full_df = full_df.loc[county_tuples]
# logging.debug(full_df.head())
# now we have the table with multiple columns -- name the columns with decoded names
        full_df.columns = pandas.MultiIndex.from_tuples(multi_col_def, names=table_cols)
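        # Assumed continuation (hedged sketch, not shown in the original excerpt): write the
        # fetched table to the local cache so later calls hit the CSV branch above, then return it.
        full_df.to_csv(table_cache_file)
        logging.info("Wrote {0}".format(table_cache_file))
        return full_df

# Example (sketch, not from the original file): fetching one table at the county level.
# fetcher = CensusFetcher()
# b19001_df = fetcher.get_census_data("acs5", 2010, "B19001", "county")
# print(b19001_df.head())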
#!/usr/bin/env python
# coding: utf-8
# <b> Run below cells from the folder that contains ads16_dataset/ unzipped </b>
# In[1]:
import pandas as pd
import glob
import pathlib
import re
# In[2]:
pd.set_option('display.max_colwidth', -1)  # -1 means "do not truncate"; pandas >= 1.0 expects None here instead
# In[3]:
# Global constants
g_userPart1PathPrefix = "./ads16-dataset/ADS16_Benchmark_part1/ADS16_Benchmark_part1/Corpus/Corpus/"
g_userPart2PathPrefix = "./ads16-dataset/ADS16_Benchmark_part2/ADS16_Benchmark_part2/Corpus/Corpus/"
g_userIdPrefix = "U0"
g_adsPart1PathPrefix = "./ads16-dataset/ADS16_Benchmark_part1/ADS16_Benchmark_part1/Ads/Ads/"
g_adsPart2PathPrefix = "./ads16-dataset/ADS16_Benchmark_part2/ADS16_Benchmark_part2/Ads/Ads/"
# # UDFs
# ## UDFs for generating Users Dataset
# In[4]:
def generate_data_User( pathPrefix, userId ):
completePath = pathPrefix + userId + "/"
# INF
infFile = userId + "-INF.csv"
userInf_df = pd.read_csv(completePath + infFile, delimiter=";")
# Pref
prefFile = userId + "-PREF.csv"
userPref_df = pd.read_csv(completePath + prefFile, delimiter=";")
user_df = pd.concat([userInf_df, userPref_df], axis=1)
# Pos
posFile = userId + "-IM-POS.csv"
userPos_df = pd.read_csv(completePath + posFile, delimiter=";")
userPos_df = userPos_df.iloc[1:]
userPos_df.reset_index(drop=True, inplace=True)
user_df = pd.concat([user_df, userPos_df], axis=1)
# Neg
negFile = userId + "-IM-NEG.csv"
userNeg_df = pd.read_csv(completePath + negFile, delimiter=";")
userNeg_df = userNeg_df.iloc[1:]
userNeg_df.reset_index(drop=True, inplace=True)
user_df = pd.concat([user_df, userNeg_df], axis=1)
user_df.insert(0, "UserId", userId, True)
# user_df = user_df.set_index('UserId')
# user_df.info()
return user_df
# In[5]:
def generate_data_partUsers( usersPartPathPrefix, startRange, endRange ):
partUsers_df = pd.DataFrame()
for i in range(startRange, endRange):
thisUserIdNum = str(i)
thisUserId = g_userIdPrefix + thisUserIdNum.zfill(3)
# print(thisUserId)
thisUser_df = generate_data_User(usersPartPathPrefix, thisUserId)
partUsers_df = partUsers_df.append(thisUser_df, sort=True)
    partUsers_df.set_index('UserId')  # note: result not assigned, so UserId stays a regular column
return partUsers_df
# In[6]:
def generate_data_allUsers():
allUsers_df = pd.DataFrame()
part1Users_df = generate_data_partUsers(g_userPart1PathPrefix, 1, 61)
allUsers_df = allUsers_df.append(part1Users_df, sort=True)
part2Users_df = generate_data_partUsers(g_userPart2PathPrefix, 61, 121)
allUsers_df = allUsers_df.append(part2Users_df, sort=True)
return allUsers_df
# ## UDFs for generating Ads Dataset
# In[7]:
def generate_data_adCats():
adCatsLst = [['01', "Clothing & Shoes", 16],
['02', "Automotive", 15],
['03', "Baby Products", 15],
['04', "Health & Beauty", 15],
['05', "Media (BMVD)", 15],
['06', "Consumer Electronics", 15],
['07', "Console & Video Games", 15],
['08', "DIY & Tools", 15],
['09', "Garden & Outdoor living", 15],
['10', "Grocery", 15],
['11', "Kitchen & Home", 15],
['12', "Betting", 15],
['13', "Jewellery & Watches", 15],
['14', "Musical Instruments", 15],
['15', "Office Products", 15],
['16', "Pet Supplies", 15],
['17', "Computer Software", 15],
['18', "Sports & Outdoors", 15],
['19', "Toys & Games", 15],
['20', "Dating Sites", 15]
]
adCats_df = pd.DataFrame(adCatsLst, columns =['AdCatId', 'AdCatName', 'AdCatNumAds'])
return adCats_df
# In[8]:
import re
def atoi(text):
return int(text) if text.isdigit() else text
def natural_keys(text):
return [ atoi(c) for c in re.split(r'(\d+)',text.split('/')[-1].split('.')[0]) ]
def generate_data_partAds( adsPartPathPrefix, startRange, endRange ):
partAds_df = pd.DataFrame()
partAdsRows = []
for i in range(startRange, endRange):
iStr = str(i)
adsFiles = pathlib.Path(adsPartPathPrefix + iStr + "/").glob("*.png")
adsFileStrLst = []
for adsFile in adsFiles:
adsFileStr = str(adsFile)
adsFileStrLst.append(adsFileStr)
adsFileStrLst.sort(key=natural_keys)
for adsFileStr in adsFileStrLst:
adId = adsFileStr.split('/')[-1].split('.')[0]
adId = "A" + iStr.zfill(2) + "_" + adId.zfill(2)
# print(adId, adsFileStr)
partAdsRows.append([adId, adsFileStr])
partAds_df = pd.DataFrame(partAdsRows, columns =['AdId', 'AdFilePath'])
    partAds_df.set_index('AdId')  # note: result not assigned; the index is set in generate_data_allAds
return partAds_df
# In[9]:
# DEBUG
def generate_data_allAds():
allAds_df = pd.DataFrame()
part1Ads_df = generate_data_partAds(g_adsPart1PathPrefix, 1, 11)
allAds_df = allAds_df.append(part1Ads_df, sort=True)
part2Ads_df = generate_data_partAds(g_adsPart2PathPrefix, 11, 21)
allAds_df = allAds_df.append(part2Ads_df, sort=True)
allAds_df = allAds_df.set_index('AdId')
return allAds_df
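# In[ ]:
# Example usage (sketch, not part of the original notebook): build the full users and ads
# frames once the dataset archives are unzipped next to this script; the output filename is illustrative.
# allUsers_df = generate_data_allUsers()
# allAds_df = generate_data_allAds()
# allUsers_df.to_csv("ads16_users.csv")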
# ## UDFs for generating Ratings Dataset
# In[10]:
def df_crossjoin(df1, df2):
df1['_tmpkey'] = 1
df2['_tmpkey'] = 1
    res = pd.merge(df1, df2, on='_tmpkey')
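    # Assumed completion (hedged): drop the helper key from the result and from the inputs
    # (which were modified in place above) before returning the cross join.
    res = res.drop('_tmpkey', axis=1)
    df1.drop('_tmpkey', axis=1, inplace=True)
    df2.drop('_tmpkey', axis=1, inplace=True)
    return res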
#!/usr/bin/env python3
# coding: utf-8
import json
import numpy as np
import pandas as pd
from visdom import Visdom
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.dates as mdate
# import seaborn
# seaborn.set()
# from pylab import mpl
import os, time, sys, pickle
from datetime import datetime
from dateutil.parser import parse
font = {'family': 'SimHei'}
xlabel_dict = {'social_average': u'社会平均', 'state_owned': u'国有单位',  # "social average", "state-owned units"
               'house_price': u'每平米单价'}  # "price per square meter"; values must match the CSV column names
ylabel_dict = {'salary_house': u'工资-房价(RMB)'}  # "salary vs. house price (RMB)"
def show_salary_chart(ylabel=''):
global ylabel_dict
plt.style.use('seaborn-whitegrid')
    plt.xlabel(u'时间轴', fontproperties='SimHei')  # x-axis label: "time axis"
plt.xticks(rotation=-90)
plt.title(ylabel, fontproperties='SimHei')
# plt.xlim(2000, 2020)
# plt.ylim(-1, max_pe+10)
plt.legend(loc=0, prop=font)
plt.grid(True)
viz = Visdom(env='main')
viz.matplot(plt)
if __name__=='__main__':
if len(sys.argv) < 3:
print("please enter the house price and annual salary csv path")
raise SystemExit(1)
csv_data_file = sys.argv[1]
data_frame = pd.read_csv(csv_data_file)
date_list = data_frame['year'].apply(str).apply(parse)
# 每平米单价
data_array = np.array(data_frame[xlabel_dict['house_price']])
price = pd.Series(data_array, index=date_list.values).sort_index(ascending=False)
plt.plot(price.index, price.values, label=xlabel_dict['house_price'])
csv_data_file = sys.argv[2]
    data_frame = pd.read_csv(csv_data_file)
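    # Assumed continuation (hedged sketch): the second CSV is expected to carry a 'year' column
    # plus the salary columns named in xlabel_dict; plot both series and render the chart in visdom.
    date_list = data_frame['year'].apply(str).apply(parse)
    for key in ('social_average', 'state_owned'):
        col = xlabel_dict[key]
        if col in data_frame.columns:
            salary = pd.Series(np.array(data_frame[col]), index=date_list.values).sort_index(ascending=False)
            plt.plot(salary.index, salary.values, label=col)
    show_salary_chart(ylabel_dict['salary_house'])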
from pathlib import Path
from sklearn.model_selection import train_test_split
import pandas as pd
def read_and_clean_dataset(path_to_csv: Path) -> pd.DataFrame:
df = pd.read_csv(str(path_to_csv), header=None, sep=' ')
del df[7]
df.columns = ['filename', 'class', 'train_sub_class', 'x1', 'y1', 'x2', 'y2']
new_df = []
for filename, objets in df.groupby('filename'):
for unique_row, _ in objets.groupby(['class', 'x1', 'y1', 'x2', 'y2']):
new_df.append([filename] + list(unique_row))
new_df = pd.DataFrame(new_df, columns=['filename', 'class', 'x1', 'y1', 'x2', 'y2'])
return new_df
def train_split(df: pd.DataFrame) -> (pd.DataFrame, pd.DataFrame):
train_data = []
val_data = []
for _, class_df in df.groupby('class'):
filenames = class_df['filename'].unique()
filenames_train, filenames_val = train_test_split(filenames, test_size=0.2)
class_df_train = class_df[class_df['filename'].isin(filenames_train)]
class_df_val = class_df[class_df['filename'].isin(filenames_val)]
train_data.append(class_df_train)
val_data.append(class_df_val)
train_data = pd.concat(train_data)
val_data = pd.concat(val_data)
return train_data, val_data
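# Example (sketch, not part of the original script): chaining the helpers above; the annotation
# path below is illustrative.
# annotations = read_and_clean_dataset(Path("data/annotations.txt"))
# train_df, val_df = train_split(annotations)
# print(len(train_df), len(val_df))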
def create_test_set(path_to_test_file: Path) -> pd.DataFrame:
    df = pd.read_csv(path_to_test_file, header=None, sep='\t', na_values='none')
"""Module containing all required information about the interface between raw (or transformed)
public data and DiCE explainers."""
import logging
from collections import defaultdict
import numpy as np
import pandas as pd
from dice_ml.data_interfaces.base_data_interface import _BaseData
from dice_ml.utils.exception import (SystemException,
UserConfigValidationException)
class PublicData(_BaseData):
"""A data interface for public data. This class is an interface to DiCE explainers
and contains methods to transform user-fed raw data into the format a DiCE explainer
requires, and vice versa."""
def __init__(self, params):
"""Init method
:param dataframe: The train dataframe used by explainer method.
:param continuous_features: List of names of continuous features. The remaining features are categorical features.
:param outcome_name: Outcome feature name.
:param permitted_range (optional): Dictionary with feature names as keys and permitted range in list as values.
Defaults to the range inferred from training data.
:param continuous_features_precision (optional): Dictionary with feature names as keys and precisions as values.
:param data_name (optional): Dataset name
"""
self._validate_and_set_outcome_name(params=params)
self._validate_and_set_dataframe(params=params)
self._validate_and_set_continuous_features(params=params)
self.feature_names = [
name for name in self.data_df.columns.tolist() if name != self.outcome_name]
self.number_of_features = len(self.feature_names)
if len(set(self.continuous_feature_names) - set(self.feature_names)) != 0:
raise UserConfigValidationException(
"continuous_features contains some feature names which are not part of columns in dataframe"
)
self.categorical_feature_names = [name for name in self.data_df.columns.tolist(
) if name not in self.continuous_feature_names + [self.outcome_name]]
self.categorical_feature_indexes = [self.data_df.columns.get_loc(
name) for name in self.categorical_feature_names if name in self.data_df]
self._validate_and_set_continuous_features_precision(params=params)
if len(self.categorical_feature_names) > 0:
for feature in self.categorical_feature_names:
self.data_df[feature] = self.data_df[feature].apply(str)
self.data_df[self.categorical_feature_names] = self.data_df[self.categorical_feature_names].astype(
'category')
if len(self.continuous_feature_names) > 0:
for feature in self.continuous_feature_names:
if self.get_data_type(feature) == 'float':
self.data_df[feature] = self.data_df[feature].astype(
np.float32)
else:
self.data_df[feature] = self.data_df[feature].astype(
np.int32)
# should move the below snippet to gradient based dice interfaces
# self.one_hot_encoded_data = self.one_hot_encode_data(self.data_df)
# self.ohe_encoded_feature_names = [x for x in self.one_hot_encoded_data.columns.tolist(
# ) if x not in np.array([self.outcome_name])]
# should move the below snippet to model agnostic dice interfaces
# # Initializing a label encoder to obtain label-encoded values for categorical variables
# self.labelencoder = {}
#
# self.label_encoded_data = self.data_df.copy()
#
# for column in self.categorical_feature_names:
# self.labelencoder[column] = LabelEncoder()
# self.label_encoded_data[column] = self.labelencoder[column].fit_transform(self.data_df[column])
self._validate_and_set_permitted_range(params=params)
# should move the below snippet to model agnostic dice interfaces
# self.max_range = -np.inf
# for feature in self.continuous_feature_names:
# self.max_range = max(self.max_range, self.permitted_range[feature][1])
self._validate_and_set_data_name(params=params)
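    # Example (sketch, not taken from the library docs): constructing this interface directly
    # with the params dict documented above; the dataframe and column names are illustrative.
    # d = PublicData(params={
    #     "dataframe": train_df,                            # includes the outcome column
    #     "continuous_features": ["age", "hours_per_week"], # remaining columns treated as categorical
    #     "outcome_name": "income",
    # })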
def _validate_and_set_dataframe(self, params):
"""Validate and set the dataframe."""
if 'dataframe' not in params:
raise ValueError("dataframe not found in params")
if isinstance(params['dataframe'], pd.DataFrame):
self.data_df = params['dataframe'].copy()
else:
raise ValueError("should provide a pandas dataframe")
if 'outcome_name' in params and params['outcome_name'] not in self.data_df.columns.tolist():
raise UserConfigValidationException(
"outcome_name {0} not found in {1}".format(
params['outcome_name'], ','.join(self.data_df.columns.tolist())
)
)
def _validate_and_set_continuous_features(self, params):
"""Validate and set the list of continuous features."""
if 'continuous_features' not in params:
raise ValueError('continuous_features should be provided')
if type(params['continuous_features']) is list:
self.continuous_feature_names = params['continuous_features']
else:
raise ValueError(
"should provide the name(s) of continuous features in the data as a list")
def _validate_and_set_continuous_features_precision(self, params):
"""Validate and set the dictionary of precision for continuous features."""
if 'continuous_features_precision' in params:
self.continuous_features_precision = params['continuous_features_precision']
if not hasattr(self, 'feature_names'):
raise SystemException('Feature names not correctly set in public data interface')
for continuous_features_precision_feature_name in self.continuous_features_precision:
if continuous_features_precision_feature_name not in self.feature_names:
raise UserConfigValidationException(
"continuous_features_precision contains some feature names which are not part of columns in dataframe"
)
else:
self.continuous_features_precision = None
def _validate_and_set_permitted_range(self, params):
"""Validate and set the dictionary of permitted ranges for continuous features."""
input_permitted_range = None
if 'permitted_range' in params:
input_permitted_range = params['permitted_range']
if not hasattr(self, 'feature_names'):
raise SystemException('Feature names not correctly set in public data interface')
for input_permitted_range_feature_name in input_permitted_range:
if input_permitted_range_feature_name not in self.feature_names:
raise UserConfigValidationException(
"permitted_range contains some feature names which are not part of columns in dataframe"
)
self.permitted_range, _ = self.get_features_range(input_permitted_range)
def check_features_to_vary(self, features_to_vary):
if features_to_vary is not None and features_to_vary != 'all':
not_training_features = set(features_to_vary) - set(self.feature_names)
if len(not_training_features) > 0:
raise UserConfigValidationException("Got features {0} which are not present in training data".format(
not_training_features))
def check_permitted_range(self, permitted_range):
if permitted_range is not None:
permitted_range_features = list(permitted_range)
not_training_features = set(permitted_range_features) - set(self.feature_names)
if len(not_training_features) > 0:
raise UserConfigValidationException("Got features {0} which are not present in training data".format(
not_training_features))
for feature in permitted_range_features:
if feature in self.categorical_feature_names:
train_categories = self.permitted_range[feature]
for test_category in permitted_range[feature]:
if test_category not in train_categories:
raise UserConfigValidationException(
'The category {0} does not occur in the training data for feature {1}.'
' Allowed categories are {2}'.format(test_category, feature, train_categories))
def check_mad_validity(self, feature_weights):
"""checks feature MAD validity and throw warnings.
TODO: add comments as to where this is used if this function is necessary, else remove.
"""
if feature_weights == "inverse_mad":
self.get_valid_mads(display_warnings=True, return_mads=False)
def get_features_range(self, permitted_range_input=None):
ranges = {}
# Getting default ranges based on the dataset
for feature_name in self.continuous_feature_names:
ranges[feature_name] = [
self.data_df[feature_name].min(), self.data_df[feature_name].max()]
for feature_name in self.categorical_feature_names:
ranges[feature_name] = self.data_df[feature_name].unique().tolist()
feature_ranges_orig = ranges.copy()
# Overwriting the ranges for a feature if input provided
if permitted_range_input is not None:
for feature_name, feature_range in permitted_range_input.items():
ranges[feature_name] = feature_range
return ranges, feature_ranges_orig
def get_data_type(self, col):
"""Infers data type of a continuous feature from the training data."""
if (self.data_df[col].dtype == np.int64) or (self.data_df[col].dtype == np.int32):
return 'int'
elif (self.data_df[col].dtype == np.float64) or (self.data_df[col].dtype == np.float32):
return 'float'
else:
raise ValueError("Unknown data type of feature %s: must be int or float" % col)
def one_hot_encode_data(self, data):
"""One-hot-encodes the data."""
return pd.get_dummies(data, drop_first=False, columns=self.categorical_feature_names)
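    # Example (sketch): one-hot encoding a toy frame; 'education' is an illustrative column name.
    # toy = pd.DataFrame({"age": [22, 31], "education": ["school", "college"]})
    # pd.get_dummies(toy, drop_first=False, columns=["education"])
    # # -> columns: age, education_college, education_school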
def normalize_data(self, df):
"""Normalizes continuous features to make them fall in the range [0,1]."""
result = df.copy()
if isinstance(df, pd.DataFrame) or isinstance(df, dict):
for feature_name in self.continuous_feature_names:
max_value = self.data_df[feature_name].max()
min_value = self.data_df[feature_name].min()
if min_value == max_value:
result[feature_name] = 0
else:
result[feature_name] = (df[feature_name] - min_value) / (max_value - min_value)
else:
result = result.astype('float')
for feature_index in self.continuous_feature_indexes:
feature_name = self.feature_names[feature_index]
max_value = self.data_df[feature_name].max()
min_value = self.data_df[feature_name].min()
if len(df.shape) == 1:
if min_value == max_value:
value = 0
else:
value = (df[feature_index] - min_value) / (max_value - min_value)
result[feature_index] = value
else:
if min_value == max_value:
result[:, feature_index] = np.zeros(len(df[:, feature_index]))
else:
result[:, feature_index] = (df[:, feature_index] - min_value) / (max_value - min_value)
return result
def de_normalize_data(self, df):
"""De-normalizes continuous features from [0,1] range to original range."""
if len(df) == 0:
return df
result = df.copy()
for feature_name in self.continuous_feature_names:
max_value = self.data_df[feature_name].max()
min_value = self.data_df[feature_name].min()
result[feature_name] = (
df[feature_name] * (max_value - min_value)) + min_value
return result
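    # Worked example (sketch): if a feature spans [10, 30] in the training data, a raw value of 25
    # normalizes to (25 - 10) / (30 - 10) = 0.75, and de_normalize_data maps it back:
    # 0.75 * (30 - 10) + 10 == 25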
def get_valid_feature_range(self, feature_range_input, normalized=True):
"""Gets the min/max value of features in normalized or de-normalized
form. Assumes that all features are already encoded to numerical form
such that the number of features remains the same.
        TODO: needs work to adhere to the label-encoded max and to support permitted_range
        for both continuous and discrete features when provided in _generate_counterfactuals.
"""
feature_range = {}
for _, feature_name in enumerate(self.feature_names):
feature_range[feature_name] = []
if feature_name in self.continuous_feature_names:
max_value = self.data_df[feature_name].max()
min_value = self.data_df[feature_name].min()
if normalized:
minx = (feature_range_input[feature_name]
[0] - min_value) / (max_value - min_value)
maxx = (feature_range_input[feature_name]
[1] - min_value) / (max_value - min_value)
else:
minx = feature_range_input[feature_name][0]
maxx = feature_range_input[feature_name][1]
feature_range[feature_name].append(minx)
feature_range[feature_name].append(maxx)
else:
# categorical features
feature_range[feature_name] = feature_range_input[feature_name]
return feature_range
def get_minx_maxx(self, normalized=True):
"""Gets the min/max value of features in normalized or de-normalized form."""
minx = np.array([[0.0] * len(self.ohe_encoded_feature_names)])
maxx = np.array([[1.0] * len(self.ohe_encoded_feature_names)])
for idx, feature_name in enumerate(self.continuous_feature_names):
max_value = self.data_df[feature_name].max()
min_value = self.data_df[feature_name].min()
if normalized:
minx[0][idx] = (self.permitted_range[feature_name]
[0] - min_value) / (max_value - min_value)
maxx[0][idx] = (self.permitted_range[feature_name]
[1] - min_value) / (max_value - min_value)
else:
minx[0][idx] = self.permitted_range[feature_name][0]
maxx[0][idx] = self.permitted_range[feature_name][1]
return minx, maxx
# if encoding=='one-hot':
# minx = np.array([[0.0] * len(self.ohe_encoded_feature_names)])
# maxx = np.array([[1.0] * len(self.ohe_encoded_feature_names)])
# for idx, feature_name in enumerate(self.continuous_feature_names):
# max_value = self.train_df[feature_name].max()
# min_value = self.train_df[feature_name].min()
# if normalized:
# minx[0][idx] = (self.permitted_range[feature_name]
# [0] - min_value) / (max_value - min_value)
# maxx[0][idx] = (self.permitted_range[feature_name]
# [1] - min_value) / (max_value - min_value)
# else:
# minx[0][idx] = self.permitted_range[feature_name][0]
# maxx[0][idx] = self.permitted_range[feature_name][1]
# else:
# minx = np.array([[0.0] * len(self.feature_names)])
# maxx = np.array([[1.0] * len(self.feature_names)])
def get_mads(self, normalized=False):
"""Computes Median Absolute Deviation of features."""
mads = {}
if normalized is False:
for feature in self.continuous_feature_names:
mads[feature] = np.median(
abs(self.data_df[feature].values - np.median(self.data_df[feature].values)))
else:
normalized_train_df = self.normalize_data(self.data_df)
for feature in self.continuous_feature_names:
mads[feature] = np.median(
abs(normalized_train_df[feature].values - np.median(normalized_train_df[feature].values)))
return mads
def get_valid_mads(self, normalized=False, display_warnings=False, return_mads=True):
"""Computes Median Absolute Deviation of features. If they are <=0, returns a practical value instead"""
mads = self.get_mads(normalized=normalized)
for feature in mads:
if mads[feature] <= 0:
mads[feature] = 1.0
if display_warnings:
logging.warning(" MAD for feature %s is 0, so replacing it with 1.0 to avoid error.", feature)
if return_mads:
return mads
def get_quantiles_from_training_data(self, quantile=0.05, normalized=False):
"""Computes required quantile of Absolute Deviations of features."""
quantiles = {}
if normalized is False:
for feature in self.continuous_feature_names:
quantiles[feature] = np.quantile(
abs(list(set(self.data_df[feature].tolist())) - np.median(
list(set(self.data_df[feature].tolist())))), quantile)
else:
normalized_train_df = self.normalize_data(self.data_df)
for feature in self.continuous_feature_names:
quantiles[feature] = np.quantile(
abs(list(set(normalized_train_df[feature].tolist())) - np.median(
list(set(normalized_train_df[feature].tolist())))), quantile)
return quantiles
def create_ohe_params(self):
if len(self.categorical_feature_names) > 0:
one_hot_encoded_data = self.one_hot_encode_data(self.data_df)
self.ohe_encoded_feature_names = [x for x in one_hot_encoded_data.columns.tolist(
) if x not in np.array([self.outcome_name])]
else:
# one-hot-encoded data is same as original data if there is no categorical features.
self.ohe_encoded_feature_names = [feat for feat in self.feature_names]
# base dataframe for doing one-hot-encoding
# ohe_encoded_feature_names and ohe_base_df are created (and stored as data class's parameters)
# when get_data_params_for_gradient_dice() is called from gradient-based DiCE explainers
self.ohe_base_df = self.prepare_df_for_ohe_encoding()
def get_data_params_for_gradient_dice(self):
"""Gets all data related params for DiCE."""
self.create_ohe_params()
minx, maxx = self.get_minx_maxx(normalized=True)
# get the column indexes of categorical and continuous features after one-hot-encoding
encoded_categorical_feature_indexes = self.get_encoded_categorical_feature_indexes()
flattened_indexes = [item for sublist in encoded_categorical_feature_indexes for item in sublist]
encoded_continuous_feature_indexes = [ix for ix in range(len(minx[0])) if ix not in flattened_indexes]
# min and max for continuous features in original scale
org_minx, org_maxx = self.get_minx_maxx(normalized=False)
cont_minx = list(org_minx[0][encoded_continuous_feature_indexes])
cont_maxx = list(org_maxx[0][encoded_continuous_feature_indexes])
# decimal precisions for continuous features
cont_precisions = [self.get_decimal_precisions()[ix] for ix in range(len(self.continuous_feature_names))]
return minx, maxx, encoded_categorical_feature_indexes, encoded_continuous_feature_indexes, cont_minx, \
cont_maxx, cont_precisions
def get_encoded_categorical_feature_indexes(self):
"""Gets the column indexes categorical features after one-hot-encoding."""
cols = []
for col_parent in self.categorical_feature_names:
temp = [self.ohe_encoded_feature_names.index(
col) for col in self.ohe_encoded_feature_names if col.startswith(col_parent) and
col not in self.continuous_feature_names]
cols.append(temp)
return cols
def get_indexes_of_features_to_vary(self, features_to_vary='all'):
"""Gets indexes from feature names of one-hot-encoded data."""
# TODO: add encoding as a parameter and use the function get_indexes_of_features_to_vary for label encoding too
if features_to_vary == "all":
return [i for i in range(len(self.ohe_encoded_feature_names))]
else:
ixs = []
encoded_cats_ixs = self.get_encoded_categorical_feature_indexes()
encoded_cats_ixs = [item for sublist in encoded_cats_ixs for item in sublist]
for colidx, col in enumerate(self.ohe_encoded_feature_names):
if colidx in encoded_cats_ixs and col.startswith(tuple(features_to_vary)):
ixs.append(colidx)
elif colidx not in encoded_cats_ixs and col in features_to_vary:
ixs.append(colidx)
return ixs
def from_label(self, data):
"""Transforms label encoded data back to categorical values"""
out = data.copy()
if isinstance(data, pd.DataFrame) or isinstance(data, dict):
for column in self.categorical_feature_names:
out[column] = self.labelencoder[column].inverse_transform(out[column].round().astype(int).tolist())
return out
elif isinstance(data, list):
for c in self.categorical_feature_indexes:
out[c] = self.labelencoder[self.feature_names[c]].inverse_transform([round(out[c])])[0]
return out
def from_dummies(self, data, prefix_sep='_'):
"""Gets the original data from dummy encoded data with k levels."""
out = data.copy()
for feat in self.categorical_feature_names:
# first, derive column names in the one-hot-encoded data from the original data
cat_col_values = []
for val in list(self.data_df[feat].unique()):
cat_col_values.append(feat + prefix_sep + str(
val)) # join original feature name and its unique values , ex: education_school
match_cols = [c for c in data.columns if
c in cat_col_values] # check for the above matching columns in the encoded data
# then, recreate original data by removing the suffixes - based on the GitHub issue comment:
# https://github.com/pandas-dev/pandas/issues/8745#issuecomment-417861271
cols, labs = [[c.replace(
x, "") for c in match_cols] for x in ["", feat + prefix_sep]]
out[feat] = pd.Categorical(
np.array(labs)[np.argmax(data[cols].values, axis=1)])
out.drop(cols, axis=1, inplace=True)
return out
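    # Example (sketch): given one-hot columns 'education_school' and 'education_college',
    # from_dummies rebuilds a single 'education' column by taking the argmax over the matching
    # indicator columns and stripping the 'education_' prefix from the winning column name.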
def get_decimal_precisions(self, output_type="list"):
""""Gets the precision of continuous features in the data."""
# if the precision of a continuous feature is not given, we use the maximum precision of the modes to capture the
# precision of majority of values in the column.
precisions_dict = defaultdict(int)
precisions = [0] * len(self.feature_names)
for ix, col in enumerate(self.continuous_feature_names):
if (self.continuous_features_precision is not None) and (col in self.continuous_features_precision):
precisions[ix] = self.continuous_features_precision[col]
precisions_dict[col] = self.continuous_features_precision[col]
elif self.data_df[col].dtype == np.float32 or self.data_df[col].dtype == np.float64:
modes = self.data_df[col].mode()
maxp = len(str(modes[0]).split('.')[1]) # maxp stores the maximum precision of the modes
for mx in range(len(modes)):
prec = len(str(modes[mx]).split('.')[1])
if prec > maxp:
maxp = prec
precisions[ix] = maxp
precisions_dict[col] = maxp
if output_type == "list":
return precisions
elif output_type == "dict":
return precisions_dict
def get_decoded_data(self, data, encoding='one-hot'):
"""Gets the original data from encoded data."""
if len(data) == 0:
return data
index = [i for i in range(0, len(data))]
if encoding == 'one-hot':
if isinstance(data, pd.DataFrame):
return self.from_dummies(data)
elif isinstance(data, np.ndarray):
data = pd.DataFrame(data=data, index=index,
columns=self.ohe_encoded_feature_names)
return self.from_dummies(data)
else:
raise ValueError("data should be a pandas dataframe or a numpy array")
elif encoding == 'label':
data = pd.DataFrame(data=data, index=index,
columns=self.feature_names)
return data
def prepare_df_for_ohe_encoding(self):
"""Create base dataframe to do OHE for a single instance or a set of instances"""
levels = []
colnames = [feat for feat in self.categorical_feature_names]
for cat_feature in colnames:
levels.append(self.data_df[cat_feature].cat.categories.tolist())
if len(colnames) > 0:
df = pd.DataFrame({colnames[0]: levels[0]})
else:
df = pd.DataFrame()
for col in range(1, len(colnames)):
temp_df = pd.DataFrame({colnames[col]: levels[col]})
            df = pd.concat([df, temp_df], axis=1, sort=False)
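        # Assumed completion (hedged): return the per-categorical-feature levels frame used as the OHE base.
        return df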
# --------------
#Header files
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# path of the data file is assumed to be provided in the variable `path`
data = pd.read_csv(path)
# -*- coding: utf-8 -*-
"""
Created on Tue May 3 10:49:58 2016
Auger peak finding and quantitative routines ... batch processing
@author: tkc
First get it working for single file.
"""
#%%
import pandas as pd
import numpy as np
import os, sys, shutil, glob, re
if 'C:\\Users\\tkc\\Documents\\Python_Scripts' not in sys.path:
sys.path.append('C:\\Users\\tkc\\Documents\\Python_Scripts')
import Auger_smdifquant_functions as AESsmquant
import Auger_integquant_functions as AESintquant
import Auger_utility_functions as AESutils
import Auger_plot_functions as AESplot
''' AESsmquant contains functions related to peak finding in smooth-differentiated spectra
whereas AESquant contains background fitting and integration over peaks directly from counts '''
# import Auger_integquant_functions as AESquant
#%% REFIT of problematic peaks
# Manual refitting of failed fits on single peaks (usually Ca)
# filter with SPE list above
AugerParamLog=pd.read_csv('Augerparamlog.csv', encoding='cp437')
Smdifpeakslog=pd.read_csv('Smdifpeakslog.csv', encoding='cp437')
Integquantlog=pd.read_csv('Integquantlog.csv', encoding='cp437')
Backfitlog=pd.read_csv('Backfitlog.csv', encoding='cp437')
AESquantparams=pd.read_csv('C:\\Users\\tkc\\Documents\\Python_Scripts\\AESquantparams.csv', encoding='utf-8') # global version
# ALTERNATE QUANT and FIT PARAMS (which are sometimes used if problems arise)
AESquantparams=pd.read_csv('AESquantparams.csv', encoding='utf-8') # load local version instead
AESquantparams=pd.read_csv('C:\\Users\\tkc\\Documents\\Python_Scripts\\AESquantparams_Ca_refit.csv', encoding='utf-8')
"""
Calculation of metrics including accuracy, AUROC, and PRC, outputing CAM of tiles, and output
last layer activation for tSNE 2.0
Created on 04/26/2019
@author: RH
"""
import matplotlib
matplotlib.use('Agg')
import os
import numpy as np
import sklearn.metrics
from scipy import interp
import matplotlib.pyplot as plt
import pandas as pd
import cv2
from itertools import cycle
# Plot ROC and PRC plots
def ROC_PRC(outtl, pdx, path, name, fdict, dm, accur, pmd):
if pmd == 'subtype':
rdd = 4
else:
rdd = 2
if rdd > 2:
# Compute ROC and PRC curve and ROC and PRC area for each class
fpr = dict()
tpr = dict()
roc_auc = dict()
# PRC
# For each class
precision = dict()
recall = dict()
average_precision = dict()
microy = []
microscore = []
for i in range(rdd):
fpr[i], tpr[i], _ = sklearn.metrics.roc_curve(np.asarray((outtl.iloc[:, 0].values == int(i)).astype('uint8')),
np.asarray(pdx[:, i]).ravel())
try:
roc_auc[i] = sklearn.metrics.roc_auc_score(np.asarray((outtl.iloc[:, 0].values == int(i)).astype('uint8')),
np.asarray(pdx[:, i]).ravel())
except ValueError:
roc_auc[i] = np.nan
microy.extend(np.asarray((outtl.iloc[:, 0].values == int(i)).astype('uint8')))
microscore.extend(np.asarray(pdx[:, i]).ravel())
precision[i], recall[i], _ = \
sklearn.metrics.precision_recall_curve(np.asarray((outtl.iloc[:, 0].values == int(i)).astype('uint8')),
np.asarray(pdx[:, i]).ravel())
try:
average_precision[i] = \
sklearn.metrics.average_precision_score(np.asarray((outtl.iloc[:, 0].values == int(i)).astype('uint8')),
np.asarray(pdx[:, i]).ravel())
except ValueError:
average_precision[i] = np.nan
# Compute micro-average ROC curve and ROC area
fpr["micro"], tpr["micro"], _ = sklearn.metrics.roc_curve(np.asarray(microy).ravel(),
np.asarray(microscore).ravel())
roc_auc["micro"] = sklearn.metrics.auc(fpr["micro"], tpr["micro"])
# A "micro-average": quantifying score on all classes jointly
precision["micro"], recall["micro"], _ = sklearn.metrics.precision_recall_curve(np.asarray(microy).ravel(),
np.asarray(microscore).ravel())
average_precision["micro"] = sklearn.metrics.average_precision_score(np.asarray(microy).ravel(),
np.asarray(microscore).ravel(),
average="micro")
# Compute macro-average ROC curve and ROC area
# First aggregate all false positive rates
all_fpr = np.unique(np.concatenate([fpr[i] for i in range(rdd)]))
# Then interpolate all ROC curves at this points
mean_tpr = np.zeros_like(all_fpr)
for i in range(rdd):
mean_tpr += interp(all_fpr, fpr[i], tpr[i])
# Finally average it and compute AUC
mean_tpr /= rdd
fpr["macro"] = all_fpr
tpr["macro"] = mean_tpr
roc_auc["macro"] = sklearn.metrics.auc(fpr["macro"], tpr["macro"])
# Plot all ROC curves
plt.figure()
plt.plot(fpr["micro"], tpr["micro"],
label='micro-average ROC curve (area = {0:0.5f})'
''.format(roc_auc["micro"]),
color='deeppink', linestyle=':', linewidth=4)
plt.plot(fpr["macro"], tpr["macro"],
label='macro-average ROC curve (area = {0:0.5f})'
''.format(roc_auc["macro"]),
color='navy', linestyle=':', linewidth=4)
colors = cycle(['aqua', 'darkorange', 'cornflowerblue', 'red'])
for i, color in zip(range(rdd), colors):
plt.plot(fpr[i], tpr[i], color=color, lw=2,
label='ROC curve of {0} (area = {1:0.5f})'.format(fdict[i], roc_auc[i]))
print('{0} AUC of {1} = {2:0.5f}'.format(dm, fdict[i], roc_auc[i]))
plt.plot([0, 1], [0, 1], 'k--', lw=2)
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('ROC of {}'.format(name))
plt.legend(loc="lower right")
plt.savefig("../Results/{}/out/{}_{}_ROC.png".format(path, name, dm))
print('Average precision score, micro-averaged over all classes: {0:0.5f}'.format(average_precision["micro"]))
# Plot all PRC curves
colors = cycle(['navy', 'turquoise', 'darkorange', 'cornflowerblue', 'teal', 'red'])
plt.figure(figsize=(7, 9))
f_scores = np.linspace(0.2, 0.8, num=4)
lines = []
labels = []
for f_score in f_scores:
x = np.linspace(0.01, 1)
y = f_score * x / (2 * x - f_score)
l, = plt.plot(x[y >= 0], y[y >= 0], color='gray', alpha=0.2)
plt.annotate('f1={0:0.1f}'.format(f_score), xy=(0.9, y[45] + 0.02))
lines.append(l)
labels.append('iso-f1 curves')
l, = plt.plot(recall["micro"], precision["micro"], color='gold', lw=2)
lines.append(l)
labels.append('micro-average Precision-recall (area = {0:0.5f})'
''.format(average_precision["micro"]))
for i, color in zip(range(rdd), colors):
l, = plt.plot(recall[i], precision[i], color=color, lw=2)
lines.append(l)
labels.append('Precision-recall for {0} (area = {1:0.5f})'.format(fdict[i], average_precision[i]))
print('{0} Average Precision of {1} = {2:0.5f}'.format(dm, fdict[i], average_precision[i]))
fig = plt.gcf()
fig.subplots_adjust(bottom=0.25)
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.title('{} Precision-Recall curve: Average Accu={}'.format(name, accur))
plt.legend(lines, labels, loc=(0, -.38), prop=dict(size=12))
plt.savefig("../Results/{}/out/{}_{}_PRC.png".format(path, name, dm))
else:
tl = outtl.values[:, 0].ravel()
y_score = np.asarray(pdx[:, 1]).ravel()
auc = sklearn.metrics.roc_auc_score(tl, y_score)
auc = round(auc, 5)
print('{0} AUC = {1:0.5f}'.format(dm, auc))
fpr, tpr, _ = sklearn.metrics.roc_curve(tl, y_score)
plt.figure()
lw = 2
plt.plot(fpr, tpr, color='darkorange',
lw=lw, label='ROC curve (area = %0.5f)' % auc)
plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('{} ROC of {}'.format(name, pmd))
plt.legend(loc="lower right")
plt.savefig("../Results/{}/out/{}_{}_ROC.png".format(path, name, dm))
average_precision = sklearn.metrics.average_precision_score(tl, y_score)
print('Average precision-recall score: {0:0.5f}'.format(average_precision))
plt.figure()
f_scores = np.linspace(0.2, 0.8, num=4)
for f_score in f_scores:
x = np.linspace(0.01, 1)
y = f_score * x / (2 * x - f_score)
l, = plt.plot(x[y >= 0], y[y >= 0], color='gray', alpha=0.2)
plt.annotate('f1={0:0.1f}'.format(f_score), xy=(0.9, y[45] + 0.02))
precision, recall, _ = sklearn.metrics.precision_recall_curve(tl, y_score)
plt.step(recall, precision, color='b', alpha=0.2,
where='post')
plt.fill_between(recall, precision, step='post', alpha=0.2,
color='b')
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.ylim([0.0, 1.05])
plt.xlim([0.0, 1.0])
plt.title('{} {} PRC: AP={:0.5f}; Accu={}'.format(pmd, name, average_precision, accur))
plt.savefig("../Results/{}/out/{}_{}_PRC.png".format(path, name, dm))
# slide level; need prediction scores, true labels, output path, and name of the files for metrics;
# accuracy, AUROC; AUPRC.
def slide_metrics(inter_pd, path, name, fordict, pmd):
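    # Aggregate tile-level scores to slide level: average each class score over all tiles of a slide,
    # then take the argmax of the averaged scores as the slide-level prediction.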
inter_pd = inter_pd.drop(['L0path', 'L1path', 'L2path', 'label', 'Prediction'], axis=1)
inter_pd = inter_pd.groupby(['slide']).mean()
inter_pd = inter_pd.round({'True_label': 0})
if pmd == 'subtype':
inter_pd['Prediction'] = inter_pd[
['POLE_score', 'MSI_score', 'Endometrioid_score', 'Serous-like_score']].idxmax(axis=1)
redict = {'MSI_score': int(1), 'Endometrioid_score': int(2), 'Serous-like_score': int(3), 'POLE_score': int(0)}
elif pmd == 'histology':
inter_pd['Prediction'] = inter_pd[
['Endometrioid_score', 'Serous_score']].idxmax(axis=1)
redict = {'Endometrioid_score': int(0), 'Serous_score': int(1)}
elif pmd == 'MSIst':
inter_pd['Prediction'] = inter_pd[
['MSS_score', 'MSI-H_score']].idxmax(axis=1)
redict = {'MSI-H_score': int(1), 'MSS_score': int(0)}
else:
inter_pd['Prediction'] = inter_pd[['NEG_score', 'POS_score']].idxmax(axis=1)
redict = {'NEG_score': int(0), 'POS_score': int(1)}
inter_pd['Prediction'] = inter_pd['Prediction'].replace(redict)
# accuracy calculations
tott = inter_pd.shape[0]
accout = inter_pd.loc[inter_pd['Prediction'] == inter_pd['True_label']]
accu = accout.shape[0]
accurr = round(accu/tott, 5)
print('Slide Total Accuracy: '+str(accurr))
if pmd == 'subtype':
for i in range(4):
accua = accout[accout.True_label == i].shape[0]
tota = inter_pd[inter_pd.True_label == i].shape[0]
try:
accuar = round(accua / tota, 5)
print('Slide {} Accuracy: '.format(fordict[i])+str(accuar))
except ZeroDivisionError:
print("No data for {}.".format(fordict[i]))
try:
        outtl_slide = inter_pd['True_label'].to_frame(name='True_label')
if pmd == 'subtype':
pdx_slide = inter_pd[['POLE_score', 'MSI_score', 'Endometrioid_score', 'Serous-like_score']].values
elif pmd == 'MSIst':
pdx_slide = inter_pd[['MSS_score', 'MSI-H_score']].values
elif pmd == 'histology':
pdx_slide = inter_pd[['Endometrioid_score', 'Serous_score']].values
else:
pdx_slide = inter_pd[['NEG_score', 'POS_score']].values
ROC_PRC(outtl_slide, pdx_slide, path, name, fordict, 'slide', accurr, pmd)
except ValueError:
print('Not able to generate plots based on this set!')
inter_pd['Prediction'] = inter_pd['Prediction'].replace(fordict)
inter_pd['True_label'] = inter_pd['True_label'].replace(fordict)
inter_pd.to_csv("../Results/{}/out/{}_slide.csv".format(path, name), index=True)
# for real image prediction, just output the prediction scores as csv
def realout(pdx, path, name, pmd):
if pmd == 'subtype':
lbdict = {1: 'MSI', 2: 'Endometrioid', 3: 'Serous-like', 0: 'POLE'}
elif pmd == 'histology':
lbdict = {0: 'Endometrioid', 1: 'Serous'}
elif pmd == 'MSIst':
lbdict = {1: 'MSI-H', 0: 'MSS'}
else:
lbdict = {0: 'negative', 1: pmd}
pdx = np.asmatrix(pdx)
prl = pdx.argmax(axis=1).astype('uint8')
prl = pd.DataFrame(prl, columns=['Prediction'])
prl = prl.replace(lbdict)
if pmd == 'subtype':
out = pd.DataFrame(pdx, columns=['POLE_score', 'MSI_score', 'Endometrioid_score', 'Serous-like_score'])
elif pmd == 'histology':
out = pd.DataFrame(pdx, columns=['Endometrioid_score', 'Serous_score'])
elif pmd == 'MSIst':
out = pd.DataFrame(pdx, columns=['MSS_score', 'MSI-H_score'])
else:
out = pd.DataFrame(pdx, columns=['NEG_score', 'POS_score'])
out.reset_index(drop=True, inplace=True)
prl.reset_index(drop=True, inplace=True)
out = pd.concat([out, prl], axis=1)
out.insert(loc=0, column='Num', value=out.index)
out.to_csv("../Results/{}/out/{}.csv".format(path, name), index=False)
# tile level; need prediction scores, true labels, output path, and name of the files for metrics; accuracy, AUROC; PRC.
def metrics(pdx, tl, path, name, pmd, ori_test=None):
# format clean up
tl = np.asmatrix(tl)
tl = tl.argmax(axis=1).astype('uint8')
pdxt = np.asmatrix(pdx)
prl = pdxt.argmax(axis=1).astype('uint8')
prl = pd.DataFrame(prl, columns=['Prediction'])
if pmd == 'subtype':
lbdict = {1: 'MSI', 2: 'Endometrioid', 3: 'Serous-like', 0: 'POLE'}
        outt = pd.DataFrame(pdxt, columns=['POLE_score', 'MSI_score', 'Endometrioid_score', 'Serous-like_score'])
from sales_analysis.data_pipeline import BASEPATH
from sales_analysis.data_pipeline._pipeline import SalesPipeline
import pytest
import os
import pandas as pd
# --------------------------------------------------------------------------
# Fixtures
@pytest.fixture
def pipeline():
FILEPATH = os.path.join(BASEPATH, "data")
DATA_FILES = [f for f in os.listdir(FILEPATH) if f.endswith('.csv')]
DATA = {f : pd.read_csv(os.path.join(FILEPATH, f)) for f in DATA_FILES}
return SalesPipeline(**DATA)
# --------------------------------------------------------------------------
# Data
data = {'customers': {pd.Timestamp('2019-08-01 00:00:00'): 9,
pd.Timestamp('2019-08-02 00:00:00'): 10,
pd.Timestamp('2019-08-03 00:00:00'): 10,
pd.Timestamp('2019-08-04 00:00:00'): 10,
pd.Timestamp('2019-08-05 00:00:00'): 9,
pd.Timestamp('2019-08-06 00:00:00'): 9,
pd.Timestamp('2019-08-07 00:00:00'): 10,
pd.Timestamp('2019-08-08 00:00:00'): 8,
pd.Timestamp('2019-08-09 00:00:00'): 5,
pd.Timestamp('2019-08-10 00:00:00'): 5,
pd.Timestamp('2019-08-11 00:00:00'): 10,
pd.Timestamp('2019-08-12 00:00:00'): 10,
pd.Timestamp('2019-08-13 00:00:00'): 6,
pd.Timestamp('2019-08-14 00:00:00'): 7,
pd.Timestamp('2019-08-15 00:00:00'): 10,
pd.Timestamp('2019-08-16 00:00:00'): 8,
pd.Timestamp('2019-08-17 00:00:00'): 7,
pd.Timestamp('2019-08-18 00:00:00'): 9,
pd.Timestamp('2019-08-19 00:00:00'): 5,
pd.Timestamp('2019-08-20 00:00:00'): 5},
'total_discount_amount': {pd.Timestamp('2019-08-01 00:00:00'): 15152814.736907512,
pd.Timestamp('2019-08-02 00:00:00'): 20061245.64408109,
pd.Timestamp('2019-08-03 00:00:00'): 26441693.751396574,
pd.Timestamp('2019-08-04 00:00:00'): 25783015.567048658,
pd.Timestamp('2019-08-05 00:00:00'): 16649773.993076814,
pd.Timestamp('2019-08-06 00:00:00'): 24744027.428384878,
pd.Timestamp('2019-08-07 00:00:00'): 21641181.771564845,
pd.Timestamp('2019-08-08 00:00:00'): 27012160.85245146,
pd.Timestamp('2019-08-09 00:00:00'): 13806814.237002019,
pd.Timestamp('2019-08-10 00:00:00'): 9722459.599448118,
pd.Timestamp('2019-08-11 00:00:00'): 20450260.26194652,
pd.Timestamp('2019-08-12 00:00:00'): 22125711.151501,
pd.Timestamp('2019-08-13 00:00:00'): 11444206.200090334,
pd.Timestamp('2019-08-14 00:00:00'): 17677326.65707852,
pd.Timestamp('2019-08-15 00:00:00'): 26968819.12338184,
pd.Timestamp('2019-08-16 00:00:00'): 22592246.991756547,
pd.Timestamp('2019-08-17 00:00:00'): 15997597.519811645,
pd.Timestamp('2019-08-18 00:00:00'): 17731498.506244037,
pd.Timestamp('2019-08-19 00:00:00'): 22127822.876592986,
pd.Timestamp('2019-08-20 00:00:00'): 5550506.789972418},
'items': {pd.Timestamp('2019-08-01 00:00:00'): 2895,
pd.Timestamp('2019-08-02 00:00:00'): 3082,
pd.Timestamp('2019-08-03 00:00:00'): 3559,
pd.Timestamp('2019-08-04 00:00:00'): 3582,
pd.Timestamp('2019-08-05 00:00:00'): 2768,
pd.Timestamp('2019-08-06 00:00:00'): 3431,
pd.Timestamp('2019-08-07 00:00:00'): 2767,
pd.Timestamp('2019-08-08 00:00:00'): 2643,
pd.Timestamp('2019-08-09 00:00:00'): 1506,
pd.Timestamp('2019-08-10 00:00:00'): 1443,
pd.Timestamp('2019-08-11 00:00:00'): 2466,
pd.Timestamp('2019-08-12 00:00:00'): 3482,
pd.Timestamp('2019-08-13 00:00:00'): 1940,
pd.Timestamp('2019-08-14 00:00:00'): 1921,
pd.Timestamp('2019-08-15 00:00:00'): 3479,
pd.Timestamp('2019-08-16 00:00:00'): 3053,
pd.Timestamp('2019-08-17 00:00:00'): 2519,
pd.Timestamp('2019-08-18 00:00:00'): 2865,
pd.Timestamp('2019-08-19 00:00:00'): 1735,
pd.Timestamp('2019-08-20 00:00:00'): 1250},
'order_total_avg': {pd.Timestamp('2019-08-01 00:00:00'): 1182286.0960463749,
pd.Timestamp('2019-08-02 00:00:00'): 1341449.559055637,
pd.Timestamp('2019-08-03 00:00:00'): 1270616.0372525519,
pd.Timestamp('2019-08-04 00:00:00'): 1069011.1516039693,
pd.Timestamp('2019-08-05 00:00:00'): 1355304.7342628485,
pd.Timestamp('2019-08-06 00:00:00'): 1283968.435650978,
pd.Timestamp('2019-08-07 00:00:00'): 1319110.4787216866,
pd.Timestamp('2019-08-08 00:00:00'): 1027231.5196824896,
pd.Timestamp('2019-08-09 00:00:00'): 1201471.0717715647,
pd.Timestamp('2019-08-10 00:00:00'): 1314611.2300065856,
pd.Timestamp('2019-08-11 00:00:00'): 1186152.4565363638,
pd.Timestamp('2019-08-12 00:00:00'): 1155226.4552911327,
pd.Timestamp('2019-08-13 00:00:00'): 1346981.8930212667,
pd.Timestamp('2019-08-14 00:00:00'): 1019646.0386455443,
pd.Timestamp('2019-08-15 00:00:00'): 1286793.278547962,
pd.Timestamp('2019-08-16 00:00:00'): 1254721.8660029566,
pd.Timestamp('2019-08-17 00:00:00'): 1419237.673786449,
pd.Timestamp('2019-08-18 00:00:00'): 1173087.9508403398,
pd.Timestamp('2019-08-19 00:00:00'): 1162434.8033358732,
pd.Timestamp('2019-08-20 00:00:00'): 1046669.750923031},
'discount_rate_avg': {pd.Timestamp('2019-08-01 00:00:00'): 0.1252497888814673,
pd.Timestamp('2019-08-02 00:00:00'): 0.12950211356271726,
pd.Timestamp('2019-08-03 00:00:00'): 0.1490744307031331,
pd.Timestamp('2019-08-04 00:00:00'): 0.15162918618667656,
pd.Timestamp('2019-08-05 00:00:00'): 0.13130630218741238,
pd.Timestamp('2019-08-06 00:00:00'): 0.13373546744128126,
                          pd.Timestamp('2019-08-07 00:00:00'):
from binance.websockets import BinanceSocketManager
from binance.client import Client
from dotenv import load_dotenv
import os
import pprint
import plotly.graph_objects as go
import pandas as pd
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
import json
# from ..utils.coinNamesList import get_coin_names_and_format,symbol_to_coin,get_symbol_base_asset_dict
import numpy as np
from .. import *
from datetime import datetime
pp = pprint.PrettyPrinter(indent=4)
load_dotenv()
global df
df = pd.DataFrame(columns = ['dateTime','open','high','low','close'])
api_key = os.environ.get('api_key')
api_secret = os.environ.get('api_secret')
client = Client(api_key, api_secret)
options = get_symbol_base_asset_dict()
bm = BinanceSocketManager(client)
def get_process_message(coin):
def process_message(msg):
try:
pp.pprint(msg)
if msg['s'] == coin:
                # DataFrame.append does not accept positional scalars; write the OHLC row in place instead
                df.loc[len(df)] = [client.get_server_time()['serverTime'], msg['o'], msg['h'], msg['l'], msg['c']]
print(client.get_server_time()['serverTime'],msg['o'],msg['h'],msg['l'],msg['c'])
except Exception:
bm.close()
return process_message
bm = BinanceSocketManager(client)
conn_key = bm.start_kline_socket('BNBBTC', get_process_message('BNBBTC'), interval=Client.KLINE_INTERVAL_30MINUTE)
bm.start()
app = dash.Dash(__name__)
interval = dcc.Interval( # Update interval for grpah
id='interval-component',
interval=5000, # in milliseconds
n_intervals=0
)
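# NOTE: this Interval component is defined but never added to the layout or wired into the callback
# below, so the graph only refreshes when the dropdown value changes.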
app.layout = html.Div([
html.H2("Choose a Crypo Symbol from below:"),
dcc.Dropdown(
id='my-dropdown',
options=options,
value='BNBBTC'
),
dcc.Graph(id="graph"),
])
@app.callback(
Output("graph", "figure"),
[Input('my-dropdown', 'value')])
def display_candlestick(token_symbol='BNBBTC'):
    # get_recent_trades() takes no interval argument and does not return OHLC kline rows;
    # the kline endpoint matches the column layout used below.
    candles = client.get_klines(symbol=token_symbol, interval=Client.KLINE_INTERVAL_1MINUTE)
    df = pd.DataFrame(candles, columns=['dateTime', 'open', 'high', 'low', 'close', 'volume', 'closeTime', 'quoteAssetVolume', 'numberOfTrades', 'takerBuyBaseVol', 'takerBuyQuoteVol', 'ignore'])
import numpy as np
import pytest
import pandas as pd
from pandas import DataFrame, Index, MultiIndex, Series
import pandas._testing as tm
class TestDataFrameSubclassing:
def test_frame_subclassing_and_slicing(self):
# Subclass frame and ensure it returns the right class on slicing it
# In reference to PR 9632
class CustomSeries(Series):
@property
def _constructor(self):
return CustomSeries
def custom_series_function(self):
return "OK"
class CustomDataFrame(DataFrame):
"""
Subclasses pandas DF, fills DF with simulation results, adds some
custom plotting functions.
"""
def __init__(self, *args, **kw):
super().__init__(*args, **kw)
@property
def _constructor(self):
return CustomDataFrame
_constructor_sliced = CustomSeries
def custom_frame_function(self):
return "OK"
data = {"col1": range(10), "col2": range(10)}
cdf = CustomDataFrame(data)
# Did we get back our own DF class?
assert isinstance(cdf, CustomDataFrame)
# Do we get back our own Series class after selecting a column?
cdf_series = cdf.col1
assert isinstance(cdf_series, CustomSeries)
assert cdf_series.custom_series_function() == "OK"
# Do we get back our own DF class after slicing row-wise?
cdf_rows = cdf[1:5]
assert isinstance(cdf_rows, CustomDataFrame)
assert cdf_rows.custom_frame_function() == "OK"
# Make sure sliced part of multi-index frame is custom class
mcol = pd.MultiIndex.from_tuples([("A", "A"), ("A", "B")])
cdf_multi = CustomDataFrame([[0, 1], [2, 3]], columns=mcol)
assert isinstance(cdf_multi["A"], CustomDataFrame)
mcol = pd.MultiIndex.from_tuples([("A", ""), ("B", "")])
cdf_multi2 = CustomDataFrame([[0, 1], [2, 3]], columns=mcol)
assert isinstance(cdf_multi2["A"], CustomSeries)
def test_dataframe_metadata(self):
df = tm.SubclassedDataFrame(
{"X": [1, 2, 3], "Y": [1, 2, 3]}, index=["a", "b", "c"]
)
df.testattr = "XXX"
assert df.testattr == "XXX"
assert df[["X"]].testattr == "XXX"
assert df.loc[["a", "b"], :].testattr == "XXX"
assert df.iloc[[0, 1], :].testattr == "XXX"
# see gh-9776
assert df.iloc[0:1, :].testattr == "XXX"
# see gh-10553
unpickled = tm.round_trip_pickle(df)
tm.assert_frame_equal(df, unpickled)
assert df._metadata == unpickled._metadata
assert df.testattr == unpickled.testattr
def test_indexing_sliced(self):
# GH 11559
df = tm.SubclassedDataFrame(
{"X": [1, 2, 3], "Y": [4, 5, 6], "Z": [7, 8, 9]}, index=["a", "b", "c"]
)
res = df.loc[:, "X"]
exp = tm.SubclassedSeries([1, 2, 3], index=list("abc"), name="X")
tm.assert_series_equal(res, exp)
assert isinstance(res, tm.SubclassedSeries)
res = df.iloc[:, 1]
exp = tm.SubclassedSeries([4, 5, 6], index=list("abc"), name="Y")
tm.assert_series_equal(res, exp)
assert isinstance(res, tm.SubclassedSeries)
res = df.loc[:, "Z"]
exp = tm.SubclassedSeries([7, 8, 9], index=list("abc"), name="Z")
tm.assert_series_equal(res, exp)
assert isinstance(res, tm.SubclassedSeries)
res = df.loc["a", :]
exp = tm.SubclassedSeries([1, 4, 7], index=list("XYZ"), name="a")
tm.assert_series_equal(res, exp)
assert isinstance(res, tm.SubclassedSeries)
res = df.iloc[1, :]
exp = tm.SubclassedSeries([2, 5, 8], index=list("XYZ"), name="b")
tm.assert_series_equal(res, exp)
assert isinstance(res, tm.SubclassedSeries)
res = df.loc["c", :]
exp = tm.SubclassedSeries([3, 6, 9], index=list("XYZ"), name="c")
tm.assert_series_equal(res, exp)
assert isinstance(res, tm.SubclassedSeries)
def test_subclass_attr_err_propagation(self):
# GH 11808
class A(DataFrame):
@property
def bar(self):
return self.i_dont_exist
with pytest.raises(AttributeError, match=".*i_dont_exist.*"):
A().bar
def test_subclass_align(self):
# GH 12983
df1 = tm.SubclassedDataFrame(
{"a": [1, 3, 5], "b": [1, 3, 5]}, index=list("ACE")
)
df2 = tm.SubclassedDataFrame(
{"c": [1, 2, 4], "d": [1, 2, 4]}, index=list("ABD")
)
res1, res2 = df1.align(df2, axis=0)
exp1 = tm.SubclassedDataFrame(
{"a": [1, np.nan, 3, np.nan, 5], "b": [1, np.nan, 3, np.nan, 5]},
index=list("ABCDE"),
)
exp2 = tm.SubclassedDataFrame(
{"c": [1, 2, np.nan, 4, np.nan], "d": [1, 2, np.nan, 4, np.nan]},
index=list("ABCDE"),
)
assert isinstance(res1, tm.SubclassedDataFrame)
tm.assert_frame_equal(res1, exp1)
assert isinstance(res2, tm.SubclassedDataFrame)
tm.assert_frame_equal(res2, exp2)
res1, res2 = df1.a.align(df2.c)
assert isinstance(res1, tm.SubclassedSeries)
tm.assert_series_equal(res1, exp1.a)
assert isinstance(res2, tm.SubclassedSeries)
tm.assert_series_equal(res2, exp2.c)
def test_subclass_align_combinations(self):
# GH 12983
df = tm.SubclassedDataFrame({"a": [1, 3, 5], "b": [1, 3, 5]}, index=list("ACE"))
s = tm.SubclassedSeries([1, 2, 4], index=list("ABD"), name="x")
# frame + series
res1, res2 = df.align(s, axis=0)
exp1 = tm.SubclassedDataFrame(
{"a": [1, np.nan, 3, np.nan, 5], "b": [1, np.nan, 3, np.nan, 5]},
index=list("ABCDE"),
)
# name is lost when
exp2 = tm.SubclassedSeries(
[1, 2, np.nan, 4, np.nan], index=list("ABCDE"), name="x"
)
assert isinstance(res1, tm.SubclassedDataFrame)
tm.assert_frame_equal(res1, exp1)
assert isinstance(res2, tm.SubclassedSeries)
tm.assert_series_equal(res2, exp2)
# series + frame
res1, res2 = s.align(df)
assert isinstance(res1, tm.SubclassedSeries)
tm.assert_series_equal(res1, exp2)
assert isinstance(res2, tm.SubclassedDataFrame)
tm.assert_frame_equal(res2, exp1)
def test_subclass_iterrows(self):
# GH 13977
df = tm.SubclassedDataFrame({"a": [1]})
for i, row in df.iterrows():
assert isinstance(row, tm.SubclassedSeries)
tm.assert_series_equal(row, df.loc[i])
def test_subclass_stack(self):
# GH 15564
df = tm.SubclassedDataFrame(
[[1, 2, 3], [4, 5, 6], [7, 8, 9]],
index=["a", "b", "c"],
columns=["X", "Y", "Z"],
)
res = df.stack()
exp = tm.SubclassedSeries(
[1, 2, 3, 4, 5, 6, 7, 8, 9], index=[list("aaabbbccc"), list("XYZXYZXYZ")]
)
tm.assert_series_equal(res, exp)
def test_subclass_stack_multi(self):
# GH 15564
df = tm.SubclassedDataFrame(
[[10, 11, 12, 13], [20, 21, 22, 23], [30, 31, 32, 33], [40, 41, 42, 43]],
index=MultiIndex.from_tuples(
list(zip(list("AABB"), list("cdcd"))), names=["aaa", "ccc"]
),
columns=MultiIndex.from_tuples(
list(zip(list("WWXX"), list("yzyz"))), names=["www", "yyy"]
),
)
exp = tm.SubclassedDataFrame(
[
[10, 12],
[11, 13],
[20, 22],
[21, 23],
[30, 32],
[31, 33],
[40, 42],
[41, 43],
],
index=MultiIndex.from_tuples(
list(zip(list("AAAABBBB"), list("ccddccdd"), list("yzyzyzyz"))),
names=["aaa", "ccc", "yyy"],
),
columns=Index(["W", "X"], name="www"),
)
res = df.stack()
tm.assert_frame_equal(res, exp)
res = df.stack("yyy")
        tm.assert_frame_equal(res, exp)
# -*- coding: utf-8 -*-
"""
Created on Thu Dec 12 00:00:25 2019
@author: Dell
"""
'''
The absolute price oscillator is a class of indicators that
builds on top of moving averages of prices to capture specific short-term deviations in
prices.
The absolute price oscillator is computed by finding the difference between a fast
exponential moving average and a slow exponential moving average. Intuitively, it is
trying to measure how far the more reactive EMA is deviating from the more
stable EMA. A large difference is usually interpreted as one of two things:
instrument prices are starting to trend or break out, or instrument prices are far away from
their equilibrium prices, in other words, overbought or oversold:
This is an implementation of the absolute price oscillator, with the faster EMA using a period of 10
days and a slower EMA using a period of 40 days, and default smoothing factors being 2/11
and 2/41, respectively, for the two EMAs.
'''
import pandas as pd
import yfinance as yf
import matplotlib.pyplot as plt
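# EMA recurrence used below: EMA_t = EMA_{t-1} + alpha * (price_t - EMA_{t-1}), with alpha = 2 / (N + 1);
# the APO is then simply EMA_fast_t - EMA_slow_t.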
#Create variables for fast EMA
nFast=10  # NumOfPeriodsForFast (fast EMA lookback)
'''
the shorter the time period,
the more reactive the EMA is to new price observations; in other words, the EMA
converges to new price observations faster and forgets older observations faster, also
referred to as Fast EMA.
'''
smoothingConstantFast=2/(nFast+1)
EMApriceFast=0
#Create variables for Slow EMA
nSlow=40
'''
The longer the time period, the less reactive the EMA is to new
price observations; that is, EMA converges to new price observations slower and forgets
older observations slower, also referred to as Slow EMA.
'''
smoothingConstantSlow=2/(nSlow+1)
EMApriceSlow=0
#Create lists to hold newly calculated values
EMAfastValues=[]
EMAslowValues=[]
APO=[]
#download data
data=yf.download('GOOG',start='2015-9-1',end='2018-11-11')
adjustedClose=data['Adj Close']
#Calulate fast and close prices
for price in adjustedClose:
if EMApriceFast==0:
EMApriceFast=price
EMApriceSlow=price
else:
EMApriceFast=(price - EMApriceFast) * smoothingConstantFast + EMApriceFast
EMApriceSlow=(price - EMApriceSlow) * smoothingConstantSlow + EMApriceSlow
EMAfastValues.append(EMApriceFast)
EMAslowValues.append(EMApriceSlow)
APO.append(EMApriceFast-EMApriceSlow)
#create dataframe of values
df = pd.DataFrame(adjustedClose, index=data.index)
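# --- Illustrative sketch, not part of the original (truncated) script: one way the series
# --- computed above could be attached to the price frame and plotted with matplotlib.
# --- It only assumes the lists built in the loop line up with the price index, which they do
# --- by construction (one append per price observation).
df['EMA_fast'] = EMAfastValues
df['EMA_slow'] = EMAslowValues
df['APO'] = APO
fig, (ax_price, ax_apo) = plt.subplots(2, 1, sharex=True, figsize=(10, 6))
df[['Adj Close', 'EMA_fast', 'EMA_slow']].plot(ax=ax_price)
df['APO'].plot(ax=ax_apo, title='Absolute Price Oscillator (fast EMA - slow EMA)')
plt.show()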
import logging
import pathlib
import numpy as np
import pandas as pd
import coloredlogs
from pathlib import Path
from typing import Union,Dict,List
from .utils import sel_column_label, train_val_test_split, save_csv, flair_tags, flair_tags_as_string
from flair.datasets import CSVClassificationCorpus
# logger = logging.getLogger("nlp_dataset")
# logger.setLevel(logging.DEBUG)
# formatter = logging.Formatter('%(asctime)s:%(name)s:%(message)s')
# stream_handler = logging.StreamHandler()
# stream_handler.setFormatter(formatter)
# logger.addHandler(stream_handler)
# coloredlogs.install(fmt='%(asctime)s %(name)s %(levelname)s %(message)s',level='DEBUG',logger = logger)
logger = logging.getLogger("entiretydotai")
class FlairDataset():
"""[summary]
Raises:
FileNotFoundError: [description]
Returns:
[type]: [description]
"""
def __init__(self,
data_folder: Union[str, Path],
column_name_map: Dict[int, str],
train_file=None,
test_file=None,
dev_file=None,
file_format=None,
delimiter = None,
encoding: str = "utf-8",
train_data: pd.DataFrame = None,
val_data: pd.DataFrame = None,
test_data : pd.DataFrame = None):
super().__init__()
self.data_folder = data_folder
self.column_name_map = column_name_map
self.train_file = train_file
self.test_file = test_file
self.dev_file = dev_file
self.file_format = file_format
self.delimiter = delimiter
self.processed_file = None
if self.file_format == '.csv':
logger.debug(f'Loading data in Flair CSVClassificationCorpus from path :{self.data_folder}')
self.corpus = CSVClassificationCorpus(
data_folder=self.data_folder,
train_file=self.train_file,
dev_file=self.dev_file,
test_file=self.test_file,
column_name_map=self.column_name_map,
delimiter=self.delimiter)
logger.debug(f'Number of Sentences loaded[Train]:{self.corpus.train.total_sentence_count}')
logger.debug(f'Type of tokenizer:{self.corpus.train.tokenizer.__name__}')
logger.debug(f'Sample sentence and Label from [Train]:{self.corpus.train.__getitem__(1)}\n')
logger.debug(f'Number of Sentences loaded[Valid]:{self.corpus.dev.total_sentence_count}')
logger.debug(f'Type of tokenizer:{self.corpus.dev.tokenizer.__name__}')
logger.debug(f'Sample sentence and Label from [Train]:{self.corpus.dev.__getitem__(1)}\n')
logger.debug(f'Number of Sentences loaded[Test]:{self.corpus.test.total_sentence_count}')
logger.debug(f'Type of tokenizer:{self.corpus.test.tokenizer.__name__}')
logger.debug(f'Sample sentence and Label from [Train]:{self.corpus.test.__getitem__(1)}\n')
self.train_data = train_data
self.valid_data = val_data
self.test_data = test_data
@classmethod
def csv_classification(cls,
data_folder=Union[str, Path],
file_format: str = 'csv',
filename: str = 'data',
train_val_test_split_flag: str = True,
column_mapping: List = None,
val_split_size: List = [0.1, 0.1]):
p = Path(data_folder).resolve()
if p.is_dir():
logger.debug(f'Found directory : {p}')
files = list(p.rglob('*.'+file_format))
logger.debug(f'Number of files found {len(files)}')
if len(files) < 2:
logger.debug(f'Found 1 file : {files[0].name}')
train_val_test_split_flag = True
logger.debug("Setting train_val_test_split_flag to True")
if train_val_test_split_flag:
if files[0].stem.lower() == filename:
train_file = files[0].name
flair_mapping = ['text','label']
df, column_name_map = sel_column_label(files[0],
column_mapping,
flair_mapping)
logger.debug(f'[column_name_map] {column_name_map}')
train, valid, test = train_val_test_split(df, val_split_size)
path_to_save = Path(p.parent.parent/'interim')
save_csv(train, path_to_save, 'train')
save_csv(valid, path_to_save, 'valid')
save_csv(test, path_to_save, 'test')
return FlairDataset(data_folder=path_to_save,
column_name_map=column_name_map,
train_file='train.csv',
test_file='test.csv',
dev_file='valid.csv',
file_format='.csv',
delimiter=",",
train_data=train,
val_data=valid,
test_data=test)
else:
raise FileNotFoundError
else:
raise NotImplementedError
else:
pass
class FlairTagging():
def __init__(self, dataset: CSVClassificationCorpus = None):
super().__init__()
self.dataset = dataset
@property
    def list_ner_tags(self):
        '''
        List all NER/POS models available in the Flair package.'''
raise NotImplementedError
def __repr__(self, tokenizer=None):
if tokenizer is None:
            text = self.dataset.train_data.text[0]
tokens = str(text).split(" ")
return f'Text: {text} Tokens: {tokens}'
def add_tags(self, model: Union[str, Path] = 'ner-fast',
tag_type: str = 'ner', col_name: str = 'text',
extract_tags: bool = False, return_score: float = False,
replace_missing_tags: bool =True, missing_tags_value: str = "NA",
replace_missing_score: bool = True,
missing_score_value: np.float = np.NaN):
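        # Tag the text column (currently only the first rows of the training split are processed)
        # with a Flair sequence tagger such as 'ner-fast', then append the tokenized text, the
        # extracted tags and, optionally, per-tag confidence scores as new columns of the result.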
test = self.dataset.train_data.reset_index(drop=True).loc[:10,:].copy()
logger.debug(f'Shape of the dataframe:{test.shape}')
text = test[col_name].values
if extract_tags:
if return_score:
corpus_text, corpus_cleaned_ner_tag, corpus_score = flair_tags(
text,
model,
tag_type,
extract_tags,
return_score)
df = pd.concat([test.reset_index(drop=True),
pd.Series(corpus_text, name='tokenize_text'),
pd.Series(corpus_cleaned_ner_tag, name='tags'),
pd.Series(corpus_score, name='score')],
axis=1,
ignore_index=True)
return df
else:
corpus_text,corpus_cleaned_ner_tag = flair_tags(text,
model,
tag_type,
extract_tags,
return_score)
                df = pd.concat([test.reset_index(drop=True),
                                pd.Series(corpus_text, name='tokenize_text'),
                                pd.Series(corpus_cleaned_ner_tag, name='tags')],
                               axis=1, ignore_index=True)
                return df
import pandas as pd
import numpy as np
from ext.utils_model import calc_xgb, calc_ols
ONEHOT_MAX_UNIQUE_VALUES = 20
BIG_DATASET_SIZE = 500 * 1024 * 1024
def get_mem(df):
mem = df.memory_usage().sum() / 1000000
return f'{mem:.2f}Mb'
def transform_datetime_features(df):
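    # Expand every datetime column into numeric year/month/weekday/day/hour features.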
if df.shape[1] == 0:
return df
else:
datetime_columns = df.columns.values
res_date_cols = []
for col_name in datetime_columns:
df[col_name] = pd.to_datetime(df[col_name])
year = f'date_year_{col_name}'
month = f'date_month_{col_name}'
weekday = f'date_weekday_{col_name}'
day = f'date_day_{col_name}'
hour = f'date_hour_{col_name}'
df[year] = df[col_name].dt.year
df[month] = df[col_name].dt.month
df[weekday] = df[col_name].dt.weekday
df[day] = df[col_name].dt.day
df[hour] = df[col_name].dt.hour
res_date_cols += [year, month, weekday, day, hour]
return df
def transform_categorigical_features(df, freq=None):
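    # Frequency-encode categorical columns; pass the returned `freq` dict back in at test time
    # so the test set is encoded with the frequencies learned on the training set.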
if df.shape[1] == 0:
return df, None
else:
cat_columns = df.columns.values
if freq is None:
out_freq = {col: (df[col].value_counts()/df.shape[0]).to_dict() for col in cat_columns}
else:
out_freq = freq
for col in cat_columns:
col_name = f'category_{col}'
df[col_name] = df[col].map(out_freq[col])
return df, out_freq
def load_test_label(path):
y = pd.read_csv(path, low_memory=False).target
return y
def load_data(path, mode='train'):
is_big = False
if mode == 'train':
df = pd.read_csv(path, low_memory=False)
df.set_index('line_id', inplace=True)
line_id = pd.DataFrame(df.index)
y = df.target
df = df.drop('target', axis=1)
df['is_test'] = 0
is_test = df.is_test
df = df.drop('is_test', axis=1)
if df.memory_usage().sum() > BIG_DATASET_SIZE:
is_big = True
else:
df = pd.read_csv(path, low_memory=False)
df.set_index('line_id', inplace=True)
        line_id = pd.DataFrame(df.index)
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta
import os
import operator
import unittest
import cStringIO as StringIO
import nose
from numpy import nan
import numpy as np
import numpy.ma as ma
from pandas import Index, Series, TimeSeries, DataFrame, isnull, notnull
from pandas.core.index import MultiIndex
import pandas.core.datetools as datetools
from pandas.util import py3compat
from pandas.util.testing import assert_series_equal, assert_almost_equal
import pandas.util.testing as tm
#-------------------------------------------------------------------------------
# Series test cases
JOIN_TYPES = ['inner', 'outer', 'left', 'right']
class CheckNameIntegration(object):
def test_scalarop_preserve_name(self):
result = self.ts * 2
self.assertEquals(result.name, self.ts.name)
def test_copy_name(self):
result = self.ts.copy()
self.assertEquals(result.name, self.ts.name)
# def test_copy_index_name_checking(self):
# # don't want to be able to modify the index stored elsewhere after
# # making a copy
# self.ts.index.name = None
# cp = self.ts.copy()
# cp.index.name = 'foo'
# self.assert_(self.ts.index.name is None)
def test_append_preserve_name(self):
result = self.ts[:5].append(self.ts[5:])
self.assertEquals(result.name, self.ts.name)
def test_binop_maybe_preserve_name(self):
# names match, preserve
result = self.ts * self.ts
self.assertEquals(result.name, self.ts.name)
result = self.ts * self.ts[:-2]
self.assertEquals(result.name, self.ts.name)
# names don't match, don't preserve
cp = self.ts.copy()
cp.name = 'something else'
result = self.ts + cp
self.assert_(result.name is None)
def test_combine_first_name(self):
result = self.ts.combine_first(self.ts[:5])
self.assertEquals(result.name, self.ts.name)
def test_getitem_preserve_name(self):
result = self.ts[self.ts > 0]
self.assertEquals(result.name, self.ts.name)
result = self.ts[[0, 2, 4]]
self.assertEquals(result.name, self.ts.name)
result = self.ts[5:10]
self.assertEquals(result.name, self.ts.name)
def test_multilevel_name_print(self):
index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],
['one', 'two', 'three']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['first', 'second'])
s = Series(range(0,len(index)), index=index, name='sth')
expected = ["first second",
"foo one 0",
" two 1",
" three 2",
"bar one 3",
" two 4",
"baz two 5",
" three 6",
"qux one 7",
" two 8",
" three 9",
"Name: sth"]
expected = "\n".join(expected)
self.assertEquals(repr(s), expected)
def test_multilevel_preserve_name(self):
index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],
['one', 'two', 'three']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['first', 'second'])
s = Series(np.random.randn(len(index)), index=index, name='sth')
result = s['foo']
result2 = s.ix['foo']
self.assertEquals(result.name, s.name)
self.assertEquals(result2.name, s.name)
def test_name_printing(self):
# test small series
s = Series([0, 1, 2])
s.name = "test"
self.assert_("Name: test" in repr(s))
s.name = None
self.assert_(not "Name:" in repr(s))
# test big series (diff code path)
s = Series(range(0,1000))
s.name = "test"
self.assert_("Name: test" in repr(s))
s.name = None
self.assert_(not "Name:" in repr(s))
def test_pickle_preserve_name(self):
unpickled = self._pickle_roundtrip(self.ts)
self.assertEquals(unpickled.name, self.ts.name)
def _pickle_roundtrip(self, obj):
obj.save('__tmp__')
unpickled = Series.load('__tmp__')
os.remove('__tmp__')
return unpickled
def test_argsort_preserve_name(self):
result = self.ts.argsort()
self.assertEquals(result.name, self.ts.name)
def test_sort_index_name(self):
result = self.ts.sort_index(ascending=False)
self.assertEquals(result.name, self.ts.name)
def test_to_sparse_pass_name(self):
result = self.ts.to_sparse()
self.assertEquals(result.name, self.ts.name)
class SafeForSparse(object):
pass
class TestSeries(unittest.TestCase, CheckNameIntegration):
def setUp(self):
self.ts = tm.makeTimeSeries()
self.ts.name = 'ts'
self.series = tm.makeStringSeries()
self.series.name = 'series'
self.objSeries = tm.makeObjectSeries()
self.objSeries.name = 'objects'
self.empty = Series([], index=[])
def test_constructor(self):
# Recognize TimeSeries
self.assert_(isinstance(self.ts, TimeSeries))
# Pass in Series
derived = Series(self.ts)
self.assert_(isinstance(derived, TimeSeries))
self.assert_(tm.equalContents(derived.index, self.ts.index))
# Ensure new index is not created
self.assertEquals(id(self.ts.index), id(derived.index))
# Pass in scalar
scalar = Series(0.5)
self.assert_(isinstance(scalar, float))
# Mixed type Series
mixed = Series(['hello', np.NaN], index=[0, 1])
self.assert_(mixed.dtype == np.object_)
self.assert_(mixed[1] is np.NaN)
self.assert_(not isinstance(self.empty, TimeSeries))
self.assert_(not isinstance(Series({}), TimeSeries))
self.assertRaises(Exception, Series, np.random.randn(3, 3),
index=np.arange(3))
def test_constructor_empty(self):
empty = Series()
empty2 = Series([])
assert_series_equal(empty, empty2)
empty = Series(index=range(10))
empty2 = Series(np.nan, index=range(10))
assert_series_equal(empty, empty2)
def test_constructor_maskedarray(self):
data = ma.masked_all((3,), dtype=float)
result = Series(data)
expected = Series([nan, nan, nan])
assert_series_equal(result, expected)
data[0] = 0.0
data[2] = 2.0
index = ['a', 'b', 'c']
result = Series(data, index=index)
expected = Series([0.0, nan, 2.0], index=index)
assert_series_equal(result, expected)
def test_constructor_default_index(self):
s = Series([0, 1, 2])
assert_almost_equal(s.index, np.arange(3))
def test_constructor_corner(self):
df = tm.makeTimeDataFrame()
objs = [df, df]
s = Series(objs, index=[0, 1])
self.assert_(isinstance(s, Series))
def test_constructor_cast(self):
self.assertRaises(ValueError, Series, ['a', 'b', 'c'], dtype=float)
def test_constructor_dict(self):
d = {'a' : 0., 'b' : 1., 'c' : 2.}
result = Series(d, index=['b', 'c', 'd', 'a'])
expected = Series([1, 2, nan, 0], index=['b', 'c', 'd', 'a'])
assert_series_equal(result, expected)
def test_constructor_list_of_tuples(self):
data = [(1, 1), (2, 2), (2, 3)]
s = Series(data)
self.assertEqual(list(s), data)
def test_constructor_tuple_of_tuples(self):
data = ((1, 1), (2, 2), (2, 3))
s = Series(data)
self.assertEqual(tuple(s), data)
def test_fromDict(self):
data = {'a' : 0, 'b' : 1, 'c' : 2, 'd' : 3}
series = Series(data)
self.assert_(tm.is_sorted(series.index))
data = {'a' : 0, 'b' : '1', 'c' : '2', 'd' : datetime.now()}
series = Series(data)
self.assert_(series.dtype == np.object_)
data = {'a' : 0, 'b' : '1', 'c' : '2', 'd' : '3'}
series = Series(data)
self.assert_(series.dtype == np.object_)
data = {'a' : '0', 'b' : '1'}
series = Series(data, dtype=float)
self.assert_(series.dtype == np.float64)
def test_setindex(self):
# wrong type
series = self.series.copy()
self.assertRaises(TypeError, setattr, series, 'index', None)
# wrong length
series = self.series.copy()
self.assertRaises(AssertionError, setattr, series, 'index',
np.arange(len(series) - 1))
# works
series = self.series.copy()
series.index = np.arange(len(series))
self.assert_(isinstance(series.index, Index))
def test_array_finalize(self):
pass
def test_fromValue(self):
nans = Series(np.NaN, index=self.ts.index)
self.assert_(nans.dtype == np.float_)
self.assertEqual(len(nans), len(self.ts))
strings = Series('foo', index=self.ts.index)
self.assert_(strings.dtype == np.object_)
self.assertEqual(len(strings), len(self.ts))
d = datetime.now()
dates = Series(d, index=self.ts.index)
self.assert_(dates.dtype == np.object_)
self.assertEqual(len(dates), len(self.ts))
def test_contains(self):
tm.assert_contains_all(self.ts.index, self.ts)
def test_pickle(self):
unp_series = self._pickle_roundtrip(self.series)
unp_ts = self._pickle_roundtrip(self.ts)
assert_series_equal(unp_series, self.series)
assert_series_equal(unp_ts, self.ts)
def _pickle_roundtrip(self, obj):
obj.save('__tmp__')
unpickled = Series.load('__tmp__')
os.remove('__tmp__')
return unpickled
def test_getitem_get(self):
idx1 = self.series.index[5]
idx2 = self.objSeries.index[5]
self.assertEqual(self.series[idx1], self.series.get(idx1))
self.assertEqual(self.objSeries[idx2], self.objSeries.get(idx2))
self.assertEqual(self.series[idx1], self.series[5])
self.assertEqual(self.objSeries[idx2], self.objSeries[5])
self.assert_(self.series.get(-1) is None)
self.assertEqual(self.series[5], self.series.get(self.series.index[5]))
# missing
d = self.ts.index[0] - datetools.bday
self.assertRaises(KeyError, self.ts.__getitem__, d)
def test_iget(self):
s = Series(np.random.randn(10), index=range(0, 20, 2))
for i in range(len(s)):
result = s.iget(i)
exp = s[s.index[i]]
assert_almost_equal(result, exp)
# pass a slice
result = s.iget(slice(1, 3))
expected = s.ix[2:4]
assert_series_equal(result, expected)
def test_getitem_regression(self):
s = Series(range(5), index=range(5))
result = s[range(5)]
assert_series_equal(result, s)
def test_getitem_slice_bug(self):
s = Series(range(10), range(10))
result = s[-12:]
assert_series_equal(result, s)
result = s[-7:]
assert_series_equal(result, s[3:])
result = s[:-12]
assert_series_equal(result, s[:0])
def test_getitem_int64(self):
idx = np.int64(5)
self.assertEqual(self.ts[idx], self.ts[5])
def test_getitem_fancy(self):
slice1 = self.series[[1,2,3]]
slice2 = self.objSeries[[1,2,3]]
self.assertEqual(self.series.index[2], slice1.index[1])
self.assertEqual(self.objSeries.index[2], slice2.index[1])
self.assertEqual(self.series[2], slice1[1])
self.assertEqual(self.objSeries[2], slice2[1])
def test_getitem_boolean(self):
s = self.series
mask = s > s.median()
# passing list is OK
result = s[list(mask)]
expected = s[mask]
assert_series_equal(result, expected)
self.assert_(np.array_equal(result.index, s.index[mask]))
def test_getitem_generator(self):
gen = (x > 0 for x in self.series)
result = self.series[gen]
result2 = self.series[iter(self.series > 0)]
expected = self.series[self.series > 0]
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
def test_getitem_boolean_object(self):
# using column from DataFrame
s = self.series
mask = s > s.median()
omask = mask.astype(object)
# getitem
result = s[omask]
expected = s[mask]
assert_series_equal(result, expected)
# setitem
cop = s.copy()
cop[omask] = 5
s[mask] = 5
assert_series_equal(cop, s)
# nans raise exception
omask[5:10] = np.nan
self.assertRaises(Exception, s.__getitem__, omask)
self.assertRaises(Exception, s.__setitem__, omask, 5)
def test_getitem_setitem_boolean_corner(self):
ts = self.ts
mask_shifted = ts.shift(1, offset=datetools.bday) > ts.median()
self.assertRaises(Exception, ts.__getitem__, mask_shifted)
self.assertRaises(Exception, ts.__setitem__, mask_shifted, 1)
self.assertRaises(Exception, ts.ix.__getitem__, mask_shifted)
self.assertRaises(Exception, ts.ix.__setitem__, mask_shifted, 1)
def test_getitem_setitem_slice_integers(self):
s = Series(np.random.randn(8), index=[2, 4, 6, 8, 10, 12, 14, 16])
result = s[:4]
expected = s.reindex([2, 4, 6, 8])
assert_series_equal(result, expected)
s[:4] = 0
self.assert_((s[:4] == 0).all())
self.assert_(not (s[4:] == 0).any())
def test_getitem_out_of_bounds(self):
# don't segfault, GH #495
self.assertRaises(IndexError, self.ts.__getitem__, len(self.ts))
def test_getitem_box_float64(self):
value = self.ts[5]
self.assert_(isinstance(value, np.float64))
def test_getitem_ambiguous_keyerror(self):
s = Series(range(10), index=range(0, 20, 2))
self.assertRaises(KeyError, s.__getitem__, 1)
self.assertRaises(KeyError, s.ix.__getitem__, 1)
def test_setitem_ambiguous_keyerror(self):
s = Series(range(10), index=range(0, 20, 2))
self.assertRaises(KeyError, s.__setitem__, 1, 5)
self.assertRaises(KeyError, s.ix.__setitem__, 1, 5)
def test_slice(self):
numSlice = self.series[10:20]
numSliceEnd = self.series[-10:]
objSlice = self.objSeries[10:20]
self.assert_(self.series.index[9] not in numSlice.index)
self.assert_(self.objSeries.index[9] not in objSlice.index)
self.assertEqual(len(numSlice), len(numSlice.index))
self.assertEqual(self.series[numSlice.index[0]],
numSlice[numSlice.index[0]])
self.assertEqual(numSlice.index[1], self.series.index[11])
self.assert_(tm.equalContents(numSliceEnd,
np.array(self.series)[-10:]))
# test return view
sl = self.series[10:20]
sl[:] = 0
self.assert_((self.series[10:20] == 0).all())
def test_slice_can_reorder_not_uniquely_indexed(self):
s = Series(1, index=['a', 'a', 'b', 'b', 'c'])
result = s[::-1] # it works!
def test_setitem(self):
self.ts[self.ts.index[5]] = np.NaN
self.ts[[1,2,17]] = np.NaN
self.ts[6] = np.NaN
self.assert_(np.isnan(self.ts[6]))
self.assert_(np.isnan(self.ts[2]))
self.ts[np.isnan(self.ts)] = 5
self.assert_(not np.isnan(self.ts[2]))
# caught this bug when writing tests
series = Series(tm.makeIntIndex(20).astype(float),
index=tm.makeIntIndex(20))
series[::2] = 0
self.assert_((series[::2] == 0).all())
# set item that's not contained
self.assertRaises(Exception, self.series.__setitem__,
'foobar', 1)
def test_set_value(self):
idx = self.ts.index[10]
res = self.ts.set_value(idx, 0)
self.assert_(res is self.ts)
self.assertEqual(self.ts[idx], 0)
res = self.series.set_value('foobar', 0)
self.assert_(res is not self.series)
self.assert_(res.index[-1] == 'foobar')
self.assertEqual(res['foobar'], 0)
def test_setslice(self):
sl = self.ts[5:20]
self.assertEqual(len(sl), len(sl.index))
self.assertEqual(len(sl.index.indexMap), len(sl.index))
def test_basic_getitem_setitem_corner(self):
# invalid tuples, e.g. self.ts[:, None] vs. self.ts[:, 2]
self.assertRaises(Exception, self.ts.__getitem__,
(slice(None, None), 2))
self.assertRaises(Exception, self.ts.__setitem__,
(slice(None, None), 2), 2)
# weird lists. [slice(0, 5)] will work but not two slices
result = self.ts[[slice(None, 5)]]
expected = self.ts[:5]
assert_series_equal(result, expected)
# OK
self.assertRaises(Exception, self.ts.__getitem__,
[5, slice(None, None)])
self.assertRaises(Exception, self.ts.__setitem__,
[5, slice(None, None)], 2)
def test_basic_getitem_with_labels(self):
indices = self.ts.index[[5, 10, 15]]
result = self.ts[indices]
expected = self.ts.reindex(indices)
assert_series_equal(result, expected)
result = self.ts[indices[0]:indices[2]]
expected = self.ts.ix[indices[0]:indices[2]]
assert_series_equal(result, expected)
# integer indexes, be careful
s = Series(np.random.randn(10), index=range(0, 20, 2))
inds = [0, 2, 5, 7, 8]
arr_inds = np.array([0, 2, 5, 7, 8])
result = s[inds]
expected = s.reindex(inds)
assert_series_equal(result, expected)
result = s[arr_inds]
expected = s.reindex(arr_inds)
assert_series_equal(result, expected)
def test_basic_setitem_with_labels(self):
indices = self.ts.index[[5, 10, 15]]
cp = self.ts.copy()
exp = self.ts.copy()
cp[indices] = 0
exp.ix[indices] = 0
assert_series_equal(cp, exp)
cp = self.ts.copy()
exp = self.ts.copy()
cp[indices[0]:indices[2]] = 0
exp.ix[indices[0]:indices[2]] = 0
        assert_series_equal(cp, exp)
#!/opt/anaconda3/bin/python -u
import getopt
import os.path
import sys
import pandas as pd
import numpy as np
from time import sleep
from datetime import datetime, timedelta
sys.path.append(os.path.abspath(os.path.dirname(__file__) + '/' + '../..'))
from common.lib.log import debug, error, fatal, info, warn
from common.lib.cal import business_days
from common.lib.db import query_mysql
from common.lib.sym import local_hk_symbology
from data.lib.portfolios import set_reports, set_corporate_actions, get_portfolio, get_dividends
from data.lib.prices import get_equity_prices_rt
from data.lib.products import get_products
def print_usage():
print (" Usage: %s [options]" % (os.path.basename(__file__)))
print (" Options:")
print (" \t-c, --exchcode\t\texchange code")
print (" \t-d, --database\t\tdatabase connection string")
print (" \t-p, --portfolio\t\tportfolio name")
print (" \t-s, --start\t\tstart date")
print (" \t-e, --end\t\tend date")
print (" \t-r, --dryrun\t\tdry run")
print (" \t-h,\t\t\thelp")
def format_time(time):
today = datetime.strftime(datetime.now(), "%Y%m%d")
return datetime.strptime(today + "T" + time, "%Y%m%dT%H:%M:%S")
def init_sod(sDate, sPortfolio, dbConn, exchCode, dryRun):
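    # Start-of-day initialisation: roll the previous business day's EOD positions into today's SOD,
    # zero the intraday buy/sell/PnL fields, and apply any dividend/split adjustments before
    # writing the records back to the portfolio reports table.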
days = business_days(sDate - timedelta(days=10), sDate - timedelta(days=1), exchCode)
positions = get_portfolio(sPortfolio, days[-1], dbConn)
# positions = positions[positions['eodqty'] != 0] # remove empty positions
# set today's trade date
positions['date'] = sDate.strftime('%Y-%m-%d')
# carry over eod to sod
positions['sodqty'] = positions['eodqty']
positions['sodnot'] = positions['eodnot']
# zero out entries
positions[['buyqty','sellqty','eodqty']] = 0
positions[['buynot','sellnot','eodnot','grosspnl','netpnl','comms','divs']] = 0.
# prepare to submit entries
if positions.shape[0] > 0:
dvd = get_dividends(sDate, sDate)
dvd = dvd[['dividend','split']]
posdvd = positions.join(dvd, how='inner').copy(deep=True)
if posdvd.shape[0] > 0:
# adjustment required
posdvd['dividend'] = posdvd['dividend'].fillna(0.)
posdvd['split'] = posdvd['split'].fillna(1.)
posdvd['oldqty'] = posdvd['sodqty']
for index, row in posdvd.iterrows():
info("Adjusting div/split %s, div=%f, split=%f" % (index, row['dividend'], row['split']))
positions.ix[index, 'divs'] = positions.ix[index, 'sodqty'] * row['dividend']
positions.ix[index, 'sodqty'] = positions.ix[index, 'sodqty'] * (1./row['split'])
# record adjustment for records
posdvd.ix[index, 'cashadj'] = positions.ix[index, 'sodqty'] * row['dividend']
posdvd.ix[index, 'newqty'] = positions.ix[index, 'sodqty'] * (1./row['split'])
# prepare insert to portfolios/corpactions database
set_corporate_actions(posdvd, dbConn, dryrun=dryRun)
else:
info("No adjustments found")
# prepare insert to portfolios/reports database
set_reports(positions, dbConn, dryrun=dryRun)
def init_eod(eDate, ePortfolio, dbConn, exchCode, dryRun):
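    # End-of-day initialisation: pull today's report and trade rows for the portfolio and aggregate
    # the trades into buy/sell quantity, notional and commission totals per (portfolio, ticker).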
func_buy_qty = lambda x: x['execqty'] if x['side'] in ['B','C'] else 0.
func_sell_qty = lambda x: x['execqty'] if x['side'] in ['S','H'] else 0.
reports_query = "select * from report where portfolio = '%s' and date = '%s'" % (ePortfolio, eDate.strftime('%Y-%m-%d'))
trades_query = "select * from trades where portfolio = '%s' and date = '%s'" % (ePortfolio, eDate.strftime('%Y-%m-%d'))
reports = query_mysql(dbConn, reports_query, verbose=True)
info("%d report record(s) found" % (reports.shape[0]))
reports = reports.set_index(['portfolio','ticker'])
trades = query_mysql(dbConn, trades_query, verbose=True)
info("%d trades record(s) found" % (trades.shape[0]))
if trades.shape[0] > 0:
trades['tbuyqty'] = trades.apply(func_buy_qty, axis=1)
trades['tbuynot'] = trades['tbuyqty'] * trades['avgpx'] * trades['mult']
trades['tsellqty'] = trades.apply(func_sell_qty, axis=1)
trades['tsellnot'] = trades['tsellqty'] * trades['avgpx'] * trades['mult']
trades['tcomms'] = trades['comms']
trades_totals = trades.groupby(['portfolio','ticker'])['tbuyqty','tbuynot','tsellqty','tsellnot','tcomms'].sum()
else:
        trades_totals = pd.DataFrame(columns=['portfolio','ticker','tbuyqty','tbuynot','tsellqty','tsellnot','tcomms'])
import sys
import pandas as pd
from sqlalchemy import *
def load_data(messages_filepath, categories_filepath):
""" Takes two input csv files, merges it and returns the combined dataframe """
messages = pd.read_csv(messages_filepath)
categories = pd.read_csv(categories_filepath)
    df = pd.merge(messages, categories, on='id', how='outer')
    return df
#! /usr/bin/env python3
import json
import pandas as pd
import matplotlib.pyplot as plt
from datetime import datetime, timedelta
def sim_plot(popin=None, confin=None, outpng='', city='N/A'):
# working mode
if popin == None and confin != None:
with open(confin) as cin:
config = json.load(cin)
start_date = config['start_date']
stop_date = config['stop_date']
datetime_format = '%Y-%m-%d %H:%M:%S'
start = datetime.strptime(start_date, datetime_format)
        stop = datetime.strptime(stop_date, datetime_format)
midn_start = start.replace(hour=0, minute=0, second=0)
sources = {}
cnt_io = 0
if 'sources' in config:
src = config['sources']
for k,v in src.items():
rates = v['creation_rate']
dt = (24 * 60 * 60) / len(rates)
times = [ midn_start + timedelta(seconds=i*dt) for i in range(len(rates)) ]
sources[k] = [times, rates]
print(k, len(rates))
if (k!="LOCALS" and k.endswith("_w")): cnt_io += max(rates)
print(max(rates), sum(rates))
print(cnt_io)
dt = 300
n = 24 * 60 * 60 // dt
ts = [ midn_start + timedelta(seconds=i*dt) for i in range(n) ]
df = pd.DataFrame(ts)
df.index = pd.to_datetime(df[0], unit='s')
dt = 300
n = 24 * 60 * 60 // dt
ts = [ midn_start + timedelta(seconds=i*dt) for i in range(n) ]
    df = pd.DataFrame(ts)
import numpy as np
import pandas as pd
from tqdm import tqdm
np.random.seed(0)
mv_to_spe = 0.1
time_range_in_ns = np.arange(0, 100, 0.5) # time in ns
pulse_pos_in_ns = 25
integration_window_len = 30 # slices
extraction_window_start = 35 # slice index
extraction_window_len = 90 # slices
time_extraction_window = time_range_in_ns[extraction_window_start:extraction_window_start+extraction_window_len]
def fact_pulse_in_mV(x, x0=0):
p = (1.239*(1-1/(1+np.exp(((x-x0)-2.851)/1.063)))*np.exp(-((x-x0)-2.851)/19.173))
p *= 10 # 1 spe (10mV)
return p
def basic_extraction(data):
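    # Locate the last sample below half of the pulse maximum (leading-edge half-max timing) and
    # integrate a fixed 30-slice window starting there.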
maxPos = np.argmax(data)
maxHalf = data[maxPos] / 2.
half_pos = np.where(data[:maxPos+1] < maxHalf)[0]
if len(half_pos):
half_pos = half_pos[-1]
else:
half_pos = extraction_window_start
integral = data[half_pos:half_pos+30].sum()
return {
'arrivalTime': time_range_in_ns[extraction_window_start + half_pos],
'integral': integral,
}
def basic_extraction_normalized(data):
be = basic_extraction(data)
be['integral'] /= gain
be['arrivalTime'] -= true_arrival_time
return be
window_pulse = fact_pulse_in_mV(time_extraction_window, pulse_pos_in_ns)
gain = basic_extraction(window_pulse)['integral']
true_arrival_time = basic_extraction(window_pulse)['arrivalTime']
df = pd.DataFrame(columns=['noise', 'offset', 'reconstruction_rate'])
from datetime import datetime, timedelta
from pandas import json
from api.decorators import api_post, api_get
from api.helper import json_response, json_error_response
from api.utils import int_or_none
from broker.models import BrokerVehicle, Broker
from fms.decorators import authenticated_user
from fms.views import get_or_none
from owner.models import Vehicle
from supplier.helper import compare_format
from team.models import ManualBooking
from team.helper.helper import to_int
from transaction.models import VehicleAllocated, Transaction
from django.contrib.auth.models import User
import pandas as pd
from owner.vehicle_util import display_format
@api_post
@authenticated_user
def booking_history_data(request):
broker = Broker.objects.get(name=User.objects.get(username=request.user.username))
broker_vehicle_ids = BrokerVehicle.objects.filter(broker=broker).values_list('vehicle_id', flat=True)
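    # Collect every allocation made against this broker's vehicles and flatten it into the
    # JSON rows returned below (amounts are cast to integer strings).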
allocated_vehicles_data = VehicleAllocated.objects.filter(vehicle_number_id__in=broker_vehicle_ids).values(
'transaction_id', 'total_out_ward_amount', 'total_amount_to_owner', 'transaction__shipment_datetime', 'id',
'source_city', 'destination_city', 'transaction_id', 'material', 'transaction__total_vehicle_requested',
'transaction__transaction_status', 'transaction__transaction_id', 'vehicle_number__vehicle_number', 'lr_number')
transaction_data = [{'id': v['id'],
'transaction_id': v['transaction__transaction_id'],
'status': v['transaction__transaction_status'],
'source_city': v['source_city'],
'destination_city': v['destination_city'],
'paid': str(int(v['total_out_ward_amount'])),
'amount': str(int(v['total_amount_to_owner'])),
'balance': str(int(v['total_amount_to_owner'] - v['total_out_ward_amount'])),
'total_vehicle_requested': v['transaction__total_vehicle_requested'],
'vehicle_number': display_format(v['vehicle_number__vehicle_number']),
'lr_number': v['lr_number'],
'shipment_date': v['transaction__shipment_datetime'].strftime('%d-%b-%Y')} for v in
allocated_vehicles_data]
return json_response({'status': 'success', 'data': transaction_data})
@api_post
@authenticated_user
def vehicle_trip_data(request):
data = request.data
vehicle_id = int_or_none(data.get('vehicleId', None))
if vehicle_id:
vehicle = get_or_none(Vehicle, id=vehicle_id)
if not vehicle:
return json_error_response('Vehicle with id=%s does not exist' % vehicle_id, 404)
else:
broker_vehicle_ids = BrokerVehicle.objects.filter(vehicle=vehicle).values_list(
'vehicle_id',
flat=True)
allocated_vehicles_data = VehicleAllocated.objects.filter(vehicle_number_id__in=broker_vehicle_ids).values(
'transaction_id', 'total_out_ward_amount', 'total_amount_to_owner', 'transaction__shipment_datetime',
'source_city', 'destination_city', 'transaction_id', 'material', 'transaction__total_vehicle_requested',
'transaction__transaction_status', 'transaction__transaction_id', 'vehicle_number__vehicle_number',
'lr_number')
transaction_data = [{'id': v['transaction_id'],
'transaction_id': v['transaction__transaction_id'],
'status': v['transaction__transaction_status'],
'source_city': v['source_city'],
'destination_city': v['destination_city'],
'paid': str(int(v['total_out_ward_amount'])),
'amount': str(int(v['total_amount_to_owner'])),
'balance': str(int(v['total_amount_to_owner'] - v['total_out_ward_amount'])),
'total_vehicle_requested': v['transaction__total_vehicle_requested'],
'vehicle_number': display_format(v['vehicle_number__vehicle_number']),
'lr_number': v['lr_number'],
'shipment_date': v['transaction__shipment_datetime'].strftime('%d-%b-%Y')} for v in
allocated_vehicles_data]
return json_response({'status': 'success', 'data': transaction_data})
else:
vehicle = Vehicle()
@api_post
@authenticated_user
def mb_vehicle_trip_data(request):
data = request.data
vehicle_id = int_or_none(data.get('vehicleId', None))
if vehicle_id:
        vehicle = get_or_none(Vehicle, id=vehicle_id)
if not vehicle:
return json_error_response('Vehicle with id=%s does not exist' % vehicle_id, 404)
else:
data = []
for booking in ManualBooking.objects.filter(
lorry_number__in=[display_format(compare_format(vehicle.vehicle_number))]).order_by(
'-shipment_date'):
if to_int(booking.total_amount_to_owner - booking.total_out_ward_amount) != 0:
data.append(
{
'status': 'unpaid',
'lr_number': '\n'.join(booking.lr_numbers.values_list('lr_number', flat=True)),
'paid': to_int(booking.total_out_ward_amount),
'id': booking.id,
'total_vehicle_requested': None,
'vehicle_number': display_format(booking.lorry_number),
'source_city': booking.from_city,
'destination_city': booking.to_city,
'amount': to_int(booking.total_amount_to_owner),
'shipment_date': booking.shipment_date.strftime('%d-%b-%Y'),
'balance': to_int(booking.total_amount_to_owner - booking.total_out_ward_amount),
'transaction_id': booking.booking_id
}
)
else:
data.append(
{
'status': 'paid',
'lr_number': '\n'.join(booking.lr_numbers.values_list('lr_number', flat=True)),
'paid': to_int(booking.total_out_ward_amount),
'id': booking.id,
'total_vehicle_requested': None,
'vehicle_number': display_format(booking.lorry_number),
'source_city': booking.from_city,
'destination_city': booking.to_city,
'amount': to_int(booking.total_amount_to_owner),
'shipment_date': booking.shipment_date.strftime('%d-%b-%Y'),
'balance': to_int(booking.total_amount_to_owner - booking.total_out_ward_amount),
'final_payment_date': final_payment_date(booking=booking),
'transaction_id': booking.booking_id
}
)
        return json_response({'status': 'success', 'data': data})
    else:
        return json_error_response('vehicleId is required', 400)
def get_allocated_vehicle(request):
data = json.loads(request.body)
transaction = Transaction.objects.get(transaction_id=data['transaction_id'])
allocated_vehicle_list = []
for value in transaction.allocated_vehicle.all():
temp = []
temp.append(value.vehicle_number.vehicle_type.vehicle_type + ", " + value.vehicle_number.vehicle_type.capacity)
temp.append(value.vehicle_number.vehicle_number)
temp.append(value.vehicle_number.driver.driving_licence_number)
temp.append(value.vehicle_number.driver.name)
temp.append(value.vehicle_number.driver.phone)
allocated_vehicle_list.append(temp)
df_allocated = pd.DataFrame(allocated_vehicle_list,
columns=['vehicle_type', 'vehicle_number', 'driving_licence', 'driver_name',
'driver_phone'])
data_allocated = df_allocated.reset_index().to_json(orient='records')
data_allocated = json.loads(data_allocated)
return data_allocated
def loading_unloading_points(request):
data = json.loads(request.body)
transaction = Transaction.objects.get(transaction_id=data['transaction_id'])
locations = transaction.loading_unloading_location.all()
loading_list = []
unloading_list = []
for value in locations:
temp1 = []
temp2 = []
if value.type == 'loading':
temp1.append(value.address)
temp1.append(value.city.name)
loading_list.append(temp1)
elif value.type == 'unloading':
temp2.append(value.address)
temp2.append(value.city.name)
unloading_list.append(temp2)
df_loading = pd.DataFrame(loading_list, columns=['address', 'city'])
loading_details = df_loading.reset_index().to_json(orient='records')
loading_json = json.loads(loading_details)
df_unloading = | pd.DataFrame(unloading_list, columns=['address', 'city']) | pandas.DataFrame |
import pytest
from pandas import (
DataFrame,
Index,
Series,
)
import pandas._testing as tm
@pytest.mark.parametrize("n, frac", [(2, None), (None, 0.2)])
def test_groupby_sample_balanced_groups_shape(n, frac):
values = [1] * 10 + [2] * 10
df = DataFrame({"a": values, "b": values})
result = df.groupby("a").sample(n=n, frac=frac)
values = [1] * 2 + [2] * 2
expected = DataFrame({"a": values, "b": values}, index=result.index)
tm.assert_frame_equal(result, expected)
result = df.groupby("a")["b"].sample(n=n, frac=frac)
expected = | Series(values, name="b", index=result.index) | pandas.Series |
"""Helper classes and functions with RTOG studies.
"""
import random
import pandas as pd
import numpy as np
import pickle
from collections import Counter
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import OneHotEncoder
from tqdm import tqdm
import pint
# Constants defining variable and file parsing
from rtog_constants import gcp_baseline_paths, rtog_endpoints, rtog_binary_mapping, rtog_unknown_class_X
from rtog_constants import rtog_default_class_y, rtog_text_fields, rtog_field_mapping, rtog_categorical_fields
# Functions allowing RTOG data manipulation
from rtog_constants import is_categorical, merge, serum_values_to_ng_dl
def rtog_from_study_number(study_number, create_endpoints=True, standardize=False):
"""Helper function. Loads an RTOG object given the study number (str)."""
study_path = gcp_baseline_paths[study_number]
rtog = RTOG(filename=study_path, study_number=study_number, file_type='excel', create_endpoints=create_endpoints)
if standardize:
rtog.standardize_rx()
rtog.standardize_race()
rtog.standardize_gleason_scores()
rtog.standardize_tstage()
rtog.standardize_pelvic_rt()
rtog.standardize_prostate_dose()
rtog.standardize_rt_complete()
rtog.standardize_biochemical_failure()
rtog.standardize_disease_specific_survival()
rtog.cause_of_death()
# rtog_object.standardize_baseline_serum() # Note: this line takes a long time to run, due to unit conversions. Also Osama said the data is too noisy to use.
rtog.standardize_unknown_values_in_predictor_variables() # note: this must be done after standardize_rt_complete, bc that re-sets some unknown vars. This replaces the 'unknown' classes with nans, so that boosting can intelligently impute.
print("Loaded RTOG {}, Standardized={}".format(study_number, standardize))
return rtog
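# Hedged usage sketch (not part of the original module); '9202' is one of the study
# numbers referenced below, other valid values depend on the entries in gcp_baseline_paths:
#
#   rtog_9202 = rtog_from_study_number('9202', create_endpoints=True, standardize=True)
#   df_9202 = rtog_9202.df  # underlying pandas DataFrame of the study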
class RTOG(object):
def __init__(self, filename=None, study_number=None, file_type="excel", create_endpoints=True):
self.filename = filename
self.df = None
self.study_number = study_number
# Load Endpoints, Default Classes (for y), and Unknown Classes (for X).
if self.study_number in rtog_endpoints:
self.endpoints = rtog_endpoints[study_number]
if self.study_number in rtog_default_class_y:
self.default_class_y = rtog_default_class_y[study_number]
if self.study_number in rtog_unknown_class_X:
self.unknown_class_X = rtog_unknown_class_X[study_number]
# Load Data.
if self.filename is not None:
if file_type == "excel":
self.df = pd.read_excel(filename)
elif file_type == "csv":
self.df = pd.read_csv(filename, index_col=0)
self._field_fix()
self.table_sort()
# Study-specific additional derived endpoints get hardcoded here
if study_number == '9202':
# Add Radiotherapy info
gcp_path = "/export/medical_ai/ucsf/box_data/Aperio Images of NRG GU H&E Slides/NRG Statistics/RTOG 9202/All_RT_Data_9202.xlsx"
self.df_rt = | pd.read_excel(gcp_path) | pandas.read_excel |
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 3 18:17:07 2021
@author: alber
"""
import os
import pandas as pd
import numpy as np
import itertools
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
import pickle
import lightgbm as lgb
from os import walk
from scipy import stats
from statsmodels.stats.power import TTestIndPower
from sklearn import preprocessing
from sklearn.semi_supervised import (
LabelPropagation,
LabelSpreading,
SelfTrainingClassifier,
)
from common.config import (
PATH_POEMS, PATH_RESULTS, PATH_AFF_LEXICON, PATH_GROUND_TRUTH
)
df_metrics_h_test = pd.DataFrame()
### Sample Size
# parameters for power analysis
effect = 0.8
alpha = 0.1 # Careful with the alpha value: it is not 0.5
power = 0.8
# perform power analysis
analysis = TTestIndPower()
result = analysis.solve_power(effect, power=power, nobs1=None, ratio=1.0, alpha=alpha)
print('Sample Size: %.3f' % result)
df_kappa_limits = pd.DataFrame(
{
'limit_k': [0, 0.2, 0.4],
'category': ['poor', 'slight', 'fair']
}
)
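# Hedged sketch (assumption about the intended use of the limits above): bucketing kappa
# scores into the named agreement categories, treating each limit_k as a lower bound, e.g.
#   pd.cut([0.1, 0.3, 0.5], bins=[0, 0.2, 0.4, 1.0], labels=['poor', 'slight', 'fair'])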
# =============================================================================
# Best Models based on CV
# =============================================================================
# ENG names
df_names = pd.read_csv(
f"{PATH_GROUND_TRUTH}/variable_names_en.csv", encoding="latin-1")
df_names['category'] = df_names['es_name']
### Load CV - Psychological
f_path = f"{PATH_RESULTS}/results_cv/emotion_aff_full"
f_folders = next(walk(f_path), (None, None, []))[1] # [] if no file
df_results_aff_cv = pd.DataFrame()
i = 0
for folder in f_folders:
i += 1
filenames = next(walk(f_path + f'/{folder}'), (None, None, []))[2]
df_iter = pd.concat([
pd.read_csv(f_path + f'/{folder}' + '/' + x, encoding="latin-1") for x in filenames
])
df_iter['iter'] = folder
df_results_aff_cv = df_results_aff_cv.append(
df_iter
)
df_raw = df_results_aff_cv
df_raw = (
df_raw
.replace("Aversión", "Aversión")
.replace("Depresión", "Depresión")
.replace('Dramatización', "Dramatización")
.replace('Ilusión', "Ilusión")
.replace("Desilusión", "Desilusión")
.replace("Obsesión", "Obsesión")
.replace("Compulsión", "Compulsión")
.replace("Ensoñación", "Ensoñación")
.replace("Idealización", "Idealización")
.dropna(subset=['category'])
.drop(columns=['category', 'en_name'], errors="ignore")
)
df_raw = df_raw.merge(df_names, how="left").round(2)
# ENG names
df_names = pd.read_csv(
f"{PATH_GROUND_TRUTH}/variable_names_en.csv", encoding="latin-1")
df_names['category'] = df_names['es_name']
df_raw = (
df_raw
.merge(df_names, how="left")
.drop(columns=['es_name'])
)
### Get the metrics per emotion tag
df_results_aff = (
df_raw
.groupby(by=['category', 'regression_model', 'semantic_model'])
.mean()
.reset_index()
)
df_results_aff['mean_metric'] = (
(df_results_aff['kappa']+
df_results_aff['auc'])
/
2
)
df_median_ref = (
df_results_aff
.groupby(by=['regression_model', 'semantic_model'])
.median()
.reset_index()
.copy()
[['regression_model', 'semantic_model', 'f1_weighted', 'kappa', 'auc', 'corr']]
.rename(columns={
'f1_weighted': 'f1_weighted_median',
'kappa': 'kappa_median',
'auc': 'auc_median',
'corr': 'corr_median'
})
)
df_results_aff = df_results_aff[df_results_aff['auc']>0.5]
df_results_aff = df_results_aff[df_results_aff.fillna(0)['corr']>=0]
# Remove baselines
df_results_aff = df_results_aff[
(df_results_aff['regression_model'] != 'class_baseline_lightgbm') &
(df_results_aff['regression_model'] != 'class_baseline_smote_lightgbm') &
(df_results_aff['regression_model'] != 'class_label_spreading_base_knn') &
(df_results_aff['regression_model'] != 'class_label_spreading_base_rbf') &
(df_results_aff['regression_model'] != 'class_dummy_classifier') &
(df_results_aff['regression_model'] != 'reg_baseline_lightgbm') &
(df_results_aff['regression_model'] != 'reg_baseline_smote_lightgbm') &
(df_results_aff['regression_model'] != 'reg_label_spreading_base') &
(df_results_aff['regression_model'] != 'reg_dummy_classifier')
].copy()
# Remove unused semantic models
list_semantic_models = [
'enc_text_model1',
'enc_text_model2',
'enc_text_model3',
'enc_text_model4',
'enc_text_model5',
# 'enc_text_model_hg_bert_max',
# 'enc_text_model_hg_bert_span',
# 'enc_text_model_hg_bert_median',
'enc_text_model_hg_bert_avg_w',
# 'enc_text_model_hg_bert_sp_max',
# 'enc_text_model_hg_bert_sp_span',
# 'enc_text_model_hg_bert_sp_median',
'enc_text_model_hg_bert_sp_avg_w',
# 'enc_text_model_hg_ro_max',
# 'enc_text_model_hg_ro_span',
# 'enc_text_model_hg_ro_median',
# 'enc_text_model_hg_ro_avg_w'
]
df_results_aff = df_results_aff[
df_results_aff['semantic_model'].isin(list_semantic_models)]
df_results_aff = (
df_results_aff
.sort_values(by=['category', 'mean_metric'], ascending=False)
.groupby(by=['category'])
.first()
.reset_index()
)
df_results_aff = (
df_results_aff.merge(df_names, how="left").drop(columns=['es_name'])
)
df_results = df_results_aff[[
'en_name', 'semantic_model', 'regression_model',
'f1_weighted', 'kappa', 'auc', 'corr'
]].copy().round(2)
df_reference = df_results
# df_reference = df_results[[
# 'en_name', 'semantic_model', 'classification_model',
# 'f1_weighted', 'kappa', 'auc'
# ]].copy().round(2)
df_reference = df_reference.merge(df_median_ref, how="left")
### Add data distribution
# Load psycho names
df_names = pd.read_csv(f"{PATH_GROUND_TRUTH}/variable_names_en.csv", encoding="latin-1")
list_names = list(df_names["es_name"].values)
list_aff = [
"concreteness",
"context availability",
"anger",
"arousal",
"disgust",
"fear",
"happinness",
"imageability",
"sadness",
"valence",
]
list_kfolds = []
n_folds = 21
for i in range(n_folds):
df_gt = pd.read_csv(f"{PATH_GROUND_TRUTH}/poems_corpus_all.csv")
df_gt = df_gt.rename(columns={"text": "text_original"})
df_gt.columns = [str(x).rstrip().lstrip() for x in list(df_gt.columns)]
df_add = pd.DataFrame()
for category in list_names:
if category in list_aff:
continue
try:
df_iter = df_gt.groupby(category).apply(lambda s: s.sample(2))
except:
continue
df_add = df_add.append(df_iter)
df_add = df_add.drop_duplicates()
# New GT (without data used in training)
df_gt = df_gt[~df_gt["index"].isin(df_add["index"])].copy()
## Check no affective feature categories are missing
for category in list_aff:
l1 = list(df_add[category].unique())
l2 = list(df_gt[category].unique())
if len(l1)<len(l2):
l3 = [x for x in l2 if x not in l1]
df_add_new = df_gt[df_gt[category].isin(l3)]
df_add_new = df_add_new.drop_duplicates(subset=category)
df_add = df_add.append(df_add_new)
df_gt = df_gt[~df_gt["index"].isin(df_add_new["index"])].copy()
list_kfolds.append([{i: {'df_gt': df_gt, 'df_add': df_add}}])
df_distribution = pd.DataFrame()
for iter_item in list_kfolds:
iter_item = [x for x in iter_item[0].values()][0]['df_gt']
for category in list_aff:
data_cat = (
pd.DataFrame(iter_item[category].copy().value_counts())
.T
.reset_index()
.rename(columns={'index':'en_name'})
)
df_distribution = df_distribution.append(data_cat)
df_distribution = df_distribution.groupby(by=['en_name']).mean().reset_index().round(1)
df_distribution = df_distribution.replace("fear", "Fear (ordinal)")
df_distribution = df_distribution.replace("happinness", "happiness")
df_reference = df_distribution.merge(df_reference)
df_reference.round(2).to_csv(
"tables_paper/df_results_emotions_reference.csv", index=False)
# =============================================================================
# Differences vs. Baselines
# =============================================================================
# ENG names
df_names = pd.read_csv(
f"{PATH_GROUND_TRUTH}/variable_names_en.csv", encoding="latin-1")
df_names['category'] = df_names['es_name']
# Load best combinations
df_reference = pd.read_csv("tables_paper/df_results_emotions_reference.csv")
list_semantic_models = list(set(df_reference['semantic_model'].values))
list_prediction_models = list(set(df_reference['regression_model'].values))
list_categories = list(set(df_reference['en_name'].values))
### Load CV - Emotions
f_path = f"{PATH_RESULTS}/results_cv/emotion_aff_full"
f_folders = next(walk(f_path), (None, None, []))[1] # [] if no file
df_results_aff_cv = pd.DataFrame()
i = 0
for folder in f_folders:
i += 1
filenames = next(walk(f_path + f'/{folder}'), (None, None, []))[2]
df_iter = pd.concat([
pd.read_csv(f_path + f'/{folder}' + '/' + x) for x in filenames
])
df_iter['iter'] = folder
df_results_aff_cv = df_results_aff_cv.append(
df_iter
)
df_raw = df_results_aff_cv
df_raw = df_raw.merge(df_names, how="left").drop(columns=['es_name'])
# Set missing SMOTE models as non-SMOTE results
df_aux = df_raw[(df_raw['regression_model']=='class_baseline_lightgbm') &
(df_raw['category']=='happinness')
].copy()
df_aux['regression_model'] = 'class_baseline_smote_lightgbm'
df_raw = df_raw.append(df_aux)
df_aux = df_raw[(df_raw['regression_model']=='class_baseline_lightgbm') &
(df_raw['category']=='fear')
].copy()
df_aux['regression_model'] = 'class_baseline_smote_lightgbm'
df_raw = df_raw.append(df_aux)
list_baselines = [
'class_baseline_lightgbm',
'class_baseline_smote_lightgbm',
'class_dummy_classifier',
'reg_baseline_lightgbm'
]
# Iter and get results
df_metrics = pd.DataFrame()
for i, row in df_reference.iterrows():
for baseline in list_baselines:
df_1 = df_raw[
(df_raw['semantic_model']==row['semantic_model']) &
(df_raw['regression_model']==row['regression_model']) &
(df_raw['en_name']==row['en_name'])
]
df_2 = df_raw[
(df_raw['semantic_model']==row['semantic_model']) &
(df_raw['regression_model']==baseline) &
(df_raw['en_name']==row['en_name'])
]
list_f1_df1 = list(df_1['f1_weighted'].values)
list_f1_df2 = list(df_2['f1_weighted'].values)
list_kappa_df1 = list(df_1['kappa'].values)
list_kappa_df2 = list(df_2['kappa'].values)
list_auc_df1 = list(df_1['auc'].values)
list_auc_df2 = list(df_2['auc'].values)
list_corr_df1 = list(df_1['corr'].values)
list_corr_df2 = list(df_2['corr'].values)
try:
_, pVal_f1 = stats.kruskal(list_f1_df1, list_f1_df2)
_, pVal_kappa = stats.kruskal(list_kappa_df1, list_kappa_df2)
_, pVal_auc = stats.kruskal(list_auc_df1, list_auc_df2)
_, pVal_corr = stats.kruskal(list_corr_df1, list_corr_df2)
except:
pVal_f1 = 1
pVal_kappa = 1
pVal_auc = 1
pVal_corr = 1
df_metrics_iter = pd.DataFrame(
{'category': [row['en_name']],
'semantic_model': [row['semantic_model']],
'prediction_model_1': [row['regression_model']],
'prediction_model_2': [baseline],
'mean_1_f1': [np.mean(list_f1_df1)],
'mean_2_f1': [np.mean(list_f1_df2)],
'median_1_f1': [np.median(list_f1_df1)],
'median_2_f1': [np.median(list_f1_df2)],
'p-value_f1': [pVal_f1],
'mean_1_kappa': [np.mean(list_kappa_df1)],
'mean_2_kappa': [np.mean(list_kappa_df2)],
'median_1_kappa': [np.median(list_kappa_df1)],
'median_2_kappa': [np.median(list_kappa_df2)],
'p-value_kappa': [pVal_kappa],
'mean_1_auc': [np.mean(list_auc_df1)],
'mean_2_auc': [np.mean(list_auc_df2)],
'median_1_auc': [np.median(list_auc_df1)],
'median_2_auc': [np.median(list_auc_df2)],
'p-value_auc': [pVal_auc],
'mean_1_corr': [np.mean(list_corr_df1)],
'mean_2_corr': [np.mean(list_corr_df2)],
'median_1_corr': [np.median(list_corr_df1)],
'median_2_corr': [np.median(list_corr_df2)],
'p-value_corr': [pVal_corr],
}
)
df_metrics = df_metrics.append(df_metrics_iter)
df_metrics.round(2).to_csv(
"tables_paper/df_results_emotions_cv_vs_baseline.csv", index=False)
# Plot Data
df_aux = (df_metrics[['category', 'prediction_model_1', 'mean_1_auc', 'p-value_auc']]
.rename(columns={
'prediction_model_1':'prediction_model',
'mean_1_auc':'mean_auc'})
)
df_aux['prediction_model'] = 'best reference'
df_plot = (
df_metrics[['category', 'prediction_model_2', 'mean_2_auc', 'p-value_auc']]
.rename(columns={'prediction_model_2':'prediction_model',
'mean_2_auc':'mean_auc'})
.append(df_aux)
)
df_plot = df_plot[df_plot['prediction_model']!='class_dummy_classifier']
df_plot = df_plot[df_plot['prediction_model']!='reg_dummy_classifier']
df_plot = df_plot[df_plot['prediction_model']!='reg_baseline_lightgbm']
df_plot = df_plot[df_plot['prediction_model']!='baseline_affective']
df_plot = df_plot.replace("Fear (ordinal)", "fear (ordinal)")
df_plot = df_plot.replace("anger", "anger (ordinal)")
plt.figure(figsize=(16, 10), dpi=250)
sns.set_theme(style="darkgrid")
sns.set(font_scale=1.2)
plot_fig = sns.barplot(data = df_plot,
x = 'category',
y = 'mean_auc',
hue = 'prediction_model'
)
plot_fig.set(
ylabel = "AUC Value",
xlabel = 'Psychological Category'
)
plot_fig.set_title(
"AUC metrics versus baseline models",
fontdict = {'fontsize':16},
pad = 12
)
plt.ylim(0, 0.9)
plot_fig.set_xticklabels(
plot_fig.get_xticklabels(), rotation=45, horizontalalignment='right')
# plot_fig.set_yticklabels(
# plot_fig.get_yticklabels(), rotation=360, horizontalalignment='right')
plt.legend(loc="upper left")
plt.savefig('results/df_plot_emotions_metrics_vs_baseline.png', dpi=250)
plt.show()
### Analysis (manual)
df_analysis = df_plot[(df_plot['p-value_auc']>0.1) & (df_plot['prediction_model']!='best reference')].copy()
# =============================================================================
# Differences vs. original DISCO (Emotions)
# =============================================================================
# ENG names
df_names = pd.read_csv(
f"{PATH_GROUND_TRUTH}/variable_names_en.csv", encoding="latin-1")
df_names['category'] = df_names['es_name']
# Load best combinations
df_reference = pd.read_csv("tables_paper/df_results_emotions_reference.csv")
list_semantic_models = list(set(df_reference['semantic_model'].values))
list_prediction_models = list(set(df_reference['regression_model'].values))
list_categories = list(set(df_reference['en_name'].values))
### Load CV - Emotions - All
f_path = f"{PATH_RESULTS}/results_cv/emotion_aff_full"
f_folders = next(walk(f_path), (None, None, []))[1] # [] if no file
df_results_aff_cv = pd.DataFrame()
i = 0
for folder in f_folders:
i += 1
filenames = next(walk(f_path + f'/{folder}'), (None, None, []))[2]
df_iter = pd.concat([
pd.read_csv(f_path + f'/{folder}' + '/' + x) for x in filenames
])
df_iter['iter'] = folder
df_results_aff_cv = df_results_aff_cv.append(
df_iter
)
df_results_aff_cv = (
df_results_aff_cv
.replace("Aversión", "Aversión")
.replace("Depresión", "Depresión")
.replace('Dramatización', "Dramatización")
.replace('Ilusión', "Ilusión")
.replace("Desilusión", "Desilusión")
.replace("Obsesión", "Obsesión")
.replace("Compulsión", "Compulsión")
.replace("Ensoñación", "Ensoñación")
.replace("Idealización", "Idealización")
.dropna(subset=['category'])
.drop(columns=['category', 'en_name'], errors="ignore")
)
df_results_aff_cv = df_results_aff_cv.merge(df_names, how="left").round(2).drop(columns=['es_name'])
### Load CV - Psychological - DISCO
f_path = f"{PATH_RESULTS}/results_cv/emotion_aff_DISCO"
f_folders = next(walk(f_path), (None, None, []))[1] # [] if no file
df_results_disco_cv = | pd.DataFrame() | pandas.DataFrame |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Nov 7 17:35:50 2018
@author: amal
"""
from __future__ import division
import os.path
import pandas as pd
import numpy as np
from datetime import datetime
from geopy.distance import vincenty
import matplotlib.pyplot as plt
import seaborn as sns
import json
import urllib.request as urllib2
from sklearn import preprocessing
from plotly.offline import plot
import plotly.graph_objs as go
def fetch_one_week_data(URL):
one_week_data = pd.read_csv(URL,
sep=",",
header=None,
names=[
"curr_status",
"curr_stop_sequence",
"direction_id",
"latitude",
"longitude",
"route_id",
"schedule_realtionship",
"stop_id",
"server_time",
"trip_id",
"system_time",
"vehicle_id"])
one_week_data = one_week_data[["server_time",
"route_id",
"curr_stop_sequence",
"latitude",
"longitude",
"direction_id",
"curr_status",
"schedule_realtionship",
"stop_id",
"trip_id",
"vehicle_id",
"system_time",
]]
one_week_data['curr_status'] = pd.to_numeric(one_week_data['curr_status'])
one_week_data['curr_stop_sequence'] = pd.to_numeric(one_week_data['curr_stop_sequence'])
one_week_data['direction_id'] = pd.to_numeric(one_week_data['direction_id'])
one_week_data['latitude'] = pd.to_numeric(one_week_data['latitude'])
one_week_data['longitude'] = pd.to_numeric(one_week_data['longitude'])
one_week_data['schedule_realtionship'] = pd.to_numeric(one_week_data['schedule_realtionship'])
one_week_data['stop_id'] = pd.to_numeric(one_week_data['stop_id'])
one_week_data['latitude'] = pd.to_numeric(one_week_data['latitude'])
one_week_data = one_week_data.drop(['curr_status', 'schedule_realtionship'], axis=1)
return(one_week_data)
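# Hedged usage sketch ('DATA_URL' is a placeholder; the actual CSV feed location is
# not specified in this module):
#
#   one_week = fetch_one_week_data(DATA_URL)
#   print(one_week.dtypes)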
def day_of_week(ep):
return datetime.fromtimestamp(ep).strftime("%A")
def time_of_day(ep):
ref = datetime(2018, 1, 1, 0, 0, 0)
sec = (datetime.fromtimestamp(ep)- ref).seconds
return sec
def distance(row):
source = (row['start_lat'], row['start_long'])
dest = ( row['end_lat'], row['end_long'])
return vincenty(source,dest).miles
Boston = (42.3601, -71.0589)
def start_to_CC(row):
'''find the distance between pick up point and Manhattan center'''
source = (row['start_lat'], row['start_long'])
return vincenty(source,Boston).miles
def end_to_CC(row):
'''find the distance between dropoff point and Manhattan center'''
dest = ( row['end_lat'], row['end_long'])
return vincenty(dest,Boston).miles
def weather(data):
wdata = pd.read_csv('../datasets.nosync/weather_info.csv')
wdata = wdata[wdata['dt'] >= 1541177994 ]
wdata = wdata[wdata['dt'] < 1541625811 ]
for index, row in data.iterrows():
#print(index)
wrow = wdata.iloc[(wdata['dt']-row['server_time']).abs().argsort()[:1]]
#print(list(wrow['temp'])[0])
data.at[index,'temp'] = list(wrow['temp'])[0]
data.at[index,'pressure'] = list(wrow['pressure'])[0]
data.at[index,'humidity'] = list(wrow['humidity'])[0]
data.at[index,'wind_speed'] = list(wrow['wind_speed'])[0]
data.at[index,'clouds_all'] = list(wrow['wind_speed'])[0]
data.at[index,'weather_main'] = list(wrow['weather_main'])[0]
return(data)
def remove_outliers(data):
data = data[data['travel_time']<2000]
#data = data[data['travel_time']>120]
#data['travel_time'] = data['travel_time']/60
data=data[data['stop_id'] != data['end_stop']]
#data2=data[data['stop_id'] == data['end_stop']].sample(frac=0.25, replace=False)
#data = pd.concat([data1, data2])
#print(data)
return(data)
def add_other_features(data):
# Add day of the week and the dummy variable
data = weather(data)
data = pd.get_dummies(data, columns=['weather_main'])
DD = data['server_time'].map(day_of_week)
data['day'] = DD
DD = pd.get_dummies(DD,prefix='day')
data = pd.concat([data, DD], axis =1)
data = pd.get_dummies(data, columns=['route_id'])
data = data.drop(['day'], axis=1)
data['time_of_day'] = data['server_time'].map(time_of_day)
# distance between start and end of the trip
data['distance'] = data.apply(lambda x :distance(x), axis=1 )
#data['distance2'] = data['distance']**2
# distance between start, end, and center of City
data['start_CC'] = data.apply(start_to_CC, axis=1 )
data['end_CC'] = data.apply(end_to_CC, axis=1 )
data['velocity'] = np.array(data['distance']/(data['travel_time']/3600))
#Replace this part with IQR
data = data[data['velocity']<100]
data = data[data['velocity']>.5]
#data = data[data['travel_time']<300]
#data = data[data['travel_time']>10]
return(data)
def realtime_prediction_add_weather(data):
API_ENDPOINT = 'http://api.openweathermap.org/data/2.5/weather'
CITY_ID = '?q=Boston,US'
API_TOKEN = '&appid=f2945dde296e86ae509e15d26ded0bb1'
URL = API_ENDPOINT+CITY_ID+API_TOKEN
r = urllib2.urlopen(URL)
r = json.load(r)
#print(r['weather'][0]['main'])
t = r['main']['temp']
p = r['main']['pressure']
w = r['wind']['speed']
h = r['main']['humidity']
#v = r['visibility']
c = r['clouds']['all']
wm = 'weather_main_' + str(r['weather'][0]['main'])
data[wm] = 1
data['temp'] = t
data['pressure'] = p
data['wind_speed'] = w
data['humidity'] = h
data['clouds_all'] = c
return(data)
def realtime_prediction_add_other_features(data):
# Add day of the week and the dummy variable
#data = pd.get_dummies(data, columns=['weather_main'])
#print(data)
#print(list(data))
DD = data['server_time'].map(day_of_week)
data['day'] = DD
DD = pd.get_dummies(DD,prefix='day')
data = pd.concat([data, DD], axis =1)
#print(list(data))
data = pd.get_dummies(data, columns=['route_id'])
#print(list(data))
data = data.drop(['day'], axis=1)
data['time_of_day'] = data['server_time'].map(time_of_day)
# distance between start and end of the trip
data['distance'] = data.apply(lambda x :distance(x), axis=1 )
#data['distance2'] = data['distance']**2
# distance between start, end, and center of Boston
data['start_CC'] = data.apply(start_to_CC, axis=1 )
data['end_CC'] = data.apply(end_to_CC, axis=1 )
return(data)
def add_main_features_cords_tt(data):
for index, row in data.iterrows():
vid = row['vehicle_id']
rid = row['route_id']
start_lat = row['latitude']
start_long = row['longitude']
start_time = row['server_time']
for i in range(index+1, index+500):
if (i < data.shape[0] and vid == data["vehicle_id"].iloc[i] and rid == data["route_id"].iloc[i]):
print(index, i)
end_lat = data["latitude"].iloc[i]
end_long = data["longitude"].iloc[i]
end_time = data["server_time"].iloc[i]
data.at[index,'start_lat'] = start_lat
data.at[index,'start_long'] = start_long
data.at[index,'end_lat'] = end_lat
data.at[index,'end_long'] = end_long
data.at[index,'travel_time'] = end_time - start_time
break
data = data.drop(['system_time', 'latitude', 'longitude'], axis=1)
data = data.dropna()
data = data[data['travel_time'] != 0]
data.to_pickle('../datasets.nosync/data_intermediate.pkl')
return(data)
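# Hedged sketch of how these helpers appear to chain together (the orchestration is not
# shown in this module; the order of the feature steps is an assumption):
#
#   trips = add_main_features_cords_tt(one_week_data)
#   trips = add_other_features(trips)   # weather, day/time dummies, distances, velocity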
def add_remaining_variables_and_drop(data, rv):
for item in rv:
data[item] = 0
return(data)
def scale_realtime_prediction(data):
names = list(data)
min_max_scaler = preprocessing.MinMaxScaler()
np_scaled = min_max_scaler.fit_transform(data)
np_scaled = | pd.DataFrame(np_scaled, columns=names) | pandas.DataFrame |
"""
Evaluating passed experiments, for instance if metric was changed
The expected submission structure and required files:
* `registration-results.csv` - cover file with experimental results
* `computer-performances.json` - computer performance evaluation
* landmarks in CSV files with relative path described
in `registration-results.csv` in column 'Warped source landmarks'
The required files in the reference (ground truth):
* `dataset.csv` - cover file with planed registrations
* `computer-performances.json` - reference performance evaluation
* `lnds_provided/` provided landmarks in CSV files with relative path described
in `dataset.csv` in column 'Source landmarks'
* `lnds_reference/` reference (ground truth) landmarks in CSV files with relative
path described in `dataset_cover.csv` in both columns 'Target landmarks'
and 'Source landmarks'
Sample usage::
python evaluate_submission.py \
-e ./results/BmUnwarpJ \
-t ./data-images/pairs-imgs-lnds_histol.csv \
-d ./data-images \
-r ./data-images \
-p ./bm_experiments/computer-performances_cmpgrid-71.json \
-o ./output \
--min_landmarks 0.20
DOCKER
------
Running in grad-challenge.org environment::
python evaluate_submission.py \
-e /input \
-t /opt/evaluation/dataset.csv \
-d /opt/evaluation/lnds_provided \
-r /opt/evaluation/lnds_reference \
-p /opt/evaluation/computer-performances.json \
-o /output \
--min_landmarks 0.20
or run locally::
python bm_ANHIR/evaluate_submission.py \
-e bm_ANHIR/submission \
-t bm_ANHIR/dataset_ANHIR/dataset_medium.csv \
-d bm_ANHIR/dataset_ANHIR/landmarks_user_phase2 \
-r bm_ANHIR/dataset_ANHIR/landmarks_all \
-p bm_ANHIR/dataset_ANHIR/computer-performances_cmpgrid-71.json \
-o bm_ANHIR/output \
--min_landmarks 1.0
References:
* https://grand-challengeorg.readthedocs.io/en/latest/evaluation.html
Copyright (C) 2018-2019 <NAME> <<EMAIL>>
"""
import argparse
import json
import logging
import os
import re
import sys
import time
from functools import partial
import numpy as np
import pandas as pd
sys.path += [os.path.abspath('.'), os.path.abspath('..')] # Add path to root
from birl.utilities.data_io import create_folder, load_landmarks, save_landmarks, update_path
from birl.utilities.dataset import parse_path_scale
from birl.utilities.experiments import iterate_mproc_map, parse_arg_params, FORMAT_DATE_TIME, nb_workers
from birl.benchmark import COL_PAIRED_LANDMARKS, ImRegBenchmark, filter_paired_landmarks, _df_drop_unnamed
NB_WORKERS = nb_workers(0.9)
NAME_CSV_RESULTS = 'registration-results.csv'
NAME_JSON_COMPUTER = 'computer-performances.json'
NAME_JSON_RESULTS = 'metrics.json'
COL_NORM_TIME = 'Norm. execution time [minutes]'
COL_TISSUE = 'Tissue kind'
# FOLDER_FILTER_DATASET = 'filtered dataset'
CMP_THREADS = ('1', 'n')
#: Requiring that the initial overlap matches the warped (target) overlap is tricky,
# as some image pairs do not have the same number of points, so it is recommended to keep this False
REQUIRE_OVERLAP_INIT_TARGET = False
def create_parser():
""" parse the input parameters
:return dict: parameters
"""
# SEE: https://docs.python.org/3/library/argparse.html
parser = argparse.ArgumentParser()
parser.add_argument(
'-e', '--path_experiment', type=str, required=True, help='path to the experiments', default='/input/'
)
parser.add_argument(
'-t',
'--path_table',
type=str,
required=True,
help='path to cover table (csv file)',
default='/opt/evaluation/dataset.csv'
)
parser.add_argument(
'-d',
'--path_dataset',
type=str,
required=True,
help='path to dataset with provided landmarks',
default='/opt/evaluation/provided'
)
parser.add_argument(
'-r', '--path_reference', type=str, required=False, help='path to complete ground truth landmarks'
)
parser.add_argument(
'-p', '--path_comp_bm', type=str, required=False, help='path to reference computer performance JSON'
)
parser.add_argument(
'-o', '--path_output', type=str, required=True, help='path to output results', default='/output/'
)
# required number of submitted landmarks, match values in COL_PAIRED_LANDMARKS
parser.add_argument(
'--min_landmarks', type=float, required=False, default=0.5, help='ration of required landmarks in submission'
)
# parser.add_argument('--nb_workers', type=int, required=False, default=NB_WORKERS,
# help='number of processes in parallel')
parser.add_argument(
'--details', action='store_true', required=False, default=False, help='export details for each case'
)
return parser
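# Hedged usage sketch: the parser is expected to be consumed via the imported helper,
# e.g. `arg_params = parse_arg_params(create_parser())`.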
def filter_export_landmarks(idx_row, path_output, path_dataset, path_reference):
""" filter all relevant landmarks which were used and copy them to experiment
The case is that in certain challenge stage users had provided just a subset
of all image landmarks which could be laos shuffled. The idea is to filter identify
all user used (provided in dataset) landmarks and filter them from temporary
reference dataset.
:param tuple(idx,dict|Series) idx_row: experiment DataFrame
:param str path_output: path to output folder
:param str path_dataset: path to provided landmarks
:param str path_reference: path to the complete landmark collection
:return tuple(idx,float): record index and match ratio
"""
idx, row = idx_row
ratio_matches, lnds_filter_ref, lnds_filter_move = \
filter_paired_landmarks(row, path_dataset, path_reference,
ImRegBenchmark.COL_POINTS_MOVE,
ImRegBenchmark.COL_POINTS_REF)
# moving and reference landmarks
for col, lnds_flt in [(ImRegBenchmark.COL_POINTS_REF, lnds_filter_ref),
(ImRegBenchmark.COL_POINTS_MOVE, lnds_filter_move)]:
path_out = update_path(row[col], pre_path=path_output)
create_folder(os.path.dirname(path_out), ok_existing=True)
if os.path.isfile(path_out):
assert np.array_equal(load_landmarks(path_out), lnds_flt), \
'overwrite different set of landmarks'
save_landmarks(path_out, lnds_flt)
return idx, ratio_matches
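# Hedged usage sketch (paths are placeholders): this filter is meant to be mapped over
# the experiment rows, e.g. with the imported helpers:
#
#   _filter = partial(filter_export_landmarks, path_output=path_output,
#                     path_dataset=path_dataset, path_reference=path_reference)
#   for idx, ratio in iterate_mproc_map(_filter, df_experiments.iterrows(), nb_workers=NB_WORKERS):
#       df_experiments.loc[idx, COL_PAIRED_LANDMARKS] = ratio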
def normalize_exec_time(df_experiments, path_experiments, path_comp_bm=None):
""" normalize execution times if reference and experiment computer is given
:param DF df_experiments: experiment DataFrame
:param str path_experiments: path to experiment folder
:param str path_comp_bm: path to reference comp. benchmark
"""
path_comp_bm_expt = os.path.join(path_experiments, NAME_JSON_COMPUTER)
if ImRegBenchmark.COL_TIME not in df_experiments.columns:
logging.warning('Missing %s among result columns.', ImRegBenchmark.COL_TIME)
return
if not path_comp_bm:
logging.warning('Reference comp. perform. not specified.')
return
elif not all(os.path.isfile(p) for p in [path_comp_bm, path_comp_bm_expt]):
logging.warning(
'Missing one of the JSON files: \n %s (%s)\n %s (%s)', path_comp_bm, os.path.isfile(path_comp_bm),
path_comp_bm_expt, os.path.isfile(path_comp_bm_expt)
)
return
logging.info('Normalizing the Execution time.')
with open(path_comp_bm, 'r') as fp:
comp_ref = json.load(fp)
with open(path_comp_bm_expt, 'r') as fp:
comp_exp = json.load(fp)
time_ref = np.mean([comp_ref['registration @%s-thread' % i] for i in CMP_THREADS])
time_exp = np.mean([comp_exp['registration @%s-thread' % i] for i in CMP_THREADS])
coef = time_ref / time_exp
df_experiments[COL_NORM_TIME] = df_experiments[ImRegBenchmark.COL_TIME] * coef
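# Hedged sketch of the computer-performance JSON consumed above (keys follow the
# 'registration @%s-thread' template over CMP_THREADS; the values are illustrative seconds):
#
#   {"registration @1-thread": 12.3, "registration @n-thread": 4.5}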
def parse_landmarks(idx_row):
""" parse the warped landmarks and reference and save them as cases
:param tuple(int,series) idx_row: individual row
:return {str: float|[]}: parsed registration pair
"""
idx, row = idx_row
row = dict(row)
# lnds_ref = load_landmarks(update_path_(row[COL_POINTS_REF], path_experiments))
# lnds_warp = load_landmarks(update_path_(row[COL_POINTS_MOVE_WARP], path_experiments))
# if isinstance(row[COL_POINTS_MOVE_WARP], str)else np.array([[]])
path_dir = os.path.dirname(row[ImRegBenchmark.COL_POINTS_MOVE])
match_lnds = np.nan_to_num(row[COL_PAIRED_LANDMARKS]) if COL_PAIRED_LANDMARKS in row else 0.
item = {
'name-tissue': os.path.basename(os.path.dirname(path_dir)),
'scale-tissue': parse_path_scale(os.path.basename(path_dir)),
'type-tissue': row.get(COL_TISSUE, None),
'name-reference': os.path.splitext(os.path.basename(row[ImRegBenchmark.COL_POINTS_REF]))[0],
'name-source': os.path.splitext(os.path.basename(row[ImRegBenchmark.COL_POINTS_MOVE]))[0],
# 'reference landmarks': np.round(lnds_ref, 1).tolist(),
# 'warped landmarks': np.round(lnds_warp, 1).tolist(),
'matched-landmarks': match_lnds,
'Robustness': np.round(row.get(ImRegBenchmark.COL_ROBUSTNESS, 0), 3),
'Norm-Time_minutes': np.round(row.get(COL_NORM_TIME, None), 5),
'Status': row.get(ImRegBenchmark.COL_STATUS, None),
}
def _round_val(row, col):
dec = 5 if col.startswith('rTRE') else 2
return np.round(row[col], dec)
# copy all columns with Affine statistic
item.update({col.replace(' ', '-'): _round_val(row, col) for col in row if 'affine' in col.lower()})
# copy all columns with rTRE, TRE and Overlap
# item.update({col.replace(' (final)', '').replace(' ', '-'): row[col]
# for col in row if '(final)' in col})
item.update({
col.replace(' (elastic)', '_elastic').replace(' ', '-'): _round_val(row, col)
for col in row if 'TRE' in col
})
# later in JSON keys ahs to be str only
return str(idx), item
def compute_scores(df_experiments, min_landmarks=1.):
""" compute all main metrics
.. seealso:: https://anhir.grand-challenge.org/Evaluation/
:param DF df_experiments: complete experiments
:param float min_landmarks: required number of submitted landmarks in range (0, 1),
match values in COL_PAIRED_LANDMARKS
:return dict: results
"""
# if the initial overlap and submitted overlap do not mach, drop results
if 'overlap points (target)' not in df_experiments.columns:
raise ValueError('Missing `overlap points (target)` column, because there are probably missing wrap landmarks.')
unpaired = df_experiments[COL_PAIRED_LANDMARKS] < min_landmarks
hold_overlap = df_experiments['overlap points (init)'] == df_experiments['overlap points (target)']
mask_incomplete = unpaired.copy()
if REQUIRE_OVERLAP_INIT_TARGET:
mask_incomplete |= ~hold_overlap
# rewrite incomplete cases by initial stat
if sum(mask_incomplete) > 0:
for col_f, col_i in zip(*_filter_tre_measure_columns(df_experiments)):
df_experiments.loc[mask_incomplete, col_f] = df_experiments.loc[mask_incomplete, col_i]
df_experiments.loc[mask_incomplete, ImRegBenchmark.COL_ROBUSTNESS] = 0.
logging.warning(
'There are %i cases which incomplete landmarks - unpaired %i & missed overlap %i.',
sum(mask_incomplete),
sum(unpaired),
sum(~hold_overlap),
)
df_expt_robust = df_experiments[df_experiments[ImRegBenchmark.COL_ROBUSTNESS] > 0.5]
| pd.set_option('expand_frame_repr', False) | pandas.set_option |
import pandas as pd
from predict_functions import build_rmsa_map, calculate_tournament_table, sort_table, predict_match
from utils.constants import Maps, Teams
# Pandas options for better printing
pd.set_option('display.max_columns', 500)
| pd.set_option('display.max_rows', 1000) | pandas.set_option |
from datetime import datetime
from typing import (
Any,
Dict,
List,
Optional,
)
import numpy
import pandas as pd
from sqlalchemy import (
BigInteger,
Column,
ForeignKey,
Index,
Integer,
JSON,
Text,
)
from sqlalchemy.orm import (
relationship,
Session
)
from hummingbot.model import HummingbotBase
from hummingbot.model.decimal_type_decorator import SqliteDecimal
class TradeFill(HummingbotBase):
__tablename__ = "TradeFill"
__table_args__ = (Index("tf_config_timestamp_index",
"config_file_path", "timestamp"),
Index("tf_market_trading_pair_timestamp_index",
"market", "symbol", "timestamp"),
Index("tf_market_base_asset_timestamp_index",
"market", "base_asset", "timestamp"),
Index("tf_market_quote_asset_timestamp_index",
"market", "quote_asset", "timestamp")
)
config_file_path = Column(Text, nullable=False)
strategy = Column(Text, nullable=False)
market = Column(Text, primary_key=True, nullable=False)
symbol = Column(Text, nullable=False)
base_asset = Column(Text, nullable=False)
quote_asset = Column(Text, nullable=False)
timestamp = Column(BigInteger, nullable=False)
order_id = Column(Text, ForeignKey("Order.id"), primary_key=True, nullable=False)
trade_type = Column(Text, nullable=False)
order_type = Column(Text, nullable=False)
price = Column(SqliteDecimal(6), nullable=False)
amount = Column(SqliteDecimal(6), nullable=False)
leverage = Column(Integer, nullable=False, default=1)
trade_fee = Column(JSON, nullable=False)
exchange_trade_id = Column(Text, primary_key=True, nullable=False)
position = Column(Text, nullable=True)
order = relationship("Order", back_populates="trade_fills")
def __repr__(self) -> str:
return f"TradeFill(config_file_path='{self.config_file_path}', strategy='{self.strategy}', " \
f"market='{self.market}', symbol='{self.symbol}', base_asset='{self.base_asset}', " \
f"quote_asset='{self.quote_asset}', timestamp={self.timestamp}, order_id='{self.order_id}', " \
f"trade_type='{self.trade_type}', order_type='{self.order_type}', price={self.price}, amount={self.amount}, " \
f"leverage={self.leverage}, trade_fee={self.trade_fee}, exchange_trade_id={self.exchange_trade_id}, position={self.position})"
@staticmethod
def get_trades(sql_session: Session,
strategy: str = None,
market: str = None,
trading_pair: str = None,
base_asset: str = None,
quote_asset: str = None,
trade_type: str = None,
order_type: str = None,
start_time: int = None,
end_time: int = None,
) -> Optional[List["TradeFill"]]:
filters = []
if strategy is not None:
filters.append(TradeFill.strategy == strategy)
if market is not None:
filters.append(TradeFill.market == market)
if trading_pair is not None:
filters.append(TradeFill.symbol == trading_pair)
if base_asset is not None:
filters.append(TradeFill.base_asset == base_asset)
if quote_asset is not None:
filters.append(TradeFill.quote_asset == quote_asset)
if trade_type is not None:
filters.append(TradeFill.trade_type == trade_type)
if order_type is not None:
filters.append(TradeFill.order_type == order_type)
if start_time is not None:
filters.append(TradeFill.timestamp >= start_time)
if end_time is not None:
filters.append(TradeFill.timestamp <= end_time)
trades: Optional[List[TradeFill]] = (sql_session
.query(TradeFill)
.filter(*filters)
.order_by(TradeFill.timestamp.asc())
.all())
return trades
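    # Hedged usage sketch (engine/session setup is an assumption, filter values are placeholders):
    #
    #   with Session(engine) as session:
    #       fills = TradeFill.get_trades(session, market="binance", trading_pair="BTC-USDT",
    #                                    start_time=1640995200000)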
@classmethod
def to_pandas(cls, trades: List):
columns: List[str] = ["Id",
"Timestamp",
"Exchange",
"Market",
"Order_type",
"Side",
"Price",
"Amount",
"Leverage",
"Position",
"Age"]
data = []
for trade in trades:
# // indicates order is a paper order so 'n/a'. For real orders, calculate age.
age = "n/a"
if "//" not in trade.order_id:
age = pd.Timestamp(int(trade.timestamp / 1e3 - int(trade.order_id[-16:]) / 1e6), unit='s').strftime('%H:%M:%S')
data.append([
trade.exchange_trade_id,
datetime.fromtimestamp(int(trade.timestamp / 1e3)).strftime("%Y-%m-%d %H:%M:%S"),
trade.market,
trade.symbol,
trade.order_type.lower(),
trade.trade_type.lower(),
trade.price,
trade.amount,
trade.leverage,
trade.position,
age,
])
df = | pd.DataFrame(data=data, columns=columns) | pandas.DataFrame |
import pandas as pd
import numpy as np
import streamlit as st
import math
from utilityfunctions import loadPowerCurve, binWindResourceData, searchSorted, preProcessing, getAEP, checkConstraints
from shapely.geometry import Point # Imported for constraint checking
from shapely.geometry.polygon import Polygon
import plotly.express as px
from geneticalgorithm import geneticalgorithm as ga
import randomsearch as rs
import pyswarms as ps
import random
import warnings
warnings.filterwarnings("ignore")
def optimizer(wind_data, powercurve_data):
st.write('''## Optimizer Result''')
    st.write('Uses optimization algorithms (genetic, random search, particle swarm, greedy) to converge to optimal x and y turbine coordinates')
if wind_data is not None and powercurve_data is not None :
power_curve_data = loadPowerCurve(powercurve_data)
st.success("Powerdata loaded successfully")
wind_data = binWindResourceData(wind_data)
st.success("winddata loaded sucessfully")
# Turbine Specifications.
st.write('''## Turbine Specifications''')
global turb_diam, turb_height
turb_diam = st.number_input("Turbine Diameter (in m)",min_value= 60, max_value=120, value=100, step=1)
turb_height = st.number_input("Turbine Height (in m)",min_value= 80, max_value=140, value=100, step=1)
turb_specs = {
'Name': '<NAME>',
'Vendor': 'Anon Vendor',
'Type': 'Anon Type',
'Dia (m)': turb_diam,
'Rotor Area (m2)': 7853,
'Hub Height (m)': turb_height,
'Cut-in Wind Speed (m/s)': 3.5,
'Cut-out Wind Speed (m/s)': 25,
'Rated Wind Speed (m/s)': 15,
'Rated Power (MW)': 3
}
turb_diam = turb_specs['Dia (m)']
turb_rad = turb_diam/2
power_curve = power_curve_data
wind_inst_freq = wind_data
st.write('''## Field Specifications''')
global n
n = st.number_input("Number of turbines, n",min_value= 10, max_value=60, value=40, step=1)
side = st.slider("side length (in m)", min_value = 100, max_value = 10000, value = 4000) # in m , 100 - 10,000
st.write('''## Constraints''')
peri_constr = st.number_input("Perimeter constraint (in m)",min_value= 10, max_value=100, value=50, step=1) # 10 - 100
prox_constr = st.number_input("Proximity constraint (in m)",min_value= 250, max_value=600, value=400, step=1) # 250-800
n_wind_instances, cos_dir, sin_dir, wind_sped_stacked, C_t = preProcessing(power_curve, n)
st.write('''## Select the Algorithms to use''')
if st.checkbox('Genetic Algorithm', value=False):
col1, col2 = st.beta_columns([0.5, 9.5])
col2.subheader("Using Genetic Algorithm for optimization")
max_iter = col2.slider("Max Number of Iterations", min_value = 10, max_value = 1000, value = 50)
population_size = col2.number_input("Population size",min_value= 10, max_value=100, value= 30, step=1)
var_bound = np.array([[peri_constr,side - peri_constr]]*(2*n))
algorithm_param = {'max_num_iteration':max_iter,\
'population_size':population_size,\
'mutation_probability':0.1,\
'elit_ratio': 0.2,\
'crossover_probability': 0.5,\
'parents_portion': 0.3,\
'crossover_type':'uniform',\
'max_iteration_without_improv':150}
col2.write('If values are set click on run')
if col2.button('Run'):
def f(z):
pen = 0
for i in range(n):
for j in range(i):
dist = math.sqrt((z[i]-z[j])**2+(z[n+i]-z[n+j])**2)
if dist>prox_constr:
pen = pen + 600 + 1000*dist
data_dict = {'x':list(z[0:n]),'y':list(z[n:2*n])}
df1 = pd.DataFrame(data_dict)
global turb_coords_1
turb_coords_1 = df1.to_numpy(dtype = np.float32)
AEP = getAEP(turb_rad, turb_coords_1, power_curve, wind_inst_freq, n_wind_instances, cos_dir, sin_dir, wind_sped_stacked, C_t)
return (pen-AEP)
model = ga(function=f,dimension=n*2,variable_boundaries=var_bound,variable_type='real',algorithm_parameters=algorithm_param)
col2.write("model is running. please wait")
model.run()
checkConstraints(turb_coords_1, turb_diam)
col2.subheader('Optimized AEP obtained:')
col2.write(getAEP(turb_rad, turb_coords_1, power_curve, wind_inst_freq, n_wind_instances, cos_dir, sin_dir, wind_sped_stacked, C_t))
col2.subheader('Optimal Coordinates')
col2.write(turb_coords_1)
# Plot
col2.subheader('Field Plot')
fig = px.scatter(turb_coords_1, x = turb_coords_1[:, 0], y = turb_coords_1[:, 1])
col2.plotly_chart(fig, use_container_width=True)
if st.checkbox('Random Search Algorithm', value=False):
col1, col2 = st.beta_columns([0.5, 9.5])
col2.subheader("Using RS for optimization")
max_iter = col2.slider("Max Number of Iterations", min_value = 10, max_value = 1000, value = 50)
col2.write('If values are set click on run')
if col2.button('Run'):
def f(z):
pen = 0
for i in range(n):
for j in range(i):
dist = math.sqrt((z[i]-z[j])**2+(z[n+i]-z[n+j])**2)
if dist>prox_constr:
pen = pen + 600 + 1000*dist
data_dict = {'x':list(z[0:n]),'y':list(z[n:2*n])}
df1 = pd.DataFrame(data_dict)
global turb_coords_2
turb_coords_2 = df1.to_numpy(dtype = np.float32)
AEP = getAEP(turb_rad, turb_coords_2, power_curve, wind_inst_freq, n_wind_instances, cos_dir, sin_dir, wind_sped_stacked, C_t)
return (AEP-pen)
col2.write("model is running. please wait")
a, b = rs.optimize(function=f, dimensions=n*2, lower_boundary=np.array([peri_constr]*(n*2)), upper_boundary= np.array([side-peri_constr]*(n*2)), max_iter=1000, maximize=True)
col2.write('a:'); st.write(a)
data_dict = {'x':list(b[0:n]),'y':list(b[n:2*n])}
df1 = pd.DataFrame(data_dict)
global turb_coords_final
turb_coords_final = df1.to_numpy(dtype = np.float32)
checkConstraints(turb_coords_final, turb_diam)
col2.write('Optimized AEP obtained:')
col2.write(getAEP(turb_rad, turb_coords_final, power_curve, wind_inst_freq, n_wind_instances, cos_dir, sin_dir, wind_sped_stacked, C_t))
col2.write('Optimal Coordinates')
col2.write(turb_coords_final)
# Plot
col2.subheader('Field Plot')
fig = px.scatter(turb_coords_final, x = turb_coords_final[:, 0], y = turb_coords_final[:, 1])
col2.plotly_chart(fig, use_container_width=True)
if st.checkbox('Particle Swarm Algorithm', value=False):
col1, col2 = st.beta_columns([0.5, 9.5])
col2.write("Using Particle Swarm for Optimization")
max_iter = col2.slider("Max Number of Iterations", min_value = 10, max_value = 1000, value = 50)
p = col2.number_input("P Normalization",min_value= 1, max_value=2, value= 2, step=1)
k = col2.number_input("Number Of Neighbours",min_value= 1, max_value= n*2, value= 2, step=1)
col2.write('If values are set click on run')
if col2.button('Run'):
def f(z):
pen = 0
for i in range(n):
for j in range(i):
dist = math.sqrt((z[i]-z[j])**2+(z[n+i]-z[n+j])**2)
if dist>prox_constr:
pen = pen + 600 + 1000*dist
data_dict = {'x':list(z[0:n]),'y':list(z[n:2*n])}
df1 = pd.DataFrame(data_dict)
global turb_coords_3
turb_coords_3 = df1.to_numpy(dtype = np.float32)
# print(z.shape)
AEP = getAEP(turb_rad, turb_coords_3, power_curve, wind_inst_freq, n_wind_instances, cos_dir, sin_dir, wind_sped_stacked, C_t)
return (pen-AEP)
temp = (np.array([peri_constr]),np.array([side-peri_constr]))
# Set-up hyperparameters
options = {'c1': 0.5, 'c2': 0.3, 'w':0.5, 'k': k, 'p': p}
# Call instance of PSO
optimizer = ps.single.LocalBestPSO(n_particles=n*2, dimensions=1, options=options, bounds=temp)
# Perform optimization
col2.write("model is running. please wait")
cost, pos = optimizer.optimize(f, iters=1000)
checkConstraints(turb_coords_3, turb_diam)
col2.subheader('Optimized AEP obtained:')
col2.write(getAEP(turb_rad, turb_coords_3, power_curve, wind_inst_freq, n_wind_instances, cos_dir, sin_dir, wind_sped_stacked, C_t))
col2.subheader('Optimal Coordinates')
col2.write(turb_coords_3)
# Plot
col2.subheader('Field Plot')
fig = px.scatter(turb_coords_3, x = turb_coords_3[:, 0], y = turb_coords_3[:, 1])
col2.plotly_chart(fig, use_container_width=True)
if st.checkbox('Greedy Search Algorithm', value=False):
col1, col2 = st.beta_columns([0.5, 9.5])
col2.write("Using Greedy Search for Optimization")
max_iter = col2.slider("Max Number of Iterations", min_value = 10, max_value = 1000, value = 100)
col2.write('If values are set click on run')
if col2.button('Run'):
x_new = np.zeros(n)
y_new = np.zeros(n)
iter=0
val1 = (int)((int)(side/2)-prox_constr)
val2 = (int)((int)(side/2)+prox_constr)
if n%2==0:
x = np.concatenate( (np.array([val1]*int(n/2)), np.array([val2]*int(n/2)) ))
y = np.concatenate(( np.array([val2]*int(n/2)),np.array([val1]*int(n/2)) ))
else:
                    x = np.concatenate(( np.array([val1]*(int)((n-1)/2)), np.array([val2]*(int)((n+1)/2)) ))
                    y = np.concatenate(( np.array([val2]*(int)((n-1)/2)), np.array([val1]*(int)((n+1)/2)) ))
data_dict = {'x':list(x),'y':list(y)}
df1 = | pd.DataFrame(data_dict) | pandas.DataFrame |
# BSD 3-Clause License
#
# Copyright (c) 2019 Alliance for Sustainable Energy, LLC
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
Created on Wed Nov 7 11:36:00 2018
@author: akumler
This is the second version of the solar forecasting application for GridAPPS-D.
Making some improvements to the code to handle special cases.
"""
# Import packages
import pandas as pd
import numpy as np
from math import *
from datetime import datetime
import math
from pvlib.solarposition import *
from pvlib.atmosphere import *
from pvlib.clearsky import *
from pvlib.irradiance import *
from sklearn.metrics import *
from scipy import stats, integrate
from pspi_simple_v2 import *
import time
from dateutil import tz
"""
Code used to listen and get GHI observation goes here. It will listen for a
date and time, along with a GHI observation. If one desires to use more input,
that can easily be coded in.
Time is in epoch. Timezone is eventually needed, and can be changed in the code.
"""
# How to plan for a 'None' in the middle of the day?
# JSON input goes somewhere here.
# ghi_obs = 200
# date_time = pd.datetime(2013, 8, 17, 5, 22)
# df['time'] = pd.to_datetime(df['time'],unit='m')
# epoch_time = int(time.time())
# current_date = pd.to_datetime(epoch_time, unit='s').round('min')
# current_time = pd.Timestamp.now(tz='US/Mountain').round('min')
# date_time = pd.DatetimeIndex()
def the_forecast(ghi_obs, current_date):
""" Atmospheric constants (can be replaced by observed values) that are mostly
pertaining to the SRRL site in Golden, Colorado for the year 2013. If another
site is desired, seperate analysis should be done for that site to find the
appropriate constant.
ghi_obs float ghi
date
"""
    # Atmospheric Ozone concentration
Ozone_cm = 0.3
# Precipitable water
H20_cm = 1.07
# Aerosol optical depth @ 500nm
AOD500nm = 0.0823
# Aerosol optical depth @ 380nm
AOD380nm = 0.1
# Approximate broadband aerosol optical depth
Taua = 0.05
# Asymmetry factor
Ba = 0.86
# Surface albedo
a_s = 0.2
# Phase function?
b = 0.5 - (0.5 * 0.86)
# Site pressure (not sea-level corrected, pascals)
pressure = 82000
# Altitude (meters)
altitude = 1829.0
# Latitude of site
lat = 39.742
# Longitude of site
lon = -105.18
# Time Zone
tz = 'US/Mountain'
# Clear-sky transmittance. This value is according to the value used in
# Kumler et al. 2018. The code to compute it exists in 'pspi_module.py', and
# can be changed, but it is currently commented out.
clear_transmit = 0.78904
"""
    First, we must check sunrise and sunset. If the sun has set, produce a 0 GHI forecast.
    In addition, we have to handle special cases where a forecast can't be made, or where
    problems with the platform occur, so that the application can recover and continue
    forecasting when necessary.
"""
global previous_obs
try:
ghi_obs
except IndexError:
print('Got a non-numerical GHI observation. Assuming persistent observation')
ghi_obs = previous_obs
# Get a valid time, whether or not we are given one
valid_time = valid_datetime(current_date)
rise_set = get_sun_rise_set_transit(valid_time, lat, lon)
rise_set.reset_index(inplace=True, drop=True)
sunrise = rise_set['sunrise']
sunset = rise_set['sunset']
to_forecast = time_to_forecast(sunrise, sunset, valid_time, timezone=tz)
if (to_forecast == True):
print('Making forecast')
elif (to_forecast == False):
final_ghi_forecast = 0
print('Sun is not up. No forecast needed')
return final_ghi_forecast
# Get the last valid GHI value
global latest_ghi
try:
latest_ghi
except NameError:
print('Not yet defined. This is probably the first run of the day.')
# Could just return an empty 'final_ghi_forecast', or just have it have
# one value, which is None or 0.
latest_ghi = None
else:
        latest_ghi = last_valid_ghi(ghi_obs=ghi_obs)
# Get a valid ghi observation, whether we are given one or not
obs = pd.Series(ghi_obs)
valid_obs = valid_ghi(ghi_obs=obs, latest_ghi=latest_ghi)
# In the original solar forecasting code, the last 15 minutes were removed
# due to possible shading of the pyranometer. This was at the SRRL site, so
# this may not happen everywhere. Because of this, it will not be included
# in this first version of the PSPI application.
# Now that we have a valid date time and ghi observation, we can begin to
# construct the forecast. First we obtain the solar zenith angle (SZA) and
# extraterrestrial radiation.
valid_time_tz = valid_time.tz_localize(tz='MST')
sza_data = spa_python(valid_time, lat, lon, altitude)
sza_valid = valid_sza_data(sza_data)
doy_fraction = valid_time_tz.dayofyear
if (math.isnan(sza_valid['elevation'].iloc[0]) == True):
final_forecast = 0.0
final_forecast = pd.Series(final_forecast, index=valid_time)
return final_forecast
else:
# If the solar zenith angle is greater than 87 degrees, the forecast is not
# computed due to sun proximity on horizon, and thus an erroneous forecast.
apparent_zenith = sza_valid['apparent_zenith'].copy()
zenith = sza_valid['zenith'].copy()
apparent_elev = sza_valid['apparent_elevation'].copy()
ext_data = get_extra_radiation(valid_time, epoch_year=valid_time.year, method='nrel', solar_constant=1366.1)
# Calculate relative and absolute airmass
ghi_r_airmass = get_relative_airmass(apparent_zenith, model='kasten1966')
ghi_a_airmass = get_absolute_airmass(ghi_r_airmass, pressure=pressure)
# Alternate way to calculate Linke turbidity
bird_aod = bird_hulstrom80_aod_bb(aod380=AOD380nm, aod500=AOD500nm)
kasten_linke2 = kasten96_lt(ghi_a_airmass, precipitable_water=1.07, aod_bb=bird_aod)
# Ineichen-Perez clear-sky GHI model
cs_ineichen_perez = ineichen(apparent_zenith, airmass_absolute=ghi_a_airmass, linke_turbidity=kasten_linke2,
altitude=altitude, dni_extra=ext_data)
clearsky_ghi = cs_ineichen_perez['ghi']
clearsky_dni = cs_ineichen_perez['dni']
# Do a last dummy check to make sure the GHI observation is valid.
if (valid_obs[0] < 0):
valid_obs[0] = clearsky_ghi[0]
elif (valid_obs[0] > ext_data[0]):
valid_obs[0] = ext_data[0]
# Run the ERBS model to get direct normal irradiance (DNI)
# Produces DNI, kt (clearness index), and an airmass value
dni = erbs(valid_obs[0], zenith[0], doy_fraction)
actual_dni = dni['dni']
# Calculate future solar zenith angle
# Need to calculate a future SZA.
future_df = future_data(valid_time, apparent_zenith=apparent_zenith, lat=lat,
lon=lon, altitude=altitude, aod380=AOD380nm,
                                aod500=AOD500nm, precipitable_water=H20_cm,
ozone=Ozone_cm, pressure=pressure, asymmetry=Ba,
albedo=a_s)
"""
Just about time to make the forecast.
"""
final_ghi_forecast = ghi_forecast(valid_time, ghi_obs=valid_obs, cs_transmit=clear_transmit,
clearsky_ghi=clearsky_ghi, clearsky_dni=clearsky_dni, dni=actual_dni,
zenith=apparent_zenith, future_zenith=future_df['Future_Apparent_SZA'],
future_cs_ghi=future_df['Future_Clearsky_GHI'],
future_time=future_df['Future_Time'],
albedo=a_s)
return final_ghi_forecast
# ghi_forecast_final = the_forecast(ghi_obs, time=current_date)
#
# print(str(ghi_forecast_final[0]))
if __name__ == '__main__':
# ghi_obs = 200
# epoch_time = 1357140600
# current_date = pd.to_datetime(epoch_time, unit='s').round('min')
# print(current_date)
# ghi_forecast_final = the_forecast(ghi_obs, current_date)
# print(str(ghi_forecast_final[0]))
#
#
# ghi_obs = 200
# # date_time = pd.datetime(2013, 8, 17, 5, 22)
# # df['time'] = pd.to_datetime(df['time'],unit='m')
# epoch_time = int(time.time())
# current_date = pd.to_datetime(epoch_time, unit='s').round('min')
# ghi_forecast_final = the_forecast(ghi_obs, current_date)
# print(str(ghi_forecast_final[0]))
#
#
# ghi_obs = 200
# # date_time = pd.datetime(2013, 8, 17, 5, 22)
# # df['time'] = pd.to_datetime(df['time'],unit='m')
# epoch_time = int(time.time()) + 60
# print(epoch_time)
# current_date = pd.to_datetime(epoch_time, unit='s').round('min')
# print(current_date)
# ghi_forecast_final = the_forecast(ghi_obs, current_date)
# print(str(ghi_forecast_final[0]))
#
ghi_obs = 200
# # date_time = pd.datetime(2013, 8, 17, 5, 22)
# # df['time'] = pd.to_datetime(df['time'],unit='m')
# epoch_time = int(time.time()) + 3600
# # 1543859977
# epoch_time = 1357048800 + 3600 * 2
epoch_time = 1374394442
epoch_time = int(datetime.utcfromtimestamp(epoch_time).replace(tzinfo=tz.gettz('US/Mountain')).timestamp())
    current_date = pd.to_datetime(epoch_time, unit='s')  # api: pandas.to_datetime
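# --- Illustrative sketch (not part of the original application) ---
# The comments near the top of this file describe receiving the time as an epoch
# value and eventually localizing it to a timezone. A minimal, hedged example of
# that conversion step; the 'US/Mountain' zone mirrors the constant used above,
# and the helper name is invented purely for illustration.
def _demo_epoch_to_valid_time(epoch_seconds=1374394442):
    """Convert an epoch timestamp to a minute-rounded, timezone-aware Timestamp."""
    ts = pd.to_datetime(epoch_seconds, unit='s').round('min')  # naive UTC timestamp
    return ts.tz_localize('UTC').tz_convert('US/Mountain')     # localize, then convert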
#!/usr/bin/env python
# Copyright (C) 2019 <NAME>
import os
import logging
import argparse
import numpy as np
import pandas as pd
from crispy import SSGSEA, GSEAplot
from dtrace.DTraceUtils import dpath
from dtrace.Associations import Association
from scipy.stats.distributions import hypergeom
from statsmodels.stats.multitest import multipletests
class DTraceEnrichment:
"""
Gene enrichment analysis class.
"""
def __init__(
self, gmts, sig_min_len=5, verbose=0, padj_method="fdr_bh", permutations=0
):
self.verbose = verbose
self.padj_method = padj_method
self.permutations = permutations
self.sig_min_len = sig_min_len
self.gmts = {f: self.read_gmt(f"{dpath}/pathways/{f}") for f in gmts}
def __assert_gmt_file(self, gmt_file):
assert gmt_file in self.gmts, f"{gmt_file} not in gmt files: {self.gmts.keys()}"
def __assert_signature(self, gmt_file, signature):
self.__assert_gmt_file(gmt_file)
assert signature in self.gmts[gmt_file], f"{signature} not in {gmt_file}"
@staticmethod
def read_gmt(file_path):
with open(file_path) as f:
signatures = {
l.split("\t")[0]: set(l.strip().split("\t")[2:]) for l in f.readlines()
}
return signatures
def gsea(self, values, signature):
return SSGSEA.gsea(values.to_dict(), signature, permutations=self.permutations)
def gsea_enrichments(self, values, gmt_file):
self.__assert_gmt_file(gmt_file)
geneset = self.gmts[gmt_file]
if self.verbose > 0 and type(values) == pd.Series:
logging.getLogger("DTrace").info(f"Values={values.name}")
ssgsea = []
for gset in geneset:
if self.verbose > 1:
logging.getLogger("DTrace").info(f"Gene-set={gset}")
gset_len = len({i for i in geneset[gset] if i in values.index})
e_score, p_value, _, _ = self.gsea(values, geneset[gset])
ssgsea.append(
dict(gset=gset, e_score=e_score, p_value=p_value, len=gset_len)
)
ssgsea = pd.DataFrame(ssgsea).set_index("gset").sort_values("e_score")
if self.sig_min_len is not None:
ssgsea = ssgsea.query(f"len >= {self.sig_min_len}")
if self.permutations > 0:
ssgsea["adj.p_value"] = multipletests(
ssgsea["p_value"], method=self.padj_method
)[1]
return ssgsea
def get_signature(self, gmt_file, signature):
self.__assert_signature(gmt_file, signature)
return self.gmts[gmt_file][signature]
def plot(self, values, gmt_file, signature, vertical_lines=False, shade=False):
if type(signature) == str:
signature = self.get_signature(gmt_file, signature)
e_score, p_value, hits, running_hit = self.gsea(values, signature)
ax = GSEAplot.plot_gsea(
hits,
running_hit,
dataset=values.to_dict(),
vertical_lines=vertical_lines,
shade=shade,
)
return ax
@staticmethod
def hypergeom_test(signature, background, sublist):
"""
Performs hypergeometric test
        Arguments:
signature: {string} - Signature IDs
background: {string} - Background IDs
sublist: {string} - Sub-set IDs
# hypergeom.sf(x, M, n, N, loc=0)
# M: total number of objects,
# n: total number of type I objects
# N: total number of type I objects drawn without replacement
"""
pvalue = hypergeom.sf(
len(sublist.intersection(signature)),
len(background),
len(background.intersection(signature)),
len(sublist),
)
intersection = len(sublist.intersection(signature))
return pvalue, intersection
def hypergeom_enrichments(self, sublist, background, gmt_file):
self.__assert_gmt_file(gmt_file)
geneset = self.gmts[gmt_file]
ssgsea_geneset = []
for gset in geneset:
if self.verbose > 0:
logging.getLogger("DTrace").info(f"Gene-set={gset}")
p_value, intersection = self.hypergeom_test(
signature=geneset[gset], background=background, sublist=sublist
)
ssgsea_geneset.append(
dict(
gset=gset,
p_value=p_value,
len_sig=len(geneset[gset]),
len_intersection=intersection,
)
)
ssgsea_geneset = (
            pd.DataFrame(ssgsea_geneset)  # api: pandas.DataFrame
        )
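# Hedged, self-contained illustration of the hypergeometric test documented in
# DTraceEnrichment.hypergeom_test above. The gene identifiers below are invented
# purely for demonstration; only the call pattern mirrors the method.
def _demo_hypergeom_test():
    signature = {"G1", "G2", "G3", "G4"}
    background = {f"G{i}" for i in range(1, 101)}
    sublist = {"G1", "G2", "G50", "G60"}
    # Upper-tail probability P(X > overlap) under the hypergeometric null,
    # using the same argument order as hypergeom.sf in hypergeom_test.
    pvalue = hypergeom.sf(
        len(sublist.intersection(signature)),
        len(background),
        len(background.intersection(signature)),
        len(sublist),
    )
    return pvalue, len(sublist.intersection(signature))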
from itertools import groupby, zip_longest
from fractions import Fraction
from random import sample
import json
import pandas as pd
import numpy as np
import music21 as m21
from music21.meter import TimeSignatureException
m21.humdrum.spineParser.flavors['JRP'] = True
from collections import defaultdict
#song has no meter
class UnknownPGramType(Exception):
def __init__(self, arg):
self.arg = arg
def __str__(self):
return f"Unknown pgram type: {self.arg}."
#compute features:
def compute_completesmeasure_phrase(seq, ix, start_ix):
endpos = Fraction(seq['features']['beatinphrase'][ix]) - \
Fraction(seq['features']['beatinphrase'][start_ix]) + \
Fraction(seq['features']['IOI_beatfraction'][ix])
return endpos % seq['features']['beatspermeasure'][ix] == 0
def compute_completesbeat_phrase(seq, ix, start_ix):
endpos = Fraction(seq['features']['beatinphrase'][ix]) - \
Fraction(seq['features']['beatinphrase'][start_ix]) + \
Fraction(seq['features']['IOI_beatfraction'][ix])
return endpos % 1 == 0
def compute_completesmeasure_song(seq, ix):
endpos = Fraction(seq['features']['beatinphrase'][ix]) - \
Fraction(seq['features']['beatinphrase'][0]) + \
Fraction(seq['features']['IOI_beatfraction'][ix])
return endpos % seq['features']['beatspermeasure'][ix] == 0
def compute_completesbeat_song(seq, ix):
endpos = Fraction(seq['features']['beatinphrase'][ix]) - \
Fraction(seq['features']['beatinphrase'][0]) + \
Fraction(seq['features']['IOI_beatfraction'][ix])
return endpos % 1 == 0
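# Minimal, hedged illustration (toy values only) of the Fraction-based beat
# arithmetic used by the compute_completes* helpers above: a span starting at
# beat 0 and lasting exactly 4 beats closes on a measure boundary in 4/4 time.
def _demo_completes_measure(beatspermeasure=4):
    endpos = Fraction('0') - Fraction('0') + Fraction('4')
    return endpos % beatspermeasure == 0  # True: ends exactly on a barline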
#extract IOI in units of beat
#IOI_beatfraction[i] is IOI from start of ith note till start of (i+1)th note
#for last note: beatfraction is taken
#Also to be interpreted as duration of note + duration of following rests (except for rests at end of melody)
#
#extract beats per measure
def extractFeatures(seq_iter, vocalfeatures=True):
count = 0
for seq in seq_iter:
count += 1
if count % 100 == 0:
print(count, end=' ')
pairs = zip(seq['features']['beatinsong'],seq['features']['beatinsong'][1:]) #this possibly includes rests
IOI_beatfraction = [Fraction(o[1])-Fraction(o[0]) for o in pairs]
IOI_beatfraction = [str(bf) for bf in IOI_beatfraction] + [seq['features']['beatfraction'][-1]]
seq['features']['IOI_beatfraction'] = IOI_beatfraction
beatspermeasure = [m21.meter.TimeSignature(ts).beatCount for ts in seq['features']['timesignature']]
seq['features']['beatspermeasure'] = beatspermeasure
phrasepos = seq['features']['phrasepos']
phrasestart_ix=[0]*len(phrasepos)
for ix in range(1,len(phrasestart_ix)):
if phrasepos[ix] < phrasepos[ix-1]:
phrasestart_ix[ix] = ix
else:
phrasestart_ix[ix] = phrasestart_ix[ix-1]
seq['features']['phrasestart_ix'] = phrasestart_ix
endOfPhrase = [x[1]<x[0] for x in zip(phrasepos, phrasepos[1:])] + [True]
seq['features']['endOfPhrase'] = endOfPhrase
cm_p = [compute_completesmeasure_phrase(seq, ix, phrasestart_ix[ix]) for ix in range(len(phrasepos))]
cb_p = [compute_completesbeat_phrase(seq, ix, phrasestart_ix[ix]) for ix in range(len(phrasepos))]
cm_s = [compute_completesmeasure_song(seq, ix) for ix in range(len(phrasepos))]
cb_s = [compute_completesbeat_song(seq, ix) for ix in range(len(phrasepos))]
seq['features']['completesmeasure_phrase'] = cm_p
seq['features']['completesbeat_phrase'] = cb_p
seq['features']['completesmeasure_song'] = cm_s
seq['features']['completesbeat_song'] = cb_s
if vocalfeatures:
#move lyric features to end of melisma:
#rhymes, rhymescontentwords, wordstress, noncontentword, wordend
#and compute rhyme_noteoffset and rhyme_beatoffset
if 'melismastate' in seq['features'].keys(): #vocal?
lyrics = seq['features']['lyrics']
phoneme = seq['features']['phoneme']
melismastate = seq['features']['melismastate']
rhymes = seq['features']['rhymes']
rhymescontentwords = seq['features']['rhymescontentwords']
wordend = seq['features']['wordend']
noncontentword = seq['features']['noncontentword']
wordstress = seq['features']['wordstress']
rhymes_endmelisma, rhymescontentwords_endmelisma = [], []
wordend_endmelisma, noncontentword_endmelisma, wordstress_endmelisma = [], [], []
lyrics_endmelisma, phoneme_endmelisma = [], []
from_ix = 0
inmelisma = False
for ix in range(len(phrasepos)):
if melismastate[ix] == 'start':
from_ix = ix
inmelisma = True
if melismastate[ix] == 'end':
if not inmelisma:
from_ix = ix
inmelisma = False
rhymes_endmelisma.append(rhymes[from_ix])
rhymescontentwords_endmelisma.append(rhymescontentwords[from_ix])
wordend_endmelisma.append(wordend[from_ix])
noncontentword_endmelisma.append(noncontentword[from_ix])
wordstress_endmelisma.append(wordstress[from_ix])
lyrics_endmelisma.append(lyrics[from_ix])
phoneme_endmelisma.append(phoneme[from_ix])
else:
rhymes_endmelisma.append(False)
rhymescontentwords_endmelisma.append(False)
wordend_endmelisma.append(False)
noncontentword_endmelisma.append(False)
wordstress_endmelisma.append(False)
lyrics_endmelisma.append(None)
phoneme_endmelisma.append(None)
seq['features']['rhymes_endmelisma'] = rhymes_endmelisma
seq['features']['rhymescontentwords_endmelisma'] = rhymescontentwords_endmelisma
seq['features']['wordend_endmelisma'] = wordend_endmelisma
seq['features']['noncontentword_endmelisma'] = noncontentword_endmelisma
seq['features']['wordstress_endmelisma'] = wordstress_endmelisma
seq['features']['lyrics_endmelisma'] = lyrics_endmelisma
seq['features']['phoneme_endmelisma'] = phoneme_endmelisma
#compute rhyme_noteoffset and rhyme_beatoffset
rhyme_noteoffset = [0]
rhyme_beatoffset = [0.0]
previous = 0
previousbeat = float(Fraction(seq['features']['beatinsong'][0]))
for ix in range(1,len(rhymescontentwords_endmelisma)):
if rhymescontentwords_endmelisma[ix-1]: #previous rhymes
previous = ix
previousbeat = float(Fraction(seq['features']['beatinsong'][ix]))
rhyme_noteoffset.append(ix - previous)
rhyme_beatoffset.append(float(Fraction(seq['features']['beatinsong'][ix])) - previousbeat)
seq['features']['rhymescontentwords_noteoffset'] = rhyme_noteoffset
seq['features']['rhymescontentwords_beatoffset'] = rhyme_beatoffset
else:
#vocal features requested, but not present.
#skip melody
continue
#Or do this?
if False:
length = len(phrasepos)
seq['features']['rhymes_endmelisma'] = [None] * length
seq['features']['rhymescontentwords_endmelisma'] = [None] * length
seq['features']['wordend_endmelisma'] = [None] * length
seq['features']['noncontentword_endmelisma'] = [None] * length
seq['features']['wordstress_endmelisma'] = [None] * length
seq['features']['lyrics_endmelisma'] = [None] * length
seq['features']['phoneme_endmelisma'] = [None] * length
yield seq
class NoFeaturesError(Exception):
def __init__(self, arg):
self.args = arg
class NoTrigramsError(Exception):
def __init__(self, arg):
self.args = arg
def __str__(self):
return repr(self.value)
#endix is index of last note + 1
def computeSumFractions(fractions, startix, endix):
res = 0.0
for fr in fractions[startix:endix]:
res = res + float(Fraction(fr))
return res
#make groups of indices with the same successive pitch, but (optionally) not crossing phrase boundaries <- 20200331 crossing phrase boundaries should be allowed (contourfourth)
#returns tuples (ix of first note in group, ix of last note in group + 1)
#crossPhraseBreak=False splits on phrase break. N.B. Is Using GroundTruth!
def breakpitchlist(midipitch, phrase_ix, crossPhraseBreak=False):
res = []
if crossPhraseBreak:
for _, g in groupby( enumerate(midipitch), key=lambda x:x[1]):
glist = list(g)
res.append( (glist[0][0], glist[-1][0]+1) )
else: #N.B. This uses the ground truth
for _, g in groupby( enumerate(zip(midipitch,phrase_ix)), key=lambda x:(x[1][0],x[1][1])):
glist = list(g)
res.append( (glist[0][0], glist[-1][0]+1) )
return res
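# Hedged toy example of the span grouping performed by breakpitchlist above:
# repeated pitches collapse into (first_index, last_index + 1) spans.
def _demo_breakpitchlist():
    midipitch = [60, 60, 62, 64, 64, 64]
    phrase_ix = [0, 0, 0, 0, 0, 0]
    # Expected result: [(0, 2), (2, 3), (3, 6)]
    return breakpitchlist(midipitch, phrase_ix)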
#True if no phrase end at first or second item (span) in the trigram
#trigram looks like ((8, 10), (10, 11), (11, 12))
def noPhraseBreak(tr, endOfPhrase):
return not ( ( True in endOfPhrase[tr[0][0]:tr[0][1]] ) or \
( True in endOfPhrase[tr[1][0]:tr[1][1]] ) )
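# Hedged toy example for noPhraseBreak above: this trigram is rejected because its
# second span covers a phrase-final note (endOfPhrase[2] is True).
def _demo_noPhraseBreak():
    endOfPhrase = [False, False, True, False, False]
    trigram = ((0, 2), (2, 3), (3, 5))
    return noPhraseBreak(trigram, endOfPhrase)  # False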
#pgram_type : "pitch", "note"
def extractPgramsFromCorpus(corpus, pgram_type="pitch", startat=0, endat=None):
pgrams = {}
arfftype = {}
for ix, seq in enumerate(corpus):
if endat is not None:
if ix >= endat:
continue
if ix < startat:
continue
if not ix%100:
print(ix, end=' ')
songid = seq['id']
try:
pgrams[songid], arfftype_new = extractPgramsFromMelody(seq, pgram_type=pgram_type)
_,_ = addCrossRelations(pgrams[songid], arfftype_new, 'interval', newname='intervalsize', typeconv=lambda x: abs(int(x)))
_,_ = addCrossRelations(pgrams[songid], arfftype_new, 'interval', newname='intervaldir', typeconv=np.sign)
_,_ = addCrossRelations(pgrams[songid], arfftype_new, 'diatonicpitch', typeconv=int)
_,_ = addCrossRelations(pgrams[songid], arfftype_new, 'VosHarmony', typeconv=int)
_,_ = addCrossRelations(pgrams[songid], arfftype_new, 'beatstrength', typeconv=float)
_,_ = addCrossRelations(pgrams[songid], arfftype_new, 'IOIbeatfraction', typeconv=float)
if 'melismastate' in seq['features'].keys():
_,_ = addCrossRelations(pgrams[songid], arfftype_new, 'wordstress', typeconv=int)
if 'informationcontent' in seq['features'].keys():
_,_ = addCrossRelations(pgrams[songid], arfftype_new, 'informationcontent', typeconv=float)
except NoFeaturesError:
print(songid, ": No features extracted.")
except NoTrigramsError:
print(songid, ": No trigrams extracted")
#if ix > startat:
# if arfftype.keys() != arfftype_new.keys():
# print("Warning: Melodies have different feature sets.")
# print(list(zip_longest(arfftype.keys(), arfftype_new.keys())))
#Keep largest set of features possible. N.B. no guarantee that all features in arfftype are in each sequence.
arfftype.update(arfftype_new)
#concat melodies
pgrams = pd.concat([v for v in pgrams.values()])
return pgrams, arfftype
def extractPgramsFromMelody(seq, pgram_type, skipPhraseCrossing=False):
# some aliases
scaledegree = seq['features']['scaledegree']
endOfPhrase = seq['features']['endOfPhrase']
midipitch = seq['features']['midipitch']
phrase_ix = seq['features']['phrase_ix']
if pgram_type == "pitch":
event_spans = breakpitchlist(midipitch, phrase_ix) #allow pitches to cross phrase break
elif pgram_type == "note":
event_spans = list(zip(range(len(scaledegree)),range(1,len(scaledegree)+1)))
else:
raise UnknownPGramType(pgram_type)
# make trigram of spans
event_spans = event_spans + [(None, None), (None, None)]
pgram_span_ixs = list(zip(event_spans,event_spans[1:],event_spans[2:],event_spans[3:],event_spans[4:]))
# If skipPhraseCrossing prune trigrams crossing phrase boundaries. WHY?
    #Why actually? e.g. kindr154 phrases of 2 pitches
if skipPhraseCrossing:
pgram_span_ixs = [ixs for ixs in pgram_span_ixs if noPhraseBreak(ixs,endOfPhrase)]
if len(pgram_span_ixs) == 0:
raise NoTrigramsError(seq['id'])
# create dataframe with pgram names as index
pgram_ids = [seq["id"]+'_'+str(ixs[0][0]).zfill(3) for ixs in pgram_span_ixs]
pgrams = pd.DataFrame(index=pgram_ids)
pgrams['ix0_0'] = pd.array([ix[0][0] for ix in pgram_span_ixs], dtype="Int16")
pgrams['ix0_1'] = pd.array([ix[0][1] for ix in pgram_span_ixs], dtype="Int16")
pgrams['ix1_0'] = pd.array([ix[1][0] for ix in pgram_span_ixs], dtype="Int16")
pgrams['ix1_1'] = pd.array([ix[1][1] for ix in pgram_span_ixs], dtype="Int16")
pgrams['ix2_0'] = pd.array([ix[2][0] for ix in pgram_span_ixs], dtype="Int16")
pgrams['ix2_1'] = pd.array([ix[2][1] for ix in pgram_span_ixs], dtype="Int16")
pgrams['ix3_0'] = pd.array([ix[3][0] for ix in pgram_span_ixs], dtype="Int16")
pgrams['ix3_1'] = pd.array([ix[3][1] for ix in pgram_span_ixs], dtype="Int16")
pgrams['ix4_0'] = pd.array([ix[4][0] for ix in pgram_span_ixs], dtype="Int16")
pgrams['ix4_1'] = pd.array([ix[4][1] for ix in pgram_span_ixs], dtype="Int16")
#add tune family ids and songids
pgrams['tunefamily'] = seq['tunefamily']
pgrams['songid'] = seq['id']
pgrams, arfftype = extractPgramFeatures(pgrams, seq)
return pgrams, arfftype
def getBeatDuration(timesig):
try:
dur = float(m21.meter.TimeSignature(timesig).beatDuration.quarterLength)
except TimeSignatureException:
dur = float(Fraction(timesig) / Fraction('1/4'))
return dur
def oneCrossRelation(el1, el2, typeconv):
if pd.isna(el1) or pd.isna(el2):
return np.nan
return '-' if typeconv(el2) < typeconv(el1) else '=' if typeconv(el1) == typeconv(el2) else '+'
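# Hedged illustration of the cross-relation encoding above: '+' means the second
# value is higher after type conversion, '=' equal, '-' lower (toy pitches only).
def _demo_oneCrossRelation():
    return [
        oneCrossRelation(60, 64, int),  # '+'
        oneCrossRelation(64, 64, int),  # '='
        oneCrossRelation(64, 60, int),  # '-'
    ]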
def addCrossRelations(pgrams, arfftype, featurename, newname=None, typeconv=int):
postfixes = {
1 : 'first',
2 : 'second',
3 : 'third',
4 : 'fourth',
5 : 'fifth'
}
if newname is None:
newname = featurename
for ix1 in range(1,6):
for ix2 in range(ix1+1,6):
featname = newname + postfixes[ix1] + postfixes[ix2]
source = zip(pgrams[featurename + postfixes[ix1]], pgrams[featurename + postfixes[ix2]])
pgrams[featname] = [oneCrossRelation(el1, el2, typeconv) for (el1, el2) in source]
arfftype[featname] = '{-,=,+}'
return pgrams, arfftype
def extractPgramFeatures(pgrams, seq):
# vocal?
vocal = False
if 'melismastate' in seq['features'].keys():
vocal = True
arfftype = {}
# some aliases
scaledegree = seq['features']['scaledegree']
beatstrength = seq['features']['beatstrength']
diatonicpitch = seq['features']['diatonicpitch']
midipitch = seq['features']['midipitch']
chromaticinterval = seq['features']['chromaticinterval']
timesig = seq['features']['timesignature']
metriccontour = seq['features']['metriccontour']
beatinsong = seq['features']['beatinsong']
beatinphrase = seq['features']['beatinphrase']
endOfPhrase = seq['features']['endOfPhrase']
phrasestart_ix = seq['features']['phrasestart_ix']
phrase_ix = seq['features']['phrase_ix']
completesmeasure_song = seq['features']['completesmeasure_song']
completesbeat_song = seq['features']['completesbeat_song']
completesmeasure_phrase = seq['features']['completesmeasure_phrase']
completesbeat_phrase = seq['features']['completesbeat_phrase']
IOIbeatfraction = seq['features']['IOI_beatfraction']
nextisrest = seq['features']['nextisrest']
gpr2a = seq['features']['gpr2a_Frankland']
gpr2b = seq['features']['gpr2b_Frankland']
gpr3a = seq['features']['gpr3a_Frankland']
gpr3d = seq['features']['gpr3d_Frankland']
gprsum = seq['features']['gpr_Frankland_sum']
pprox = seq['features']['pitchproximity']
prev = seq['features']['pitchreversal']
lbdmpitch = seq['features']['lbdm_spitch']
lbdmioi = seq['features']['lbdm_sioi']
lbdmrest = seq['features']['lbdm_srest']
lbdm = seq['features']['lbdm_boundarystrength']
if vocal:
wordstress = seq['features']['wordstress_endmelisma']
noncontentword = seq['features']['noncontentword_endmelisma']
wordend = seq['features']['wordend_endmelisma']
rhymescontentwords = seq['features']['rhymescontentwords_endmelisma']
rhymescontentwords_noteoffset = seq['features']['rhymescontentwords_noteoffset']
rhymescontentwords_beatoffset = seq['features']['rhymescontentwords_beatoffset']
melismastate = seq['features']['melismastate']
phrase_count = max(phrase_ix) + 1
pgrams['scaledegreefirst'] = pd.array([scaledegree[int(ix)] for ix in pgrams['ix0_0']], dtype="Int16")
pgrams['scaledegreesecond'] = pd.array([scaledegree[int(ix)] for ix in pgrams['ix1_0']], dtype="Int16")
pgrams['scaledegreethird'] = pd.array([scaledegree[int(ix)] for ix in pgrams['ix2_0']], dtype="Int16")
pgrams['scaledegreefourth'] = pd.array([scaledegree[int(ix)] if not pd.isna(ix) else np.nan for ix in pgrams['ix3_0']], dtype="Int16")
pgrams['scaledegreefifth'] = pd.array([scaledegree[int(ix)] if not pd.isna(ix) else np.nan for ix in pgrams['ix4_0']], dtype="Int16")
arfftype['scaledegreefirst'] = 'numeric'
arfftype['scaledegreesecond'] = 'numeric'
arfftype['scaledegreethird'] = 'numeric'
arfftype['scaledegreefourth'] = 'numeric'
arfftype['scaledegreefifth'] = 'numeric'
pgrams['diatonicpitchfirst'] = pd.array([diatonicpitch[int(ix)] for ix in pgrams['ix0_0']], dtype="Int16")
pgrams['diatonicpitchsecond'] = pd.array([diatonicpitch[int(ix)] for ix in pgrams['ix1_0']], dtype="Int16")
pgrams['diatonicpitchthird'] = pd.array([diatonicpitch[int(ix)] for ix in pgrams['ix2_0']], dtype="Int16")
pgrams['diatonicpitchfourth'] = pd.array([diatonicpitch[int(ix)] if not pd.isna(ix) else np.nan for ix in pgrams['ix3_0']], dtype="Int16")
pgrams['diatonicpitchfifth'] = pd.array([diatonicpitch[int(ix)] if not pd.isna(ix) else np.nan for ix in pgrams['ix4_0']], dtype="Int16")
arfftype['diatonicpitchfirst'] = 'numeric'
arfftype['diatonicpitchsecond'] = 'numeric'
arfftype['diatonicpitchthird'] = 'numeric'
arfftype['diatonicpitchfourth'] = 'numeric'
arfftype['diatonicpitchfifth'] = 'numeric'
pgrams['midipitchfirst'] = pd.array([midipitch[int(ix)] for ix in pgrams['ix0_0']], dtype="Int16")
pgrams['midipitchsecond'] = pd.array([midipitch[int(ix)] for ix in pgrams['ix1_0']], dtype="Int16")
pgrams['midipitchthird'] = pd.array([midipitch[int(ix)] for ix in pgrams['ix2_0']], dtype="Int16")
pgrams['midipitchfourth'] = pd.array([midipitch[int(ix)] if not pd.isna(ix) else np.nan for ix in pgrams['ix3_0']], dtype="Int16")
pgrams['midipitchfifth'] = pd.array([midipitch[int(ix)] if not pd.isna(ix) else np.nan for ix in pgrams['ix4_0']], dtype="Int16")
arfftype['midipitchfirst'] = 'numeric'
arfftype['midipitchsecond'] = 'numeric'
arfftype['midipitchthird'] = 'numeric'
arfftype['midipitchfourth'] = 'numeric'
arfftype['midipitchfifth'] = 'numeric'
pgrams['intervalfirst'] = pd.array([chromaticinterval[int(ix)] if not pd.isna(ix) else np.nan for ix in pgrams['ix0_0']], dtype="Int16")
pgrams['intervalsecond'] = pd.array([chromaticinterval[int(ix)] for ix in pgrams['ix1_0']], dtype="Int16")
pgrams['intervalthird'] = pd.array([chromaticinterval[int(ix)] for ix in pgrams['ix2_0']], dtype="Int16")
pgrams['intervalfourth'] = pd.array([chromaticinterval[int(ix)] if not pd.isna(ix) else np.nan for ix in pgrams['ix3_0']], dtype="Int16")
pgrams['intervalfifth'] = pd.array([chromaticinterval[int(ix)] if not pd.isna(ix) else np.nan for ix in pgrams['ix4_0']], dtype="Int16")
arfftype['intervalfirst'] = 'numeric'
arfftype['intervalsecond'] = 'numeric'
arfftype['intervalthird'] = 'numeric'
arfftype['intervalfourth'] = 'numeric'
arfftype['intervalfifth'] = 'numeric'
parsons = {-1:'-', 0:'=', 1:'+'}
#intervalcontour is not a good feature. Pitchcontour would be better. This will be in the cross-relations
#pgrams['intervalcontoursecond'] = [parsons[np.sign(int2 - int1)] if not pd.isna(int1) else np.nan for int1, int2 in \
# zip(pgrams['intervalfirst'],pgrams['intervalsecond'])]
#pgrams['intervalcontourthird'] = [parsons[np.sign(int2 - int1)] for int1, int2 in \
# zip(pgrams['intervalsecond'],pgrams['intervalthird'])]
#pgrams['intervalcontourfourth'] = [parsons[np.sign(int2 - int1)] if not pd.isna(int2) else np.nan for int1, int2 in \
# zip(pgrams['intervalthird'],pgrams['intervalfourth'])]
#pgrams['intervalcontourfifth'] = [parsons[np.sign(int2 - int1)] if not pd.isna(int2) else np.nan for int1, int2 in \
# zip(pgrams['intervalfourth'],pgrams['intervalfifth'])]
#arfftype['intervalcontoursecond'] = '{-,=,+}'
#arfftype['intervalcontourthird'] = '{-,=,+}'
#arfftype['intervalcontourfourth'] = '{-,=,+}'
#arfftype['intervalcontourfifth'] = '{-,=,+}'
#intervals of which second tone has center of gravity according to Vos 2002 + octave equivalents
VosCenterGravityASC = np.array([1, 5, 8])
VosCenterGravityDESC = np.array([-2, -4, -6, -7, -11])
VosCenterGravity = list(VosCenterGravityDESC-24) + \
list(VosCenterGravityDESC-12) + \
list(VosCenterGravityDESC) + \
list(VosCenterGravityASC) + \
list(VosCenterGravityASC+12) + \
list(VosCenterGravityASC+24)
pgrams['VosCenterGravityfirst'] = [interval in VosCenterGravity if not pd.isna(interval) else np.nan for interval in pgrams['intervalfirst']]
pgrams['VosCenterGravitysecond'] = [interval in VosCenterGravity for interval in pgrams['intervalsecond']]
pgrams['VosCenterGravitythird'] = [interval in VosCenterGravity for interval in pgrams['intervalthird']]
pgrams['VosCenterGravityfourth'] = [interval in VosCenterGravity if not pd.isna(interval) else np.nan for interval in pgrams['intervalfourth']]
pgrams['VosCenterGravityfifth'] = [interval in VosCenterGravity if not pd.isna(interval) else np.nan for interval in pgrams['intervalfifth']]
arfftype['VosCenterGravityfirst'] = '{True, False}'
arfftype['VosCenterGravitysecond'] = '{True, False}'
arfftype['VosCenterGravitythird'] = '{True, False}'
arfftype['VosCenterGravityfourth'] = '{True, False}'
arfftype['VosCenterGravityfifth'] = '{True, False}'
VosHarmony = {
0: 0,
1: 2,
2: 3,
3: 4,
4: 5,
5: 6,
6: 1,
7: 6,
8: 5,
9: 4,
10: 3,
11: 2,
12: 7
}
#interval modulo one octave, but 0 only for absolute unison (Vos 2002, p.633)
def vosint(intervals):
return [((np.sign(i)*i-1)%12+1 if i!=0 else 0) if not pd.isna(i) else np.nan for i in intervals]
pgrams['VosHarmonyfirst'] = pd.array([VosHarmony[interval] if not pd.isna(interval) else np.nan for interval in vosint(pgrams['intervalfirst'])], dtype="Int16")
pgrams['VosHarmonysecond'] = pd.array([VosHarmony[interval] for interval in vosint(pgrams['intervalsecond'])], dtype="Int16")
pgrams['VosHarmonythird'] = pd.array([VosHarmony[interval] for interval in vosint(pgrams['intervalthird'])], dtype="Int16")
pgrams['VosHarmonyfourth'] = pd.array([VosHarmony[interval] if not pd.isna(interval) else np.nan for interval in vosint(pgrams['intervalfourth'])], dtype="Int16")
pgrams['VosHarmonyfifth'] = pd.array([VosHarmony[interval] if not pd.isna(interval) else np.nan for interval in vosint(pgrams['intervalfifth'])], dtype="Int16")
arfftype['VosHarmonyfirst'] = 'numeric'
arfftype['VosHarmonysecond'] = 'numeric'
arfftype['VosHarmonythird'] = 'numeric'
arfftype['VosHarmonyfourth'] = 'numeric'
arfftype['VosHarmonyfifth'] = 'numeric'
if 'informationcontent' in seq['features'].keys():
informationcontent = seq['features']['informationcontent']
pgrams['informationcontentfirst'] = [informationcontent[int(ix)] for ix in pgrams['ix0_0']]
pgrams['informationcontentsecond'] = [informationcontent[int(ix)] for ix in pgrams['ix1_0']]
pgrams['informationcontentthird'] = [informationcontent[int(ix)] for ix in pgrams['ix2_0']]
pgrams['informationcontentfourth'] = [informationcontent[int(ix)] if not pd.isna(ix) else np.nan for ix in pgrams['ix3_0']]
pgrams['informationcontentfifth'] = [informationcontent[int(ix)] if not pd.isna(ix) else np.nan for ix in pgrams['ix4_0']]
arfftype['informationcontentfirst'] = 'numeric'
arfftype['informationcontentsecond'] = 'numeric'
arfftype['informationcontentthird'] = 'numeric'
arfftype['informationcontentfourth'] = 'numeric'
arfftype['informationcontentfifth'] = 'numeric'
pgrams['contourfirst'] = [parsons[np.sign(i)] if not pd.isna(i) else np.nan for i in pgrams['intervalfirst']]
pgrams['contoursecond'] = [parsons[np.sign(i)] for i in pgrams['intervalsecond']]
pgrams['contourthird'] = [parsons[np.sign(i)] for i in pgrams['intervalthird']]
pgrams['contourfourth'] = [parsons[np.sign(i)] if not pd.isna(i) else np.nan for i in pgrams['intervalfourth']]
pgrams['contourfifth'] = [parsons[np.sign(i)] if not pd.isna(i) else np.nan for i in pgrams['intervalfifth']]
arfftype['contourfirst'] = '{-,=,+}'
arfftype['contoursecond'] = '{-,=,+}'
arfftype['contourthird'] = '{-,=,+}'
arfftype['contourfourth'] = '{-,=,+}'
arfftype['contourfifth'] = '{-,=,+}'
###########################################3
#derived features from Interval and Contour
pgrams['registraldirectionchange'] = [cont_sec != cont_third for cont_sec, cont_third in \
zip(pgrams['contoursecond'], pgrams['contourthird'])]
arfftype['registraldirectionchange'] = '{True, False}'
pgrams['largetosmall'] = [int_first >= 6 and int_second <=4 for int_first, int_second in \
zip(pgrams['intervalsecond'], pgrams['intervalthird'])]
arfftype['largetosmall'] = '{True, False}'
pgrams['contourreversal'] = [(i[0] == '-' and i[1] == '+') or (i[0]=='+' and i[1]=='-') \
for i in zip(pgrams['contoursecond'], pgrams['contourthird'])]
arfftype['contourreversal'] = '{True, False}'
pgrams['isascending'] = \
(pgrams['diatonicpitchfirst'] < pgrams['diatonicpitchsecond']) & \
(pgrams['diatonicpitchsecond'] < pgrams['diatonicpitchthird'])
arfftype['isascending'] = '{True, False}'
pgrams['isdescending'] = \
(pgrams['diatonicpitchfirst'] > pgrams['diatonicpitchsecond']) & \
(pgrams['diatonicpitchsecond'] > pgrams['diatonicpitchthird'])
arfftype['isdescending'] = '{True, False}'
diat = pgrams[['diatonicpitchfirst','diatonicpitchsecond','diatonicpitchthird']].values
pgrams['ambitus'] = diat.max(1) - diat.min(1)
arfftype['ambitus'] = 'numeric'
pgrams['containsleap'] = \
(abs(pgrams['diatonicpitchsecond'] - pgrams['diatonicpitchfirst']) > 1) | \
(abs(pgrams['diatonicpitchthird'] - pgrams['diatonicpitchsecond']) > 1)
arfftype['containsleap'] = '{True, False}'
###########################################3
pgrams['numberofnotesfirst'] = pd.array([ix2 - ix1 for ix1, ix2 in zip(pgrams['ix0_0'],pgrams['ix0_1'])], dtype="Int16")
pgrams['numberofnotessecond'] = pd.array([ix2 - ix1 for ix1, ix2 in zip(pgrams['ix1_0'],pgrams['ix1_1'])], dtype="Int16")
pgrams['numberofnotesthird'] = pd.array([ix2 - ix1 for ix1, ix2 in zip(pgrams['ix2_0'],pgrams['ix2_1'])], dtype="Int16")
pgrams['numberofnotesfourth'] = pd.array([ix2 - ix1 if not pd.isna(ix1) else np.nan for ix1, ix2 in zip(pgrams['ix3_0'],pgrams['ix3_1'])], dtype="Int16")
pgrams['numberofnotesfifth'] = pd.array([ix2 - ix1 if not pd.isna(ix1) else np.nan for ix1, ix2 in zip(pgrams['ix4_0'],pgrams['ix4_1'])], dtype="Int16")
arfftype['numberofnotesfirst'] = 'numeric'
arfftype['numberofnotessecond'] = 'numeric'
arfftype['numberofnotesthird'] = 'numeric'
arfftype['numberofnotesfourth'] = 'numeric'
arfftype['numberofnotesfifth'] = 'numeric'
if seq['freemeter']:
pgrams['meternumerator'] = pd.array([np.nan for ix in pgrams['ix0_0']], dtype="Int16")
pgrams['meterdenominator'] = pd.array([np.nan for ix in pgrams['ix0_0']], dtype="Int16")
else:
pgrams['meternumerator'] = pd.array([int(timesig[ix].split('/')[0]) for ix in pgrams['ix0_0']], dtype="Int16")
pgrams['meterdenominator'] = pd.array([int(timesig[ix].split('/')[1]) for ix in pgrams['ix0_0']], dtype="Int16")
arfftype['meternumerator'] = 'numeric'
arfftype['meterdenominator'] = 'numeric'
pgrams['nextisrestfirst'] = [nextisrest[ix-1] for ix in pgrams['ix0_1']]
pgrams['nextisrestsecond'] = [nextisrest[ix-1] for ix in pgrams['ix1_1']]
pgrams['nextisrestthird'] = [nextisrest[ix-1] for ix in pgrams['ix2_1']]
pgrams['nextisrestfourth'] = [nextisrest[ix-1] if not pd.isna(ix) else np.nan for ix in pgrams['ix3_1']]
pgrams['nextisrestfifth'] = [nextisrest[ix-1] if not pd.isna(ix) else np.nan for ix in pgrams['ix4_1']]
arfftype['nextisrestfirst'] = '{True, False}'
arfftype['nextisrestsecond'] = '{True, False}'
arfftype['nextisrestthird'] = '{True, False}'
arfftype['nextisrestfourth'] = '{True, False}'
arfftype['nextisrestfifth'] = '{True, False}'
pgrams['beatstrengthfirst'] = [beatstrength[int(ix)] for ix in pgrams['ix0_0']]
pgrams['beatstrengthsecond'] = [beatstrength[int(ix)] for ix in pgrams['ix1_0']]
pgrams['beatstrengththird'] = [beatstrength[int(ix)] for ix in pgrams['ix2_0']]
pgrams['beatstrengthfourth'] = [beatstrength[int(ix)] if not pd.isna(ix) else np.nan for ix in pgrams['ix3_0']]
pgrams['beatstrengthfifth'] = [beatstrength[int(ix)] if not pd.isna(ix) else np.nan for ix in pgrams['ix4_0']]
arfftype['beatstrengthfirst'] = 'numeric'
arfftype['beatstrengthsecond'] = 'numeric'
arfftype['beatstrengththird'] = 'numeric'
arfftype['beatstrengthfourth'] = 'numeric'
arfftype['beatstrengthfifth'] = 'numeric'
#these will be in crossrelations: beatstrengthfirstsecond, etc.
#pgrams['metriccontourfirst'] = [metriccontour[int(ix)] for ix in pgrams['ix0_0']]
#pgrams['metriccontoursecond'] = [metriccontour[int(ix)] for ix in pgrams['ix1_0']]
#pgrams['metriccontourthird'] = [metriccontour[int(ix)] for ix in pgrams['ix2_0']]
#pgrams['metriccontourfourth'] = [metriccontour[int(ix)] if not pd.isna(ix) else np.nan for ix in pgrams['ix3_0']]
#pgrams['metriccontourfifth'] = [metriccontour[int(ix)] if not pd.isna(ix) else np.nan for ix in pgrams['ix4_0']]
#arfftype['metriccontourfirst'] = '{-,=,+}'
#arfftype['metriccontoursecond'] = '{-,=,+}'
#arfftype['metriccontourthird'] = '{-,=,+}'
#arfftype['metriccontourfourth'] = '{-,=,+}'
#arfftype['metriccontourfifth'] = '{-,=,+}'
pgrams['IOIbeatfractionfirst'] = [computeSumFractions(IOIbeatfraction, startix, endix) for \
startix, endix in zip(pgrams['ix0_0'],pgrams['ix0_1'])]
pgrams['IOIbeatfractionsecond'] = [computeSumFractions(IOIbeatfraction, startix, endix) for \
startix, endix in zip(pgrams['ix1_0'],pgrams['ix1_1'])]
pgrams['IOIbeatfractionthird'] = [computeSumFractions(IOIbeatfraction, startix, endix) for \
startix, endix in zip(pgrams['ix2_0'],pgrams['ix2_1'])]
pgrams['IOIbeatfractionfourth'] = [computeSumFractions(IOIbeatfraction, startix, endix) if not pd.isna(startix) else np.nan for \
startix, endix in zip(pgrams['ix3_0'],pgrams['ix3_1'])]
pgrams['IOIbeatfractionfifth'] = [computeSumFractions(IOIbeatfraction, startix, endix) if not pd.isna(startix) else np.nan for \
startix, endix in zip(pgrams['ix4_0'],pgrams['ix4_1'])]
arfftype['IOIbeatfractionfirst'] = 'numeric'
arfftype['IOIbeatfractionsecond'] = 'numeric'
arfftype['IOIbeatfractionthird'] = 'numeric'
arfftype['IOIbeatfractionfourth'] = 'numeric'
arfftype['IOIbeatfractionfifth'] = 'numeric'
pgrams['durationcummulation'] = [((d2 > d1) and (d3 > d2)) for d1, d2, d3 in \
zip(pgrams['IOIbeatfractionfirst'],pgrams['IOIbeatfractionsecond'],pgrams['IOIbeatfractionthird'])]
arfftype['durationcummulation'] = '{True, False}'
#these will be in crossrelation: IOIbeatfractionfirstsecond, etc.
#pgrams['durationcontoursecond'] = [parsons[np.sign(dur2 - dur1)] for dur1, dur2 in \
# zip(pgrams['IOIbeatfractionfirst'],pgrams['IOIbeatfractionsecond'])]
#pgrams['durationcontourthird'] = [parsons[np.sign(dur2 - dur1)] for dur1, dur2 in \
# zip(pgrams['IOIbeatfractionsecond'],pgrams['IOIbeatfractionthird'])]
#pgrams['durationcontourfourth'] = [parsons[np.sign(dur2 - dur1)] if not pd.isna(dur2) else np.nan for dur1, dur2 in \
# zip(pgrams['IOIbeatfractionthird'],pgrams['IOIbeatfractionfourth'])]
#pgrams['durationcontourfifth'] = [parsons[np.sign(dur2 - dur1)] if not pd.isna(dur2) else np.nan for dur1, dur2 in \
# zip(pgrams['IOIbeatfractionfourth'],pgrams['IOIbeatfractionfifth'])]
#arfftype['durationcontoursecond'] = '{-,=,+}'
#arfftype['durationcontourthird'] = '{-,=,+}'
#arfftype['durationcontourfourth'] = '{-,=,+}'
#arfftype['durationcontourfifth'] = '{-,=,+}'
pgrams['onthebeatfirst'] = [Fraction(beatinsong[int(ix)]) % 1 == 0 for ix in pgrams['ix0_0']]
pgrams['onthebeatsecond'] = [Fraction(beatinsong[int(ix)]) % 1 == 0 for ix in pgrams['ix1_0']]
pgrams['onthebeatthird'] = [Fraction(beatinsong[int(ix)]) % 1 == 0 for ix in pgrams['ix2_0']]
pgrams['onthebeatfourth'] = [Fraction(beatinsong[int(ix)]) % 1 == 0 if not pd.isna(ix) else np.nan for ix in pgrams['ix3_0']]
pgrams['onthebeatfifth'] = [Fraction(beatinsong[int(ix)]) % 1 == 0 if not pd.isna(ix) else np.nan for ix in pgrams['ix4_0']]
arfftype['onthebeatfirst'] = '{True, False}'
arfftype['onthebeatsecond'] = '{True, False}'
arfftype['onthebeatthird'] = '{True, False}'
arfftype['onthebeatfourth'] = '{True, False}'
arfftype['onthebeatfifth'] = '{True, False}'
pgrams['completesmeasurephrase'] = [completesmeasure_phrase[ix-1] for ix in pgrams['ix2_1']]
pgrams['completesmeasuresong'] = [completesmeasure_song[ix-1] for ix in pgrams['ix2_1']]
pgrams['completesbeatphrase'] = [completesbeat_phrase[ix-1] for ix in pgrams['ix2_1']]
pgrams['completesbeatsong'] = [completesbeat_song[ix-1] for ix in pgrams['ix2_1']]
arfftype['completesmeasurephrase'] = '{True, False}'
arfftype['completesmeasuresong'] = '{True, False}'
arfftype['completesbeatphrase'] = '{True, False}'
arfftype['completesbeatsong'] = '{True, False}'
if 'grouper' in seq['features'].keys():
grouper = seq['features']['grouper']
pgrams['grouperfirst'] = [grouper[int(ix)] for ix in pgrams['ix0_0']]
pgrams['groupersecond'] = [grouper[int(ix)] for ix in pgrams['ix1_0']]
pgrams['grouperthird'] = [grouper[int(ix)] for ix in pgrams['ix2_0']]
pgrams['grouperfourth'] = [grouper[int(ix)] if not pd.isna(ix) else np.nan for ix in pgrams['ix3_0']]
pgrams['grouperfifth'] = [grouper[int(ix)] if not pd.isna(ix) else np.nan for ix in pgrams['ix4_0']]
arfftype['grouperfirst'] = '{True, False}'
arfftype['groupersecond'] = '{True, False}'
arfftype['grouperthird'] = '{True, False}'
arfftype['grouperfourth'] = '{True, False}'
arfftype['grouperfifth'] = '{True, False}'
#values for final note of third group
pgrams['noteoffset'] = pd.array([(ix-1) - phrasestart_ix[(ix-1)] for ix in pgrams['ix2_1']], dtype="Int16")
pgrams['beatoffset'] = [float(Fraction(beatinphrase[ix-1])) - \
float(Fraction(beatinphrase[phrasestart_ix[(ix-1)]])) \
for ix in pgrams['ix2_1']]
arfftype['noteoffset'] = 'numeric'
arfftype['beatoffset'] = 'numeric'
pgrams['beatduration'] = [getBeatDuration(timesig[int(ix)]) for ix in pgrams['ix0_0']]
pgrams['beatcount'] = pd.array([m21.meter.TimeSignature(timesig[int(ix)]).beatCount for ix in pgrams['ix0_0']], dtype="Int16")
arfftype['beatduration'] = 'numeric'
arfftype['beatcount'] = 'numeric'
#get values for the last note!
pgrams['gpr2afirst'] = [gpr2a[ix-1] for ix in pgrams['ix0_1']]
pgrams['gpr2asecond'] = [gpr2a[ix-1] for ix in pgrams['ix1_1']]
pgrams['gpr2athird'] = [gpr2a[ix-1] for ix in pgrams['ix2_1']]
pgrams['gpr2afourth'] = [gpr2a[ix-1] if not pd.isna(ix) else np.nan for ix in pgrams['ix3_1']]
pgrams['gpr2afifth'] = [gpr2a[ix-1] if not pd.isna(ix) else np.nan for ix in pgrams['ix4_1']]
arfftype['gpr2afirst'] = 'numeric'
arfftype['gpr2asecond'] = 'numeric'
arfftype['gpr2athird'] = 'numeric'
arfftype['gpr2afourth'] = 'numeric'
arfftype['gpr2afifth'] = 'numeric'
pgrams['gpr2bfirst'] = [gpr2b[ix-1] for ix in pgrams['ix0_1']]
pgrams['gpr2bsecond'] = [gpr2b[ix-1] for ix in pgrams['ix1_1']]
pgrams['gpr2bthird'] = [gpr2b[ix-1] for ix in pgrams['ix2_1']]
pgrams['gpr2bfourth'] = [gpr2b[ix-1] if not pd.isna(ix) else np.nan for ix in pgrams['ix3_1']]
pgrams['gpr2bfifth'] = [gpr2b[ix-1] if not pd.isna(ix) else np.nan for ix in pgrams['ix4_1']]
arfftype['gpr2bfirst'] = 'numeric'
arfftype['gpr2bsecond'] = 'numeric'
arfftype['gpr2bthird'] = 'numeric'
arfftype['gpr2bfourth'] = 'numeric'
arfftype['gpr2bfifth'] = 'numeric'
pgrams['gpr3afirst'] = [gpr3a[ix-1] for ix in pgrams['ix0_1']]
pgrams['gpr3asecond'] = [gpr3a[ix-1] for ix in pgrams['ix1_1']]
pgrams['gpr3athird'] = [gpr3a[ix-1] for ix in pgrams['ix2_1']]
pgrams['gpr3afourth'] = [gpr3a[ix-1] if not pd.isna(ix) else np.nan for ix in pgrams['ix3_1']]
pgrams['gpr3afifth'] = [gpr3a[ix-1] if not pd.isna(ix) else np.nan for ix in pgrams['ix4_1']]
arfftype['gpr3afirst'] = 'numeric'
arfftype['gpr3asecond'] = 'numeric'
arfftype['gpr3athird'] = 'numeric'
arfftype['gpr3afourth'] = 'numeric'
arfftype['gpr3afifth'] = 'numeric'
pgrams['gpr3dfirst'] = [gpr3d[ix-1] for ix in pgrams['ix0_1']]
pgrams['gpr3dsecond'] = [gpr3d[ix-1] for ix in pgrams['ix1_1']]
pgrams['gpr3dthird'] = [gpr3d[ix-1] for ix in pgrams['ix2_1']]
pgrams['gpr3dfourth'] = [gpr3d[ix-1] if not pd.isna(ix) else np.nan for ix in pgrams['ix3_1']]
pgrams['gpr3dfifth'] = [gpr3d[ix-1] if not pd.isna(ix) else np.nan for ix in pgrams['ix4_1']]
arfftype['gpr3dfirst'] = 'numeric'
arfftype['gpr3dsecond'] = 'numeric'
arfftype['gpr3dthird'] = 'numeric'
arfftype['gpr3dfourth'] = 'numeric'
arfftype['gpr3dfifth'] = 'numeric'
pgrams['gprsumfirst'] = [gprsum[ix-1] for ix in pgrams['ix0_1']]
pgrams['gprsumsecond'] = [gprsum[ix-1] for ix in pgrams['ix1_1']]
pgrams['gprsumthird'] = [gprsum[ix-1] for ix in pgrams['ix2_1']]
pgrams['gprsumfourth'] = [gprsum[ix-1] if not pd.isna(ix) else np.nan for ix in pgrams['ix3_1']]
pgrams['gprsumfifth'] = [gprsum[ix-1] if not pd.isna(ix) else np.nan for ix in pgrams['ix4_1']]
arfftype['gprsumfirst'] = 'numeric'
arfftype['gprsumsecond'] = 'numeric'
arfftype['gprsumthird'] = 'numeric'
arfftype['gprsumfourth'] = 'numeric'
arfftype['gprsumfifth'] = 'numeric'
pgrams['pitchproximityfirst'] = pd.array([pprox[ix] for ix in pgrams['ix0_0']], dtype="Int16")
pgrams['pitchproximitysecond'] = pd.array([pprox[ix] for ix in pgrams['ix1_0']], dtype="Int16")
pgrams['pitchproximitythird'] = pd.array([pprox[ix] for ix in pgrams['ix2_0']], dtype="Int16")
pgrams['pitchproximityfourth'] = pd.array([pprox[ix] if not pd.isna(ix) else np.nan for ix in pgrams['ix3_0']], dtype="Int16")
pgrams['pitchproximityfifth'] = pd.array([pprox[ix] if not pd.isna(ix) else np.nan for ix in pgrams['ix4_0']], dtype="Int16")
arfftype['pitchproximityfirst'] = 'numeric'
arfftype['pitchproximitysecond'] = 'numeric'
arfftype['pitchproximitythird'] = 'numeric'
arfftype['pitchproximityfourth'] = 'numeric'
arfftype['pitchproximityfifth'] = 'numeric'
pgrams['pitchreversalfirst'] = [prev[ix] for ix in pgrams['ix0_0']]
pgrams['pitchreversalsecond'] = [prev[ix] for ix in pgrams['ix1_0']]
pgrams['pitchreversalthird'] = [prev[ix] for ix in pgrams['ix2_0']]
pgrams['pitchreversalfourth'] = [prev[ix] if not pd.isna(ix) else np.nan for ix in pgrams['ix3_0']]
pgrams['pitchreversalfifth'] = [prev[ix] if not pd.isna(ix) else np.nan for ix in pgrams['ix4_0']]
arfftype['pitchreversalfirst'] = 'numeric'
arfftype['pitchreversalsecond'] = 'numeric'
arfftype['pitchreversalthird'] = 'numeric'
arfftype['pitchreversalfourth'] = 'numeric'
arfftype['pitchreversalfifth'] = 'numeric'
#get values for last note in pitchgroup
pgrams['lbdmpitchfirst'] = [lbdmpitch[ix-1] for ix in pgrams['ix0_1']]
pgrams['lbdmpitchsecond'] = [lbdmpitch[ix-1] for ix in pgrams['ix1_1']]
pgrams['lbdmpitchthird'] = [lbdmpitch[ix-1] for ix in pgrams['ix2_1']]
pgrams['lbdmpitchfourth'] = [lbdmpitch[ix-1] if not pd.isna(ix) else np.nan for ix in pgrams['ix3_1']]
    pgrams['lbdmpitchfifth'] = [lbdmpitch[ix-1] if not pd.isna(ix) else np.nan for ix in pgrams['ix4_1']]  # api: pandas.isna
"""
Base IO for all periodic datasets
"""
import os
import warnings
import pandas as pd
from ._base import get_data_home
def deduplicate(S):
"""deduplicate pd.Series by removing rows with same index and values"""
dedup = S.groupby(S.index).first()
diff = len(S) - len(dedup)
if diff:
warnings.warn(f"found {diff} duplicates in S, removing them")
return dedup
return S
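# Hedged usage sketch for deduplicate above: duplicate index entries are collapsed
# (first value kept) and a warning reports how many rows were dropped. Toy data only.
def _demo_deduplicate():
    s = pd.Series(["a", "a", "b"], index=[0, 0, 1])
    return deduplicate(s)  # two rows remain; the warning mentions 1 duplicate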
def fetch_health_app(data_home=None, filename="health_app.csv"):
"""Fetch and return the health app log dataset
see: https://github.com/logpai/loghub
HealthApp is a mobile application for Android devices.
Logs were collected from an Android smartphone after 10+ days of use.
Logs have been grouped by their types, hence resulting
in only 20 different events.
============================== ===================================
Number of events 20
Average delta per event Timedelta('0 days 00:53:24.984000')
Average nb of points per event 100.0
============================== ===================================
Parameters
----------
data_home : optional, default: None
Specify another download and cache folder for the datasets. By default
all scikit-mine data is stored in `scikit-mine_data`.
Returns
-------
pd.Series
System logs from the health app dataset, as an in-memory pandas Series.
Events are indexed by timestamps.
"""
data_home = data_home or get_data_home()
p = os.path.join(data_home, filename)
kwargs = dict(header=None, index_col=0, squeeze=True, dtype="string")
if filename in os.listdir(data_home):
s = pd.read_csv(p, **kwargs)
else:
s = pd.read_csv(
"https://raw.githubusercontent.com/logpai/loghub/master/HealthApp/HealthApp_2k.log",
sep="|",
error_bad_lines=False,
usecols=[0, 1],
**kwargs,
)
s.to_csv(p, header=False)
s.index.name = "timestamp"
    s.index = pd.to_datetime(s.index, format="%Y%m%d-%H:%M:%S:%f")  # api: pandas.to_datetime
import json
import numpy as np
import pandas as pd
from pathlib import Path
import argparse
import logging
import time
from sklearn.preprocessing import MinMaxScaler
def preprocess(data_name):
logger.info("Loading interaction and label data...")
u_list, i_list, ts_list, label_list = [], [], [], []
feat_l = []
idx_list = []
with open(data_name) as f:
s = next(f)
for idx, line in enumerate(f):
e = line.strip().split(',')
u = int(e[0])
i = int(e[1])
ts = float(e[2])
label = float(e[3]) # int(e[3])
feat = np.array([float(x) for x in e[4:]])
u_list.append(u)
i_list.append(i)
ts_list.append(ts)
label_list.append(label)
idx_list.append(idx)
feat_l.append(feat)
logger.info("Loading interation and label data succeeded.")
return pd.DataFrame({'u': u_list,
'i': i_list,
'ts': ts_list,
'label': label_list,
'idx': idx_list}), np.array(feat_l)
def reindex(df, bipartite=True):
new_df = df.copy()
if bipartite:
assert (df.u.max() - df.u.min() + 1 == len(df.u.unique()))
assert (df.i.max() - df.i.min() + 1 == len(df.i.unique()))
upper_u = df.u.max() + 1
new_i = df.i + upper_u
new_df.i = new_i
new_df.u += 1
new_df.i += 1
new_df.idx += 1
else:
new_df.u += 1
new_df.i += 1
new_df.idx += 1
return new_df
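# Hedged illustration of reindex in bipartite mode with an invented two-row frame:
# item ids are shifted above the user-id range and all ids become 1-based.
def _demo_reindex():
    df = pd.DataFrame({"u": [0, 1], "i": [0, 1], "ts": [0.0, 1.0],
                       "label": [0.0, 0.0], "idx": [0, 1]})
    # Expected: u -> [1, 2], i -> [3, 4], idx -> [1, 2]
    return reindex(df, bipartite=True)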
def run(data_name, bipartite=True):
Path("data/").mkdir(parents=True, exist_ok=True)
PATH = './data/{}_node_interactions.csv'.format(data_name)
PATH_NODE_FEAT = './data/{}_node_features.csv'.format(data_name)
OUT_DF = './data/ml_{}.csv'.format(data_name)
OUT_FEAT = './data/ml_{}.npy'.format(data_name)
OUT_NODE_FEAT = './data/ml_{}_node.npy'.format(data_name)
df, feat = preprocess(PATH)
new_df = reindex(df, bipartite)
empty = np.zeros(feat.shape[1])[np.newaxis, :]
feat = np.vstack([empty, feat])
# %%
max_idx = max(new_df.u.max(), new_df.i.max())
try:
logger.info("Trying to load graph node features...")
        node_feat = pd.read_csv(PATH_NODE_FEAT)  # api: pandas.read_csv
"""Tests for classifier_ensemble.ClassifierEnsemble."""
from common_python.classifier import classifier_collection
from common_python.classifier import classifier_ensemble
from common_python.classifier.classifier_ensemble \
import ClassifierEnsemble, ClassifierDescriptorSVM
from common_python.classifier.classifier_collection \
import ClassifierCollection
from common_python.testing import helpers
import common_python.constants as cn
from common_python.tests.classifier import helpers as test_helpers
import collections
import os
import pandas as pd
import random
from sklearn import svm
import numpy as np
import unittest
IGNORE_TEST = False
IS_PLOT = False
SIZE = 10
ITERATIONS = 3
values = list(range(SIZE))
values.extend(values)
DF = pd.DataFrame({
'A': values,
'B': np.repeat(1, 2*SIZE),
})
SER = pd.Series(values)  # api: pandas.Series
import altair as alt
import numpy as np
import pandas as pd
import shap
import streamlit as st
from aqua import tables
from aqua._constant import forces_order
empty_axis = alt.Axis(labels=False, ticks=False, domain=False, grid=False)
xaxis = alt.Axis(labelFlush=False)
colors = {"primary": "#f63366", "grey": "#4C566A", "dark": "#2E3440"}
width = 280
height = 75
def plot_kde(
value: str = "value", variable: str = "variable", **chart_kwargs
) -> alt.Chart:
dist = (
alt.Chart(height=height, width=width, **chart_kwargs)
.transform_density(value, as_=[value, "density"], groupby=[variable])
.mark_area(color=colors["grey"], opacity=0.6)
.encode(
alt.X(value, axis=xaxis), alt.Y("density:Q", title=None, axis=empty_axis)
)
)
point = (
alt.Chart()
.mark_circle(size=120, color=colors["dark"], y="height")
.encode(alt.X(f"mean({value})", title=None, scale=alt.Scale(zero=False)))
)
bar = (
alt.Chart()
.mark_rule(size=5, color=colors["dark"], y="height")
.encode(alt.X(f"q1({value})"), alt.X2(f"q3({value})"))
)
return dist + bar + point
def plot_anthropometry(variables: pd.DataFrame) -> None:
anthropo = variables[["Height", "Weight"]].melt()
tables.describe_table(anthropo, description="variables")
plots = (
plot_kde()
.facet(data=anthropo, column=alt.Column("variable", title=None))
.resolve_scale(x="independent", y="independent")
)
st.altair_chart(plots, use_container_width=True)
# TODO caption?
def plot_forces(variables: pd.DataFrame) -> None:
forces = variables.drop(["Height", "Weight"], axis=1).melt()
forces[["type", "variable"]] = forces["variable"].str.split(expand=True)
tables.describe_table(forces, groupby=["variable", "type"], description="variables")
row_kwargs = dict(shorthand="variable", title=None, sort=forces_order)
column = alt.Column("type", title=None)
forces_plot = (
plot_kde()
.facet(
data=forces.query("type != 'Imb'"),
row=alt.Row(
header=alt.Header(labelAngle=0, labelAlign="left"), **row_kwargs
),
column=column,
)
.resolve_scale(y="independent")
.properties(bounds="flush")
)
imb_plot = (
plot_kde()
.facet(
data=forces.query("type == 'Imb'"),
row=alt.Row(header=alt.Header(labelFontSize=0), **row_kwargs),
column=column,
)
.resolve_scale(y="independent")
.properties(bounds="flush")
)
plots = (forces_plot | imb_plot).configure_facet(spacing=5)
st.altair_chart(plots)
def plot_targets(targets: pd.DataFrame) -> None:
targets_melted = targets.melt()
tables.describe_table(targets_melted, description="targets")
dist_plot = (
plot_kde()
.facet(
data=targets_melted,
row=alt.Row(
"variable",
title=None,
header=alt.Header(labelAngle=0, labelAlign="left"),
),
)
.configure_facet(spacing=5)
.resolve_scale(y="independent")
.properties(bounds="flush")
)
st.altair_chart(dist_plot)
def plot_error_dist(predictions: pd.DataFrame) -> None:
predictions_melted = predictions.melt(id_vars="target", value_vars=["MAE", "MAPE"])
tables.describe_table(predictions_melted, groupby=["target", "variable"])
row_kwargs = dict(shorthand="target", title=None, sort=forces_order)
column = alt.Column("variable", title=None)
mae = (
plot_kde()
.facet(
data=predictions_melted.query("variable == 'MAE'"),
row=alt.Row(
header=alt.Header(labelAngle=0, labelAlign="left"), **row_kwargs
),
column=column,
)
.resolve_scale(y="independent")
.properties(bounds="flush")
)
mape = (
plot_kde()
.facet(
data=predictions_melted.query("variable == 'MAPE'"),
row=alt.Row(header=alt.Header(labelFontSize=0), **row_kwargs),
column=column,
)
.resolve_scale(y="independent")
.properties(bounds="flush")
)
plots = (mae | mape).configure_facet(spacing=5)
st.altair_chart(plots)
def plot_correlation_matrix(variables: pd.DataFrame, targets: pd.DataFrame) -> None:
data = variables.join(targets).corr()
col_order = data.columns.to_list()
half_corr = (
data.where(np.triu(np.ones(data.shape)).astype(np.bool))
.reset_index()
.melt(id_vars="index")
.dropna()
)
plot_dimension = 600
corr = (
alt.Chart(half_corr, width=plot_dimension, height=plot_dimension)
.mark_rect()
.encode(
alt.X("index", sort=col_order, title=None),
alt.Y("variable", sort=col_order, title=None),
alt.Tooltip("value"),
alt.Color(
"value", scale=alt.Scale(scheme="redblue", domain=[-1, 1]), title=None
),
)
.configure_view(strokeWidth=0)
)
st.altair_chart(corr)
def plot_error_residuals(predictions: pd.DataFrame) -> None:
points = (
alt.Chart(predictions.eval("Residuals = predicted - real"))
.mark_circle(size=100)
.encode(
alt.X("predicted", title="Predicted", scale=alt.Scale(zero=False)),
alt.Y("Residuals", title="Residuals"),
alt.Color("target"),
)
)
    rule = alt.Chart(pd.DataFrame([{"zero": 0}]))  # api: pandas.DataFrame
import numpy as np
import pandas as pd
def colley(
goals_home, goals_away, teams_home, teams_away, include_draws=True, draw_weight=0.5
) -> pd.DataFrame:
"""
Calculates each team's Colley ratings
Parameters
----------
goals_home : list
List of goals scored by the home teams
goals_away : list
List of goals scored by the away teams
teams_home : list
List of names of the home teams
teams_away : list
List of names of the away teams
include_draws : bool
Should tied results be included in the ratings?
draw_weight : float
if include_draws is `True` then this sets the weighting applied to tied scores. For example `0.5` means
a draw is worth half a win, `0.333` means a draw is a third of a win etc
Returns
    -------
    A dataframe containing Colley ratings per team
"""
teams = np.sort(np.unique(np.concatenate([teams_home, teams_away])))
fixtures = _build_fixtures(goals_home, goals_away, teams_home, teams_away)
C, b = _build_C_b(fixtures, teams, include_draws, draw_weight)
r = _solve_r(C, b)
r = pd.DataFrame([teams, r]).T
r.columns = ["team", "rating"]
r = r.sort_values("rating", ascending=False)
r = r.reset_index(drop=True)
return r
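# Hedged usage sketch for colley() with three invented results; it assumes the
# module's private helpers (_build_fixtures, _build_C_b, _solve_r) are available.
def _demo_colley():
    goals_home = [2, 1, 0]
    goals_away = [0, 1, 3]
    teams_home = ["A", "B", "C"]
    teams_away = ["B", "C", "A"]
    return colley(goals_home, goals_away, teams_home, teams_away)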
def _build_fixtures(goals_home, goals_away, teams_home, teams_away):
    fixtures = pd.DataFrame([goals_home, goals_away, teams_home, teams_away])  # api: pandas.DataFrame
import warnings
import threading
import numpy as np
import pandas as pd
import numba as nb
import pvlib
from pvlib import tools
from pvlib.solarposition import _spa_python_import
from pvlib.location import Location
spa = _spa_python_import('numba')
def haurwitz(apparent_zenith):
"""Caluclate global horizontal irradiance from apparent zenith angle of sun
using Haurwitz method.
Parameters
----------
apparent_zenith : array_like
Apparent zenith angle of sun in degrees
Returns
-------
array_like
Global horizontal irradiance
Notes
-----
Based on `pvlib.clearsky.haurwitz`
"""
cos_zenith = tools.cosd(apparent_zenith)
clearsky_ghi = np.zeros_like(apparent_zenith)
cos_zen_gte_0 = cos_zenith > 0
clearsky_ghi[cos_zen_gte_0] = (1098.0 * cos_zenith[cos_zen_gte_0] *
np.exp(-0.059/cos_zenith[cos_zen_gte_0]))
return clearsky_ghi
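# Quick sanity-check sketch for haurwitz() above: clear-sky GHI should be
# largest with the sun overhead (zenith 0) and zero once the sun is below the
# horizon (zenith > 90). The zenith values are arbitrary illustration inputs.
def _haurwitz_example():
    example_zenith = np.array([0.0, 30.0, 60.0, 85.0, 95.0])
    return haurwitz(example_zenith)  # monotonically decreasing, last entry 0.0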
@nb.jit('void(float64[:], float64[:], float64[:], float64[:], float64[:,:])',
nopython=True, nogil=True)
def _solar_position_loop(unixtime, lats, lons, loc_args, out):
"""Modify the array `out` array inplace to input the calculated solar
position at unixtime times and lats-lons locations.
Notes
-----
Based on `pvlib.spa.solar_position_loop` function.
For now we assume the elevation, pressure and temp are the same at all
locations and times. This is just for a simple approximation.
"""
#
elev = loc_args[0]
pressure = loc_args[1]
temp = loc_args[2]
delta_t = loc_args[3]
atmos_refract = loc_args[4]
for i in range(unixtime.shape[0]):
utime = unixtime[i]
jd = spa.julian_day(utime)
jde = spa.julian_ephemeris_day(jd, delta_t)
jc = spa.julian_century(jd)
jce = spa.julian_ephemeris_century(jde)
jme = spa.julian_ephemeris_millennium(jce)
R = spa.heliocentric_radius_vector(jme)
L = spa.heliocentric_longitude(jme)
B = spa.heliocentric_latitude(jme)
Theta = spa.geocentric_longitude(L)
beta = spa.geocentric_latitude(B)
x0 = spa.mean_elongation(jce)
x1 = spa.mean_anomaly_sun(jce)
x2 = spa.mean_anomaly_moon(jce)
x3 = spa.moon_argument_latitude(jce)
x4 = spa.moon_ascending_longitude(jce)
delta_psi = spa.longitude_nutation(jce, x0, x1, x2, x3, x4)
delta_epsilon = spa.obliquity_nutation(jce, x0, x1, x2, x3, x4)
epsilon0 = spa.mean_ecliptic_obliquity(jme)
epsilon = spa.true_ecliptic_obliquity(epsilon0, delta_epsilon)
delta_tau = spa.aberration_correction(R)
lamd = spa.apparent_sun_longitude(Theta, delta_psi, delta_tau)
v0 = spa.mean_sidereal_time(jd, jc)
v = spa.apparent_sidereal_time(v0, delta_psi, epsilon)
alpha = spa.geocentric_sun_right_ascension(lamd, epsilon, beta)
delta = spa.geocentric_sun_declination(lamd, epsilon, beta)
m = spa.sun_mean_longitude(jme)
eot = spa.equation_of_time(m, alpha, delta_psi, epsilon)
for j in range(lats.shape[0]):
lat = lats[j]
lon = lons[j]
H = spa.local_hour_angle(v, lon, alpha)
xi = spa.equatorial_horizontal_parallax(R)
u = spa.uterm(lat)
x = spa.xterm(u, lat, elev)
y = spa.yterm(u, lat, elev)
delta_alpha = spa.parallax_sun_right_ascension(x, xi, H, delta)
delta_prime = spa.topocentric_sun_declination(delta, x, y, xi, delta_alpha, H)
H_prime = spa.topocentric_local_hour_angle(H, delta_alpha)
e0 = spa.topocentric_elevation_angle_without_atmosphere(lat, delta_prime, H_prime)
delta_e = spa.atmospheric_refraction_correction(pressure, temp, e0, atmos_refract)
e = spa.topocentric_elevation_angle(e0, delta_e)
theta = spa.topocentric_zenith_angle(e)
out[i, j] = theta
def _solar_position_numba(unixtime, lats, lons, elev, pressure, temp, delta_t,
atmos_refract, numthreads):
"""Calculate the solar position using the numba compiled functions
and multiple threads. Very slow if functions are not numba compiled.
Notes
-----
Based on `pvlib.spa.solar_position_numba` function.
"""
# these args are the same for each thread
loc_args = np.array([elev, pressure, temp, delta_t, atmos_refract])
# construct dims x ulength array to put the results in
ulength = unixtime.shape[0]
results = np.zeros((ulength, lats.shape[0]), dtype=np.float64)
if unixtime.dtype != np.float64:
unixtime = unixtime.astype(np.float64)
if ulength < numthreads:
        warnings.warn('The number of threads is more than the length of '
                      'the time array. Only using {} threads.'.format(ulength))
numthreads = ulength
if numthreads <= 1:
_solar_position_loop(unixtime, lats, lons, loc_args, results)
return results
# split the input and output arrays into numthreads chunks
time_split = np.array_split(unixtime, numthreads)
results_split = np.array_split(results, numthreads)
chunks = [[time_split[i], lats, lons, loc_args, results_split[i]] for i in range(numthreads)]
# Spawn one thread per chunk
threads = [threading.Thread(target=_solar_position_loop, args=chunk)
for chunk in chunks]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
return results
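# Minimal standalone sketch of the chunk-and-thread pattern used in
# _solar_position_numba above: split the work, let each thread fill its own
# slice of a shared output array, then join. The worker is a trivial stand-in
# for the SPA loop, purely for illustration.
def _toy_chunk_worker(values, out):
    out[:] = values * 2.0  # each thread writes only into its own view

def _toy_threaded_demo(numthreads=4):
    values = np.arange(8, dtype=np.float64)
    out = np.zeros_like(values)
    chunks = zip(np.array_split(values, numthreads), np.array_split(out, numthreads))
    threads = [threading.Thread(target=_toy_chunk_worker, args=chunk) for chunk in chunks]
    for thread in threads:
        thread.start()
    for thread in threads:
        thread.join()
    return out  # equals values * 2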
def spa_python(times, latitudes, longitudes,
altitude=0, pressure=101325, temperature=12, delta_t=67.0,
atmos_refract=None, numthreads=4, **kwargs):
"""
Calculate the solar position using a python implementation of the
NREL SPA algorithm [1].
If numba is installed, the functions can be compiled to
machine code and the function can be multithreaded.
Without numba, the function evaluates via numpy with
a slight performance hit.
Parameters
----------
time : pandas.DatetimeIndex
Must be localized or UTC will be assumed.
latitudes : array_like, float
Latitudes in decimal degrees. Positive north of equator, negative
to south.
longitudes : array_like, float
Longitudes in decimal degrees. Positive east of prime meridian,
negative to west.
altitude : float, default 0
Distance above sea level.
pressure : int or float, optional, default 101325
avg. yearly air pressure in Pascals.
temperature : int or float, optional, default 12
avg. yearly air temperature in degrees C.
delta_t : float, optional, default 67.0
If delta_t is None, uses spa.calculate_deltat
using time.year and time.month from pandas.DatetimeIndex.
        For most simulations specifying delta_t is sufficient.
Difference between terrestrial time and UT1.
*Note: delta_t = None will break code using nrel_numba,
this will be fixed in a future version.*
The USNO has historical and forecasted delta_t [3].
    atmos_refract : None or float, optional, default None
The approximate atmospheric refraction (in degrees)
at sunrise and sunset.
numthreads : int, optional, default 4
        Number of threads to use for the numba-compiled calculation.
Returns
-------
array_like
Apparent zenith (degrees) with time along zeroth axis and location along
first axis.
References
----------
.. [1] <NAME> and <NAME>, Solar position algorithm for solar
radiation applications. Solar Energy, vol. 76, no. 5, pp. 577-589, 2004.
Notes
-----
Based on `pvlib.solarposition.spa_python` function.
"""
lats = latitudes
lons = longitudes
elev = altitude
pressure = pressure / 100 # pressure must be in millibars for calculation
atmos_refract = atmos_refract or 0.5667
if not isinstance(times, pd.DatetimeIndex):
try:
times = | pd.DatetimeIndex(times) | pandas.DatetimeIndex |
import dataiku
import pandas as pd
import requests
from io import StringIO
from pandas.io.json import json_normalize
import datetime
from dateutil import parser
from co2_converter_common import date_chunk
from dataiku.customrecipe import get_input_names_for_role, get_recipe_config, get_output_names_for_role
# Inputs
input_names = get_input_names_for_role('input_ds')
input_datasets = [dataiku.Dataset(name) for name in input_names]
input_dataset = input_datasets[0]
# Outputs
output_names = get_output_names_for_role('output_ds')
output_datasets = [dataiku.Dataset(name) for name in output_names]
output_dataset = output_datasets[0]
# Load input DSS dataset as a Pandas dataframe
input_df = input_dataset.get_dataframe()
# Load input Parameters:
APIProvider = get_recipe_config().get('APIProvider')
DateColName = get_recipe_config().get('DateColName')
ConsumptionColName = get_recipe_config().get('ConsumptionColName')
# API endpoint parameters conditions:
if APIProvider == 'RTE':
API_ENDPOINT = 'https://opendata.reseaux-energies.fr/api/records/1.0/download/'
if APIProvider == 'ElectricityMap':
API_ENDPOINT = 'https://api.electricitymap.org/v3/carbon-intensity/past-range'
API_TOKEN = get_recipe_config().get("api_configuration_preset").get("APITOKEN")
lat = get_recipe_config().get('LatColName')
lon = get_recipe_config().get('LonColName')
# Input validation:
# # Check if columns are in input dataset
columns_names = input_df.columns
# ## Date Column:
if DateColName not in columns_names:
raise Exception("Not able to find the '%s' column" % DateColName)
# ## Consumption Column:
if ConsumptionColName not in columns_names:
raise Exception("Not able to find the '%s' column" % ConsumptionColName)
# ## Latitude and longitude Column:
if APIProvider == 'ElectricityMap':
if lat not in columns_names:
raise Exception("Not able to find the '%s' column" % lat)
if lon not in columns_names:
raise Exception("Not able to find the '%s' column" % lon)
# # Check input data validity:
# ##Latitude and longitude
if APIProvider == 'ElectricityMap':
if input_df[lat].min() < -90:
raise Exception("Latitude value is below -90.")
if input_df[lat].max() > 90:
raise Exception("Latitude value is over 90.")
if input_df[lon].min() < -180:
raise Exception("longitude value is below -180.")
if input_df[lon].max() > 180:
raise Exception("longitude value is over 180.")
# API token validity:
if API_TOKEN is None:
raise Exception("No electricityMap API token found.")
# setup request
r = requests.session()
# ##################################### RTE ######################################
if APIProvider == 'RTE':
# Get MinDate and MaxDate to compute the number of rows to be requested:
MinDate = min(input_df[DateColName])
MaxDate = max(input_df[DateColName])
# Modify MinDate and MaxDate to have the right format for the query
min_date = MinDate.isoformat()
max_date = MaxDate.isoformat()
# Convert DateColName to datetime format:
input_df[DateColName] = | pd.to_datetime(input_df[DateColName]) | pandas.to_datetime |
import pandas as pd
import numpy as np
import datetime
import csv
import json
class Combiner:
def __init__(self, path):
self.assignments = None
self.domain_by_customer = None
self.customer_by_domain = None
self.metrics = None
self.todo_owners = None
self.path = path
"""
Helper function that extracts the domain part from emails
"""
def extract_domain(self,email):
mail_split = email.split("@")
if(len(mail_split) > 1):
return mail_split[1]
else:
return email
"""
    Once the combiner is loaded, or after the set_*-methods have been run, this can be used to add
    features to a sample dataframe. It uses the base data from the issues and enriches it with simple-to-calculate features.
"""
def samples_enrich(self, samples):
#Extract customer's domain from raised_by field
samples.loc[:,"domain"] = samples.loc[:,"raised_by"].apply(self.extract_domain)
#Convert datatypes
dt_cols = ["creation", "modified"]
for col in dt_cols:
samples[col] = pd.to_datetime(samples[col], format="%Y-%m-%d %H:%M:%S.%f")
#Add Day of week, when issue was created
samples.loc[:,"day_of_week"] = samples.loc[:,"creation"].dt.weekday
#Add Year, when issue was created
samples.loc[:,"year"] = samples.loc[:,"creation"].dt.strftime("%Y")
#Add calendar week
samples.loc[:,"calendar_week"] = samples.loc[:,"creation"].dt.weekofyear
#Add a start_date
samples["start_date"] = samples["creation"].dt.strftime("%Y-%m-%d")
return samples
"""
    This function creates domain mappings for customers. Since a customer can have multiple domains,
    the function takes the domain that occurs most often among the customer's email addresses.
"""
def create_domain_mappings(self, contacts):
#Extract customer's domain from email_ids
contacts.loc[:,"email_ids"] = contacts.loc[:,"email_ids"].astype(str)
mail_split = contacts.loc[:,"email_ids"].str.split(pat = "@",expand=True)
contacts.loc[:,"domain"] = mail_split.loc[:,1]
#Uniquely group by customer and domain
mapping = contacts.loc[:,["customer","domain","name"]]
mapping = mapping.dropna(subset=["customer","domain"],axis="rows").sort_values("customer")
mapping = mapping.groupby(["customer","domain"],as_index=False)["name"].count()
mapping = mapping.rename(columns={"name": "contact count"}).sort_values("contact count",ascending=False)
#The problem customers have multiple domains -> Take the domain with the highest contact count
customers = mapping["customer"].unique().tolist()
domain_by_customer = {}
customer_by_domain = {}
for customer in customers:
max_contacts = mapping.loc[mapping["customer"] == customer,"contact count"].max()
is_customer = mapping["customer"] == customer
is_max_contacts = mapping["contact count"] == max_contacts
is_best_domain = mapping.loc[(is_customer & is_max_contacts),"domain"].iloc[0]
domain_by_customer[customer] = is_best_domain
customer_by_domain[is_best_domain] = customer
#print("Customer: " + customer + " Datatype: " + str(type(customer)) + " Domain: " + is_best_domain)
self.domain_by_customer = domain_by_customer
self.customer_by_domain = customer_by_domain
"""
    Once the domain mappings are created, this can be used to fill in the domain/customer for the email addresses in samples
"""
def samples_map_domains(self, samples):
samples = samples.copy()
#Map customer to domain
is_tueit = samples["domain"] == "tueit.de"
samples.loc[(is_tueit),"domain"] = samples.loc[(is_tueit),"customer"].map(self.domain_by_customer)
#Map domain to customer (This potentially creates NaN's in customer...)
is_foreign_domain = samples["domain"] != "tueit.de"
customer_missing = (samples["customer"] == "00062") | (samples["customer"].isna())
samples.loc[(is_foreign_domain & customer_missing),"customer"] = samples.loc[(is_foreign_domain & customer_missing),"domain"].map(self.customer_by_domain)
return samples
"""
    This routine can be called once the combiner is loaded and is the standard routine for feature generation for samples and training data alike.
"""
def process_samples(self, samples):
#Adding everything to the sample
print("Shape of samples before processing: " + str(samples.shape))
samples = self.samples_enrich(samples)
samples = self.samples_map_domains(samples)
samples = self.lookup_metrics(samples)
print("Shape of samples after processing: " + str(samples.shape))
return samples
"""
This method initializes the combiner
"""
def set_assignments(self, issues, todos, contacts):
assignments = pd.merge(todos.add_prefix("todo_"),issues, left_on="todo_reference_name", right_on="name", how="left")
assignments = self.samples_enrich(assignments)
self.create_domain_mappings(contacts)
assignments = self.samples_map_domains(assignments)
self.assignments = assignments
employees = assignments["todo_owner"].unique()
self.todo_owners = employees[~pd.isnull(employees)].tolist() #Removes nans
"""
This is the second method to call when initializing the combiner
"""
def set_timesheets(self, timesheets, employees, projects, customers):
#Add customer from the issues
print("Combiner: Adding customer from issues.")
assignments = self.assignments.rename(columns={"name": "issue"})
timesheets = pd.merge(timesheets, assignments.loc[:,["issue","customer"]], on="issue", how="left")
#Add Employee id (user_id)
print("Combiner: Adding user_id to Assignments.")
employees = employees.rename(columns={"name" : "employee"})
timesheets = timesheets.merge(employees,on="employee",how="left")
#Add Project
print("Combiner: Adding project to Assignments.")
projects = projects.rename(columns={"name" : "project"})
timesheets = pd.merge(timesheets,projects,on="project",how="left", suffixes=("_from_issues","_from_projects"))
#Add Customer
print("Combiner: Adding customer to Assignments.")
timesheets = timesheets.drop("customer_name",axis=1)
customers = customers.rename(columns={"name" : "customer"})
timesheets["customer"] = np.where((timesheets["customer_from_issues"].isnull()),timesheets["customer_from_projects"],timesheets["customer_from_issues"])
timesheets = timesheets.drop("customer_from_issues", axis=1)
timesheets = timesheets.drop("customer_from_projects", axis=1)
timesheets = pd.merge(timesheets,customers, on="customer", how="left")
#Basic transformation
timesheets["start_date"] = | pd.to_datetime(timesheets["start_date"]) | pandas.to_datetime |
import os
import pandas as pd
#from pandas.util.testing import assert_frame_equal
from rdkit import Chem
from rdkit.Chem import AllChem
import metamoles
from metamoles import cheminform
#from metamoles import *
#Tests for the RDKit molecular similarity functions
#Requires metamoles/test/playground_df_cleaned_kegg_with_smiles.csv to be in the same directory for tests to pass.
data_path = os.path.join(metamoles.__path__[0], 'data')
def test_input_data():
"""Tests input_data function in metamoles.py"""
input_df = pd.read_csv(data_path + "/playground_df_cleaned_kegg_with_smiles.csv")
test_df = cheminform.input_data(input_df)
assert isinstance(test_df, pd.DataFrame) == True, """TypeError,
function should return a pandas dataframe"""
#assert
return '1/1 tests successful'
def test_fingerprint_products():
"""Tests fingerprint_products function in metamoles.py"""
input_df = pd.read_csv(data_path + "/playground_df_cleaned_kegg_with_smiles.csv")
test_df = cheminform.input_data(input_df)
assert isinstance(cheminform.fingerprint_products(test_df), pd.DataFrame) == True, """TypeError,
function should return a pandas dataframe"""
#assert
return '1/1 tests successful'
def test_sim_i_j():
"""Tests sim_i_j function in metamoles.py"""
input_df = pd.read_csv(data_path + "/playground_df_cleaned_kegg_with_smiles.csv")
test_df = cheminform.fingerprint_products(cheminform.input_data(input_df))
A = test_df.iloc[0]
#B = test_df.iloc[1]
#C = test_df.iloc[2]
assert cheminform.sim_i_j(A, A) == 1, "Self correlation is broken"
#assert metamoles.sim_i_j(A, B) == -1, "Standard correlation is broken"
#assert metamoles.sim_i_j(A, C) == 0, "Standard correlation is broken"
return '1/1 tests successful'
def test_sim_i_all():
"""Test sim_i_all function in metamoles.py"""
input_df = pd.read_csv(data_path + "/playground_df_cleaned_kegg_with_smiles.csv")
test_df = cheminform.fingerprint_products(cheminform.input_data(input_df))
metric = pd.DataFrame()
assert metric.empty == True, """ShapeError, input metric dataframe
should be initialized as empty"""
for index, row in test_df.iterrows():
assert cheminform.sim_i_all(test_df, index, row, metric) == None, """OutputError, function
shouldn't return anything"""
assert metric[index].all() >= 0 and metric[index].all() <= 1.0, """ValueError,
metric should be between 0 and 1"""
return "3/3 Tests successful"
def test_sim_metric():
"""Test sim_i_all function in metamoles.py"""
input_df = | pd.read_csv(data_path + "/playground_df_cleaned_kegg_with_smiles.csv") | pandas.read_csv |
import time
from collections import defaultdict
from random import random
import numpy as np
import pandas as pd
import tensorflow as tf
from tensorflow.keras import Sequential
from tensorflow.keras.layers import Dense, Flatten, Reshape, ReLU
from tensorflow.math import exp, sqrt, square
from tensorflow import keras
import os, logging
from sklearn.metrics import accuracy_score, f1_score, recall_score, precision_score
from collections import defaultdict
from functools import partial
import math
LEARN_RATE = 1e-3
BETA_1 = 0.1
class tfEmbedder(tf.keras.Model):
def __init__(
self,
vocab_size,
embedding_dim,
pretrain_matrix=None,
freeze=False,
use_tfidf=False,
):
super(tfEmbedder, self).__init__()
# For 2470: TF-IDF is the importance of the word based on the whole input log
self.use_tfidf = use_tfidf
if pretrain_matrix is not None:
            num_tokens = pretrain_matrix.shape[0]
assert (vocab_size == num_tokens)
assert (embedding_dim == pretrain_matrix.shape[1])
self.embedding_layer = tf.keras.layers.Embedding(
input_dim=vocab_size,
output_dim=embedding_dim,
mask_zero=True,
embeddings_initializer=keras.initializers.Constant(pretrain_matrix),
                trainable=not freeze,  # respect the freeze flag for pretrained embeddings
)
else:
            # no pretrained matrix was supplied, so fall back to the default initializer
            self.embedding_layer = tf.keras.layers.Embedding(
                input_dim=vocab_size,
                output_dim=embedding_dim,
                mask_zero=True,
                trainable=True,
            )
def call(self, x):
if self.use_tfidf:
return tf.matmul(x, self.embedding_layer.weight.double())
else:
return self.embedding_layer(x)
class tf_BasedModel(tf.keras.Model):
def __init__(
self,
meta_data,
batch_sz,
model_save_path,
feature_type,
label_type,
eval_type,
topk,
use_tfidf,
embedding_dim,
cp_callback=None,
freeze=False,
gpu=-1,
anomaly_ratio=None,
patience=3,
**kwargs,
):
super(tf_BasedModel, self).__init__()
self.batch_sz = batch_sz
self.topk = topk
self.meta_data = meta_data
self.feature_type = feature_type
self.label_type = label_type
self.eval_type = eval_type
self.anomaly_ratio = anomaly_ratio # only used for auto encoder
self.patience = patience
self.time_tracker = {}
self.learning_rate = LEARN_RATE
self.beta_1 = BETA_1
self.optimizer = None
self.embedding_dim = embedding_dim
os.makedirs(model_save_path, exist_ok=True)
self.model_save_file = model_save_path
self.vocab_size = meta_data["vocab_size"]
self.cp_callback = tf.keras.callbacks.ModelCheckpoint(
filepath=self.model_save_file,
verbose=1,
save_weights_only=True,
save_freq=5)
assert (feature_type in ["sequentials", "semantics"])
self.embedding_matrix = tf.Variable(
tf.random.truncated_normal([meta_data["vocab_size"], self.embedding_dim], dtype=tf.float32, mean=0,
stddev=1 / math.sqrt(self.embedding_dim)))
def evaluate(self, test_loader, dtype="test"):
logging.info("Evaluating {} data.".format(dtype))
if self.label_type == "next_log":
return self.__evaluate_next_log(test_loader, dtype=dtype)
elif self.label_type == "anomaly":
return self.__evaluate_anomaly(test_loader, dtype=dtype)
elif self.label_type == "none":
raise RuntimeError("Not implemented")
def __evaluate_anomaly(self, test_loader, dtype="test"):
y_pred = []
store_dict = defaultdict(list)
infer_start = time.time()
for batch_idx, batch_list in enumerate(test_loader):
item_dict = defaultdict(list)
for items in batch_list:
item_dict['session_idx'].append(items['session_idx'])
item_dict['features'].append(items['features'])
item_dict['window_labels'].append(items['window_labels'])
item_dict['window_anomalies'].append(items['window_anomalies'])
item_dict['session_idx'] = np.array(item_dict['session_idx']).reshape((self.batch_sz,)).tolist()
item_dict['features'] = np.array(item_dict['features']).reshape((self.batch_sz, -1))
item_dict['window_labels'] = np.array(item_dict['window_labels']).reshape((self.batch_sz,)).tolist()
item_dict['window_anomalies'] = np.array(item_dict['window_anomalies']).reshape((self.batch_sz,)).tolist()
return_dict = self.call(item_dict)
_y_pred = tf.math.argmax(return_dict["y_pred"],axis=1).numpy().tolist()
y_prob = tf.gather(return_dict["y_pred"], indices=_y_pred, axis=1).numpy().tolist()
y_pred.append(_y_pred)
store_dict["session_idx"].extend(
item_dict["session_idx"]
)
store_dict["window_anomalies"].extend(
item_dict["window_anomalies"]
)
store_dict["window_preds"].extend(_y_pred)
store_dict["window_probs"].extend(y_prob)
infer_end = time.time()
logging.info("Finish inference. [{:.2f}s]".format(infer_end - infer_start))
self.time_tracker["test"] = infer_end - infer_start
store_df = pd.DataFrame(store_dict)
use_cols = ["session_idx", "window_anomalies", "window_preds"]
session_df = store_df[use_cols].groupby("session_idx", as_index=False).sum()
pred = list(map(int,(session_df[f"window_preds"] > 0).tolist()))
y = list(map(int, (session_df["window_anomalies"] > 0).tolist()))
eval_results = {
"f1": f1_score(y, pred, average='weighted', labels=np.unique(y)),
"rc": recall_score(y, pred, average='weighted', labels=np.unique(y)),
"pc": precision_score(y, pred, average='weighted', labels=np.unique(y)),
"acc": accuracy_score(y, pred),
}
logging.info({k: f"{v:.3f}" for k, v in eval_results.items()})
return eval_results
def __evaluate_next_log(self, test_loader, dtype="test"):
y_pred = []
store_dict = defaultdict(list)
infer_start = time.time()
for batch_idx, batch_list in enumerate(test_loader):
item_dict = defaultdict(list)
for items in batch_list:
item_dict['session_idx'].append(items['session_idx'])
item_dict['features'].append(items['features'])
item_dict['window_labels'].append(items['window_labels'])
item_dict['window_anomalies'].append(items['window_anomalies'])
item_dict['session_idx'] = np.array(item_dict['session_idx']).reshape((self.batch_sz, )).tolist()
item_dict['features'] = np.array(item_dict['features']).reshape((self.batch_sz, -1))
item_dict['window_labels'] = np.array(item_dict['window_labels']).reshape((self.batch_sz,)).tolist()
item_dict['window_anomalies'] = np.array(item_dict['window_anomalies']).reshape((self.batch_sz,)).tolist()
return_dict = self.call(item_dict)
y_pred = return_dict["y_pred"]
y_prob_topk, y_pred_topk = tf.math.top_k(input=y_pred, k=self.topk)
store_dict["session_idx"].extend(
item_dict["session_idx"]
)
store_dict["window_anomalies"].extend(
item_dict["window_anomalies"]
)
store_dict["window_labels"].extend(item_dict["window_labels"])
store_dict["x"].extend(item_dict["features"])
store_dict["y_pred_topk"].extend(y_pred_topk)
store_dict["y_prob_topk"].extend(y_prob_topk)
infer_end = time.time()
logging.info("Finish inference. [{:.2f}s]".format(infer_end - infer_start))
self.time_tracker["test"] = infer_end - infer_start
store_df = pd.DataFrame(store_dict)
best_result = None
best_f1 = -float("inf")
count_start = time.time()
topkdf = pd.DataFrame(store_df["y_pred_topk"].tolist())
logging.info("Calculating acc sum.")
hit_df = | pd.DataFrame() | pandas.DataFrame |
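# Standalone sketch of the session-level aggregation used in __evaluate_anomaly
# above: window-level predictions are summed per session and a session counts as
# anomalous if any of its windows was flagged. Ids and labels are invented.
import pandas as pd

def session_aggregation_example():
    store_df = pd.DataFrame({
        "session_idx": [0, 0, 0, 1, 1, 2],
        "window_anomalies": [0, 1, 0, 0, 0, 1],
        "window_preds": [0, 1, 1, 0, 0, 0],
    })
    session_df = store_df.groupby("session_idx", as_index=False).sum()
    pred = (session_df["window_preds"] > 0).astype(int)   # predicted session label
    y = (session_df["window_anomalies"] > 0).astype(int)  # true session label
    return pred.tolist(), y.tolist()  # ([1, 0, 0], [1, 0, 1])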
import numpy as np
"""
This Monte Carlo algorithm approximates the "true" value of the parameter(s) of
interest using a random walk of normally distributed steps, whose mean is 0 or
the last accepted step for that parameter.
"""
truth=5
tss = []
for j in range(50):
ts = []
stepsizes = [.01,.05,.1,.5,1,5,10]
index=0
while len(ts) < len(stepsizes):
w0 = 0
score1 = abs(truth-w0)
score=score1
delta = 0
t = 0
u = 0
stepsize=stepsizes[index]
while (score1 > .5)&(t<1000):
w1 = w0+np.random.normal(delta,stepsize)
score2 = abs(truth-w1)
if -score2>-score1:
delta = w1-w0
w0 = w1
score1=score2
u+=1
t+=1
print(t,score1,u)
if score1 <=.5:
ts.append(t)
index+=1
tss.append(ts)
tss=np.array(tss)
stepsize = stepsizes[np.argmin(np.mean(tss,axis=0))]
truth = 5
w0 = 0
score1 = abs(truth-w0)
score=score1
delta = 0
t = 0
u = 0
while (score1 > .5)&(t<1000):
w1 = w0+np.random.normal(delta,stepsize)
score2 = abs(truth-w1)
if -score2>-score1:
delta = w1-w0
w0 = w1
score1=score2
u+=1
t+=1
print(t,score1,u)
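# A compact, reusable sketch of the same random-walk search as a function, so
# the adaptive-step idea above is easier to reuse elsewhere. This is an
# illustrative refactor, not part of the original script.
def random_walk_search(truth, stepsize, tol=0.5, max_iter=1000, seed=None):
    rng = np.random.default_rng(seed)
    w, delta, best = 0.0, 0.0, abs(truth)
    steps_taken = 0
    for steps_taken in range(1, max_iter + 1):
        candidate = w + rng.normal(delta, stepsize)
        score = abs(truth - candidate)
        if score < best:  # accept, and keep the step as the new drift
            delta, w, best = candidate - w, candidate, score
        if best <= tol:
            break
    return w, best, steps_taken

# e.g. random_walk_search(truth=5, stepsize=stepsize)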
import pandas as pd
import numpy as np
import random
import matplotlib.pyplot as plt
dat = pd.read_csv("https://archive.ics.uci.edu/ml/machine-learning-databases/00519/heart_failure_clinical_records_dataset.csv")
pd.set_option("display.max_columns",500)
dat.tail()
covars = ['age','anaemia','creatinine_phosphokinase',
'diabetes','ejection_fraction','high_blood_pressure',
'platelets','serum_creatinine','serum_sodium',
'sex','smoking','time']
X=dat[covars].copy()
Y=dat['DEATH_EVENT']
Yodds = Y/(1-Y)
Yodds = np.where(Yodds==np.inf,1e16,1e-16)
Ylogodds = np.log(Yodds)
X=(X-np.mean(X,axis=0))/np.std(X,axis=0)
X['int']=1
random.seed(42)
index = np.array(random.choices([1,2,3,4,5],k=len(X)))
xv = X[index==5].copy()
yv = Ylogodds[index==5].copy()
xt = X[index!=5].copy()
yt = Ylogodds[index!=5].copy()
coefs = np.linalg.pinv(xt.T@xt)@(xt.T@yt)
predtlogodds = xt@coefs
predvlogodds = xv@coefs
predt=np.exp(predtlogodds)/(1+np.exp(predtlogodds))
predt=np.where(predt>.5,1,0)
predv=np.exp(predvlogodds)/(1+np.exp(predvlogodds))
predv=np.where(predv>.5,1,0)
act_t = np.exp(yt)/(1+np.exp(yt))
act_t=np.where(act_t>.5,1,0)
act_v = np.exp(yv)/(1+np.exp(yv))
act_v=np.where(act_v>.5,1,0)
logregt_acc=sum(np.where(predt==act_t,1,0))/len(predt)
logregv_acc = sum(np.where(predv==act_v,1,0))/len(predv)
print("logreg training acc:",logregt_acc,"val acc:",logregv_acc)
from sklearn.linear_model import LogisticRegression
xv = X[index==5].copy()
yv = Y[index==5].copy()
xt = X[index!=5].copy()
yt = Y[index!=5].copy()
lr = LogisticRegression(fit_intercept=False,solver = 'newton-cg',penalty='l2')
lr.fit(xt,yt)
sum(np.where(lr.predict(xt)==yt,1,0))/len(yt)
sum(np.where(lr.predict(xv)==yv,1,0))/len(yv)
#BASE KNN Maximizing Recall
from sklearn.neighbors import KNeighborsClassifier
X=dat[covars].copy()
Y=dat['DEATH_EVENT']
X=(X-np.mean(X,axis=0))/np.std(X,axis=0)
random.seed(42)
index = np.array(random.choices([1,2,3,4,5,6],k=len(X)))
acc = []
for i in list(range(2,30)):
avgscore=[]
for t in [1,2,3,4,5]:
xv = X[index==t].copy()
yv = Y[index==t].copy()
xt = X[~pd.Series(index).isin([t,6])].copy()
yt = Y[~pd.Series(index).isin([t,6])].copy()
knn = KNeighborsClassifier(n_neighbors=i,
weights='distance',
algorithm='auto', leaf_size=30, p=2,
metric='euclidean', metric_params=None,
n_jobs=None)
knn.fit(xt,yt)
tp=sum(np.where((knn.predict(xv)==1)&(yv==1),1,0))
fp=sum(np.where((knn.predict(xv)==1)&(yv==0),1,0))
tn=sum(np.where((knn.predict(xv)==0)&(yv==0),1,0))
fn=sum(np.where((knn.predict(xv)==0)&(yv==1),1,0))
precision=tp/(tp+fp)
recall=tp/(tp+fn)
#score = (sum(np.where(knn.predict(xv*w0)==yv,1,0)))/(len(yv))
score = precision
avgscore.append(score)
acc.append(np.mean(avgscore))
plt.plot(acc)
plt.xticks(list(range(28)),list(range(2,30)))
plt.show()
#k=18
k=4
k=16
def model_precision(X,Y,w,k):
random.seed(42)
index = np.array(random.choices([1,2,3,4,5,6],k=len(X)))
initscores=[]
for val in [1,2,3,4,5]:
xv = X[pd.Series(index).isin([val])].copy()
yv = Y[pd.Series(index).isin([val])].copy()
xt = X[~pd.Series(index).isin([val,6])].copy()
yt = Y[~pd.Series(index).isin([val,6])].copy()
knn = KNeighborsClassifier(n_neighbors=k,
weights='distance',
algorithm='auto', leaf_size=30, p=2,
metric='euclidean', metric_params=None,
n_jobs=None)
knn.fit(xt*w,yt)
tp=sum(np.where((knn.predict(xv*w)==1)&(yv==1),1,0))
fp=sum(np.where((knn.predict(xv*w)==1)&(yv==0),1,0))
tn=sum(np.where((knn.predict(xv*w)==0)&(yv==0),1,0))
fn=sum(np.where((knn.predict(xv*w)==0)&(yv==1),1,0))
precision=tp/(tp+fp)
recall=tp/(tp+fn)
#score = (sum(np.where(knn.predict(xv*w0)==yv,1,0)))/(len(yv))
score = precision
initscores.append(score)
score=np.mean(initscores)
return score
def model_recall(X,Y,w,k):
random.seed(42)
index = np.array(random.choices([1,2,3,4,5,6],k=len(X)))
initscores=[]
for val in [1,2,3,4,5]:
xv = X[pd.Series(index).isin([val])].copy()
yv = Y[pd.Series(index).isin([val])].copy()
xt = X[~pd.Series(index).isin([val,6])].copy()
yt = Y[~pd.Series(index).isin([val,6])].copy()
knn = KNeighborsClassifier(n_neighbors=k,
weights='distance',
algorithm='auto', leaf_size=30, p=2,
metric='euclidean', metric_params=None,
n_jobs=None)
knn.fit(xt*w,yt)
tp=sum(np.where((knn.predict(xv*w)==1)&(yv==1),1,0))
fp=sum(np.where((knn.predict(xv*w)==1)&(yv==0),1,0))
tn=sum(np.where((knn.predict(xv*w)==0)&(yv==0),1,0))
fn=sum(np.where((knn.predict(xv*w)==0)&(yv==1),1,0))
precision=tp/(tp+fp)
recall=tp/(tp+fn)
#score = (sum(np.where(knn.predict(xv*w0)==yv,1,0)))/(len(yv))
score = recall
initscores.append(score)
score=np.mean(initscores)
return score
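# Standalone illustration of why multiplying features by a weight vector w
# changes the KNN geometry: Euclidean distance on X * w is a weighted Euclidean
# distance on X, so up-weighted features dominate the neighbour search.
def weighted_distance_example():
    a = np.array([1.0, 0.0])
    b = np.array([0.0, 1.0])
    w_demo = np.array([2.0, 0.5])  # invented weights
    unweighted = np.linalg.norm(a - b)          # sqrt(2)
    weighted = np.linalg.norm((a - b) * w_demo) # sqrt(4 + 0.25)
    return unweighted, weighted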
def sequential_MCMC(X,Y,model_fn,draws=30,no_update_limit=120,
stepsize=.1,step_shrinkage=.9,
delta_reset=20,):
#INITIAL SCORE
w0 = np.ones(len(X.columns.values))
score = model_fn(X,Y,w0,k)
scoreinit=score
wfin = []
scores = []
while len(wfin)<draws:
noupdate=0
deltachosen=False
stepsize=stepsize
score=scoreinit
delta=np.random.normal(0,stepsize/2,len(covars))
w0=np.ones(len(X.columns.values))
while noupdate<no_update_limit:
w1 = w0+np.random.normal(delta,stepsize,len(X.columns.values))
score2 = model_fn(X,Y,w1,k)
if score2>score:
print(score2,score,"accepted",noupdate)
                deltachosen=True
score=score2
delta = w1-w0
w0=w1
noupdate=0
else:
#print(score2,score)
noupdate+=1
if deltachosen==False:
delta=np.random.normal(0,stepsize/2,len(X.columns.values))
            if noupdate % delta_reset == 0:  # shrink the step every delta_reset consecutive rejections
deltachosen=False
stepsize=stepsize*step_shrinkage
delta=np.random.normal(0,stepsize/2,len(X.columns.values))
if score>scoreinit:
wfin.append(w0)
scores.append(score)
wfin_arr=np.vstack(wfin)
return(wfin_arr,scores)
wfin_arr,scores=sequential_MCMC(X,Y,model_fn=model_precision,draws=30,no_update_limit=120,
stepsize=.1,step_shrinkage=.9,
delta_reset=20)
print(np.mean(wfin_arr,axis=0))
print(np.std(wfin_arr,axis=0))
for i in range(12):
plt.hist(wfin_arr.T[i],bins=10)
plt.title(covars[i])
plt.show()
method=np.median
xv = X[pd.Series(index).isin([6])].copy()
yv = Y[pd.Series(index).isin([6])].copy()
xt = X[pd.Series(index).isin([1,2,3,4,5])].copy()
yt = Y[pd.Series(index).isin([1,2,3,4,5])].copy()
wf=method(wfin_arr,axis=0)
knn = KNeighborsClassifier(n_neighbors=k,
weights='distance',
algorithm='auto', leaf_size=30, p=2,
metric='euclidean', metric_params=None,
n_jobs=None)
knn.fit(xt*wf,yt)
tp=sum(np.where((knn.predict(xv*wf)==1)&(yv==1),1,0))
fp=sum(np.where((knn.predict(xv*wf)==1)&(yv==0),1,0))
tn=sum(np.where((knn.predict(xv*wf)==0)&(yv==0),1,0))
fn=sum(np.where((knn.predict(xv*wf)==0)&(yv==1),1,0))
print('precision: ',tp/(tp+fp))
print('recall: ',tp/(tp+fn))
print('accuracy: ',(tp+tn)/(tp+fn+fp+tn))
knn = KNeighborsClassifier(n_neighbors=k,
weights='distance',
algorithm='auto', leaf_size=30, p=2,
metric='euclidean', metric_params=None,
n_jobs=None)
knn.fit(xt,yt)
tp=sum(np.where((knn.predict(xv)==1)&(yv==1),1,0))
fp=sum(np.where((knn.predict(xv)==1)&(yv==0),1,0))
tn=sum(np.where((knn.predict(xv)==0)&(yv==0),1,0))
fn=sum(np.where((knn.predict(xv)==0)&(yv==1),1,0))
print('precision: ',tp/(tp+fp))
print('recall: ',tp/(tp+fn))
print('accuracy: ',(tp+tn)/(tp+fn+fp+tn))
wfin_arr,scores=sequential_MCMC(X,Y,model_fn=model_recall,draws=30,no_update_limit=120,
stepsize=.1,step_shrinkage=.9,
delta_reset=20)
knn = KNeighborsClassifier(n_neighbors=k,
weights='distance',
algorithm='auto', leaf_size=30, p=2,
metric='euclidean', metric_params=None,
n_jobs=None)
knn.fit(xt*wf,yt)
tp=sum(np.where((knn.predict(xv*wf)==1)&(yv==1),1,0))
fp=sum(np.where((knn.predict(xv*wf)==1)&(yv==0),1,0))
tn=sum(np.where((knn.predict(xv*wf)==0)&(yv==0),1,0))
fn=sum(np.where((knn.predict(xv*wf)==0)&(yv==1),1,0))
print('precision: ',tp/(tp+fp))
print('recall: ',tp/(tp+fn))
print('accuracy: ',(tp+tn)/(tp+fn+fp+tn))
knn = KNeighborsClassifier(n_neighbors=k,
weights='distance',
algorithm='auto', leaf_size=30, p=2,
metric='euclidean', metric_params=None,
n_jobs=None)
knn.fit(xt,yt)
tp=sum(np.where((knn.predict(xv)==1)&(yv==1),1,0))
fp=sum(np.where((knn.predict(xv)==1)&(yv==0),1,0))
tn=sum(np.where((knn.predict(xv)==0)&(yv==0),1,0))
fn=sum(np.where((knn.predict(xv)==0)&(yv==1),1,0))
print('precision: ',tp/(tp+fp))
print('recall: ',tp/(tp+fn))
print('accuracy: ',(tp+tn)/(tp+fn+fp+tn))
initscores=[]
for val in [1,2,3,4,5]:
xv = X[pd.Series(index).isin([val])].copy()
yv = Y[pd.Series(index).isin([val])].copy()
xt = X[~pd.Series(index).isin([val,6])].copy()
yt = Y[~pd.Series(index).isin([val,6])].copy()
knn = KNeighborsClassifier(n_neighbors=k,
weights='distance',
algorithm='auto', leaf_size=30, p=2,
metric='euclidean', metric_params=None,
n_jobs=None)
stepsize=.1
w0=np.ones(len(covars))
delta=np.random.normal(0,stepsize/2,len(covars))
knn.fit(xt*w0,yt)
tp=sum(np.where((knn.predict(xv*w0)==1)&(yv==1),1,0))
fp=sum(np.where((knn.predict(xv*w0)==1)&(yv==0),1,0))
tn=sum(np.where((knn.predict(xv*w0)==0)&(yv==0),1,0))
fn=sum(np.where((knn.predict(xv*w0)==0)&(yv==1),1,0))
precision=tp/(tp+fp)
recall=tp/(tp+fn)
#score = (sum(np.where(knn.predict(xv*w0)==yv,1,0)))/(len(yv))
score = recall
initscores.append(score)
score=np.mean(initscores)
scoreinit=score
#sum(np.where(knn.predict(xv*w0)==yv,1,0))/len(yv)
#sum(np.where(knn.predict(xt*w0)==yt,1,0))/len(yt)
wfin=[]
scores = []
while len(wfin)<30:
noupdate=0
deltachosen=False
score=scoreinit
stepsize=.1
delta=np.random.normal(0,stepsize/2,len(covars))
w0=np.ones(len(covars))
#iteration=0
while noupdate<120:
#iteration+=1
#val = iteration%4+1
score2list=[]
for val in [1,2,3,4,5]:
xv = X[pd.Series(index).isin([val])].copy()
yv = Y[pd.Series(index).isin([val])].copy()
xt = X[~pd.Series(index).isin([val,6])].copy()
yt = Y[~pd.Series(index).isin([val,6])].copy()
w1 = w0+np.random.normal(delta,stepsize,len(covars))
knn = KNeighborsClassifier(n_neighbors=k,
weights='distance',
algorithm='auto', leaf_size=30, p=2,
metric='euclidean', metric_params=None,
n_jobs=None)
knn.fit(xt*w1,yt)
tp=sum(np.where((knn.predict(xv*w1)==1)&(yv==1),1,0))
fp=sum(np.where((knn.predict(xv*w1)==1)&(yv==0),1,0))
tn=sum(np.where((knn.predict(xv*w1)==0)&(yv==0),1,0))
fn=sum(np.where((knn.predict(xv*w1)==0)&(yv==1),1,0))
precision=tp/(tp+fp)
recall=tp/(tp+fn)
#score2 = sum(np.where(knn.predict(xv*w1)==yv,1,0))/len(yv)
score2 = recall
score2list.append(score2)
score2=np.mean(score2list)
if score2>score:
print(score2,score,"accepted",noupdate)
            deltachosen=True
score=score2
delta = w1-w0
w0=w1
noupdate=0
else:
#print(score2,score)
noupdate+=1
if deltachosen==False:
delta=np.random.normal(0,stepsize/2,len(covars))
        if noupdate % 20 == 0:  # shrink the step every 20 consecutive rejections
deltachosen=False
stepsize=stepsize*.9
delta=np.random.normal(0,stepsize/2,len(covars))
if score>scoreinit:
wfin.append(w0)
scores.append(score)
wfin_arr=np.vstack(wfin)
print(np.mean(wfin_arr,axis=0))
print(np.std(wfin_arr,axis=0))
for i in range(12):
plt.hist(wfin_arr.T[i],bins=10)
plt.title(covars[i])
plt.show()
method=np.mean
xv = X[pd.Series(index).isin([6])].copy()
yv = Y[pd.Series(index).isin([6])].copy()
xt = X[pd.Series(index).isin([1,2,3,4,5])].copy()
yt = Y[pd.Series(index).isin([1,2,3,4,5])].copy()
wf=method(wfin_arr,axis=0)
knn = KNeighborsClassifier(n_neighbors=k,
weights='distance',
algorithm='auto', leaf_size=30, p=2,
metric='euclidean', metric_params=None,
n_jobs=None)
knn.fit(xt*wf,yt)
tp=sum(np.where((knn.predict(xv*wf)==1)&(yv==1),1,0))
fp=sum(np.where((knn.predict(xv*wf)==1)&(yv==0),1,0))
tn=sum(np.where((knn.predict(xv*wf)==0)&(yv==0),1,0))
fn=sum(np.where((knn.predict(xv*wf)==0)&(yv==1),1,0))
print('precision: ',tp/(tp+fp))
print('recall: ',tp/(tp+fn))
print('accuracy: ',(tp+tn)/(tp+fn+fp+tn))
knn = KNeighborsClassifier(n_neighbors=k,
weights='distance',
algorithm='auto', leaf_size=30, p=2,
metric='euclidean', metric_params=None,
n_jobs=None)
knn.fit(xt,yt)
tp=sum(np.where((knn.predict(xv)==1)&(yv==1),1,0))
fp=sum(np.where((knn.predict(xv)==1)&(yv==0),1,0))
tn=sum(np.where((knn.predict(xv)==0)&(yv==0),1,0))
fn=sum(np.where((knn.predict(xv)==0)&(yv==1),1,0))
print('precision: ',tp/(tp+fp))
print('recall: ',tp/(tp+fn))
print('accuracy: ',(tp+tn)/(tp+fn+fp+tn))
scores_ordered = sorted(range(len(scores)), key=lambda k: scores[k])
wfin_sorted = wfin_arr[scores_ordered]
wfin_selected = wfin_sorted[15:]
wf_sort=method(wfin_selected,axis=0)
knn = KNeighborsClassifier(n_neighbors=k,
weights='distance',
algorithm='auto', leaf_size=30, p=2,
metric='euclidean', metric_params=None,
n_jobs=None)
knn.fit(xt*wf_sort,yt)
tp=sum(np.where((knn.predict(xv*wf_sort)==1)&(yv==1),1,0))
fp=sum(np.where((knn.predict(xv*wf_sort)==1)&(yv==0),1,0))
tn=sum(np.where((knn.predict(xv*wf_sort)==0)&(yv==0),1,0))
fn=sum(np.where((knn.predict(xv*wf_sort)==0)&(yv==1),1,0))
print('precision: ',tp/(tp+fp))
print('recall: ',tp/(tp+fn))
print('accuracy: ',(tp+tn)/(tp+fn+fp+tn))
#BASE KNN Maximizing Precision
from sklearn.neighbors import KNeighborsClassifier
import warnings
warnings.filterwarnings("ignore")
X=dat[covars].copy()
Y=dat['DEATH_EVENT']
X=(X-np.mean(X,axis=0))/np.std(X,axis=0)
random.seed(42)
index = np.array(random.choices([1,2,3,4,5,6],k=len(X)))
acc = []
for i in list(range(2,30)):
avgscore=[]
for t in [1,2,3,4,5]:
xv = X[index==t].copy()
yv = Y[index==t].copy()
xt = X[~pd.Series(index).isin([t,6])].copy()
yt = Y[~pd.Series(index).isin([t,6])].copy()
knn = KNeighborsClassifier(n_neighbors=i,
weights='distance',
algorithm='auto', leaf_size=30, p=2,
metric='euclidean', metric_params=None,
n_jobs=None)
knn.fit(xt,yt)
tp=sum(np.where((knn.predict(xv)==1)&(yv==1),1,0))
fp=sum(np.where((knn.predict(xv)==1)&(yv==0),1,0))
tn=sum(np.where((knn.predict(xv)==0)&(yv==0),1,0))
fn=sum(np.where((knn.predict(xv)==0)&(yv==1),1,0))
precision=tp/(tp+fp)
recall=tp/(tp+fn)
#score = (sum(np.where(knn.predict(xv*w0)==yv,1,0)))/(len(yv))
score = precision
avgscore.append(score)
acc.append(np.mean(avgscore))
plt.plot(acc)
plt.xticks(list(range(28)),list(range(2,30)))
plt.show()
#k=18
k=17
initscores=[]
for val in [1,2,3,4,5]:
xv = X[pd.Series(index).isin([val])].copy()
yv = Y[pd.Series(index).isin([val])].copy()
xt = X[~pd.Series(index).isin([val,6])].copy()
yt = Y[~pd.Series(index).isin([val,6])].copy()
knn = KNeighborsClassifier(n_neighbors=k,
weights='distance',
algorithm='auto', leaf_size=30, p=2,
metric='euclidean', metric_params=None,
n_jobs=None)
stepsize=.1
w0=np.ones(len(covars))
delta=np.random.normal(0,stepsize/2,len(covars))
knn.fit(xt*w0,yt)
tp=sum(np.where((knn.predict(xv*w0)==1)&(yv==1),1,0))
fp=sum(np.where((knn.predict(xv*w0)==1)&(yv==0),1,0))
tn=sum(np.where((knn.predict(xv*w0)==0)&(yv==0),1,0))
fn=sum(np.where((knn.predict(xv*w0)==0)&(yv==1),1,0))
precision=tp/(tp+fp)
recall=tp/(tp+fn)
#score = (sum(np.where(knn.predict(xv*w0)==yv,1,0)))/(len(yv))
score = round(precision,5)
initscores.append(score)
score=np.mean(initscores)
scoreinit=round(score,5)
#sum(np.where(knn.predict(xv*w0)==yv,1,0))/len(yv)
#sum(np.where(knn.predict(xt*w0)==yt,1,0))/len(yt)
wfin=[]
scores = []
while len(wfin)<30:
noupdate=0
deltachosen=False
score=scoreinit
stepsize=.1
delta=np.random.normal(0,stepsize/2,len(covars))
w0=np.ones(len(covars))
#iteration=0
while noupdate<120:
#iteration+=1
#val = iteration%4+1
score2list=[]
for val in [1,2,3,4,5]:
xv = X[pd.Series(index).isin([val])].copy()
yv = Y[pd.Series(index).isin([val])].copy()
xt = X[~pd.Series(index).isin([val,6])].copy()
yt = Y[~pd.Series(index).isin([val,6])].copy()
w1 = w0+np.random.normal(delta,stepsize,len(covars))
knn = KNeighborsClassifier(n_neighbors=k,
weights='distance',
algorithm='auto', leaf_size=30, p=2,
metric='euclidean', metric_params=None,
n_jobs=None)
knn.fit(xt*w1,yt)
tp=sum(np.where((knn.predict(xv*w1)==1)&(yv==1),1,0))
fp=sum(np.where((knn.predict(xv*w1)==1)&(yv==0),1,0))
tn=sum(np.where((knn.predict(xv*w1)==0)&(yv==0),1,0))
fn=sum(np.where((knn.predict(xv*w1)==0)&(yv==1),1,0))
precision=tp/(tp+fp)
recall=tp/(tp+fn)
#score2 = sum(np.where(knn.predict(xv*w1)==yv,1,0))/len(yv)
score2 = round(precision,5)
score2list.append(score2)
score2=round(np.mean(score2list) ,5 )
if score2>score:
print(score2,score,"accepted",noupdate)
            deltachosen=True
score=score2
delta = w1-w0
w0=w1
noupdate=0
else:
#print(score2,score)
noupdate+=1
if deltachosen==False:
delta=np.random.normal(0,stepsize/2,len(covars))
        if noupdate % 20 == 0:  # shrink the step every 20 consecutive rejections
deltachosen=False
stepsize=stepsize*.9
delta=np.random.normal(0,stepsize/2,len(covars))
if score>scoreinit:
wfin.append(w0)
scores.append(score)
wfin_arr=np.vstack(wfin)
print(np.mean(wfin_arr,axis=0))
print(np.std(wfin_arr,axis=0))
for i in range(12):
plt.hist(wfin_arr.T[i],bins=10)
plt.title(covars[i])
plt.show()
method=np.mean
xv = X[pd.Series(index).isin([6])].copy()
yv = Y[pd.Series(index).isin([6])].copy()
xt = X[pd.Series(index).isin([1,2,3,4,5])].copy()
yt = Y[pd.Series(index).isin([1,2,3,4,5])].copy()
wf=method(wfin_arr,axis=0)
knn = KNeighborsClassifier(n_neighbors=k,
weights='distance',
algorithm='auto', leaf_size=30, p=2,
metric='euclidean', metric_params=None,
n_jobs=None)
knn.fit(xt*wf,yt)
tp=sum(np.where((knn.predict(xv*wf)==1)&(yv==1),1,0))
fp=sum(np.where((knn.predict(xv*wf)==1)&(yv==0),1,0))
tn=sum(np.where((knn.predict(xv*wf)==0)&(yv==0),1,0))
fn=sum(np.where((knn.predict(xv*wf)==0)&(yv==1),1,0))
print('reweighted precision: ',tp/(tp+fp))
print('recall: ',tp/(tp+fn))
print('accuracy: ',(tp+tn)/(tp+fn+fp+tn))
knn = KNeighborsClassifier(n_neighbors=k,
weights='distance',
algorithm='auto', leaf_size=30, p=2,
metric='euclidean', metric_params=None,
n_jobs=None)
knn.fit(xt,yt)
tp=sum(np.where((knn.predict(xv)==1)&(yv==1),1,0))
fp=sum(np.where((knn.predict(xv)==1)&(yv==0),1,0))
tn=sum(np.where((knn.predict(xv)==0)&(yv==0),1,0))
fn=sum(np.where((knn.predict(xv)==0)&(yv==1),1,0))
print('unweighted precision: ',tp/(tp+fp))
print('recall: ',tp/(tp+fn))
print('accuracy: ',(tp+tn)/(tp+fn+fp+tn))
scores_ordered = sorted(range(len(scores)), key=lambda k: scores[k])
wfin_sorted = wfin_arr[scores_ordered]
wfin_selected = wfin_sorted[15:]
wf_sort=method(wfin_selected,axis=0)
knn = KNeighborsClassifier(n_neighbors=k,
weights='distance',
algorithm='auto', leaf_size=30, p=2,
metric='euclidean', metric_params=None,
n_jobs=None)
knn.fit(xt*wf_sort,yt)
tp=sum(np.where((knn.predict(xv*wf_sort)==1)&(yv==1),1,0))
fp=sum(np.where((knn.predict(xv*wf_sort)==1)&(yv==0),1,0))
tn=sum(np.where((knn.predict(xv*wf_sort)==0)&(yv==0),1,0))
fn=sum(np.where((knn.predict(xv*wf_sort)==0)&(yv==1),1,0))
print('selective precision: ',tp/(tp+fp))
print('recall: ',tp/(tp+fn))
print('accuracy: ',(tp+tn)/(tp+fn+fp+tn))
len(yv)
#BASE KNN Maximizing Precision & Recall
from sklearn.neighbors import KNeighborsClassifier
import warnings
warnings.filterwarnings("ignore")
X=dat[covars].copy()
Y=dat['DEATH_EVENT']
X=(X-np.mean(X,axis=0))/np.std(X,axis=0)
random.seed(42)
index = np.array(random.choices([1,2,3,4,5,6],k=len(X)))
acc = []
for i in list(range(2,30)):
avgscore=[]
for t in [1,2,3,4,5]:
xv = X[index==t].copy()
yv = Y[index==t].copy()
xt = X[~pd.Series(index).isin([t,6])].copy()
yt = Y[~pd.Series(index).isin([t,6])].copy()
knn = KNeighborsClassifier(n_neighbors=i,
weights='distance',
algorithm='auto', leaf_size=30, p=2,
metric='euclidean', metric_params=None,
n_jobs=None)
knn.fit(xt,yt)
tp=sum(np.where((knn.predict(xv)==1)&(yv==1),1,0))
fp=sum(np.where((knn.predict(xv)==1)&(yv==0),1,0))
tn=sum(np.where((knn.predict(xv)==0)&(yv==0),1,0))
fn=sum(np.where((knn.predict(xv)==0)&(yv==1),1,0))
precision=tp/(tp+fp)
recall=tp/(tp+fn)
#score = (sum(np.where(knn.predict(xv*w0)==yv,1,0)))/(len(yv))
score = (precision+recall)/2
avgscore.append(score)
acc.append(np.mean(avgscore))
plt.plot(acc)
plt.xticks(list(range(28)),list(range(2,30)))
plt.show()
#k=18
k=10
initscores=[]
for val in [1,2,3,4,5]:
xv = X[pd.Series(index).isin([val])].copy()
yv = Y[pd.Series(index).isin([val])].copy()
xt = X[~pd.Series(index).isin([val,6])].copy()
yt = Y[~ | pd.Series(index) | pandas.Series |
from __future__ import absolute_import
from ..plot_object import PlotObject
from ..properties import HasProps
from ..properties import Any, Int, String, Instance, List, Dict, Either, Bool, Enum
class DataSource(PlotObject):
""" A base class for data source types. ``DataSource`` is
not generally useful to instantiate on its own.
"""
column_names = List(String, help="""
    A list of names for all the columns in this DataSource.
""")
selected = List(Int, help="""
A list of selected indices on this DataSource.
""")
def columns(self, *columns):
""" Returns a ColumnsRef object for a column or set of columns
on this data source.
Args:
            *columns : the names of the columns to reference from this data source
Returns:
ColumnsRef
"""
return ColumnsRef(source=self, columns=list(columns))
class ColumnsRef(HasProps):
""" A utility object to allow referring to a collection of columns
from a specified data source, all together.
"""
source = Instance(DataSource, help="""
A data source to reference.
""")
columns = List(String, help="""
A list of column names to reference from ``source``.
""")
class ColumnDataSource(DataSource):
""" Maps names of columns to sequences or arrays.
If the ColumnDataSource initializer is called with a single
argument that is a dict, that argument is used as the value for
the "data" attribute. For example::
ColumnDataSource(mydict) # same as ColumnDataSource(data=mydict)
.. note::
There is an implicit assumption that all the columns in a
a given ColumnDataSource have the same length.
"""
data = Dict(String, Any, help="""
Mapping of column names to sequences of data. The data can be, e.g,
Python lists or tuples, NumPy arrays, etc.
""")
def __init__(self, *args, **kw):
""" If called with a single argument that is a dict, treat
that implicitly as the "data" attribute.
"""
if len(args) == 1 and "data" not in kw:
kw["data"] = args[0]
# TODO (bev) invalid to pass args and "data", check and raise exception
raw_data = kw.pop("data", {})
if not isinstance(raw_data, dict):
import pandas as pd
if isinstance(raw_data, pd.DataFrame):
raw_data = self.from_df(raw_data)
else:
raise ValueError("expected a dict or pandas.DataFrame, got %s" % raw_data)
for name, data in raw_data.items():
self.add(data, name)
super(ColumnDataSource, self).__init__(**kw)
# TODO: (bev) why not just return a ColumnDataSource?
@classmethod
def from_df(cls, data):
""" Create a ``dict`` of columns from a Pandas DataFrame,
suitable for creating a ColumnDataSource.
Args:
data (DataFrame) : data to convert
Returns:
dict(str, list)
"""
index = data.index
new_data = {}
for colname in data:
new_data[colname] = data[colname].tolist()
if index.name:
new_data[index.name] = index.tolist()
elif index.names and not all([x is None for x in index.names]):
new_data["_".join(index.names)] = index.tolist()
else:
new_data["index"] = index.tolist()
return new_data
def to_df(self):
""" Convert this data source to pandas dataframe.
If ``column_names`` is set, use those. Otherwise let Pandas
infer the column names. The ``column_names`` property can be
used both to order and filter the columns.
Returns:
DataFrame
"""
import pandas as pd
if self.column_names:
return | pd.DataFrame(self.data, columns=self.column_names) | pandas.DataFrame |
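# Standalone sketch of the DataFrame -> dict-of-lists conversion performed by
# ColumnDataSource.from_df above, shown on a small invented frame.
import pandas as pd

def from_df_example():
    df = pd.DataFrame({"x": [1, 2, 3], "y": [10.0, 20.0, 30.0]})
    converted = {col: df[col].tolist() for col in df}
    converted["index"] = df.index.tolist()  # an unnamed index falls back to "index"
    return converted  # {'x': [1, 2, 3], 'y': [10.0, 20.0, 30.0], 'index': [0, 1, 2]}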
import argparse
import gc
import json
import logging
from pathlib import Path
import feather
import numpy as np
import lightgbm as lgb
import pandas as pd
from scipy import sparse as sp
from tqdm import tqdm
import config as cfg
from predictors import GBMFeatures, GBMPredictor
from utils import (
ProductEncoder,
make_coo_row,
normalized_average_precision,
get_shard_path,
cache_to_feather,
get_check_users,
)
logging.basicConfig(
level=logging.INFO,
format="%(asctime)s - %(message)s",
handlers=[logging.FileHandler("collect_dataset.log"), logging.StreamHandler()],
)
def get_gbm_records(shard_indices, gbm_feat, max_records=None, **kwargs):
check_users = get_check_users()
gbm_records = []
num_records = 0
for shard_idx in tqdm(shard_indices, leave=False):
for js in tqdm(
(json.loads(s) for s in open(get_shard_path(shard_idx))), leave=False
):
if js["client_id"] in check_users:
continue
feat_records, _ = gbm_feat.get_gbm_features(js, train=True, **kwargs)
gbm_records.extend(feat_records)
num_records += 1
if max_records and num_records >= max_records:
return gbm_records
return gbm_records
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("--N", type=int, default=100)
parser.add_argument("--max-records", type=int, default=None)
return parser.parse_args()
if __name__ == "__main__":
args = parse_args()
N_POOL = args.N
MAX_RECORDS = args.max_records
logger = logging.getLogger(__name__)
ASSETS_DIR = cfg.ASSETS_DIR
NUM_TEST_SHARD = 15
# to test pipeline
SHARDS = [14]
# full training
# SHARDS = range(15)
gbm_feat = GBMFeatures(
product_csv_path=ASSETS_DIR / "products.csv",
model_pickled_path=ASSETS_DIR / "model_implicit_cosine_50.pkl",
products_misc_path=ASSETS_DIR / "products_misc.csv",
product_features_encoder_path=ASSETS_DIR / "product_features.pkl",
implicit_tfidf_path=ASSETS_DIR / "model_implicit_tf_idf100.pkl",
implicit_als_path=ASSETS_DIR / "model_implicit_als_16fact_12iter.pkl",
implicit_cosine2_path=ASSETS_DIR / "model_implicit_cosine2.pkl",
umap_item_emb_path=ASSETS_DIR / "umap_item_emb.npy",
item_co_occurrence_path=ASSETS_DIR / "item_co_occurrence_min_cnt_5.npz",
item_occurrence_path=ASSETS_DIR / "item_occurrence.npy",
user_prod_log_idf_path=ASSETS_DIR / "user_prod_log_idf.npy",
tran_prod_log_idf_path=ASSETS_DIR / "tran_prod_log_idf.npy",
N=N_POOL,
# trunk_svd_arr_path=ASSETS_DIR / "svd_128_components_T.npy",
# faiss_index_path=str(ASSETS_DIR / "faiss_base.idx"),
# train_scores_path=ASSETS_DIR / "X_scores_sparse.npz",
# faiss_neighbors=512,
# faiss_nprobe=16,
)
train_dir = Path(f"../tmp/train_chunks_{gbm_feat.N}")
train_dir.mkdir(exist_ok=True)
test_dir = Path(f"../tmp/test_chunks_{gbm_feat.N}")
test_dir.mkdir(exist_ok=True)
logger.info("Collecting train dataset")
for num_shard in tqdm(SHARDS, leave=False):
gbm_rec_train = get_gbm_records([num_shard], gbm_feat, max_records=MAX_RECORDS)
df_gbm_train_chunk = | pd.DataFrame(gbm_rec_train) | pandas.DataFrame |
import logging
import pandas as pd
import typing
from multipledispatch import dispatch
from cell_imaging_utils.datasets_metadata.dict.datasetes_metadata_abstract_dict import DatasetsMetaDataAbstractDict
log = logging.getLogger(__name__)
"""
DatasetsMetaDataPickle
------------------
Pickle implementation of DatasetMetadataAbstract
"""
class DatasetMetadataPickle(DatasetsMetaDataAbstractDict):
def __init__(self, destenation, source=None) -> None:
super().__init__(destenation, source)
if (self.source is not None):
self.data = pd.read_pickle(self.source)
else:
self.data = | pd.DataFrame([]) | pandas.DataFrame |
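# Minimal illustration of the pandas pickle round-trip this class relies on.
# The file path and columns below are invented for the example.
import pandas as pd

def pickle_roundtrip_example(path="/tmp/example_metadata.pkl"):
    df = pd.DataFrame({"image": ["a.tiff", "b.tiff"], "label": [0, 1]})
    df.to_pickle(path)
    restored = pd.read_pickle(path)
    return restored.equals(df)  # True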
import pandas as pd
import numpy as np
import gc
import os
# read data
col_dict = {'mjd': np.float64, 'flux': np.float32, 'flux_err': np.float32, 'object_id': np.int32, 'passband': np.int8,
'detected': np.int8}
train_meta = pd.read_csv(os.path.join('data', 'training_set_metadata.csv'))
train = pd.read_csv(os.path.join('data', 'training_set.csv'), dtype=col_dict)
def calc_aggs(all_data, exact):
# Normalise the flux, following the Bayesian approach here:
# https://www.statlect.com/fundamentals-of-statistics/normal-distribution-Bayesian-estimation
# Similar idea (but not the same) as the normalisation done in the Starter Kit
# https://www.kaggle.com/michaelapers/the-plasticc-astronomy-starter-kit?scriptVersionId=6040398
prior_mean = all_data.groupby(['object_id', 'passband'])['flux'].transform('mean')
prior_std = all_data.groupby(['object_id', 'passband'])['flux'].transform('std')
prior_std.loc[prior_std.isnull()] = all_data.loc[prior_std.isnull(), 'flux_err']
obs_std = all_data['flux_err'] # since the above kernel tells us that the flux error is the 68% confidence interval
all_data['bayes_flux'] = (all_data['flux'] / obs_std**2 + prior_mean / prior_std**2) \
/ (1 / obs_std**2 + 1 / prior_std**2)
all_data.loc[all_data['bayes_flux'].notnull(), 'flux'] \
= all_data.loc[all_data['bayes_flux'].notnull(), 'bayes_flux']
# Estimate the flux at source, using the fact that light is proportional
# to inverse square of distance from source.
# This is hinted at here: https://www.kaggle.com/c/PLAsTiCC-2018/discussion/70725#417195
redshift = all_meta.set_index('object_id')[['hostgal_specz', 'hostgal_photoz']]
if exact:
redshift['redshift'] = redshift['hostgal_specz']
redshift.loc[redshift['redshift'].isnull(), 'redshift'] \
= redshift.loc[redshift['redshift'].isnull(), 'hostgal_photoz']
else:
redshift['redshift'] = redshift['hostgal_photoz']
all_data = pd.merge(all_data, redshift, 'left', 'object_id')
nonzero_redshift = all_data['redshift'] > 0
all_data.loc[nonzero_redshift, 'flux'] = all_data.loc[nonzero_redshift, 'flux'] \
* all_data.loc[nonzero_redshift, 'redshift']**2
# aggregate features
band_aggs = all_data.groupby(['object_id', 'passband'])['flux'].agg(['mean', 'std', 'max', 'min']).unstack(-1)
band_aggs.columns = [x + '_' + str(y) for x in band_aggs.columns.levels[0]
for y in band_aggs.columns.levels[1]]
all_data.sort_values(['object_id', 'passband', 'flux'], inplace=True)
# this way of calculating quantiles is faster than using the pandas quantile builtin on the groupby object
all_data['group_count'] = all_data.groupby(['object_id', 'passband']).cumcount()
all_data['group_size'] = all_data.groupby(['object_id', 'passband'])['flux'].transform('size')
q_list = [0.25, 0.75]
for q in q_list:
all_data['q_' + str(q)] = all_data.loc[
(all_data['group_size'] * q).astype(int) == all_data['group_count'], 'flux']
quantiles = all_data.groupby(['object_id', 'passband'])[['q_' + str(q) for q in q_list]].max().unstack(-1)
quantiles.columns = [str(x) + '_' + str(y) + '_quantile' for x in quantiles.columns.levels[0]
for y in quantiles.columns.levels[1]]
# max detected flux
max_detected = all_data.loc[all_data['detected'] == 1].groupby('object_id')['flux'].max().to_frame('max_detected')
def most_extreme(df_in, k, positive=True, suffix='', include_max=True, include_dur=True, include_interval=False):
# find the "most extreme" time for each object, and for each band, retrieve the k data points on either side
# k points before
df = df_in.copy()
df['object_passband_mean'] = df.groupby(['object_id', 'passband'])['flux'].transform('median')
if positive:
df['dist_from_mean'] = (df['flux'] - df['object_passband_mean'])
else:
df['dist_from_mean'] = -(df['flux'] - df['object_passband_mean'])
max_time = df.loc[df['detected'] == 1].groupby('object_id')['dist_from_mean'].idxmax().to_frame(
'max_ind')
max_time['mjd_max' + suffix] = df.loc[max_time['max_ind'].values, 'mjd'].values
df = pd.merge(df, max_time[['mjd_max' + suffix]], 'left', left_on=['object_id'], right_index=True)
df['time_after_mjd_max'] = df['mjd'] - df['mjd_max' + suffix]
df['time_before_mjd_max'] = -df['time_after_mjd_max']
# first k after event
df.sort_values(['object_id', 'passband', 'time_after_mjd_max'], inplace=True)
df['row_num_after'] = df.loc[df['time_after_mjd_max'] >= 0].groupby(
['object_id', 'passband']).cumcount()
first_k_after = df.loc[(df['row_num_after'] < k) & (df['time_after_mjd_max'] <= 50),
['object_id', 'passband', 'flux', 'row_num_after']]
first_k_after.set_index(['object_id', 'passband', 'row_num_after'], inplace=True)
first_k_after = first_k_after.unstack(level=-1).unstack(level=-1)
first_k_after.columns = [str(x) + '_' + str(y) + '_after' for x in first_k_after.columns.levels[1]
for y in first_k_after.columns.levels[2]]
extreme_data = first_k_after
time_bands = [[-50, -20], [-20, -10], [-10, 0], [0, 10], [10, 20], [20, 50], [50, 100], [100, 200], [200, 500]]
if include_interval:
interval_arr = []
for start, end in time_bands:
band_data = df.loc[(start <= df['time_after_mjd_max']) & (df['time_after_mjd_max'] <= end)]
interval_agg = band_data.groupby(['object_id', 'passband'])['flux'].mean().unstack(-1)
interval_agg.columns = ['{}_start_{}_end_{}'.format(c, start, end) for c in interval_agg.columns]
interval_arr.append(interval_agg)
interval_data = pd.concat(interval_arr, axis=1)
extreme_data = pd.concat([extreme_data, interval_data], axis=1)
if include_dur:
# detection duration in each passband after event
duration_after = df.loc[(df['time_after_mjd_max'] >= 0) & (df['detected'] == 0)] \
.groupby(['object_id', 'passband'])['time_after_mjd_max'].first().unstack(-1)
duration_after.columns = ['dur_after_' + str(c) for c in range(6)]
extreme_data = pd.concat([extreme_data, duration_after], axis=1)
# last k before event
df.sort_values(['object_id', 'passband', 'time_before_mjd_max'], inplace=True)
df['row_num_before'] = df.loc[df['time_before_mjd_max'] >= 0].groupby(
['object_id', 'passband']).cumcount()
first_k_before = df.loc[(df['row_num_before'] < k) & (df['time_after_mjd_max'] <= 50),
['object_id', 'passband', 'flux', 'row_num_before']]
first_k_before.set_index(['object_id', 'passband', 'row_num_before'], inplace=True)
first_k_before = first_k_before.unstack(level=-1).unstack(level=-1)
first_k_before.columns = [str(x) + '_' + str(y) + '_before' for x in first_k_before.columns.levels[1]
for y in first_k_before.columns.levels[2]]
extreme_data = pd.concat([extreme_data, first_k_before], axis=1)
if include_dur:
# detection duration in each passband before event
duration_before = df.loc[(df['time_before_mjd_max'] >= 0) & (df['detected'] == 0)] \
.groupby(['object_id', 'passband'])['time_before_mjd_max'].first().unstack(-1)
duration_before.columns = ['dur_before_' + str(c) for c in range(6)]
extreme_data = pd.concat([extreme_data, duration_before], axis=1)
if include_max:
# passband with maximum detected flux for each object
max_pb = df.loc[max_time['max_ind'].values].groupby('object_id')['passband'].max().to_frame(
'max_passband')
# time of max in each passband, relative to extreme max
band_max_ind = df.groupby(['object_id', 'passband'])['flux'].idxmax()
band_mjd_max = df.loc[band_max_ind.values].groupby(['object_id', 'passband'])['mjd'].max().unstack(-1)
cols = ['max_time_' + str(i) for i in range(6)]
band_mjd_max.columns = cols
band_mjd_max = | pd.merge(band_mjd_max, max_time, 'left', 'object_id') | pandas.merge |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from IPython.core.display import HTML
def data():
df = pd.read_stata('data, R and html files/1section.dta')
df['loansgdp'] = df.tloans/df.gdp
return df.head()
def fig21():
# dynamics of loans to gdp ratio over time for USA, Germany, Australia, France
df = pd.read_stata('data, R and html files/1section.dta')
usa = df[df.country == 'USA']
ger = df[df.country == 'Germany']
aus = df[df.country == 'Australia']
fr = df[df.country == 'France']
uk = df[df.country == 'UK']
sp = df[df.country == 'Spain']
fig = plt.figure(figsize=(10,5))
plt.suptitle('Figure 2.1 Ratio of private loans to GDP in selected developed economies')
plt.subplot(2, 3, 1)
plt.plot(usa.year, usa.loansgdp, color='c')
plt.title('USA')
plt.subplot(2, 3, 2)
plt.plot(ger.year, ger.loansgdp, color='c')
plt.title('Germany')
plt.subplot(2, 3, 3)
plt.plot(aus.year, aus.loansgdp, color='c')
plt.title('Australia')
plt.subplot(2, 3, 4)
plt.plot(fr.year, fr.loansgdp, color='c')
plt.title('France', y=-0.35)
plt.subplot(2, 3, 5)
plt.plot(uk.year, uk.loansgdp, color='c')
plt.title('UK', y=-0.35)
plt.subplot(2, 3, 6)
plt.plot(sp.year, sp.loansgdp, color='c')
plt.title('Spain', y=-0.35)
fig.text(0.05,-0.08,'Source: Jorda et al. (2013) dataset');
plt.show()
def prep1():
#preparation of excess credit dummy
df = pd.read_stata('data, R and html files/1section.dta')
df['loansgdp'] = df.tloans/df.gdp
df['diff_loansgdp'] = 100*df['loansgdp'].diff()
loans_table = df.pivot_table(values = 'diff_loansgdp', index = 'country', columns = 'year')
pd.options.display.float_format = '{:,.3f}'.format
mean_year = loans_table.mean()
mean_all = mean_year.mean()
df['excredit'] = (df['diff_loansgdp'] > mean_all).astype(int)
#data preparation
df['year'] = df['year'].astype('int')
df['pk_fin'] = df['pk_fin'].astype('int')
df['pk_norm'] = df['pk_norm'].astype('int')
df['lrgdp'] = np.log(df.rgdp)
#copy to the file for R
df.to_stata('data, R and html files/1section1.dta')
df = df.loc[(df['pk_fin'] == 1) & (df['excredit'] == 1)]
df = df[['year','country', 'pk_norm', 'pk_fin', 'excredit', 'pop', 'gdp', 'rgdp', 'lrgdp', 'tloans', 'loansgdp', 'diff_loansgdp', 'dlcpi', 'dlriy', 'stir', 'ltrate', 'ldlrgdp', 'ldlcpi', 'ldlriy', 'lstir', 'lltrate']]
return df.head()
def data2():
#obtaining the data
df = pd.read_stata('data, R and html files/2section.dta')
pd.set_option('display.float_format', lambda x: '%.3f' % x)
df['year'] = df['year'].astype('int')
df = df[['year', 'state', 'stateid', 'Total_GSP', 'cpi', 'population', 'spend', 'employ_CES', 'Dcpi_ACCRA']]
#state output
#output in millions
df['out'] = df.Total_GSP*1000000
#real output
df['rout'] = df.out/df.cpi
#real output per capita
df['rcapout'] = df.rout/df.population
#percent change of real output per capita
df['Drcapout'] = df.sort_values(['year']).groupby('state')['rcapout'].pct_change()
#state spending
#real spending
df['rspend'] = df.spend/df.cpi
#real spending per capita
df['rcapspend'] = df.rspend/df.population
#real p.c.spending change as % of real p.c. output
df['rcapspend_lag'] = df.sort_values('year').groupby(['state'])['rcapspend'].shift(1)
df['rcapout_lag'] = df.sort_values('year').groupby(['state'])['rcapout'].shift(1)
df['Drcapspend'] = (df.rcapspend - df.rcapspend_lag)/df.rcapout_lag
#state employment
#employment in thousands
df['emp'] = df.employ_CES*1000
#% change in employment rate
df['emp_lag'] = df.sort_values('year').groupby(['state'])['emp'].shift(1)
df['population_lag'] = df.sort_values('year').groupby(['state'])['population'].shift(1)
df['Demp'] = (df.emp/df.population - df.emp_lag/df.population_lag)/(df.emp_lag/df.population_lag)
#state population
#% change of population
df['Dpop'] = (df.population-df.population_lag)/df.population_lag
#aggregate population
df1 = df.groupby('year')['population'].sum()
df1 = df1.to_frame().reset_index()
df1 = pd.concat([df1]*51)
df1 = df1.drop(columns="year")
df1 = df1.to_numpy()
df['population_nat'] = df1
#aggregate output
#aggregation of real output
df1 = df.groupby('year')['rout'].sum()
df1 = df1.to_frame().reset_index()
df1 = | pd.concat([df1]*51) | pandas.concat |
""" Survival regression with Cox's proportional hazard model. """
import argparse
import logging
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from lifelines import CoxPHFitter
MAX_DUR = 180
def get_pmf_from_survival(survival_f):
pmf = survival_f.copy()
for i in range(survival_f.shape[0] - 1):
pmf[i, :] -= pmf[i + 1, :]
sums = np.sum(pmf, axis=0)
#plt.plot(np.arange(MAX_DUR), pmf)
#plt.show()
assert (sums > 0.95).all(), survival_f[0]
return pmf
def loglikelihood(hazards, cum_hazards):
""" Refer to https://stats.stackexchange.com/questions/417303/
what-is-the-likelihood-for-this-process"""
assert hazards.shape == cum_hazards.shape
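# log-likelihood of an observed event time: log f(t) = log h(t) - H(t), since f(t) = h(t) * exp(-H(t))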
lls = np.log(hazards) - cum_hazards
return lls
def get_hazard_from_cum_hazard(cum_hazard):
"""
Refer to the Discrete survival models section in lifelines.
"""
hazard = 1 - np.exp(cum_hazard[:-1,:] - cum_hazard[1:,:])
return hazard
def get_covariates_dict_from_list(covs_list):
m = np.array([c.flatten() for c in covs_list])
d = {}
for i, dim in enumerate(m.T):
d['c{}'.format(i)] = dim
return d
def get_state_sequence_and_residual_time_from_vit(vit):
states = []
residual_times = []
for hs, dur in vit:
states.extend([hs] * dur)
residual_times.extend(np.arange(dur)[::-1])
assert len(states) == len(residual_times) and len(states) == vit[:,1].sum()
return states, residual_times
def get_pd(horizon, lobs, lvit, ntest_obs=None):
""" Get a pandas data frame from input data (lobs, lvit). horizon denotes
the number of observations that will be fed into the predictive model.
"""
assert horizon > 0
survival_times = []
covariates = []
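# slide a window of `horizon` observations over each sequence; the label is the
# remaining dwell time of the current hidden state at the window's last step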
for obs, vit in zip(lobs, lvit):
dim, nobs = obs.shape
_, residual_times = get_state_sequence_and_residual_time_from_vit(vit)
assert len(residual_times) == nobs
if ntest_obs is not None:
nobs = ntest_obs
for t in range(nobs - horizon + 1):
covariates.append(obs[:, t: t + horizon])
survival_time = residual_times[t + horizon - 1]
assert survival_time >= 0
survival_times.append(survival_time)
complete = [1] * len(survival_times) # All segments are complete. No censoring.
data_dict = {'survival_time': survival_times, 'complete': complete}
covariates_dict = get_covariates_dict_from_list(covariates)
data_dict.update(covariates_dict)
return | pd.DataFrame(data_dict) | pandas.DataFrame |
"""
A generative model training algorithm based on
"PATE-GAN: Generating Synthetic Data with Differential Privacy Guarantees"
by <NAME>, <NAME>, <NAME>, published in International Conference on Learning Representations (ICLR), 2019
Adapted from: https://bitbucket.org/mvdschaar/mlforhealthlabpub/src/82d7f91d46db54d256ff4fc920d513499ddd2ab8/alg/pategan/
"""
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
import numpy as np
import pandas as pd
from tqdm import tqdm
try:
from .generative_model import GenerativeModel
except ImportError:
from generative_model import GenerativeModel
class PateGan(GenerativeModel):
""" A generative adversarial network trained under the PATE framework to achieve differential privacy """
def __init__(self, metadata, eps, delta,
MB_SIZE=128, C_DIM=1, LAM=10, LR=1e-4,
NITER=100, NUM_TEACHERS=10):
"""
:param metadata: dict: Attribute metadata describing the data domain of the synthetic target data
:param eps: float: Privacy parameter
:param delta: float: Privacy parameter
"""
self.metadata = metadata
self.epsilon = eps
self.delta = delta
self.datatype = pd.DataFrame
# Batch size
self.MB_SIZE = MB_SIZE
self.C_DIM = C_DIM
# WGAN-GP Parameters
self.LAM = LAM
self.LR = LR
self.NITER = NITER
self.NUM_TEACHERS = NUM_TEACHERS
self.trained = False
self.__name__ = f'PateGan{self.epsilon}'
def fit(self, data):
"""Fit the generative model of the training data distribution.
:param data: DataFrame: Training set
"""
X_train, Y_train, cols_to_reverse = self._one_hot(data)
self.columns_to_reverse = cols_to_reverse
self.no, self.X_dim = X_train.shape
self.z_dim = int(self.X_dim / 4)
self.h_dim = int(self.X_dim)
# Feature matrix
self.X = tf.placeholder(tf.float32, shape=[None, self.X_dim])
# Target variable
self.Y = tf.placeholder(tf.float32, shape=[None, self.C_DIM])
# Latent space
self.Z = tf.placeholder(tf.float32, shape=[None, self.z_dim])
# Conditional variable
self.M = tf.placeholder(tf.float32, shape=[None, self.C_DIM])
self.Y_train = Y_train
# '^' is bitwise XOR in Python; exponentiation is intended for the Gaussian-mechanism noise scale
lamda = np.sqrt(2 * np.log(1.25 * (10 ** self.delta))) / self.epsilon
# Data Preprocessing
X_train = np.asarray(X_train)
self.Min_Val = np.min(X_train, 0)
X_train = X_train - self.Min_Val
self.Max_Val = np.max(X_train, 0)
X_train = X_train / (self.Max_Val + 1e-8)
self.dim = len(X_train[:,0])
# Generator
self.G_sample = self._generator(self.Z,self.Y)
# Discriminator
D_real = self._discriminator(self.X, self.Y)
D_fake = self._discriminator(self.G_sample, self.Y)
D_entire = tf.concat(axis=0, values=[D_real, D_fake])
# Replacement of Clipping algorithm to Penalty term
# 1. Line 6 in Algorithm 1
eps = tf.random_uniform([self.MB_SIZE, 1], minval=0., maxval=1.)
X_inter = eps * self.X + (1. - eps) * self.G_sample
# 2. Line 7 in Algorithm 1
grad = tf.gradients(self._discriminator(X_inter, self.Y), [X_inter, self.Y])[0]
grad_norm = tf.sqrt(tf.reduce_sum((grad) ** 2 + 1e-8, axis=1))
grad_pen = self.LAM * tf.reduce_mean((grad_norm - 1) ** 2)
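# WGAN-GP gradient penalty: push the critic's gradient norm towards 1 along random
# interpolates between real and generated samples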
# Loss function
D_loss = tf.reduce_mean((1 - self.M) * D_entire) - tf.reduce_mean(self.M * D_entire) + grad_pen
G_loss = -tf.reduce_mean(D_fake)
# Solver
D_solver = (tf.train.AdamOptimizer(learning_rate=self.LR, beta1=0.5).minimize(D_loss, var_list=self.theta_D))
G_solver = (tf.train.AdamOptimizer(learning_rate=self.LR, beta1=0.5).minimize(G_loss, var_list=self.theta_G))
# Start session
self.sess = tf.Session()
self.sess.run(tf.global_variables_initializer())
# Training iterations
for _ in tqdm(range(self.NITER)):
for _ in range(self.NUM_TEACHERS):
# Teacher training
Z_mb = self._sample_Z(self.MB_SIZE, self.z_dim)
# Teacher 1
X_idx = self._sample_X(self.no, self.MB_SIZE)
X_mb = X_train[X_idx, :]
Y_mb = np.reshape(Y_train[X_idx], [self.MB_SIZE, 1])
M_real = np.ones([self.MB_SIZE, ])
M_fake = np.zeros([self.MB_SIZE, ])
M_entire = np.concatenate((M_real, M_fake), 0)
Normal_Add = np.random.normal(loc=0.0, scale=lamda, size=self.MB_SIZE * 2)
M_entire = M_entire + Normal_Add
M_entire = (M_entire > 0.5)
M_mb = np.reshape(M_entire.astype(float), (2 * self.MB_SIZE, 1))
_, D_loss_curr = self.sess.run([D_solver, D_loss], feed_dict={self.X: X_mb, self.Z: Z_mb, self.M: M_mb, self.Y: Y_mb})
# Generator Training
Z_mb = self._sample_Z(self.MB_SIZE, self.z_dim)
X_idx = self._sample_X(self.no, self.MB_SIZE)
Y_mb = np.reshape(Y_train[X_idx], [self.MB_SIZE, 1])
_, G_loss_curr = self.sess.run([G_solver, G_loss], feed_dict={self.Z: Z_mb, self.Y: Y_mb})
self.trained = True
def generate_samples(self, nsamples):
"""""
Samples synthetic data records from the fitted generative distribution
:param nsamples: int: Number of synthetic records to generate
:return synData: DataFrame: A synthetic dataset
"""
# Output generation
New_X_train = self.sess.run([self.G_sample], feed_dict={self.Z: self._sample_Z(self.dim, self.z_dim),
self.Y: np.reshape(self.Y_train, [len(self.Y_train), 1])})
New_X_train = New_X_train[0]
# Renormalization
New_X_train = New_X_train * (self.Max_Val + 1e-8)
New_X_train = New_X_train + self.Min_Val
New_X_train = np.concatenate((New_X_train,np.reshape(self.Y_train, [len(self.Y_train), 1])), axis = 1)
np.random.shuffle(New_X_train)
synth_samples = self._reverse_one_hot(New_X_train[:nsamples])
for one_item in self.metadata['columns']:
if one_item['name'] == self.column_y:
y_values = one_item['i2s']
synth_samples[self.column_y] = \
synth_samples.apply(lambda x, reverse_one_hot=y_values: reverse_one_hot[int(float(x[self.column_y]))], axis=1)
return synth_samples
def _generator(self, z, y):
"""
PateGan generator implementation
:param z: training data
:param y: training labels
"""
G_W1 = tf.Variable(self._xavier_init([self.z_dim + self.C_DIM, self.h_dim]))
G_b1 = tf.Variable(tf.zeros(shape=[self.h_dim]))
G_W2 = tf.Variable(self._xavier_init([self.h_dim, self.h_dim]))
G_b2 = tf.Variable(tf.zeros(shape=[self.h_dim]))
G_W3 = tf.Variable(self._xavier_init([self.h_dim, self.X_dim]))
G_b3 = tf.Variable(tf.zeros(shape=[self.X_dim]))
self.theta_G = [G_W1, G_W2, G_W3, G_b1, G_b2, G_b3]
inputs = tf.concat([z, y], axis=1)
G_h1 = tf.nn.tanh(tf.matmul(inputs, G_W1) + G_b1)
G_h2 = tf.nn.tanh(tf.matmul(G_h1, G_W2) + G_b2)
G_log_prob = tf.nn.sigmoid(tf.matmul(G_h2, G_W3) + G_b3)
return G_log_prob
def _discriminator(self, x, y):
"""
PateGan generator implementation
:param x: training data
:param y: training labels
"""
D_W1 = tf.Variable(self._xavier_init([self.X_dim + self.C_DIM, self.h_dim]))
D_b1 = tf.Variable(tf.zeros(shape=[self.h_dim]))
D_W2 = tf.Variable(self._xavier_init([self.h_dim, self.h_dim]))
D_b2 = tf.Variable(tf.zeros(shape=[self.h_dim]))
D_W3 = tf.Variable(self._xavier_init([self.h_dim, 1]))
D_b3 = tf.Variable(tf.zeros(shape=[1]))
self.theta_D = [D_W1, D_W2, D_W3, D_b1, D_b2, D_b3]
inputs = tf.concat([x, y], axis=1)
D_h1 = tf.nn.relu(tf.matmul(inputs, D_W1) + D_b1)
D_h2 = tf.nn.relu(tf.matmul(D_h1, D_W2) + D_b2)
out = (tf.matmul(D_h2, D_W3) + D_b3)
return out
def _xavier_init(self,size):
in_dim = size[0]
xavier_stddev = 1. / tf.sqrt(in_dim / 2.)
return tf.random_normal(shape=size, stddev=xavier_stddev)
def _sample_Z(self,m, n):
return np.random.uniform(-1., 1., size=[m, n])
def _sample_X(self,m, n):
return np.random.permutation(m)[:n]
def _one_hot(self, data):
continuous_columns = self.metadata['continuous_columns']
categorical_columns = sorted(self.metadata['categorical_columns'] + self.metadata['ordinal_columns'])
# 'data is pd.DataFrame()' always evaluates to False; check the type instead
if isinstance(data, pd.DataFrame):
df = data
else:
df = | pd.DataFrame(data) | pandas.DataFrame |
# Constants, variables, and methods that are commonly used
import os
from datetime import datetime
import numpy as np
import pandas as pd
from collections import Counter
ITEM_LIST_DATANORM = ['X_range','X_min','X_scaled','X_scaled_mean']
ITEM_LIST_RUNPCA = ['X_PCA','EigenValue_PCA','EigenVector_PCA','NumComp_PCA','Error_PCA']
ITEM_LIST_RUNKDE = ['s_v','c_v','hat_s_v','X_KDE','EigenValues_KDE','KDE_g','KDE_m','KDE_a','KDE_Z','KDE_Eigen']
ITEM_LIST_ISDEGENE = ['Errors','X_new']
ITEM_LIST = ['basic']+['constraints_file']+['X0','N','n']+ITEM_LIST_DATANORM+ITEM_LIST_RUNPCA \
+ITEM_LIST_RUNKDE+ITEM_LIST_ISDEGENE # all variables in the database
ITEM_ADDS = ['/'+x for x in ITEM_LIST] # HDFStore ABSOLUTE path-names
ATTR_LIST = [None,None,'X','N','n',
'alpha','x_min','X_scaled','x_mean',
'H','mu','phi','nu','errPCA',
's_v','c_v','hat_s_v','K','b','g','m','a','Z','eigenKDE',
'errors','Xnew']
ATTR_MAP = dict(zip(ITEM_ADDS, ATTR_LIST))
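# ATTR_MAP links every HDFStore path in ITEM_ADDS to the matching PLoM attribute name
# (None for entries, such as 'basic', that have no direct attribute)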
FULL_TASK_LIST = ['DataNormalization','RunPCA','RunKDE','ISDEGeneration']
TASK_ITEM_MAP = {'DataNormalization': ITEM_LIST_DATANORM,
'RunPCA': ITEM_LIST_RUNPCA,
'RunKDE': ITEM_LIST_RUNKDE,
'ISDEGeneration': ITEM_LIST_ISDEGENE}
class Logfile:
def __init__(self, logfile_dir = './', logfile_name = 'plom.log', screen_msg = True):
"""
Initializing the logfile
- logfile_dir: default is the same path of the PLoM package
- logfile_name: default is the "plom.log"
- screen_msg: default is to show message on screen
"""
self.logfile_dir = logfile_dir
self.logfile_name = logfile_name
self.logfile_path = os.path.join(self.logfile_dir, self.logfile_name)
self.screen_msg = screen_msg
# start the log
self.write_msg(msg = '--NEW LOG STARTING FROM THIS LINE--', mode='w')
def write_msg(self, msg = '', msg_type = 'RUNNING', msg_level = 0, mode='a'):
"""
Writing running messages
- msg: the message
- msg_type: the type of message 'RUNNING', 'WARNING', 'ERROR'
- msg_level: how many indent tags
"""
indent_tabs = ''.join(['\t']*msg_level)
decorated_msg = '{} {} {}-MSG {} '.format(datetime.utcnow(), indent_tabs, msg_type, msg)
if self.screen_msg:
print(decorated_msg)
with open(self.logfile_path, mode) as f:
f.write('\n'+decorated_msg)
def delete_logfile(self):
"""
Deleting the log file
"""
if os.path.exists(self.logfile_path):
os.remove(self.logfile_path)
else:
print('The logfile {} does not exist.'.format(self.logfile_path))
class DBServer:
def __init__(self, db_dir = './', db_name = 'plom.h5'):
"""
Initializing the database
- db_dir: default is the same path of the PLoM package
- db_name: default is "plom.h5"
"""
self.db_dir = db_dir
self.db_name = db_name
self.db_path = os.path.join(self.db_dir, self.db_name)
if os.path.exists(self.db_path):
# deleting the old database
os.remove(self.db_path)
self.init_time = datetime.utcnow()
self.item_name_list = []
self.basic()
self.dir_export = self._create_export_dir()
self._item_list = ITEM_LIST
self._item_adds = ITEM_ADDS
def basic(self):
"""
Writing basic info
"""
df = pd.DataFrame.from_dict({
'InitializedTime': [self.init_time],
'LastEditedTime': [datetime.utcnow()],
'DBName': [self.db_name],
}, dtype=str)
store = | pd.HDFStore(self.db_path, 'a') | pandas.HDFStore |
#!/usr/bin/env python
# coding: utf-8
# In[2]:
import sys
sys.path.append('..')
# In[3]:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from datetime import timedelta, datetime, date
import os
from utils import data_paths, load_config
from pathlib import Path
from nltk.metrics import edit_distance #(Levenshtein)
import pycountry
import math
# # Estimating The Infected Population From Deaths
# > Estimating the number of infected people by country based on the number of deaths and case fatality rate.
#
# - comments: true
# - author: <NAME>
# - categories: [growth, compare, interactive, estimation]
# - hide: false
# - image: images/covid-estimate-infections.png
# - permalink: /covid-infected/
# - toc: true
# In[4]:
LOCAL_FILES=True
#jupyter or script
IS_SCRIPT = False
# In[5]:
os.getcwd()
# In[6]:
if IS_SCRIPT:
RUN_PATH = Path(os.path.realpath(__file__))
DATA_PARENT = RUN_PATH.parent.parent
else:
#for jupyter
cw = get_ipython().getoutput('pwd')
RUN_PATH = Path(cw[0])
DATA_PARENT = RUN_PATH.parent
# In[7]:
if IS_SCRIPT:
csse_data = data_paths('tools/csse_data_paths.yml')
else:
csse_data = data_paths('csse_data_paths.yml')
# In[8]:
if LOCAL_FILES:
confirmed_url=csse_data.get("csse_ts_local", {}).get('confirmed', {})
deaths_url=csse_data.get("csse_ts_local", {}).get('deaths', {})
recovered_url=csse_data.get("csse_ts_local", {}).get('recovered', {})
confirmed_url = str(DATA_PARENT/confirmed_url)
deaths_url = str(DATA_PARENT/deaths_url)
recovered_url = str(DATA_PARENT/recovered_url)
else:
confirmed_url=csse_data.get("csse_ts_global", {}).get('confirmed', {})
deaths_url=csse_data.get("csse_ts_global", {}).get('deaths', {})
recovered_url=csse_data.get("csse_ts_global", {}).get('recovered', {})
# In[9]:
### UN stats
# In[10]:
df_un_pop_density_info=pd.read_csv(DATA_PARENT/'data/un/df_un_pop_density_info.csv')
df_un_urban_growth_info=pd.read_csv(DATA_PARENT/'data/un/urban_growth_info.csv')
df_un_health_info= | pd.read_csv(DATA_PARENT/'data/un/df_un_health_info.csv') | pandas.read_csv |
import os
import json
from time import sleep
import warnings
import numpy as np
import pandas as pd
from scipy.optimize import minimize, basinhopping
from scipy.special import gamma
from tqdm import tqdm
try:
import cupy as _p
from cupy import asnumpy
from cupyx.scipy.ndimage.filters import convolve as cuda_conv
from gzbuilder_analysis.rendering.cuda.sersic import sersic2d
def convolve(render, psf, **kwargs):
return cuda_conv(render, psf, mode='mirror')
except ModuleNotFoundError:
_p = np
asnumpy = np.asarray
from scipy.signal import convolve2d
from gzbuilder_analysis.rendering.sersic import sersic2d
def convolve(render, psf, **kwargs):
return convolve2d(render, psf, mode='same', boundary='symm')
from gzbuilder_analysis.rendering.sersic import _b
warnings.simplefilter('ignore', RuntimeWarning)
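# analytic total flux of a Sersic profile, with I interpreted as the intensity at the
# effective radius Re; sersic_I below inverts the relation to recover I for a target total flux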
def sersic_ltot(I, Re, n, gamma=gamma):
return (
2 * np.pi * I * Re**2 * n
* np.exp(_b(n)) / _b(n)**(2 * n)
* gamma(2 * n)
)
def sersic_I(L, Re, n, gamma=gamma):
return L / (
2 * np.pi * Re**2 * n
* np.exp(_b(n)) / _b(n)**(2 * n)
* gamma(2 * n)
)
def gen_grid(shape, oversample_n):
x = _p.linspace(
0.5 / oversample_n - 0.5,
shape[1] - 0.5 - 0.5 / oversample_n,
shape[1] * oversample_n
)
y = _p.linspace(
0.5 / oversample_n - 0.5,
shape[0] - 0.5 - 0.5 / oversample_n,
shape[0] * oversample_n
)
return _p.meshgrid(x, y)
def bulge_disk_render(
cx, cy,
mux=0, muy=0, Re=1, q=1, I=1, roll=0,
bulge_dx=0, bulge_dy=0, bulge_scale=0.1, bulge_q=1, bulge_roll=0,
bulge_frac=0.1, bulge_n=1
):
if I == 0 or Re == 0:
disk = _p.zeros(cx.shape)
bulge = _p.zeros(cx.shape)
else:
# sersic2d(x, y, mux, muy, roll, Re, q, c, I, n)
disk = sersic2d(cx, cy, mux, muy, roll, Re, q, 2, I, 1)
if bulge_scale == 0 or bulge_frac == 0:
bulge = _p.zeros(cx.shape)
else:
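# treat bulge_frac as the bulge-to-total flux ratio f, so L_bulge = L_disk * f / (1 - f)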
disk_l = sersic_ltot(I, Re, 1)
comp_l = disk_l * bulge_frac / (1 - bulge_frac)
bulge_I = sersic_I(comp_l, bulge_scale * Re, bulge_n)
bulge = sersic2d(
cx, cy,
mux + bulge_dx, muy + bulge_dy,
bulge_roll, bulge_scale * Re,
bulge_q, 2, bulge_I, bulge_n
)
return (disk + bulge)
def downsample(render, oversample_n, size):
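# block-average oversample_n x oversample_n sub-pixels back onto the native pixel grid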
return render.reshape(
size[0], oversample_n, size[1], oversample_n
).mean(3).mean(1)
fm = pd.read_pickle('lib/fitting_metadata.pkl')
lims_df = pd.DataFrame(dict(
mux=[-np.inf, np.inf],
muy=[-np.inf, np.inf],
Re=[0, np.inf],
q=[0.2, 1],
I=[0, np.inf],
roll=[-np.inf, np.inf],
bulge_dx=[-np.inf, np.inf],
bulge_dy=[-np.inf, np.inf],
bulge_scale=[0, 1],
bulge_q=[0.4, 1],
bulge_roll=[-np.inf, np.inf],
bulge_frac=[0, 0.95],
bulge_n=[0.6, 8],
), index=('lower', 'upper')).T
class BasinhoppingBounds(object):
def __init__(self, lims):
self.lims = lims
def __call__(self, **kwargs):
x = kwargs["x_new"]
tmax = np.all(x <= self.lims['upper'].values)
tmin = np.all(x >= self.lims['lower'].values)
return tmax and tmin
lims = BasinhoppingBounds(lims_df)
with tqdm(fm.index, desc='Fitting subjects') as bar:
for subject_id in bar:
bar.set_description(f'Fitting subjects (minimize) ')
# subject_id = 21686598
if not os.path.isfile(f'2comp_fits_nb4/minima/{subject_id}.csv'):
target = fm.loc[subject_id]['galaxy_data']
cp_mask = _p.asarray(target.mask)
cp_target = _p.asarray(target.data)
cp_psf = _p.asarray(fm['psf'][subject_id])
cp_sigma = _p.asarray(fm['sigma_image'][subject_id].data)
p0 = pd.Series(dict(
mux=target.shape[1] / 2,
muy=target.shape[1] / 2,
Re=20,
q=1,
I=0.8,
roll=0,
# bulge_dx=0,
# bulge_dy=0,
bulge_scale=0.2,
bulge_q=1,
bulge_roll=0,
bulge_frac=0.2,
# bulge_n=2,
))
oversample_n = 5
cx, cy = gen_grid(target.shape, oversample_n)
ndof = len(target.compressed())
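# objective: reduced chi-squared of the PSF-convolved model over the unmasked pixels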
def _f(p):
kw = {k: v for k, v in zip(p0.index, p)}
kw.setdefault('bulge_n', 4)
render = bulge_disk_render(cx, cy, **kw)
downsampled_render = downsample(render, oversample_n, size=target.shape)
psf_conv_render = convolve(downsampled_render, cp_psf)
diff = psf_conv_render[~cp_mask] - cp_target[~cp_mask]
chisq = asnumpy(_p.sum((diff / cp_sigma[~cp_mask])**2) / ndof)
return chisq
gradient_descent_res = minimize(
_f,
p0,
bounds=lims_df.reindex(p0.index).values,
)
p_gd = pd.Series(gradient_descent_res['x'], index=p0.index)
bar.set_description(f'Fitting subjects (basinhopping)')
minima = | pd.DataFrame(columns=(*p0.index, 'chisq', 'accepted')) | pandas.DataFrame |
import scipy.stats as st
import pandas as pd
# related to processing splitseq
def get_bc1_matches():
# from spclass.py - barcodes and their well/primer type identity
bc_file = '/Users/fairliereese/mortazavi_lab/bin/pacbio-splitpipe/barcodes/bc_8nt_v2.csv'
bc_df = pd.read_csv(bc_file, index_col=0, names=['bc'])
bc_df['well'] = [i for i in range(0, 48)]+[i for i in range(0, 48)]
bc_df['primer_type'] = ['dt' for i in range(0, 48)]+['randhex' for i in range(0, 48)]
# pivot on well to get df that matches bcs with one another from the same well
bc_df = bc_df.pivot(index='well', columns='primer_type', values='bc')
bc_df = bc_df.rename_axis(None, axis=1).reset_index()
bc_df.rename({'dt': 'bc1_dt', 'randhex': 'bc1_randhex'}, axis=1, inplace=True)
return bc_df
def get_illumina_metadata():
fname = '/Users/fairliereese/Documents/programming/mortazavi_lab/data/c2c12_paper_2020/sc_pacbio/illumina_cell_metadata_full_bcs.tsv'
# we want info about primer type as well
bc_df = get_bc1_matches()
# read in illumina bcs
df = pd.read_csv(fname, sep='\t')
cols = ['sample', 'umi_count', 'gene_count', 'bc']
df = df[cols]
df.rename({'umi_count':'ill_umi_count', 'gene_count': 'ill_gene_count'},
axis=1, inplace=True)
df['bc3'] = df.bc.str.slice(start=0, stop=8)
df['bc2'] = df.bc.str.slice(start=8, stop=16)
df['bc1'] = df.bc.str.slice(start=16, stop=24)
# merge bc1 df with illumina data
df = df.merge(bc_df, how='left', left_on='bc1', right_on='bc1_dt')
df.rename({'bc1_randhex': 'Random hexamer', 'bc1_dt':'Oligo dT'}, axis=1, inplace=True)
# melt df to duplicate entries b/w dt and randhex primers for each cell
id_vars = ['sample', 'ill_umi_count', 'ill_gene_count',\
'bc', 'bc3', 'bc2', 'bc1', 'well']
value_vars = ['Oligo dT', 'Random hexamer']
temp = pd.melt(df, id_vars=id_vars, value_vars=value_vars)
temp.rename({'variable': 'primer_type', 'value': 'raw_bc1', 'bc': \
'merged_bc'}, axis=1, inplace=True)
temp['raw_bc'] = temp.bc3+temp.bc2+temp.raw_bc1
return temp
def find_randhex_polydt_datasets(df, bc_df):
dt_bc1s = bc_df.bc1_dt.tolist()
randhex_bc1s = bc_df.bc1_randhex.tolist()
dt_datasets = df.loc[df.bc1.isin(dt_bc1s), 'dataset'].tolist()
randhex_datasets = df.loc[df.bc1.isin(randhex_bc1s), 'dataset'].tolist()
return dt_datasets, randhex_datasets
def get_sample_df(datasets):
sample_df = pd.DataFrame(data=datasets, columns=['dataset'])
sample_df.drop_duplicates(inplace=True)
i_df = get_illumina_metadata()
mini_idf = i_df[['raw_bc', 'sample']]
sample_df['experiment'] = sample_df.apply(lambda x: 'bulk' if 'PB' in x.dataset else 'sc', axis=1)
sample_df = sample_df.merge(mini_idf, how='left', left_on='dataset', right_on='raw_bc')
sample_df.loc[sample_df.dataset.isin(['PB154', 'PB155']), 'sample'] = 'MB'
sample_df.loc[sample_df.dataset.isin(['PB213', 'PB214']), 'sample'] = 'MT'
sample_df['tech'] = 'temp'
sample_df.loc[sample_df['sample'].str.contains('nuclei'), 'tech'] = 'Single-nucleus'
sample_df.loc[sample_df['sample'].str.contains('cells'), 'tech'] = 'Single-cell'
sample_df.loc[sample_df.experiment == 'bulk', 'tech'] = 'Bulk'
return sample_df
def calc_iso_complex_sig(df, obs_col, cond1, cond2):
dist1 = df.loc[df[obs_col] == cond1, 'n_genes_multiple_iso'].tolist()
dist2 = df.loc[df[obs_col] == cond2, 'n_genes_multiple_iso'].tolist()
stat, pval = st.mannwhitneyu(dist1, dist2)
print('Mann Whitney U statistic: {}'.format(stat))
print('P value: {}'.format(pval))
def calc_detection_stats(bulk, sc, novelty):
bulk_datasets = get_dataset_names(bulk)
sc_datasets = get_dataset_names(sc)
bulk['dataset_sum'] = bulk[bulk_datasets].sum(axis=1)
sc['dataset_sum'] = sc[sc_datasets].sum(axis=1)
# get rid of unexpressed
bulk = bulk.loc[bulk.dataset_sum > 0]
sc = sc.loc[sc.dataset_sum > 0]
# only novelty types we care about
bulk = bulk.loc[bulk.transcript_novelty.isin(novelty)]
sc = sc.loc[sc.transcript_novelty.isin(novelty)]
# sensitivity - true positives / total positives
# (all bulk-detected transcripts)
pos = len(bulk.index)
true_pos = len(list(set(bulk.transcript_ID.tolist())&set(sc.transcript_ID.tolist())))
sens = true_pos/pos
print('Sensitivity')
print(sens)
# false positive rate - number of transcripts uniquely detected in
# sc / number of transcripts detected in sc + bulk
false_pos = len(list(set(sc.transcript_ID.tolist())-set(bulk.transcript_ID.tolist())))
total_sc = len(sc.index)
fpr = false_pos/total_sc
print('False positive rate')
print(fpr)
def add_read_annot_metadata(df, bulk=False):
i_df = get_illumina_metadata()
df = df.merge(i_df, how='left', left_on='dataset', right_on='raw_bc')
if bulk:
datasets = df.dataset.unique().tolist()
sample_df = get_sample_df(datasets)
sample_df.drop(['sample', 'raw_bc'], axis=1, inplace=True)
df = df.merge(sample_df, how='left', on='dataset')
df.loc[df.dataset.isin(['PB154', 'PB155']), 'sample'] = 'MB'
df.loc[df.dataset.isin(['PB213', 'PB214']), 'sample'] = 'MT'
return df
def add_bcs_df(df):
df['bc3'] = df.dataset.str.slice(start=0, stop=8)
df['bc2'] = df.dataset.str.slice(start=8, stop=16)
df['bc1'] = df.dataset.str.slice(start=16, stop=24)
return df
# related to talon processing
def read_whitelist(fname):
df = pd.read_csv(fname, header=None, names=['gid', 'tid'])
whitelist = df.tid.tolist()
return whitelist
# from a talon abundance file, get a list of columns that correspond to the datasets
def get_dataset_names(df):
non_dataset_columns = ['gene_ID', 'transcript_ID', 'annot_gene_id',
'annot_transcript_id', 'annot_gene_name',
'annot_transcript_name', 'n_exons', 'length',
'gene_novelty', 'transcript_novelty', 'ISM_subtype', 'experiment']
dataset_cols = [ x for x in list(df.columns) \
if x not in non_dataset_columns ]
return dataset_cols
def get_gtf_info(fname, kind='gene'):
df = pd.read_csv(fname, sep='\t', comment='#', usecols=[0,2,3,4,8], header=None)
df.columns = ['chr', 'entry_type', 'start', 'stop', 'fields']
if kind == 'gene':
df = df.loc[df.entry_type == 'gene']
name_pat = 'gene_name "'
id_pat = 'gene_id "'
df['gene_type'] = df.fields.str.split(pat='gene_type "', n=1, expand=True)[1]
df['gene_type'] = df.gene_type.str.split(pat='"', n=1, expand=True)[0]
elif kind == 'transcript':
df = df.loc[df.entry_type == 'transcript']
name_pat = 'transcript_name "'
id_pat = 'transcript_id "'
else:
raise ValueError("kind must be either 'gene' or 'transcript'")
df['name'] = df.fields.str.split(pat=name_pat, n=1, expand=True)[1]
df['name'] = df.name.str.split(pat='"', n=1, expand=True)[0]
df['id'] = df.fields.str.split(pat=id_pat, n=1, expand=True)[1]
df['id'] = df.id.str.split(pat='"', n=1, expand=True)[0]
df['len'] = df.start - df.stop
df['len'] = df.len.abs()
df.drop('fields', axis=1, inplace=True)
return df
def get_multiple_iso_genes(adata, gene_adata):
# create a df
# X = adata.raw.X
X = adata.X
columns = adata.var.index.tolist()
ind = adata.obs.merged_bc.tolist()
df = | pd.DataFrame(data=X, columns=columns, index=ind) | pandas.DataFrame |
import numpy as np
import pandas.util.testing as tm
from pandas import (Series, date_range, DatetimeIndex, Index, MultiIndex,
RangeIndex)
from .pandas_vb_common import setup # noqa
class SetOperations(object):
goal_time = 0.2
params = (['datetime', 'date_string', 'int', 'strings'],
['intersection', 'union', 'symmetric_difference'])
param_names = ['dtype', 'method']
def setup(self, dtype, method):
N = 10**5
dates_left = date_range('1/1/2000', periods=N, freq='T')
fmt = '%Y-%m-%d %H:%M:%S'
date_str_left = Index(dates_left.strftime(fmt))
int_left = Index(np.arange(N))
str_left = | tm.makeStringIndex(N) | pandas.util.testing.makeStringIndex |
import copy
import tempfile
from pathlib import Path
import pandas as pd
import numpy as np
import pytest
from gluonts.dataset.common import ListDataset
from autogluon.timeseries.dataset.ts_dataframe import TimeSeriesDataFrame, ITEMID, TIMESTAMP
START_TIMESTAMP = pd.Timestamp("01-01-2019", freq="D")
END_TIMESTAMP = pd.Timestamp("01-02-2019", freq="D")
ITEM_IDS = (0, 1, 2)
TARGETS = np.arange(9)
DATETIME_INDEX = tuple(pd.date_range(START_TIMESTAMP, periods=3))
EMPTY_ITEM_IDS = np.array([], dtype=np.int64)
EMPTY_DATETIME_INDEX = np.array([], dtype=np.dtype("datetime64[ns]"))
EMPTY_TARGETS = np.array([], dtype=np.int64)
def _build_ts_dataframe(item_ids, datetime_index, target):
multi_inds = pd.MultiIndex.from_product(
[item_ids, datetime_index], names=["item_id", "timestamp"]
)
return TimeSeriesDataFrame(
pd.Series(target, name="target", index=multi_inds).to_frame()
)
SAMPLE_TS_DATAFRAME = _build_ts_dataframe(ITEM_IDS, DATETIME_INDEX, TARGETS)
SAMPLE_TS_DATAFRAME_EMPTY = _build_ts_dataframe(EMPTY_ITEM_IDS, EMPTY_DATETIME_INDEX, EMPTY_TARGETS)
SAMPLE_DATAFRAME = pd.DataFrame(SAMPLE_TS_DATAFRAME).reset_index()
SAMPLE_ITERABLE = [
{"target": [0, 1, 2], "start": pd.Timestamp("01-01-2019", freq="D")},
{"target": [3, 4, 5], "start": pd.Timestamp("01-01-2019", freq="D")},
{"target": [6, 7, 8], "start": pd.Timestamp("01-01-2019", freq="D")},
]
def test_from_iterable():
ts_df = TimeSeriesDataFrame(SAMPLE_ITERABLE)
pd.testing.assert_frame_equal(ts_df, SAMPLE_TS_DATAFRAME, check_dtype=True)
with pytest.raises(ValueError):
TimeSeriesDataFrame([])
sample_iter = [{"target": [0, 1, 2]}]
with pytest.raises(ValueError):
TimeSeriesDataFrame(sample_iter)
sample_iter = [{"target": [0, 1, 2], "start": pd.Timestamp("01-01-2019")}]
with pytest.raises(ValueError):
TimeSeriesDataFrame(sample_iter)
def test_validate_data_frame():
item_ids = pd.Series(np.repeat(ITEM_IDS, 3))
datetimes = pd.Series(np.tile(DATETIME_INDEX, 3))
targets = pd.Series(TARGETS)
df = pd.concat([item_ids, datetimes, targets], axis=1)
with pytest.raises(ValueError):
TimeSeriesDataFrame(df)
df.columns = ["item_id", "timestamp", "target"]
TimeSeriesDataFrame(df)
def test_validate_multi_index_data_frame():
TimeSeriesDataFrame(SAMPLE_TS_DATAFRAME)
target = list(range(4))
item_ids = (1, 2, 3, 4)
with pytest.raises(ValueError):
TimeSeriesDataFrame(np.array([item_ids, target]).T, freq="D")
ts_df = pd.Series(target, name="target", index=item_ids).to_frame()
with pytest.raises(ValueError):
TimeSeriesDataFrame(ts_df, freq="D")
def test_from_gluonts_list_dataset():
N = 10 # number of time series
T = 100 # number of timesteps
prediction_length = 24
freq = "D"
custom_dataset = np.random.normal(size=(N, T))
start = pd.Timestamp("01-01-2019", freq=freq)
gluonts_list_dataset = ListDataset(
[{"target": x, "start": start} for x in custom_dataset[:, :-prediction_length]],
freq=freq,
)
TimeSeriesDataFrame(gluonts_list_dataset)
ts_df = TimeSeriesDataFrame(ListDataset(SAMPLE_ITERABLE, freq=freq))
pd.testing.assert_frame_equal(ts_df, SAMPLE_TS_DATAFRAME, check_dtype=False)
empty_list_dataset = ListDataset([], freq=freq)
with pytest.raises(ValueError):
TimeSeriesDataFrame(empty_list_dataset)
def test_from_data_frame():
tsdf_from_data_frame = TimeSeriesDataFrame(SAMPLE_DATAFRAME)
pd.testing.assert_frame_equal(
tsdf_from_data_frame, SAMPLE_TS_DATAFRAME, check_dtype=True
)
@pytest.mark.parametrize(
"split_item_id, left_items, left_datetimes, left_targets, right_items, right_datetimes, right_targets",
[
(
2,
(0, 1),
DATETIME_INDEX,
[0, 1, 2, 3, 4, 5],
(2,),
DATETIME_INDEX,
[6, 7, 8],
),
(
0,
EMPTY_ITEM_IDS,
EMPTY_DATETIME_INDEX,
EMPTY_TARGETS,
ITEM_IDS,
DATETIME_INDEX,
TARGETS,
),
(
6,
ITEM_IDS,
DATETIME_INDEX,
TARGETS,
EMPTY_ITEM_IDS,
EMPTY_DATETIME_INDEX,
EMPTY_TARGETS,
),
],
)
def test_split_by_item(
split_item_id,
left_items,
left_datetimes,
left_targets,
right_items,
right_datetimes,
right_targets,
):
left, right = SAMPLE_TS_DATAFRAME.split_by_item(split_item_id)
left_true = _build_ts_dataframe(left_items, left_datetimes, left_targets)
right_true = _build_ts_dataframe(right_items, right_datetimes, right_targets)
pd.testing.assert_frame_equal(left, left_true)
pd.testing.assert_frame_equal(right, right_true)
@pytest.mark.parametrize(
"split_time_stamp, left_items, left_datetimes, left_targets, right_items, right_datetimes, right_targets",
[
(
pd.Timestamp("01-03-2019"),
ITEM_IDS,
tuple(pd.date_range(START_TIMESTAMP, periods=2)),
[0, 1, 3, 4, 6, 7],
ITEM_IDS,
tuple(pd.date_range(pd.Timestamp("01-03-2019"), periods=1)),
[2, 5, 8],
),
(
pd.Timestamp("01-01-2019"),
EMPTY_ITEM_IDS,
EMPTY_DATETIME_INDEX,
EMPTY_TARGETS,
ITEM_IDS,
DATETIME_INDEX,
TARGETS,
),
(
pd.Timestamp("01-04-2019"),
ITEM_IDS,
DATETIME_INDEX,
TARGETS,
EMPTY_ITEM_IDS,
EMPTY_DATETIME_INDEX,
EMPTY_TARGETS,
),
],
)
def test_split_by_time(
split_time_stamp,
left_items,
left_datetimes,
left_targets,
right_items,
right_datetimes,
right_targets,
):
left, right = SAMPLE_TS_DATAFRAME.split_by_time(split_time_stamp)
left_true = _build_ts_dataframe(left_items, left_datetimes, left_targets)
right_true = _build_ts_dataframe(right_items, right_datetimes, right_targets)
pd.testing.assert_frame_equal(left, left_true)
pd.testing.assert_frame_equal(right, right_true)
@pytest.mark.parametrize(
"start_timestamp, end_timestamp, item_ids, datetimes, targets",
[
(
START_TIMESTAMP,
END_TIMESTAMP,
ITEM_IDS,
tuple(pd.date_range(START_TIMESTAMP, periods=1)),
[0, 3, 6],
),
(
pd.Timestamp("12-31-2018"),
END_TIMESTAMP,
ITEM_IDS,
tuple(pd.date_range(START_TIMESTAMP, periods=1)),
[0, 3, 6],
),
(
START_TIMESTAMP,
START_TIMESTAMP,
EMPTY_ITEM_IDS,
EMPTY_DATETIME_INDEX,
EMPTY_TARGETS,
),
(
pd.Timestamp("01-04-2019"),
pd.Timestamp("01-05-2019"),
EMPTY_ITEM_IDS,
EMPTY_DATETIME_INDEX,
EMPTY_TARGETS,
),
],
)
def test_subsequence(start_timestamp, end_timestamp, item_ids, datetimes, targets):
new_tsdf = SAMPLE_TS_DATAFRAME.subsequence(start_timestamp, end_timestamp)
ts_df = _build_ts_dataframe(item_ids, datetimes, targets)
pd.testing.assert_frame_equal(new_tsdf, ts_df)
@pytest.mark.parametrize(
"timestamps, expected_freq",
[
(["2020-01-01 00:00:00", "2020-01-02 00:00:00", "2020-01-03 00:00:00"], "D"),
(["2020-01-01 00:00:00", "2020-01-03 00:00:00", "2020-01-05 00:00:00"], "2D"),
(["2020-01-01 00:00:00", "2020-01-01 00:01:00", "2020-01-01 00:02:00"], "T"),
(["2020-01-01 00:00:00", "2020-01-01 01:00:00", "2020-01-01 02:00:00"], "H"),
],
)
def test_when_dataset_constructed_from_dataframe_without_freq_then_freq_is_inferred(
timestamps, expected_freq
):
df = pd.DataFrame(
{
"item_id": [0, 0, 0],
"target": [1, 2, 3],
"timestamp": map(pd.Timestamp, timestamps), # noqa
}
)
ts_df = TimeSeriesDataFrame.from_data_frame(df)
assert ts_df.freq == expected_freq
@pytest.mark.parametrize(
"start_time, freq",
[
("2020-01-01 00:00:00", "D"),
("2020-01-01 00:00:00", "2D"),
("2020-01-01 00:00:00", "T"),
("2020-01-01 00:00:00", "H"),
],
)
def test_when_dataset_constructed_from_iterable_with_freq_then_freq_is_inferred(
start_time, freq
):
item_list = ListDataset(
[{"target": [1, 2, 3], "start": pd.Timestamp(start_time)} for _ in range(3)],
freq=freq,
)
ts_df = TimeSeriesDataFrame.from_iterable_dataset(item_list)
assert ts_df.freq == freq
@pytest.mark.parametrize("list_of_timestamps", [
[
["2020-01-01 00:00:00", "2020-01-02 00:00:00", "2020-01-03 00:01:00"],
],
[
["2020-01-01 00:00:00", "2020-01-02 00:00:00", "2020-01-03 00:00:00"],
["2020-01-01 00:00:00", "2020-01-02 00:00:00", "2020-01-03 00:00:01"],
],
[
["2020-01-01 00:00:00", "2020-01-02 00:00:00", "2020-01-03 00:00:00"],
["2020-01-01 00:00:00", "2020-01-02 00:00:00", "2020-01-04 00:00:00"],
],
[
["2020-01-01 00:00:00", "2020-01-02 00:00:00", "2020-01-03 00:01:00"],
["2020-01-01 00:00:00", "2020-01-02 00:00:00", "2020-01-03 00:01:00"],
["2020-01-01 00:00:00", "2020-01-02 00:00:00", "2020-01-03 00:01:00"],
]
])
def test_when_dataset_constructed_with_irregular_timestamps_then_constructor_raises(
list_of_timestamps
):
df_tuples = []
for i, ts in enumerate(list_of_timestamps):
for t in ts:
df_tuples.append((i, pd.Timestamp(t), np.random.rand()))
df = | pd.DataFrame(df_tuples, columns=[ITEMID, TIMESTAMP, "target"]) | pandas.DataFrame |
from flask import Flask, render_template, jsonify, request
from flask_pymongo import PyMongo
from flask_cors import CORS, cross_origin
import json
import copy
import warnings
import re
import pandas as pd
pd.set_option('use_inf_as_na', True)
import numpy as np
from joblib import Memory
from xgboost import XGBClassifier
from sklearn import model_selection
from bayes_opt import BayesianOptimization
from sklearn.model_selection import cross_validate
from sklearn.model_selection import cross_val_predict
from sklearn.preprocessing import OneHotEncoder
from sklearn.metrics import classification_report
from sklearn.feature_selection import mutual_info_classif
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import f_classif
from sklearn.feature_selection import RFECV
from sklearn.linear_model import LogisticRegression
from eli5.sklearn import PermutationImportance
from joblib import Parallel, delayed
import multiprocessing
from statsmodels.stats.outliers_influence import variance_inflation_factor
from statsmodels.tools.tools import add_constant
# this block of code is for the connection between the server, the database, and the client (plus routing)
# access MongoDB
app = Flask(__name__)
app.config["MONGO_URI"] = "mongodb://localhost:27017/mydb"
mongo = PyMongo(app)
cors = CORS(app, resources={r"/data/*": {"origins": "*"}})
@cross_origin(origin='localhost',headers=['Content-Type','Authorization'])
@app.route('/data/Reset', methods=["GET", "POST"])
def reset():
global DataRawLength
global DataResultsRaw
global previousState
previousState = []
global StanceTest
StanceTest = False
global filterActionFinal
filterActionFinal = ''
global keySpecInternal
keySpecInternal = 1
global RANDOM_SEED
RANDOM_SEED = 42
global keyData
keyData = 0
global keepOriginalFeatures
keepOriginalFeatures = []
global XData
XData = []
global yData
yData = []
global XDataNoRemoval
XDataNoRemoval = []
global XDataNoRemovalOrig
XDataNoRemovalOrig = []
global XDataStored
XDataStored = []
global yDataStored
yDataStored = []
global finalResultsData
finalResultsData = []
global detailsParams
detailsParams = []
global algorithmList
algorithmList = []
global ClassifierIDsList
ClassifierIDsList = ''
global RetrieveModelsList
RetrieveModelsList = []
global allParametersPerfCrossMutr
allParametersPerfCrossMutr = []
global all_classifiers
all_classifiers = []
global crossValidation
crossValidation = 8
#crossValidation = 5
#crossValidation = 3
global resultsMetrics
resultsMetrics = []
global parametersSelData
parametersSelData = []
global target_names
target_names = []
global keyFirstTime
keyFirstTime = True
global target_namesLoc
target_namesLoc = []
global featureCompareData
featureCompareData = []
global columnsKeep
columnsKeep = []
global columnsNewGen
columnsNewGen = []
global columnsNames
columnsNames = []
global fileName
fileName = []
global listofTransformations
listofTransformations = ["r","b","zs","mms","l2","l1p","l10","e2","em1","p2","p3","p4"]
return 'The reset was done!'
# retrieve data from client and select the correct data set
@cross_origin(origin='localhost',headers=['Content-Type','Authorization'])
@app.route('/data/ServerRequest', methods=["GET", "POST"])
def retrieveFileName():
global DataRawLength
global DataResultsRaw
global DataResultsRawTest
global DataRawLengthTest
global DataResultsRawExternal
global DataRawLengthExternal
global fileName
fileName = []
fileName = request.get_data().decode('utf8').replace("'", '"')
global keySpecInternal
keySpecInternal = 1
global filterActionFinal
filterActionFinal = ''
global dataSpacePointsIDs
dataSpacePointsIDs = []
global RANDOM_SEED
RANDOM_SEED = 42
global keyData
keyData = 0
global keepOriginalFeatures
keepOriginalFeatures = []
global XData
XData = []
global XDataNoRemoval
XDataNoRemoval = []
global XDataNoRemovalOrig
XDataNoRemovalOrig = []
global previousState
previousState = []
global yData
yData = []
global XDataStored
XDataStored = []
global yDataStored
yDataStored = []
global finalResultsData
finalResultsData = []
global ClassifierIDsList
ClassifierIDsList = ''
global algorithmList
algorithmList = []
global detailsParams
detailsParams = []
# Initializing models
global RetrieveModelsList
RetrieveModelsList = []
global resultsList
resultsList = []
global allParametersPerfCrossMutr
allParametersPerfCrossMutr = []
global HistoryPreservation
HistoryPreservation = []
global all_classifiers
all_classifiers = []
global crossValidation
crossValidation = 8
#crossValidation = 5
#crossValidation = 3
global parametersSelData
parametersSelData = []
global StanceTest
StanceTest = False
global target_names
target_names = []
global keyFirstTime
keyFirstTime = True
global target_namesLoc
target_namesLoc = []
global featureCompareData
featureCompareData = []
global columnsKeep
columnsKeep = []
global columnsNewGen
columnsNewGen = []
global columnsNames
columnsNames = []
global listofTransformations
listofTransformations = ["r","b","zs","mms","l2","l1p","l10","e2","em1","p2","p3","p4"]
DataRawLength = -1
DataRawLengthTest = -1
data = json.loads(fileName)
if data['fileName'] == 'HeartC':
CollectionDB = mongo.db.HeartC.find()
target_names.append('Healthy')
target_names.append('Diseased')
elif data['fileName'] == 'biodegC':
StanceTest = True
CollectionDB = mongo.db.biodegC.find()
CollectionDBTest = mongo.db.biodegCTest.find()
CollectionDBExternal = mongo.db.biodegCExt.find()
target_names.append('Non-biodegr.')
target_names.append('Biodegr.')
elif data['fileName'] == 'BreastC':
CollectionDB = mongo.db.breastC.find()
elif data['fileName'] == 'DiabetesC':
CollectionDB = mongo.db.diabetesC.find()
target_names.append('Negative')
target_names.append('Positive')
elif data['fileName'] == 'MaterialC':
CollectionDB = mongo.db.MaterialC.find()
target_names.append('Cylinder')
target_names.append('Disk')
target_names.append('Flatellipsold')
target_names.append('Longellipsold')
target_names.append('Sphere')
elif data['fileName'] == 'ContraceptiveC':
CollectionDB = mongo.db.ContraceptiveC.find()
target_names.append('No-use')
target_names.append('Long-term')
target_names.append('Short-term')
elif data['fileName'] == 'VehicleC':
CollectionDB = mongo.db.VehicleC.find()
target_names.append('Van')
target_names.append('Car')
target_names.append('Bus')
elif data['fileName'] == 'WineC':
CollectionDB = mongo.db.WineC.find()
target_names.append('Fine')
target_names.append('Superior')
target_names.append('Inferior')
else:
CollectionDB = mongo.db.IrisC.find()
DataResultsRaw = []
for index, item in enumerate(CollectionDB):
item['_id'] = str(item['_id'])
item['InstanceID'] = index
DataResultsRaw.append(item)
DataRawLength = len(DataResultsRaw)
DataResultsRawTest = []
DataResultsRawExternal = []
if (StanceTest):
for index, item in enumerate(CollectionDBTest):
item['_id'] = str(item['_id'])
item['InstanceID'] = index
DataResultsRawTest.append(item)
DataRawLengthTest = len(DataResultsRawTest)
for index, item in enumerate(CollectionDBExternal):
item['_id'] = str(item['_id'])
item['InstanceID'] = index
DataResultsRawExternal.append(item)
DataRawLengthExternal = len(DataResultsRawExternal)
dataSetSelection()
return 'Everything is okay'
# Retrieve data set from client
@cross_origin(origin='localhost',headers=['Content-Type','Authorization'])
@app.route('/data/SendtoSeverDataSet', methods=["GET", "POST"])
def sendToServerData():
uploadedData = request.get_data().decode('utf8').replace("'", '"')
uploadedDataParsed = json.loads(uploadedData)
DataResultsRaw = uploadedDataParsed['uploadedData']
DataResults = copy.deepcopy(DataResultsRaw)
for dictionary in DataResultsRaw:
for key in dictionary.keys():
if (key.find('*') != -1):
target = key
continue
continue
DataResultsRaw.sort(key=lambda x: x[target], reverse=True)
DataResults.sort(key=lambda x: x[target], reverse=True)
for dictionary in DataResults:
del dictionary[target]
global AllTargets
global target_names
global target_namesLoc
AllTargets = [o[target] for o in DataResultsRaw]
AllTargetsFloatValues = []
global fileName
data = json.loads(fileName)
previous = None
Class = 0
for i, value in enumerate(AllTargets):
if (i == 0):
previous = value
if (data['fileName'] == 'IrisC' or data['fileName'] == 'BreastC'):
target_names.append(value)
else:
pass
if (value == previous):
AllTargetsFloatValues.append(Class)
else:
Class = Class + 1
if (data['fileName'] == 'IrisC' or data['fileName'] == 'BreastC'):
target_names.append(value)
else:
pass
AllTargetsFloatValues.append(Class)
previous = value
ArrayDataResults = pd.DataFrame.from_dict(DataResults)
global XData, yData, RANDOM_SEED
XData, yData = ArrayDataResults, AllTargetsFloatValues
global XDataStored, yDataStored
XDataStored = XData.copy()
yDataStored = yData.copy()
global XDataStoredOriginal
XDataStoredOriginal = XData.copy()
global finalResultsData
finalResultsData = XData.copy()
global XDataNoRemoval
XDataNoRemoval = XData.copy()
global XDataNoRemovalOrig
XDataNoRemovalOrig = XData.copy()
return 'Processed uploaded data set'
def dataSetSelection():
global XDataTest, yDataTest
XDataTest = pd.DataFrame()
global XDataExternal, yDataExternal
XDataExternal = pd.DataFrame()
global StanceTest
global AllTargets
global target_names
target_namesLoc = []
if (StanceTest):
DataResultsTest = copy.deepcopy(DataResultsRawTest)
for dictionary in DataResultsRawTest:
for key in dictionary.keys():
if (key.find('*') != -1):
target = key
continue
continue
DataResultsRawTest.sort(key=lambda x: x[target], reverse=True)
DataResultsTest.sort(key=lambda x: x[target], reverse=True)
for dictionary in DataResultsTest:
del dictionary['_id']
del dictionary['InstanceID']
del dictionary[target]
AllTargetsTest = [o[target] for o in DataResultsRawTest]
AllTargetsFloatValuesTest = []
previous = None
Class = 0
for i, value in enumerate(AllTargetsTest):
if (i == 0):
previous = value
target_namesLoc.append(value)
if (value == previous):
AllTargetsFloatValuesTest.append(Class)
else:
Class = Class + 1
target_namesLoc.append(value)
AllTargetsFloatValuesTest.append(Class)
previous = value
ArrayDataResultsTest = pd.DataFrame.from_dict(DataResultsTest)
XDataTest, yDataTest = ArrayDataResultsTest, AllTargetsFloatValuesTest
DataResultsExternal = copy.deepcopy(DataResultsRawExternal)
for dictionary in DataResultsRawExternal:
for key in dictionary.keys():
if (key.find('*') != -1):
target = key
continue
continue
DataResultsRawExternal.sort(key=lambda x: x[target], reverse=True)
DataResultsExternal.sort(key=lambda x: x[target], reverse=True)
for dictionary in DataResultsExternal:
del dictionary['_id']
del dictionary['InstanceID']
del dictionary[target]
AllTargetsExternal = [o[target] for o in DataResultsRawExternal]
AllTargetsFloatValuesExternal = []
previous = None
Class = 0
for i, value in enumerate(AllTargetsExternal):
if (i == 0):
previous = value
target_namesLoc.append(value)
if (value == previous):
AllTargetsFloatValuesExternal.append(Class)
else:
Class = Class + 1
target_namesLoc.append(value)
AllTargetsFloatValuesExternal.append(Class)
previous = value
ArrayDataResultsExternal = pd.DataFrame.from_dict(DataResultsExternal)
XDataExternal, yDataExternal = ArrayDataResultsExternal, AllTargetsFloatValuesExternal
DataResults = copy.deepcopy(DataResultsRaw)
for dictionary in DataResultsRaw:
for key in dictionary.keys():
if (key.find('*') != -1):
target = key
continue
continue
DataResultsRaw.sort(key=lambda x: x[target], reverse=True)
DataResults.sort(key=lambda x: x[target], reverse=True)
for dictionary in DataResults:
del dictionary['_id']
del dictionary['InstanceID']
del dictionary[target]
AllTargets = [o[target] for o in DataResultsRaw]
AllTargetsFloatValues = []
global fileName
data = json.loads(fileName)
previous = None
Class = 0
for i, value in enumerate(AllTargets):
if (i == 0):
previous = value
if (data['fileName'] == 'IrisC' or data['fileName'] == 'BreastC'):
target_names.append(value)
else:
pass
if (value == previous):
AllTargetsFloatValues.append(Class)
else:
Class = Class + 1
if (data['fileName'] == 'IrisC' or data['fileName'] == 'BreastC'):
target_names.append(value)
else:
pass
AllTargetsFloatValues.append(Class)
previous = value
dfRaw = pd.DataFrame.from_dict(DataResultsRaw)
# OneTimeTemp = copy.deepcopy(dfRaw)
# OneTimeTemp.drop(columns=['_id', 'InstanceID'])
# column_names = ['volAc', 'chlorides', 'density', 'fixAc' , 'totalSuDi' , 'citAc', 'resSu' , 'pH' , 'sulphates', 'freeSulDi' ,'alcohol', 'quality*']
# OneTimeTemp = OneTimeTemp.reindex(columns=column_names)
# OneTimeTemp.to_csv('dataExport.csv', index=False)
ArrayDataResults = pd.DataFrame.from_dict(DataResults)
global XData, yData, RANDOM_SEED
XData, yData = ArrayDataResults, AllTargetsFloatValues
global keepOriginalFeatures
global OrignList
if (data['fileName'] == 'biodegC'):
keepOriginalFeatures = XData.copy()
storeNewColumns = []
for col in keepOriginalFeatures.columns:
newCol = col.replace("-", "_")
storeNewColumns.append(newCol.replace("_",""))
keepOriginalFeatures.columns = [str(col) + ' F'+str(idx+1)+'' for idx, col in enumerate(storeNewColumns)]
columnsNewGen = keepOriginalFeatures.columns.values.tolist()
OrignList = keepOriginalFeatures.columns.values.tolist()
else:
keepOriginalFeatures = XData.copy()
keepOriginalFeatures.columns = [str(col) + ' F'+str(idx+1)+'' for idx, col in enumerate(keepOriginalFeatures.columns)]
columnsNewGen = keepOriginalFeatures.columns.values.tolist()
OrignList = keepOriginalFeatures.columns.values.tolist()
XData.columns = ['F'+str(idx+1) for idx, col in enumerate(XData.columns)]
XDataTest.columns = ['F'+str(idx+1) for idx, col in enumerate(XDataTest.columns)]
XDataExternal.columns = ['F'+str(idx+1) for idx, col in enumerate(XDataExternal.columns)]
global XDataStored, yDataStored
XDataStored = XData.copy()
yDataStored = yData.copy()
global XDataStoredOriginal
XDataStoredOriginal = XData.copy()
global finalResultsData
finalResultsData = XData.copy()
global XDataNoRemoval
XDataNoRemoval = XData.copy()
global XDataNoRemovalOrig
XDataNoRemovalOrig = XData.copy()
warnings.simplefilter('ignore')
executeModel([], 0, '')
return 'Everything is okay'
def create_global_function():
global estimator
location = './cachedir'
memory = Memory(location, verbose=0)
# cached objective: cross-validated accuracy of an XGBClassifier for a given hyperparameter setting
@memory.cache
def estimator(n_estimators, eta, max_depth, subsample, colsample_bytree):
# initialize model
print('loopModels')
n_estimators = int(n_estimators)
max_depth = int(max_depth)
model = XGBClassifier(n_estimators=n_estimators, eta=eta, max_depth=max_depth, subsample=subsample, colsample_bytree=colsample_bytree, n_jobs=-1, random_state=RANDOM_SEED, silent=True, verbosity = 0, use_label_encoder=False)
# set in cross-validation
result = cross_validate(model, XData, yData, cv=crossValidation, scoring='accuracy')
# result is mean of test_score
return np.mean(result['test_score'])
# check this issue later because we are not getting the same results
def executeModel(exeCall, flagEx, nodeTransfName):
global XDataTest, yDataTest
global XDataExternal, yDataExternal
global keyFirstTime
global estimator
global yPredictProb
global scores
global featureImportanceData
global XData
global XDataStored
global previousState
global columnsNewGen
global columnsNames
global listofTransformations
global XDataStoredOriginal
global finalResultsData
global OrignList
global tracker
global XDataNoRemoval
global XDataNoRemovalOrig
columnsNames = []
scores = []
if (len(exeCall) == 0):
if (flagEx == 3):
XDataStored = XData.copy()
XDataNoRemovalOrig = XDataNoRemoval.copy()
OrignList = columnsNewGen
elif (flagEx == 2):
XData = XDataStored.copy()
XDataStoredOriginal = XDataStored.copy()
XDataNoRemoval = XDataNoRemovalOrig.copy()
columnsNewGen = OrignList
else:
XData = XDataStored.copy()
XDataNoRemoval = XDataNoRemovalOrig.copy()
XDataStoredOriginal = XDataStored.copy()
else:
if (flagEx == 4):
XDataStored = XData.copy()
XDataNoRemovalOrig = XDataNoRemoval.copy()
#XDataStoredOriginal = XDataStored.copy()
elif (flagEx == 2):
XData = XDataStored.copy()
XDataStoredOriginal = XDataStored.copy()
XDataNoRemoval = XDataNoRemovalOrig.copy()
columnsNewGen = OrignList
else:
XData = XDataStored.copy()
#XDataNoRemoval = XDataNoRemovalOrig.copy()
XDataStoredOriginal = XDataStored.copy()
# Bayesian Optimization CHANGE INIT_POINTS!
if (keyFirstTime):
create_global_function()
params = {"n_estimators": (5, 200), "eta": (0.05, 0.3), "max_depth": (6,12), "subsample": (0.8,1), "colsample_bytree": (0.8,1)}
bayesopt = BayesianOptimization(estimator, params, random_state=RANDOM_SEED)
bayesopt.maximize(init_points=20, n_iter=5, acq='ucb') # 20 and 5
bestParams = bayesopt.max['params']
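        # bayesopt.max is expected to be of the form
        # {'target': <best mean CV accuracy>, 'params': {'n_estimators': ..., 'eta': ..., ...}}
        # (per the bayes_opt package); only the params dict is reused below.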
estimator = XGBClassifier(n_estimators=int(bestParams.get('n_estimators')), eta=bestParams.get('eta'), max_depth=int(bestParams.get('max_depth')), subsample=bestParams.get('subsample'), colsample_bytree=bestParams.get('colsample_bytree'), probability=True, random_state=RANDOM_SEED, silent=True, verbosity = 0, use_label_encoder=False)
columnsNewGen = OrignList
if (len(exeCall) != 0):
if (flagEx == 1):
currentColumnsDeleted = []
for uniqueValue in exeCall:
currentColumnsDeleted.append(tracker[uniqueValue])
for column in XData.columns:
if (column in currentColumnsDeleted):
XData = XData.drop(column, axis=1)
XDataStoredOriginal = XDataStoredOriginal.drop(column, axis=1)
elif (flagEx == 2):
columnsKeepNew = []
columns = XDataGen.columns.values.tolist()
for indx, col in enumerate(columns):
if indx in exeCall:
columnsKeepNew.append(col)
columnsNewGen.append(col)
XDataTemp = XDataGen[columnsKeepNew]
XData[columnsKeepNew] = XDataTemp.values
XDataStoredOriginal[columnsKeepNew] = XDataTemp.values
XDataNoRemoval[columnsKeepNew] = XDataTemp.values
elif (flagEx == 4):
splittedCol = nodeTransfName.split('_')
for col in XDataNoRemoval.columns:
splitCol = col.split('_')
if ((splittedCol[0] in splitCol[0])):
newSplitted = re.sub("[^0-9]", "", splittedCol[0])
newCol = re.sub("[^0-9]", "", splitCol[0])
if (newSplitted == newCol):
storeRenamedColumn = col
XData.rename(columns={ storeRenamedColumn: nodeTransfName }, inplace = True)
XDataNoRemoval.rename(columns={ storeRenamedColumn: nodeTransfName }, inplace = True)
currentColumn = columnsNewGen[exeCall[0]]
subString = currentColumn[currentColumn.find("(")+1:currentColumn.find(")")]
replacement = currentColumn.replace(subString, nodeTransfName)
for ind, column in enumerate(columnsNewGen):
splitCol = column.split('_')
if ((splittedCol[0] in splitCol[0])):
newSplitted = re.sub("[^0-9]", "", splittedCol[0])
newCol = re.sub("[^0-9]", "", splitCol[0])
if (newSplitted == newCol):
columnsNewGen[ind] = columnsNewGen[ind].replace(storeRenamedColumn, nodeTransfName)
if (len(splittedCol) == 1):
XData[nodeTransfName] = XDataStoredOriginal[nodeTransfName]
XDataNoRemoval[nodeTransfName] = XDataStoredOriginal[nodeTransfName]
else:
if (splittedCol[1] == 'r'):
XData[nodeTransfName] = XData[nodeTransfName].round()
elif (splittedCol[1] == 'b'):
number_of_bins = np.histogram_bin_edges(XData[nodeTransfName], bins='auto')
                    emptyLabels = list(range(1, len(number_of_bins)))  # one integer label per bin
XData[nodeTransfName] = pd.cut(XData[nodeTransfName], bins=number_of_bins, labels=emptyLabels, include_lowest=True, right=True)
XData[nodeTransfName] = pd.to_numeric(XData[nodeTransfName], downcast='signed')
elif (splittedCol[1] == 'zs'):
XData[nodeTransfName] = (XData[nodeTransfName]-XData[nodeTransfName].mean())/XData[nodeTransfName].std()
elif (splittedCol[1] == 'mms'):
XData[nodeTransfName] = (XData[nodeTransfName]-XData[nodeTransfName].min())/(XData[nodeTransfName].max()-XData[nodeTransfName].min())
elif (splittedCol[1] == 'l2'):
dfTemp = []
dfTemp = np.log2(XData[nodeTransfName])
dfTemp = dfTemp.replace([np.inf, -np.inf], np.nan)
dfTemp = dfTemp.fillna(0)
XData[nodeTransfName] = dfTemp
elif (splittedCol[1] == 'l1p'):
dfTemp = []
dfTemp = np.log1p(XData[nodeTransfName])
dfTemp = dfTemp.replace([np.inf, -np.inf], np.nan)
dfTemp = dfTemp.fillna(0)
XData[nodeTransfName] = dfTemp
elif (splittedCol[1] == 'l10'):
dfTemp = []
dfTemp = np.log10(XData[nodeTransfName])
dfTemp = dfTemp.replace([np.inf, -np.inf], np.nan)
dfTemp = dfTemp.fillna(0)
XData[nodeTransfName] = dfTemp
elif (splittedCol[1] == 'e2'):
dfTemp = []
dfTemp = np.exp2(XData[nodeTransfName])
dfTemp = dfTemp.replace([np.inf, -np.inf], np.nan)
dfTemp = dfTemp.fillna(0)
XData[nodeTransfName] = dfTemp
elif (splittedCol[1] == 'em1'):
dfTemp = []
dfTemp = np.expm1(XData[nodeTransfName])
dfTemp = dfTemp.replace([np.inf, -np.inf], np.nan)
dfTemp = dfTemp.fillna(0)
XData[nodeTransfName] = dfTemp
elif (splittedCol[1] == 'p2'):
XData[nodeTransfName] = np.power(XData[nodeTransfName], 2)
elif (splittedCol[1] == 'p3'):
XData[nodeTransfName] = np.power(XData[nodeTransfName], 3)
else:
XData[nodeTransfName] = np.power(XData[nodeTransfName], 4)
XDataNoRemoval[nodeTransfName] = XData[nodeTransfName]
XDataStored = XData.copy()
XDataNoRemovalOrig = XDataNoRemoval.copy()
columnsNamesLoc = XData.columns.values.tolist()
for col in columnsNamesLoc:
splittedCol = col.split('_')
if (len(splittedCol) == 1):
for tran in listofTransformations:
columnsNames.append(splittedCol[0]+'_'+tran)
else:
for tran in listofTransformations:
if (splittedCol[1] == tran):
columnsNames.append(splittedCol[0])
else:
columnsNames.append(splittedCol[0]+'_'+tran)
featureImportanceData = estimatorFeatureSelection(XDataNoRemoval, estimator)
tracker = []
for value in columnsNewGen:
value = value.split(' ')
if (len(value) > 1):
tracker.append(value[1])
else:
tracker.append(value[0])
estimator.fit(XData, yData)
yPredict = estimator.predict(XData)
yPredictProb = cross_val_predict(estimator, XData, yData, cv=crossValidation, method='predict_proba')
num_cores = multiprocessing.cpu_count()
inputsSc = ['accuracy','precision_weighted','recall_weighted']
flat_results = Parallel(n_jobs=num_cores)(delayed(solve)(estimator,XData,yData,crossValidation,item,index) for index, item in enumerate(inputsSc))
scoresAct = [item for sublist in flat_results for item in sublist]
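    # scoresAct flattens to [accuracy_mean, accuracy_std, precision_mean, precision_std,
    # recall_mean, recall_std]; the comparisons further down rely on exactly that ordering.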
#print(scoresAct)
# if (StanceTest):
# y_pred = estimator.predict(XDataTest)
# print('Test data set')
# print(classification_report(yDataTest, y_pred))
# y_pred = estimator.predict(XDataExternal)
# print('External data set')
# print(classification_report(yDataExternal, y_pred))
howMany = 0
if (keyFirstTime):
previousState = scoresAct
keyFirstTime = False
howMany = 3
if (((scoresAct[0]-scoresAct[1]) + (scoresAct[2]-scoresAct[3]) + (scoresAct[4]-scoresAct[5])) >= ((previousState[0]-previousState[1]) + (previousState[2]-previousState[3]) + (previousState[4]-previousState[5]))):
finalResultsData = XData.copy()
if (keyFirstTime == False):
if (((scoresAct[0]-scoresAct[1]) + (scoresAct[2]-scoresAct[3]) + (scoresAct[4]-scoresAct[5])) >= ((previousState[0]-previousState[1]) + (previousState[2]-previousState[3]) + (previousState[4]-previousState[5]))):
previousState[0] = scoresAct[0]
previousState[1] = scoresAct[1]
howMany = 3
#elif ((scoresAct[2]-scoresAct[3]) > (previousState[2]-previousState[3])):
previousState[2] = scoresAct[2]
previousState[3] = scoresAct[3]
#howMany = howMany + 1
#elif ((scoresAct[4]-scoresAct[5]) > (previousState[4]-previousState[5])):
previousState[4] = scoresAct[4]
previousState[5] = scoresAct[5]
#howMany = howMany + 1
#else:
#pass
scores = scoresAct + previousState
if (howMany == 3):
scores.append(1)
else:
scores.append(0)
return 'Everything Okay'
@app.route('/data/RequestBestFeatures', methods=["GET", "POST"])
def BestFeat():
global finalResultsData
finalResultsDataJSON = finalResultsData.to_json()
response = {
'finalResultsData': finalResultsDataJSON
}
return jsonify(response)
def featFun (clfLocalPar,DataLocalPar,yDataLocalPar):
PerFeatureAccuracyLocalPar = []
scores = model_selection.cross_val_score(clfLocalPar, DataLocalPar, yDataLocalPar, cv=None, n_jobs=-1)
PerFeatureAccuracyLocalPar.append(scores.mean())
return PerFeatureAccuracyLocalPar
location = './cachedir'
memory = Memory(location, verbose=0)
# compute several complementary feature-importance measures (cached to avoid recomputation)
@memory.cache
def estimatorFeatureSelection(Data, clf):
resultsFS = []
permList = []
PerFeatureAccuracy = []
PerFeatureAccuracyAll = []
ImpurityFS = []
RankingFS = []
estim = clf.fit(Data, yData)
importances = clf.feature_importances_
# std = np.std([tree.feature_importances_ for tree in estim.feature_importances_],
# axis=0)
maxList = max(importances)
minList = min(importances)
for f in range(Data.shape[1]):
ImpurityFS.append((importances[f] - minList) / (maxList - minList))
estim = LogisticRegression(n_jobs = -1, random_state=RANDOM_SEED)
selector = RFECV(estimator=estim, n_jobs = -1, step=1, cv=crossValidation)
selector = selector.fit(Data, yData)
RFEImp = selector.ranking_
    # same mapping as the original branch ladder: RFE rank 1..9 maps to 0.95 down to 0.15,
    # and any rank of 10 or worse maps to 0.05
    rankToScore = {1: 0.95, 2: 0.85, 3: 0.75, 4: 0.65, 5: 0.55,
                   6: 0.45, 7: 0.35, 8: 0.25, 9: 0.15}
    for f in range(Data.shape[1]):
        RankingFS.append(rankToScore.get(RFEImp[f], 0.05))
perm = PermutationImportance(clf, cv=None, refit = True, n_iter = 25).fit(Data, yData)
permList.append(perm.feature_importances_)
n_feats = Data.shape[1]
num_cores = multiprocessing.cpu_count()
print("Parallelization Initilization")
flat_results = Parallel(n_jobs=num_cores)(delayed(featFun)(clf,Data.values[:, i].reshape(-1, 1),yData) for i in range(n_feats))
PerFeatureAccuracy = [item for sublist in flat_results for item in sublist]
# for i in range(n_feats):
# scoresHere = model_selection.cross_val_score(clf, Data.values[:, i].reshape(-1, 1), yData, cv=None, n_jobs=-1)
# PerFeatureAccuracy.append(scoresHere.mean())
PerFeatureAccuracyAll.append(PerFeatureAccuracy)
clf.fit(Data, yData)
yPredict = clf.predict(Data)
yPredict = np.nan_to_num(yPredict)
RankingFSDF = pd.DataFrame(RankingFS)
RankingFSDF = RankingFSDF.to_json()
ImpurityFSDF = pd.DataFrame(ImpurityFS)
ImpurityFSDF = ImpurityFSDF.to_json()
perm_imp_eli5PD = pd.DataFrame(permList)
    if (perm_imp_eli5PD.empty):
        # DataFrame.append returns a new frame, so the original loop had no effect;
        # fall back to one zero importance entry per feature instead
        perm_imp_eli5PD = pd.DataFrame({0: [0] * len(Data.columns)})
perm_imp_eli5PD = perm_imp_eli5PD.to_json()
PerFeatureAccuracyPandas = pd.DataFrame(PerFeatureAccuracyAll)
PerFeatureAccuracyPandas = PerFeatureAccuracyPandas.to_json()
bestfeatures = SelectKBest(score_func=f_classif, k='all')
fit = bestfeatures.fit(Data,yData)
dfscores = pd.DataFrame(fit.scores_)
dfcolumns = pd.DataFrame(Data.columns)
featureScores = pd.concat([dfcolumns,dfscores],axis=1)
featureScores.columns = ['Specs','Score'] #naming the dataframe columns
featureScores = featureScores.to_json()
resultsFS.append(featureScores)
resultsFS.append(ImpurityFSDF)
resultsFS.append(perm_imp_eli5PD)
resultsFS.append(PerFeatureAccuracyPandas)
resultsFS.append(RankingFSDF)
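    # resultsFS bundles five JSON-encoded tables, in this order: univariate ANOVA F-scores,
    # normalised impurity importances, permutation importances, per-feature single-feature
    # CV accuracies, and the RFE-derived ranking scores.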
return resultsFS
@app.route('/data/sendFeatImp', methods=["GET", "POST"])
def sendFeatureImportance():
global featureImportanceData
response = {
'Importance': featureImportanceData
}
return jsonify(response)
@app.route('/data/sendFeatImpComp', methods=["GET", "POST"])
def sendFeatureImportanceComp():
global featureCompareData
global columnsKeep
response = {
'ImportanceCompare': featureCompareData,
'FeatureNames': columnsKeep
}
return jsonify(response)
def solve(sclf,XData,yData,crossValidation,scoringIn,loop):
scoresLoc = []
temp = model_selection.cross_val_score(sclf, XData, yData, cv=crossValidation, scoring=scoringIn, n_jobs=-1)
scoresLoc.append(temp.mean())
scoresLoc.append(temp.std())
return scoresLoc
@app.route('/data/sendResults', methods=["GET", "POST"])
def sendFinalResults():
global scores
response = {
'ValidResults': scores
}
return jsonify(response)
def Transformation(quadrant1, quadrant2, quadrant3, quadrant4, quadrant5):
# XDataNumericColumn = XData.select_dtypes(include='number')
XDataNumeric = XDataStoredOriginal.select_dtypes(include='number')
columns = list(XDataNumeric)
global packCorrTransformed
packCorrTransformed = []
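    # For every numeric feature, evaluate the 12 candidate transformations on the five
    # user-selected data subsets (quadrant1..quadrant5) and collect the per-subset
    # correlation / VIF / mutual-information results in packCorrTransformed.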
for count, i in enumerate(columns):
dicTransf = {}
splittedCol = columnsNames[(count)*len(listofTransformations)+0].split('_')
if(len(splittedCol) == 1):
d={}
flagInf = False
XDataNumericCopy = XDataNumeric.copy()
for number in range(1,6):
quadrantVariable = str('quadrant%s' % number)
illusion = locals()[quadrantVariable]
d["DataRows{0}".format(number)] = XDataNumericCopy.iloc[illusion, :]
dicTransf["transf1"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
else:
d={}
flagInf = False
XDataNumericCopy = XDataNumeric.copy()
XDataNumericCopy[i] = XDataNumericCopy[i].round()
for number in range(1,6):
quadrantVariable = str('quadrant%s' % number)
illusion = locals()[quadrantVariable]
d["DataRows{0}".format(number)] = XDataNumericCopy.iloc[illusion, :]
dicTransf["transf1"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
splittedCol = columnsNames[(count)*len(listofTransformations)+1].split('_')
if(len(splittedCol) == 1):
d={}
flagInf = False
XDataNumericCopy = XDataNumeric.copy()
for number in range(1,6):
quadrantVariable = str('quadrant%s' % number)
illusion = locals()[quadrantVariable]
d["DataRows{0}".format(number)] = XDataNumericCopy.iloc[illusion, :]
dicTransf["transf2"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
else:
d={}
flagInf = False
XDataNumericCopy = XDataNumeric.copy()
number_of_bins = np.histogram_bin_edges(XDataNumericCopy[i], bins='auto')
            emptyLabels = list(range(1, len(number_of_bins)))  # one integer label per bin
XDataNumericCopy[i] = pd.cut(XDataNumericCopy[i], bins=number_of_bins, labels=emptyLabels, include_lowest=True, right=True)
XDataNumericCopy[i] = pd.to_numeric(XDataNumericCopy[i], downcast='signed')
for number in range(1,6):
quadrantVariable = str('quadrant%s' % number)
illusion = locals()[quadrantVariable]
d["DataRows{0}".format(number)] = XDataNumericCopy.iloc[illusion, :]
dicTransf["transf2"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
splittedCol = columnsNames[(count)*len(listofTransformations)+2].split('_')
if(len(splittedCol) == 1):
d={}
flagInf = False
XDataNumericCopy = XDataNumeric.copy()
for number in range(1,6):
quadrantVariable = str('quadrant%s' % number)
illusion = locals()[quadrantVariable]
d["DataRows{0}".format(number)] = XDataNumericCopy.iloc[illusion, :]
dicTransf["transf3"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
else:
d={}
flagInf = False
XDataNumericCopy = XDataNumeric.copy()
XDataNumericCopy[i] = (XDataNumericCopy[i]-XDataNumericCopy[i].mean())/XDataNumericCopy[i].std()
for number in range(1,6):
quadrantVariable = str('quadrant%s' % number)
illusion = locals()[quadrantVariable]
d["DataRows{0}".format(number)] = XDataNumericCopy.iloc[illusion, :]
dicTransf["transf3"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
splittedCol = columnsNames[(count)*len(listofTransformations)+3].split('_')
if(len(splittedCol) == 1):
d={}
flagInf = False
XDataNumericCopy = XDataNumeric.copy()
for number in range(1,6):
quadrantVariable = str('quadrant%s' % number)
illusion = locals()[quadrantVariable]
d["DataRows{0}".format(number)] = XDataNumericCopy.iloc[illusion, :]
dicTransf["transf4"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
else:
d={}
flagInf = False
XDataNumericCopy = XDataNumeric.copy()
XDataNumericCopy[i] = (XDataNumericCopy[i]-XDataNumericCopy[i].min())/(XDataNumericCopy[i].max()-XDataNumericCopy[i].min())
for number in range(1,6):
quadrantVariable = str('quadrant%s' % number)
illusion = locals()[quadrantVariable]
d["DataRows{0}".format(number)] = XDataNumericCopy.iloc[illusion, :]
dicTransf["transf4"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
splittedCol = columnsNames[(count)*len(listofTransformations)+4].split('_')
if(len(splittedCol) == 1):
d={}
flagInf = False
XDataNumericCopy = XDataNumeric.copy()
for number in range(1,6):
quadrantVariable = str('quadrant%s' % number)
illusion = locals()[quadrantVariable]
d["DataRows{0}".format(number)] = XDataNumericCopy.iloc[illusion, :]
dicTransf["transf5"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
else:
d={}
flagInf = False
XDataNumericCopy = XDataNumeric.copy()
dfTemp = []
dfTemp = np.log2(XDataNumericCopy[i])
dfTemp = dfTemp.replace([np.inf, -np.inf], np.nan)
dfTemp = dfTemp.fillna(0)
XDataNumericCopy[i] = dfTemp
for number in range(1,6):
quadrantVariable = str('quadrant%s' % number)
illusion = locals()[quadrantVariable]
d["DataRows{0}".format(number)] = XDataNumericCopy.iloc[illusion, :]
dicTransf["transf5"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
splittedCol = columnsNames[(count)*len(listofTransformations)+5].split('_')
if(len(splittedCol) == 1):
d={}
flagInf = False
XDataNumericCopy = XDataNumeric.copy()
for number in range(1,6):
quadrantVariable = str('quadrant%s' % number)
illusion = locals()[quadrantVariable]
d["DataRows{0}".format(number)] = XDataNumericCopy.iloc[illusion, :]
dicTransf["transf6"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
else:
d={}
flagInf = False
XDataNumericCopy = XDataNumeric.copy()
dfTemp = []
dfTemp = np.log1p(XDataNumericCopy[i])
dfTemp = dfTemp.replace([np.inf, -np.inf], np.nan)
dfTemp = dfTemp.fillna(0)
XDataNumericCopy[i] = dfTemp
for number in range(1,6):
quadrantVariable = str('quadrant%s' % number)
illusion = locals()[quadrantVariable]
d["DataRows{0}".format(number)] = XDataNumericCopy.iloc[illusion, :]
dicTransf["transf6"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
splittedCol = columnsNames[(count)*len(listofTransformations)+6].split('_')
if(len(splittedCol) == 1):
d={}
flagInf = False
XDataNumericCopy = XDataNumeric.copy()
for number in range(1,6):
quadrantVariable = str('quadrant%s' % number)
illusion = locals()[quadrantVariable]
d["DataRows{0}".format(number)] = XDataNumericCopy.iloc[illusion, :]
dicTransf["transf7"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
else:
d={}
flagInf = False
XDataNumericCopy = XDataNumeric.copy()
dfTemp = []
dfTemp = np.log10(XDataNumericCopy[i])
dfTemp = dfTemp.replace([np.inf, -np.inf], np.nan)
dfTemp = dfTemp.fillna(0)
XDataNumericCopy[i] = dfTemp
for number in range(1,6):
quadrantVariable = str('quadrant%s' % number)
illusion = locals()[quadrantVariable]
d["DataRows{0}".format(number)] = XDataNumericCopy.iloc[illusion, :]
dicTransf["transf7"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
splittedCol = columnsNames[(count)*len(listofTransformations)+7].split('_')
if(len(splittedCol) == 1):
d={}
flagInf = False
XDataNumericCopy = XDataNumeric.copy()
for number in range(1,6):
quadrantVariable = str('quadrant%s' % number)
illusion = locals()[quadrantVariable]
d["DataRows{0}".format(number)] = XDataNumericCopy.iloc[illusion, :]
dicTransf["transf8"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
else:
d={}
flagInf = False
XDataNumericCopy = XDataNumeric.copy()
dfTemp = []
dfTemp = np.exp2(XDataNumericCopy[i])
dfTemp = dfTemp.replace([np.inf, -np.inf], np.nan)
dfTemp = dfTemp.fillna(0)
XDataNumericCopy[i] = dfTemp
if (np.isinf(dfTemp.var())):
flagInf = True
for number in range(1,6):
quadrantVariable = str('quadrant%s' % number)
illusion = locals()[quadrantVariable]
d["DataRows{0}".format(number)] = XDataNumericCopy.iloc[illusion, :]
dicTransf["transf8"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
splittedCol = columnsNames[(count)*len(listofTransformations)+8].split('_')
if(len(splittedCol) == 1):
d={}
flagInf = False
XDataNumericCopy = XDataNumeric.copy()
for number in range(1,6):
quadrantVariable = str('quadrant%s' % number)
illusion = locals()[quadrantVariable]
d["DataRows{0}".format(number)] = XDataNumericCopy.iloc[illusion, :]
dicTransf["transf9"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
else:
d={}
flagInf = False
XDataNumericCopy = XDataNumeric.copy()
dfTemp = []
dfTemp = np.expm1(XDataNumericCopy[i])
dfTemp = dfTemp.replace([np.inf, -np.inf], np.nan)
dfTemp = dfTemp.fillna(0)
XDataNumericCopy[i] = dfTemp
if (np.isinf(dfTemp.var())):
flagInf = True
for number in range(1,6):
quadrantVariable = str('quadrant%s' % number)
illusion = locals()[quadrantVariable]
d["DataRows{0}".format(number)] = XDataNumericCopy.iloc[illusion, :]
dicTransf["transf9"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
splittedCol = columnsNames[(count)*len(listofTransformations)+9].split('_')
if(len(splittedCol) == 1):
d={}
flagInf = False
XDataNumericCopy = XDataNumeric.copy()
for number in range(1,6):
quadrantVariable = str('quadrant%s' % number)
illusion = locals()[quadrantVariable]
d["DataRows{0}".format(number)] = XDataNumericCopy.iloc[illusion, :]
dicTransf["transf10"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
else:
d={}
flagInf = False
XDataNumericCopy = XDataNumeric.copy()
XDataNumericCopy[i] = np.power(XDataNumericCopy[i], 2)
for number in range(1,6):
quadrantVariable = str('quadrant%s' % number)
illusion = locals()[quadrantVariable]
d["DataRows{0}".format(number)] = XDataNumericCopy.iloc[illusion, :]
dicTransf["transf10"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
splittedCol = columnsNames[(count)*len(listofTransformations)+10].split('_')
if(len(splittedCol) == 1):
d={}
flagInf = False
XDataNumericCopy = XDataNumeric.copy()
for number in range(1,6):
quadrantVariable = str('quadrant%s' % number)
illusion = locals()[quadrantVariable]
d["DataRows{0}".format(number)] = XDataNumericCopy.iloc[illusion, :]
dicTransf["transf11"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
else:
d={}
flagInf = False
XDataNumericCopy = XDataNumeric.copy()
XDataNumericCopy[i] = np.power(XDataNumericCopy[i], 3)
for number in range(1,6):
quadrantVariable = str('quadrant%s' % number)
illusion = locals()[quadrantVariable]
d["DataRows{0}".format(number)] = XDataNumericCopy.iloc[illusion, :]
dicTransf["transf11"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
splittedCol = columnsNames[(count)*len(listofTransformations)+11].split('_')
if(len(splittedCol) == 1):
d={}
flagInf = False
XDataNumericCopy = XDataNumeric.copy()
for number in range(1,6):
quadrantVariable = str('quadrant%s' % number)
illusion = locals()[quadrantVariable]
d["DataRows{0}".format(number)] = XDataNumericCopy.iloc[illusion, :]
dicTransf["transf12"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
else:
d={}
flagInf = False
XDataNumericCopy = XDataNumeric.copy()
XDataNumericCopy[i] = np.power(XDataNumericCopy[i], 4)
for number in range(1,6):
quadrantVariable = str('quadrant%s' % number)
illusion = locals()[quadrantVariable]
d["DataRows{0}".format(number)] = XDataNumericCopy.iloc[illusion, :]
dicTransf["transf12"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
packCorrTransformed.append(dicTransf)
return 'Everything Okay'
def NewComputationTransf(DataRows1, DataRows2, DataRows3, DataRows4, DataRows5, quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, feature, count, flagInf):
corrMatrix1 = DataRows1.corr()
corrMatrix1 = corrMatrix1.abs()
corrMatrix2 = DataRows2.corr()
corrMatrix2 = corrMatrix2.abs()
corrMatrix3 = DataRows3.corr()
corrMatrix3 = corrMatrix3.abs()
corrMatrix4 = DataRows4.corr()
corrMatrix4 = corrMatrix4.abs()
corrMatrix5 = DataRows5.corr()
corrMatrix5 = corrMatrix5.abs()
corrMatrix1 = corrMatrix1.loc[[feature]]
corrMatrix2 = corrMatrix2.loc[[feature]]
corrMatrix3 = corrMatrix3.loc[[feature]]
corrMatrix4 = corrMatrix4.loc[[feature]]
corrMatrix5 = corrMatrix5.loc[[feature]]
DataRows1 = DataRows1.reset_index(drop=True)
DataRows2 = DataRows2.reset_index(drop=True)
DataRows3 = DataRows3.reset_index(drop=True)
DataRows4 = DataRows4.reset_index(drop=True)
DataRows5 = DataRows5.reset_index(drop=True)
targetRows1 = [yData[i] for i in quadrant1]
targetRows2 = [yData[i] for i in quadrant2]
targetRows3 = [yData[i] for i in quadrant3]
targetRows4 = [yData[i] for i in quadrant4]
targetRows5 = [yData[i] for i in quadrant5]
targetRows1Arr = np.array(targetRows1)
targetRows2Arr = np.array(targetRows2)
targetRows3Arr = np.array(targetRows3)
targetRows4Arr = np.array(targetRows4)
targetRows5Arr = np.array(targetRows5)
uniqueTarget1 = unique(targetRows1)
uniqueTarget2 = unique(targetRows2)
uniqueTarget3 = unique(targetRows3)
uniqueTarget4 = unique(targetRows4)
uniqueTarget5 = unique(targetRows5)
if (len(targetRows1Arr) > 0):
onehotEncoder1 = OneHotEncoder(sparse=False)
targetRows1Arr = targetRows1Arr.reshape(len(targetRows1Arr), 1)
onehotEncoder1 = onehotEncoder1.fit_transform(targetRows1Arr)
hotEncoderDF1 = pd.DataFrame(onehotEncoder1)
concatDF1 = pd.concat([DataRows1, hotEncoderDF1], axis=1)
corrMatrixComb1 = concatDF1.corr()
corrMatrixComb1 = corrMatrixComb1.abs()
corrMatrixComb1 = corrMatrixComb1.iloc[:,-len(uniqueTarget1):]
DataRows1 = DataRows1.replace([np.inf, -np.inf], np.nan)
DataRows1 = DataRows1.fillna(0)
X1 = add_constant(DataRows1)
X1 = X1.replace([np.inf, -np.inf], np.nan)
X1 = X1.fillna(0)
VIF1 = pd.Series([variance_inflation_factor(X1.values, i)
for i in range(X1.shape[1])],
index=X1.columns)
if (flagInf == False):
VIF1 = VIF1.replace([np.inf, -np.inf], np.nan)
VIF1 = VIF1.fillna(0)
VIF1 = VIF1.loc[[feature]]
else:
VIF1 = pd.Series()
if ((len(targetRows1Arr) > 2) and (flagInf == False)):
MI1 = mutual_info_classif(DataRows1, targetRows1Arr, n_neighbors=3, random_state=RANDOM_SEED)
MI1List = MI1.tolist()
MI1List = MI1List[count]
else:
MI1List = []
else:
corrMatrixComb1 = pd.DataFrame()
VIF1 = pd.Series()
MI1List = []
if (len(targetRows2Arr) > 0):
onehotEncoder2 = OneHotEncoder(sparse=False)
targetRows2Arr = targetRows2Arr.reshape(len(targetRows2Arr), 1)
onehotEncoder2 = onehotEncoder2.fit_transform(targetRows2Arr)
hotEncoderDF2 = pd.DataFrame(onehotEncoder2)
concatDF2 = | pd.concat([DataRows2, hotEncoderDF2], axis=1) | pandas.concat |
from datetime import datetime
from decimal import Decimal
import numpy as np
import pytest
import pytz
from pandas.compat import is_platform_little_endian
from pandas import CategoricalIndex, DataFrame, Index, Interval, RangeIndex, Series
import pandas._testing as tm
class TestFromRecords:
def test_from_records_with_datetimes(self):
# this may fail on certain platforms because of a numpy issue
# related GH#6140
if not is_platform_little_endian():
pytest.skip("known failure of test on non-little endian")
# construction with a null in a recarray
# GH#6140
expected = DataFrame({"EXPIRY": [datetime(2005, 3, 1, 0, 0), None]})
arrdata = [np.array([datetime(2005, 3, 1, 0, 0), None])]
dtypes = [("EXPIRY", "<M8[ns]")]
try:
recarray = np.core.records.fromarrays(arrdata, dtype=dtypes)
except (ValueError):
pytest.skip("known failure of numpy rec array creation")
result = DataFrame.from_records(recarray)
tm.assert_frame_equal(result, expected)
# coercion should work too
arrdata = [np.array([datetime(2005, 3, 1, 0, 0), None])]
dtypes = [("EXPIRY", "<M8[m]")]
recarray = np.core.records.fromarrays(arrdata, dtype=dtypes)
result = DataFrame.from_records(recarray)
tm.assert_frame_equal(result, expected)
def test_from_records_sequencelike(self):
df = DataFrame(
{
"A": np.array(np.random.randn(6), dtype=np.float64),
"A1": np.array(np.random.randn(6), dtype=np.float64),
"B": np.array(np.arange(6), dtype=np.int64),
"C": ["foo"] * 6,
"D": np.array([True, False] * 3, dtype=bool),
"E": np.array(np.random.randn(6), dtype=np.float32),
"E1": np.array(np.random.randn(6), dtype=np.float32),
"F": np.array(np.arange(6), dtype=np.int32),
}
)
        # it is actually tricky to create the record-like arrays and
        # have the dtypes stay intact
blocks = df._to_dict_of_blocks()
tuples = []
columns = []
dtypes = []
for dtype, b in blocks.items():
columns.extend(b.columns)
dtypes.extend([(c, np.dtype(dtype).descr[0][1]) for c in b.columns])
for i in range(len(df.index)):
tup = []
for _, b in blocks.items():
tup.extend(b.iloc[i].values)
tuples.append(tuple(tup))
recarray = np.array(tuples, dtype=dtypes).view(np.recarray)
recarray2 = df.to_records()
lists = [list(x) for x in tuples]
# tuples (lose the dtype info)
result = DataFrame.from_records(tuples, columns=columns).reindex(
columns=df.columns
)
# created recarray and with to_records recarray (have dtype info)
result2 = DataFrame.from_records(recarray, columns=columns).reindex(
columns=df.columns
)
result3 = DataFrame.from_records(recarray2, columns=columns).reindex(
columns=df.columns
)
        # list of tuples (no dtype info)
result4 = DataFrame.from_records(lists, columns=columns).reindex(
columns=df.columns
)
tm.assert_frame_equal(result, df, check_dtype=False)
tm.assert_frame_equal(result2, df)
tm.assert_frame_equal(result3, df)
tm.assert_frame_equal(result4, df, check_dtype=False)
# tuples is in the order of the columns
result = DataFrame.from_records(tuples)
tm.assert_index_equal(result.columns, RangeIndex(8))
# test exclude parameter & we are casting the results here (as we don't
# have dtype info to recover)
columns_to_test = [columns.index("C"), columns.index("E1")]
exclude = list(set(range(8)) - set(columns_to_test))
result = DataFrame.from_records(tuples, exclude=exclude)
result.columns = [columns[i] for i in sorted(columns_to_test)]
tm.assert_series_equal(result["C"], df["C"])
tm.assert_series_equal(result["E1"], df["E1"].astype("float64"))
# empty case
result = DataFrame.from_records([], columns=["foo", "bar", "baz"])
assert len(result) == 0
tm.assert_index_equal(result.columns, Index(["foo", "bar", "baz"]))
result = DataFrame.from_records([])
assert len(result) == 0
assert len(result.columns) == 0
def test_from_records_dictlike(self):
# test the dict methods
df = DataFrame(
{
"A": np.array(np.random.randn(6), dtype=np.float64),
"A1": np.array(np.random.randn(6), dtype=np.float64),
"B": np.array(np.arange(6), dtype=np.int64),
"C": ["foo"] * 6,
"D": np.array([True, False] * 3, dtype=bool),
"E": np.array(np.random.randn(6), dtype=np.float32),
"E1": np.array(np.random.randn(6), dtype=np.float32),
"F": np.array(np.arange(6), dtype=np.int32),
}
)
# columns is in a different order here than the actual items iterated
# from the dict
blocks = df._to_dict_of_blocks()
columns = []
for dtype, b in blocks.items():
columns.extend(b.columns)
asdict = {x: y for x, y in df.items()}
asdict2 = {x: y.values for x, y in df.items()}
# dict of series & dict of ndarrays (have dtype info)
results = []
results.append(DataFrame.from_records(asdict).reindex(columns=df.columns))
results.append(
DataFrame.from_records(asdict, columns=columns).reindex(columns=df.columns)
)
results.append(
DataFrame.from_records(asdict2, columns=columns).reindex(columns=df.columns)
)
for r in results:
tm.assert_frame_equal(r, df)
def test_from_records_with_index_data(self):
df = DataFrame(np.random.randn(10, 3), columns=["A", "B", "C"])
data = np.random.randn(10)
df1 = DataFrame.from_records(df, index=data)
tm.assert_index_equal(df1.index, Index(data))
def test_from_records_bad_index_column(self):
df = DataFrame(np.random.randn(10, 3), columns=["A", "B", "C"])
# should pass
df1 = DataFrame.from_records(df, index=["C"])
tm.assert_index_equal(df1.index, Index(df.C))
df1 = DataFrame.from_records(df, index="C")
tm.assert_index_equal(df1.index, Index(df.C))
# should fail
msg = r"Shape of passed values is \(10, 3\), indices imply \(1, 3\)"
with pytest.raises(ValueError, match=msg):
DataFrame.from_records(df, index=[2])
with pytest.raises(KeyError, match=r"^2$"):
DataFrame.from_records(df, index=2)
def test_from_records_non_tuple(self):
class Record:
def __init__(self, *args):
self.args = args
def __getitem__(self, i):
return self.args[i]
def __iter__(self):
return iter(self.args)
recs = [Record(1, 2, 3), Record(4, 5, 6), Record(7, 8, 9)]
tups = [tuple(rec) for rec in recs]
result = DataFrame.from_records(recs)
expected = DataFrame.from_records(tups)
tm.assert_frame_equal(result, expected)
def test_from_records_len0_with_columns(self):
# GH#2633
result = DataFrame.from_records([], index="foo", columns=["foo", "bar"])
expected = Index(["bar"])
assert len(result) == 0
assert result.index.name == "foo"
tm.assert_index_equal(result.columns, expected)
def test_from_records_series_list_dict(self):
# GH#27358
expected = DataFrame([[{"a": 1, "b": 2}, {"a": 3, "b": 4}]]).T
data = Series([[{"a": 1, "b": 2}], [{"a": 3, "b": 4}]])
result = DataFrame.from_records(data)
tm.assert_frame_equal(result, expected)
def test_from_records_series_categorical_index(self):
# GH#32805
index = CategoricalIndex(
[Interval(-20, -10), Interval(-10, 0), Interval(0, 10)]
)
series_of_dicts = | Series([{"a": 1}, {"a": 2}, {"b": 3}], index=index) | pandas.Series |
from IPython.core.display import display, HTML
import pandas as pd
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import glob
import os
import gc
from joblib import Parallel, delayed
from sklearn import preprocessing, model_selection
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import QuantileTransformer
from sklearn.metrics import r2_score
import matplotlib.pyplot as plt
import numpy.matlib
path_submissions = '/'
target_name = 'target'
scores_folds = {}
import pandas as pd
print(pd.__version__)
# data directory
# data_dir = '../input/optiver-realized-volatility-prediction/'
data_dir = '/home/data/optiver-realized-volatility-prediction/'
# Function to calculate first WAP
def calc_wap1(df):
wap = (df['bid_price1'] * df['ask_size1'] + df['ask_price1'] * df['bid_size1']) / (
df['bid_size1'] + df['ask_size1'])
return wap
# Function to calculate second WAP
def calc_wap2(df):
wap = (df['bid_price2'] * df['ask_size2'] + df['ask_price2'] * df['bid_size2']) / (
df['bid_size2'] + df['ask_size2'])
return wap
def calc_wap3(df):
wap = (df['bid_price1'] * df['bid_size1'] + df['ask_price1'] * df['ask_size1']) / (
df['bid_size1'] + df['ask_size1'])
return wap
def calc_wap4(df):
wap = (df['bid_price2'] * df['bid_size2'] + df['ask_price2'] * df['ask_size2']) / (
df['bid_size2'] + df['ask_size2'])
return wap
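# Illustrative sketch (not part of the original pipeline, values are made up): a tiny
# one-row order book showing how calc_wap1 weights each price by the opposite side's
# size, so the larger resting bid size pulls the WAP toward the ask price.
def _demo_calc_wap1():
    demo = pd.DataFrame({'bid_price1': [100.1], 'ask_price1': [100.3],
                         'bid_size1': [200], 'ask_size1': [100]})
    # (100.1 * 100 + 100.3 * 200) / (200 + 100) = 30070 / 300 ≈ 100.233
    return calc_wap1(demo)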
# Function to calculate the log of the return
# Remember that logb(x / y) = logb(x) - logb(y)
def log_return(series):
return np.log(series).diff()
# Calculate the realized volatility
def realized_volatility(series):
return np.sqrt(np.sum(series ** 2))
# Function to count unique elements of a series
def count_unique(series):
return len(np.unique(series))
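# Illustrative sketch with hypothetical prices: realized volatility here is the square
# root of the sum of squared log returns inside a time bucket (no annualisation).
def _demo_realized_volatility():
    prices = pd.Series([100.0, 100.5, 100.2, 100.4])
    returns = log_return(prices).dropna()     # log(p_t) - log(p_{t-1})
    return realized_volatility(returns)       # np.sqrt(np.sum(returns ** 2))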
# Function to read our base train and test set
def read_train_test():
train = pd.read_csv(data_dir + 'train.csv')
test = pd.read_csv(data_dir + '/test.csv')
# Create a key to merge with book and trade data
train['row_id'] = train['stock_id'].astype(str) + '-' + train['time_id'].astype(str)
test['row_id'] = test['stock_id'].astype(str) + '-' + test['time_id'].astype(str)
print(f'Our training set has {train.shape[0]} rows')
train.head()
return train, test
# Function to preprocess book data (for each stock id)
def book_preprocessor(file_path):
df = pd.read_parquet(file_path)
# Calculate Wap
df['wap1'] = calc_wap1(df)
df['wap2'] = calc_wap2(df)
df['wap3'] = calc_wap3(df)
df['wap4'] = calc_wap4(df)
# Calculate log returns
df['log_return1'] = df.groupby(['time_id'])['wap1'].apply(log_return)
df['log_return2'] = df.groupby(['time_id'])['wap2'].apply(log_return)
df['log_return3'] = df.groupby(['time_id'])['wap3'].apply(log_return)
df['log_return4'] = df.groupby(['time_id'])['wap4'].apply(log_return)
# Calculate wap balance
df['wap_balance'] = abs(df['wap1'] - df['wap2'])
# Calculate spread
df['price_spread'] = (df['ask_price1'] - df['bid_price1']) / ((df['ask_price1'] + df['bid_price1']) / 2)
df['price_spread2'] = (df['ask_price2'] - df['bid_price2']) / ((df['ask_price2'] + df['bid_price2']) / 2)
df['bid_spread'] = df['bid_price1'] - df['bid_price2']
df['ask_spread'] = df['ask_price1'] - df['ask_price2']
df["bid_ask_spread"] = abs(df['bid_spread'] - df['ask_spread'])
df['total_volume'] = (df['ask_size1'] + df['ask_size2']) + (df['bid_size1'] + df['bid_size2'])
df['volume_imbalance'] = abs((df['ask_size1'] + df['ask_size2']) - (df['bid_size1'] + df['bid_size2']))
# Dict for aggregations
create_feature_dict = {
'wap1': [np.sum, np.std],
'wap2': [np.sum, np.std],
'wap3': [np.sum, np.std],
'wap4': [np.sum, np.std],
'log_return1': [realized_volatility],
'log_return2': [realized_volatility],
'log_return3': [realized_volatility],
'log_return4': [realized_volatility],
'wap_balance': [np.sum, np.max],
'price_spread': [np.sum, np.max],
'price_spread2': [np.sum, np.max],
'bid_spread': [np.sum, np.max],
'ask_spread': [np.sum, np.max],
'total_volume': [np.sum, np.max],
'volume_imbalance': [np.sum, np.max],
"bid_ask_spread": [np.sum, np.max],
}
create_feature_dict_time = {
'log_return1': [realized_volatility],
'log_return2': [realized_volatility],
'log_return3': [realized_volatility],
'log_return4': [realized_volatility],
}
# Function to get group stats for different windows (seconds in bucket)
def get_stats_window(fe_dict, seconds_in_bucket, add_suffix=False):
# Group by the window
df_feature = df[df['seconds_in_bucket'] >= seconds_in_bucket].groupby(['time_id']).agg(fe_dict).reset_index()
# Rename columns joining suffix
df_feature.columns = ['_'.join(col) for col in df_feature.columns]
# Add a suffix to differentiate windows
if add_suffix:
df_feature = df_feature.add_suffix('_' + str(seconds_in_bucket))
return df_feature
# Get the stats for different windows
df_feature = get_stats_window(create_feature_dict, seconds_in_bucket=0, add_suffix=False)
df_feature_500 = get_stats_window(create_feature_dict_time, seconds_in_bucket=500, add_suffix=True)
df_feature_400 = get_stats_window(create_feature_dict_time, seconds_in_bucket=400, add_suffix=True)
df_feature_300 = get_stats_window(create_feature_dict_time, seconds_in_bucket=300, add_suffix=True)
df_feature_200 = get_stats_window(create_feature_dict_time, seconds_in_bucket=200, add_suffix=True)
df_feature_100 = get_stats_window(create_feature_dict_time, seconds_in_bucket=100, add_suffix=True)
# Merge all
df_feature = df_feature.merge(df_feature_500, how='left', left_on='time_id_', right_on='time_id__500')
df_feature = df_feature.merge(df_feature_400, how='left', left_on='time_id_', right_on='time_id__400')
df_feature = df_feature.merge(df_feature_300, how='left', left_on='time_id_', right_on='time_id__300')
df_feature = df_feature.merge(df_feature_200, how='left', left_on='time_id_', right_on='time_id__200')
df_feature = df_feature.merge(df_feature_100, how='left', left_on='time_id_', right_on='time_id__100')
    # Drop unnecessary time_ids
df_feature.drop(['time_id__500', 'time_id__400', 'time_id__300', 'time_id__200', 'time_id__100'], axis=1,
inplace=True)
# Create row_id so we can merge
stock_id = file_path.split('=')[1]
df_feature['row_id'] = df_feature['time_id_'].apply(lambda x: f'{stock_id}-{x}')
df_feature.drop(['time_id_'], axis=1, inplace=True)
return df_feature
# Function to preprocess trade data (for each stock id)
def trade_preprocessor(file_path):
df = pd.read_parquet(file_path)
df['log_return'] = df.groupby('time_id')['price'].apply(log_return)
df['amount'] = df['price'] * df['size']
# Dict for aggregations
create_feature_dict = {
'log_return': [realized_volatility],
'seconds_in_bucket': [count_unique],
'size': [np.sum, np.max, np.min],
'order_count': [np.sum, np.max],
'amount': [np.sum, np.max, np.min],
}
create_feature_dict_time = {
'log_return': [realized_volatility],
'seconds_in_bucket': [count_unique],
'size': [np.sum],
'order_count': [np.sum],
}
# Function to get group stats for different windows (seconds in bucket)
def get_stats_window(fe_dict, seconds_in_bucket, add_suffix=False):
# Group by the window
df_feature = df[df['seconds_in_bucket'] >= seconds_in_bucket].groupby(['time_id']).agg(fe_dict).reset_index()
# Rename columns joining suffix
df_feature.columns = ['_'.join(col) for col in df_feature.columns]
# Add a suffix to differentiate windows
if add_suffix:
df_feature = df_feature.add_suffix('_' + str(seconds_in_bucket))
return df_feature
# Get the stats for different windows
df_feature = get_stats_window(create_feature_dict, seconds_in_bucket=0, add_suffix=False)
df_feature_500 = get_stats_window(create_feature_dict_time, seconds_in_bucket=500, add_suffix=True)
df_feature_400 = get_stats_window(create_feature_dict_time, seconds_in_bucket=400, add_suffix=True)
df_feature_300 = get_stats_window(create_feature_dict_time, seconds_in_bucket=300, add_suffix=True)
df_feature_200 = get_stats_window(create_feature_dict_time, seconds_in_bucket=200, add_suffix=True)
df_feature_100 = get_stats_window(create_feature_dict_time, seconds_in_bucket=100, add_suffix=True)
def tendency(price, vol):
df_diff = np.diff(price)
val = (df_diff / price[1:]) * 100
power = np.sum(val * vol[1:])
return (power)
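    # tendency: volume-weighted sum of tick-to-tick percent price changes, i.e. a signed
    # measure of whether price drifted up or down (and how much size moved with it).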
lis = []
for n_time_id in df['time_id'].unique():
df_id = df[df['time_id'] == n_time_id]
tendencyV = tendency(df_id['price'].values, df_id['size'].values)
f_max = np.sum(df_id['price'].values > np.mean(df_id['price'].values))
f_min = np.sum(df_id['price'].values < np.mean(df_id['price'].values))
df_max = np.sum(np.diff(df_id['price'].values) > 0)
df_min = np.sum(np.diff(df_id['price'].values) < 0)
# new
abs_diff = np.median(np.abs(df_id['price'].values - np.mean(df_id['price'].values)))
energy = np.mean(df_id['price'].values ** 2)
iqr_p = np.percentile(df_id['price'].values, 75) - np.percentile(df_id['price'].values, 25)
# vol vars
abs_diff_v = np.median(np.abs(df_id['size'].values - np.mean(df_id['size'].values)))
energy_v = np.sum(df_id['size'].values ** 2)
iqr_p_v = np.percentile(df_id['size'].values, 75) - np.percentile(df_id['size'].values, 25)
lis.append(
{'time_id': n_time_id, 'tendency': tendencyV, 'f_max': f_max, 'f_min': f_min, 'df_max': df_max,
'df_min': df_min,
'abs_diff': abs_diff, 'energy': energy, 'iqr_p': iqr_p, 'abs_diff_v': abs_diff_v, 'energy_v': energy_v,
'iqr_p_v': iqr_p_v})
df_lr = pd.DataFrame(lis)
df_feature = df_feature.merge(df_lr, how='left', left_on='time_id_', right_on='time_id')
# Merge all
df_feature = df_feature.merge(df_feature_500, how='left', left_on='time_id_', right_on='time_id__500')
df_feature = df_feature.merge(df_feature_400, how='left', left_on='time_id_', right_on='time_id__400')
df_feature = df_feature.merge(df_feature_300, how='left', left_on='time_id_', right_on='time_id__300')
df_feature = df_feature.merge(df_feature_200, how='left', left_on='time_id_', right_on='time_id__200')
df_feature = df_feature.merge(df_feature_100, how='left', left_on='time_id_', right_on='time_id__100')
    # Drop unnecessary time_ids
df_feature.drop(['time_id__500', 'time_id__400', 'time_id__300', 'time_id__200', 'time_id', 'time_id__100'], axis=1,
inplace=True)
df_feature = df_feature.add_prefix('trade_')
stock_id = file_path.split('=')[1]
df_feature['row_id'] = df_feature['trade_time_id_'].apply(lambda x: f'{stock_id}-{x}')
df_feature.drop(['trade_time_id_'], axis=1, inplace=True)
return df_feature
# Function to get group stats for the stock_id and time_id
def get_time_stock(df):
vol_cols = ['log_return1_realized_volatility', 'log_return2_realized_volatility',
'log_return1_realized_volatility_400', 'log_return2_realized_volatility_400',
'log_return1_realized_volatility_300', 'log_return2_realized_volatility_300',
'log_return1_realized_volatility_200', 'log_return2_realized_volatility_200',
'trade_log_return_realized_volatility', 'trade_log_return_realized_volatility_400',
'trade_log_return_realized_volatility_300', 'trade_log_return_realized_volatility_200']
# Group by the stock id
df_stock_id = df.groupby(['stock_id'])[vol_cols].agg(['mean', 'std', 'max', 'min', ]).reset_index()
# Rename columns joining suffix
df_stock_id.columns = ['_'.join(col) for col in df_stock_id.columns]
df_stock_id = df_stock_id.add_suffix('_' + 'stock')
# Group by the stock id
df_time_id = df.groupby(['time_id'])[vol_cols].agg(['mean', 'std', 'max', 'min', ]).reset_index()
# Rename columns joining suffix
df_time_id.columns = ['_'.join(col) for col in df_time_id.columns]
df_time_id = df_time_id.add_suffix('_' + 'time')
# Merge with original dataframe
df = df.merge(df_stock_id, how='left', left_on=['stock_id'], right_on=['stock_id__stock'])
df = df.merge(df_time_id, how='left', left_on=['time_id'], right_on=['time_id__time'])
df.drop(['stock_id__stock', 'time_id__time'], axis=1, inplace=True)
return df
# Function to run the preprocessing in parallel (one task per stock id)
def preprocessor(list_stock_ids, is_train=True):
    # Parallel for loop
def for_joblib(stock_id):
# Train
if is_train:
file_path_book = data_dir + "book_train.parquet/stock_id=" + str(stock_id)
file_path_trade = data_dir + "trade_train.parquet/stock_id=" + str(stock_id)
# Test
else:
file_path_book = data_dir + "book_test.parquet/stock_id=" + str(stock_id)
file_path_trade = data_dir + "trade_test.parquet/stock_id=" + str(stock_id)
# Preprocess book and trade data and merge them
df_tmp = pd.merge(book_preprocessor(file_path_book), trade_preprocessor(file_path_trade), on='row_id',
how='left')
# Return the merge dataframe
return df_tmp
    # Use the joblib Parallel API to run the per-stock preprocessing loop in parallel
df = Parallel(n_jobs=-1, verbose=1)(delayed(for_joblib)(stock_id) for stock_id in list_stock_ids)
# Concatenate all the dataframes that return from Parallel
df = pd.concat(df, ignore_index=True)
return df
# Function to calculate the root mean squared percentage error
def rmspe(y_true, y_pred):
return np.sqrt(np.mean(np.square((y_true - y_pred) / y_true)))
# Function to early stop with root mean squared percentage error
def feval_rmspe(y_pred, lgb_train):
y_true = lgb_train.get_label()
return 'RMSPE', rmspe(y_true, y_pred), False
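# Illustrative sketch with made-up numbers: RMSPE is a relative-error metric, so the same
# absolute miss costs far more on a small target than on a large one.
def _demo_rmspe():
    y_true = np.array([0.01, 0.10])
    y_pred = np.array([0.02, 0.11])
    # relative errors are 1.0 and 0.1, so rmspe = sqrt(mean([1.0, 0.01])) ≈ 0.711
    return rmspe(y_true, y_pred)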
# Read train and test
train, test = read_train_test()
# Get unique stock ids
train_stock_ids = train['stock_id'].unique()
# Preprocess them using Parallel and our single stock id functions
train_ = preprocessor(train_stock_ids, is_train=True)
train = train.merge(train_, on=['row_id'], how='left')
# Get unique stock ids
test_stock_ids = test['stock_id'].unique()
# Preprocess them using Parallel and our single stock id functions
test_ = preprocessor(test_stock_ids, is_train=False)
# test = test.merge(test_, on=['row_id'], how='left')
test = test.merge(test_, on=['row_id'])
# Get group stats of time_id and stock_id
train = get_time_stock(train)
test = get_time_stock(test)
# replace by order sum (tau)
train['size_tau'] = np.sqrt(1 / train['trade_seconds_in_bucket_count_unique'])
test['size_tau'] = np.sqrt(1 / test['trade_seconds_in_bucket_count_unique'])
# train['size_tau_450'] = np.sqrt( 1/ train['trade_seconds_in_bucket_count_unique_450'] )
# test['size_tau_450'] = np.sqrt( 1/ test['trade_seconds_in_bucket_count_unique_450'] )
train['size_tau_400'] = np.sqrt(1 / train['trade_seconds_in_bucket_count_unique_400'])
test['size_tau_400'] = np.sqrt(1 / test['trade_seconds_in_bucket_count_unique_400'])
train['size_tau_300'] = np.sqrt(1 / train['trade_seconds_in_bucket_count_unique_300'])
test['size_tau_300'] = np.sqrt(1 / test['trade_seconds_in_bucket_count_unique_300'])
# train['size_tau_150'] = np.sqrt( 1/ train['trade_seconds_in_bucket_count_unique_150'] )
# test['size_tau_150'] = np.sqrt( 1/ test['trade_seconds_in_bucket_count_unique_150'] )
train['size_tau_200'] = np.sqrt(1 / train['trade_seconds_in_bucket_count_unique_200'])
test['size_tau_200'] = np.sqrt(1 / test['trade_seconds_in_bucket_count_unique_200'])
train['size_tau2'] = np.sqrt(1 / train['trade_order_count_sum'])
test['size_tau2'] = np.sqrt(1 / test['trade_order_count_sum'])
# train['size_tau2_450'] = np.sqrt( 0.25/ train['trade_order_count_sum'] )
# test['size_tau2_450'] = np.sqrt( 0.25/ test['trade_order_count_sum'] )
train['size_tau2_400'] = np.sqrt(0.33 / train['trade_order_count_sum'])
test['size_tau2_400'] = np.sqrt(0.33 / test['trade_order_count_sum'])
train['size_tau2_300'] = np.sqrt(0.5 / train['trade_order_count_sum'])
test['size_tau2_300'] = np.sqrt(0.5 / test['trade_order_count_sum'])
# train['size_tau2_150'] = np.sqrt( 0.75/ train['trade_order_count_sum'] )
# test['size_tau2_150'] = np.sqrt( 0.75/ test['trade_order_count_sum'] )
train['size_tau2_200'] = np.sqrt(0.66 / train['trade_order_count_sum'])
test['size_tau2_200'] = np.sqrt(0.66 / test['trade_order_count_sum'])
# delta tau
train['size_tau2_d'] = train['size_tau2_400'] - train['size_tau2']
test['size_tau2_d'] = test['size_tau2_400'] - test['size_tau2']
colNames = [col for col in list(train.columns)
if col not in {"stock_id", "time_id", "target", "row_id"}]
len(colNames)
from sklearn.cluster import KMeans
# making agg features
train_p = pd.read_csv(data_dir + '/train.csv')
train_p = train_p.pivot(index='time_id', columns='stock_id', values='target')
corr = train_p.corr()
ids = corr.index
kmeans = KMeans(n_clusters=7, random_state=0).fit(corr.values)
print(kmeans.labels_)
l = []
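# For each cluster n, recover the stock ids assigned to it: (ids + 1) makes every id
# strictly positive, multiplying by the boolean membership mask zeroes out non-members,
# and the final x - 1 undoes the shift (so a genuine stock_id of 0 is not lost).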
for n in range(7):
l.append([(x - 1) for x in ((ids + 1) * (kmeans.labels_ == n)) if x > 0])
mat = []
matTest = []
n = 0
for ind in l:
print(ind)
newDf = train.loc[train['stock_id'].isin(ind)]
newDf = newDf.groupby(['time_id']).agg(np.nanmean)
newDf.loc[:, 'stock_id'] = str(n) + 'c1'
mat.append(newDf)
newDf = test.loc[test['stock_id'].isin(ind)]
newDf = newDf.groupby(['time_id']).agg(np.nanmean)
newDf.loc[:, 'stock_id'] = str(n) + 'c1'
matTest.append(newDf)
n += 1
mat1 = pd.concat(mat).reset_index()
mat1.drop(columns=['target'], inplace=True)
mat2 = pd.concat(matTest).reset_index()
mat2 = pd.concat([mat2, mat1.loc[mat1.time_id == 5]])
mat1 = mat1.pivot(index='time_id', columns='stock_id')
mat1.columns = ["_".join(x) for x in mat1.columns.ravel()]
mat1.reset_index(inplace=True)
mat2 = mat2.pivot(index='time_id', columns='stock_id')
mat2.columns = ["_".join(x) for x in mat2.columns.ravel()]
mat2.reset_index(inplace=True)
nnn = ['time_id',
'log_return1_realized_volatility_0c1',
'log_return1_realized_volatility_1c1',
'log_return1_realized_volatility_3c1',
'log_return1_realized_volatility_4c1',
'log_return1_realized_volatility_6c1',
'total_volume_sum_0c1',
'total_volume_sum_1c1',
'total_volume_sum_3c1',
'total_volume_sum_4c1',
'total_volume_sum_6c1',
'trade_size_sum_0c1',
'trade_size_sum_1c1',
'trade_size_sum_3c1',
'trade_size_sum_4c1',
'trade_size_sum_6c1',
'trade_order_count_sum_0c1',
'trade_order_count_sum_1c1',
'trade_order_count_sum_3c1',
'trade_order_count_sum_4c1',
'trade_order_count_sum_6c1',
'price_spread_sum_0c1',
'price_spread_sum_1c1',
'price_spread_sum_3c1',
'price_spread_sum_4c1',
'price_spread_sum_6c1',
'bid_spread_sum_0c1',
'bid_spread_sum_1c1',
'bid_spread_sum_3c1',
'bid_spread_sum_4c1',
'bid_spread_sum_6c1',
'ask_spread_sum_0c1',
'ask_spread_sum_1c1',
'ask_spread_sum_3c1',
'ask_spread_sum_4c1',
'ask_spread_sum_6c1',
'volume_imbalance_sum_0c1',
'volume_imbalance_sum_1c1',
'volume_imbalance_sum_3c1',
'volume_imbalance_sum_4c1',
'volume_imbalance_sum_6c1',
'bid_ask_spread_sum_0c1',
'bid_ask_spread_sum_1c1',
'bid_ask_spread_sum_3c1',
'bid_ask_spread_sum_4c1',
'bid_ask_spread_sum_6c1',
'size_tau2_0c1',
'size_tau2_1c1',
'size_tau2_3c1',
'size_tau2_4c1',
'size_tau2_6c1']
train = pd.merge(train, mat1[nnn], how='left', on='time_id')
test = | pd.merge(test, mat2[nnn], how='left', on='time_id') | pandas.merge |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import os
import numpy as np
import matplotlib as mp
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle
from PIL import Image
from fractions import Fraction
from pandas import DataFrame, read_table
from .model import ImageSet
class RegionSet(object):
""" Base class for sets of image regions of interest.
RegionSets can be used to spatially group fixations, create Feature objects
for a FixationModel and split an image into parts. Classes inheriting from
RegionSet may specify functions to create regions.
Attributes:
info (DataFrame): table of region metadata (labels, bboxes, number of pixels...)
imageids (list): list of all imageids associated with this RegionSet
is_global (bool): True if regions are global (non-image-specific)
label (str): optional label to distinguish between RegionSets.
memory_usage (float): memory usage of all binary masks (kiB)
size (tuple): image dimensions, specified as (width, height).
"""
def __init__(self, size, regions, region_labels=None, label=None, add_background=False):
""" Create a new RegionSet from existing region masks.
Args:
size (tuple): image dimensions, specified as (width, height)
regions: 3d ndarray (bool) with global set of masks
OR dict of multiple such ndarrays, with imageids as keys
region_labels: list of region labels IF _regions_ is a single array
OR dict of such lists, with imageids as keys
label (str): optional descriptive label for this RegionSet
add_background (bool): if True, this creates a special region to capture all
fixations that don't fall on an explicit region ("background" fixations)
Raises:
ValueError if incorrectly formatted regions/region_labels provided
"""
self._regions = {'*': np.ndarray((0,0,0))}
self._labels = {'*': []}
self.size = size
self.label = label
self._msize = (size[1], size[0]) # matrix convention
self.has_background = False
if isinstance(regions, dict):
# Dict with image-specific region ndarrays
self._regions = regions
if region_labels is not None and isinstance(region_labels, dict) and len(regions) == len(region_labels):
# Check imageids for consistency
for r in regions.keys():
if r not in region_labels.keys():
raise ValueError('Labels not consistent: {:s} not in region_labels'.format(r))
for r in region_labels.keys():
if r not in regions.keys():
raise ValueError('Labels not consistent: {:s} not in regions'.format(r))
self._labels = region_labels
else:
self._labels = {}
for imid in regions:
self._labels[imid] = [str(x+1) for x in range(len(regions[imid]))]
elif isinstance(regions, np.ndarray):
# Single array of regions - assume global region set ('*')
if regions.shape[1:] == self._msize:
self._regions['*'] = regions.astype(bool)
if region_labels is not None and len(region_labels) == regions.shape[0]:
self._labels['*'] = region_labels
else:
self._labels['*'] = [str(x+1) for x in range(regions.shape[0])]
else:
raise ValueError('First argument for RegionSet creation must be ndarray ' +
'(global regions) or dict of ndarrays (image-specific regions)!')
if add_background:
for iid in self._regions.keys():
bgmask = ~self.mask(iid).reshape(1, size[1], size[0])
self._regions[iid] = np.concatenate([self._regions[iid], bgmask], axis=0)
self._labels[iid].append('__BG__')
self.has_background = True
self.info = self._region_metadata()
def __repr__(self):
""" String representation """
r = 'gridfix.RegionSet(label={:s}, size=({:d}, {:d}),\nregions={:s},\nregion_labels={:s})'
return r.format(str(self.label), self.size[0], self.size[1], str(self._regions), str(self._labels))
def __str__(self):
""" Short string representation for printing """
r = '<{:s}{:s}, size={:s}, {:d} region{:s}{:s}, memory={:.1f} kB>'
myclass = str(self.__class__.__name__)
if self.label is not None:
lab = ' ({:s})'.format(self.label)
else:
lab = ''
num_s = ''
num_r = len(self)
if num_r > 1:
num_s = 's'
imid_s = ''
if len(self._regions) > 1 and not self.is_global:
imid_s = ' in {:d} images'.format(len(self._regions))
return r.format(myclass, lab, str(self.size), num_r, num_s, imid_s, self.memory_usage)
def __len__(self):
""" Overload len(RegionSet) to report total number of regions. """
if self.is_global:
return len(self._regions['*'])
else:
num_r = 0
for imid in self._regions:
num_r += len(self._regions[imid])
return num_r
def __getitem__(self, imageid):
""" Bracket indexing returns all region masks for a specified imageid.
If global regions are set ('*'), always return global region set.
"""
return self._select_region(imageid)
def _region_metadata(self):
""" Return DataFrame of region metadata """
info_cols = ['imageid', 'regionid', 'regionno', 'left', 'top', 'right', 'bottom', 'width', 'height', 'area', 'imgfrac']
info = []
if self.is_global:
imageids = ['*']
else:
imageids = self.imageids
for imid in imageids:
reg = self._select_region(imid)
lab = self._select_labels(imid)
for i,l in enumerate(lab):
a = np.argwhere(reg[i])
if a.shape[0] > 0:
(top, left) = a.min(0)[0:2]
(bottom, right) = a.max(0)[0:2]
(width, height) = (right-left+1, bottom-top+1)
area = reg[i][reg[i] > 0].sum()
imgfrac = round(area / (reg[i].shape[0] * reg[i].shape[1]), 4)
else:
                    # Region is empty - shouldn't normally happen, but can occur for the
                    # background region when the explicit regions already cover the full image
(top, left, bottom, right, width, height, area, imgfrac) = (0,) * 8
rmeta = [imid, l, i+1, left, top, right, bottom, width, height, area, imgfrac]
info.append(rmeta)
return DataFrame(info, columns=info_cols)
def _select_region(self, imageid=None):
""" Select region by imageid with consistency check """
if self.is_global:
return(self._regions['*'])
if imageid is not None and imageid in self._regions.keys():
return(self._regions[imageid])
else:
raise ValueError('RegionSet contains image-specific regions, but no valid imageid was specified!')
def _select_labels(self, imageid=None):
""" Select region labels corresponding to _select_region """
if self.is_global:
return(self._labels['*'])
if imageid is not None and imageid in self._regions.keys():
return(self._labels[imageid])
else:
raise ValueError('RegionSet contains image-specific regions, but no valid imageid was specified!')
@property
def is_global(self):
""" Return True if a global map is defined (key '*') """
if '*' in self._regions.keys():
return True
else:
return False
@property
def imageids(self):
""" Return list of imageids for which region maps exist """
if self.is_global:
return []
imids = []
for imid in self._regions.keys():
imids.append(imid)
return imids
@property
def memory_usage(self):
""" Calculate size in memory of all regions combined """
msize = 0.0
for reg in self._regions.keys():
msize += float(self._regions[reg].nbytes) / 1024.0
return msize
def count_map(self, imageid=None, ignore_background=True):
""" Return the number of regions referencing each pixel.
Args:
imageid (str): if set, return map for specified image only
ignore_background (bool): if True, ignore auto-generated background region
Returns:
2d ndarray of image size, counting number of regions for each pixel
"""
cm = np.zeros(self._msize, dtype=int)
if self.is_global:
for reidx, re in enumerate(self._regions['*'][:, ...]):
if ignore_background and self._labels['*'][reidx] == '__BG__':
continue
cm += re.astype(int)
return cm
elif imageid is None:
for imid in self._regions:
if imid == '*':
continue
for reidx, re in enumerate(self._regions[imid][:, ...]):
if ignore_background and self._labels[imid][reidx] == '__BG__':
continue
cm += re.astype(int)
else:
r = self._select_region(imageid)
l = self._select_labels(imageid)
for reidx, re in enumerate(r[:, ...]):
if ignore_background and l[reidx] == '__BG__':
continue
cm += re.astype(int)
return cm
def mask(self, imageid=None, ignore_background=True):
""" Return union mask of all regions or regions for specified image.
Args:
imageid (str): if set, return mask for specified image only
ignore_background (bool): if True, ignore auto-generated background region
Returns:
2d ndarray of image size (bool), True where at least one region
references the corresponding pixel.
"""
return self.count_map(imageid, ignore_background).astype(bool)
def region_map(self, imageid=None, ignore_background=True):
""" Return map of region numbers, global or image-specifid.
Args:
imageid (str): if set, return map for specified image only
ignore_background (bool): if True, ignore auto-generated background region
Returns:
2d ndarray (int), containing the number (ascending) of the last
region referencing the corresponding pixel.
"""
apply_regions = self._select_region(imageid)
apply_labels = self._select_labels(imageid)
tmpmap = np.zeros(self._msize)
for idx, region in enumerate(apply_regions):
if ignore_background and apply_labels[idx] == '__BG__':
continue
tmpmap[region] = (idx + 1)
return tmpmap
def coverage(self, imageid=None, normalize=False, ignore_background=True):
""" Calculates coverage of the total image size as a scalar.
Args:
imageid (str): if set, return coverage for specified image only
normalize (bool): if True, divide global result by number of imageids in set.
ignore_background (bool): if True, ignore auto-generated background region
Returns:
Total coverage as a floating point number.
"""
if imageid is not None:
counts = self.count_map(imageid, ignore_background)
cov = float(counts.sum()) / float(self.size[0] * self.size[1])
return cov
else:
# Global coverage for all imageids
cm = np.zeros(self._msize, dtype=int)
for re in self._regions.keys():
if re == '*':
cm += self.count_map('*', ignore_background)
break
cm += self.count_map(re, ignore_background)
cov = float(cm.sum()) / float(self.size[0] * self.size[1])
if normalize:
cov = cov / len(self)
return cov
def plot(self, imageid=None, values=None, cmap=None, image_only=False, ax=None, alpha=1.0):
""" Plot regions as map of shaded areas with/without corresponding feature values
Args:
imageid (str): if set, plot regions for specified image
values (array-like): one feature value per region
cmap (str): name of matplotlib colormap to use to distinguish regions
image_only (boolean): if True, return only image content without axes
ax (Axes): axes object to draw to, to include result in other figure
alpha (float): opacity of plotted regions (set < 1 to visualize overlap)
Returns:
matplotlib figure object, or None if passed an axis to draw on
"""
apply_regions = self._select_region(imageid)
tmpmap = np.zeros(self._msize)
if ax is not None:
ax1 = ax
else:
fig = plt.figure()
ax1 = fig.add_subplot(1,1,1)
if cmap is None:
if values is None and 'viridis' in plt.colormaps():
cmap = 'viridis'
else:
cmap = 'gray'
if type(cmap) == str:
cmap = plt.get_cmap(cmap)
if alpha < 1.0:
# allow stacking by setting masked values transparent
alpha_cmap = cmap
alpha_cmap.set_bad(alpha=0)
ax1.imshow(tmpmap, cmap=plt.get_cmap('gray'), interpolation='none')
for idx, region in enumerate(apply_regions):
rmap = np.zeros(self._msize)
if values is not None and len(values) == apply_regions.shape[0]:
rmap[region] = values[idx]
ax1.imshow(np.ma.masked_equal(rmap, 0), cmap=alpha_cmap, interpolation='none', alpha=alpha,
vmin=0, vmax=np.nanmax(values))
else:
rmap[region] = idx + 1
ax1.imshow(np.ma.masked_equal(rmap, 0), cmap=alpha_cmap, interpolation='none', alpha=alpha,
vmin=0, vmax=apply_regions.shape[0])
else:
# If no alpha requested, this is much faster but doesn't show overlap
ax1.imshow(tmpmap, cmap=plt.get_cmap('gray'), interpolation='none')
if values is not None and len(values) == apply_regions.shape[0]:
rmap = np.zeros(self._msize)
for idx, region in enumerate(apply_regions):
rmap[region] = values[idx]
ax1.imshow(np.ma.masked_equal(rmap, 0), cmap=cmap, interpolation='none', vmin=0, vmax=np.nanmax(values))
else:
ax1.imshow(np.ma.masked_equal(self.region_map(imageid), 0), cmap=cmap, interpolation='none',
vmin=0, vmax=apply_regions.shape[0])
if image_only:
ax1.axis('off')
else:
t = '{:s}'.format(self.__class__.__name__)
if self.label is not None:
t += ': {:s}'.format(self.label)
if imageid is not None:
t += ' (img: {:s})'.format(imageid)
ax1.set_title(t)
if ax is None and not plt.isinteractive(): # see ImageSet.plot()
return fig
def plot_regions_on_image(self, imageid=None, imageset=None, image_cmap=None, cmap=None, plotcolor=None,
fill=False, alpha=0.4, labels=False, image_only=False, ax=None):
""" Plot region bounding boxes on corresponding image
Args:
imageid (str): if set, plot regions for specified image
imageset (ImageSet): ImageSet object containing background image/map
image_cmap (str): name of matplotlib colormap to use for image
cmap (str): name of matplotlib colormap to use for bounding boxes
plotcolor (color): matplotlib color for bboxes (overrides colormap)
fill (boolean): draw shaded filled rectangles instead of boxes
alpha (float): rectangle opacity (only when fill=True)
labels (boolean): if True, draw text labels next to regions
image_only (boolean): if True, return only image content without axes
ax (Axes): axes object to draw to, to include result in other figure
Returns:
matplotlib figure object, or None if passed an axis to draw on
"""
if imageset is None or imageid not in imageset.imageids:
raise ValueError('To plot regions on top of image, specify ImageSet containing corresponding background image!')
if ax is not None:
ax1 = ax
else:
fig = plt.figure()
ax1 = fig.add_subplot(1,1,1)
if image_cmap is not None:
if type(image_cmap) == str:
image_cmap = plt.get_cmap(image_cmap)
ax1.imshow(imageset[imageid], cmap=image_cmap, interpolation='none')
else:
ax1.imshow(imageset[imageid], interpolation='none')
if cmap is None:
if 'viridis' in plt.colormaps():
cmap = 'viridis'
else:
cmap = 'hsv'
if type(cmap) == str:
boxcolors = plt.get_cmap(cmap)
else:
boxcolors = cmap
cstep = 0
if self.is_global:
rmeta = self.info[self.info.imageid == '*']
else:
rmeta = self.info[self.info.imageid == imageid]
for idx, region in rmeta.iterrows():
if self.has_background and region.regionid == '__BG__':
# Always skip background region when drawing bboxes
continue
if plotcolor is None:
c = boxcolors(cstep/len(rmeta))
else:
c = plotcolor
cstep += 1
if not fill:
ax1.add_patch(Rectangle((region.left, region.top), region.width, region.height, color=c, fill=False, linewidth=2))
else:
ax1.add_patch(Rectangle((region.left, region.top), region.width, region.height, color=c, linewidth=0, alpha=0.7))
if labels:
# Draw text labels with sensible default positions
if region.right > (self.size[0] * .95):
tx = region.right
ha = 'right'
else:
tx = region.left
ha = 'left'
if region.bottom > (self.size[1] * .95):
ty = region.top - 5
else:
ty = region.bottom + 20
ax1.text(tx, ty, region.regionid, horizontalalignment=ha)
if image_only:
ax1.axis('off')
else:
t = '{:s}'.format(self.__class__.__name__)
if self.label is not None:
t += ': {:s}'.format(self.label)
t += ' (img: {:s})'.format(imageid)
ax1.set_title(t)
if ax is None and not plt.isinteractive(): # see ImageSet.plot()
return fig
def apply(self, image, imageid=None, crop=False, ignore_background=True):
""" Apply this RegionSet to a specified image.
Returns a list of the image arrays "cut out" by each region mask, with
non-selected image areas in black. If regionset is not global, _imageid_ needs
to be specified!
Args:
image (ndarray): image array to be segmented.
imageid (str): valid imageid (to select image-specific regions if not a global regionset)
crop (bool): if True, return image cropped to bounding box of selected area
ignore_background (bool): if True, ignore auto-generated background region
Returns:
If crop=False, a list of ndarrays of same size as image, with non-selected areas
zeroed. Else a list of image patch arrays cropped to bounding box size.
"""
slices = []
apply_regions = self._select_region(imageid)
apply_labels = self._select_labels(imageid)
for idx, region in enumerate(apply_regions):
if ignore_background and apply_labels[idx] == '__BG__':
continue
mask = (region == True)
out = np.zeros(image.shape)
out[mask] = image[mask]
if crop:
a = np.argwhere(out)
if a.shape[0] > 0:
(ul_x, ul_y) = a.min(0)[0:2]
(br_x, br_y) = a.max(0)[0:2]
out = out[ul_x:br_x+1, ul_y:br_y+1]
slices.append(out)
return slices
def export_patches(self, image, imageid=None, crop=True, image_format='png',
rescale=False, ignore_background=True):
""" Apply this RegionSet to an image array and save the resulting image patches as files.
Saves an image of each image part "cut out" by each region mask, cropped by default.
If the RegionSet is not global, imageid needs to be specified!
Args:
image (ndarray): image array to be segmented.
imageid (str): imageid (to select image-specific regions if not a global regionset)
crop (bool): if True, return image cropped to bounding box of selected area
image_format (str): image format that PIL understands (will also be used for extension)
rescale (bool): if True, scale pixel values to full 0..255 range
before saving (e.g., for saliency maps)
ignore_background (bool): if True, ignore auto-generated background region
"""
apply_regions = self._select_region(imageid)
apply_labels = self._select_labels(imageid)
imstr = '{:s}_{:s}.{:s}'
for idx, region in enumerate(apply_regions):
if ignore_background and apply_labels[idx] == '__BG__':
continue
mask = (region == True)
out = np.zeros(image.shape)
out[mask] = image[mask]
if crop:
a = np.argwhere(out)
if a.shape[0] > 0:
(ul_x, ul_y) = a.min(0)[0:2]
(br_x, br_y) = a.max(0)[0:2]
out = out[ul_x:br_x+1, ul_y:br_y+1]
if imageid is None or imageid == '*':
imageid = 'image'
if rescale:
                out = (out - out.min()) / (out.max() - out.min()) * 255.0
else:
out *= 255.0
rimg = Image.fromarray(np.array(out, np.uint8))
rimg.save(imstr.format(str(imageid), str(apply_labels[idx]), image_format), image_format)
def export_patches_from_set(self, imageset, crop=True, image_format='png', rescale=False, ignore_background=True):
""" Save all sliced image patches from an ImageSet as image files.
Saves an image of each image part "cut out" by each region mask, cropped by default.
If the RegionSet is not global, only images with valid region masks will be processed.
Args:
imageset (ImageSet): a valid ImageSet containing images to slice
crop (bool): if True, return image cropped to bounding box of selected area
image_format (str): image format that PIL understands (will also be used for extension)
rescale (bool): if True, scale pixel values to full 0..255 range
before saving (e.g., for saliency maps)
ignore_background (bool): if True, ignore auto-generated background region
"""
if not isinstance(imageset, ImageSet):
raise TypeError('First argument must be an ImageSet! To slice a single image, use export_patches().')
for cimg in imageset.imageids:
if not self.is_global and cimg not in self.imageids:
print('Warning: RegionSet contains image-specific regions, but no regions available for {:s}. Skipped.'.format(cimg))
else:
self.export_patches(imageset[cimg], imageid=cimg, crop=crop, image_format=image_format,
rescale=rescale, ignore_background=ignore_background)
def fixated(self, fixations, var='fixated', imageid=None, exclude_first=False, exclude_last=False):
""" Returns visited / fixated regions using data from a Fixations object.
Args:
fixations (Fixations/DataFrame): fixation data to test against regions
var (str): type of fixation mapping variable to calculate (default: 'fixated'):
'fixated': fixation status: 0 - region was not fixated, 1 - fixated (default)
'count': total number of fixations on each region
                'fixid': fixation ID (from input dataset) for first fixation in each region
                'passes': number of viewing passes on each region (initial pass plus refixations)
                'refix': number of refixation passes (passes after the first) on each region
imageid (str): imageid (to select image-specific regions if not a global regionset)
exclude_first (bool): if True, first fixated region will always be returned as NaN
exclude_last (str): controls how to deal with regions receiving the last image fixation:
'never' or False: do not handle the last fixation specially
'always' or True: drop the entire region if it received the last fixation at any time
'pass': exclude viewing pass (one or multiple fixations) that received the last fixation
Returns:
            1D ndarray (float) containing the requested fixation mapping variable per region,
            e.g. the number of fixations (var='count') or 0.0/1.0 fixation status (var='fixated')
"""
if type(exclude_last) == bool:
if exclude_last:
exclude_last = 'always'
elif not exclude_last:
exclude_last = 'never'
apply_regions = self._select_region(imageid)
vis = np.zeros(apply_regions.shape[0], dtype=float)
# Drop out-of-bounds fixations
fix = fixations.data[(fixations.data[fixations._xpx] >= 0) &
(fixations.data[fixations._xpx] < self.size[0]) &
(fixations.data[fixations._ypx] >= 0) &
(fixations.data[fixations._ypx] < self.size[1])]
if len(fix) > 0:
if exclude_first:
first_fix = fixations.data[fixations.data[fixations._fixid] == min(fixations.data[fixations._fixid])]
if len(first_fix) > 1:
print('Warning: you have requested to drop the first fixated region, but more than one ' +
'location ({:d}) matches the lowest fixation ID! Either your fixation ' .format(len(first_fix)) +
'IDs are not unique or the passed dataset contains data from multiple images or conditions.')
if exclude_last != 'never':
last_fix = fixations.data[fixations.data[fixations._fixid] == max(fixations.data[fixations._fixid])]
if len(last_fix) > 1:
print('Warning: you have requested to drop the last fixated region, but more than one ' +
'location ({:d}) matches the highest fixation ID! Either your fixation ' .format(len(last_fix)) +
'IDs are not unique or the passed dataset contains data from multiple images or conditions.')
for (idx, roi) in enumerate(apply_regions):
if exclude_first:
try:
is_first = roi[first_fix[fixations._ypx], first_fix[fixations._xpx]]
if isinstance(is_first, np.ndarray) and np.any(is_first):
vis[idx] = np.nan
continue
elif is_first:
vis[idx] = np.nan
continue
except IndexError:
                        pass # first fixation is out of bounds for image!
if exclude_last == 'always':
try:
is_last = roi[last_fix[fixations._ypx], last_fix[fixations._xpx]]
if isinstance(is_last, np.ndarray) and np.any(is_last):
vis[idx] = np.nan
continue
elif is_last:
vis[idx] = np.nan
continue
except IndexError:
pass # last fixation is out of bounds for image!
fv = roi[fix[fixations._ypx], fix[fixations._xpx]]
if np.any(fv):
rfix = fix[fv] # All fixations on region
if fixations.has_times:
# If fixation data has timing information, ensure to drop fixations
# that began before the current fixation report
bystart = rfix[rfix[fixations._fixstart] >= 0].sort_values(fixations._fixid)
else:
bystart = rfix.sort_values(fixations._fixid)
if len(bystart) > 0:
# Find viewing passes (sets of in-region fixations without leaving region)
                        idxvalid = np.ones(bystart.shape[0], dtype=bool)  # fix indices to keep
idxdiff = bystart[fixations._fixid].diff().reset_index(drop=True)
pass_onsets = idxdiff.index.values[(idxdiff > 1)].tolist()
num_refix = len(pass_onsets)
num_passes = num_refix + 1
if len(pass_onsets) >= 1:
end_first_pass = pass_onsets[0]
else:
end_first_pass = bystart.shape[0]
# If requested, remove pass containing the last fixation
if exclude_last == 'pass':
passes = [0,] + pass_onsets + [len(bystart)+1,]
for pidx in range(0, len(passes)-1):
passfix = bystart.iloc[passes[pidx]:passes[pidx+1], :]
if last_fix.index.values[0] in passfix.index:
# Exclude this and all following passes. Note that no later passes
# should exist unless there is an index error in the fixation data!
idxvalid[passes[pidx]:] = False
break
if np.all(idxvalid == False):
# If no valid fixations remain, drop the whole region (NA)
vis[idx] = np.nan
continue
else:
# Keep only valid fixations for fixation count measures
bystart = bystart.loc[idxvalid, :]
num_refix = np.sum(idxvalid[pass_onsets] == True)
num_passes = num_refix + 1
# Calculate fixation status measures
if var == 'count':
# Number of fixations in region
vis[idx] = bystart.shape[0]
elif var == 'fixated':
# Binary coding of fixation status
vis[idx] = (bystart.shape[0] >= 1.0)
elif var == 'fixid':
# Return first valid fixation ID in region
if bystart.shape[0] >= 1.0:
vis[idx] = bystart.loc[bystart.index[0], fixations._fixid]
else:
vis[idx] = np.nan
elif var == 'passes':
# Total number of fixation passes
vis[idx] = num_passes
elif var == 'refix':
                        # Total number of refixation passes (passes after the first)
vis[idx] = num_refix
else:
# No fixations in region -> fixID should be NA
if var == 'fixid':
vis[idx] = np.nan
return vis
def fixtimes(self, fixations, var='total', imageid=None, exclude_first=False, exclude_last=False):
""" Returns fixation-timing based variable for each region. Default is total viewing time.
Args:
fixations (Fixations/DataFrame): fixation data to test against regions
var (str): type of fixation time variable to calculate (default: 'total'):
'total': total fixation time for each region
'gaze': gaze duration, i.e. total fixation time in first pass
'first': first fixation duration per region
'single': fixation duration if region was fixated exactly once
'tofirst': start time of the first fixation on each region
imageid (str): imageid (to select image-specific regions if not a global regionset)
exclude_first (bool): if True, first fixated region will always be returned as NaN
exclude_last (str): controls how to deal with regions receiving the last image fixation:
'never' or False: do not handle the last fixation specially
'always' or True: drop the entire region if it received the last fixation at any time
'pass': exclude viewing pass (one or multiple fixations) that received the last fixation
Returns:
1D ndarray (float) containing fixation time based dependent variable for each region.
Regions that were never fixated according to criteria will be returned as NaN.
"""
if var not in ['total', 'gaze', 'first', 'single', 'tofirst']:
raise ValueError('Unknown fixation time variable specified: {:s}'.format(var))
if not fixations.has_times:
raise AttributeError('Trying to extract a time-based DV from a dataset without fixation timing information! Specify fixstart=/fixend= when loading fixation data!')
if type(exclude_last) == bool:
if exclude_last:
exclude_last = 'always'
elif not exclude_last:
exclude_last = 'never'
apply_regions = self._select_region(imageid)
ft = np.ones(apply_regions.shape[0], dtype=float) * np.nan
# Drop out-of-bounds fixations
fix = fixations.data[(fixations.data[fixations._xpx] >= 0) &
(fixations.data[fixations._xpx] < self.size[0]) &
(fixations.data[fixations._ypx] >= 0) &
(fixations.data[fixations._ypx] < self.size[1])]
if len(fix) > 0:
if exclude_first:
first_fix = fixations.data[fixations.data[fixations._fixid] == min(fixations.data[fixations._fixid])]
if len(first_fix) > 1:
print('Warning: you have requested to drop the first fixated region, but more than one ' +
'location ({:d}) matches the lowest fixation ID! Either your fixation ' .format(len(first_fix)) +
'IDs are not unique or the passed dataset contains data from multiple images or conditions.')
if exclude_last != 'never':
last_fix = fixations.data[fixations.data[fixations._fixid] == max(fixations.data[fixations._fixid])]
if len(last_fix) > 1:
print('Warning: you have requested to drop the last fixated region, but more than one ' +
'location ({:d}) matches the highest fixation ID! Either your fixation ' .format(len(last_fix)) +
'IDs are not unique or the passed dataset contains data from multiple images or conditions.')
for (idx, roi) in enumerate(apply_regions):
if exclude_first:
try:
is_first = roi[first_fix[fixations._ypx], first_fix[fixations._xpx]]
if isinstance(is_first, np.ndarray) and np.any(is_first):
ft[idx] = np.nan
continue
elif is_first:
ft[idx] = np.nan
continue
except IndexError:
pass # first fixation is out of bounds for image!
if exclude_last == 'always':
# If this region has the last fixation, drop it here (NaN) and move on
try:
is_last = roi[last_fix[fixations._ypx], last_fix[fixations._xpx]]
if isinstance(is_last, np.ndarray) and np.any(is_last):
ft[idx] = np.nan
continue
elif is_last:
ft[idx] = np.nan
continue
except IndexError:
pass # last fixation is out of bounds for image!
fidx = roi[fix[fixations._ypx], fix[fixations._xpx]]
if np.any(fidx):
rfix = fix[fidx] # all fixations in this region
bystart = rfix[rfix[fixations._fixstart] >= 0].sort_values(fixations._fixid)
if len(bystart) > 0:
# Find viewing passes (sets of in-region fixations without leaving region)
                        idxvalid = np.ones(bystart.shape[0], dtype=bool)  # fix indices to keep
idxdiff = bystart[fixations._fixid].diff().reset_index(drop=True)
pass_onsets = idxdiff.index.values[(idxdiff > 1)].tolist()
num_refix = len(pass_onsets)
num_passes = num_refix + 1
if len(pass_onsets) >= 1:
end_first_pass = pass_onsets[0]
else:
end_first_pass = bystart.shape[0]
# If requested, remove pass containing the last fixation
if exclude_last == 'pass':
passes = [0,] + pass_onsets + [len(bystart)+1,]
for pidx in range(0, len(passes)-1):
passfix = bystart.iloc[passes[pidx]:passes[pidx+1], :]
if last_fix.index.values[0] in passfix.index:
# Exclude this and all following passes. Note that no later passes
# should exist unless there is an index error in the fixation data!
idxvalid[passes[pidx]:] = False
break
if np.all(idxvalid == False):
# If no valid fixations remain, drop the whole region (NA)
ft[idx] = np.nan
continue
else:
# Keep only valid fixations for fixation count measures
bystart = bystart.loc[idxvalid, :]
num_refix = np.sum(idxvalid[pass_onsets] == True)
num_passes = num_refix + 1
# Calculate fixation timing measures
if var == 'gaze':
# Gaze duration: total viewing time of first pass only
ft[idx] = sum(bystart.loc[bystart.index[0:end_first_pass], fixations._fixdur])
elif var == 'first':
# First fixation duration
ft[idx] = bystart.loc[bystart.index[0], fixations._fixdur]
elif var == 'single':
# Single fixation duration (=first fixation duration if not refixated in first pass)
ft[idx] = bystart.loc[bystart.index[0], fixations._fixdur]
# If refixated on first pass, set to NaN instead
if end_first_pass > 1:
ft[idx] = np.nan
elif var == 'tofirst':
# Time until first fixation / first fixation onset
ft[idx] = bystart.loc[bystart.index[0], fixations._fixstart]
elif var == 'total':
# Total viewing time of valid fixations
ft[idx] = sum(bystart.loc[:, fixations._fixdur])
return ft
class GridRegionSet(RegionSet):
""" RegionSet defining an n-by-m regular grid covering the full image size.
Attributes:
cells (list): list of bounding box tuples for each cell,
each formatted as (left, top, right, bottom)
gridsize (tuple): grid dimensions as (width, height). If unspecified,
gridfix will try to choose a sensible default.
label (string): optional label to distinguish between RegionSets
"""
def __init__(self, size, gridsize=None, label=None, region_labels=None):
""" Create a new grid RegionSet
Args:
size (tuple): image dimensions, specified as (width, height).
gridsize(tuple): grid dimensions, specified as (width, height).
region_labels (string): list of optional region labels (default: cell#)
"""
if gridsize is None:
gridsize = self._suggest_grid(size)
print('Note: no grid size was specified. Using {:d}x{:d} based on image size.'.format(gridsize[0], gridsize[1]))
(regions, cells) = self._grid(size, gridsize)
RegionSet.__init__(self, size=size, regions=regions, label=label, region_labels=region_labels,
add_background=False) # GridRegionSets are exhaustive, so the 'background' is empty.
self.gridsize = gridsize
# List of region bboxes
self.cells = cells
def __str__(self):
""" Short string representation for printing """
r = '<gridfix.GridRegionSet{:s}, size={:s}, {:d}x{:d} grid, {:d} cell{:s}, memory={:.1f} kB>'
if self.label is not None:
lab = ' ({:s})'.format(self.label)
else:
lab = ''
num_s = ''
num_r = len(self)
if num_r > 1:
num_s = 's'
return r.format(lab, str(self.size), self.gridsize[0], self.gridsize[1], num_r,
num_s, self.memory_usage)
def _suggest_grid(self, size):
""" Suggest grid dimensions based on image size.
Args:
size (tuple): image dimensions, specified as (width, height).
Returns:
Suggested grid size tuple as (width, height).
"""
aspect = Fraction(size[0], size[1])
s_width = aspect.numerator
s_height = aspect.denominator
if s_width < 6:
s_width *= 2
s_height *= 2
return (s_width, s_height)
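        # Worked example (added): for an 800x600 image, Fraction(800, 600) reduces to 4/3;
        # since 4 < 6, both terms are doubled and the suggested grid is 8x6.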
def _grid(self, size, gridsize):
""" Build m-by-n (width,height) grid as 3D nparray.
Args:
size (tuple): image dimensions, specified as (width, height).
gridsize(tuple): grid dimensions, specified as (width, height).
Returns:
tuple containing the grid regions and their bounding box coordinates
as (grid, cells):
grid (numpy.ndarray): regions for RegionSet creation
cells (list): list of bounding box tuples for each cell,
each formatted as (left, top, right, bottom)
"""
(width, height) = size
_msize = (size[1], size[0])
cell_x = int(width / gridsize[0])
cell_y = int(height / gridsize[1])
n_cells = int(gridsize[0] * gridsize[1])
grid = np.zeros((n_cells,) + _msize, dtype=bool)
cells = []
        # Sanity check: raise an error if image dimensions are not cleanly divisible by the grid
if width % gridsize[0] > 0 or height % gridsize[1] > 0:
e = 'Error: image dimensions not cleanly divisible by grid! image=({:d}x{:d}), grid=({:d}x{:d})'
raise ValueError(e.format(width, height, gridsize[0], gridsize[1]))
# Create a mask of 1s/True for each cell
cellno = 0
for y_es in range(0, height, cell_y):
for x_es in range(0, width, cell_x):
mask = np.zeros(_msize, dtype=bool)
mask[y_es:y_es + cell_y, x_es:x_es + cell_x] = True
grid[cellno,...] = mask
cells.append((x_es, y_es, x_es + cell_x, y_es + cell_y))
cellno += 1
return (grid, cells)
class BBoxRegionSet(RegionSet):
""" RegionSet based on rectangular bounding boxes.
Attributes:
cells (list): list of bounding box tuples for each cell,
each formatted as (left, top, right, bottom)
label (string): optional label to distinguish between RegionSets
from_file (string): filename in case regions were loaded from file
padding (tuple): padding in pixels as ('left', 'top', 'right', 'bottom')
"""
def __init__(self, size, bounding_boxes, label=None, region_labels=None, sep='\t',
imageid='imageid', regionid='regionid', bbox_cols=('x1', 'y1', 'x2', 'y2'),
padding=0, add_background=False, coord_format=None):
""" Create new BBoxRegionSet
Args:
size (tuple): image dimensions, specified as (width, height).
bounding_boxes: one of the following:
name of a text/CSV file with columns ([imageid], [regionid], x1, y1, x2, y2)
list of 4-tuples OR 2D ndarray with columns (x1, y1, x2, y2) for global bboxes
region_labels (str): list of optional region labels if bounding_boxes is a global array/list
imageid (str): name of imageid column in input file (if not present, bboxes will be treated as global)
regionid (str): name of regionid column in input file
sep (str): separator to use when reading files
bbox_cols: tuple of column names for ('left', 'top', 'right', 'bottom')
padding (int): optional bbox padding in pixels as ('left', 'top', 'right', 'bottom'),
or a single integer to specify equal padding on all sides
add_background (bool): if True, this creates a special region to capture all
fixations that don't fall on an explicit region ("background" fixations)
coord_format (str): Defines how input x and y coordinates are interpreted:
'oneindexed': coordinates start at 1, e.g. 1..100 for a 100px box
'zeroindexed': coordinates start at 0, e.g. 0..99 for a 100px box
'apple': coordinates start at 0, but end at <size>, e.g. 0..100 for a 100px box,
in this convention, the pixels sit "between" coordinate values
"""
self.input_file = None
self.input_df = None
self._imageid = imageid
self._regionid = regionid
self._cols = bbox_cols
if coord_format is None:
err = 'No coordinate format specified! Please provide coord_format argument:\n'
err += '"oneindexed": coordinates start at 1, e.g. 1..100 for a 100px box\n'
err += '"zeroindexed": coordinates start at 0, e.g. 0..99 for a 100px box\n'
err += '"apple": coordinates start at 0, but end at <size>, e.g. 0..100 for a 100px box.'
raise ValueError(err)
if type(padding) == int:
self.padding = (padding,) * 4
else:
self.padding = padding
if isinstance(bounding_boxes, DataFrame):
# Passed a DataFrame
bbox = bounding_boxes
elif type(bounding_boxes) == str:
# Passed a file name
try:
bbox = read_table(bounding_boxes, sep=sep)
self.input_file = bounding_boxes
except:
raise ValueError('String argument supplied to BBoxRegionSet, but not a valid CSV file!')
else:
# Try array type
try:
bbox = | DataFrame(bounding_boxes, columns=['x1', 'y1', 'x2', 'y2']) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
.. module:: geek
:platform: Unix, Windows
:synopsis: GEneralised Elementary Kinetics
.. moduleauthor:: geek team
[---------]
Copyright 2018 Laboratory of Computational Systems Biotechnology (LCSB),
Ecole Polytechnique Federale de Lausanne (EPFL), Switzerland
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import time as tim
import numpy as np
import random
from pandas import DataFrame, read_csv
AVOGADRO_NUMBER = 6e23
"""
This script provides a validation of GEEK and Brownian reaction dynamics by comparing their results with
other approaches for uni-molecular and bi-molecular reactions.
We show that the GEEK framework is able to capture the behaviour observed with different simulation techniques.
"""
"""
Diffusion controlled conditions
"""
# gamma = 4*pi*4e-9*200e-12*1000*6e23 ~ 6e9
parameters_diff_lim = {
'K_eq': 50e-6, # M
'k_fwd': 5e9, # 1/Ms
'r_A': 2e-9, # m
'r_B': 2e-9, # m
'r_C': 3e-9, # m
'D_A': 500e-12, # m^2/s
'D_B': 500e-12, # m^2/s
'D_C': 350e-12, # m^2/s
'm_A': 10, # kDa
'm_B': 10, # kDa
'm_C': 20, # kDa
'A_0': 50e-6, # M
'B_0': 50e-6, # M
'C_0': 0, # M
'volume': 1e-18, # L
't_max': 1e-5, # s
'dt': 1.e-9, # s
'mu_mass': 21.1, # s
'du': 1e9 # kbT
}
"""
Reaction limited conditions
"""
# gamma = 4*pi*4e-9*200e-12*1000*6e23 ~ 6e9
parameters_reaction_lim = {
'K_eq': 50e-6, # M
'k_fwd': 5e7, # 1/Ms
'r_A': 2e-9, # m
'r_B': 2e-9, # m
'r_C': 3e-9, # m
'D_A': 500e-12, # m^2/s
'D_B': 500e-12, # m^2/s
'D_C': 350e-12, # m^2/s
'm_A': 10, # kDa
'm_B': 10, # kDa
'm_C': 20, # kDa
'A_0': 50e-6, # M
'B_0': 50e-6, # M
'C_0': 0, # M
'volume': 1e-18, # L
't_max': 1e-3, # s
'dt': 1e-9, # s
'mu_mass': 21.1, # s
'du': 1e9 # kbT
}
"""
Helper functions
"""
def mass2rad(mass):
radius = 0.0515*(mass*1000)**(0.393) # Mass in kDa
return radius #Radius in nm
def rad2mass(radius):
M = (radius/0.0515)**(1./0.393)/1000.0 #Radius in nm
return M
def rad2diff(radius):
viscosity = 0.7e-3 # Pa s
temperatur = 310.15 # K
kbT = temperatur*1.38064852e-23
D = kbT/(6*np.pi*viscosity*radius) #Radius in m
return D # in m^2/s
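# Worked example (added): for a 2 nm radius, D = kbT / (6*pi*eta*r)
# ≈ (310.15 K * 1.381e-23 J/K) / (6*pi * 0.7e-3 Pa*s * 2e-9 m) ≈ 1.6e-10 m^2/s.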
from numpy import pi,sqrt,exp
from scipy.special import erfc
def calc_effective_volume(diffusion, dist, delta_t):
""" Normalization factor for Brownian dyanamics simulation """
# Bi mol rxn scaling
sig_2 = 4.0 * delta_t * diffusion
sig = sqrt(sig_2)
exp_4_r_sig = exp(-4.0 * dist ** 2 / sig_2)
    # Expression
A = (sig ** 3 - 2.0 * dist ** 2 * sig) * exp_4_r_sig
B = 6.0 * dist ** 2 * sig - sig ** 3 + 4.0 * sqrt(pi) * dist ** 3 * erfc(2.0 * dist / sig)
effective_volume = 4.0 * pi * (A + B) / 12.0 / sqrt(pi)
return effective_volume
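# Illustrative sketch (added): for a bimolecular A + B encounter, the relevant inputs are the
# relative diffusion coefficient D_A + D_B and the contact distance r_A + r_B
# (values below come from parameters_diff_lim defined above).
#
# >>> D_AB = parameters_diff_lim['D_A'] + parameters_diff_lim['D_B']      # 1e-9 m^2/s
# >>> dist = parameters_diff_lim['r_A'] + parameters_diff_lim['r_B']      # 4e-9 m
# >>> v_eff = calc_effective_volume(D_AB, dist, parameters_diff_lim['dt'])  # m^3 per time step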
"""
Simulation functions
"""
def geek_simulations_hardsphere(parameters, sim_type, phi= 0.0, seed=1):
from geek.analysis import geek_regression
if sim_type == 'diff':
df = read_csv('../data/validation_diffusion_lim_hardsphere.csv')
elif sim_type == 'react':
df = read_csv('../data/validation_reaction_lim_hardsphere.csv')
else:
raise ValueError('{} is not a valid input'.format(sim_type))
# Reference concentrations
reference_concentrations = [50e-6,]*3
concentrations = ['A_concentration',
'B_concentration',
'C_concentration',]
this_df = df[(df['volume_fraction'] == phi)]
# Extract the GEEK parameters from Linear regression
k1_fwd_params = geek_regression(this_df,
concentrations,
reference_concentrations,
'k1_fwd_relative',
verbose=True)
k1_bwd_params = geek_regression(this_df,
concentrations,
reference_concentrations,
'k1_bwd_relative',
verbose=True)
random.seed(seed)
#Map to parameter dict
param_dict = {
'k_1f0': parameters['k_fwd'],
'k_1b0': parameters['k_fwd']*parameters['K_eq'],
'beta_1f': k1_fwd_params['beta_lb'] + (k1_fwd_params['beta_ub'] - k1_fwd_params['beta_lb']) * random.random(),
'alpha_A_1f': k1_fwd_params['alpha_A_concentration_lb'] + (
k1_fwd_params['alpha_A_concentration_ub'] - k1_fwd_params[
'alpha_A_concentration_lb']) * random.random(),
'alpha_B_1f': k1_fwd_params['alpha_B_concentration_lb'] + (
k1_fwd_params['alpha_B_concentration_ub'] - k1_fwd_params[
'alpha_B_concentration_lb']) * random.random(),
'alpha_C_1f': k1_fwd_params['alpha_C_concentration_lb'] + (
k1_fwd_params['alpha_C_concentration_ub'] - k1_fwd_params[
'alpha_C_concentration_lb']) * random.random(),
'beta_1b': k1_bwd_params['beta_lb'] + (k1_bwd_params['beta_ub'] - k1_bwd_params['beta_lb']) * random.random(),
'alpha_A_1b': k1_bwd_params['alpha_A_concentration_lb'] + (
k1_bwd_params['alpha_A_concentration_ub'] - k1_bwd_params[
'alpha_A_concentration_lb']) * random.random(),
'alpha_B_1b': k1_bwd_params['alpha_B_concentration_lb'] + (
k1_bwd_params['alpha_B_concentration_ub'] - k1_bwd_params[
'alpha_B_concentration_lb']) * random.random(),
'alpha_C_1b': k1_bwd_params['alpha_C_concentration_lb'] + (
k1_bwd_params['alpha_C_concentration_ub'] - k1_bwd_params[
'alpha_C_concentration_lb']) * random.random(),
'A0': reference_concentrations[0],
'B0': reference_concentrations[1],
'C0': reference_concentrations[2],
}
"""
Declare ODE-Problem
"""
from sympy import symbols
from sympy import exp as sym_exp
# Variables
A, B, C = symbols(['A', 'B', 'C'])
variables = [A, B, C,]
# Parameters
k_1f0, k_1b0, = symbols(['k_1f0', 'k_1b0',] )
# Define symbols for the GEEK parameters
beta_1f, beta_1b, = symbols(['beta_1f', 'beta_1b',] )
alpha_A_1f, alpha_A_1b, = symbols(['alpha_A_1f', 'alpha_A_1b',])
alpha_B_1f, alpha_B_1b, = symbols(['alpha_B_1f', 'alpha_B_1b',])
alpha_C_1f, alpha_C_1b, = symbols(['alpha_C_1f', 'alpha_C_1b',])
A0,B0,C0 = symbols(['A0', 'B0', 'C0'])
ode_params = [k_1f0, k_1b0,
beta_1f, beta_1b ,
alpha_A_1b, alpha_A_1f ,
alpha_B_1b, alpha_B_1f,
alpha_C_1f, alpha_C_1b,
A0, B0, C0]
# Reactions
geek_reactions = {
'r_1f': k_1f0 * A * B * sym_exp(beta_1f) * (A / A0) ** alpha_A_1f * (B / B0) ** alpha_B_1f * (
C / C0) ** alpha_C_1f,
'r_1b': k_1b0 * C * sym_exp(beta_1b) * (A / A0) ** alpha_A_1b * (B / B0) ** alpha_B_1b * (
C / C0) ** alpha_C_1b
}
#Expressions
expressions = {
A: geek_reactions['r_1b'] - geek_reactions['r_1f'],
B: geek_reactions['r_1b'] - geek_reactions['r_1f'],
C: geek_reactions['r_1f'] - geek_reactions['r_1b'],
}
from geek.analysis.ode_function import OdeFun
fun = OdeFun(variables,ode_params,expressions)
from scipy.integrate import ode
r = ode(fun).set_integrator('vode', method='bdf')
eps = 1e-3
A0 = round(parameters['A_0']*AVOGADRO_NUMBER*parameters['volume'])/AVOGADRO_NUMBER/parameters['volume']
B0 = round(parameters['B_0']*AVOGADRO_NUMBER*parameters['volume'])/AVOGADRO_NUMBER/parameters['volume']
C0 = round(parameters['C_0']*AVOGADRO_NUMBER*parameters['volume'])/AVOGADRO_NUMBER/parameters['volume']
print(A0,B0,C0)
y0 = [A0 * (1. - eps),
B0 * (1. - eps),
A0 * eps]
t0 = 0.0
r.set_initial_value(y0, t0).set_f_params(param_dict)
data = []
scale = parameters['volume']*AVOGADRO_NUMBER
while r.successful() and r.t < parameters['t_max']:
data.append( np.append(r.t + parameters['t_max']/1000.0,
r.integrate(r.t + parameters['t_max']/1000.0) * scale))
data = np.array(data)
df = DataFrame(data=data, columns = ['time', 'A', 'B', 'C'])
return df
def openbread_simulation(parameters, phi= 0.0, seed=1):
from openbread.core import Species,ParticleModel,Reaction
# Construct species in the model
A = Species(name='A',
diffusion_constant=parameters['D_A'],
collision_radius=parameters['r_A'],
mass=parameters['m_A'],)
B = Species(name='B',
diffusion_constant=parameters['D_B'],
collision_radius=parameters['r_B'],
mass=parameters['m_B'],)
C = Species(name='C',
diffusion_constant=parameters['D_C'],
collision_radius=parameters['r_C'],
mass=parameters['m_C'],)
species = [A, B, C, ]
# Define microscopic reaction rate constants:
k1f = parameters['k_fwd'] # 1/Ms
k1b = parameters['k_fwd'] * parameters['K_eq'] # 1/s
    # Setup particle simulation environment
volume = parameters['volume'] # (0.1 mum)^3 in L
medium = ParticleModel.Medium(viscosity=0.7e-3, # Pa s
temperatur=310.15)
crowding = ParticleModel.Crowding(volume_fraction=phi,
mu=np.log(parameters['mu_mass']),
sigma=0,
max_size=3e-3) # For this model the max size is 3nm
particle_model = ParticleModel(medium,
crowding,
volume)
particle_model.add_reaction(Reaction('r1f', {A: -1, B: -1, C: 1}, k1f))
particle_model.add_reaction(Reaction('r1b', {A: 1, B: 1, C: -1}, k1b))
# Define initial conditions
particle_model.initial_conditions['A'] = parameters['A_0']
particle_model.initial_conditions['B'] = parameters['B_0']
particle_model.initial_conditions['C'] = parameters['C_0']
Nt = parameters['t_max']/parameters['dt']
result = particle_model.simulate(dt=parameters['dt'],
max_time=parameters['t_max'],
log_step=max(int(Nt/1000),1),
n_sample=0,
random_seed=seed,
is_hardsphere=True,
is_constant_state=False,
t_equlibriate=0.0)
# Write in a data frame
data = np.array([result.time, result.species['A'], result.species['B'], result.species['C'] ])
df = DataFrame(data=data.T, columns = ['time', 'A', 'B', 'C'])
return df
def crowder_free_simulation(parameters, phi=0.0, seed=1):
from paper.crwdfree.crowder_free_simulation import crowder_free_simulation_method, particle, check_collision
result = crowder_free_simulation_method(parameters, phi, seed)
# Write in a data frame
data = np.array([result.time, result.species['A'], result.species['B'], result.species['C'] ])
df = DataFrame(data=data.T, columns = ['time', 'A', 'B', 'C'])
return df
def geek_simulations_crwderfree(parameters, sim_type, phi= 0.0, seed=1):
from geek.analysis import geek_regression
if sim_type == 'diff':
df = | read_csv('../data/validation_diffusion_lim_crowderfree.csv') | pandas.read_csv |
# This file is part of Patsy
# Copyright (C) 2011-2015 <NAME> <<EMAIL>>
# See file LICENSE.txt for license information.
# This file defines the core design matrix building functions.
# These are made available in the patsy.* namespace
__all__ = ["design_matrix_builders", "build_design_matrices"]
import itertools
import six
import numpy as np
from patsy import PatsyError
from patsy.categorical import (guess_categorical,
CategoricalSniffer,
categorical_to_int)
from patsy.util import (atleast_2d_column_default,
have_pandas, asarray_or_pandas,
safe_issubdtype)
from patsy.design_info import (DesignMatrix, DesignInfo,
FactorInfo, SubtermInfo)
from patsy.redundancy import pick_contrasts_for_term
from patsy.eval import EvalEnvironment
from patsy.contrasts import code_contrast_matrix, Treatment
from patsy.compat import OrderedDict
from patsy.missing import NAAction
if have_pandas:
import pandas
class _MockFactor(object):
def __init__(self, name="MOCKMOCK"):
self._name = name
def eval(self, state, env):
return env["mock"]
def name(self):
return self._name
def _max_allowed_dim(dim, arr, factor):
if arr.ndim > dim:
msg = ("factor '%s' evaluates to an %s-dimensional array; I only "
"handle arrays with dimension <= %s"
% (factor.name(), arr.ndim, dim))
raise PatsyError(msg, factor)
def test__max_allowed_dim():
import pytest
f = _MockFactor()
_max_allowed_dim(1, np.array(1), f)
_max_allowed_dim(1, np.array([1]), f)
pytest.raises(PatsyError, _max_allowed_dim, 1, np.array([[1]]), f)
pytest.raises(PatsyError, _max_allowed_dim, 1, np.array([[[1]]]), f)
_max_allowed_dim(2, np.array(1), f)
_max_allowed_dim(2, np.array([1]), f)
_max_allowed_dim(2, np.array([[1]]), f)
pytest.raises(PatsyError, _max_allowed_dim, 2, np.array([[[1]]]), f)
def _eval_factor(factor_info, data, NA_action):
factor = factor_info.factor
result = factor.eval(factor_info.state, data)
# Returns either a 2d ndarray, or a DataFrame, plus is_NA mask
if factor_info.type == "numerical":
result = atleast_2d_column_default(result, preserve_pandas=True)
_max_allowed_dim(2, result, factor)
if result.shape[1] != factor_info.num_columns:
raise PatsyError("when evaluating factor %s, I got %s columns "
"instead of the %s I was expecting"
% (factor.name(),
factor_info.num_columns,
result.shape[1]),
factor)
if not safe_issubdtype(np.asarray(result).dtype, np.number):
raise PatsyError("when evaluating numeric factor %s, "
"I got non-numeric data of type '%s'"
% (factor.name(), result.dtype),
factor)
return result, NA_action.is_numerical_NA(result)
# returns either a 1d ndarray or a pandas.Series, plus is_NA mask
else:
assert factor_info.type == "categorical"
result = categorical_to_int(result, factor_info.categories, NA_action,
origin=factor_info.factor)
assert result.ndim == 1
return result, np.asarray(result == -1)
def test__eval_factor_numerical():
import pytest
naa = NAAction()
f = _MockFactor()
fi1 = FactorInfo(f, "numerical", {}, num_columns=1, categories=None)
assert fi1.factor is f
eval123, is_NA = _eval_factor(fi1, {"mock": [1, 2, 3]}, naa)
assert eval123.shape == (3, 1)
assert np.all(eval123 == [[1], [2], [3]])
assert is_NA.shape == (3,)
assert np.all(~is_NA)
pytest.raises(PatsyError, _eval_factor, fi1, {"mock": [[[1]]]}, naa)
pytest.raises(PatsyError, _eval_factor, fi1, {"mock": [[1, 2]]}, naa)
pytest.raises(PatsyError, _eval_factor, fi1, {"mock": ["a", "b"]}, naa)
pytest.raises(PatsyError, _eval_factor, fi1, {"mock": [True, False]}, naa)
fi2 = FactorInfo(_MockFactor(), "numerical",
{}, num_columns=2, categories=None)
eval123321, is_NA = _eval_factor(fi2,
{"mock": [[1, 3], [2, 2], [3, 1]]},
naa)
assert eval123321.shape == (3, 2)
assert np.all(eval123321 == [[1, 3], [2, 2], [3, 1]])
assert is_NA.shape == (3,)
assert np.all(~is_NA)
pytest.raises(PatsyError, _eval_factor, fi2, {"mock": [1, 2, 3]}, naa)
pytest.raises(PatsyError, _eval_factor, fi2, {"mock": [[1, 2, 3]]}, naa)
ev_nan, is_NA = _eval_factor(fi1, {"mock": [1, 2, np.nan]},
NAAction(NA_types=["NaN"]))
assert np.array_equal(is_NA, [False, False, True])
ev_nan, is_NA = _eval_factor(fi1, {"mock": [1, 2, np.nan]},
NAAction(NA_types=[]))
assert np.array_equal(is_NA, [False, False, False])
if have_pandas:
eval_ser, _ = _eval_factor(fi1,
{"mock":
pandas.Series([1, 2, 3],
index=[10, 20, 30])},
naa)
assert isinstance(eval_ser, pandas.DataFrame)
assert np.array_equal(eval_ser, [[1], [2], [3]])
assert np.array_equal(eval_ser.index, [10, 20, 30])
eval_df1, _ = _eval_factor(fi1,
{"mock":
pandas.DataFrame([[2], [1], [3]],
index=[20, 10, 30])},
naa)
assert isinstance(eval_df1, pandas.DataFrame)
assert np.array_equal(eval_df1, [[2], [1], [3]])
assert np.array_equal(eval_df1.index, [20, 10, 30])
eval_df2, _ = _eval_factor(fi2,
{"mock":
pandas.DataFrame([[2, 3], [1, 4], [3, -1]],
index=[20, 30, 10])},
naa)
assert isinstance(eval_df2, pandas.DataFrame)
assert np.array_equal(eval_df2, [[2, 3], [1, 4], [3, -1]])
assert np.array_equal(eval_df2.index, [20, 30, 10])
pytest.raises(PatsyError,
_eval_factor, fi2,
{"mock": | pandas.Series([1, 2, 3], index=[10, 20, 30]) | pandas.Series |
"""
This script loads Google and Apple Mobility reports, builds cleaned reports in different formats and builds merged files from both sources.
Original data:
- Google Community Mobility reports: https://www.google.com/covid19/mobility/
- Apple Mobility Trends reports: https://www.apple.com/covid19/mobility
"""
import io
import os
import datetime
import requests
import urllib.request
import time
from bs4 import BeautifulSoup
import re
import json
import pandas as pd
def get_google_link():
'''Get link of Google Community Mobility report file
Returns:
link (str): link of Google Community report file
'''
# get webpage source
url = 'https://www.google.com/covid19/mobility/'
response = requests.get(url)
soup = BeautifulSoup(response.text, "html.parser")
csv_tag = soup.find('a', {"class": "icon-link"})
link = csv_tag['href']
return link
def download_google_reports(directory="google_reports"):
'''Download Google Community Mobility report in CSV format
Args:
directory: directory to which CSV report will be downloaded
Returns:
new_files (bool): flag indicating whether or not new files have been downloaded
'''
new_files = False
    # create directory if it doesn't exist
if not os.path.exists(directory):
os.makedirs(directory)
# download CSV file
link = get_google_link()
file_name = "Global_Mobility_Report.csv"
path = os.path.join(directory, file_name)
if not os.path.isfile(path):
new_files = True
urllib.request.urlretrieve(link, path)
else:
path_new = os.path.join(directory, file_name + "_new")
urllib.request.urlretrieve(link, path_new)
if os.path.getsize(path) == os.path.getsize(path_new):
os.remove(path_new)
else:
new_files = True
os.remove(path)
os.rename(path_new, path)
if not new_files:
print('Google: No updates')
else:
print('Google: Update available')
return new_files
def build_google_report(
source="Global_Mobility_Report.csv",
destination="mobility_report.csv",
report_type="regions"):
'''Build cleaned Google report for worldwide or for some country (currently only for the US)
Args:
source: location of the raw Google CSV report
destination: destination file path
report_type: two options available: "regions" - report for worldwide, "US" - report for the US
'''
df = pd.read_csv(source, low_memory=False)
df = df.drop(columns=['country_region_code'])
df = df.rename(
columns={
'country_region': 'country',
'retail_and_recreation_percent_change_from_baseline': 'retail',
'grocery_and_pharmacy_percent_change_from_baseline': 'grocery and pharmacy',
'parks_percent_change_from_baseline': 'parks',
'transit_stations_percent_change_from_baseline': 'transit stations',
'workplaces_percent_change_from_baseline': 'workplaces',
'residential_percent_change_from_baseline': 'residential'})
if report_type == "regions":
df = df[df['sub_region_2'].isnull()]
df = df.drop(columns=['sub_region_2'])
df = df.rename(columns={'sub_region_1': 'region'})
df['region'].fillna('Total', inplace=True)
elif report_type == "US":
df = df[(df['country'] == "United States")]
df = df.drop(columns=['country'])
df = df.rename(
columns={
'sub_region_1': 'state',
'sub_region_2': 'county'})
df['state'].fillna('Total', inplace=True)
df['county'].fillna('Total', inplace=True)
df.to_csv(destination, index=False)
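# Illustrative sketch (added; paths are hypothetical): fetch the raw Google report and build
# the cleaned worldwide and US variants from it.
#
# if download_google_reports():
#     raw = os.path.join("google_reports", "Global_Mobility_Report.csv")
#     build_google_report(source=raw,
#                         destination=os.path.join("google_reports", "mobility_report_countries.csv"),
#                         report_type="regions")
#     build_google_report(source=raw,
#                         destination=os.path.join("google_reports", "mobility_report_US.csv"),
#                         report_type="US")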
def get_apple_link():
'''Get link of Apple Mobility Trends report file
Returns:
link (str): link of Apple Mobility Trends report file
'''
# get link via API
json_link = "https://covid19-static.cdn-apple.com/covid19-mobility-data/current/v3/index.json"
with urllib.request.urlopen(json_link) as url:
json_data = json.loads(url.read().decode())
link = "https://covid19-static.cdn-apple.com" + \
json_data['basePath'] + json_data['regions']['en-us']['csvPath']
return link
def download_apple_report(directory="apple_reports"):
'''Download Apple Mobility Trends report in CSV
Args:
directory: directory to which CSV report will be downloaded
Returns:
new_files (bool): flag indicating whether or not a new file has been downloaded
'''
new_files = False
if not os.path.exists(directory):
os.makedirs(directory)
link = get_apple_link()
file_name = "applemobilitytrends.csv"
path = os.path.join(directory, file_name)
if not os.path.isfile(path):
new_files = True
urllib.request.urlretrieve(link, path)
else:
path_new = os.path.join(directory, file_name + "_new")
urllib.request.urlretrieve(link, path_new)
if os.path.getsize(path) == os.path.getsize(path_new):
os.remove(path_new)
else:
new_files = True
os.remove(path)
os.rename(path_new, path)
if not new_files:
print('Apple: No updates')
else:
print('Apple: Update available')
return new_files
def build_apple_report(
source=os.path.join(
'apple_reports',
"applemobilitytrends.csv"),
destination=os.path.join(
'apple_reports',
"apple_mobility_report.csv"),
report_type="regions"):
'''Build cleaned Apple report (transform dates from columns to rows, add country names for subregions and cities)
for worldwide or for some country (currently only for the US)
Args:
source: location of the raw Apple CSV report
destination: destination file path
report_type: two options available: "regions" - report for worldwide, "US" - report for the US
'''
apple = pd.read_csv(source)
apple = apple.drop(columns=['alternative_name'])
apple['country'] = apple.apply(
lambda x: x['region'] if x['geo_type'] == 'country/region' else x['country'],
axis=1)
if report_type == 'regions':
apple = apple[apple.geo_type != 'county']
apple['sub-region'] = apple.apply(lambda x: 'Total' if x['geo_type'] == 'country/region' else (
x['region'] if x['geo_type'] == 'sub-region' else x['sub-region']), axis=1)
apple['subregion_and_city'] = apple.apply(
lambda x: 'Total' if x['geo_type'] == 'country/region' else x['region'], axis=1)
apple = apple.drop(columns=['region'])
apple['sub-region'] = apple['sub-region'].fillna(
apple['subregion_and_city'])
apple = apple.melt(
id_vars=[
'geo_type',
'subregion_and_city',
'sub-region',
'transportation_type',
'country'],
var_name='date')
apple['value'] = apple['value'] - 100
apple = apple.pivot_table(
index=[
"geo_type",
"subregion_and_city",
"sub-region",
"date",
"country"],
columns='transportation_type').reset_index()
apple.columns = [t + (v if v != "value" else "")
for v, t in apple.columns]
apple = apple[['country', 'sub-region', 'subregion_and_city',
'geo_type', 'date', 'driving', 'transit', 'walking']]
apple = apple.sort_values(by=['country',
'sub-region',
'subregion_and_city',
'date']).reset_index(drop=True)
elif report_type == "US":
apple = apple[apple.country == "United States"].drop(columns=[
'country'])
apple['sub-region'] = apple['sub-region'].fillna(
apple['region']).replace({"United States": "Total"})
apple['region'] = apple.apply(lambda x: x['region'] if (
x['geo_type'] == 'city' or x['geo_type'] == 'county') else 'Total', axis=1)
apple = apple.rename(
columns={
'sub-region': 'state',
'region': 'county_and_city'})
apple = apple.melt(
id_vars=[
'geo_type',
'state',
'county_and_city',
'transportation_type'],
var_name='date')
apple['value'] = apple['value'] - 100
apple = apple.pivot_table(
index=[
'geo_type',
'state',
'county_and_city',
'date'],
columns='transportation_type').reset_index()
apple.columns = [t + (v if v != "value" else "")
for v, t in apple.columns]
apple = apple[['state', 'county_and_city', 'geo_type',
'date', 'driving', 'transit', 'walking']]
apple = apple.sort_values(
by=['state', 'county_and_city', 'geo_type', 'date']).reset_index(drop=True)
apple.to_csv(destination, index=False)
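# Illustrative sketch (added; the US destination path is hypothetical):
#
# if download_apple_report():
#     build_apple_report(report_type="regions")
#     build_apple_report(report_type="US",
#                        destination=os.path.join("apple_reports", "apple_mobility_report_US.csv"))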
def build_summary_report(
apple_source=os.path.join(
'apple_reports',
"applemobilitytrends.csv"),
google_source=os.path.join(
"google_reports",
"Global_Mobility_Report.csv"),
destination=os.path.join(
"summary_reports",
"summary_report.csv")):
'''Build a merged report from Google and Apple data
Args:
apple_source: location of the raw Apple CSV report
google_source: location of the raw Google CSV report
destination: destination file path
'''
# preprocess apple data
apple = pd.read_csv(apple_source)
apple['country'] = apple.apply(
lambda x: x['region'] if x['geo_type'] == 'country/region' else x['country'],
axis=1)
apple['sub_region_1'] = apple.apply(
lambda x: 'Total' if x['geo_type'] == 'country/region' else (
x['region'] if x['geo_type'] == 'city' or x['geo_type'] == 'sub-region' else (
x['sub-region'] if x['geo_type'] == 'county' else None)), axis=1)
apple['sub_region_2'] = apple.apply(
lambda x: x['region'] if x['geo_type'] == 'county' else 'Total', axis=1)
apple = apple.drop(
columns=[
'alternative_name',
'geo_type',
'region',
'sub-region'])
apple = apple.melt(
id_vars=[
'country',
'sub_region_1',
'sub_region_2',
'transportation_type'],
var_name='date')
apple['value'] = apple['value'] - 100
apple = apple.pivot_table(
index=[
'country',
'sub_region_1',
'sub_region_2',
'date'],
columns='transportation_type').reset_index()
apple.columns = [t + (v if v != "value" else "")for v, t in apple.columns]
# convert Apple countries and subregions to Google names
country_AtoG_file = os.path.join(
'auxiliary_data', 'country_Apple_to_Google.csv')
subregions_AtoG_file = os.path.join(
'auxiliary_data', 'subregions_Apple_to_Google.csv')
if os.path.isfile(country_AtoG_file):
country_AtoG = pd.read_csv(country_AtoG_file, index_col=0)
else:
country_AtoG = None
if os.path.isfile(subregions_AtoG_file):
subregions_AtoG = pd.read_csv(subregions_AtoG_file, index_col=0)
else:
subregions_AtoG = None
apple['country'] = apple.apply(lambda x: country_AtoG.loc[x['country'], 'country_google'] if (
country_AtoG is not None and x['country'] in country_AtoG.index) else x['country'], axis=1)
apple['sub_region_1'] = apple.apply(lambda x: subregions_AtoG.loc[x['sub_region_1'], 'subregion_Google'] if (
subregions_AtoG is not None and x['sub_region_1'] in subregions_AtoG.index) else x['sub_region_1'], axis=1)
# process google data
google = pd.read_csv(google_source, low_memory=False)
google['sub_region_1'].fillna('Total', inplace=True)
google['sub_region_2'].fillna('Total', inplace=True)
google = google.rename(
columns={
'country_region': 'country',
'retail_and_recreation_percent_change_from_baseline': 'retail',
'grocery_and_pharmacy_percent_change_from_baseline': 'grocery and pharmacy',
'parks_percent_change_from_baseline': 'parks',
'transit_stations_percent_change_from_baseline': 'transit stations',
'workplaces_percent_change_from_baseline': 'workplaces',
'residential_percent_change_from_baseline': 'residential'})
summary = pd.merge(
google, apple, how='outer', left_on=[
'country', 'sub_region_1', 'sub_region_2', 'date'], right_on=[
'country', 'sub_region_1', 'sub_region_2', 'date'], sort=True)
summary = summary.drop(
columns=['country_region_code'])
summary['sub_region_2'].fillna('Total', inplace=True)
summary = summary.sort_values(
by=['country', 'sub_region_1', 'sub_region_2', 'date'])
summary.to_csv(destination, index=False)
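# Illustrative usage sketch (assumption: the raw "applemobilitytrends.csv" and
# "Global_Mobility_Report.csv" files have already been downloaded into the default folders).
def demo_build_summary_report():
    """Hypothetical demo helper, not part of the original pipeline: build the merged
    Google/Apple report with the default paths and preview the first rows."""
    build_summary_report()
    preview = pd.read_csv(os.path.join("summary_reports", "summary_report.csv"), nrows=5)
    print(preview)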
def slice_summary_report(
source=os.path.join(
"summary_reports",
"summary_report.csv"),
destination_regions=os.path.join(
"summary_reports",
"summary_report_regions.csv"),
destination_countries=os.path.join(
"summary_reports",
"summary_report_countries.csv"),
destination_US=os.path.join(
"summary_reports",
"summary_report_US.csv")):
    '''Slice the merged summary report into three subreports:
1) Summary report by regions without US counties
2) Summary report by countries
3) Summary report for the US only
Args:
source: location of the summary CSV report
destination_regions: destination for report #1
destination_countries: destination for report #2
destination_US: destination for report #3
'''
# read full summary report
summary = pd.read_csv(source, low_memory=False)
# create report #1
regions = summary[summary['sub_region_2'] == 'Total']
regions = regions.drop(columns=['sub_region_2'])
regions.to_csv(destination_regions, index=False)
# create report #2
countries = summary[summary['sub_region_1'] == 'Total']
countries = countries.drop(columns=['sub_region_1', 'sub_region_2'])
countries.to_csv(destination_countries, index=False)
# create report #3
US = summary[summary['country'] == 'United States']
US.to_csv(destination_US, index=False)
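# Illustrative usage sketch (assumption: "summary_report.csv" already exists, e.g. produced
# by build_summary_report above).
def demo_slice_summary_report():
    """Hypothetical demo helper: slice the merged report and load the per-country subreport."""
    slice_summary_report()
    countries = pd.read_csv(os.path.join("summary_reports", "summary_report_countries.csv"))
    print(countries.head())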
def csv_to_excel(csv_path, excel_path):
"""Helper function which create Excel file from CSV"""
df = | pd.read_csv(csv_path, low_memory=False) | pandas.read_csv |
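# Illustrative usage sketch (assumption: the CSV already exists and an Excel writer engine
# such as openpyxl is installed, since pandas.DataFrame.to_excel needs one).
def demo_csv_to_excel():
    """Hypothetical demo helper for csv_to_excel."""
    csv_to_excel(os.path.join("summary_reports", "summary_report_countries.csv"),
                 os.path.join("summary_reports", "summary_report_countries.xlsx"))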
import requests
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry
import json
import pandas as pd
import numpy as np
import pathlib
import streamlit as st
import getYfData as yfd
from time import sleep
import time
from datetime import datetime
from datetime import timedelta
from dateutil.relativedelta import relativedelta, MO
import math
from dataclasses import dataclass
from dataclasses import field
from dataclasses import InitVar
# ====================
# WAITING TASKS
# ====================
# add a json object checker to all api json calls
# label every step in functions
# reduce nested loops to shorter code
# correct output size for the last dc
# ====================
# DATACLASS FUNCTIONS
# ====================
@dataclass # data class to hold single ticker dataclass object
class singleTickerData(object):
ticker: str
interval: str
start_date: str
end_date: str
outputsize: int
df_tsMeta: pd.core.frame.DataFrame
df_tsData: pd.core.frame.DataFrame
@dataclass # data class to hold list of ticker dataclass object
class multiTickerData(object):
listTickerDClass: list
def populatelist(dcItem):
listTickerDClass.append(dcItem)
@dataclass # data class to hold input values for each ticker - mostly useless might remove it
class singleTickerInput(object):
# api keys
#alpha_vantage_api_key : str = "<KEY>"
twelvedata_api_key: str = "69287070d2f24f60a821b96ec1281011"
ticker: str = "AAPL"
interval: str = "1min"
start_date: str = "2016-01-20"
end_date: str = ""
earliestDateTime_data: str = ""
earliestUnixTime_data: str = ""
timezone: str = ""
# ====================
# TEXT FORMATTING FUNCTIONS
# ====================
# ====================
# HELPER FUNCTIONS
# ====================
# function to create a folder (nwFolder) if it does not already exist
def createfolder(nwFolder):
ii = pathlib.Path(__file__).parent.resolve().parents[0]
#print(f'{str(ii)} is main directory')
p = ii / f'{nwFolder}'
#p = pathlib.Path(f'{nwFolder}/')
if not p.exists():
p.mkdir(parents=True, exist_ok=True)
# fn = "test.txt" # I don't know what is your fn
#filepath = p / fn
# with filepath.open("w", encoding ="utf-8") as f:
# f.write(data)
return p
# function to confirm a json key/pair exists
def chk_json_key_exists(json_key, json_object):
if (json_key in json_object):
json_key_exist = True
else:
json_key_exist = False
return json_key_exist
# create the files.json folder if it does not exist and dump the json file into it
def dumpjsonData(filename, json_object):
p = str(createfolder('files.json'))
print(f'json files will be stored in {p} folder')
# creation of json file for files with status ok
mypath = pathlib.Path(f'{p}', f'{filename}.json')
with open(mypath, "w") as outfile:
json.dump(json_object, outfile, indent=4, sort_keys=True)
# convert json key/pair to df if status key is "ok"
def get_tck_apistocks_json_df(json_object, status_hdr='status', json_key='data'):
status_hdr_exist = chk_json_key_exists(
json_key=status_hdr, json_object=json_object)
data_df = pd.DataFrame()
if (status_hdr_exist == True):
status_hdr_data = json_object[status_hdr]
if status_hdr_data == 'ok':
json_key_data = json_object[json_key]
data_df = pd.DataFrame(json_key_data)
return data_df
# get ticker stats: shares float and outstanding from 12data statistics api call
def get_tck_stats_items(apikey_12Data, symbol):
data_type = "statistics"
apikey = apikey_12Data
twelvedata_url = f'https://api.twelvedata.com/{data_type}?symbol={symbol}&apikey={apikey}'
json_object = requests.get(twelvedata_url).json()
json_object = chkJsonObj(json_object)
statistics_exists = chk_json_key_exists(
json_key="statistics", json_object=json_object)
if statistics_exists:
json_object = json_object["statistics"]
stock_statistics_exists = chk_json_key_exists(
json_key="stock_statistics", json_object=json_object)
if stock_statistics_exists:
json_object = json_object["stock_statistics"]
shares_outstand_data = json_object["shares_outstanding"]
float_shares_data = json_object["float_shares"]
            # return the same 4-tuple shape as the yfinance fallback below (no errors in this path)
            return shares_outstand_data, float_shares_data, False, False
else:
            # without a premium Twelve Data subscription this endpoint returns no data, so fall back to yfinance
shares_outstand_data, float_shares_data, error_out_shre, error_flt_shre = yfd.get_yf_float_outstand_shares(
symbol)
return shares_outstand_data, float_shares_data, error_out_shre, error_flt_shre
# get ticker stats: shares float and outstanding from yfinance
def get_tck_stats_items_from_yFin(symbol):
shares_outstand_data, float_shares_data, error_out_shre, error_flt_shre = yfd.get_yf_float_outstand_shares(
symbol)
return shares_outstand_data, float_shares_data, error_out_shre, error_flt_shre
# get ticker stocks df: list of tickers from 12data statistics api call
# @st.cache
def get_tck_stocks_df(apikey_12Data):
data_type = "stocks"
apikey = apikey_12Data
twelvedata_url = f'https://api.twelvedata.com/{data_type}'
json_object = requests.get(twelvedata_url).json()
json_object = chkJsonObj(json_object)
#types_stocks = ['EQUITY', 'Common', 'Common Stock', 'American Depositary Receipt',
# 'Real Estate Investment Trust (REIT)', 'Unit', 'GDR', 'Closed-end Fund',
# 'ETF', 'Depositary Receipt', 'Preferred Stock', 'Limited Partnership',
# 'OTHER_SECURITY_TYPE', 'Warrant', 'STRUCTURED_PRODUCT', 'Exchange-traded Note',
# 'Right', 'FUND', 'Trust', 'Index', 'Unit Of Beneficial Interest',
# 'MUTUALFUND', 'New York Registered Shares']
data_df = get_tck_apistocks_json_df(
json_object, status_hdr='status', json_key='data')
return data_df
# get ticker name series: from imported df -called from combine_stock_stats_items
def get_tcker_symbol_lst(data_df):
symbol_lst = sorted(data_df["symbol"].unique())
return symbol_lst
# get ticker type series: from imported df -called from combine_stock_stats_items
def get_tcker_type_lst(data_df):
type_lst = sorted(data_df["type"].unique())
return type_lst
# get ticker country series: from imported df -called from combine_stock_stats_items
def get_tcker_country_lst(data_df):
country_lst = sorted(data_df["country"].unique())
return country_lst
# get ticker exchange series: from imported df -called from combine_stock_stats_items
def get_tcker_exchange_lst(data_df):
exchange_lst = sorted(data_df["exchange"].unique())
return exchange_lst
# confirm whether members of the symbol series exist in the symbol list (left unfinished in the
# original; sketched here to take the stocks dataframe explicitly and return the boolean mask)
def filter_tcker_symbollist(stocks_df, symbol_lst):
    if len(symbol_lst) != 0:
        cond_symbol = stocks_df["symbol"].isin(symbol_lst)
        return cond_symbol
    return None
# filter tickers stock df for filter conditions
def filter_tcker(apikey_12Data, stocks_df, symbol_select, type_select, country_select, exchange_select):
lstOfDf = []
if len(symbol_select) != 0:
cond_symbol = stocks_df["symbol"].isin(symbol_select)
df_symbol = stocks_df[cond_symbol]
lstOfDf.append(df_symbol)
if len(type_select) != 0:
cond_type = stocks_df["type"].isin(type_select)
df_type = stocks_df[cond_type]
lstOfDf.append(df_type)
if len(country_select) != 0:
cond_country = stocks_df["country"].isin(country_select)
df_country = stocks_df[cond_country]
lstOfDf.append(df_country)
if len(exchange_select) != 0:
cond_exchange = stocks_df["exchange"].isin(exchange_select)
df_exchange = stocks_df[cond_exchange]
lstOfDf.append(df_exchange)
if len(lstOfDf) == 0:
df_filter = stocks_df
elif len(lstOfDf) == 1:
df_filter = lstOfDf[0]
elif len(lstOfDf) == 2:
df_filter = pd.merge(lstOfDf[0], lstOfDf[1], how='inner', on=[
'symbol', 'type', 'country', 'exchange'], suffixes=('', '_DROPA')).filter(regex='^(?!.*_DROPA)')
elif len(lstOfDf) == 3:
df_filter = pd.merge(lstOfDf[0], lstOfDf[1], how='inner', on=[
'symbol', 'type', 'country', 'exchange'], suffixes=('', '_DROPA')).filter(regex='^(?!.*_DROPA)')
df_filter = pd.merge(df_filter, lstOfDf[2], how='inner', on=[
'symbol', 'type', 'country', 'exchange'], suffixes=('', '_DROPB')).filter(regex='^(?!.*_DROPB)')
elif len(lstOfDf) == 4:
df_filter = pd.merge(lstOfDf[0], lstOfDf[1], how='inner', on=[
'symbol', 'type', 'country', 'exchange'], suffixes=('', '_DROPA')).filter(regex='^(?!.*_DROPA)')
df_filter = pd.merge(df_filter, lstOfDf[2], how='inner', on=[
'symbol', 'type', 'country', 'exchange'], suffixes=('', '_DROPB')).filter(regex='^(?!.*_DROPB)')
df_filter = pd.merge(df_filter, lstOfDf[3], how='inner', on=[
'symbol', 'type', 'country', 'exchange'], suffixes=('', '_DROPC')).filter(regex='^(?!.*_DROPC)')
df_filter, symb_error_out_shre_lst, symb_error_flt_shre_lst = combine_stock_stats_items(
apikey_12Data, df_filter)
df_filter = addEarliestTimeStampsSeries(apikey_12Data, df_filter)
return df_filter, symb_error_out_shre_lst, symb_error_flt_shre_lst
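# Illustrative usage sketch (assumption: 'apikey' holds a valid Twelve Data API key; the call
# triggers live API and yfinance lookups for every matching symbol, so keep the filter narrow).
def demo_filter_tcker(apikey):
    """Hypothetical demo: pull the stock list and filter it down to two US NASDAQ tickers."""
    stocks_df = get_tck_stocks_df(apikey)
    filtered_df, err_out, err_flt = filter_tcker(
        apikey, stocks_df,
        symbol_select=["AAPL", "MSFT"], type_select=[],
        country_select=["United States"], exchange_select=["NASDAQ"])
    print(filtered_df.head(), err_out, err_flt)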
# add time-stamp series to stock dataframe
def addEarliestTimeStampsSeries(apikey_12Data, df_filter):
str_timestamp_lst = []
unx_timestamp_lst = []
for indx in df_filter.index:
symbol = df_filter['symbol'][indx]
earliest_dct = getTickerEarliesrTimeStamp(apikey_12Data, symbol)
earilest_dt_str = earliest_dct['datetime_data']
earilest_dt_unx = earliest_dct['unix_time_data']
str_timestamp_lst.append(earilest_dt_str)
unx_timestamp_lst.append(earilest_dt_unx)
df_filter["earliest_timestamp"] = str_timestamp_lst
df_filter["earliest_timestamp_unix"] = unx_timestamp_lst
return df_filter
#combine stock df with shares float/outstanding series
def combine_stock_stats_items(apikey_12Data, data_df):
# get listing of symbols
symbol_lst = get_tcker_symbol_lst(data_df)
cond_lst = []
shares_outstand_lst = []
float_shares_lst = []
symb_error_out_shre_lst = []
symb_error_flt_shre_lst = []
# loop names to get 2 shares data
for symbol in symbol_lst:
shares_outstand_data, float_shares_data, error_out_shre, error_flt_shre = get_tck_stats_items_from_yFin(
symbol)
# append symbols not returning outstanding/float shares to error lists
if error_out_shre == True:
symb_error_out_shre_lst.append(symbol)
if error_flt_shre == True:
symb_error_flt_shre_lst.append(symbol)
cond = (data_df["symbol"] == symbol)
cond_lst.append(cond)
shares_outstand_lst.append(shares_outstand_data)
float_shares_lst.append(float_shares_data)
data_df["shares-outstanding"] = np.select(cond_lst, shares_outstand_lst)
data_df["float-shares"] = np.select(cond_lst, float_shares_lst)
return data_df, symb_error_out_shre_lst, symb_error_flt_shre_lst
# append data to data_df
#data_df = get_tck_stocks_df(apikey_12Data)
#get and clean up a list of tickers from a string
def get_tcker_lst_fromStringIO(string_data):
lines = string_data.split("\n")
string = []
tcker_lst = []
for line in lines:
line = line.replace('-\n', '')
line = line.replace('\r', '')
line = line.replace(',,', ',')
line = line.replace(', ', ',')
string.append(line)
tckerItemPerLine = line.split(",")
for tckers in tckerItemPerLine:
if tckers.startswith("'") and tckers.endswith("'"):
tckers = tckers[1:-1]
elif tckers.startswith('"') and tckers.endswith('"'):
tckers = tckers[1:-1]
tckers = tckers.upper()
if tckers != " ":
if tckers != "":
tcker_lst.append(tckers)
return tcker_lst
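# Illustrative example (the sample string below mimics what an uploaded ticker file could contain).
def demo_get_tcker_lst_fromStringIO():
    """Hypothetical demo: parse a comma/newline separated ticker string into a clean list."""
    sample = "aapl, 'msft',\"GOOG\"\r\ntsla,,"
    print(get_tcker_lst_fromStringIO(sample))  # expected: ['AAPL', 'MSFT', 'GOOG', 'TSLA']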
#convert datetime object to string of length 10
def convertDateTimeToDateStrLen10(dateTimeObj):
StringLen10 = dateTimeObj.strftime("%Y-%m-%d")
return StringLen10
#convert string of length 10 to string of length 19
def convertDateStrLen10toDateStrLen19(StringLen10):
if len(StringLen10) == 10:
StringLen19 = f'{StringLen10} 00:00:00'
else:
pass
return StringLen19
#convert string of length 10 to datetime object
def convertDateStrLen10toDateTime(StringLen10):
StringLen19 = convertDateStrLen10toDateStrLen19(StringLen10)
datetimeObj = datetime.strptime(StringLen19, "%Y-%m-%d %H:%M:%S")
return datetimeObj
#convert string of length 10 to datetime object with NoHrsMinSecs
def convertDateStrLen10toDateTimeNoHrsMinSecs(StringLen10):
datetimeObj = datetime.strptime(StringLen10, "%Y-%m-%d")
return datetimeObj
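# Illustrative example of the date-string helpers above (pure conversions, no API calls).
def demo_date_helpers():
    """Hypothetical demo: round-trip a 'YYYY-MM-DD' string through the converters."""
    d10 = convertDateTimeToDateStrLen10(datetime(2021, 3, 1))  # '2021-03-01'
    d19 = convertDateStrLen10toDateStrLen19(d10)               # '2021-03-01 00:00:00'
    dt_obj = convertDateStrLen10toDateTime(d10)                # datetime(2021, 3, 1, 0, 0)
    print(d10, d19, dt_obj)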
# define Python user-defined exceptions
class Error(Exception):
"""Base class for other exceptions"""
pass
class CatchJsonError(Error):
"""Raised when error code from json object"""
"""Exception raised for errors in the input salary.
Attributes:
salary -- input salary which caused the error
message -- explanation of the error
"""
def __init__(self, code, message, status):
self.code = code
self.message = message
self.status = message
super().__init__(self.message)
def __str__(self):
st.error(f'code: {code} \nmessage:{message} \nstatus: {status}')
return f'{self.code} \n{self.message} \nstatus: {status}'
# work in progress: check a json object for api error codes
def chkJsonObj(json_object):
#get the keys of json object dictr
keys = json_object.keys()
#check if error code json object exist in our json object keys
error_keys = ['code', 'message', 'status']
trueCnt = 0
falseCnt = 0
""" trueCnt values above zero tells us the error keys exist as keys of json dict
falseCnt values above zero tells us at least 1 error keys is not a member key of json dict"""
for error_key in error_keys:
testval = error_key in keys
if testval == True:
trueCnt += 1
else:
falseCnt += 1
# tells us this is a valid json object and returns it for onward use
if falseCnt > 0:
return json_object
# tells us this json object is an error json thrown out
else:
code = json_object['code']
message = json_object['message']
status = json_object['status']
raise CatchJsonError(code, message, status)
# function to get the earliest available timestamp for a stock ticker (still needs checking/running)
def getTickerEarliesrTimeStamp(twelvedata_api_key, ticker):
data_types = ["earliest_timestamp"]
#twelvedata_url = f'https://api.twelvedata.com/{data_types}?symbol={ticker}&interval=1day&apikey={twelvedata_api_key}'
twelvedata_url = f'https://api.twelvedata.com/earliest_timestamp?symbol={ticker}&interval=1day&apikey={twelvedata_api_key}'
json_object = requests.get(twelvedata_url).json()
json_object = chkJsonObj(json_object)
#session = requests.Session()
# In case I run into issues, retry my connection
#retries = Retry(total=5, backoff_factor=0.1, status_forcelist=[ 500, 502, 503, 504 ])
#session.mount('http://', HTTPAdapter(max_retries=retries))
# Initial request to get the ticker count
#r = session.get(twelvedata_url)
#json_object = r.json()
datetime_data = json_object['datetime']
unix_time_data = json_object['unix_time']
mydict = {}
mydict['datetime_data'] = datetime_data
mydict['unix_time_data'] = unix_time_data
return mydict
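# Illustrative usage sketch (assumption: 'apikey' holds a valid Twelve Data API key;
# this performs a live HTTP request).
def demo_getTickerEarliesrTimeStamp(apikey):
    """Hypothetical demo: fetch the earliest available daily-bar timestamp for AAPL."""
    earliest = getTickerEarliesrTimeStamp(apikey, "AAPL")
    print(earliest["datetime_data"], earliest["unix_time_data"])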
# function adds atime interval to a date using relative time delta calculations
def addRelTimeDelta(date_dt, timeIntervalValue, timeIntervalUnit):
errorcode = 0
if timeIntervalUnit == "seconds":
rel_delta = relativedelta(seconds=timeIntervalValue)
elif timeIntervalUnit == "minutes":
rel_delta = relativedelta(minutes=timeIntervalValue)
elif timeIntervalUnit == "hours":
rel_delta = relativedelta(hours=timeIntervalValue)
elif timeIntervalUnit == "days":
rel_delta = relativedelta(days=timeIntervalValue)
elif timeIntervalUnit == "weeks":
rel_delta = relativedelta(weeks=timeIntervalValue)
elif timeIntervalUnit == "months":
rel_delta = relativedelta(months=timeIntervalValue)
elif timeIntervalUnit == "years":
rel_delta = relativedelta(years=timeIntervalValue)
else:
errorcode = 1
#datetime_object = datetime.strptime(date_str, '%Y-%m-%d %H:%M:%S')
datetime_object = date_dt
datetime_object += rel_delta
new_date_str = datetime_object.strftime('%Y-%m-%d %H:%M:%S')
new_date_dt = datetime.strptime(new_date_str, '%Y-%m-%d %H:%M:%S')
return new_date_dt
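# Illustrative example of addRelTimeDelta (pure date arithmetic, no API calls).
def demo_addRelTimeDelta():
    """Hypothetical demo: shift a datetime forward by two of the supported interval units."""
    start = datetime(2021, 1, 31, 9, 30, 0)
    print(addRelTimeDelta(start, 3, "months"))    # 2021-04-30 09:30:00 (relativedelta clamps month ends)
    print(addRelTimeDelta(start, 45, "minutes"))  # 2021-01-31 10:15:00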
""" # returns list of calculated start/end time/date that program will run to get complete range of data:
use this to circumspect the 5000entries limit per api call
"""
@st.cache(suppress_st_warning=True)
def getStartStopRngeLst(symbol, interval, start_date_dt, end_date_dt):
# required data
maxRequestPerDay_freekey = 800
maxNosDataPts = 5000
useNosDataPts = 4500
interval_lst = ['1min', '5min', '15min', '30min',
'45min', '1h', '2h', '4h', '1day', '1week', '1month']
intervalQty_lst = [1, 5, 15, 30, 45,
60, 120, 240, 1440, 10080, 44640]
start_date_str = start_date_dt.strftime("%Y-%m-%d %H:%M:%S")
end_date_str = end_date_dt.strftime("%Y-%m-%d %H:%M:%S")
# get difference between start and end dates
#timedelta_raw = parsed_end - parsed_start
timedelta_raw = end_date_dt - start_date_dt
timedeltaInSeconds_int = timedelta_raw.total_seconds()
timedeltaInMinutes_int = timedeltaInSeconds_int / 60
# dictionary of mins/bar mapped against interval ie 5mins:5
interval_intervalQty_dict = {
interval_lst[i]: intervalQty_lst[i] for i in range(len(interval_lst))}
intervalInMinutes = interval_intervalQty_dict[interval]
#calculate total nos of datapoints
data_pts_total = math.ceil(timedeltaInMinutes_int/intervalInMinutes)
#if data_pts_total <= useNosDataPts:
# use_Rnge_per_Request_int = useNosDataPts * data_pts_total
# use_Rnge_per_Request_datetime = timedelta(seconds=(timedeltaInMinutes_int * 60))
#else:
# use_Rnge_per_Request_int = useNosDataPts * data_pts_total
# use_Rnge_per_Request_datetime = timedelta(seconds=(useNosDataPts * intervalInMinutes * 60))
#calculate the total number of start/stop pairs or requests to make
nosReq = math.ceil(data_pts_total/useNosDataPts)
# we are creating lists of start date/enddate/time interval
symbol_namn_lst = []
start_time_lst = []
end_time_lst = []
interval_lst = []
intervalinMin_lst = []
data_pts_lst = []
chartRnge_lst = []
cnt = 0
leftOverDatapts = data_pts_total
for nos in range(nosReq):
# populate entries
if nos == 0:
start_time_entry = start_date_dt
else:
start_time_entry = end_time_lst[nos - 1]
# can switch to max_Rnge_per_Request_int instead of use_Rnge_per_Request_int
interval_entry = intervalInMinutes
if data_pts_total <= useNosDataPts:
data_pts_entry = data_pts_total
else:
data_pts_entry = useNosDataPts
# can switch to max_Rnge_per_Request_int instead of use_Rnge_per_Request_int
ChartRnge_entry = data_pts_total
# correct the last data_pts_entry
#st.write(f'a1 {cnt}: {leftOverDatapts} : {data_pts_total} : {data_pts_entry}')
if leftOverDatapts >= data_pts_entry:
leftOverDatapts = leftOverDatapts - data_pts_entry
else:
data_pts_entry = leftOverDatapts
#st.write(f'a2 {cnt}: {leftOverDatapts} : {data_pts_total} : {data_pts_entry}')
use_Rnge_per_Request_int = data_pts_entry * intervalInMinutes * 60
use_Rnge_per_Request_datetime = timedelta(seconds=(use_Rnge_per_Request_int))
end_time_entry = start_time_entry + use_Rnge_per_Request_datetime
# populate lists
start_time_lst.append(start_time_entry)
end_time_lst.append(end_time_entry)
interval_lst.append(interval)
intervalinMin_lst.append(interval_entry)
data_pts_lst.append(data_pts_entry)
chartRnge_lst.append(ChartRnge_entry)
symbol_namn_lst.append(symbol)
cnt += 1
# lets create dataframe from lists created above
chartTSInput_dict = {"symbol": symbol_namn_lst, "start_time": start_time_lst, "end_time": end_time_lst,
"interval": interval_lst, "interval_mins": intervalinMin_lst,
"data_pts_limit": data_pts_lst, "data_pts_all": chartRnge_lst}
    chartTSInput_df = pd.DataFrame(chartTSInput_dict)
    return chartTSInput_df
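# Illustrative usage sketch: split a two-year 1-minute request into chunks of at most
# 4500 bars (pure arithmetic, no API calls; the symbol is only carried through to the output).
def demo_getStartStopRngeLst():
    """Hypothetical demo of getStartStopRngeLst."""
    df = getStartStopRngeLst("AAPL", "1min",
                             datetime(2020, 1, 1), datetime(2022, 1, 1))
    print(df[["start_time", "end_time", "data_pts_limit"]])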
# -*- coding: utf-8 -*-
import datetime
import six
import pandas as pd
from rqdatac.client import get_client
from rqdatac.validators import (
ensure_date_int,
ensure_date_or_today_int,
ensure_order_book_ids,
ensure_string,
ensure_string_in,
check_items_in_container
)
from rqdatac.decorators import export_as_api
@export_as_api
def shenwan_industry(index_name, date=None, market="cn"):
"""获取申万行业组成
:param index_name: 申万行业代码或名字, 如'801010.INDX'或'农林牧渔'
:param date: 如 '2015-01-07' (Default value = None)
:param market: (Default value = "cn")
:returns: 返回输入日期最近交易日的申万行业组成
"""
if not isinstance(index_name, six.string_types):
raise ValueError("string expected, got {!r}".format(index_name))
if not date:
date = datetime.date.today()
date = ensure_date_int(date)
return get_client().execute("shenwan_industry", index_name, date, market=market)
LEVEL_MAP = (
None,
("index_code", "index_name"),
("second_index_code", "second_index_name"),
("third_index_code", "third_index_name"),
)
@export_as_api
def shenwan_instrument_industry(order_book_ids, date=None, level=1, expect_df=False, market="cn"):
"""获取股票对应的申万行业
:param order_book_ids: 股票列表,如['000001.XSHE', '000002.XSHE']
:param date: 如 '2015-01-07' (Default value = None)
:param level: (Default value = 1)
:param expect_df: 返回 DataFrame,默认为 False
:param market: (Default value = "cn")
:returns: code, name
返回输入日期最近交易日的股票对应申万行业
"""
if level not in [0, 1, 2, 3]:
raise ValueError("level should be in 0,1,2,3")
order_book_ids = ensure_order_book_ids(order_book_ids)
if not date:
date = datetime.date.today()
date = ensure_date_int(date)
r = get_client().execute("shenwan_instrument_industry", order_book_ids, date, level, market=market)
if not r:
return
if len(order_book_ids) == 1 and not expect_df:
r = r[0]
if level != 0:
return r[LEVEL_MAP[level][0]], r[LEVEL_MAP[level][1]]
else:
return (
r["index_code"],
r["index_name"],
r["second_index_code"],
r["second_index_name"],
r["third_index_code"],
r["third_index_name"],
)
df = pd.DataFrame(r).set_index("order_book_id")
if level != 0 and level != 1:
df.rename(columns=dict(zip(LEVEL_MAP[level], LEVEL_MAP[1])), inplace=True)
return df
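# Illustrative usage sketch (assumption: a licensed rqdatac session has been initialised
# via rqdatac.init(); both calls below hit the RQData service).
def _demo_shenwan_lookups():
    """Hypothetical demo, not part of the public rqdatac API surface."""
    print(shenwan_industry('801010.INDX', date='2021-06-01'))
    print(shenwan_instrument_industry(['000001.XSHE', '000002.XSHE'],
                                      date='2021-06-01', level=1, expect_df=True))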
@export_as_api
def zx_industry(industry_name, date=None):
"""获取中信行业股票列表
:param industry_name: 中信行业名称或代码
:param date: 查询日期,默认为当前最新日期
:return: 所属目标行业的order_book_id list or None
"""
if not isinstance(industry_name, six.string_types):
raise ValueError("string expected, got {!r}".format(industry_name))
if not date:
date = datetime.date.today()
date = ensure_date_int(date)
return get_client().execute("zx_industry", industry_name, date)
ZX_LEVEL_MAP = (
None,
"first_industry_name",
"second_industry_name",
"third_industry_name",
)
@export_as_api
def zx_instrument_industry(order_book_ids, date=None, level=1, expect_df=False):
"""获取股票对应的中信行业
:param order_book_ids: 股票列表,如['000001.XSHE', '000002.XSHE']
:param date: 如 '2015-01-07' (Default value = None)
:param level: (Default value = 1)
:param expect_df: 返回 DataFrame,默认为 False
:returns: code, name
返回输入日期最近交易日的股票对应中信行业
"""
if level not in [0, 1, 2, 3]:
raise ValueError("level should be in 0,1,2,3")
order_book_ids = ensure_order_book_ids(order_book_ids)
if not date:
date = datetime.date.today()
date = ensure_date_int(date)
r = get_client().execute("zx_instrument_industry", order_book_ids, date, level)
if not r:
return
if len(order_book_ids) == 1 and not expect_df:
r = r[0]
if level != 0:
return [r[ZX_LEVEL_MAP[level]], ]
else:
return [
r["first_industry_name"],
r["second_industry_name"],
r["third_industry_name"],
]
df = pd.DataFrame(r).set_index("order_book_id")
return df
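# Illustrative usage sketch (same assumption as above: an initialised rqdatac session).
def _demo_zx_lookups():
    """Hypothetical demo of the CITICS (Zhongxin) industry helpers."""
    print(zx_industry('银行', date='2021-06-01'))  # '银行' = banking industry
    print(zx_instrument_industry(['000001.XSHE'], date='2021-06-01', level=1, expect_df=True))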
@export_as_api
def get_industry(industry, source='sws', date=None, market="cn"):
"""获取行业股票列表
:param industry: 行业名称或代码
:param source: 分类来源。sws: 申万, citics: 中信, gildata: 聚源
:param date: 查询日期,默认为当前最新日期
:param market: (Default value = "cn")
:return: 所属目标行业的order_book_id list or None
"""
industry = ensure_string(industry, "industry")
source = ensure_string_in(source, ["sws", "citics", "gildata"], "source")
date = ensure_date_or_today_int(date)
res = get_client().execute("get_industry", industry, source, date, market=market)
return sorted(res)
@export_as_api
def get_instrument_industry(order_book_ids, source='sws', level=1, date=None, market="cn"):
"""获取股票对应的行业
:param order_book_ids: 股票列表,如['000001.XSHE', '000002.XSHE']
:param source: 分类来源。sws: 申万, citics: 中信, gildata: 聚源
:param date: 如 '2015-01-07' (Default value = None)
:param level: (Default value = 1)
:param market: (Default value = "cn")
:returns: code, name
返回输入日期最近交易日的股票对应行业
"""
order_book_ids = ensure_order_book_ids(order_book_ids)
source = ensure_string_in(source, ["sws", "citics", "gildata"], "source")
check_items_in_container(level, [0, 1, 2, 3], 'level')
date = ensure_date_or_today_int(date)
r = get_client().execute("get_instrument_industry", order_book_ids, source, level, date, market=market)
if not r:
return
    return pd.DataFrame(r)
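# Illustrative usage sketch (assumption: an initialised rqdatac session).
def _demo_generic_industry_lookups():
    """Hypothetical demo of the source-agnostic industry helpers."""
    print(get_industry('农林牧渔', source='sws', date='2021-06-01'))
    print(get_instrument_industry(['000001.XSHE', '000002.XSHE'],
                                  source='citics', level=1, date='2021-06-01'))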