| prompt (stringlengths 19-1.03M) | completion (stringlengths 4-2.12k) | api (stringlengths 8-90) |
---|---|---|
# -*- coding: utf-8 -*-
import os
import numpy as np
import pandas as pd
import shutil
import tempfile
import pytest
from ddf_utils.chef.api import Chef
from ddf_utils.chef.exceptions import IngredientError, ProcedureError, ChefRuntimeError
wd = os.path.dirname(__file__)
def chef_fn(fn):
return Chef.from_recipe(os.path.join(wd, 'recipes', fn),
ddf_dir=os.path.join(wd, 'datasets'),
procedure_dir=os.path.join(wd, 'procedures'))
def test_debug_option():
chef = chef_fn('test_debug_option.yaml')
chef.run()
assert os.path.exists('./_debug/dps_key_translated')
assert os.path.exists('./_debug/res')
# cleanup
shutil.rmtree('./_debug/')
def test_include():
chef = chef_fn('test_include_main.yml')
chef.run()
def test_include_fail():
with pytest.raises(ChefRuntimeError):
chef = chef_fn('test_include_fail_main.yaml')
chef.run()
def test_extract_concepts():
chef = chef_fn('test_extract_concepts.yaml')
res = chef.run()
res = res[0].get_data()['concept']
assert 'geo' in res.concept.values
assert 'year' in res.concept.values
assert res.set_index('concept').loc['year', 'concept_type'] == 'time'
def test_filter():
chef = chef_fn('test_filter.yaml')
res = chef.run()
res_ent = list(filter(lambda x: x.dtype == 'entities', res))[0]
res_dps = list(filter(lambda x: x.dtype == 'datapoints', res))[0]
country = res_ent.get_data()['country']
dps = res_dps.compute()
assert set(dps.keys()) == {'imr_upper', 'imr_lower'}
for dp in dps.values():
# assert dp.year.dtype == np.int64
assert np.all(dp.year > "2000")
assert set(dp.country.unique()) == {'usa', 'swe'}
assert set(country.columns) == {'country', 'countryname'}
assert set(country.country.values) == {'usa', 'swe'}
def test_flatten():
chef = chef_fn('test_flatten.yml')
res = chef.run()
for r in res:
print(r.compute().keys())
assert set(res[0].compute().keys()) == {
'agriculture_thousands_f', 'agriculture_thousands_m',
'agriculture_percentage_f', 'agriculture_percentage_m',
'agriculture_thousands', 'agriculture_percentage'}
# assert res[0].compute()['agriculture_percentage_m'].dtypes['year'] == np.int64
def test_groupby():
chef = chef_fn('test_groupby.yaml')
chef.run()
dp1 = chef.dag.get_node('grouped-datapoints-1').evaluate().compute()
dp2 = chef.dag.get_node('grouped-datapoints-2').evaluate().compute()
assert len(dp1.keys()) == 1
assert len(dp2.keys()) == 1
assert set(dp1['agriculture_percentage'].columns) == {'country', 'year', 'agriculture_percentage'}
assert set(dp2['agriculture_thousands'].columns) == {'country', 'year', 'agriculture_thousands'}
# assert dp1['agriculture_percentage'].dtypes['year'] == np.int16
# assert dp2['agriculture_thousands'].dtypes['year'] == np.int16
def test_custom_procedure():
chef = chef_fn('test_import_procedure.yml')
chef.run()
def test_ingredients():
for i in range(1, 4):
chef = chef_fn('test_ingredients_{}.yaml'.format(i))
chef.run()
def test_translate_column():
chef = chef_fn('test_translate_column.yaml')
chef.run()
res = chef.dag.get_node('bp-datapoints-aligned').evaluate().compute()
def test_translate_header():
chef = chef_fn('test_translate_header.yaml')
res = chef.run()
indicators = ['infant_mortality_median', 'imr_lower']
data = res[0].compute()
assert set(data.keys()) == set(indicators)
for i in indicators:
assert set(data[i].columns) == set(['geo', 'time', i])
# assert data[i].dtypes['time'] == np.int64
data = res[1].get_data()
assert 'city' in data.keys()
assert 'city' in data['city'].columns
assert 'is--city' in data['city'].columns
def test_translate_header_fail():
chef = chef_fn('test_translate_header_fail_1.yaml')
with pytest.raises(ValueError):
chef.run()
chef = chef_fn('test_translate_header_fail_2.yaml')
with pytest.raises(ValueError):
chef.run()
chef = chef_fn('test_translate_header_fail_3.yaml')
with pytest.raises(ValueError):
chef.run()
def test_trend_bridge():
chef = chef_fn('test_trend_bridge.yml')
chef.run()
res = chef.dag.get_node('res-1').evaluate().compute()
# assert res['imr_lower'].dtypes['year'] == np.int64
def test_window():
chef = chef_fn('test_window.yaml')
chef.run()
dp1 = chef.dag.get_node('rolling_datapoints_1').evaluate().compute()
dp2 = chef.dag.get_node('rolling_datapoints_2').evaluate().compute()
dp3 = chef.dag.get_node('rolling_datapoints_3').evaluate().compute()
dp4 = chef.dag.get_node('rolling_datapoints_4').evaluate().compute()
assert dp1['value']['value'].tolist() == [1, 1, 1, 1, 1, 1, 1, 1.5, 2, 3, 4, 5]
assert dp2['value']['value'].tolist() == [1, 2, 3, 4, 5, 6, 1, 3, 6, 10, 15, 21]
assert dp3['value']['value'].tolist() == [1, 1, 1, 1, 1, 1, 1, 1.5, 2, 3, 4, 5]
assert dp4['value']['value'].tolist() == [1, 2, 3, 4, 5, 6, 1, 3, 6, 10, 15, 21]
def test_serving():
chef1 = chef_fn('test_serve_procedure.yaml')
res = chef1.run()
assert len(res) == 3
chef2 = chef_fn('test_serving_section.yaml')
tmpdir = tempfile.mkdtemp()
res = chef2.run(serve=True, outpath=tmpdir)
assert len(res) == 3
assert os.path.exists(os.path.join(tmpdir, 'test_serving'))
def test_merge():
chef = chef_fn('test_merge.yaml')
res = chef.run()
data = res[0].compute()
indicators = ['imr_lower', 'imr_median', 'imr_upper',
'biofuels_production_kboed', 'biofuels_production_ktoe']
assert set(data.keys()) == set(indicators)
# assert data['imr_median'].dtypes['year'] == np.int64
imr_lower = data['imr_lower'].set_index(['geo', 'year'])
assert imr_lower.loc[('afg', "1961"), 'imr_lower'] == 2055
def test_merge_2():
chef = chef_fn('test_merge_2.yaml')
res = chef.run()
data = res[0].get_data()
data = data['concept'].set_index('concept')
assert data.loc['col1', 'col1'] == 'testing1'
assert data.loc['col2', 'col2'] == 'testing2'
assert data.loc['col1', 'col2'] == 'bar'
assert data.loc['col2', 'col1'] is np.nan
def test_merge_3():
chef = chef_fn('test_merge_3.yaml')
res = chef.run()
data = res[0].compute()
data = data['indicator'].set_index(['country', 'year'])
assert data.loc[('chn', 2000), 'indicator'] == 1
assert data.loc[('chn', 2001), 'indicator'] == 2
assert data.loc[('chn', 2002), 'indicator'] == 3
assert data.loc[('chn', 2003), 'indicator'] == 3
assert data.loc[('chn', 2004), 'indicator'] == 3
def test_merge_fail():
chef = chef_fn('test_merge_fail.yaml')
with pytest.raises(ProcedureError):
chef.run()
def test_dag_fail():
chef = chef_fn('test_dag_fail.yaml')
with pytest.raises(ChefRuntimeError):
chef.run()
def test_run_op():
chef = chef_fn('test_run_op.yaml')
chef.run()
res = chef.dag.get_node('res').evaluate().compute()
# assert res['mean_imr'].dtypes['year'] == np.int16
def test_import_procedure_fail():
chef = chef_fn('test_import_procedure.yml')
chef.add_procedure('datapoints', 'nonexists', ['result'], result='error-ing')
with pytest.raises(ChefRuntimeError):
    chef.run()
def test_ops():
from ddf_utils.chef.ops import gt, lt, between, aagr
x = np.ones(1000)
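# The ops come from ddf_utils.chef.ops (imported above). include_eq=True turns
# the strict > / < checks into >= / <=, which is why an all-ones array still
# passes the comparisons against 1 below.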
assert gt(x, 0)
assert gt(x, 1, include_eq=True)
assert not gt(x, 2)
assert lt(x, 2)
assert lt(x, 1, include_eq=True)
assert not lt(x, 0)
assert between(x, 0, 2)
assert np.all(aagr(pd.DataFrame(x)  # api: pandas.DataFrame
import matplotlib.pyplot as plt
import pyVIA.examples as via
import pandas as pd
import umap
import scanpy as sc
import numpy as np
import warnings
def run_Toy_multi(foldername="Datasets/"):
via.main_Toy(ncomps=10, knn=30,dataset='Toy3', random_seed=2,foldername=foldername)
def run_Toy_discon(foldername="Datasets/"):
via.main_Toy(ncomps=10, knn=30,dataset='Toy4', random_seed=2,foldername=foldername)
def run_EB(foldername="Datasets/"):
via.main_EB_clean(ncomps=30, knn=20, v0_random_seed=24, foldername=foldername)
def run_generic_wrapper(foldername = "Datasets/", knn=20, ncomps = 20):
# Read the two files:
# 1) the first file contains 200PCs of the Bcell filtered and normalized data for the first 5000 HVG.
# 2) the second file contains raw count data for the marker genes
data = pd.read_csv(foldername+'Bcell_200PCs.csv')
data_genes = pd.read_csv(foldername+'Bcell_markergenes.csv')
data_genes = data_genes.drop(['Unnamed: 0'], axis=1)#cell
true_label = data['time_hour']
data = data.drop(['cell', 'time_hour'], axis=1)
adata = sc.AnnData(data_genes)
adata.obsm['X_pca'] = data.values
# use UMAP or PHate to obtain embedding that is used for single-cell level visualization
embedding = umap.UMAP(random_state=42, n_neighbors=15, init='random').fit_transform(data.values[:, 0:5])
print('finished embedding')
# list marker genes or genes of interest if known in advance. otherwise marker_genes = []
marker_genes = ['Igll1', 'Myc', 'Slc7a5', 'Ldha', 'Foxo1', 'Lig4', 'Sp7'] # irf4 down-up
# call VIA. We identify an early (suitable) start cell root = [42]. Can also set an arbitrary value
via.via_wrapper(adata, true_label, embedding, knn=knn, ncomps=ncomps, jac_std_global=0.15, root=[42], dataset='',
random_seed=1,v0_toobig=0.3, v1_toobig=0.1, marker_genes=marker_genes, piegraph_edgeweight_scalingfactor=1, piegraph_arrow_head_width=.2)
def run_faced_cell_cycle(foldername = '/home/shobi/Trajectory/Datasets/FACED/'):
#FACED imaging cytometry based biophysical features
df = pd.read_csv(foldername +'mcf7_38features.csv')
df = df.drop('Unnamed: 0', axis=1)
true_label = pd.read_csv(foldername+'mcf7_phases.csv')
true_label = list(true_label['phase'].values.flatten())
print('There are ', len(true_label), 'MCF7 cells and ', df.shape[1], 'features')
ad = sc.AnnData(df)
ad.var_names = df.columns
# normalize features
sc.pp.scale(ad)
sc.tl.pca(ad, svd_solver='arpack')
# Weight the top features (ranked by Mutual Information and Random Forest Classifier)
X_in = ad.X
df_X = pd.DataFrame(X_in)
df_X.columns = df.columns
df_X['Area'] = df_X['Area'] * 3
df_X['Dry Mass'] = df_X['Dry Mass'] * 3
df_X['Volume'] = df_X['Volume'] * 20
X_in = df_X.values
ad = sc.AnnData(df_X)
# apply PCA
sc.tl.pca(ad, svd_solver='arpack')
ad.var_names = df_X.columns
f, ax = plt.subplots(figsize=[20, 10])
embedding = umap.UMAP().fit_transform(ad.obsm['X_pca'][:, 0:20])
# phate_op = phate.PHATE()
# embedding = phate_op.fit_transform(X_in)
cell_dict = {'T1_M1': 'yellow', 'T2_M1': 'yellowgreen', 'T1_M2': 'orange', 'T2_M2': 'darkgreen', 'T1_M3': 'red',
'T2_M3': 'blue'}
cell_phase_dict = {'T1_M1': 'G1', 'T2_M1': 'G1', 'T1_M2': 'S', 'T2_M2': 'S', 'T1_M3': 'M/G2', 'T2_M3': 'M/G2'}
for key in list(set(true_label)): # ['T1_M1', 'T2_M1','T1_M2', 'T2_M2','T1_M3', 'T2_M3']:
loc = np.where(np.asarray(true_label) == key)[0]
ax.scatter(embedding[loc, 0], embedding[loc, 1], c=cell_dict[key], alpha=.7, label=cell_phase_dict[key])
plt.legend(markerscale=1.5, fontsize=14)
plt.show()
knn = 20
jac_std_global = 0.5
random_seed = 1
root_user = ['T1_M1']
v0 = via.VIA(X_in, true_label, jac_std_global=jac_std_global, dist_std_local=1, knn=knn,
cluster_graph_pruning_std=1.,
too_big_factor=0.3, root_user=root_user, dataset='faced', random_seed=random_seed,
do_impute_bool=True, is_coarse=True, preserve_disconnected=True,
preserve_disconnected_after_pruning=True,
pseudotime_threshold_TS=40)
v0.run_VIA()
tsi_list = via.get_loc_terminal_states(v0, X_in)
v1 = via.VIA(X_in, true_label, jac_std_global=jac_std_global, dist_std_local=1, knn=knn,
cluster_graph_pruning_std=1.,
too_big_factor=0.05, super_cluster_labels=v0.labels, super_node_degree_list=v0.node_degree_list,
super_terminal_cells=tsi_list, root_user=root_user, is_coarse=False,
preserve_disconnected=True, dataset='faced',
super_terminal_clusters=v0.terminal_clusters, random_seed=random_seed,
full_neighbor_array=v0.full_neighbor_array, full_distance_array=v0.full_distance_array,
ig_full_graph=v0.ig_full_graph,
csr_array_locally_pruned=v0.csr_array_locally_pruned, pseudotime_threshold_TS=40)
v1.run_VIA()
super_clus_ds_PCA_loc = via.sc_loc_ofsuperCluster_PCAspace(v0, v1, np.arange(0, len(v0.labels)))
via.draw_trajectory_gams(embedding, super_clus_ds_PCA_loc, v1.labels, v0.labels, v0.edgelist_maxout,
v1.x_lazy, v1.alpha_teleport, v1.single_cell_pt_markov, true_label, knn=v0.knn,
final_super_terminal=v1.revised_super_terminal_clusters,
sub_terminal_clusters=v1.terminal_clusters,
title_str='Hitting times: Markov Simulation on biased edges', ncomp=38)
plt.show()
all_cols = ['Area', 'Volume', 'Dry Mass', 'Circularity', 'Orientation', 'Phase Entropy Skewness',
'Phase Fiber Radial Distribution', 'Eccentricity', 'AspectRatio', 'Dry Mass Density', 'Dry Mass var',
'Dry Mass Skewness', 'Peak Phase', 'Phase Var', 'Phase Skewness', 'Phase Kurtosis', 'Phase Range',
'Phase Min', 'Phase Centroid Displacement', 'Phase STD Mean', 'Phase STD Variance',
'Phase STD Skewness', 'Phase STD Kurtosis', 'Phase STD Centroid Displacement',
'Phase STD Radial Distribution', 'Phase Entropy Mean', 'Phase Entropy Var', 'Phase Entropy Kurtosis',
'Phase Entropy Centroid Displacement', 'Phase Entropy Radial Distribution',
'Phase Fiber Centroid Displacement', 'Phase Fiber Pixel >Upper Percentile', 'Phase Fiber Pixel >Median',
'Mean Phase Arrangement', 'Phase Arrangement Var', 'Phase Arrangement Skewness',
'Phase Orientation Var', 'Phase Orientation Kurtosis']
plot_n = 7
fig, axs = plt.subplots(2, plot_n, figsize=[20, 10]) # (2,10)
for enum_i, pheno_i in enumerate(all_cols[0:14]): # [0:14]
subset_ = df[pheno_i].values
if enum_i >= plot_n:
row = 1
col = enum_i - plot_n
else:
row = 0
col = enum_i
ax = axs[row, col]
v0.get_gene_expression_multi(ax=ax, gene_exp=subset_, title_gene=pheno_i)
fig2, axs2 = plt.subplots(2, plot_n, figsize=[20, 10])
for enum_i, pheno_i in enumerate(all_cols[2 * plot_n:2 * plot_n + 14]):
subset_ = df[pheno_i].values
if enum_i >= plot_n:
row = 1
col = enum_i - plot_n
else:
row = 0
col = enum_i
ax2 = axs2[row, col]
v0.get_gene_expression_multi(ax=ax2, gene_exp=subset_, title_gene=pheno_i)
plt.show()
def run_scATAC_Buenrostro_Hemato(foldername = '/home/shobi/Trajectory/Datasets/scATAC_Hemato/', knn=20):
df = pd.read_csv(foldername+'scATAC_hemato_Buenrostro.csv', sep=',')
print('number cells', df.shape[0])
cell_types = ['GMP', 'HSC', 'MEP', 'CLP', 'CMP', 'LMuPP', 'MPP', 'pDC', 'mono', 'UNK']
cell_dict = {'UNK': 'gray', 'pDC': 'purple', 'mono': 'gold', 'GMP': 'orange', 'MEP': 'red', 'CLP': 'aqua',
'HSC': 'black', 'CMP': 'moccasin', 'MPP': 'darkgreen', 'LMuPP': 'limegreen'}
cell_annot = df['cellname'].values
true_label = []
found_annot = False
#re-formatting labels (abbreviating the original annotations for better visualization on plot labels)
for annot in cell_annot:
for cell_type_i in cell_types:
if cell_type_i in annot:
true_label.append(cell_type_i)
found_annot = True
if not found_annot:
true_label.append('unknown')
found_annot = False
PCcol = ['PC1', 'PC2', 'PC3', 'PC4', 'PC5']
embedding = umap.UMAP(n_neighbors=20, random_state=2, repulsion_strength=0.5).fit_transform(df[PCcol])
fig, ax = plt.subplots(figsize=[20, 10])
for key in cell_dict:
loc = np.where(np.asarray(true_label) == key)[0]
ax.scatter(embedding[loc, 0], embedding[loc, 1], c=cell_dict[key], alpha=0.7, label=key, s=90)
plt.legend(fontsize='large', markerscale=1.3)
plt.title('Original Annotations on UMAP embedding')
plt.show()
knn = knn
random_seed = 4
X_in = df[PCcol].values
start_ncomp = 0
root = [1200] # HSC cell
v0 = via.VIA(X_in, true_label, jac_std_global=0.5, dist_std_local=1, knn=knn,
cluster_graph_pruning_std=.15,
too_big_factor=0.3, root_user=root, dataset='scATAC', random_seed=random_seed,
do_impute_bool=True, is_coarse=True, preserve_disconnected=False)
v0.run_VIA()
tsi_list = via.get_loc_terminal_states(v0, X_in)
v1 = via.VIA(X_in, true_label, jac_std_global=0.15, dist_std_local=1, knn=knn,
cluster_graph_pruning_std=.15,
too_big_factor=0.1, super_cluster_labels=v0.labels, super_node_degree_list=v0.node_degree_list,
super_terminal_cells=tsi_list, root_user=root, is_coarse=False,
preserve_disconnected=True, dataset='scATAC',
super_terminal_clusters=v0.terminal_clusters, random_seed=random_seed,
full_neighbor_array=v0.full_neighbor_array, full_distance_array=v0.full_distance_array,
ig_full_graph=v0.ig_full_graph,
csr_array_locally_pruned=v0.csr_array_locally_pruned)
v1.run_VIA()
df['via1'] = v1.labels
df_mean = df.groupby('via1', as_index=False).mean()
gene_dict = {'ENSG00000092067_LINE336_CEBPE_D_N1': 'CEBPE Eosophil (GMP/Mono)',
'ENSG00000102145_LINE2081_GATA1_D_N7': 'GATA1 (MEP)'}
for key in gene_dict:
f, ((ax, ax1)) = plt.subplots(1, 2, sharey=True, figsize=[10, 5])
v1.draw_piechart_graph(ax, ax1, type_pt='gene', gene_exp=df_mean[key].values, title=gene_dict[key])
plt.show()
# get knn-graph and locations of terminal states in the embedded space
knn_hnsw = via.make_knn_embeddedspace(embedding)
super_clus_ds_PCA_loc = via.sc_loc_ofsuperCluster_PCAspace(v0, v1, np.arange(0, len(v0.labels)))
# draw overall pseudotime and main trajectories
via.draw_trajectory_gams(embedding, super_clus_ds_PCA_loc, v1.labels, v0.labels, v0.edgelist_maxout,
v1.x_lazy, v1.alpha_teleport, v1.single_cell_pt_markov, true_label, knn=v0.knn,
final_super_terminal=v1.revised_super_terminal_clusters,
sub_terminal_clusters=v1.terminal_clusters,
title_str='Pseudotime', ncomp=5 )
plt.show()
# draw trajectory and evolution probability for each lineage
via.draw_sc_evolution_trajectory_dijkstra(v1, embedding, knn_hnsw, v0.full_graph_shortpath,
np.arange(0, len(true_label)), X_in)
plt.show()
def run_generic_discon(foldername ="/home/shobi/Trajectory/Datasets/Toy4/"):
df_counts = pd.read_csv(foldername + "toy_disconnected_M9_n1000d1000.csv",
delimiter=",")
df_ids = pd.read_csv(foldername + "toy_disconnected_M9_n1000d1000_ids.csv", delimiter=",")  # api: pandas.read_csv
import turtle
import pandas
screen = turtle.Screen()
screen.title("US States Game")
image = "blank_states_img.gif"
screen.addshape(image)
turtle.shape(image)
data = pandas.read_csv("50_states.csv")  # api: pandas.read_csv
import pandas as pd
import instances.dinamizators.dinamizators as din
import math
def simplest_test():
'''
Test if the dinamizators are running
'''
df = (
pd.read_pickle('./instances/analysis/df_requests.zip')
.reset_index()
)
din.dinamize_as_berbeglia(df.pickup_location_x_coord,
df.pickup_location_y_coord,
df.delivery_location_x_coord,
df.delivery_location_y_coord,
df.pickup_upper_tw,
df.delivery_upper_tw,
df.pickup_service_time,
0.5,
60)
din.dinamize_as_pureza_laporte(df.depot_location_x,
df.depot_location_y,
df.pickup_location_x_coord,
df.pickup_location_y_coord,
df.pickup_lower_tw,
df.pickup_upper_tw,
0)
din.dinamize_as_pankratz(df.depot_location_x,
df.depot_location_y,
df.pickup_location_x_coord,
df.pickup_location_y_coord,
df.delivery_location_x_coord,
df.delivery_location_y_coord,
df.pickup_upper_tw,
df.delivery_upper_tw,
df.pickup_service_time,
0.5)
din.dinamize_as_fabri_recht(df.pickup_location_x_coord,
df.pickup_location_y_coord,
df.delivery_location_x_coord,
df.delivery_location_y_coord,
df.pickup_lower_tw,
df.delivery_upper_tw)
def test_calculate_travel_time():
pickup_location_x_coord = -1
pickup_location_y_coord = -1
delivery_location_x_coord = 1
delivery_location_y_coord = 1
expected_travel_time = math.ceil(math.sqrt(2) + math.sqrt(2))
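# Worked check: the Euclidean distance from (-1, -1) to (1, 1) is
# sqrt(2**2 + 2**2) = 2*sqrt(2) ~= 2.83, and sqrt(2) + sqrt(2) is that same
# quantity, so math.ceil rounds the expected travel time up to 3.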
calculated_travel_time = (
din.calculate_travel_time(
pickup_location_x_coord,
pickup_location_y_coord,
delivery_location_x_coord,
delivery_location_y_coord)
)
assert (expected_travel_time == calculated_travel_time)
def test_series_elementwise_max():
x = pd.Series([1, 2, 3])  # api: pandas.Series
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from FileUtil import FileUtil
from Util import Util
from constants import Constants
from pathlib import Path
import scipy.stats as stats
import seaborn as sns
import stats.CliffDelta as cld
from scipy import stats
class SLOCSelection:
def __init__(self, apply_change_filter=False):
self.apply_change_filter = apply_change_filter
def process(self, refAge, age_threshold, apply_change_filter=False, exclude_method=True):
result = []
print("Start creating sloc selection dataset ....")
if apply_change_filter:
outFile = Constants.BASE_PATH + "sloc/sloc_design_onlyChangedM_" + str(
age_threshold) + ".json"
else:
outFile = Constants.BASE_PATH + "sloc/sloc_design_" + str(
age_threshold) + ".json"
for json_file in Path(Constants.PROCESSED_DATA).rglob('*'):
repo = json_file.name.replace(".json", "")
data = FileUtil.load_json(json_file)
bugdata = FileUtil.load_json(Constants.BASE_PATH + "bugData/" + repo + ".json")
for m in data:
method = data[m]
# consider only methods with at least one revision (more than one change date)
if apply_change_filter:
if len(method["changeDates"]) == 1:
continue
if exclude_method:
if method["Age"] < refAge:
continue
introSLOC = method["sloc"][0]
slocs = []
lastSLOC = None
mtdBugData = bugdata[m]
bugs = 0
total = 0
track = 0
for i in range(1, len(method["changeDates"])):
track = 1
total += 1
if mtdBugData["exactBug0Match"][i] == True:
bugs += 1
if method["changeDates"][i] > age_threshold:
lastSLOC = method["sloc"][i - 1]
break
slocs.append(method["sloc"][i])
if lastSLOC == None:
lastSLOC = method["sloc"][-1]
if track == 0 or len(slocs) == 0:
slocs = [method["sloc"][0]]
intro = introSLOC
last = lastSLOC
slocs = np.array(slocs)
avg = np.mean(slocs)
median = np.median(slocs)
std = np.std(slocs)
result.append({
"intro": int(intro),
"last": int(last),
"avg": float(avg),
"median": float(median),
"std": float(std),
"bug": int(bugs),
"totalChange": int(total),
"isGetterSetter": method["isGetter"][0] or method["isSetter"][0],
"repo": method['repo']
})
FileUtil.save_json(outFile, result)
print("Done preparing dataset for sloc selection....")
def apply_stats(self, x1, x2, label, repo, stats_to_apply="kendall"):
if stats_to_apply == "kendall":
corr, p_value = stats.kendalltau(x1, x2)
elif stats_to_apply == 'spearman':
corr, p_value = stats.spearmanr(x1, x2)
else:
corr, p_value = stats.pearsonr(x1, x2)
return {
"corr": round(corr, 2),
"p_value": p_value,
"significant": 'yes' if p_value < 0.05 else "no",
"group": label,
"repo": repo,
"type": stats_to_apply
}
def calculate_corr(self, age_threshold, apply_change_filter=False):
print("Start computing corr for sloc selection....")
if apply_change_filter:
result = FileUtil.load_json(
Constants.BASE_PATH + "sloc/sloc_design_onlyChangedM_" + str(age_threshold) + ".json")
outFile = Constants.BASE_PATH + "sloc/sloc_design_corr_onlyChangedM_" + str(
age_threshold) + ".csv"
else:
result = FileUtil.load_json(
Constants.BASE_PATH + "sloc/sloc_design_" + str(age_threshold) + ".json")
outFile = Constants.BASE_PATH + "sloc/sloc_design_corr_" + str(age_threshold) + ".csv"
df = pd.DataFrame.from_dict(result)  # api: pandas.DataFrame.from_dict
#!/usr/bin/python3
import sys
import os
import shutil
import csv
import zipfile
import pandas as pd
import glob
infile = sys.argv[1]
outfile = sys.argv[2]
# remove holding_folder if it exists, and create new folder
# use 'rm -r /holding_folder/*' in shell script instead?
holding_path = '/media/secure_volume/holding_folder'
if os.path.isdir(holding_path):
shutil.rmtree(holding_path)
os.mkdir(holding_path)
def extract(infile):
'''
Merges bioindex.tsv with the infile (balanced data),
finds the volsplit.zip location for each bio file and
extracts the files into secure_volume/holding_folder.
'''
bioindex = pd.read_csv('/media/secure_volume/index/bioindex.tsv', sep='\t')
balanced_bioindex = pd.read_table(infile)  # api: pandas.read_table
import glob
import os
import sys
import numpy
import pandas
#METADATA_FILENAME = "20220121 Overview CG plates and compounds _small.xlsx"
METADATA_FILENAME = sys.argv[1]
## if you want to evaluate only a single imaging, do like this:
# QUERY = "`imaging campaign ID` == 'CQ1-ctf004-t12'"
## if you want to select all imagings, use:
QUERY = "ilevel_0 in ilevel_0"
## if you want to do selected imagings, use:
#QUERY = "`imaging campaign ID` == 'CQ1-ctf004-t0' or `imaging campaign ID` == 'CQ1-ctf004-t12'"
DO_I_HAVE_TO_MERGE_FILES_FIRST = True
def gather_csv_data_into_one_file(path_to_csv_files, output_filename = "output"):
print(f"merging csv files in {path_to_csv_files} ...")
filenames = glob.glob(f"{path_to_csv_files}/*Stats*.csv")
filenames = list([os.path.basename(f) for f in filenames])
if len(filenames)==0:
print("ERROR: no csv files found in the indicated location!")
raise FileNotFoundError("no csv files found in the indicated location!")
keys_of_files = [i[:-4] for i in filenames]
## check for titles longer than 31 characters -- some applications may not be able to read the file
keys_of_files_shortened = list(key[:31] for key in keys_of_files)
if len(set(keys_of_files_shortened)) < len(keys_of_files):
raise Exception("output sheet names would collide after truncation to 31 characters")
df_collect_all = None
for i, (filename_basename, filename_shortened) in enumerate(zip(keys_of_files, keys_of_files_shortened), start=1):
filename = filename_basename + ".csv"
print(f"Acting on file {i} of {len(keys_of_files)} ({filename})...")
df = pandas.read_csv(os.path.join(path_to_csv_files, filename))
RECOGNIZE_RELEVANT_COLUMN_WITH_THIS_STRING = '] Count'
column_names_which_contain_the_word_count = [col for col in df.columns if
RECOGNIZE_RELEVANT_COLUMN_WITH_THIS_STRING in col]
assert len(column_names_which_contain_the_word_count) == 1
#print(column_names_which_contain_the_word_count)
WHAT_TO_PUT_IN_FRONT_OF_NEW_NAME_OF_RELEVANT_COLUMN = "Cell_Count_"
new_name_of_relevant_column = f"{WHAT_TO_PUT_IN_FRONT_OF_NEW_NAME_OF_RELEVANT_COLUMN}{filename_shortened}"
df_renamed = df.rename(columns={ column_names_which_contain_the_word_count[0]: new_name_of_relevant_column })
#print(df_renamed)
MERGE_IF_THOSE_COLUMNS_ARE_EXACT_MATCHES = [
# "ID" is not the same in all files...
"WellID",
"Row",
"Column",
"RowName",
"ColumnName",
"WellName",
"DateTime",
"Timepoint",
"ElapsedTime",
"Description",
]
KEEP_THOSE_COLUMNS_INITIALLY = [
# "ID" is not the same in all files...
"WellID",
"Row",
"Column",
"RowName",
"ColumnName",
"WellName",
"DateTime",
"Timepoint",
"ElapsedTime",
"Description"
]
if df_collect_all is None:
df_collect_all = df_renamed[KEEP_THOSE_COLUMNS_INITIALLY]
df_collect_all["well name"] = df_renamed["WellName"].str.replace("-","")
for col in MERGE_IF_THOSE_COLUMNS_ARE_EXACT_MATCHES:
for x, y in zip(df_collect_all[col].values, df_renamed[col].values):
if pandas.isna(x) and pandas.isna(y):
continue
assert x == y, f"I expected that all tables would have the exactly same structure, but this is not the case: '{x}' != '{y}' "
assert not new_name_of_relevant_column in df_collect_all.columns
df_collect_all[new_name_of_relevant_column] = df_renamed[new_name_of_relevant_column]
print("Writing the file...")
df_collect_all.to_excel(output_filename, index=False)
print("...done.")
return df_collect_all
#### --- GET THE COMPOUND IDENTIFIERS ---
df_batches = pandas.read_excel(METADATA_FILENAME, sheet_name="compound batches")
df_compounds = pandas.read_excel(METADATA_FILENAME, sheet_name="compounds")
df_identifier = df_batches.merge(df_compounds, how="left", on="compound ID", validate="m:1")
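# validate="m:1" makes pandas check that 'compound ID' is unique in the
# compounds sheet, so each compound batch matches at most one compound record.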
## store expanded compound maps
print("expanding the compound maps...")
df_experiments = pandas.read_excel(METADATA_FILENAME, sheet_name="experiments")
compound_map_dict = {}
for see, _ in df_experiments.groupby("compound map see corresponding excel table"):
print(f"Expanding compound map '{see}'...")
    df_compound_map = pandas.read_excel(METADATA_FILENAME, sheet_name=f"compound map {see}")  # api: pandas.read_excel
# -*- coding: utf-8 -*-
# tools
import pandas as pd
import numpy as np
# interfacedb
from . import interfacedb
# datetime
import datetime
class HomeView:
DATE_TIME_COL = 'datetime'
MATCH_STATUS = 'status' # etat du match
# started
# nostarted
STARTED = 'STARTED'
NOSTARTED = 'NOSTARTED'
def __init__(self):
# les metasdatas
self.metadata = interfacedb.modelRequest.get_prediction_datas()
matchs_table = interfacedb.modelTables.matchs
def add_datetime_column():
self.metadata[matchs_table.date] = pd.to_datetime(self.metadata[matchs_table.date])  # api: pandas.to_datetime
# Bellfort Sequence Parser
## Modules
import numpy as np
import pandas as pd
import tkinter as tk
from tkinter import ttk
import tkinter.font as tkf
from tkinter import messagebox
from tkinter import filedialog
import threading
import time
import os
import shutil
## Helper Functions
### Reverse Complement
def reverseComplement(sequence):
complement = {'A': 'T', 'T': 'A', 'C': 'G', 'G': 'C', 'N': 'N'}
rc_sequence=''
for s in sequence:
rc_sequence = complement[s] + rc_sequence
return rc_sequence
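# Minimal sanity check (illustrative; standard Watson-Crick pairing assumed):
# each base is complemented and the sequence is read back-to-front.
assert reverseComplement('ATGC') == 'GCAT'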
### FASTQ File Browse
def buttonBrowseFASTQ():
global filenameFASTQ, indicator_preprocess
try:
filenameFASTQ = filedialog.askopenfilename(filetypes=(('FASTQ files', '*.fastq'),
('All files', '*.*')))
text_fileFASTQ.delete('1.0', tk.END)
text_fileFASTQ.insert('1.0', filenameFASTQ.split('/')[-1])
# Reset the progress bar///////////////
progressbar['value'] = 0
progressbar_loadFASTQ['value'] = 0
# Reset the percentage
text_percentage.delete('1.0', tk.END)
text_percentage.insert('1.0', str('0%'))
indicator_preprocess = 0
except:
filenameFASTQ = ''
### FASTQ File Load
def loadFASTQ():
global reads
start_time = time.time()
f = open(filenameFASTQ)
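# A FASTQ record spans four lines: the '@' name line, the raw sequence,
# a '+' separator (read and discarded below), and the per-base quality string.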
reads = []
try:
while 1:
name = f.readline().rstrip()
sequence = f.readline().rstrip()
f.readline()
quality = f.readline().rstrip()
if len(name) == 0:
break
union = name, sequence
reads.append(union)
end_time = time.time()
delta_time = end_time - start_time
text_time.delete('1.0', tk.END)
text_time.insert('1.0', str(delta_time))
text_readNum.delete('1.0', tk.END)
text_readNum.insert('1.0', str(len(reads)))
except:
messagebox.showwarning("File Loading Failed",
"Sorry, file loading failed! Please check the file format.")
f.close()
def start_loadFASTQ_thread(event):
global loadFASTQ_thread
if filenameFASTQ != '':
loadFASTQ_thread = threading.Thread(target=loadFASTQ)
loadFASTQ_thread.daemon = True
progressbar_loadFASTQ.start(10)
loadFASTQ_thread.start()
root.after(20, check_loadFASTQ_thread)
else:
messagebox.showwarning("No File",
"Sorry, no file loaded! Please choose FASTQ file first.")
def check_loadFASTQ_thread():
if loadFASTQ_thread.is_alive():
progressbar_loadFASTQ.start(10)
root.after(20, check_loadFASTQ_thread)
else:
progressbar_loadFASTQ.stop()
progressbar_loadFASTQ['value']=100
messagebox.showinfo("FASTQ File Loaded", "FASTQ file successfully loaded!")
### Divide FASTQ File
def divideFASTQ():
start_time = time.time()
gotten = text_readNumDivided.get('1.0', tk.END)
readNumDivided = int(gotten.rstrip())
if os.path.exists(filenameFASTQ+'.folder'):
# Remove the folder previously made:
shutil.rmtree(filenameFASTQ+'.folder')
# Make a new one:
os.makedirs(filenameFASTQ+'.folder')
line_num = 0
file_no = 1
f_input = open(filenameFASTQ)
f_output = open(filenameFASTQ+'.folder/' + filenameFASTQ.split('/')[-1] + '__Slice_No_' + str(file_no) + '.fastq', 'w')
while 1:
# Input ///////////////////////////////////
name = f_input.readline()
sequence = f_input.readline()
f_input.readline()
quality = f_input.readline()
if len(name) == 0:
break
# Output ////////////////////////////////////
f_output.write(name)
f_output.write(sequence)
f_output.write('+\n')
f_output.write(quality)
line_num += 1
if line_num == readNumDivided:
f_output.close()
file_no += 1
f_output = open(filenameFASTQ+'.folder/' + filenameFASTQ.split('/')[-1] + '__Slice_No_' + str(file_no) + '.fastq', 'w')
line_num = 0
end_time = time.time()
delta_time = end_time - start_time
text_time.delete('1.0', tk.END)
text_time.insert('1.0', str(delta_time))
f_input.close()
f_output.close()
def start_divideFASTQ_thread(event):
global divideFASTQ_thread
if filenameFASTQ != '':
divideFASTQ_thread = threading.Thread(target=divideFASTQ)
divideFASTQ_thread.daemon = True
progressbar_loadFASTQ.start(10)
divideFASTQ_thread.start()
root.after(20, check_divideFASTQ_thread)
else:
messagebox.showwarning("No File",
"Sorry, no file loaded! Please choose FASTQ file first.")
def check_divideFASTQ_thread():
if divideFASTQ_thread.is_alive():
progressbar_loadFASTQ.start(10)
root.after(20, check_divideFASTQ_thread)
else:
progressbar_loadFASTQ.stop()
progressbar_loadFASTQ['value']=100
messagebox.showinfo("FASTQ File Divided", "FASTQ file has been successfully divided!")
### Preprocess
def preprocessFASTQ():
global reads, indicator_preprocess, kmer_dict_reads
try:
num = len(reads)
indicator_preprocess = 0
gain = 50/num
gotten = text_sequence_len.get('1.0', tk.END)
k = int(gotten.rstrip())
if k > len(reads[0][1]):
messagebox.showwarning("Target Sequence Length Error",
"Sorry, the target sequence length is more than read length. Please check.")
elif k < 3:
messagebox.showwarning("Sequence Too Short",
"Sorry, the target sequence length is too short which will make the program running slowly. Please check.")
elif filenameSequences == '':
messagebox.showwarning("No Sequences Loaded",
"Sorry, no sequences loaded! Please load sequences first.")
else:
kmer_dict_reads = {}
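# Two-pass k-mer index: the first loop registers every k-length substring of
# every read as a key with an empty set, the second loop fills each set with
# the reads containing that k-mer, so matchAll() can look up a target
# sequence (or its reverse complement) without rescanning the reads.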
start_time = time.time()
for read in reads:
for i in range(len(read[1])-k+1):
kmer_dict_reads[read[1][i:i+k]] = set()
indicator_preprocess += gain
for read in reads:
for i in range(len(read[1])-k+1):
kmer_dict_reads[read[1][i:i+k]].add(read)
indicator_preprocess += gain
indicator_progress = 100
# Add MatchAll Here ///////////////////////////////////////////////////////
matchAll()
end_time = time.time()
delta_time = end_time - start_time
text_time.delete('1.0', tk.END)
text_time.insert('1.0', str(delta_time))
messagebox.showinfo("Preprocess FASTQ & Count Matched Sequences Completed",
"Current FASTQ preprocess & matched sequence counts successfully completed!")
except NameError:
messagebox.showwarning("No FASTQ File Loaded",
"Sorry, no loaded FASTQ file found! Please load FASTQ file first.")
def start_preprocess_thread(event):
global preprocess_thread, indicator_preprocess
preprocess_thread = threading.Thread(target=preprocessFASTQ)
preprocess_thread.daemon = True
progressbar['value'] = indicator_preprocess
text_percentage.delete('1.0', tk.END)
text_percentage.insert('1.0', str(int(indicator_preprocess))+'%')
preprocess_thread.start()
root.after(20, check_preprocess_thread)
def check_preprocess_thread():
if preprocess_thread.is_alive():
progressbar['value'] = indicator_preprocess
text_percentage.delete('1.0', tk.END)
text_percentage.insert('1.0', str(int(indicator_preprocess))+'%')
root.after(20, check_preprocess_thread)
### Match All
def matchAll():
global kmer_dict_reads, indicator_matchAll, df
try:
len(kmer_dict_reads)
num = len(df)
if num == 0:
messagebox.showwarning("No Sequences Loaded",
"Sorry, no sequences loaded! Please load sequences first.")
else:
indicator_matchAll = 0
gain = 1000000/num
start_time = time.time()
arr = np.array(df)
for i in range(len(arr)):
key1 = arr[i,2]
key2 = reverseComplement(key1)
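# Count reads carrying the target sequence on either strand: the sequence
# itself (key1) or its reverse complement (key2).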
try:
n1 = len(kmer_dict_reads[key1])
except KeyError:
n1 = 0
try:
n2 = len(kmer_dict_reads[key2])
except KeyError:
n2 = 0
arr[i, 4] += n1 + n2
arr[i, 5] += 1
indicator_matchAll += gain
df = pd.DataFrame(arr, columns=['gene_id', 'UID', 'seq', 'Reserved', 'Count', 'Tag'])  # api: pandas.DataFrame
# Copyright 2021 The QUARK Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import glob
import itertools
import json
import logging
import re
from datetime import datetime
from pathlib import Path
import inquirer
import matplotlib.pyplot as plt
import matplotlib
from collections import defaultdict
import pandas as pd
import seaborn as sns
import yaml
from applications.PVC.PVC import PVC
from applications.SAT.SAT import SAT
from applications.TSP.TSP import TSP
matplotlib.rc('font', **{'family': 'serif', 'serif': ['Computer Modern']})
matplotlib.rc('font', family='serif')
matplotlib.rcParams['savefig.dpi'] = 300
sns.set_style('darkgrid')
sns.color_palette()
class BenchmarkManager:
"""
The benchmark manager is the main component of QUARK orchestrating the overall benchmarking process.
Based on the configuration, the benchmark manager will create an experimental plan considering all combinations of
configurations, e.g., different problem sizes, solver, and hardware combinations. It will then instantiate the
respective framework components representing the application, the mapping to the algorithmic formulation, solver,
and device. After executing the benchmarks, it collects the generated data and executes the validation and evaluation
functions.
"""
def __init__(self):
"""
Constructor method
"""
self.application = None
self.application_configs = None
self.results = []
self.mapping_solver_device_combinations = {}
self.repetitions = 1
self.store_dir = None
def generate_benchmark_configs(self) -> dict:
"""
Queries the user to get all needed information about application, solver, mapping, device and general settings
to run the benchmark.
:return: Benchmark Config
:rtype: dict
"""
application_answer = inquirer.prompt([inquirer.List('application',
message="What application do you want?",
choices=['TSP', 'PVC', 'SAT'],
default='PVC',
)])
if application_answer["application"] == "TSP":
self.application = TSP()
elif application_answer["application"] == "PVC":
self.application = PVC()
elif application_answer["application"] == "SAT":
self.application = SAT()
application_config = self.application.get_parameter_options()
application_config = BenchmarkManager._query_for_config(application_config,
f"(Option for {application_answer['application']})")
config = {
"application": {
"name": application_answer["application"],
"config": application_config
},
"mapping": {}
}
mapping_answer = inquirer.prompt([inquirer.Checkbox('mapping',
message="What mapping do you want?",
choices=self.application.get_available_mapping_options(),
# default=[self.application.get_available_mapping_options()[0]]
)])
for mapping_single_answer in mapping_answer["mapping"]:
mapping = self.application.get_mapping(mapping_single_answer)
mapping_config = mapping.get_parameter_options()
mapping_config = BenchmarkManager._query_for_config(mapping_config, f"(Option for {mapping_single_answer})")
solver_answer = inquirer.prompt([inquirer.Checkbox('solver',
message=f"What Solver do you want for mapping {mapping_single_answer}?",
choices=mapping.get_available_solver_options()
)])
config["mapping"][mapping_single_answer] = {
"solver": [],
"config": mapping_config
}
for solver_single_answer in solver_answer["solver"]:
solver = mapping.get_solver(solver_single_answer)
solver_config = solver.get_parameter_options()
solver_config = BenchmarkManager._query_for_config(solver_config,
f"(Option for {solver_single_answer})")
device_answer = inquirer.prompt([inquirer.Checkbox('device',
message=f"What Device do you want for solver {solver_single_answer}?",
choices=solver.get_available_device_options()
)])
config["mapping"][mapping_single_answer]["solver"].append({
"name": solver_single_answer,
"config": solver_config,
"device": device_answer["device"]
})
repetitions_answer = inquirer.prompt(
[inquirer.Text('repetitions', message="How many repetitions do you want?",
validate=lambda _, x: re.match(r"\d", x),
default=self.repetitions
)])
config['repetitions'] = int(repetitions_answer["repetitions"])
logging.info(config)
return config
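# Illustrative shape of the returned config (names and values are made up,
# but the keys mirror what load_config() below consumes):
# {
#     "application": {"name": "TSP", "config": {...}},
#     "mapping": {
#         "<mapping name>": {
#             "config": {...},
#             "solver": [{"name": "<solver>", "config": {...}, "device": ["<device>"]}]
#         }
#     },
#     "repetitions": 1
# }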
def load_config(self, config: dict) -> None:
"""
Uses the config file to generate all class instances needed to run the benchmark.
:param config: valid config file
:type config: dict
:rtype: None
"""
logging.info(config)
if config["application"]["name"] == "TSP":
self.application = TSP()
elif config["application"]["name"] == "PVC":
self.application = PVC()
elif config["application"]["name"] == "SAT":
self.application = SAT()
self.repetitions = int(config["repetitions"])
# Build all application configs
keys, values = zip(*config['application']['config'].items())
self.application_configs = [dict(zip(keys, v)) for v in itertools.product(*values)]
self.mapping_solver_device_combinations = {}
for mapping_name, mapping_value in config['mapping'].items():
mapping = self.application.get_mapping(mapping_name)
if len(mapping_value['config'].items()) > 0:
keys, values = zip(*mapping_value['config'].items())
mapping_config = [dict(zip(keys, v)) for v in itertools.product(*values)]
else:
mapping_config = [{}]
self.mapping_solver_device_combinations[mapping_name] = {
"mapping_instance": mapping,
"mapping_config": mapping_config,
"solvers": {}
}
for single_solver in mapping_value['solver']:
# Build all solver configs
if len(single_solver['config'].items()) > 0:
keys, values = zip(*single_solver['config'].items())
solver_config = [dict(zip(keys, v)) for v in itertools.product(*values)]
else:
solver_config = [{}]
solver = mapping.get_solver(single_solver['name'])
self.mapping_solver_device_combinations[mapping_name]["solvers"][single_solver['name']] = {
"solver_instance": solver,
"solver_config": solver_config
}
self.mapping_solver_device_combinations[mapping_name]["solvers"][single_solver['name']][
"devices"] = {}
for single_device in single_solver["device"]:
device_wrapper = solver.get_device(single_device)
self.mapping_solver_device_combinations[mapping_name]["solvers"][single_solver['name']][
"devices"][single_device] = device_wrapper
@staticmethod
def _query_for_config(config: dict, prefix: str = "") -> dict:
for key, config_answer in config.items():
if len(config_answer['values']) == 1:
# When there is only 1 value to choose from skip the user input for now
config[key] = config_answer['values']
else:
answer = inquirer.prompt(
[inquirer.Checkbox(key,
message=f"{prefix} {config_answer['description']}",
choices=config_answer['values']
)])
config[key] = answer[key] # TODO support strings
return config
def _create_store_dir(self, store_dir: str = None, tag: str = None) -> None:
"""
Creates directory for a benchmark run.
:param store_dir: Directory where the new directory should be created
:type store_dir: str
:param tag: prefix of the new directory
:type tag: str
:return:
:rtype: None
"""
if store_dir is None:
store_dir = Path.cwd()
self.store_dir = f"{store_dir}/benchmark_runs/{tag + '-' if tag is not None else ''}{datetime.today().strftime('%Y-%m-%d-%H-%M-%S')}"
Path(self.store_dir).mkdir(parents=True, exist_ok=True)
def orchestrate_benchmark(self, config: dict, store_dir: str = None) -> None:
"""
Executes the benchmarks according to the given settings.
:param config: valid config file
:type config: dict
:param store_dir: target directory to store the results of the benchmark (if you decided to store it)
:type store_dir: str
:rtype: None
"""
# TODO Make this nicer
self.load_config(config)
self._create_store_dir(store_dir, tag=self.application.__class__.__name__.lower())
logger = logging.getLogger()
formatter = logging.Formatter("%(asctime)s [%(levelname)s] %(message)s")
fh = logging.FileHandler(f"{self.store_dir}/logger.log")
fh.setFormatter(formatter)
logger.addHandler(fh)
logging.info(f"Created Benchmark run directory {self.store_dir}")
with open(f"{self.store_dir}/config.yml", 'w') as fp:
yaml.dump(config, fp)
try:
for idx, application_config in enumerate(self.application_configs):
problem = self.application.generate_problem(application_config)
results = []
path = f"{self.store_dir}/application_config_{idx}"
Path(path).mkdir(parents=True, exist_ok=True)
with open(f"{path}/application_config.json", 'w') as fp:
json.dump(application_config, fp)
self.application.save(path)
for mapping_name, mapping_value in self.mapping_solver_device_combinations.items():
mapping = mapping_value["mapping_instance"]
for mapping_config in mapping_value['mapping_config']:
for solver_name, solver_value in mapping_value["solvers"].items():
solver = solver_value["solver_instance"]
for solver_config in solver_value['solver_config']:
for device_name, device_value in solver_value["devices"].items():
device = device_value
for i in range(1, self.repetitions + 1):
mapped_problem, time_to_mapping = mapping.map(problem, mapping_config)
try:
logging.info(
f"Running {self.application.__class__.__name__} with config {application_config} on solver {solver.__class__.__name__} and device {device.get_device_name()} (Repetition {i}/{self.repetitions})")
solution_raw, time_to_solve = solver.run(mapped_problem, device,
solver_config, store_dir=path,
repetition=i)
processed_solution, time_to_reverse_map = mapping.reverse_map(solution_raw)
try:
processed_solution, time_to_process_solution = self.application.process_solution(
processed_solution)
solution_validity, time_to_validation = self.application.validate(
processed_solution)
except Exception as e:
solution_validity = False
time_to_process_solution = None
time_to_validation = None
if solution_validity:
solution_quality, time_to_evaluation = self.application.evaluate(
processed_solution)
else:
solution_quality = None
time_to_evaluation = None
results.append({
"timestamp": datetime.today().strftime('%Y-%m-%d-%H-%M-%S'),
"time_to_solution": sum(filter(None, [time_to_mapping, time_to_solve,
time_to_reverse_map,
time_to_process_solution,
time_to_validation,
time_to_evaluation])),
"time_to_solution_unit": "ms",
"time_to_process_solution": time_to_process_solution,
"time_to_process_solution_unit": "ms",
"time_to_validation": time_to_validation,
"time_to_validation_unit": "ms",
"time_to_evaluation": time_to_evaluation,
"time_to_evaluation_unit": "ms",
"solution_validity": solution_validity,
"solution_quality": solution_quality,
"solution_quality_unit": self.application.get_solution_quality_unit(),
"solution_raw": str(solution_raw),
# TODO Revise this (I am only doing this for now since json.dumps does not like tuples as keys for dicts)
"time_to_solve": time_to_solve,
"time_to_solve_unit": "ms",
"repetition": i,
"application": self.application.__class__.__name__,
"application_config": application_config,
"mapping_config": mapping_config,
"time_to_reverse_map": time_to_reverse_map,
"time_to_reverse_map_unit": "ms",
"time_to_mapping": time_to_mapping,
"time_to_mapping_unit": "ms",
"solver_config": solver_config,
"mapping": mapping.__class__.__name__,
"solver": solver.__class__.__name__,
"device_class": device.__class__.__name__,
"device": device.get_device_name()
})
with open(f"{path}/results.json", 'w') as fp:
json.dump(results, fp)
df = self._collect_all_results()
self._save_as_csv(df)
except Exception as e:
logging.error(f"Error during benchmark run: {e}", exc_info=True)
with open(f"{path}/error.log", 'a') as fp:
fp.write(
f"Solver: {solver_name}, Device: {device_name}, Error: {str(e)} (For more information take a look at logger.log)")
fp.write("\n")
with open(f"{path}/results.json", 'w') as fp:
json.dump(results, fp)
# catching ctrl-c and killing network if desired
except KeyboardInterrupt:
logger.info("CTRL-C detected. Still trying to create results.csv.")
df = self._collect_all_results()
self._save_as_csv(df)
def _collect_all_results(self) -> pd.DataFrame:
"""
Collect all results from the multiple results.json.
:return: a pandas dataframe
:rtype: pd.Dataframe
"""
dfs = []
for filename in glob.glob(f"{self.store_dir}/**/results.json"):
dfs.append(pd.read_json(filename, orient='records'))
if len(dfs) == 0:
logging.error("No results.json files could be found! Probably an error was previously happening.")
return pd.concat(dfs, axis=0, ignore_index=True)
def _save_as_csv(self, df: pd.DataFrame) -> None:
"""
Save all the results of this experiments in a single CSV.
:param df: Dataframe which should be saved
:type df: pd.Dataframe
"""
# Since these configs are dicts it is not so nice to store them in a df/csv. But this is a workaround that works for now
df['application_config'] = df.apply(lambda row: json.dumps(row["application_config"]), axis=1)
df['solver_config'] = df.apply(lambda row: json.dumps(row["solver_config"]), axis=1)
df['mapping_config'] = df.apply(lambda row: json.dumps(row["mapping_config"]), axis=1)
df.to_csv(path_or_buf=f"{self.store_dir}/results.csv")
def load_results(self, input_dirs: list = None) -> pd.DataFrame:
"""
Load results from one or many results.csv files.
:param input_dirs: If you want to load more than 1 results.csv (default is just 1, the one from the experiment)
:type input_dirs: list
:return: a pandas dataframe
:rtype: pd.Dataframe
"""
if input_dirs is None:
input_dirs = [self.store_dir]
dfs = []
for input_dir in input_dirs:
for filename in glob.glob(f"{input_dir}/results.csv"):
dfs.append(pd.read_csv(filename, index_col=0, encoding="utf-8"))
df = pd.concat(dfs, axis=0, ignore_index=True)
df['application_config'] = df.apply(lambda row: json.loads(row["application_config"]), axis=1)
df['solver_config'] = df.apply(lambda row: json.loads(row["solver_config"]), axis=1)
df['mapping_config'] = df.apply(lambda row: json.loads(row["mapping_config"]), axis=1)
return df
def summarize_results(self, input_dirs: list) -> None:
"""
Helper function to summarize multiple experiments.
:param input_dirs: list of directories
:type input_dirs: list
:rtype: None
"""
self._create_store_dir(tag="summary")
df = self.load_results(input_dirs)
# Deep copy, else it messes with the json.loads in save_as_csv
self._save_as_csv(df.copy())
self.vizualize_results(df, self.store_dir)
def vizualize_results(self, df: pd.DataFrame, store_dir: str = None) -> None:
"""
Generates various plots for the benchmark.
:param df: pandas dataframe
:type df: pd.Dataframe
:param store_dir: directory where to store the plots
:type store_dir: str
:rtype: None
"""
if store_dir is None:
store_dir = self.store_dir
if len(df['application'].unique()) > 1:
logging.error("At the moment only 1 application can be visualized! Aborting plotting process!")
return
# Let's create some custom columns
df['configCombo'] = df.apply(lambda row: f"{row['mapping']}/\n{row['solver']}/\n{row['device']}", axis=1)
df, eval_axis_name = self._compute_application_config_combo(df)
df['solverConfigCombo'] = df.apply(
lambda row: '/\n'.join(
['%s: %s' % (key, value) for (key, value) in row['solver_config'].items()]) +
"\ndevice:" + row['device'] + "\nmapping:" + '/\n'.join(
['%s: %s' % (key, value) for (key, value) in row['mapping_config'].items()]), axis=1)
df_complete = df.copy()
df = df.loc[df["solution_validity"] == True]
if df.shape[0] < 1:
logging.warning("Not enough (valid) data to visualize results, skipping the plot generation!")
return
self._plot_overall(df, store_dir, eval_axis_name)
self._plot_solvers(df, store_dir, eval_axis_name)
self._plot_solution_validity(df_complete, store_dir)
@staticmethod
def _compute_application_config_combo(df: pd.DataFrame) -> (pd.DataFrame, str):
"""
Tries to infer the column and the axis name used for solution_quality in a smart way.
:param df: pandas dataframe
:type df: pd.Dataframe
:return: Dataframe and the axis name
:rtype: tuple(pd.DataFrame, str)
"""
column = df['application_config']
affected_keys = []
helper_dict = defaultdict(list)
# Try to find out which key in the dict change
for d in column.values: # you can list as many input dicts as you want here
for key, value in d.items():
helper_dict[key].append(value)
helper_dict[key] = list(set(helper_dict[key]))
for key, value in helper_dict.items():
# If there is more than 1 value and it is a float/int, then we can order it
if len(value) > 1: # and isinstance(value[0], (int, float))
affected_keys.append(key)
# def custom_sort(series):
# return sorted(range(len(series)), key=lambda k: tuple([series[k][x] for x in affected_keys]))
#
# # Sort by these keys
# df.sort_values(by=["application_config"], key=custom_sort, inplace=True)
if len(affected_keys) == 1:
# X axis name should be this and fixed parameters in parenthesis
df['applicationConfigCombo'] = df.apply(
lambda row: row['application_config'][affected_keys[0]],
axis=1)
axis_name = f"{affected_keys[0]}" if len(
helper_dict.keys()) == 1 else f"{affected_keys[0]} with {','.join(['%s %s' % (value[0], key) for (key, value) in helper_dict.items() if key not in affected_keys])}"
else:
df['applicationConfigCombo'] = df.apply(
lambda row: '/\n'.join(['%s: %s' % (key, value) for (key, value) in row['application_config'].items() if
key in affected_keys]), axis=1)
axis_name = None
return df, axis_name
@staticmethod
def _plot_solution_validity(df_complete: pd.DataFrame, store_dir: str) -> None:
"""
Generates plot for solution_validity.
:param df_complete: pandas dataframe
:type df_complete: pd.DataFrame
:param store_dir: directory where to store the plot
:type store_dir: str
:rtype: None
"""
def countplot(x, hue, **kwargs):
sns.countplot(x=x, hue=hue, **kwargs)
g = sns.FacetGrid(df_complete,
col="applicationConfigCombo")
g.map(countplot, "configCombo", "solution_validity")
g.add_legend(fontsize='7', title="Result Validity")
g.set_ylabels("Count")
g.set_xlabels("Solver Setting")
for ax in g.axes.ravel():
ax.set_xticklabels(ax.get_xticklabels(), rotation=90)
g.tight_layout()
plt.savefig(f"{store_dir}/plot_solution_validity.pdf", dpi=300)
plt.clf()
@staticmethod
def _plot_solvers(df: pd.DataFrame, store_dir: str, eval_axis_name: str) -> None:
"""
Generates plot for each individual solver.
:param eval_axis_name: name of the evaluation metric
:type eval_axis_name: str
:param df: pandas dataframe
:type df: pd.Dataframe
:param store_dir: directory where to store the plot
:type store_dir: str
:rtype: None
"""
def _barplot(data, x, y, hue=None, title="TBD", ax=None, order=None,
hue_order=None, capsize=None):
sns.barplot(x=x, y=y, hue=hue, data=data, ax=ax, order=order, hue_order=hue_order,
capsize=capsize) # , palette="Dark2"
plt.title(title)
return plt
for solver in df['solver'].unique():
figu, ax = plt.subplots(1, 2, figsize=(15, 10))
_barplot(
df.loc[df["solver"] == solver],
"applicationConfigCombo", "time_to_solve", hue='solverConfigCombo', order=None,
title=None, ax=ax[0])
_barplot(
df.loc[df["solver"] == solver],
"applicationConfigCombo", "solution_quality", hue='solverConfigCombo', order=None,
title=None, ax=ax[1])
ax[0].get_legend().remove()
# ax[1].get_legend().remove()
# plt.legend(bbox_to_anchor=[1.5, .5], loc=9, frameon=False, title="Solver Settings")
ax[1].legend(loc='center left', bbox_to_anchor=(1, 0.5), title="Solver Settings")
ax[0].set_xlabel(xlabel=eval_axis_name, fontsize=16)
ax[1].set_xlabel(xlabel=eval_axis_name, fontsize=16)
ax[0].set_ylabel(ylabel=df['time_to_solve_unit'].unique()[0], fontsize=16)
# ax[0].set_yscale('log', base=10)
ax[1].set_ylabel(ylabel=df['solution_quality_unit'].unique()[0], fontsize=16)
plt.suptitle(f"{solver}")
for ax in figu.axes:
matplotlib.pyplot.sca(ax)
# If column values are very long and of type string rotate the ticks
if (pd.api.types.is_string_dtype(df.applicationConfigCombo.dtype) or pd.api.types.is_object_dtype(
df.applicationConfigCombo.dtype)) and df.applicationConfigCombo.str.len().max() > 10:
plt.xticks(rotation=90)
ax.set_xlabel(
xlabel=f"{df['application'].unique()[0]} Config {'(' + eval_axis_name + ')' if eval_axis_name is not None else ''}",
fontsize=12)
figu.tight_layout()
# plt.suptitle("Edge Inference: Preprocessing")
# plt.subplots_adjust(top=0.92)
logging.info(f"Saving plot for solver {solver}")
plt.savefig(f"{store_dir}/plot_{solver}" + ".pdf")
plt.clf()
@staticmethod
def _plot_overall(df: pd.DataFrame, store_dir: str, eval_axis_name: str) -> None:
"""
Generates time and solution_quality plots for all solvers.
:param eval_axis_name: name of the evaluation metric
:type eval_axis_name: str
:param df: pandas dataframe
:type df: pd.Dataframe
:param store_dir: directory where to store the plot
:type store_dir: str
:rtype: None
"""
for metric in ["solution_quality", "time_to_solve"]:
needed_col_wrap = df['solver'].nunique()
g = sns.FacetGrid(df, col="solver", hue="solverConfigCombo", col_wrap=needed_col_wrap, legend_out=True)
if len(df.applicationConfigCombo.unique()) < 2:
g.map(sns.barplot, "applicationConfigCombo", metric,
order=df["applicationConfigCombo"])
else:
g.map(sns.lineplot, "applicationConfigCombo", metric, marker="X")
g.set(xticks=list(df.applicationConfigCombo.unique()),
xticklabels=list(df.applicationConfigCombo.unique()))
g.set_xlabels(
f"{df['application'].unique()[0]} Config {'(' + eval_axis_name + ')' if eval_axis_name is not None else ''}")
if metric == "time_to_solve":
g.set_ylabels(df['time_to_solve_unit'].unique()[0])
# for ax in g.axes:
# ax.set_yscale('log', basex=10)
else:
g.set_ylabels(df['solution_quality_unit'].unique()[0])
g.add_legend(fontsize='7')
# If column values are very long and of type string rotate the ticks
if (pd.api.types.is_string_dtype(df.applicationConfigCombo.dtype)  # api: pandas.api.types.is_string_dtype
#
# Copyright (C) 2019 Databricks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from distutils.version import LooseVersion
import pandas as pd
from pandas.api.types import CategoricalDtype
import databricks.koalas as ks
from databricks.koalas.testing.utils import ReusedSQLTestCase, TestUtils
class CategoricalTest(ReusedSQLTestCase, TestUtils):
def test_categorical_frame(self):
pdf = pd.DataFrame(
{
"a": pd.Categorical([1, 2, 3, 1, 2, 3]),
"b": pd.Categorical(["a", "b", "c", "a", "b", "c"], categories=["c", "b", "a"]),
},
index=pd.Categorical([10, 20, 30, 20, 30, 10], categories=[30, 10, 20], ordered=True),
)
kdf = ks.from_pandas(pdf)
self.assert_eq(kdf, pdf)
self.assert_eq(kdf.a, pdf.a)
self.assert_eq(kdf.b, pdf.b)
self.assert_eq(kdf.index, pdf.index)
self.assert_eq(kdf.sort_index(), pdf.sort_index())
self.assert_eq(kdf.sort_values("b"), pdf.sort_values("b"))
def test_categorical_series(self):
pser = pd.Series([1, 2, 3], dtype="category")
kser = ks.Series([1, 2, 3], dtype="category")
self.assert_eq(kser, pser)
self.assert_eq(kser.cat.categories, pser.cat.categories)
self.assert_eq(kser.cat.codes, pser.cat.codes)
self.assert_eq(kser.cat.ordered, pser.cat.ordered)
def test_astype(self):
pser = pd.Series(["a", "b", "c"])
kser = ks.from_pandas(pser)
self.assert_eq(kser.astype("category"), pser.astype("category"))
self.assert_eq(
kser.astype(CategoricalDtype(["c", "a", "b"])),
pser.astype(CategoricalDtype(["c", "a", "b"])),
)
pcser = pser.astype(CategoricalDtype(["c", "a", "b"]))
kcser = kser.astype(CategoricalDtype(["c", "a", "b"]))
self.assert_eq(kcser.astype("category"), pcser.astype("category"))
if LooseVersion(pd.__version__) >= LooseVersion("1.2"):
self.assert_eq(
kcser.astype(CategoricalDtype(["b", "c", "a"])),
pcser.astype(CategoricalDtype(["b", "c", "a"])),
)
else:
self.assert_eq(
                kcser.astype(CategoricalDtype(["b", "c", "a"])),
                pser.astype(CategoricalDtype(["b", "c", "a"])),
            )
# ClinVarome annotation functions
# Gather all gene annotations: gene, gene_id,
# (AF, FAF,) diseases, clinical features, mechanism counts, nhomalt.
# Score genes according to their confidence criteria.
# The commented-out code is what is needed to run the AgglomerativeClustering.
import pandas as pd
import numpy as np
import pysam
from scipy.stats import poisson
# from sklearn.preprocessing import QuantileTransformer
# from sklearn.cluster import AgglomerativeClustering
from clinvarome.utils.dictionary import (
EFFECTS,
MC_CATEGORIES,
MC_SHORT,
# ARRAY_TRANSFORM,
# CLUSTER_NAMES,
)
import logging
# For logs
def get_logger(scope: str, level=logging.DEBUG):
"""
get_logger
"""
logging.basicConfig(
format="%(asctime)s - %(name)s - %(levelname)s - %(message)s", level=level
)
return logging.getLogger(scope)
logger = get_logger(__name__)
# Clinical features
def gather_clinical_features(record, gene_finding, gene_disease):
"""
update gene_finding and gene_disease dictionary using information from a VCF record
"""
geneinfo = record.info["GENEINFO"].split("|")[0].split(":")[0]
if "CLNDISEASE" in record.info:
clndisease = record.info["CLNDISEASE"][0].split("|")
gene_disease.setdefault(geneinfo, [])
gene_disease[geneinfo].append(clndisease)
if "CLNFINDING" in record.info:
clnfinding = record.info["CLNFINDING"][0].split("|")
gene_finding.setdefault(geneinfo, [])
gene_finding[geneinfo].append(clnfinding)
def get_clinical_dataframe(gene_disease, gene_finding):
"""
Process dictionary output from gather_clinical_features function
into a dataframe
"""
for key, value in gene_disease.items():
flat_list = [j for i in value for j in i]
gene_disease[key] = ";".join(sorted(list(set(flat_list))))
gene_disease_df = pd.DataFrame(
gene_disease.items(), columns=["gene_info", "clinical_disease"]
)
for key, value in gene_finding.items():
flat_list = [j for i in value for j in i]
gene_finding[key] = ";".join(sorted(list(set(flat_list))))
gene_finding_df = pd.DataFrame(
gene_finding.items(), columns=["gene_info", "clinical_finding"]
)
gene_features = gene_disease_df.merge(gene_finding_df, how="outer")
return gene_features
# FAF
def calcul_max_AF(AC, AN):
"""
    For a given AC and AN, compute the maximum AF, i.e. the
    upper bound of the Poisson 95 % CI.
"""
if (AC == 0) and (AN != 0):
max_AF_pois = 1 / AN
elif (AC != 0) and (AN != 0):
max_AC_pois = poisson.ppf(0.95, AC)
max_AF_pois = float(max_AC_pois / AN)
else:
max_AF_pois = 0
return max_AF_pois
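# Illustrative sketch of what calcul_max_AF returns (hypothetical counts, not real data):
# for an allele seen AC=5 times among AN=10000 alleles, poisson.ppf(0.95, 5) is 9, so the
# reported maximum AF is 9 / 10000 = 9e-4; with AC=0 the bound falls back to 1/AN.
#
#   calcul_max_AF(5, 10000)   # -> 0.0009
#   calcul_max_AF(0, 10000)   # -> 0.0001
#   calcul_max_AF(0, 0)       # -> 0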
def gather_dict_gene_max_AF(record, gene_AF_pois_dict):
"""
Update the maximum FAF of a gene using information in a VCF record
"""
ls_AC = []
ls_AN = []
ls_AF_pois = []
geneinfo = record.info["GENEINFO"].split("|")[0].split(":")[0]
gene_AF_pois_dict.setdefault(geneinfo, [])
if "AC_afr" in record.info:
AC_afr = record.info["AC_afr"]
AC_amr = record.info["AC_amr"]
AC_nfe = record.info["AC_nfe"]
AC_eas = record.info["AC_eas"]
AN_afr = record.info["AN_afr"]
AN_amr = record.info["AN_amr"]
AN_nfe = record.info["AN_nfe"]
AN_eas = record.info["AN_eas"]
ls_AC = [AC_afr, AC_amr, AC_nfe, AC_eas]
ls_AN = [AN_afr, AN_amr, AN_nfe, AN_eas]
for k in range(0, len(ls_AC)):
ls_AF_pois.append(calcul_max_AF(ls_AC[k], ls_AN[k]))
max_af_pois = max(ls_AF_pois)
gene_AF_pois_dict[geneinfo].append(max_af_pois)
else:
gene_AF_pois_dict[geneinfo].append(0)
def get_AF_max_by_gene(gene_AF_dict):
"""For a given gene, return the maximum FAF (among its variants)
and get a dataframe."""
gene_AF_max = {}
for key, values in gene_AF_dict.items():
gene_max_AF = max(values)
gene_AF_max.setdefault(key, [])
gene_AF_max[key].append(gene_max_AF)
print(gene_AF_max)
gene_anno_pois = pd.DataFrame.from_dict(
gene_AF_max, orient="index", columns=["FAF"]
)
gene_anno_pois = gene_anno_pois.reset_index()
gene_anno_pois = gene_anno_pois.rename(columns={"index": "gene_info"})
print(gene_anno_pois)
return gene_anno_pois
# Molecular consequence counts
def mol_consequences_by_variant(record, gene_var_dict):
"""
Parse molecular consequences (mc) available for a variant and
return the highest predicted effect
"""
geneinfo = record.info["GENEINFO"].split("|")[0].split(":")[0]
gene_var_dict.setdefault(geneinfo, [])
if "MC" in record.info:
mc = record.info["MC"]
mc_only = [i.split("|")[1] for i in mc]
min_value = min([v for k, v in EFFECTS.items() if k in mc_only])
for key, value in EFFECTS.items():
if min_value == value:
gene_var_dict[geneinfo].append(MC_CATEGORIES[key])
break
else:
gene_var_dict[geneinfo].append("Not_provided")
def count_type_mol_consequences(gene_var_dict):
"""
    Count occurrences of molecular consequences (MC) from pathogenic
    variants for each gene
"""
gene_mc_count = {}
for key, values in gene_var_dict.items():
list_mc = []
for k in MC_SHORT.keys():
if k in values:
count = values.count(k)
list_mc.append([count, k])
gene_mc_count.setdefault(key, [])
gene_mc_count[key].append(list_mc)
return gene_mc_count
def get_mol_consequences_dataframe(gene_var_dict):
"""
    Format molecular consequence (MC) occurrences by gene into a dataframe.
"""
gene_mc_count = count_type_mol_consequences(gene_var_dict)
df_tot = pd.DataFrame()
for key, values in gene_mc_count.items():
for k in range(len(values[0])):
mecanism_dict = {}
mecanism_dict[key] = values[0][k]
df = pd.DataFrame.from_dict(
mecanism_dict, orient="index", columns=["count", "mecanism"]
)
df_tot = df_tot.append(df)
df_tot.index.name = "gene_info"
df_tot_piv = pd.pivot_table(
df_tot, values="count", index="gene_info", columns=["mecanism"], fill_value=0,
)
return df_tot_piv
# nhomalt annotation
def get_nhomalt(record, gene_nhomalt):
"""
    Return the count of homozygous alleles in gnomAD for a pathogenic variant.
"""
if "nhomalt" in record.info:
nhomalt = record.info["nhomalt"][0]
geneinfo = record.info["GENEINFO"].split("|")[0].split(":")[0]
gene_nhomalt.setdefault(geneinfo, [])
gene_nhomalt[geneinfo].append(nhomalt)
return gene_nhomalt
def get_max_nhomalt_by_gene(gene_nhomalt):
"""
    Get the maximum count of homozygous pathogenic alleles in gnomAD per gene.
Return a dataframe.
"""
gene_nhomalt_max = {}
for key, values in gene_nhomalt.items():
nhomalt_max = max(values)
gene_nhomalt_max.setdefault(key, [])
gene_nhomalt_max[key].append(nhomalt_max)
gene_nhomalt_max_df = pd.DataFrame.from_dict(
gene_nhomalt_max, orient="index", columns=["nhomalt"]
)
gene_nhomalt_max_df = gene_nhomalt_max_df.reset_index()
gene_nhomalt_max_df = gene_nhomalt_max_df.rename(columns={"index": "gene_info"})
return gene_nhomalt_max_df
# Gene date
def gene_first_pathogenic_entry_date(clinvarome_df, compare_gene):
"""
    Return the first occurrence of a (likely) pathogenic variant for a gene in ClinVar.
"""
compare_gene_df = pd.read_csv(compare_gene, sep="\t", compression="gzip")
compare_gene_df = compare_gene_df[
compare_gene_df["pathogenic_class_status"] == "NEW_PATHOGENICITY"
]
compare_gene_df = compare_gene_df.sort_values(by="name_clinvar_new", ascending=True)
compare_gene_df.drop_duplicates("gene_info", inplace=True)
clinvar_gene = clinvarome_df[["gene_info"]].merge(
compare_gene_df[["gene_info", "name_clinvar_new"]], on="gene_info", how="outer",
)
clinvar_gene.fillna(value="20170703", inplace=True)
clinvar_gene["name_clinvar_new"] = pd.to_datetime(
clinvar_gene["name_clinvar_new"], format="%Y%m%d"
)
clinvar_gene.rename(
columns={"name_clinvar_new": "first_path_var_date"}, inplace=True
)
return clinvar_gene
def gene_latest_pathogenic_entry_date(clinvarome_df, compare_variant):
"""
    Return the last occurrence of a (likely) pathogenic variant for a gene in ClinVar.
"""
    compare_variant_df = pd.read_csv(compare_variant, sep="\t", compression="gzip")
import re
import numpy as np
import pandas as pd
from arc._common import prob_metric_cal
import matchzoo as mz
from arc.anmm_impl import anmm_train
from arc.arci_impl import arci_train
from arc.arcii_impl import arcii_train
from arc.bimpm_impl import bimpm_train
from arc.cdssm_impl import cdssm_train
from arc.conv_knrm_impl import conv_knrm_train
from arc.diin_impl import diin_train
from arc.drmm_impl import drmm_train
from arc.drmmtks_impl import drmmtks_train
from arc.dssm_impl import dssm_train
from arc.duet_impl import duet_train
from arc.esim_impl import esim_train
from arc.hbmp_impl import hbmp_train
from arc.knrm_impl import knrm_train
from arc.match_lstm_impl import match_lstm_train
from arc.match_pyramid_impl import match_pyramid_train
from arc.match_srnn_impl import match_srnn_train
from arc.mv_lstm_impl import mv_lstm_train
from utils.util_params import arc_params_control
def trans_text(str_data_list):
res = []
for str_data in str_data_list:
str_list = re.findall('\d+', str_data)
num_list = list(map(int, str_list))
num_arr = np.array(num_list, dtype=np.float32)
res.append(num_arr)
print('Shape of text: ', np.array(res).shape)
return res
def trans_ngram(str_data_list, ngram=3):
res = []
for str_data in str_data_list:
str_list = re.findall('\d+', str_data)
num_list = list(map(int, str_list))
num_arr = []
for i in range(len(num_list)):
if i < len(num_list) - ngram + 1:
gram = num_list[i: i + ngram]
else:
gram = num_list[i: len(num_list)] + [0] * (ngram - (len(num_list) - i))
num_arr.append(gram)
res.append(np.array(num_arr, dtype=np.float))
print('Shape of n-gram: ', np.array(res).shape)
return res
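# Example of the padding behaviour (hypothetical input, for illustration only):
# trans_ngram(["[1 2 3 4]"], ngram=3) yields one array per input string, where the
# window slides over the token ids and the tail windows are right-padded with zeros:
#   [[1. 2. 3.]
#    [2. 3. 4.]
#    [3. 4. 0.]
#    [4. 0. 0.]]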
def trans_hist(str_data_list_left, str_data_list_right, bin_size):
res_left = trans_ngram(str_data_list_left, 5)
res_right = trans_ngram(str_data_list_right, 5)
res_len = len(res_right[0])
for left_text, right_text in zip(res_left, res_right):
for i in range(res_len):
score_list = []
for j in range(res_len):
score = np.dot(left_text[i], right_text[j]) / (np.linalg.norm(left_text[i]) * (np.linalg.norm(right_text[j])))
score_list.append(score)
# print('Shape of n-gram: ', np.array(res).shape)
# return res
def trans_pd(file_name, arc, params):
pd_data = pd.read_csv(file_name)
id_left_list = pd_data['id_left'].values
text_left_list = trans_text(pd_data['text_left'].values)
length_left_list = list(map(int, pd_data['length_left'].values))
id_right_list = pd_data['id_right'].values
text_right_list = trans_text(pd_data['text_right'].values)
length_right_list = list(map(int, pd_data['length_right'].values))
label_list = list(map(float, pd_data['label'].values))
if arc == 'dssm':
# ngram_left_list = trans_ngram(pd_data['text_left'].values, params['ngram'])
# ngram_right_list = trans_ngram(pd_data['text_right'].values, params['ngram'])
data = {'id_left': pd.Series(id_left_list),
'text_left': pd.Series(text_left_list),
'ngram_left': pd.Series(text_left_list),
'length_left': pd.Series(length_left_list),
'id_right': pd.Series(id_right_list),
'text_right': pd.Series(text_right_list),
                'ngram_right': pd.Series(text_right_list),
                'length_right': pd.Series(length_right_list),
                'label': pd.Series(label_list)}
"""Personal Challenge_Draft.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1-25-B3CO6yVCH9u2vgbhIjyyFeU3tJ3w
"""
# Working environment set up
import pandas as pd
from sklearn.metrics import accuracy_score
from sklearn.feature_extraction.text import CountVectorizer
import string
import nltk
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
from nltk.stem import WordNetLemmatizer
import seaborn as sns
from nltk.corpus import wordnet
import matplotlib.pyplot as plt
from matplotlib.legend_handler import HandlerLine2D
import numpy as np
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import RepeatedStratifiedKFold
from sklearn.ensemble import RandomForestClassifier
nltk.download('stopwords')
nltk.download('punkt')
nltk.download('wordnet')
nltk.download('averaged_perceptron_tagger')
def load_data():
'''
This function will separately return the features and response variable for the input data
'''
data = pd.read_csv('data.csv')
X = data['Lyric']
y = data['Genre']
return X, y
# Use pos_tag to get the type of the world and then map the tag to the format wordnet lemmatizer would accept.
def get_wordnet_pos(word):
"""Map POS tag to first character lemmatize() accepts"""
tag = nltk.pos_tag([word])[0][1][0].upper()
tag_dict = {"J": wordnet.ADJ,
"N": wordnet.NOUN,
"V": wordnet.VERB,
"R": wordnet.ADV}
return tag_dict.get(tag, wordnet.NOUN)
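# Illustrative usage (behaviour depends on the NLTK tagger): nltk.pos_tag(["running"])
# usually yields a verb tag ("VBG"), so get_wordnet_pos("running") returns wordnet.VERB
# and WordNetLemmatizer().lemmatize("running", wordnet.VERB) gives "run"; unknown tags
# fall back to wordnet.NOUN.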
def transform_data():
'''
    This function transforms the features and returns the count-vectorized features.
Steps are:
1. Remove punctuations
2. Tokenize
3. Lemmatization
4. Remove stop words
5. CountVectorize
'''
X, y = load_data()
X = X.apply(lambda x: x.translate(str.maketrans('', '', string.punctuation))) # To remove the punctuations
X_Tokenize = X.apply(lambda x: word_tokenize(x)) # To tokenize
lemmatizer = WordNetLemmatizer()
X_lemmatize = X_Tokenize.apply(lambda x: ' '.join([lemmatizer.lemmatize(w, pos='v') for w in x]))
stop_words = set(stopwords.words('english'))
stop_words_more = ('10', '100', '20', '2x', '3x', '4x', '50', 'im') # Add more stop words
    stop_words.update(stop_words_more)  # set.add() returns None, so use update() to extend the set in place
CountVect = CountVectorizer(stop_words=stop_words, min_df=300, lowercase=True, ngram_range=(1, 1))
Transformmed_array = CountVect.fit_transform(X_lemmatize)
X_vectorized = pd.DataFrame(Transformmed_array.toarray(), columns=CountVect.get_feature_names())
return X_vectorized, y
def EDA_visualize(X, y, N):
'''
    :param X: the features to train on
    :param y: the Genre labels to train on
    :param N: the number of most frequent words to report for each Genre
    :return: 1. A barplot visualizing the counts for each class of y 2. The N most frequent words for each class of y
'''
sns.catplot(x='Genre', kind='count', data=pd.DataFrame(y[:50000]))
DF_Combine = pd.concat([X, y], axis=1)
    DF_nlargest = pd.DataFrame(np.ones((3, 1)), columns=['exm'], index=['Hip Hop', 'Pop', 'Rock'])  # Initialize
for value in DF_Combine.columns[:-1]:
DF_nlargest[value] = pd.DataFrame(DF_Combine.groupby('Genre')[value].sum())
print(DF_nlargest.apply(lambda s, n: s.nlargest(n).index, axis=1, n=N))
# X_temp, y_temp = transform_data()
def TuneParameter_visulize(X_train, y_train, X_hold, y_hold):
'''
    It returns several plots that help tune parameters.
    Parameters considered are:
1. max_depth
2. n_estimators
3. max_features...
Todo: plotting more parameters
'''
# Tune max_depth
max_depths = np.linspace(10, 200, 15, endpoint=True)
train_results = []
validation_results = []
for depth in max_depths:
rf = RandomForestClassifier(max_depth=depth, n_jobs=-1)
rf.fit(X_train, y_train)
train_results.append(accuracy_score(y_train, rf.predict(X_train)))
validation_results.append(accuracy_score(y_hold, rf.predict(X_hold)))
    line1, = plt.plot(max_depths, train_results, 'b', label='Train accuracy')
    line2, = plt.plot(max_depths, validation_results, 'r', label='Estimated accuracy')
plt.legend(handler_map={line1: HandlerLine2D(numpoints=2)})
plt.ylabel('accuracy score')
plt.xlabel('Tree depth')
plt.show()
def main():
'''
    It will produce:
    1. EDA visualizations
    2. Plots visualizing the parameter tuning process
    3. A Series with the expected (holdout) accuracy
    4. A Series with the predicted y_test
'''
# Load data
X_input, y_input = transform_data()
# Train, holdset, test split
y_test = pd.DataFrame(y_input[-5000:], columns=['Genre'])
y_train = pd.DataFrame(y_input[:50000], columns=['Genre'])
X_train = pd.DataFrame(X_input.iloc[:50000, :], columns=X_input.columns)
X_test = pd.DataFrame(X_input.iloc[-5000:, :], columns=X_input.columns)
X_holdout_set = X_train.sample(5000, random_state=66)
y_holdout_set = y_train.iloc[X_holdout_set.index, :]
X_train_new = X_train.drop(X_holdout_set.index)
y_train_new = y_train.drop(X_holdout_set.index)
EDA_visualize(X_train, y_train, 10) # For EDA purpose
# Build classifier
'''
    The RF model is used, for a few reasons:
    1. An ensemble (bootstrap) approach can make stronger predictions without causing serious overfitting
    2. Compared with distance-based methods, it needs less data preprocessing (such as scaling)
    3. Non-parametric estimation
    However, it has some drawbacks:
    1. It may require a large max_features
    2. It may require deeper trees
    The drawbacks above directly lead to a large training workload.
'''
# TuneParameter_visulize(X_train_new,y_train_new, X_holdout_set, y_holdout_set) # Tune parameters
RF_Model = RandomForestClassifier(criterion='entropy', n_estimators=100, max_depth=56, max_features=666)
RF_Model.fit(X_train_new, y_train_new)
estimated_accuracy = accuracy_score(y_holdout_set, RF_Model.predict(X_holdout_set))
    estimated_accuracy = pd.Series(estimated_accuracy)
import pandas as pd
import numpy as np
import scipy
import os, sys, time, json, math
import matplotlib.pyplot as plt
import seaborn as sns
from functools import reduce
from os.path import join
from datetime import datetime
from scipy.integrate import odeint
from numpy import loadtxt
from scipy.optimize import minimize
rootDir = os.path.abspath(os.path.curdir)
print(rootDir)
sys.path.insert(0, os.path.join(rootDir, 'lib'))
## use JD's optimizer
#from systemSolver import optimizer as optimizer
from optimizer import Differential_Evolution
from getPatientData import getPatientData
import copy
from matplotlib.font_manager import FontProperties
#riskConfig = json.load(open('amgen-risk-model/amgen-risk-model/config/riskModel_3y_LipidCxoOptimized.json'))
# classConfig = json.load(open(riskConfig['patientClassConfig']))
classConfig = json.load(open('../config/lipidoptimizing.json'))
def differentialequations(I, t, p):
'''
This function has the differential equations of the lipids (LDL,
Total Cholesterol, Triglyceride, HDL)
Inputs:
I: Initial conditions
t: timepoints
p: parameters
'''
try:
# Initial conditions
Cldl, Cchol, Ctrig, Chdl = I
# Parameters
adherence, dose, Imaxldl, Imaxchol, Imaxtrig, Imaxhdl, Ic50, n, dx, Sx0ldl, Sx0chol, Sx0trig, Sx0hdl = p
t = np.round(t)
t = t.astype(int)
# print dose.shape
if t > (dose.shape[0] - 1):
t = (dose.shape[0] - 1)
div = (Ic50+(dose[t]*adherence[t])**n)
h0 = ((dose[t] * adherence[t])**n)
        # lipid equations
dCldldt = (Sx0ldl * (1 - np.sum((Imaxldl*h0)/div))) - (dx*Cldl)
dCcholdt = (Sx0chol * (1 - np.sum((Imaxchol*h0)/div))) - (dx*Cchol)
dCtrigdt = (Sx0trig * (1 - np.sum((Imaxtrig*h0)/div))) - (dx*Ctrig)
dChdldt = (Sx0hdl * (1 + np.sum((Imaxhdl*h0)/div))) - (dx*Chdl)
f = [dCldldt, dCcholdt, dCtrigdt, dChdldt]
return f
except Exception as e:
# print 'There was some problem with the differentialequations function: {}'.format(e)
print(dose.shape, t)
raise
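# Shape of the model implemented above: each lipid concentration C follows a turnover equation
#   dC/dt = Sx0 * (1 -/+ sum_j Imax_j * (dose_j * adherence_j)^n / (IC50_j + (dose_j * adherence_j)^n)) - dx * C
# i.e. a constant synthesis term Sx0 scaled by an Emax-type statin effect (inhibition for
# LDL/TC/TG, stimulation for HDL, hence the "+" sign) plus first-order elimination with
# rate dx = ln(2)/14, corresponding to a 14-day half-life.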
def differential_solve(adherence, t, Sx0ldl, Sx0chol, Sx0trig, Sx0hdl, Cldl0, Cchol0, Ctrig0, Chdl0, dose):
'''
This function solves the differential equations with odeint
Inputs:
adherence: patient's adherence for all the statins, 2-d numpy array
t: timepoints
Sx0: synthesis terms for all the lipids
C0: baseline values for all the lipids
dose: doses for all the statins, 2-d numpy array
'''
try:
dx = math.log(2)/14
ldl_eff = np.load('../data/final/Efficacy/ldl_efficacy.npy')
chol_eff = np.load('../data/final/Efficacy/tc_efficacy.npy')
trig_eff = np.load('../data/final/Efficacy/trig_efficacy.npy')
hdl_eff = np.load('../data/final/Efficacy/hdl_efficacy.npy')
Imaxldl = ldl_eff[0]
Imaxchol = chol_eff[0]
Imaxtrig = trig_eff[0]
Imaxhdl = hdl_eff[0]
# Imaxldl, Imaxchol, Imaxtrig, Imaxhdl = np.array([0,0,0,0,0,0]), np.array([0,0,0,0,0,0]), np.array([0,0,0,0,0,0]), np.array([0,0,0,0,0,0])
Ic50 = ldl_eff[1]
n = 0.7
I0 = [Cldl0, Cchol0, Ctrig0, Chdl0]
p = [adherence, dose, Imaxldl, Imaxchol, Imaxtrig, Imaxhdl, Ic50, n, dx, Sx0ldl, Sx0chol, Sx0trig, Sx0hdl]
sol = odeint(differentialequations, I0, t, args = (p,))
# print(sol)
Cldl = []
Cchol = []
Ctrig = []
Chdl = []
for s1 in sol:
Cldl.append(s1[0])
Cchol.append(s1[1])
Ctrig.append(s1[2])
Chdl.append(s1[3])
# print(Cldl)
return Cldl, Cchol, Ctrig, Chdl
except Exception as e:
# print('There was some problem with the differential_solve function: {}'.format(e))
raise
def adherence_coding(adherence, periods):
    ''' This function takes the adherence array, identifies where it is -1 and returns the
    pairs of rows and columns, the number of adherence windows and the window flags
    Parameters
    ----------
    adherence : {2-d numpy array for each patient}
        It has the adherence values for all the medications for each day
    periods : {1-d numpy array}
        It has the period identifiers for each day; zero entries are filtered out
    Returns
    -------
    pairs, windows, period_nonzero
        The [row, col] positions where adherence is -1, the number of distinct
        adherence windows, and the non-zero period identifiers
'''
try:
# print(periods_total)
period_nonzero = periods[periods!=0]
row, col = np.where(adherence==-1)
pairs = list(map(list, zip(row, col)))
windows = len(np.where(np.roll(period_nonzero,1)!=period_nonzero)[0])
if windows == 0:
windows = 1
else:
windows = windows
return pairs, windows, period_nonzero
except Exception as e:
print('There was some problem with the adherence_coding function: {}'.format(e))
def adherence_guess(adherence, pairs, values, flag):
try:
for i in range(len(flag)):
l = pairs[i]
adherence[l[0]][l[1]] = values[flag[i]-1]
return adherence
except Exception as e:
# print 'There was some problem with the adherence_guess function: {}'.format(e)
raise
def h0_cal(dose, Imax, Ic50, n, adherence):
try:
h0 = (Imax*((dose*adherence)**n))/(Ic50 + ((dose*adherence)**n))
if all(np.isnan(h0)):
h0[:] = 0
h0_dictionary = {'Atorvastatin':h0[0], 'Fluvastatin':h0[1], 'Lovastatin':h0[2],
'Pravastatin':h0[3], 'Rosuvastatin':h0[4], 'Simvastatin':h0[5]}
# print(h0_dictionary)
return h0_dictionary
except Exception as e:
print('There was some problem with the h0_cal function: {}'.format(e))
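# Rough feel for the Emax term above (hypothetical parameter values, not fitted ones):
# with Imax = 0.5, IC50 = 10 and n = 0.7, a fully adhered 20 mg dose gives
#   h0 = 0.5 * 20**0.7 / (10 + 20**0.7) ~ 0.22
# i.e. about a 22 % reduction of the corresponding synthesis term in the ODEs.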
def rmse_function(real_data,real_time,max_value, t, ode_solution):
try:
real_time = np.array(real_time)
weight = (1/max_value)**2
indices = []
for j in real_time:
k = np.where(t == j)[0][0]
# print(k)
indices.append(k)
ode_final_values = np.array(ode_solution)[indices]
# print(indices)
# quit()
# print(ode_final_values)
rmse = np.average(weight*((ode_final_values - np.array(real_data))**2))
return rmse
except Exception as e:
print('There was some problem with the rmse_function function: {}'.format(e))
def get_total_rmse_nonNorm(adherence, Sx0ldl, Sx0chol, Sx0trig, Sx0hdl, Cldl0, Cchol0, Ctrig0, Chdl0, dose, t_ldl, ldl, t_tc, tc, t_trig, trig, t_hdl, hdl,t):
try:
ldl_max = max(ldl)
tc_max = max(tc)
# if len(trig)>0:
# trig_max = max(trig)
# else:
# trig_max = 1
trig_max = 1 # max(trig)
hdl_max = 1 # max(hdl)
Cldl, Cchol, Ctrig, Chdl = differential_solve(adherence, t, Sx0ldl, Sx0chol, Sx0trig, Sx0hdl, Cldl0, Cchol0, Ctrig0, Chdl0, dose)
rmse_ldl = rmse_function(ldl, t_ldl, 1, t, Cldl)
rmse_tc = rmse_function(tc, t_tc, 1, t, Cchol)
# rmse_trig = rmse_function(trig, t_trig, trig_max, t, Ctrig)
rmse_trig = 0
rmse_hdl = 0 #rmse_function(hdl, t_hdl, 1, t, Chdl)
rmse_total = rmse_ldl + rmse_tc + (rmse_trig * 0) + rmse_hdl
return rmse_total
except Exception as e:
# print 'There was some problem with the get_total_rmse function: {}'.format(e)
raise
def get_total_rmse(x, pairs, windows, period_nonzero, adherence, Sx0ldl, Sx0chol, Sx0trig, Sx0hdl, Cldl0, Cchol0, Ctrig0, Chdl0, dose, t_ldl, ldl, t_tc, tc, t_trig, trig, t_hdl, hdl,t,count, biomarker,pre_adherence,prestatin, statintype, statin_dose):
try:
values_adherence = x[0:windows]
if count > 0:
values_biomarker = x[windows:]
for i in range(count):
if biomarker[i] == 'ldl':
Cldl0 = values_biomarker[i]
if biomarker[i] == 'chol':
Cchol0 = values_biomarker[i]
if biomarker[i] == 'trig':
Ctrig0 = values_biomarker[i]
if biomarker[i] == 'hdl':
Chdl0 = values_biomarker[i]
if biomarker[i] == 'pre_adherence':
pre_adherence = values_biomarker[i]
if biomarker[i] == 'alpha':
alpha = values_biomarker[i]
if 'alpha' in biomarker:
Cldl0 = Cldl0 * alpha
Cchol0 = Cchol0 * alpha
Sx0ldl, Sx0chol, Sx0trig, Sx0hdl, Cldl0, Cchol0, Ctrig0, Chdl0 = synthesis_calculation(Cldl0, Cchol0, Ctrig0, Chdl0, prestatin, statintype, statin_dose, pre_adherence)
adherence = adherence_guess(adherence, pairs, values_adherence, period_nonzero)
ldl_max = max(ldl)
tc_max = max(tc)
# if len(trig)>0:
# trig_max = max(trig)
# else:
# trig_max = 1
trig_max = 1 #max(trig)
hdl_max = 1 #max(hdl)
Cldl, Cchol, Ctrig, Chdl = differential_solve(adherence, t, Sx0ldl, Sx0chol, Sx0trig, Sx0hdl, Cldl0, Cchol0, Ctrig0, Chdl0, dose)
rmse_ldl = rmse_function(ldl, t_ldl, ldl_max, t, Cldl)
rmse_tc = rmse_function(tc, t_tc, tc_max, t, Cchol)
# rmse_trig = rmse_function(trig, t_trig, trig_max, t, Ctrig)
rmse_trig = 0
rmse_hdl = 0 #rmse_function(hdl, t_hdl, hdl_max, t, Chdl)
rmse_total = (1.2 * rmse_ldl) + rmse_tc + (rmse_trig * 0) +rmse_hdl
return rmse_total
except Exception as e:
# print 'There was some problem with the get_total_rmse function: {}'.format(e)
raise
def synthesis_calculation(Cldl0, Cchol0, Ctrig0, Chdl0, prestatin, statintype, statin_dose, pre_adherence):
try:
ldl_eff = np.load('../data/final/Efficacy/ldl_efficacy.npy')
chol_eff = np.load('../data/final/Efficacy/tc_efficacy.npy')
trig_eff = np.load('../data/final/Efficacy/trig_efficacy.npy')
hdl_eff = np.load('../data/final/Efficacy/hdl_efficacy.npy')
n = 0.7
dx = math.log(2)/14
if pd.isnull(Cldl0) | pd.isnull(Cchol0) | pd.isnull(Ctrig0) | pd.isnull(Chdl0):
print(Cldl0, Cchol0, Ctrig0, Chdl0, prestatin, statintype, statin_dose)
Cldl0, Cchol0, Ctrig0, Chdl0 = baseline_map(Cldl0, Cchol0, Ctrig0, Chdl0, prestatin, statintype, statin_dose)
if prestatin:
Sx0ldl = (dx*Cldl0)/(1-h0_cal(statin_dose, ldl_eff[0], ldl_eff[1], n, pre_adherence)[statintype])
Sx0chol = (dx*Cchol0)/(1-h0_cal(statin_dose, chol_eff[0], chol_eff[1], n, pre_adherence)[statintype])
Sx0trig = (dx*Ctrig0)/(1-h0_cal(statin_dose, trig_eff[0], trig_eff[1], n, pre_adherence)[statintype])
Sx0hdl = (dx*Chdl0)/(1-h0_cal(statin_dose, hdl_eff[0], hdl_eff[1], n, pre_adherence)[statintype])
else:
Sx0ldl = (dx*Cldl0)
Sx0chol = (dx*Cchol0)
Sx0trig = (dx*Ctrig0)
Sx0hdl = (dx*Chdl0)
# print(Cldl0, Cchol0, Ctrig0, Chdl0)
return Sx0ldl, Sx0chol, Sx0trig, Sx0hdl, Cldl0, Cchol0, Ctrig0, Chdl0
except Exception as e:
# print 'There was some problem with the synthesis_calculation function: {}'.format(e)
raise
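# Rationale for the synthesis terms above: at steady state dC/dt = 0, so
#   Sx0 * (1 - h0) - dx * C0 = 0  =>  Sx0 = dx * C0 / (1 - h0)
# for a patient already on a statin at baseline (h0 from h0_cal), and simply
# Sx0 = dx * C0 when there is no pre-baseline statin effect.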
def baseline_map(Cldl0, Cchol0, Ctrig0, Chdl0, prestatin, statintype, statin_dose):
try:
ldl = {'Atorvastatin': {'5': 0.31, '10': 0.37, '15': 0.40, '20': 0.43, '30': 0.46,'40': 0.49, '45': 0.50, '50': 0.51, '60': 0.52, '70': np.nan, '80': 0.55},
'Fluvastatin': {'5': 0.10, '10': 0.15, '15': np.nan, '20': 0.21, '30': np.nan, '40': 0.27, '45': np.nan, '50': np.nan, '60': np.nan, '70': np.nan, '80': 0.33},
'Lovastatin': {'5': np.nan, '10': 0.21 , '15': np.nan, '20': 0.29, '30': 0.33, '40': 0.37, '45': np.nan, '50': np.nan, '60': np.nan, '70': np.nan, '80': 0.45},
'Pravastatin': {'5': 0.15, '10': 0.2, '15': np.nan, '20': 0.24, '30': 0.27, '40': 0.29, '45': np.nan, '50': np.nan, '60': np.nan, '70': np.nan, '80': 0.33},
'Rosuvastatin': {'5': 0.38, '10': 0.43, '15': 0.46, '20': 0.48, '30': 0.51, '40': 0.53, '45': np.nan, '50': np.nan, '60': np.nan, '70': np.nan, '80': 0.58},
'Simvastatin': {'5': 0.23, '10': 0.27, '15': 0.3, '20': 0.32, '30': 0.35, '40': 0.37, '45': 0.38, '50': 0.38, '60': 0.4, '70': 0.41, '80': 0.42}}
tc = {'Atorvastatin': {'5': 0.24, '10': 0.29, '15': 0.31, '20': 0.33, '30': 0.36, '40': 0.38, '45': 0.39, '50': 0.39, '60': 0.4, '70': np.nan, '80': 0.43},
'Fluvastatin': {'5': 0.07, '10': 0.12, '15': np.nan, '20': 0.17, '30': np.nan, '40': 0.21, '45': np.nan, '50': np.nan, '60': np.nan, '70':np.nan, '80': 0.26},
'Lovastatin': {'5': np.nan, '10': 0.17, '15': np.nan, '20': 0.23, '30': 0.26, '40': 0.29, '45': np.nan, '50': np.nan, '60': np.nan, '70': np.nan, '80': 0.35},
'Pravastatin': {'5': 0.12, '10': 0.15, '15': np.nan, '20': 0.19, '30': 0.21, '40': 0.22, '45': np.nan, '50': np.nan, '60': np.nan, '70': np.nan, '80': 0.26},
'Rosuvastatin': {'5': 0.3, '10': 0.34, '15': 0.36, '20': 0.38, '30': 0.39, '40': 0.41, '45': np.nan, '50': np.nan, '60': np.nan, '70': np.nan, '80': 0.45},
'Simvastatin': {'5': 0.17, '10': 0.21, '15': 0.23, '20': 0.25, '30': 0.27, '40': 0.29, '45': np.nan, '50': 0.3, '60': 0.31, '70': 0.32, '80': 0.33}}
trig = {'Atorvastatin': {'5': 0.16, '10': 0.19, '15': 0.2, '20': 0.21, '30': 0.23, '40': 0.25, '45': 0.25, '50': 0.25, '60': 0.26, '70': np.nan, '80': 0.27},
'Fluvastatin': {'5': 0.05, '10': 0.08, '15': np.nan, '20': 0.11, '30': np.nan, '40': 0.14, '45': np.nan, '50': np.nan, '60': np.nan, '70': np.nan, '80': 0.16},
'Lovastatin': {'5': np.nan, '10': 0.11, '15': np.nan, '20': 0.15, '30': 0.16, '40': 0.18, '45': np.nan, '50': np.nan, '60': np.nan, '70': np.nan, '80': 0.22},
'Pravastatin': {'5': 0.08, '10': 0.10, '15': np.nan, '20': 0.12, '30': 0.13, '40': 0.14, '45': np.nan, '50': np.nan, '60': np.nan, '70': np.nan, '80': 0.17},
'Rosuvastatin': {'5': 0.19, '10': 0.22, '15': 0.23, '20': 0.24, '30': 0.25, '40': 0.27, '45': np.nan, '50': np.nan, '60': np.nan, '70': np.nan, '80': 0.29},
'Simvastatin': {'5': 0.11, '10': 0.14, '15': 0.15, '20': 0.16, '30': 0.17, '40': 0.18, '45': np.nan, '50': 0.19, '60': 0.20, '70': 0.20, '80': 0.21}}
hdl = {'Atorvastatin': {'5': 1.0, '10': 1.0, '15': 1.0, '20': 1.0, '30': 1.0, '40': 1.0, '45': 1.0, '50': 1.0, '60': 1.0, '70':1.0, '80': 1.0},
'Fluvastatin': {'5': 1.0, '10': 1.0, '15': 1.0, '20': 1.0, '30': 1.0, '40': 1.0, '45': 1.0, '50': 1.0, '60': 1.0, '70': 1.0, '80': 1.0},
'Lovastatin': {'5': 1.0, '10': 1.0, '15': 1.0, '20': 1.0, '30': 1.0, '40': 1.0, '45': 1.0, '50': 1.0, '60': 1.0, '70': 1.0, '80': 1.0},
'Pravastatin': {'5': 1.0, '10': 1.0, '15': 1.0, '20': 1.0, '30': 1.0, '40': 1.0, '45': 1.0, '50': 1.0, '60': 1.0, '70': 1.0, '80': 1.0},
'Rosuvastatin': {'5': 1.0, '10': 1.0, '15': 1.0, '20': 1.0, '30': 1.0, '40': 1.0, '45': 1.0, '50': 1.0, '60': 1.0, '70': 1.0, '80': 1.0},
'Simvastatin': {'5': 1.0, '10': 1.0, '15': 1.0, '20': 1.0, '30': 1.0, '40': 1.0, '45': 1.0, '50': 1.0, '60': 1.0, '70': 1.0, '80': 1.0}}
Cldl_prestatin = 4.78407034
Cchol_prestatin = 6.77527799
Ctrig_prestatin = 4.65168793
Chdl_prestatin = 1.81018878
if prestatin == False:
if pd.isnull(Cldl0):
Cldl0 = Cldl_prestatin
if pd.isnull(Cchol0):
Cchol0 = Cchol_prestatin
if pd.isnull(Ctrig0):
Ctrig0 = Ctrig_prestatin
if pd.isnull(Chdl0):
Chdl0 = Chdl_prestatin
if prestatin:
if ~(pd.isnull(statin_dose)):
statin_dose = str(int(statin_dose))
if pd.isnull(Cldl0):
Cldl0 = Cldl_prestatin * ldl[statintype][statin_dose]
if pd.isnull(Cchol0):
Cchol0 = Cchol_prestatin * tc[statintype][statin_dose]
if pd.isnull(Ctrig0):
Ctrig0 = Ctrig_prestatin * trig[statintype][statin_dose]
if pd.isnull(Chdl0):
Chdl0 = Chdl_prestatin * hdl[statintype][statin_dose]
return Cldl0, Cchol0, Ctrig0, Chdl0
except Exception as e:
print('There was some problem with the baseline_map function: {}'.format(e))
def optimize_callback(x, pairs, windows, period_nonzero, adherence, Sx0ldl, Sx0chol, Sx0trig, Sx0hdl, Cldl0, Cchol0, Ctrig0, Chdl0, dose, t_ldl, ldl, t_tc, tc, t_trig, trig, t_hdl, hdl,t,count, biomarker,pre_adherence,prestatin, statintype, statin_dose):
try:
values_adherence = x[0:windows]
if count > 0:
values_biomarker = x[windows:]
for i in range(count):
if biomarker[i] == 'ldl':
Cldl0 = values_biomarker[i]
if biomarker[i] == 'chol':
Cchol0 = values_biomarker[i]
if biomarker[i] == 'trig':
Ctrig0 = values_biomarker[i]
if biomarker[i] == 'hdl':
Chdl0 = values_biomarker[i]
if biomarker[i] == 'pre_adherence':
pre_adherence = values_biomarker[i]
if biomarker[i] == 'alpha':
alpha = values_biomarker[i]
if 'alpha' in biomarker:
Cldl0 = Cldl0 * alpha
Cchol0 = Cchol0 * alpha
Sx0ldl, Sx0chol, Sx0trig, Sx0hdl, Cldl0, Cchol0, Ctrig0, Chdl0 = synthesis_calculation(Cldl0, Cchol0, Ctrig0, Chdl0, prestatin, statintype, statin_dose, pre_adherence)
adherence = adherence_guess(adherence, pairs, values_adherence, period_nonzero)
ldl_max = max(ldl)
tc_max = max(tc)
# if len(trig)>0:
# trig_max = max(trig)
# else:
# trig_max = 1
trig_max = max(trig)
hdl_max = max(hdl)
Cldl, Cchol, Ctrig, Chdl = differential_solve(adherence, t, Sx0ldl, Sx0chol, Sx0trig, Sx0hdl, Cldl0, Cchol0, Ctrig0, Chdl0, dose)
rmse_ldl = rmse_function(ldl, t_ldl, ldl_max, t, Cldl)
rmse_tc = rmse_function(tc, t_tc, tc_max, t, Cchol)
# rmse_trig = rmse_function(trig, t_trig, trig_max, t, Ctrig)
rmse_trig = 0
rmse_hdl = 0 #rmse_function(hdl, t_hdl, hdl_max, t, Chdl)
rmse_total = (1.2 * rmse_ldl) + rmse_tc + (rmse_trig * 0) +rmse_hdl
print(rmse_total)
return rmse_total
except Exception as e:
# print 'There was some problem with the get_total_rmse function: {}'.format(e)
raise
def optimize_params(pairs, windows, period_nonzero, adherence, Sx0ldl, Sx0chol, Sx0trig, Sx0hdl, Cldl0, Cchol0, Ctrig0, Chdl0, dose, t_ldl, ldl, t_tc, tc, t_trig, trig, t_hdl, hdl,t, count, biomarker, pre_adherence, prestatin, statintype, statin_dose):
print('optimize_params')
try:
if (('ldl' not in biomarker) and ('chol' not in biomarker) and ('hdl' not in biomarker)):
alpha_lower = np.nanmax((0.1, Chdl0 / (Cchol0 - Cldl0)))
else:
print('else statement')
alpha_lower = 0.1
alpha_upper = 3.0
optimal_range = {'ldl' : {'lo': 1.292, 'hi':5.171},
'chol' : {'lo': 2.585, 'hi':9.05},
'trig' : {'lo': 1.129, 'hi':5.645},
'hdl' : {'lo': 0.775, 'hi':1.81},
'pre_adherence' : {'lo': 0.01, 'hi': 1.0},
'alpha' : {'lo': alpha_lower, 'hi': alpha_upper}
}
print('optimal range done')
low = []
high = []
for name in biomarker:
low.append(optimal_range[name]['lo'])
high.append(optimal_range[name]['hi'])
print('setting bounds')
npar = windows+count
bounds = np.zeros([npar,2])
bounds[:,0] = [0.01]*windows + low
bounds[:,1] = [1]*windows + high
# Convert bounds to list of tuples
boundsList = [tuple(bounds[i,:]) for i in range(bounds.shape[0])]
#solver = minimize(get_total_rmse, x0=np.mean(bounds, axis=1).tolist(), bounds=boundsList,
# args = (pairs, windows, period_nonzero, adherence, Sx0ldl, Sx0chol, Sx0trig, Sx0hdl,
# Cldl0, Cchol0, Ctrig0, Chdl0, dose, t_ldl, ldl, t_tc, tc, t_trig, trig, t_hdl, hdl,
# t, count, biomarker, pre_adherence, prestatin, statintype, statin_dose))
#best_rmse = np.inf
#all_vals = {}
#for i in range(1):
# result = solver.fun
# vals = solver.x
# if result < best_rmse:
# best_rmse, best_vals = result, vals
# all_vals[i] = {}
# all_vals[i]['Error'] = result
# all_vals[i]['params'] = list(vals)
solver = Differential_Evolution(obj_fun=get_total_rmse, bounds=bounds, parallel= True, npar=npar, npool=npar*8,
CR=0.85, strategy=2, fmin=0, fmax=2)
print(solver.parameter_number)
result = solver.optimize(args = [pairs, windows, period_nonzero, adherence, Sx0ldl, Sx0chol, Sx0trig, Sx0hdl,
Cldl0, Cchol0, Ctrig0 , Chdl0, dose, t_ldl, ldl, t_tc, tc, t_trig, trig, t_hdl, hdl,
t, count, biomarker, pre_adherence, prestatin, statintype, statin_dose])
best_rmse, best_vals = result[:2]
return best_rmse, best_vals
except Exception as e:
# print 'There was some problem with the optimize_params function: {}'.format(e)
raise
def plotting(t_ldl, ldl, t_tc, tc, t_trig, trig, t_hdl, hdl, Cldl_f, Cchol_f, Ctrig_f, Chdl_f, t, p, tmax=1095):
try:
# fontP = FontProperties()
plt.style.use(['seaborn-white', 'seaborn-talk'])
sns.set_style('ticks', {'font.family': ['Times New Roman'], 'font.size': ['18']})
sns.set_context('talk', font_scale=1)
# fontP.set_size('24')
fig = plt.figure(figsize=(12,8))
ax = fig.add_subplot(111)
ax.plot(t_ldl, ldl, '*', color='teal', label='Real data', markersize = '24')
ax.plot(t, Cldl_f, color='darkblue', label='ODE Simulation')
ax.set_xlabel('Days from baseline')
ax.set_ylabel('LDL, mmol/L')
ax.set_xlim(0, tmax)
ax.legend(frameon=True, framealpha=0.7, fontsize = '18')
fig.tight_layout()
outdir = os.path.join(classConfig['outputPath'], 'LDL_Simulation')
if not os.path.exists(outdir):
os.makedirs(outdir)
fig.savefig('{}/Patient{}'.format(outdir, p))
fig = plt.figure(figsize=(12,8))
ax = fig.add_subplot(111)
ax.plot(t_tc, tc, '*', color='teal', label='Real data', markersize = '24')
ax.plot(t, Cchol_f, color='darkblue', label='ODE Simulation')
ax.set_xlabel('Days from baseline')
ax.set_ylabel('Total cholesterol, mmol/L')
ax.set_xlim(0, tmax)
ax.legend(frameon=True, framealpha=0.7, fontsize = '18')
fig.tight_layout()
outdir = os.path.join(classConfig['outputPath'], 'Chol_Simulation')
if not os.path.exists(outdir):
os.makedirs(outdir)
fig.savefig('{}/Patient{}'.format(outdir, p))
# fig = plt.figure(figsize=(12,8))
# ax = fig.add_subplot(111)
# ax.plot(t_trig, trig, '*', color='teal', label='Real data', markersize = '24')
# ax.plot(t, Ctrig_f, color='darkblue', label='ODE Simulation')
# ax.set_xlabel('Days from baseline')
# ax.set_ylabel('Triglycerides, mmol/L')
# ax.set_xlim(0, 730)
# ax.legend(frameon=True, framealpha=0.7, fontsize = '18')
# fig.tight_layout()
# outdir = os.path.join(classConfig['outputPath'], 'Trig_Simulation')
# if not os.path.exists(outdir):
# os.makedirs(outdir)
# fig.savefig('{}/Patient{}'.format(outdir, p))
fig = plt.figure(figsize=(12,8))
ax = fig.add_subplot(111)
ax.plot(t_hdl, hdl, '*', color='teal', label='Real data', markersize = '24')
ax.plot(t, Chdl_f, color='darkblue', label='ODE Simulation')
ax.set_xlabel('Days from baseline')
ax.set_ylabel('HDL, mmol/L')
ax.set_xlim(0, tmax)
ax.legend(frameon=True, framealpha=0.7, fontsize = '18')
fig.tight_layout()
outdir = os.path.join(classConfig['outputPath'], 'HDL_Simulation')
if not os.path.exists(outdir):
os.makedirs(outdir)
fig.savefig('{}/Patient{}'.format(outdir, p))
plt.close('all')
except Exception as e:
# print 'There was some problem with the plotting function: {}'.format(e)
raise
def plotAdherence(adhData, scatterPoints, labels, outputFileName, tmax=1095):
try:
plt.style.use(['seaborn-white', 'seaborn-talk'])
sns.set_style('ticks', {'font.family': ['Times New Roman']})
sns.set_context('talk', font_scale=1)
fig = plt.figure(figsize=(12,8))
ax = fig.add_subplot(111)
for m in np.arange(0, adhData.shape[1]):
ax.plot(adhData[:,m], label=labels[m])
ax.scatter(np.arange(0, scatterPoints[:,m].shape[0]), scatterPoints[:,m])
ax.set_xlabel('Days from baseline')
ax.set_ylabel('Adherence')
ax.set_xlim(0, tmax)
ax.set_ylim(bottom=0)
ax.legend(frameon=True, framealpha=0.7)
fig.tight_layout()
outdir = os.path.join(classConfig['outputPath'], 'Adherence')
if not os.path.exists(outdir):
os.makedirs(outdir)
fig.savefig('{}/Adherence_{}.png'.format(outdir, outputFileName))
plt.close('all')
except Exception as e:
raise
def main_simulate(patientListPickleFile, **kwargs):
try:
tic = time.time()
patientsList = pd.read_pickle(str(patientListPickleFile))
patientList = np.array(patientsList['NRIC_X'].astype(str)) #Note the difference in names of the above two
patSN = pd.read_pickle('../../data/intermediate/patientSN_info.pkl')
patSN_dict = dict(patSN.apply(lambda x: tuple(x), axis=1).values)
# patientsToRun = ['1841', '1993', '2022', '2134', '2272', '2457', '2682', '3088', '3606', '3670',
# '2341', '2360', '2466', '2534', '2743', '2787', '2849', '3198', '4267', '4347']
# patientsToRun = ['2326']
rmseFinal = pd.DataFrame(columns=['NRIC_X', 'PatientSN', 'TotalRMSE_nonNorm'])
for p in patientList:
try:
print(p, patSN_dict)
p1 = patSN_dict[p]
# if p1 not in patientsToRun:
# continue
print('Loading data for patient {}'.format(p1))
myPatient = getPatientData(p, **kwargs)
myPatient.patientSN = p1
myPatient.loadOrigMedications()
adherence = myPatient.origMeds['statin']['adherence']
periods = myPatient.origMeds['statin']['periods']
dose = myPatient.origMeds['statin']['dose']
t_ldl, ldl = myPatient.biomarker(['LDL'])
t_tc, tc = myPatient.biomarker(['Cholesterol'])
t_trig, trig = myPatient.biomarker(['Triglycerides'])
t_hdl, hdl = myPatient.biomarker(['HDL'])
t_ldl_1 = []
ldl_1 = []
for i in range(len(t_ldl)):
t_ldl_1.append(t_ldl[i][0])
ldl_1.append(ldl[i][0])
t_tc_1 = []
tc_1 = []
for i in range(len(t_tc)):
t_tc_1.append(t_tc[i][0])
tc_1.append(tc[i][0])
t_trig_1 = []
trig_1 = []
for i in range(len(t_trig)):
t_trig_1.append(t_trig[i][0])
trig_1.append(trig[i][0])
t_hdl_1 = []
hdl_1 = []
for i in range(len(t_hdl)):
t_hdl_1.append(t_hdl[i][0])
hdl_1.append(hdl[i][0])
t_ldl, ldl, t_tc, tc, t_trig, trig, t_hdl, hdl = t_ldl_1, ldl_1, t_tc_1, tc_1, t_trig_1, trig_1, t_hdl_1, hdl_1
# print(t_ldl, ldl, t_tc, tc, t_trig, trig, t_hdl, hdl)
print('Loading data for patient {}'.format(p1))
Cldl0 = myPatient.baseline(['LDL'])[0][0]
Cchol0 = myPatient.baseline(['Cholesterol'])[0][0]
Ctrig0 = myPatient.baseline(['Triglycerides'])[0][0]
Chdl0 = myPatient.baseline(['HDL'])[0][0]
# print(Cldl0, Chdl0, Cchol0, Ctrig0)
prestatin = myPatient.baseline(['Statin_Prior'])[0][0]
statintype = myPatient.baseline(['Statin_Prior_Type'])[0][0]
statin_dose = myPatient.baseline(['Statin_Prior_Dose'])[0][0]
# pre_adherence = myPatient.baseline(['Statin_Pre_Adherence'])[0][0]
pre_adherence = 0
ldl_pre = int(pd.isnull(Cldl0))
chol_pre = int(pd.isnull(Cchol0))
# trig_pre = int(pd.isnull(Ctrig0))
hdl_pre = int(pd.isnull(Chdl0))
if prestatin == 1:
pre_adherence = 1
# Added this so that lipid sim will be evaluated for each day
# t = np.sort(reduce(np.union1d, [t_ldl, t_tc, t_trig, t_hdl]))
t_max = 1095
t = np.arange(0, (t_max+1), 1)
# Load optimized values
myPatient.loadOptimizedMedications()
currAdherence = myPatient.optimizedMeds['statin']['adherence']
currDose = myPatient.optimizedMeds['statin']['dose']
currSxo = myPatient.optimizedMeds['statin']['Sxo']
currCxo = myPatient.optimizedMeds['statin']['Cxo']
total_rmse_nonNorm = get_total_rmse_nonNorm(currAdherence, currSxo['LDL'], currSxo['Cholesterol'], currSxo['Triglycerides'], currSxo['HDL'],
currCxo['LDL'], currCxo['Cholesterol'], currCxo['Triglycerides'], currCxo['HDL'], currDose,
t_ldl, ldl, t_tc, tc, t_trig, trig, t_hdl, hdl, t)
rmseFinal = rmseFinal.append({'NRIC_X': p, 'PatientSN': p1, 'TotalRMSE_nonNorm': total_rmse_nonNorm}, ignore_index=True)
except Exception as e:
print('Patient cannot be processed: {}\n'.format(e))
continue
# raise
rmseFinal.to_pickle('{}/{}/TotalRMSE.pkl'.format(classConfig['savePath']['final'], classConfig['savePath']['optimizedMeds']))
except Exception as e:
print('There was some problem with the main function: {}'.format(e))
# raise
def main(patientList, **kwargs):
try:
tic = time.time()
#patientsList = pd.read_pickle(str(patientListPickleFile))
#print('patientsList', patientsList)
#patientList = [np.array(patientsList['NRIC_X'].astype(str))] #Note the difference in names of the above two
#patSN = pd.read_pickle('../../data/intermediate/patientSN_info.pkl')
#patSN_dict = dict(patSN.apply(lambda x: tuple(x), axis=1).values)
# patientsToRun = ['1841', '1993', '2022', '2134', '2272', '2457', '2682', '3088', '3606', '3670',
# '2341', '2360', '2466', '2534', '2743', '2787', '2849', '3198', '4267', '4347']
# patientsToRun = ['2326']
#print(patSN_dict)
print(patientList)
for p in patientList:
try:
print('p iterated')
p1 = p
# if p1 not in patientsToRun:
# continue
print('Loading data for patient {}'.format(p1))
myPatient = getPatientData(p, **kwargs)
print('initiated patient data')
myPatient.patientSN = p1
print('initiated patient SN')
myPatient.loadOrigMedications()
print('loadedOrigMeds')
print('Loaded patient {} medications'.format(p1))
adherence = myPatient.origMeds['statin']['adherence']
periods = myPatient.origMeds['statin']['periods']
dose = myPatient.origMeds['statin']['dose']
print(adherence.shape)
print('Loading biomarkers for patient {}'.format(p1))
print(myPatient.biomarker(['LDL']))
t_ldl, ldl = myPatient.biomarker(['LDL'])
t_tc, tc = myPatient.biomarker(['Cholesterol'])
t_trig, trig = myPatient.biomarker(['Triglycerides'])
t_hdl, hdl = myPatient.biomarker(['HDL'])
print('Loaded biomarkers for patient{}'.format(p1))
t_ldl_1 = []
ldl_1 = []
for i in range(len(t_ldl)):
t_ldl_1.append(t_ldl[i][0])
ldl_1.append(ldl[i][0])
print('loaded ldl')
t_tc_1 = []
tc_1 = []
for i in range(len(t_tc)):
t_tc_1.append(t_tc[i][0])
tc_1.append(tc[i][0])
print('loaded tc')
t_trig_1 = []
trig_1 = []
for i in range(len(t_trig)):
t_trig_1.append(t_trig[i][0])
trig_1.append(trig[i][0])
print('loaded trig')
t_hdl_1 = []
hdl_1 = []
for i in range(len(t_hdl)):
t_hdl_1.append(t_hdl[i][0])
hdl_1.append(hdl[i][0])
print('loaded hdl')
t_ldl, ldl, t_tc, tc, t_trig, trig, t_hdl, hdl = t_ldl_1, ldl_1, t_tc_1, tc_1, t_trig_1, trig_1, t_hdl_1, hdl_1
# print(t_ldl, ldl, t_tc, tc, t_trig, trig, t_hdl, hdl)
print('Loading data for patient {} for baseline'.format(p1))
Cldl0 = myPatient.baseline(['LDL'])[0][0]
print('Cldl0', Cldl0)
Cchol0 = myPatient.baseline(['Cholesterol'])[0][0]
print('Cchol0', Cchol0)
Ctrig0 = myPatient.baseline(['Triglycerides'])[0][0]
print('Ctrig0', Ctrig0)
Chdl0 = myPatient.baseline(['HDL'])[0][0]
print('Chdl0', Chdl0)
# print(Cldl0, Chdl0, Cchol0, Ctrig0)
print('loading statins')
prestatin = myPatient.baseline(['Statin_Prior'])[0][0]
statintype = myPatient.baseline(['Statin_Prior_Type'])[0][0]
statin_dose = myPatient.baseline(['Statin_Prior_Dose'])[0][0]
# pre_adherence = myPatient.baseline(['Statin_Pre_Adherence'])[0][0]
pre_adherence = 0
print('loaded statins')
print('loading prebiomarkers')
ldl_pre = int(pd.isnull(Cldl0))
                chol_pre = int(pd.isnull(Cchol0))
                hdl_pre = int(pd.isnull(Chdl0))
import numpy as np
import pandas as pd
from postproc_utils import rmse_masked, nse
def test_rmse_masked():
y_true = pd.Series([1, 5, 3, 4, 2])
y_pred = y_true.copy()
err = rmse_masked(y_true, y_pred)
assert err == 0
y_pred = pd.Series([0, 0, 0, 0, 0])
err = rmse_masked(y_true, y_pred)
assert round(err, 2) == 3.32
y_true = pd.Series([1, np.nan, 3, 4, 2])
err = rmse_masked(y_true, y_pred)
assert round(err, 2) == 2.74
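    # Sanity check of the expected numbers: with y_pred all zeros the unmasked error is
    # sqrt((1 + 25 + 9 + 16 + 4) / 5) = sqrt(11) ~ 3.32, and once the NaN observation is
    # masked out it becomes sqrt((1 + 9 + 16 + 4) / 4) = sqrt(7.5) ~ 2.74.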
def test_nse():
y_true = pd.Series([1, 5, 3, 4, 2])
y_pred = y_true.copy()
nse_samp = nse(y_true, y_pred)
assert nse_samp == 1
y_pred = pd.Series([1, 4, 0, 4, 2])
nse_samp = nse(y_true, y_pred)
assert nse_samp == 0
    y_pred = pd.Series([2, 4, 0, 4, 2])
    nse_samp = nse(y_true, y_pred)
    assert round(nse_samp, 2) == -0.1
from pathlib import Path
from itertools import chain
from lxml import etree
from django.db import models
from django.db.models import F
from django.db.models import Max, Min, Sum
from django.shortcuts import render
from django.urls import reverse
from polymorphic.models import PolymorphicModel
import numpy as np
import pandas as pd
from collections import defaultdict
from scipy.special import expit
import gotoh
from dcodex.models import Manuscript, Verse, VerseLocation
from dcodex_bible.models import BibleVerse
from dcodex_bible.similarity import *
import dcodex.distance as distance
import logging
DEFAULT_LECTIONARY_VERSE_MASS = 50
def data_dir():
return Path(__file__).parent/"data"
class LectionaryVerse(Verse):
bible_verse = models.ForeignKey(BibleVerse, on_delete=models.CASCADE, default=None, null=True, blank=True )
unique_string = models.CharField(max_length=100, default="")
mass = models.PositiveIntegerField(default=0)
class Meta:
ordering = ('bible_verse',)
def save(self,*args,**kwargs):
# Check to see if ID is assigned
if self.mass == 0:
self.mass = self.bible_verse.char_count if self.bible_verse and self.bible_verse.char_count else DEFAULT_LECTIONARY_VERSE_MASS
super().save(*args,**kwargs)
def reference(self, abbreviation = False, end_verse=None):
if not self.bible_verse:
if abbreviation and "Heading" in self.unique_string:
return "Head"
return self.unique_string
if end_verse:
return "vv %d–%d" % (self.id, end_verse.id)
return self.bible_verse.reference( abbreviation )
# Override
@classmethod
def get_from_dict( cls, dictionary ):
return cls.get_from_values(
dictionary.get('verse_id', 1) )
# Override
@classmethod
def get_from_string( cls, verse_as_string ):
if verse_as_string.isdigit():
return cls.get_from_values( verse_as_string )
return cls.objects.filter( unique_string=verse_as_string ).first()
@classmethod
def get_from_values( cls, verse_id ):
try:
return cls.objects.filter( id=int(verse_id) ).first()
except:
return None
# Override
def url_ref(self):
return self.unique_string
def set_unique_string( self ):
if not self.bible_verse:
return
other_vv = LectionaryVerse.objects.filter( bible_verse=self.bible_verse )
if self.id:
other_vv = other_vv.filter( id__lt=self.id )
self.unique_string = self.bible_verse.reference_abbreviation().replace(" ", '')
count = other_vv.count()
if count > 0:
self.unique_string += "_%d" % (count+1)
return self.unique_string
@classmethod
def new_from_bible_verse( cls, bible_verse ):
try:
rank = 1 + cls.objects.aggregate( Max('rank') )['rank__max']
except:
rank = 1
lectionary_verse = cls( bible_verse=bible_verse, rank=rank)
lectionary_verse.set_unique_string()
lectionary_verse.save()
return lectionary_verse
@classmethod
def new_from_bible_verse_id( cls, bible_verse_id ):
bible_verse = BibleVerse.objects.get( id=bible_verse_id )
return cls.new_from_bible_verse( bible_verse )
def others_with_bible_verse( self ):
return LectionaryVerse.objects.filter( bible_verse=self.bible_verse ).exclude( id=self.id )
class Lection(models.Model):
verses = models.ManyToManyField(LectionaryVerse, through='LectionaryVerseMembership')
description = models.CharField(max_length=100)
first_verse_id = models.IntegerField(default=0)
first_bible_verse_id = models.IntegerField(default=0)
def save(self,*args,**kwargs):
# Check to see if ID is assigned
if not self.id:
return super().save(*args,**kwargs)
first_verse = self.verses.first()
if first_verse:
self.first_verse_id = first_verse.id
self.first_bible_verse_id = first_verse.bible_verse.id if first_verse.bible_verse else 0
return super().save(*args,**kwargs)
class Meta:
ordering = ['first_bible_verse_id','description']
def __str__(self):
return self.description
def description_max_chars( self, max_chars=40 ):
description = self.description
if max_chars < 6:
max_chars = 6
if len(description) < max_chars:
return description
return description[:max_chars-3] + "..."
def days(self):
field = 'day'
ids = {value[field] for value in LectionInSystem.objects.filter(lection=self).values(field) if value[field]}
        return LectionaryDay.objects.filter(id__in=ids)  # Look for a more efficient way to do this query
def dates(self):
""" Deprecated: Use 'days' """
return self.days()
def description_with_days( self ):
description = self.description_max_chars()
days = self.days()
if len(days) == 0:
return description
return "%s (%s)" % (description, ", ".join( [str(day) for day in days] ) )
def description_with_dates( self ):
""" Deprecated: Use 'description_with_days' """
return self.description_with_days()
def verse_memberships(self):
return LectionaryVerseMembership.objects.filter( lection=self ).all()
def reset_verse_order(self):
for verse_order, verse_membership in enumerate(self.verse_memberships()):
verse_membership.order = verse_order
verse_membership.save()
# print(verse_membership)
def verse_ids(self):
return LectionaryVerseMembership.objects.filter(lection=self).values_list( 'verse__id', flat=True )
def first_verse_id_in_set( self, intersection_set ):
return LectionaryVerseMembership.objects.filter(lection=self, verse__id__in=intersection_set).values_list( 'verse__id', flat=True ).first()
# OLD CODE
for verse_id in self.verse_ids():
if verse_id in intersection_set:
return verse_id
return None
def last_verse_id_in_set( self, intersection_set ):
return LectionaryVerseMembership.objects.filter(lection=self, verse__id__in=intersection_set).reverse().values_list( 'verse__id', flat=True ).first()
# OLD CODE
for verse_id in LectionaryVerseMembership.objects.filter(lection=self).reverse().values_list( 'verse__id', flat=True ):
if verse_id in intersection_set:
return verse_id
return None
# Deprecated - use add_verses_from_passages_string
def add_verses_from_range( self, start_verse_string, end_verse_string, lection_descriptions_with_verses=[], create_verses=False ):
lection_bible_verse_start = BibleVerse.get_from_string( start_verse_string )
lection_bible_verse_end = BibleVerse.get_from_string( end_verse_string )
# Find verses in other lections to use for this lection
verses_from_other_lections = []
for lection_description_with_verses in lection_descriptions_with_verses:
#print("Finding lection:", lection_description_with_verses)
lection_with_verses = Lection.objects.get( description=lection_description_with_verses )
verses_from_other_lections += list( lection_with_verses.verses.all() )
# Add verses in order, use verses from other lections if present otherwise create them
for bible_verse_id in range(lection_bible_verse_start.id, lection_bible_verse_end.id + 1):
lectionary_verse = None
for verse_from_other_lections in verses_from_other_lections:
if verse_from_other_lections.bible_verse.id == bible_verse_id:
lectionary_verse = verse_from_other_lections
break
if lectionary_verse is None:
if create_verses == False:
print("Trying to create lection %s with range of verses from %s to %s using %s other lections but there are not the right number of verses. i..e %d != %d" %
(description, str(lection_bible_verse_start), str(lection_bible_verse_end), lection_descriptions_with_verses, lection.verses.count(), lection_bible_verse_end.id-lection_bible_verse_start.id + 1 ) )
sys.exit()
lectionary_verse = LectionaryVerse.new_from_bible_verse_id( bible_verse_id )
self.verses.add(lectionary_verse)
self.save()
def add_verses_from_passages_string( self, passages_string, overlapping_lection_descriptions=[], overlapping_verses = [], overlapping_lections = [], create_verses=True ):
bible_verses = BibleVerse.get_verses_from_string( passages_string )
# Find verses in other lections to use for this lection
overlapping_lections += [Lection.objects.get( description=description ) for description in overlapping_lection_descriptions]
for overlapping_lection in overlapping_lections:
overlapping_verses += list( overlapping_lection.verses.all() )
# Add verses in order, use verses from other lections if present otherwise create them
for bible_verse in bible_verses:
lectionary_verse = None
for overlapping_verse in overlapping_verses:
if overlapping_verse.bible_verse and overlapping_verse.bible_verse.id == bible_verse.id:
lectionary_verse = overlapping_verse
break
if lectionary_verse is None:
if create_verses == False:
raise Exception( "Failed Trying to create lection %s using %s other lections but there are not the right number of verses." % (passages_string,overlapping_verses) )
lectionary_verse = LectionaryVerse.new_from_bible_verse_id( bible_verse.id )
self.verses.add(lectionary_verse)
self.save()
@classmethod
def update_or_create_from_description( cls, description, start_verse_string, end_verse_string, lection_descriptions_with_verses=[], create_verses=False ):
lection, created = cls.objects.get_or_create(description=description)
if created == False:
return lection
lection.verses.clear()
lection.add_verses_from_range( start_verse_string, end_verse_string, lection_descriptions_with_verses, create_verses )
lection.maintenance()
return lection
@classmethod
def update_or_create_from_passages_string( cls, passages_string, lection_descriptions_with_verses=[], create_verses=False ):
lection, created = cls.objects.get_or_create(description=passages_string)
if created == False:
return lection
lection.verses.clear()
lection.add_verses_from_passages_string( passages_string, overlapping_lection_descriptions=lection_descriptions_with_verses, create_verses=create_verses )
lection.maintenance()
return lection
@classmethod
def create_from_passages_string( cls, passages_string, **kwargs ):
lection = cls(description=passages_string)
lection.save()
lection.add_verses_from_passages_string( passages_string, **kwargs )
lection.save()
return lection
def first_verse(self):
return self.verses.first()
def calculate_mass(self):
mass = self.verses.aggregate( Sum('mass') ).get('mass__sum')
return mass
def maintenance(self):
cumulative_mass_from_lection_start = 0
for verse_membership in self.verse_memberships():
verse_membership.cumulative_mass_from_lection_start = cumulative_mass_from_lection_start
verse_membership.save()
cumulative_mass_from_lection_start += verse_membership.verse.mass
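# A hypothetical usage sketch for the Lection factory methods above (the passage strings are
# illustrative, not taken from a real database):
#
#   lection = Lection.update_or_create_from_passages_string(
#       "Matthew 1:1-25",
#       lection_descriptions_with_verses=["Matthew 1:18-25"],
#       create_verses=True,
#   )
#   lection.calculate_mass()   # sum of the mass of every verse attached to the lection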
class LectionaryVerseMembership(models.Model):
lection = models.ForeignKey(Lection, on_delete=models.CASCADE)
verse = models.ForeignKey(LectionaryVerse, on_delete=models.CASCADE)
order = models.IntegerField(default=0)
cumulative_mass_from_lection_start = models.IntegerField(default=0, help_text="The total mass of verses from the beginning of the lection until this verse")
class Meta:
ordering = ['order','verse__bible_verse']
def __str__(self):
return "%d: %s in %s" % (self.order, self.verse, self.lection)
class FixedDate(models.Model):
"""
A liturgical day that corresponds to a fixed date in the calendar.
    Because Django date fields must store a specific year, dates from September to December are stored under the year 1003 and dates from January to August under 1004. The year 1004 was chosen because it is a leap year and so includes February 29.
"""
description = models.CharField(max_length=100)
date = models.DateField(default=None,null=True, blank=True)
def __str__(self):
return self.description
@classmethod
def get_with_string( cls, date_string ):
from dateutil import parser
dt = parser.parse( date_string )
year = 1003 if dt.month >= 9 else 1004
dt = dt.replace(year=year)
#print(dt, date_string)
return cls.objects.filter( date=dt ).first()
class Meta:
ordering = ('date','description')
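# Illustration of the fixed-year convention described in the FixedDate docstring (a sketch only):
# dates from September onward are stored under 1003 and January-August under 1004, so that
# February 29 remains representable because 1004 is a leap year. For example:
#   from dateutil import parser
#   parser.parse("25 December").replace(year=1003)    # how "25 December" is stored
#   parser.parse("29 Feb 2024").replace(year=1004)    # datetime(1004, 2, 29) is valid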
class LectionaryDay(PolymorphicModel):
pass
class MiscDay(LectionaryDay):
description = models.CharField(max_length=255)
def __str__(self):
return self.description
class Meta:
ordering = ('description',)
class EothinaDay(LectionaryDay):
rank = models.IntegerField()
def __str__(self):
return f"Eothina {self.rank}"
class Meta:
ordering = ('rank',)
class FixedDay(LectionaryDay):
"""
A lectionary day that corresponds to a fixed date in the calendar.
    Because Django date fields must store a specific year, dates from September to December
    are stored under the year 1003 and dates from January to August under 1004.
    The year 1004 was chosen because it is a leap year and so includes February 29.
"""
date = models.DateField(default=None,null=True, blank=True)
def __str__(self):
return self.date.strftime('%b %d')
@classmethod
def get_with_string( cls, date_string ):
from dateutil import parser
dt = parser.parse( date_string )
year = 1003 if dt.month >= 9 else 1004
dt = dt.replace(year=year)
return cls.objects.filter( date=dt ).first()
class Meta:
ordering = ('date',)
class MovableDay(LectionaryDay):
SUNDAY = 0
MONDAY = 1
TUESDAY = 2
WEDNESDAY = 3
THURSDAY = 4
FRIDAY = 5
SATURDAY = 6
DAY_CHOICES = [
(SUNDAY, 'Sunday'),
(MONDAY, 'Monday'),
(TUESDAY, 'Tuesday'),
(WEDNESDAY, 'Wednesday'),
(THURSDAY, 'Thursday'),
(FRIDAY, 'Friday'),
(SATURDAY, 'Saturday'),
]
DAY_ABBREVIATIONS = [
(SUNDAY, 'Sun'),
(MONDAY, 'Mon'),
(TUESDAY, 'Tues'),
(WEDNESDAY, 'Wed'),
(THURSDAY, 'Th'),
(FRIDAY, 'Fri'),
(SATURDAY, 'Sat'),
]
day_of_week = models.IntegerField(choices=DAY_CHOICES)
EASTER = 'E'
PENTECOST = 'P'
FEAST_OF_THE_CROSS = 'F'
LENT = 'L'
GREAT_WEEK = 'G'
EPIPHANY = 'T'
SEASON_CHOICES = [
(EASTER, 'Easter'),
(PENTECOST, 'Pentecost'),
(FEAST_OF_THE_CROSS, 'Feast of the Cross'),
(LENT, 'Lent'),
(GREAT_WEEK, 'Great Week'),
(EPIPHANY, 'Epiphany'),
]
season = models.CharField(max_length=1, choices=SEASON_CHOICES)
week = models.CharField(max_length=31, blank=True, default="")
weekday_number = models.CharField(max_length=32, blank=True, default="")
earliest_date = models.CharField(max_length=15, blank=True, default="")
latest_date = models.CharField(max_length=15, blank=True, default="")
rank = models.PositiveIntegerField(default=0, blank=False, null=False)
class Meta:
ordering = ('rank','id')
def description_str( self, abbreviation = False ):
        day_choices = self.DAY_ABBREVIATIONS if abbreviation else self.DAY_CHOICES
string = "%s: %s" % (self.get_season_display(), day_choices[self.day_of_week][1] )
if self.week.isdigit():
string += ", Week %s" % (self.week )
elif self.week != "Holy Week":
string += ", %s" % (self.week )
if abbreviation:
string = string.replace("Week", "Wk")
string = string.replace("Feast of the Cross", "Cross")
string = string.replace(" Fare", "")
return string
def __str__(self):
return self.description_str(True)
@classmethod
def read_season(cls, target):
target = target.lower().strip()
for season, season_string in cls.SEASON_CHOICES:
if season_string.lower().startswith(target):
return season
if target.startswith("cross"):
return cls.FEAST_OF_THE_CROSS
if target.startswith("theoph"):
return cls.EPIPHANY
return None
@classmethod
def read_day_of_week(cls, target):
target = target.lower().strip()
for day, day_abbreviation in cls.DAY_ABBREVIATIONS:
if target.startswith(day_abbreviation.lower()):
return day
return None
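# Hypothetical parsing examples for the MovableDay helpers above:
#   MovableDay.read_season("Easter")      -> MovableDay.EASTER ('E')
#   MovableDay.read_season("cross")       -> MovableDay.FEAST_OF_THE_CROSS ('F')
#   MovableDay.read_season("theophany")   -> MovableDay.EPIPHANY ('T')
#   MovableDay.read_day_of_week("Wed")    -> MovableDay.WEDNESDAY (3)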
class DayOfYear(models.Model):
"DEPRECATED. See Moveable Day."
SUNDAY = 0
MONDAY = 1
TUESDAY = 2
WEDNESDAY = 3
THURSDAY = 4
FRIDAY = 5
SATURDAY = 6
DAY_CHOICES = [
(SUNDAY, 'Sunday'),
(MONDAY, 'Monday'),
(TUESDAY, 'Tuesday'),
(WEDNESDAY, 'Wednesday'),
(THURSDAY, 'Thursday'),
(FRIDAY, 'Friday'),
(SATURDAY, 'Saturday'),
]
DAY_ABBREVIATIONS = [
(SUNDAY, 'Sun'),
(MONDAY, 'Mon'),
(TUESDAY, 'Tues'),
(WEDNESDAY, 'Wed'),
(THURSDAY, 'Th'),
(FRIDAY, 'Fri'),
(SATURDAY, 'Sat'),
]
day_of_week = models.IntegerField(choices=DAY_CHOICES)
EASTER = 'E'
PENTECOST = 'P'
FEAST_OF_THE_CROSS = 'F'
LENT = 'L'
GREAT_WEEK = 'G'
EPIPHANY = 'T'
PERIOD_CHOICES = [
(EASTER, 'Easter'),
(PENTECOST, 'Pentecost'),
(FEAST_OF_THE_CROSS, 'Feast of the Cross'),
(LENT, 'Lent'),
(GREAT_WEEK, 'Great Week'),
(EPIPHANY, 'Epiphany'),
]
period = models.CharField(max_length=1, choices=PERIOD_CHOICES)
week = models.CharField(max_length=31)
weekday_number = models.CharField(max_length=32)
earliest_date = models.CharField(max_length=15)
latest_date = models.CharField(max_length=15)
description = models.CharField(max_length=255)
def description_str( self, abbreviation = False ):
        day_choices = self.DAY_ABBREVIATIONS if abbreviation else self.DAY_CHOICES
string = "%s: %s" % (self.get_period_display(), day_choices[self.day_of_week][1] )
if self.week.isdigit():
string += ", Week %s" % (self.week )
elif self.week != "Holy Week":
string += ", %s" % (self.week )
if abbreviation:
string = string.replace("Week", "Wk")
string = string.replace("Feast of the Cross", "Cross")
string = string.replace(" Fare", "")
return string
def __str__(self):
return self.description_str(True)
class Meta:
verbose_name_plural = 'Days of year'
@classmethod
def read_period(cls, target):
target = target.lower().strip()
for period, period_string in cls.PERIOD_CHOICES:
if period_string.lower().startswith(target):
return period
return None
class LectionInSystem(models.Model):
lection = models.ForeignKey(Lection, on_delete=models.CASCADE, default=None, null=True, blank=True)
system = models.ForeignKey('LectionarySystem', on_delete=models.CASCADE)
day_of_year = models.ForeignKey(DayOfYear, on_delete=models.CASCADE, default=None, null=True, blank=True) # Deprecated
fixed_date = models.ForeignKey(FixedDate, on_delete=models.CASCADE, default=None, null=True, blank=True) # Deprecated
day = models.ForeignKey(LectionaryDay, on_delete=models.CASCADE, default=None, null=True, blank=True)
order_on_day = models.IntegerField(default=0) # Deprecated
cumulative_mass_lections = models.IntegerField(default=-1) # The mass of all the previous lections until the start of this one
order = models.IntegerField(default=0)
reference_text_en = models.TextField(default="", blank=True)
incipit = models.TextField(default="", blank=True)
reference_membership = models.ForeignKey('LectionInSystem', on_delete=models.CASCADE, default=None, null=True, blank=True)
occasion_text = models.TextField(default="", blank=True)
occasion_text_en = models.TextField(default="", blank=True)
def __str__(self):
return "%s in %s on %s" % ( str(self.lection), str(self.system), self.day_description() )
def clone_to_system( self, new_system ):
return LectionInSystem.objects.get_or_create(
system=new_system,
lection=self.lection,
order=self.order,
day=self.day,
cumulative_mass_lections=self.cumulative_mass_lections,
incipit=self.incipit,
reference_text_en=self.reference_text_en,
reference_membership=self.reference_membership,
)
def day_description(self):
if self.order_on_day < 2:
return str(self.day)
return "%s (%d)" % (str(self.day), self.order_on_day)
def description(self):
return "%s. %s" % (self.day_description(), str(self.lection) )
def description_max_chars( self, max_chars=40 ):
description = self.description()
if max_chars < 6:
max_chars = 6
if len(description) < max_chars:
return description
return description[:max_chars-3] + "..."
class Meta:
ordering = ('order', 'day', 'order_on_day',)
def prev(self):
return self.system.prev_lection_in_system( self )
def next(self):
return self.system.next_lection_in_system( self )
def cumulative_mass_of_verse( self, verse ):
mass = self.cumulative_mass_lections
verse_membership = LectionaryVerseMembership.objects.filter( lection=self.lection, verse=verse ).first()
cumulative_mass_verses = LectionaryVerseMembership.objects.filter( lection=self.lection, order__lt=verse_membership.order ).aggregate( Sum('verse__mass') ).get('verse__mass__sum')
if cumulative_mass_verses:
mass += cumulative_mass_verses
return mass
class LectionarySystem(models.Model):
name = models.CharField(max_length=200)
lections = models.ManyToManyField(Lection, through=LectionInSystem)
def __str__(self):
return self.name
def first_lection_in_system(self):
return self.lections_in_system().first()
def last_lection_in_system(self):
return self.lections_in_system().last()
def first_lection(self):
first_lection_in_system = self.first_lection_in_system()
return first_lection_in_system.lection
def first_verse(self):
first_lection = self.first_lection()
return first_lection.first_verse()
def maintenance(self):
self.reset_order()
self.calculate_masses()
def find_movable_day( self, **kwargs ):
day = MovableDay.objects.filter(**kwargs).first()
print('Day:', day)
if day:
return LectionInSystem.objects.filter(system=self, day=day).first()
return None
def find_fixed_day( self, last=False, **kwargs ):
memberships = self.find_fixed_day_all(**kwargs)
if not memberships:
return None
if last:
return memberships.last()
return memberships.first()
def find_fixed_day_all( self, **kwargs ):
date = FixedDay.objects.filter(**kwargs).first()
if date:
return LectionInSystem.objects.filter(system=self, day=date).all()
return None
def reset_order(self):
lection_memberships = self.lections_in_system()
for order, lection_membership in enumerate(lection_memberships.all()):
lection_membership.order = order
lection_membership.save()
lection_membership.lection.reset_verse_order()
def lections_in_system(self):
return LectionInSystem.objects.filter(system=self)
def lections_in_system_min_verses(self, min_verses=2):
return [m for m in self.lections_in_system().all() if m.lection.verses.count() >= min_verses]
def export_csv(self, filename) -> pd.DataFrame:
"""
Exports the lectionary system as a CSV.
Returns the lectionary system as a dataframe.
"""
df = self.dataframe()
df.to_csv(filename)
return df
def dataframe(self) -> pd.DataFrame:
"""
Returns the lectionary system as a pandas dataframe.
"""
data = []
columns = ["lection", 'season', 'week', 'day']
for lection_membership in self.lections_in_system():
if type(lection_membership.day) != MovableDay:
raise NotImplementedError(f"Cannot yet export for days of type {type(lection_membership.day)}.")
data.append(
[
lection_membership.lection.description,
lection_membership.day.get_season_display(),
lection_membership.day.week,
lection_membership.day.get_day_of_week_display(),
]
)
        df = pd.DataFrame(data, columns=columns)
        return df
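# Hypothetical usage of the export above (the system name and file name are illustrative):
#   system = LectionarySystem.objects.get(name="Constantinople")
#   df = system.export_csv("constantinople_lections.csv")   # writes the CSV and returns the DataFrame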
from ..heuristics.spacy_base import SpacyModel
from sklearn.base import BaseEstimator, TransformerMixin
from tqdm import tqdm
import pandas as pd
from attrdict import AttrDict
class Tokenizer(BaseEstimator, TransformerMixin):
def __init__(self, tokenizer):
self.tokenizer = SpacyModel(tokenizer)
def fit(self, X):
return self
def transform(self, X):
try:
res = []
for idx, row in tqdm(X.iterrows(), total=len(X)):
res.append(self.tokenizer.tokenize(**row)[1:])
res = pd.DataFrame(res, columns=['tokens', 'pronoun_offset_token',
'a_offset_token', 'b_offset_token', 'a_span',
'b_span', 'pronoun_token', 'a_tokens', 'b_tokens'])
cols = set(X.columns).difference(res.columns)
            X = pd.concat([X[cols], res], axis=1)
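# Hypothetical fit/transform usage of the Tokenizer above (the spaCy model name and the input
# columns are assumptions; the transformer appends token-level offset and span columns):
#   tok = Tokenizer("en_core_web_sm")
#   df_out = tok.fit(df_in).transform(df_in)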
"""
Measuring the performance of key functionality.
:author: <NAME>
"""
import functools
import matplotlib.pyplot as plt
import pandas as pd
from pathlib import Path
from time import perf_counter
import klib
# Paths
base_path = Path(__file__).resolve().parents[2]
print(base_path)
data_path = base_path / "examples"
export_path = base_path / "klib/scripts/"
# Data Import
filepath = data_path / "NFL_DATASET.csv"
data = pd.read_csv(filepath)
# -*- coding: utf-8 -*-
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta
import pytest
import re
from numpy import nan as NA
import numpy as np
from numpy.random import randint
from pandas.compat import range, u
import pandas.compat as compat
from pandas import Index, Series, DataFrame, isna, MultiIndex, notna
from pandas.util.testing import assert_series_equal
import pandas.util.testing as tm
import pandas.core.strings as strings
class TestStringMethods(object):
def test_api(self):
# GH 6106, GH 9322
assert Series.str is strings.StringMethods
assert isinstance(Series(['']).str, strings.StringMethods)
# GH 9184
invalid = Series([1])
with tm.assert_raises_regex(AttributeError,
"only use .str accessor"):
invalid.str
assert not hasattr(invalid, 'str')
def test_iter(self):
# GH3638
strs = 'google', 'wikimedia', 'wikipedia', 'wikitravel'
ds = Series(strs)
for s in ds.str:
# iter must yield a Series
assert isinstance(s, Series)
# indices of each yielded Series should be equal to the index of
# the original Series
tm.assert_index_equal(s.index, ds.index)
for el in s:
# each element of the series is either a basestring/str or nan
assert isinstance(el, compat.string_types) or isna(el)
# desired behavior is to iterate until everything would be nan on the
# next iter so make sure the last element of the iterator was 'l' in
# this case since 'wikitravel' is the longest string
assert s.dropna().values.item() == 'l'
def test_iter_empty(self):
ds = Series([], dtype=object)
i, s = 100, 1
for i, s in enumerate(ds.str):
pass
# nothing to iterate over so nothing defined values should remain
# unchanged
assert i == 100
assert s == 1
def test_iter_single_element(self):
ds = Series(['a'])
for i, s in enumerate(ds.str):
pass
assert not i
assert_series_equal(ds, s)
def test_iter_object_try_string(self):
ds = Series([slice(None, randint(10), randint(10, 20)) for _ in range(
4)])
i, s = 100, 'h'
for i, s in enumerate(ds.str):
pass
assert i == 100
assert s == 'h'
def test_cat(self):
one = np.array(['a', 'a', 'b', 'b', 'c', NA], dtype=np.object_)
two = np.array(['a', NA, 'b', 'd', 'foo', NA], dtype=np.object_)
# single array
result = strings.str_cat(one)
exp = 'aabbc'
assert result == exp
result = strings.str_cat(one, na_rep='NA')
exp = 'aabbcNA'
assert result == exp
result = strings.str_cat(one, na_rep='-')
exp = 'aabbc-'
assert result == exp
result = strings.str_cat(one, sep='_', na_rep='NA')
exp = 'a_a_b_b_c_NA'
assert result == exp
result = strings.str_cat(two, sep='-')
exp = 'a-b-d-foo'
assert result == exp
# Multiple arrays
result = strings.str_cat(one, [two], na_rep='NA')
exp = np.array(['aa', 'aNA', 'bb', 'bd', 'cfoo', 'NANA'],
dtype=np.object_)
tm.assert_numpy_array_equal(result, exp)
result = strings.str_cat(one, two)
exp = np.array(['aa', NA, 'bb', 'bd', 'cfoo', NA], dtype=np.object_)
        tm.assert_almost_equal(result, exp)
#!/usr/bin/env python
# coding=utf-8
# vim: set filetype=python:
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
import os
import posixpath
import sys
import math
import datetime
import string
from functools import wraps
import traceback
import xlrd3 as xlrd
import openpyxl
import unicodecsv as csv
from math import log10, floor
from pandas.api.types import is_string_dtype
import pandas as pd
import numpy as np
import six
import six.moves
import orjson as json
from plaidcloud.rpc import utc
from plaidcloud.rpc.connection.jsonrpc import SimpleRPC
from plaidcloud.rpc.rpc_connect import Connect
from plaidcloud.utilities.query import Connection, Table
from plaidcloud.utilities import data_helpers as dh
__author__ = '<NAME>'
__maintainer__ = '<NAME> <<EMAIL>>'
__copyright__ = '© Copyright 2013-2021, Tartan Solutions, Inc'
__license__ = 'Apache 2.0'
CSV_TYPE_DELIMITER = '::'
class ContainerLogger(object):
def info(self, msg):
print(msg, file=sys.stderr)
def debug(self, msg):
self.info(msg)
def exception(self, msg=None):
print(traceback.format_exc(), file=sys.stderr)
if msg is not None:
print(msg, file=sys.stderr)
logger = ContainerLogger()
def sql_from_dtype(dtype):
"""Returns a sql datatype given a pandas datatype
Args:
dtype (str): The pandas datatype to convert
Returns:
str: the equivalent SQL datatype
Examples:
>>> sql_from_dtype('bool')
'boolean'
>>> sql_from_dtype('float64')
'numeric'
>>> sql_from_dtype('number')
'numeric'
>>> sql_from_dtype('varchar(123)')
'text'
>>> sql_from_dtype('char(3)')
'text'
>>> sql_from_dtype('xml')
'text'
>>> sql_from_dtype('bytea')
'largebinary'
"""
mapping = {
'bool': 'boolean',
'boolean': 'boolean',
's8': 'text',
's16': 'text',
's32': 'text',
's64': 'text',
's128': 'text',
's256': 'text',
'object': 'text',
's512': 'text',
's1024': 'text',
'text': 'text',
'string': 'text',
'int8': 'smallint', # 2 bytes
'int16': 'integer',
'smallint': 'smallint',
'int32': 'integer', # 4 bytes
'integer': 'integer',
'int64': 'bigint', # 8 bytes
'bigint': 'bigint',
'float8': 'numeric',
'float16': 'numeric', # variable but ensures precision
'float32': 'numeric', # variable but ensures precision
'float64': 'numeric', # variable but ensures precision
'numeric': 'numeric',
'serial': 'serial',
'bigserial': 'bigserial',
'datetime64[s]': 'timestamp', # This may have to cover all datettimes
'datetime64[d]': 'timestamp',
'datetime64[ns]': 'timestamp',
'timestamp': 'timestamp',
'timestamp without time zone': 'timestamp',
'timedelta64[s]': 'interval', # This may have to cover all timedeltas
'timedelta64[d]': 'interval',
'timedelta64[ns]': 'interval',
'interval': 'interval',
'date': 'date',
'time': 'time',
'binary': 'largebinary',
'bytea': 'largebinary',
'largebinary': 'largebinary',
'xml': 'text',
'uuid': 'text',
'money': 'numeric',
'real': 'numeric',
'json': 'text',
'cidr': 'text',
'inet': 'text',
'macaddr': 'text',
}
dtype = str(dtype).lower()
if dtype.startswith('num'):
dtype = 'numeric'
elif 'char' in dtype:
dtype = 'text'
return mapping[dtype]
def save_typed_psv(df, outfile, sep='|', **kwargs):
"""Saves a typed psv, from a pandas dataframe. Types are analyze compatible
sql types, written in the header, like {column_name}::{column_type}, ...
Args:
df (`pandas.DataFrame`): The dataframe to create the psv from
outfile (file object or str): The path to save the output file to
sep (str, optional): The separator to use in the output file
"""
# ADT2017: _write_copy_from did something special with datetimes, but I'm
# not sure it's necessary, so I'm leaving it out.
#TODO: for now we just ignore extra kwargs - we accept them to make it a
#little easier to convert old to_csvs and read_csvs. In the future, we
    #should probably pass as many of them as possible on to to_csv/read_csv -
#the only issue is we have to make sure the header stays consistent.
def cleaned(name):
return six.text_type(name).replace(CSV_TYPE_DELIMITER, '')
column_names = [cleaned(n) for n in list(df)]
column_types = [sql_from_dtype(d) for d in df.dtypes]
header = [
CSV_TYPE_DELIMITER.join((name, sqltype))
for name, sqltype in six.moves.zip(column_names, column_types)
]
df.to_csv(outfile, header=header, index=False, sep=sep)
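# A minimal sketch of the typed-psv format produced above (the file path is illustrative).
def _typed_psv_example(path='/tmp/example.psv'):
    df = pd.DataFrame({'geo': ['usa', 'swe'], 'pop': [1.5, 2.5]})
    save_typed_psv(df, path)
    # The first line of the file becomes "geo::text|pop::numeric"; load_typed_psv (defined below)
    # uses that header to restore the column dtypes.
    return load_typed_psv(path)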
def list_of_dicts_to_typed_psv(lod, outfile, types, fieldnames=None, sep='|'):
""" Saves a list of dicts as a typed psv. Needs a dict of sql types. If
provided, fieldnames will specify the column order.
Args:
lod (:type:`list` of :type:`dict`): The list of dicts containing the data
to use to create the psv
outfile (str): The path to save the output file to, including file name
types (dict): a dict with column names as the keys and column datatypes as
the values
fieldnames (:type:`list` of :type:`str`, optional): A list of the field names.
If none is provided, defaults to the keys in `types`
sep (str): The separator to use in the output file
"""
def cleaned(name):
return six.text_type(name).replace(CSV_TYPE_DELIMITER, '')
header = {
name: CSV_TYPE_DELIMITER.join((cleaned(name), sqltype))
for name, sqltype in types.items()
}
if fieldnames is None:
# Caller doesn't care about the order
fieldnames = list(types.keys())
if isinstance(outfile, six.string_types):
buf = open(outfile, 'wb')
else:
buf = outfile
try:
writer = csv.DictWriter(buf, fieldnames=fieldnames, delimiter=sep)
writer.writerow(header) # It's not just the keys, so we're not using writeheader
for row in lod:
writer.writerow(row)
finally:
if isinstance(outfile, six.string_types):
buf.close()
#Otherwise leave it open, since this function didn't open it.
def get_project_variables(token, uri, project_id):
"""It opens a connection to Analyze and then
gets vars for a given project
Args:
token (str): oAuth token to pass into
uri (str): Typically https://ci.plaidcloud.com/json-rpc/, https://plaidcloud.com/json-rpc/
or local dev machine equiv. Would typically originate from a local config.
project_id (str): Id of the Project for which to grab the variables
Returns:
dict: Variables as key/values
"""
rpc = SimpleRPC(token, uri, verify_ssl=True)
try:
project_vars = rpc.analyze.project.variables(project_id=project_id)
except:
project_vars = rpc.analyze.project.variables(project=project_id)
return {pv['id']: pv['value'] for pv in project_vars}
def download(tables, configuration=None, retries=5, conn=None, clean=False, **kwargs):
"""This replaces the old get_tables() that was client-specific.
It opens a connection to Analyze and then
accepts a set of tables and saves them off to a local location.
For now, tables are understood to be typed psv's, but that can expand to
suit the need of the application (for instance, Excel.)
Args:
tables (set or list): table paths to retrieve (for backwards compatibility, you can leave off the initial '/')
token (str): token to pass into
uri (str): Typically https://ci.plaidcloud.com/json-rpc/, https://plaidcloud.com/json-rpc/
or local dev machine equiv. Would typically originate from a local config.
local_storage_path (str): local path where files should be saved. Would typically originate
from a local config.
**kwargs:
config (dict) contains a dict of config settings
token (str) simpleRFC authorization token
uri (str): uri e.g. 'https://ci.plaidcloud.com/json-rpc/'
local_storage_path (str) Target for files being saved
Returns:
        list of dict: one dict per requested table, of the form {'df': dataframe, 'name': table_path},
        or False if an RPC connection could not be established.
Examples:
"""
# TODO: if configuration is None, revert to **kwargs for the params we need.
if not conn:
try:
rpc = Connect()
except:
logger.exception('Could not connect via RPC')
return False
conn = Connection(project=rpc.project_id)
try:
return_df = configuration['return_df']
except:
return_df = True
try:
project_id = configuration['project_id']
except:
project_id = conn.project_id
dfs = []
for table in tables:
table_path = table.get('table_name')
query = table.get('query')
table_obj = table.get('table_object')
df = None # Initial value
# wipe this out each time through
clean_df = pd.DataFrame()
logger.debug("Attempting to download {0}...".format(table_path))
tries = 1
if table_obj is not None:
# RPC table object exists; proceed to use it to fetch data
while tries <= retries:
if query is None:
# no query passed. fetch whole table
df = conn.get_dataframe(table_obj, clean=clean)
if isinstance(df, pd.core.frame.DataFrame):
logger.debug("Downloaded {0}...".format(table_path))
break
elif isinstance(query, six.string_types):
# query object passed in. execute it
try:
df = conn.get_dataframe_by_querystring(query)
except Exception as e:
logger.exception("Attempt {0}: Failed to download {1}: {2}".format(tries, table_path, e))
else:
if isinstance(df, pd.core.frame.DataFrame):
logger.debug("Downloaded {0}...".format(table_path))
break
else:
# query object passed in. execute it
try:
df = conn.get_dataframe_by_query(query)
except Exception as e:
logger.exception("Attempt {0}: Failed to download {1}: {2}".format(tries, table_path, e))
else:
if isinstance(df, pd.core.frame.DataFrame):
logger.debug("Downloaded {0}...".format(table_path))
break
tries += 1
columns = table_obj.cols()
if columns:
if isinstance(df, pd.core.frame.DataFrame):
cols = [c['id'] for c in columns if c['id'] in df.columns.tolist()]
df = df[cols] # this ensures that the column order is as expected
else:
cols = [c['id'] for c in columns]
df = pd.DataFrame(columns=cols) # create empty dataframe with expected metadata/shape
else:
if not table_path.startswith('/'):
table_path = '/{}'.format(table_path)
table_result = None
while not table_result and tries <= retries:
tries += 1
try:
table_result = conn.analyze.table.table(project_id=project_id, table_path=table_path)
logger.debug("Downloaded {0}...".format(table_path))
break
except Exception as e:
logger.exception("Attempt {0}: Failed to download {1}: {2}".format(tries, table_path, e))
df = table_result_to_df(table_result or pd.DataFrame())
if not isinstance(df, pd.core.frame.DataFrame):
logger.exception('Table {0} failed to download!'.format(table_path))
elif len(df.columns) == 0:
logger.exception('Table {0} downloaded 0 records!'.format(table_path))
else:
if clean and query:
# Use the old cleaning process for things other than the full query.
clean_df = dh.clean_frame(df)
else:
clean_df = df
dfs.append({'df': clean_df, 'name': table_path})
return dfs
def load(source_tables, fetch=True, cache_locally=False, configuration=None, conn=None, clean=False):
"""Load frame(s) from requested source, returning a list of dicts
If local, will load from the typed_psv. If Analyze, then will load the analyze table.
"""
return_type = None
if type(source_tables) == list:
return_type = 'list'
elif type(source_tables) == str:
# single table (as string) passed... expecting to return full table
source_tables = [source_tables]
return_type = 'dataframe'
elif type(source_tables) == dict:
# single table (as dict) passed... likely with subsetting query, but not req'd
source_tables = [source_tables]
return_type = 'dataframe'
source_tables_proper = []
reassign = False
for s in source_tables:
if type(s) == str:
# convert list of strings into a list of dicts
reassign = True
d = {}
d['table_name'] = s
source_tables_proper.append(d)
if reassign:
# replace source_tables with reformatted version
source_tables = source_tables_proper
dfs = []
if fetch is True:
if not conn:
# create connection object
try:
rpc = Connect()
except:
logger.exception('Could not connect via RPC')
return False
conn = Connection(project=rpc.project_id)
for s in source_tables:
# create table objects if they don't exist
if s.get('table_object') == None:
s['table_object'] = Table(conn, s.get('table_name'))
downloads = download(source_tables, configuration=configuration, conn=conn, clean=clean)
for d in downloads:
df = d.get('df')
name_of_df = '{0}.psv'.format(d.get('name'))
if name_of_df.startswith('/'):
name_of_df = name_of_df[1:]
if cache_locally is True:
with open(os.path.join(configuration['LOCAL_STORAGE'], name_of_df), 'w') as f:
save_typed_psv(df, f)
dfs.append(df)
else:
for s in source_tables:
source_table = '{0}.psv'.format(s.get('table_name'))
source_path = os.path.join(configuration['LOCAL_STORAGE'], source_table)
df = load_typed_psv(source_path)
dfs.append(df)
if return_type == 'dataframe':
return dfs[0]
else:
return dfs
def load_new(source_tables, sep='|', fetch=True, cache_locally=False, configuration=None, connection=None):
"""Load frame(s) from requested source
If local, will load from the typed_psv. If Analyze, then will load the analyze table.
TODO: Make it fetch from analyze table....really this should be assimilated with dwim once dwim works again.
TODO: Make it go to analyze and cache locally, if requested to do so.
"""
if connection:
configuration['project_id'] = connection.project_id
if fetch is True:
download(source_tables, configuration)
dfs = []
for source_table in source_tables:
_, table_name = posixpath.split(source_table)
source_path = '{}/{}.psv'.format(configuration['LOCAL_STORAGE'], source_table)
df = load_typed_psv(source_path)
dfs.append(df)
return dfs
def dtype_from_sql(sql):
"""Gets a pandas dtype from a SQL data type
Args:
sql (str): The SQL data type
Returns:
str: the pandas dtype equivalent of `sql`
"""
mapping = {
'boolean': 'bool',
'text': 'object',
'smallint': 'int16',
'integer': 'int32',
'bigint': 'int64',
'numeric': 'float64',
'timestamp': 'datetime64[s]',
'interval': 'timedelta64[s]',
'date': 'datetime64[s]',
'time': 'datetime64[s]',
}
return mapping.get(str(sql).lower(), None)
def sturdy_cast_as_float(input_val):
"""
Force a value to be of type 'float'. Sturdy and unbreakeable.
Works like data_helpers.cast_as_float except it returns NaN and None
in cases where such seems appropriate, whereas the former forces to 0.0.
"""
if input_val is None:
return 0.0
try:
if np.isnan(input_val):
            return float('nan')
else:
try:
return float(input_val)
except ValueError:
return None
except:
try:
return float(input_val)
except ValueError:
return None
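# A behaviour sketch for sturdy_cast_as_float (assumes the NaN branch returns NaN, as documented).
def _sturdy_cast_examples():
    return (
        sturdy_cast_as_float(None),          # -> 0.0
        sturdy_cast_as_float('1.5'),         # -> 1.5 (np.isnan raises on a str, so float() is used)
        sturdy_cast_as_float('abc'),         # -> None
        sturdy_cast_as_float(float('nan')),  # -> nan
    )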
def converter_from_sql(sql):
"""Gets a pandas converter from a SQL data type
Args:
sql (str): The SQL data type
Returns:
str: the pandas converter
"""
mapping = {
'boolean': bool,
'text': str,
'smallint': int,
'integer': int,
'bigint': int,
#'numeric': float, #dh.cast_as_float,
#'numeric': dh.cast_as_float,
'numeric': sturdy_cast_as_float,
'timestamp': pd.datetime,
'interval': pd.datetime,
'date': pd.datetime,
'time': pd.datetime,
}
return mapping.get(str(sql).lower(), str(sql).lower())
def load_typed_psv(infile, sep='|', **kwargs):
""" Loads a typed psv into a pandas dataframe. If the psv isn't typed,
loads it anyway.
Args:
infile (str): The path to the input file
sep (str, optional): The separator used in the input file
"""
#TODO: for now we just ignore extra kwargs - we accept them to make it a
#little easier to convert old to_csvs and read_csvs. In the future, we
    #should probably pass as many of them as possible on to to_csv/read_csv -
#the only issue is we have to make sure the header stays consistent.
if isinstance(infile, six.string_types):
if os.path.exists(infile):
buf = open(infile, 'rb')
else:
logger.exception('File does not exist: {0}'.format(infile))
return False
else:
buf = infile
try:
headerIO = six.BytesIO(buf.readline()) # The first line needs to be in a separate iterator, so that we don't mix read and iter.
header = next(csv.reader(headerIO, delimiter=sep)) # Just parse that first line as a csv row
names_and_types = [h.split(CSV_TYPE_DELIMITER) for h in header]
column_names = [n[0] for n in names_and_types]
try:
dtypes = {
name: dtype_from_sql(sqltype)
for name, sqltype in names_and_types
}
except ValueError:
# Missing sqltype - looks like this is a regular, untyped csv.
# Let's hope that first line was its header.
dtypes = None
converters={}
#for name, sqltype in names_and_types:
#converter = converter_from_sql(sqltype)
#if converter:
#converters[name] = converter
try:
converters = {
name: converter_from_sql(sqltype)
for name, sqltype in names_and_types
}
except ValueError:
# Missing sqltype - looks like this is a regular, untyped csv.
# Let's hope that first line was its header.
converters = None
# This will start on the second line, since we already read the first line.
#return pd.read_csv(buf, header=None, names=column_names, dtype=dtypes, sep=sep)
na_values = [
#'', # This was here, and then commented out, and I'm putting it back in 20180824. ***
# # If it isn't here, we fail when attempting to import a delimited file of type 'numeric'
# # it is coming in as null/empty (e.g. the last record in the following set:)
# # LE::text|PERIOD::text|RC::text|MGCOA::text|VT::text|TP::text|FRB::text|FUNCTION::text|DCOV::numeric|LOCAL_CURRENCY::text|CURRENCY_RATE::numeric|DCOV_LC::numeric
# # LE_0585|2018_01|6019999|6120_NA|VT_0585|TP_NA|FRB_AP74358|OM|0.00031|EUR|0.8198|0.000254138
# # LE_0003|2018_07|CA10991|5380_EBITX|VT_9988|TP_NA|FRB_APKRA15|OM|-0.00115|INR|68.7297|-0.079039155
# # LE_2380|2017_08|AP92099|Q_5010_EBITX|VT_0585|TP_NA|FRB_AP92099|RE|99|||
'#N/A',
'#N/A N/A',
'#NA',
'-1.#IND',
'-1.#QNAN',
'-NaN',
'-nan',
'1.#IND',
'1.#QNAN',
'N/A',
'NA',
'NULL',
'NaN',
'n/a',
'nan',
'null'
]
parse_dates = []
if dtypes is not None:
for k, v in six.iteritems(dtypes):
dtypes[k] = v.lower()
#Handle inbound dates
#https://stackoverflow.com/questions/21269399/datetime-dtypes-in-pandas-read-csv
if 'datetime' in dtypes[k]:
dtypes[k] = 'object'
parse_dates.append(k)
try:
df = pd.read_csv(buf, header=None, names=column_names, dtype=dtypes, sep=sep, na_values=na_values, keep_default_na=False, parse_dates=parse_dates, encoding='utf-8')
except ValueError:
#remove dtypes if we have converters instead:
for k in six.iterkeys(converters):
if k in list(dtypes.keys()):
dtypes.pop(k, None)
na_values.append('')
buf = open(infile, 'rb')
headerIO = six.BytesIO(buf.readline()) # The first line needs to be in a separate iterator, so that we don't mix read and iter.
header = next(csv.reader(headerIO, delimiter=sep)) # Just parse that first line as a csv row
df = pd.read_csv(buf, header=None, names=column_names, dtype=dtypes, sep=sep, na_values=na_values, keep_default_na=False, parse_dates=parse_dates, converters=converters, encoding='utf-8')
finally:
# A final note:
            # SURELY there's a more efficient and native pandas way of doing this, but I'll be damned if I could figure it out.
# Pandas used to have an error='coerce' method to force data type. It's no longer an option, it seems.
# Forcing data type is NOT easy, when incoming text data is sequential delimiters with no values or whitespace.
# What We're doing now is still not w/o risk. There are use cases for setting empty to zero, which is what we're doing, and use cases to set
# empty to null, which is probably what we SHOULD do, but for now, we do it this way because we already have a battle hardened dh.cast_as_float that
# works this way. We should probably just call a different homegrown float that returns a NaN or None (None being preferred) rather than 0.0 on exception.
# Mercy. This has been a pain.
# I guess if it was easy, Pandas wouldn't support the ability to send in your own converters.
pass
return df
finally:
if isinstance(infile, six.string_types):
buf.close()
#Otherwise leave it open, since this function didn't open it.
def table_result_to_df(result):
"""Converts a SQL result to a pandas dataframe
Args:
result (dict): The result of a database query
Returns:
`pandas.DataFrame`: A dataframe representation of `result`
"""
meta = result['meta']
data = result['data']
columns = [m['id'] for m in meta]
dtypes = {
m['id']: dtype_from_sql(m['dtype'].lower())
for m in meta
}
df = pd.DataFrame.from_records(data, columns=columns)
try:
typed_df = df.astype(dtype=dtypes)
except:
"""
This is heavy-handed, but it had to be.
Something was tripping up the standard behavior, presumably relating to
handling of nulls in floats. We're forcing them to 0.0 for now, which is possibly
sketchy, depending on the use case, but usually preferred behavior.
Buyer beware.
"""
typed_df = df
for col in typed_df.columns:
if dtypes[col] == u'object':
typed_df[col] = list(map(dh.cast_as_str, typed_df[col]))
elif dtypes[col].startswith(u'float'):
typed_df[col] = list(map(dh.cast_as_float, typed_df[col]))
elif dtypes[col].startswith(u'int'): #detect any flavor of int and cast it as int.
typed_df[col] = list(map(dh.cast_as_int, typed_df[col]))
return typed_df
def dwim_save(df, name, localdir='/tmp', lvl='model', extension='txt', sep='|', **kwargs):
"""If we're on an app server, saves a dataframe as an analyze table.
Otherwise saves it as a typed psv in localdir.
Args:
df (`pandas.DataFrame`): The dataframe to save
name (str): The name to save this dataframe as
localdir (str, optional): The local path to save the typed psv
lvl (str, optional): What level (project/model) the table should be
extension (str, optional): What file extension to give the output file
sep (str, optional): The separator to use in the output file
"""
#TODO: for now we just ignore extra kwargs - we accept them to make it a
#little easier to convert old to_csvs and read_csvs. In the future, we
    #should probably pass as many of them as possible on to to_csv/read_csv -
#the only issue is we have to make sure the header stays consistent.
try:
from plaid.app.analyze.sandboxed_code.user.iplaid.frame import save, save_project
# We must be on the app server.
# TODO: change this when we change how iplaid works
save_fn = {
'model': save,
'project': save_project,
}[lvl]
save_fn(df, name)
except ImportError:
# We must not be on an app server, so save as typed_psv
fname = '.'.join((name, extension))
if lvl == 'model':
path = os.path.join(localdir, fname)
else:
path = os.path.join(localdir, lvl, fname)
dirname = os.path.dirname(path)
if not os.path.exists(dirname):
os.makedirs(dirname)
save_typed_psv(df, path, sep)
def dwim_load(name, localdir='/tmp', lvl='model', extension='txt', sep='|', **kwargs):
"""If we're on an app server, loads an analyze table.
Otherwise loads a typed psv from localdir.
Args:
name (str): The name of the table or file to load
localdir (str, optional): The path to the directory where the local file is stored
lvl (str, optional): The level (model/project) of the table to load
extension (str, optional): The flie extension of the local file
sep (str, optional): The separator used in the local file
"""
#TODO: for now we just ignore extra kwargs - we accept them to make it a
#little easier to convert old to_csvs and read_csvs. In the future, we
    #should probably pass as many of them as possible on to to_csv/read_csv -
#the only issue is we have to make sure the header stays consistent.
try:
from plaid.app.analyze.sandboxed_code.user.iplaid.frame import load, load_project
# We must be on the app server.
# TODO: change this when we change how iplaid works
load_fn = {
'model': load,
'project': load_project,
}[lvl]
return load_fn(name)
except ImportError:
# We must not be on an app server, so load from typed_psv
fname = '.'.join((name, extension))
if lvl == 'model':
path = os.path.join(localdir, fname)
else:
path = os.path.join(localdir, lvl, fname)
return load_typed_psv(path, sep)
def clean_uuid(id):
"""Removes any invalid characters from a UUID and ensures it is 32 or 36 characters
Args:
id (str): The ID to clean
Returns:
str: `id` with any invalid characters removed
"""
# !! WARNING: If you're calling this in new code, make sure it's really what you
# !! want. It used to remove dashes. That turned out to be a bad idea. Now
# !! it leaves dashes in.
#
# !! If you've found a bug related to dashes being left in, and this is
# !! being called on lookup, you should probably just remove the call to
# !! clean_uuid. Going forward, we don't remove dashes.
if id is None:
return None
name = six.text_type(id).lower()
valid_chars = '0123456789abcdef-'
cleaned_id = u''.join(n for n in name if n in valid_chars)
if '-' in cleaned_id:
if len(cleaned_id) != 36:
raise Exception("Could not clean id {}. Not 36 characters long.".format(id))
else:
if len(cleaned_id) != 32:
raise Exception("Could not clean id {}. Not 32 characters long.".format(id))
return cleaned_id
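# Hypothetical examples of clean_uuid behaviour (the values are illustrative).
def _clean_uuid_examples():
    with_dashes = clean_uuid('123E4567-E89B-12D3-A456-426614174000')   # lower-cased, dashes kept
    without_dashes = clean_uuid('123e4567e89b12d3a456426614174000')    # 32 hex characters pass through
    return with_dashes, without_dashes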
def clean_name(name):
"""
DEPRECATED: does nothing
Removes any invalid characters from a name and limits it to 63 characters
Args:
name (str): The name to clean
Returns:
str: The cleaned version of `name`
"""
return name
def clean_filename(name):
"""Remove '/' from a name
Args:
name (str): the filename to clean
Returns:
str: the cleaned version of `name`
"""
if name is None:
return None
# everything's fine except /
    return six.text_type(name).translate({ord('/'): None})
def describe(df):
"""Shorthand for df.describe()
Args:
df (`pandas.DataFrame`): The dataframe to describe
Returns:
summary: Series/DataFrame of summary statistics
"""
return df.describe()
def unique_values(df, column):
"""Returns unique values in the provided column
Args:
df (`pandas.DataFrame`): The DataFrame containing data
column (str): The column to find unique values in
Returns:
list: The unique values in the column
"""
return df[column].unique()
def count_unique(group_by, count_column, df):
"""Returns a count of unique items in a dataframe
Args:
group_by (str): The group by statement to apply to the dataframe
count_column (str): The column to count unique records in
df (`pandas.DataFrame`): The DataFrame containing the data
Returns:
int: The count of unique items in the specified column after grouping
"""
return df.groupby(group_by)[count_column].apply(lambda x: len(x.unique()))
def sum(group_by, df):
return df.groupby(group_by).sum()
def std(group_by, df):
return df.groupby(group_by).std()
def mean(group_by, df):
return df.groupby(group_by).mean()
def count(group_by, df):
return df.groupby(group_by).count()
def inner_join(left_frame, right_frame, left_on, right_on=None, keep_columns=None):
"""Keeps only matches
Args:
left_frame (`pandas.DataFrame`): The first frame to join
right_frame (`pandas.DataFrame`): The second frame to join on
left_on (str): Which column in the left frame to join on
right_on (str, optional): Which column to join on in the right frame, if different from `left_on`
keep_columns (:type:`list` of :type:`str`, optional): A list of columns to keep in the result
Returns:
`pandas.DataFrame`: A frame containing the results of the join
"""
if right_on is None:
right_on = left_on
if type(left_on) == str:
left_on = [left_on]
if type(right_on) == str:
right_on = [right_on]
right_cols = right_frame.columns
# optional arg to specify which columns in right table to keep
if keep_columns is not None:
drop_columns = set()
for col in right_cols:
if (col in keep_columns) or (col in right_on):
pass
else:
# column not being kept
drop_columns.add(col)
# exclude columns from right table
right_cols = right_cols.difference(drop_columns)
return pd.merge(left_frame, right_frame[right_cols], left_on=left_on, right_on=right_on, how='inner')
def outer_join(left_frame, right_frame, left_on, right_on=None, keep_columns=None):
"""Keeps data from both frames and matches up using the on_columns
Args:
left_frame (`pandas.DataFrame`): The first frame to join
right_frame (`pandas.DataFrame`): The second frame to join on
left_on (str): Which column in the left frame to join on
right_on (str, optional): Which column to join on in the right frame, if different from `left_on`
keep_columns (:type:`list` of :type:`str`, optional): A list of columns to keep in the result
Returns:
`pandas.DataFrame`: A frame containing the results of the join
"""
if right_on is None:
right_on = left_on
if type(left_on) == str:
left_on = [left_on]
if type(right_on) == str:
right_on = [right_on]
right_cols = right_frame.columns
# optional arg to specify which columns in right table to keep
if keep_columns is not None:
drop_columns = set()
for col in right_cols:
if (col in keep_columns) or (col in right_on):
pass
else:
# column not being kept
drop_columns.add(col)
# exclude columns from right table
right_cols = right_cols.difference(drop_columns)
return pd.merge(left_frame, right_frame[right_cols], left_on=left_on, right_on=right_on, how='outer')
def left_join(left_frame, right_frame, left_on, right_on=None, keep_columns=None):
"""Keeps all data from left frame and any matches in right using the on_columns
Args:
left_frame (`pandas.DataFrame`): The first frame to join
right_frame (`pandas.DataFrame`): The second frame to join on
left_on (str): Which column in the left frame to join on
right_on (str, optional): Which column to join on in the right frame, if different from `left_on`
keep_columns (:type:`list` of :type:`str`, optional): A list of columns to keep in the result
Returns:
`pandas.DataFrame`: A frame containing the results of the join
"""
if right_on is None:
right_on = left_on
if type(left_on) == str:
left_on = [left_on]
if type(right_on) == str:
right_on = [right_on]
right_cols = right_frame.columns
# optional arg to specify which columns in right table to keep
if keep_columns is not None:
drop_columns = set()
for col in right_cols:
if (col in keep_columns) or (col in right_on):
pass
else:
# column not being kept
drop_columns.add(col)
# exclude columns from right table
right_cols = right_cols.difference(drop_columns)
return pd.merge(left_frame, right_frame[right_cols], left_on=left_on, right_on=right_on, how='left')
def right_join(left_frame, right_frame, left_on, right_on=None, keep_columns=None):
"""Keeps all data from right frame and any matches in left using the on_columns
Args:
left_frame (`pandas.DataFrame`): The first frame to join
right_frame (`pandas.DataFrame`): The second frame to join on
left_on (str): Which column in the left frame to join on
right_on (str, optional): Which column to join on in the right frame, if different from `left_on`
keep_columns (:type:`list` of :type:`str`, optional): A list of columns to keep in the result
Returns:
`pandas.DataFrame`: A frame containing the results of the join
"""
if right_on is None:
right_on = left_on
if type(left_on) == str:
left_on = [left_on]
if type(right_on) == str:
right_on = [right_on]
right_cols = right_frame.columns
# optional arg to specify which columns in right table to keep
if keep_columns is not None:
drop_columns = set()
for col in right_cols:
if (col in keep_columns) or (col in right_on):
pass
else:
# column not being kept
drop_columns.add(col)
# exclude columns from right table
right_cols = right_cols.difference(drop_columns)
return pd.merge(left_frame, right_frame[right_cols], left_on=left_on, right_on=right_on, how='right')
def anti_join(left_frame, right_frame, left_on, right_on=None):
"""Keeps all data from left frame that is not found in right frame
Args:
left_frame (`pandas.DataFrame`): The first frame to join
right_frame (`pandas.DataFrame`): The second frame to join on
left_on (str): Which column in the left frame to join on
right_on (str, optional): Which column to join on in the right frame, if different from `left_on`
Returns:
`pandas.DataFrame`: A frame containing the results of the join
"""
if right_on is None:
right_on = left_on
if type(left_on) == str:
left_on = [left_on]
if type(right_on) == str:
right_on = [right_on]
indicator_status = False
indicator_name = '_merge'
left_cols = left_frame.columns
# avoid collision with pd generated indicator name
while not indicator_status:
if indicator_name in left_cols:
indicator_name = '_' + indicator_name
else:
indicator_status = True
df = pd.merge(left_frame, right_frame[right_on], how='left', left_on=left_on, right_on=right_on, indicator=indicator_name)
df = df[df[indicator_name] == 'left_only']
del df[indicator_name]
return df
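# A hypothetical worked example of anti_join (toy frames; the column names are illustrative).
def _anti_join_example():
    left = pd.DataFrame({'k': [1, 2, 3], 'val': ['a', 'b', 'c']})
    right = pd.DataFrame({'k': [2]})
    return anti_join(left, right, 'k')  # keeps only the rows with k == 1 and k == 3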
def compare(left_frame, right_frame, left_on, right_on=None):
"""Keeps all data from right frame and any matches in left using the on_columns"""
#20180420 PBB Is "compare" a good name for this, it's basically a right-join in SQL terms?
#20180420 MWR It's quite old legacy. Not sure this one has ever been used for anything. Perhaps
# we can just do away with it.
if right_on is None:
right_on = left_on
    return pd.merge(left_frame, right_frame, left_on=left_on, right_on=right_on, how='outer')
"""
Area Weighted Interpolation
"""
import numpy as np
import geopandas as gpd
from ._vectorized_raster_interpolation import _fast_append_profile_in_gdf
import warnings
from scipy.sparse import dok_matrix, diags, coo_matrix
import pandas as pd
from tobler.util.util import _check_crs, _nan_check, _inf_check, _check_presence_of_crs
def _area_tables_binning(source_df, target_df, spatial_index):
"""Construct area allocation and source-target correspondence tables using a spatial indexing approach
...
NOTE: this currently relies on Geopandas' spatial index machinery
Parameters
----------
source_df : geopandas.GeoDataFrame
GeoDataFrame containing input data and polygons
target_df : geopandas.GeoDataFramee
GeoDataFrame defining the output geometries
spatial_index : str
Spatial index to use to build the allocation of area from source to
target tables. It currently support the following values:
- "source": build the spatial index on `source_df`
- "target": build the spatial index on `target_df`
- "auto": attempts to guess the most efficient alternative.
Currently, this option uses the largest table to build the
index, and performs a `bulk_query` on the shorter table.
Returns
-------
tables : scipy.sparse.dok_matrix
"""
if _check_crs(source_df, target_df):
pass
else:
return None
df1 = source_df.copy()
df2 = target_df.copy()
# it is generally more performant to use the longer df as spatial index
if spatial_index == "auto":
if df1.shape[0] > df2.shape[0]:
spatial_index = "source"
else:
spatial_index = "target"
if spatial_index == "source":
ids_tgt, ids_src = df1.sindex.query_bulk(df2.geometry, predicate="intersects")
elif spatial_index == "target":
ids_src, ids_tgt = df2.sindex.query_bulk(df1.geometry, predicate="intersects")
else:
raise ValueError(
f"'{spatial_index}' is not a valid option. Use 'auto', 'source' or 'target'."
)
areas = df1.geometry.values[ids_src].intersection(df2.geometry.values[ids_tgt]).area
table = coo_matrix(
(areas, (ids_src, ids_tgt),),
shape=(df1.shape[0], df2.shape[0]),
dtype=np.float32,
)
table = table.todok()
return table
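# Reading the allocation table returned above (a sketch; source_gdf/target_gdf are illustrative):
#   table = _area_tables_binning(source_gdf, target_gdf, "auto")
#   table.shape   # (len(source_gdf), len(target_gdf))
#   table[i, j]   # intersection area between source polygon i and target polygon j (0 if disjoint)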
def _area_tables(source_df, target_df):
"""
Construct area allocation and source-target correspondence tables.
Parameters
----------
source_df : geopandas.GeoDataFrame
target_df : geopandas.GeoDataFrame
Returns
-------
tables : tuple (optional)
two 2-D numpy arrays
SU: area of intersection of source geometry i with union geometry j
UT: binary mapping of union geometry j to target geometry t
Notes
-----
The assumption is both dataframes have the same coordinate reference system.
Union geometry is a geometry formed by the intersection of a source geometry and a target geometry
SU Maps source geometry to union geometry, UT maps union geometry to target geometry
"""
if _check_crs(source_df, target_df):
pass
else:
return None
    source_df = source_df.copy()
    target_df = target_df.copy()
n_s = source_df.shape[0]
n_t = target_df.shape[0]
_left = np.arange(n_s)
_right = np.arange(n_t)
source_df.loc[:, "_left"] = _left # create temporary index for union
target_df.loc[:, "_right"] = _right # create temporary index for union
res_union = gpd.overlay(source_df, target_df, how="union")
n_u, _ = res_union.shape
SU = np.zeros(
(n_s, n_u)
) # holds area of intersection of source geom with union geom
UT = np.zeros((n_u, n_t)) # binary table mapping union geom to target geom
for index, row in res_union.iterrows():
# only union polygons that intersect both a source and a target geometry matter
if not np.isnan(row["_left"]) and not np.isnan(row["_right"]):
s_id = int(row["_left"])
t_id = int(row["_right"])
SU[s_id, index] = row[row.geometry.name].area
UT[index, t_id] = 1
source_df.drop(["_left"], axis=1, inplace=True)
target_df.drop(["_right"], axis=1, inplace=True)
return SU, UT
def _area_interpolate_binning(
source_df,
target_df,
extensive_variables=None,
intensive_variables=None,
table=None,
allocate_total=True,
spatial_index="auto",
):
"""
Area interpolation for extensive and intensive variables.
Parameters
----------
source_df : geopandas.GeoDataFrame
target_df : geopandas.GeoDataFrame
extensive_variables : list
[Optional. Default=None] Columns in dataframes for extensive variables
intensive_variables : list
[Optional. Default=None] Columns in dataframes for intensive variables
table : scipy.sparse.dok_matrix
[Optional. Default=None] Area allocation source-target correspondence
table. If not provided, it will be built from `source_df` and
`target_df` using `tobler.area_interpolate._area_tables_binning`
allocate_total : boolean
[Optional. Default=True] True if total value of source area should be
allocated. False if denominator is area of i. Note that the two cases
would be identical when the area of the source polygon is exhausted by
intersections. See Notes for more details.
spatial_index : str
[Optional. Default="auto"] Spatial index to use to build the
allocation of area from source to target tables. It currently support
the following values:
- "source": build the spatial index on `source_df`
- "target": build the spatial index on `target_df`
- "auto": attempts to guess the most efficient alternative.
Currently, this option uses the largest table to build the
index, and performs a `bulk_query` on the shorter table.
Returns
-------
estimates : geopandas.GeoDataFrame
new geodaraframe with interpolated variables as columns and target_df geometry
as output geometry
Notes
-----
The assumption is both dataframes have the same coordinate reference system.
For an extensive variable, the estimate at target polygon j (default case) is:
.. math::
v_j = \\sum_i v_i w_{i,j}
w_{i,j} = a_{i,j} / \\sum_k a_{i,k}
If the area of the source polygon is not exhausted by intersections with
target polygons and there is reason to not allocate the complete value of
an extensive attribute, then setting allocate_total=False will use the
following weights:
.. math::
v_j = \\sum_i v_i w_{i,j}
w_{i,j} = a_{i,j} / a_i
where a_i is the total area of source polygon i.
For an intensive variable, the estimate at target polygon j is:
.. math::
v_j = \\sum_i v_i w_{i,j}
w_{i,j} = a_{i,j} / \\sum_k a_{k,j}
"""
source_df = source_df.copy()
target_df = target_df.copy()
if _check_crs(source_df, target_df):
pass
else:
return None
if table is None:
table = _area_tables_binning(source_df, target_df, spatial_index)
den = source_df[source_df.geometry.name].area.values
if allocate_total:
den = np.asarray(table.sum(axis=1))
den = den + (den == 0)
den = 1.0 / den
n = den.shape[0]
den = den.reshape((n,))
den = diags([den], [0])
weights = den.dot(table) # row standardize table
dfs = []
extensive = []
if extensive_variables:
for variable in extensive_variables:
vals = _nan_check(source_df, variable)
vals = _inf_check(source_df, variable)
estimates = diags([vals], [0]).dot(weights)
estimates = estimates.sum(axis=0)
extensive.append(estimates.tolist()[0])
        extensive = np.asarray(extensive)
        extensive = pd.DataFrame(extensive.T, columns=extensive_variables)
import itertools
import numpy as np
import pandas as pd
def F_score(v, y_label):
x_0 = 0
x_1 = 0
v_pos = v[y_label > 0]
v_neg = v[y_label <= 0]
v_ave = np.mean(v)
v_pos_ave = np.mean(v_pos)
v_neg_ave = np.mean(v_neg)
len_pos = len(v_pos)
len_neg = len(v_neg)
for i in range(len_pos):
x_0 += (v_pos[i] - v_pos_ave) ** 2
for j in range(len_neg):
        x_1 += (v_neg[j] - v_neg_ave) ** 2
f_score = ((v_pos_ave - v_ave) ** 2 + (v_neg_ave - v_ave) ** 2) / (
(1 / (len_pos - 1)) * x_0 + (1 / (len_neg - 1)) * x_1)
return f_score
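# A small, hypothetical self-check for F_score (toy numbers, not part of the original module).
def _f_score_example():
    v = np.array([1.0, 2.0, 3.0, 4.0])
    y = np.array([1, 1, -1, -1])
    # positive mean 1.5, negative mean 3.5, overall mean 2.5 -> (1 + 1) / (0.5 + 0.5) = 2.0
    return F_score(v, y)  # expected: 2.0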
def make_kmer_list(k, alphabet):
try:
return ["".join(e) for e in itertools.product(alphabet, repeat=k)]
except TypeError:
print("TypeError: k must be an inter and larger than 0, alphabet must be a string.")
raise TypeError
except ValueError:
print("TypeError: k must be an inter and larger than 0")
raise ValueError
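# Example: the 2-mers over the 'ACGT' alphabet, in itertools.product order.
def _kmer_list_example():
    return make_kmer_list(2, 'ACGT')  # ['AA', 'AC', 'AG', 'AT', 'CA', ...] -- 16 strings in total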
def kmer(data_seq, k):
# calculate the k-mer feature of a seq
RNA_code = 'ACGT'
    code_values = make_kmer_list(k, RNA_code)
count = np.zeros((len(data_seq), len(code_values)))
    for i, line_value in enumerate(data_seq.values):  # for every sample
for j, code_value in enumerate(line_value[0]): # for every position
if j <= len(line_value[0]) - k + 1:
for p, c_value in enumerate(code_values):
if c_value == line_value[0][j:j + k]:
count[i][p] += 1
count /= len(code_values) - k + 1
return count
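# A shape sketch for kmer(): one row per input sequence, one column per possible k-mer
# (toy sequences; assumes a DataFrame with one sequence string per row in column 0).
def _kmer_feature_example():
    seqs = pd.DataFrame(['ACGTACGT', 'ACGTTTTT'])
    return kmer(seqs, 3)  # array of shape (2, 64) for k = 3 over 'ACGT'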
def MvPS3merNP(all_positive_seq, all_negative_seq, train_samples, test_sample, interval):
RNA_code = 'ACGT'
all_final_seq_value_tra = []
all_final_seq_value_tes = []
for train_sample in train_samples:
# calculate Z matrix
positive_seq = all_positive_seq[train_sample]
negative_seq = all_negative_seq[train_sample]
len_seq = len(positive_seq[0])
positive_df = pd.DataFrame(positive_seq)
positive_x_train = positive_df.iloc[:, :]
negative_df = pd.DataFrame(negative_seq)
negative_x_train = negative_df.iloc[:, :]
code_values = make_kmer_list(interval, RNA_code)
code_len = len(code_values)
positive_seq_value = [[0 for jj in range(len_seq - interval + 1)] for ii in range(code_len)]
negative_seq_value = [[0 for jj in range(len_seq - interval + 1)] for ii in range(code_len)]
for i, line_value in enumerate(positive_x_train.values):
for j, code_value in enumerate(line_value[0]):
if j <= len(line_value[0]) - interval + 1:
for p, c_value in enumerate(code_values):
if c_value == line_value[0][j:j + interval]:
positive_seq_value[p][j] += 1
positive_seq_value = np.matrix(positive_seq_value) * 1.0 / (len(positive_seq))
for i, line_value in enumerate(negative_x_train.values):
for j, code_value in enumerate(line_value[0]):
if j <= len(line_value[0]) - interval + 1:
for p, c_value in enumerate(code_values):
if c_value == line_value[0][j:j + interval]:
negative_seq_value[p][j] += 1
negative_seq_value = np.matrix(negative_seq_value) * 1.0 / (len(negative_seq))
tes_final_value = []
tra_final_value = []
# training features
for train_sample_x in train_samples:
tra_positive_seq = all_positive_seq[train_sample_x]
tra_negative_seq = all_negative_seq[train_sample_x]
tra_positive_df = pd.DataFrame(tra_positive_seq)
tra_negative_df = pd.DataFrame(tra_negative_seq)
tra_positive_train = tra_positive_df.iloc[:, :]
tra_negative_train = tra_negative_df.iloc[:, :]
tra_positive_negative_train = pd.concat([tra_positive_train, tra_negative_train], axis=0)
tra_final_seq_value = [[0 for ii in range(len_seq - interval + 1)] for jj in
range(len(tra_positive_negative_train))]
for i, line_value in enumerate(tra_positive_negative_train.values):
for j, code_value in enumerate(line_value[0]):
if j <= len(line_value[0]) - interval + 1:
for p, c_value in enumerate(code_values):
if c_value == line_value[0][j:j + interval]:
tra_final_seq_value[i][j] = positive_seq_value[p, j] - negative_seq_value[p, j]
tra_final_value.append(tra_final_seq_value)
tes_positive_seq = all_positive_seq[test_sample]
tes_negative_seq = all_negative_seq[test_sample]
tes_positive_df = pd.DataFrame(tes_positive_seq)
tes_negative_df = pd.DataFrame(tes_negative_seq)
tes_positive_train = tes_positive_df.iloc[:, :]
tes_negative_train = tes_negative_df.iloc[:, :]
tes_positive_negative_train = pd.concat([tes_positive_train, tes_negative_train], axis=0)
tes_final_seq_value = [[0 for ii in range(len_seq - interval + 1)] for jj in
range(len(tes_positive_negative_train))]
for i, line_value in enumerate(tes_positive_negative_train.values):
for j, code_value in enumerate(line_value[0]):
if j <= len(line_value[0]) - interval + 1:
for p, c_value in enumerate(code_values):
if c_value == line_value[0][j:j + interval]:
tes_final_seq_value[i][j] = positive_seq_value[p, j] - negative_seq_value[p, j]
tes_final_value.append(tes_final_seq_value)
all_final_seq_value_tra.append(np.concatenate(tra_final_value))
all_final_seq_value_tes.append(np.concatenate(tes_final_value))
X_train = np.array(all_final_seq_value_tra)
X_test = np.array(all_final_seq_value_tes)
return X_train, X_test
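# Added usage sketch (tiny made-up multi-view data, not the original experiments):
# all_positive_seq / all_negative_seq map a view name to a list of equal-length sequences,
# and one view name is held out as the test set.
def _demo_MvPS3merNP():
    all_pos = {'view_a': ['ACGTACGT', 'ACGGACGT'], 'view_b': ['ACGTACGA', 'CCGTACGT']}
    all_neg = {'view_a': ['TTTTACGT', 'TTGTACGT'], 'view_b': ['TTTTACGA', 'TTGTACGA']}
    X_train, X_test = MvPS3merNP(all_pos, all_neg, ['view_a', 'view_b'], 'view_b', 3)
    # shapes: (n_views, n_train_seqs, seq_len - interval + 1) and (n_views, n_test_seqs, seq_len - interval + 1)
    return X_train.shape, X_test.shape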
def MvPS3merNP_KL(all_positive_seq, all_negative_seq, train_samples, test_sample, interval):
RNA_code = 'ACGT'
all_final_seq_value_tra = []
all_final_seq_value_tes = []
for train_sample in train_samples:
# calculate Z matrix
positive_seq = all_positive_seq[train_sample]
negative_seq = all_negative_seq[train_sample]
len_seq = len(positive_seq[0])
positive_df = pd.DataFrame(positive_seq)
positive_x_train = positive_df.iloc[:, :]
negative_df = pd.DataFrame(negative_seq)
negative_x_train = negative_df.iloc[:, :]
code_values = make_kmer_list(interval, RNA_code)
code_len = len(code_values)
positive_seq_value = [[0 for jj in range(len_seq - interval + 1)] for ii in range(code_len)]
negative_seq_value = [[0 for jj in range(len_seq - interval + 1)] for ii in range(code_len)]
for i, line_value in enumerate(positive_x_train.values):
for j, code_value in enumerate(line_value[0]):
if j <= len(line_value[0]) - interval + 1:
for p, c_value in enumerate(code_values):
if c_value == line_value[0][j:j + interval]:
positive_seq_value[p][j] += 1
positive_seq_value = np.matrix(positive_seq_value) * 1.0 / (len(positive_seq))
for i, line_value in enumerate(negative_x_train.values):
for j, code_value in enumerate(line_value[0]):
if j <= len(line_value[0]) - interval + 1:
for p, c_value in enumerate(code_values):
if c_value == line_value[0][j:j + interval]:
negative_seq_value[p][j] += 1
negative_seq_value = np.matrix(negative_seq_value) * 1.0 / (len(negative_seq))
positive_seq_value[positive_seq_value <= 0] = 1e-09
positive_seq_value_log = np.log(positive_seq_value)
# positive_seq_value_log[np.isinf(positive_seq_value_log)] = -10
negative_seq_value[negative_seq_value <= 0] = 1e-09
negative_seq_value_log = np.log(negative_seq_value)
# negative_seq_value_log[np.isinf(negative_seq_value_log)] = -10
Z = np.multiply(positive_seq_value, (positive_seq_value_log - negative_seq_value_log))
tes_final_value = []
tra_final_value = []
# training features
for train_sample_x in train_samples:
tra_positive_seq = all_positive_seq[train_sample_x]
tra_negative_seq = all_negative_seq[train_sample_x]
tra_positive_df = pd.DataFrame(tra_positive_seq)
#-*- coding: utf-8 -*-
import sys
import random
import numpy as np
import pandas as pd
import utility_1
import h5py
import json
eps=1e-12
def countCG(strs):
strs = strs.upper()
return float((strs.count("C")+strs.count("G")))/(len(strs))
def countCG_N(strs):
strs = strs.upper()
return float((strs.count("C")+strs.count("G")))/(len(strs)-strs.count("N")+eps)
def countCG_skew(strs):
strs = strs.upper()
num1, num2 = strs.count("G"), strs.count("C")
return float((num1-num2))/(num1+num2+eps)
def one_hot_encoding(seq, seq_len):
vec1 = np.zeros((4,seq_len))
cnt = 0
for i in range(0,seq_len):
if seq[i]=='A':
vec1[0,i] = 1
elif seq[i]=='G':
vec1[1,i] = 1
elif seq[i]=='C':
vec1[2,i] = 1
elif seq[i]=='T':
vec1[3,i] = 1
else:
pass
return np.int64(vec1)
def index_encoding(seq, seq_len, seq_dict):
vec1 = np.zeros(seq_len)
for i in range(0,seq_len):
vec1[i] = seq_dict[seq[i]]
return np.int64(vec1)
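# Added usage sketch: one_hot_encoding() returns a 4 x seq_len matrix (rows A, G, C, T;
# 'N' columns stay all-zero) and index_encoding() returns a length-seq_len integer vector.
# The base-to-index mapping below is an assumption; the original module does not fix one.
def _demo_encodings():
    seq = 'ACGTN'
    onehot = one_hot_encoding(seq, len(seq))
    seq_dict = {'A': 0, 'G': 1, 'C': 2, 'T': 3, 'N': 4}
    idx = index_encoding(seq, len(seq), seq_dict)
    return onehot, idx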
# Read sequences as strings ("N" retained)
def getString(fileStr):
file = open(fileStr, 'r')
gen_seq = ""
lines = file.readlines()
for line in lines:
line = line.strip()
gen_seq += line
gen_seq = gen_seq.upper()
return gen_seq
# Read sequences of format fasta ("N" removed)
def getStringforUnlabel(fileStr):
file = open(fileStr, 'r')
gen_seq = ""
lines = file.readlines()
for line in lines:
if(line[0] == ">"):
continue
else:
line = line.strip()
gen_seq += line
gen_seq = gen_seq.upper()
gen_seq = gen_seq.replace("N", "")
return gen_seq
def get_reverse_str(str):
str = str.upper()
str_new=""
for i in range(len(str)):
if(str[i]=="T"):
str_new+="A"
elif(str[i]=="A"):
str_new+="T"
elif(str[i]=="G"):
str_new+="C"
elif(str[i]=="C"):
str_new+="G"
else:
str_new+=str[i]
return str_new
# Get sequence of 2K+1 centered at pos
def getSubSeq(str, pos, K):
n = len(str)
l = pos - K
r = pos + K + 1
if l > r or l < 0 or r > n - 1:
return 0
elif "N" in str[l:r]:
return 0
return str[l:r]
# Get sequence of 2K+1 centered at pos
def getSubSeq2(str, pos, K):
n = len(str)
l = max(0, pos - K)
r = min(n - 1, pos + K + 1)
if l > r:
print(l, pos, r)
print("left pointer is bigger than right one")
return 0
return str[l:pos]+" "+str[pos]+" "+str[pos+1:r]
# Convert DNA to sentences with overlapping window of size K
def DNA2Sentence(dna, K):
sentence = ""
length = len(dna)
for i in range(length - K + 1):
sentence += dna[i: i + K] + " "
# remove spaces
sentence = sentence[0 : len(sentence) - 1]
return sentence
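# Added usage sketch: DNA2Sentence() turns a sequence into a space-separated "sentence"
# of overlapping k-mers.
def _demo_DNA2Sentence():
    return DNA2Sentence('ACGTAC', 3)  # -> 'ACG CGT GTA TAC'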
# Convert DNA to sentences with overlapping window of size K in reverse direction
def DNA2SentenceReverse(dna, K):
sentence = ""
length = len(dna)
for i in range(length - K + 1):
j = length - K - i
sentence += dna[j: j + K] + " "
# remove spaces
sentence = sentence[0 : len(sentence) - 1]
return sentence
def reverse(s):
str = ""
for i in s:
str = i + str
return str
# Convert DNA to sentences with overlapping window of size K in reverse direction
def DNA2SentenceReverse_1(dna, K):
sentence = ""
length = len(dna)
dna = reverse(dna)
for i in range(length - K + 1):
sentence += dna[i: i + K] + " "
# remove spaces
sentence = sentence[0 : len(sentence) - 1]
return sentence
# Convert DNA to sentences with non-overlapping window of size K
def DNA2SentenceJump(dna, K,step):
sentence = ""
length = len(dna)
i=0
while i <= length - K:
sentence += dna[i: i + K] + " "
i += step
return sentence
# Convert DNA to sentences with non-overlapping window of size K in reverse direction
def DNA2SentenceJumpReverse(dna, K,step):
sentence = ""
length = len(dna)
j = 0
while j <= length - K:
i = length - K - j
sentence += dna[i: i + K] + " "
j += step
return sentence
def gen_Seq(Range):
print ("Generating Seq...")
table = pd.read_table(PATH1+"prep_data.txt",sep = "\t")
print (len(table))
table = table.drop_duplicates()
print (len(table))
label_file = open(PATH1+"LabelSeq", "w")
total = len(table)
list = ["chr1", "chr2", "chr3", "chr4", "chr5", "chr6", "chr7", "chr8", "chr9", \
"chr10", "chr11", "chr12", "chr13", "chr14", "chr15", "chr16", "chr17", \
"chr18", "chr19", "chr20", "chr21", "chr22", "chrX", "chrY","chrM"]
number_positive = 0
dict_pos={}
for i in range(total):
if (number_positive % 100 == 0) and (number_positive != 0):
print ("number of seq: %d of %d\r" %(number_positive,total),end = "")
sys.stdout.flush()
chromosome = table["chromosome"][i]
if chromosome in dict_pos.keys():
strs = dict_pos[chromosome]
else:
strs = processSeq.getString(ROOT_PATH1+"Chromosome_38/" + str(chromosome) + ".fa")
dict_pos[chromosome] = strs
bias = 7
start = int(table["start"][i] - 1 - Range + bias)
end = start + 23 + Range*2
strand = table["strand"][i]
edstrs1 = strs[start : end]
if strand == "-":
edstrs1 = edstrs1[::-1]
edstrs1 = processSeq.get_reverse_str(edstrs1)
if "N" in edstrs1:
table = table.drop(i)
continue
outstr = "%s\n"%(edstrs1)
label_file.write(outstr)
number_positive += 1
table.to_csv(PATH1+"prep_data.txt",sep = "\t",index = False)
def get_target():
table = pd.read_table(PATH1+"prep_data.txt", sep="\t")
print (len(table))
table = table.drop_duplicates()
print (len(table))
target_file = open(PATH1+"TargetSeq", "w")
for i in range(len(table)):
target = table['target'][i].upper()
target_file.write(target+"\n")
target_file.close()
def prep_data():
chrom_list = ["chr1", "chr2", "chr3", "chr4", "chr5", "chr6", "chr7", "chr8", "chr9", \
"chr10", "chr11", "chr12", "chr13", "chr14", "chr15", "chr16", "chr17", \
"chr18", "chr19", "chr20", "chr21", "chr22", "chrX", "chrY","chrM"]
tab = pd.read_table(PATH1+"casoffinder_CHANGEseq_joined.tsv",sep = '\t')
tab = tab[tab['chromosome'].isin(chrom_list)]
tab['label'] = 1 - tab['reads'].isna()
tab['end'] = tab['start'] + 23
print (tab['chromosome'].unique())
tab.to_csv(PATH1+"prep_data.txt",sep = "\t",index = False)
def load_file(f_name,length,vec_name):
base_code = {
'A': 0,
'C': 1,
'G': 2,
'T': 3,
}
num_pairs = sum(1 for line in open(f_name))
# number of sample pairs
num_bases = 4
with open(f_name, 'r') as f:
line_num = 0 # number of lines (i.e., samples) read so far
for line in f.read().splitlines():
if (line_num % 100 == 0) and (line_num != 0):
print ("number of input data: %d\r" %(line_num),end= "")
sys.stdout.flush()
if line_num == 0:
# allocate space for output
seg_length = length # number of bases per sample
Xs_seq1 = np.zeros((num_pairs, num_bases, seg_length))
for start in range(len(line)):
if line[start] in base_code:
print (start)
break
base_num = 0
for x in line[start:start+length]:
if x != "N":
Xs_seq1[line_num, base_code[x], base_num] = 1
base_num += 1
line_num += 1
X = Xs_seq1
np.save("../%s" %(vec_name),X)
def kmer_dict(K):
vec1 = ['A','G','C','T']
vec2 = vec1.copy() # kmer dict
vec3 = []
num1 = len(vec1)
for k1 in range(1,K):
for character in vec1:
for temp1 in vec2:
seq1 = character+temp1
vec3.append(seq1)
vec2 = vec3.copy()
vec3 = []
return vec2
def kmer_counting(seq, K, kmer_dict1):
len1 = len(kmer_dict1)
vec = np.zeros((len1),dtype=np.float32)
len2 = len(seq)-K+1
cnt = 0
for kmer in kmer_dict1:
num1 = seq.count(kmer)
vec[cnt] = num1
cnt = cnt+1
vec = vec*1.0/len2
return vec
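# Added usage sketch: kmer_dict(K) enumerates all 4**K k-mers and kmer_counting() returns
# their frequencies in a sequence, normalized by the number of sliding windows. Note that
# str.count() counts non-overlapping occurrences, so overlapping repeats are undercounted.
def _demo_kmer_counting():
    dimers = kmer_dict(2)                       # 16 dimers: 'AA', 'AG', ..., 'TT'
    vec = kmer_counting('ACGTACGT', 2, dimers)
    return dimers, vec                          # here vec sums to 1 (7 windows, all counted)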
def align_region(species_id):
col1, col2, col3 = '%s.chrom'%(species_id), '%s.start'%(species_id), '%s.stop'%(species_id)
def load_seq_kmer(species_id, file1, filename2, K, kmer_dict1):
# file1 = pd.read_csv(filename1,sep='\t')
col1, col2, col3 = '%s.chrom'%(species_id), '%s.start'%(species_id), '%s.stop'%(species_id)
chrom, start, stop, serial = np.asarray(file1[col1]), np.asarray(file1[col2]), np.asarray(file1[col3]), np.asarray(file1['serial'])
num1 = len(chrom)
file = open(filename2, 'r')
# serial_list, line_list = [], []
serial_list = -np.ones((num1,2))
feature_dim = len(kmer_dict1)
f_list = np.zeros((num1,feature_dim))
lines = file.readlines()
num_line = len(lines)
cnt = -1
flag = 0
print(num_line,num1)
# temp1 = int(num_line/2)
for line in lines:
if(line[0]==">"):
# continue
# line: >chr1:5-10
cnt = cnt + 1
str1 = line[1:]
temp1 = str1.split(':')
t_chrom = temp1[0]
temp2 = temp1[1].split('-')
t_start, t_stop = int(temp2[0]), int(temp2[1])
chrom1, start1, stop1, serial1 = chrom[cnt], start[cnt], stop[cnt], serial[cnt]
if (chrom1==t_chrom) and (start1==t_start) and (stop1==t_stop):
flag = 1
else:
b = np.where((chrom==t_chrom)&(start==t_start)&(stop==t_stop))[0]
if len(b)>0:
cnt = b[0]
flag = 1
else:
if flag == 1:
line = line.strip().upper()
vec = kmer_counting(line,K,kmer_dict1)
# line_list.append(line)
# f_list.append(vec)
# line_list.append(line)
# N_list.append(line.count('N'))
flag = 0
serial_list[cnt,0], serial_list[cnt,1] = serial[cnt], line.count('N')
f_list[cnt] = vec
filename1 = '%s.vec'%(species_id)
np.save(filename1,(serial_list,f_list))
return serial_list, f_list
# load the annotation file and the sequence feature file
# return kmer feature: num_samples*feature_dim
# return one-hot encoding feature: num_samples*4*feature_dim
def load_seq_1_ori(species_id, file1, filename2, K, kmer_dict1):
# file1 = pd.read_csv(filename1,sep='\t')
col1, col2, col3 = '%s.chrom'%(species_id), '%s.start'%(species_id), '%s.stop'%(species_id)
chrom, start, stop, serial = np.asarray(file1[col1]), np.asarray(file1[col2]), np.asarray(file1[col3]), np.asarray(file1['serial'])
label = np.asarray(file1['label'])
group_label = np.asarray(file1['group_label'])
signal = np.asarray(file1['signal'])
num1 = len(chrom)
len1 = stop-start
seq_len = int(np.median(len1))
file = open(filename2, 'r')
# serial_list, line_list = [], []
serial_list = -np.ones((num1,2))
feature_dim = len(kmer_dict1)
f_list = np.zeros((num1,feature_dim))
f_mtx = np.zeros((num1,4,seq_len))
lines = file.readlines()
num_line = len(lines)
cnt = -1
flag = 0
print(num_line,num1)
# temp1 = int(num_line/2)
i = 0
for line in lines:
if(line[0]==">"):
# continue
# line: >chr1:5-10
# print(cnt)
cnt = cnt + 1
str1 = line[1:]
temp1 = str1.split(':')
t_chrom = temp1[0]
temp2 = temp1[1].split('-')
t_start, t_stop = int(temp2[0]), int(temp2[1])
chrom1, start1, stop1, serial1 = chrom[cnt], start[cnt], stop[cnt], serial[cnt]
if (chrom1==t_chrom) and (start1==t_start) and (stop1==t_stop):
flag = 1
else:
b = np.where((chrom==t_chrom)&(start==t_start)&(stop==t_stop))[0]
if len(b)>0:
cnt = b[0]
flag = 1
else:
if flag == 1:
line = line.strip().upper()
vec = kmer_counting(line,K,kmer_dict1)
# line_list.append(line)
# f_list.append(vec)
flag = 0
serial_list[cnt,0], serial_list[cnt,1] = serial[cnt], line.count('N')
f_list[cnt] = vec
f_mtx[cnt] = one_hot_encoding(line, seq_len)
i += 1
if i % 100 == 0:
print("%d of %d\r" %(i,num1), end = "")
sys.stdout.flush()
b = np.where(serial_list[:,0]>=0)[0]
serial_list, f_list, f_mtx, label, group_label = serial_list[b], f_list[b], f_mtx[b], label[b], group_label[b]
# filename1 = '%s.vec'%(species_id)
# np.save(filename1,(serial_list,f_list))
return serial_list, f_list, f_mtx, label, group_label, signal
# load feature
def load_seq_altfeature_1(species_id, file1, filename2, output_filename):
# file1 = pd.read_csv(filename1,sep='\t')
col1, col2, col3 = '%s.chrom'%(species_id), '%s.start'%(species_id), '%s.stop'%(species_id)
chrom, start, stop, serial = np.asarray(file1[col1]), np.asarray(file1[col2]), np.asarray(file1[col3]), np.asarray(file1['serial'])
label = np.asarray(file1['label'])
group_label = np.asarray(file1['group_label'])
signal = np.asarray(file1['signal'])
num1 = len(chrom)
len1 = stop-start
seq_len = int(np.median(len1))
file = open(filename2, 'r')
# serial_list, line_list = [], []
serial_list = -np.ones((num1,3))
feature_dim = 3
# num1 = 2000
f_list = np.zeros((num1,feature_dim))
# f_mtx = np.zeros((num1,4,seq_len))
lines = file.readlines()
num_line = len(lines)
cnt = -1
flag = 0
print(num_line,num1)
# temp1 = int(num_line/2)
i = 0
serial_vec, seq_vec = [], []
for line in lines:
if(line[0]==">"):
# continue
# line: >chr1:5-10
# print(cnt)
cnt = cnt + 1
str1 = line[1:]
temp1 = str1.split(':')
t_chrom = temp1[0]
temp2 = temp1[1].split('-')
t_start, t_stop = int(temp2[0]), int(temp2[1])
chrom1, start1, stop1, serial1 = chrom[cnt], start[cnt], stop[cnt], serial[cnt]
if (chrom1==t_chrom) and (start1==t_start) and (stop1==t_stop):
flag = 1
else:
b = np.where((chrom==t_chrom)&(start==t_start)&(stop==t_stop))[0]
if len(b)>0:
cnt = b[0]
flag = 1
else:
if flag == 1:
line = line.strip().upper()
# vec = kmer_counting(line,K,kmer_dict1)
serial_vec.append([cnt,serial[cnt]])
seq_vec.append(line)
GC_profile = countCG(line)
GC_profile1 = countCG_N(line)
GC_skew = countCG_skew(line)
vec = [GC_profile,GC_profile1,GC_skew]
# line_list.append(line)
# f_list.append(vec)
flag = 0
serial_list[cnt,0], serial_list[cnt,1], serial_list[cnt,2] = serial[cnt], len(line), line.count('N')
f_list[cnt] = vec
# f_mtx[cnt] = one_hot_encoding(line, seq_len)
i += 1
if i % 1000 == 0:
print("%d of %d\r" %(i,num1), end = "")
sys.stdout.flush()
# if cnt>1000:
# break
# b = np.where(serial_list[:,0]>=0)[0]
# serial_list, f_list, f_mtx, label, group_label = serial_list[b], f_list[b], f_mtx[b], label[b], group_label[b]
# filename1 = '%s.vec'%(species_id)
# np.save(filename1,(serial_list,f_list))
serial_vec = np.asarray(serial_vec)
fields = ['index','serial','seq']
data1 = pd.DataFrame(columns=fields)
data1[fields[0]], data1[fields[1]] = serial_vec[:,0], serial_vec[:,1]
data1[fields[2]] = seq_vec
# data1.to_csv('test_seq.txt',index=False,sep='\t')
data1.to_csv(output_filename,index=False,sep='\t')
return serial_list, f_list, label, group_label, signal
# feature 1: GC profile
# feature 2: GC skew
# def load_seq_altfeature(filename2, K, kmer_dict1, sel_idx):
def load_seq_altfeature(filename2, sel_idx):
file2 = pd.read_csv(filename2,sep='\t')
import pandas as pd
# which 10 subjects do we want to clean data for?
subjects = ['DE220', 'DE221', 'DE222', 'DE223', 'DE224', 'DE225', 'DE226', 'DE227', 'DE228', 'DE229', 'DE230']
for sub in subjects:
# let's add in a note so we know which subject is being worked on
print('Working on ', sub)
# read in participant data
dbdm_run1 = pd.read_csv(f'/Users/lizbeard/Desktop/intro-to-coding-2021/misc/logs/{sub}/{sub}_dbdm_run_1.csv')
dbdm_run2 = pd.read_csv(f'/Users/lizbeard/Desktop/intro-to-coding-2021/misc/logs/{sub}/{sub}_dbdm_run_2.csv')
ebdm_run1 = pd.read_csv(f'/Users/lizbeard/Desktop/intro-to-coding-2021/misc/logs/{sub}/{sub}_ebdm_run_1.csv')
ebdm_run2 = pd.read_csv(f'/Users/lizbeard/Desktop/intro-to-coding-2021/misc/logs/{sub}/{sub}_ebdm_run_2.csv')
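    # Added hedged sketch (the original script is truncated here): one plausible next step
    # inside this per-subject loop is to stack the four runs into a single labeled frame.
    combined = pd.concat(
        [dbdm_run1, dbdm_run2, ebdm_run1, ebdm_run2],
        keys=['dbdm_1', 'dbdm_2', 'ebdm_1', 'ebdm_2'],
    )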
from pathlib import Path
import click
import feather
import pandas as pd
from dotenv import find_dotenv
from dotenv import load_dotenv
from scipy.io import arff
from src.utils.logger import info
from src.utils.logger import init_logger
def make_dataset(name):
"""Runs data processing scripts to turn raw data from (../raw) into
cleaned data ready to be analyzed (saved in ../processed).
"""
info(f"{name}: making final data set from raw data")
# useful for finding various files
project_dir = Path(__file__).resolve().parents[2]
if name == "EEG":
input_file = Path.joinpath(project_dir, "data", "raw", "EEG Eye State.arff")
output_file = Path.joinpath(project_dir, "data", "processed", "EEG.feather")
data = arff.loadarff(input_file)
df = pd.DataFrame(data[0])
df.eyeDetection = df.eyeDetection.astype('int')
feather.write_dataframe(df, str(output_file))
elif name == "ptbdb":
input_file_abnormal = Path.joinpath(project_dir, "data", "raw", "ptbdb_abnormal.csv")
input_file_normal = Path.joinpath(project_dir, "data", "raw", "ptbdb_normal.csv")
output_file = Path.joinpath(project_dir, "data", "processed", "ptbdb.feather")
df_abnormal = pd.read_csv(input_file_abnormal, header=None)
df_normal = pd.read_csv(input_file_normal, header=None)
df = pd.concat([df_abnormal, df_normal])
feather.write_dataframe(df, str(output_file))
elif name == "mitbih":
# merge test and train for the purpose of this project
input_file_train = Path.joinpath(project_dir, "data", "raw", "mitbih_train.csv")
input_file_test = Path.joinpath(project_dir, "data", "raw", "mitbih_test.csv")
output_file = Path.joinpath(project_dir, "data", "processed", "MITBIH.feather")
df_abnormal = pd.read_csv(input_file_train, header=None)
df_normal = pd.read_csv(input_file_test, header=None)
# Created on 2020/7/15
# This module is for the class TimeSeries and related functions.
# Standard library imports
from datetime import datetime
from typing import Any, Callable, Optional, Union
import warnings
# Third party imports
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pytz
import scipy.stats as stats
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import Ridge
from sklearn.preprocessing import PolynomialFeatures
from sklearn.pipeline import make_pipeline
from sklearn.gaussian_process import GaussianProcessRegressor, kernels
from statsmodels.api import OLS
from statsmodels.graphics.tsaplots import plot_acf, plot_pacf
from typeguard import typechecked
# Local application imports
from .. import exceptions
# Dictionary of Pandas' Offset Aliases
# and their numbers of appearance in a year.
DPOA = {'D': 365, 'B': 252, 'W': 52,
'SM': 24, 'SMS': 24,
'BM': 12, 'BMS': 12, 'M': 12, 'MS': 12,
'BQ': 4, 'BQS': 4, 'Q': 4, 'QS': 4,
'Y': 1, 'A':1}
# Datetimes format
fmt = "%Y-%m-%d %H:%M:%S"
fmtz = "%Y-%m-%d %H:%M:%S %Z%z"
#---------#---------#---------#---------#---------#---------#---------#---------#---------#
@typechecked
def get_list_timezones() -> None:
"""
Lists all the time zone names that can be used.
"""
print(pytz.all_timezones)
return None
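# Added illustrative note (not part of the original API): the DPOA mapping above is what
# drives annualization in the TimeSeries methods below, e.g. the volatility of a
# business-day ('B') series is scaled by sqrt(252) and a monthly ('M') series by sqrt(12).
_ANNUALIZATION_FACTOR_EXAMPLE = {freq: np.sqrt(periods) for freq, periods in DPOA.items()}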
# CLASS Series
@typechecked
class Series:
"""
Abstract class defining a Series and its methods.
This class serves as a parent class for TimeSeries and CatTimeSeries.
Attributes
----------
data : pandas.Series or pandas.DataFrame
Contains a time-like index and for each time a single value.
start_utc : Pandas.Timestamp
Starting date.
end_utc : Pandas.Timestamp
Ending date.
nvalues : int
Number of values, i.e. also of dates.
freq : str or None
Frequency inferred from index.
name : str
Name or nickname of the series.
unit : str or None
Unit of the series values.
tz : str
Timezone name.
timezone : pytz timezone
Timezone associated with dates.
"""
def __init__(self,
data: Union[pd.Series, pd.DataFrame, None]=None,
tz: str=None,
unit: str=None,
name: str=None
) -> None:
"""
Receives a panda.Series or pandas.DataFrame as an argument and initializes the time series.
"""
# Deal with DataFrame / Series
if (data is None) or (data.empty is True):
self.data = pd.Series(index=None, data=None)
self.start_utc = None
self.end_utc = None
self.nvalues = 0
self.freq = None
self.name = 'Empty TimeSeries'
else:
# Making sure the user entered a pandas.Series or pandas.DataFrame
# with just an index and one column for values
if isinstance(data, pd.DataFrame):
if data.shape[1] != 1:
raise AssertionError("Time series must be built from a pandas.Series or a pandas.DataFrame with only one value column.")
else:
self.data = pd.Series(data.iloc[:, 0])
elif not isinstance(data, pd.Series):
raise AssertionError("Time series must be built from a pandas.Series or a pandas.DataFrame with only one value column.")
else:
self.data = data
# Deal with time
if isinstance(data.index[0], str):
data.index = pd.to_datetime(data.index, format=fmt)
self.start_utc = datetime.strptime(str(data.index[0]), fmt)
self.end_utc = datetime.strptime(str(data.index[-1]), fmt)
self.nvalues = data.shape[0]
else:
self.start_utc = data.index[0]
self.end_utc = data.index[-1]
self.nvalues = data.shape[0]
try:
self.freq = pd.infer_freq(self.data.index)
except:
self.freq = 'Unknown'
# Deal with unit
self.unit = unit
# Deal with timezone
if tz is None:
self.tz = 'UTC'
self.timezone = pytz.utc
else:
self.tz = tz
self.timezone = pytz.timezone(tz)
# Deal with name (nickname)
if name is None:
name = ""
self.name = name
def get_start_date_local(self) -> datetime.date:
"""
Returns the attribute UTC start date in local time zone defined by attribute timezone.
"""
start_tmp = datetime.strptime(str(self.start_utc), fmt).astimezone(self.timezone)
return datetime.strftime(start_tmp, format=fmtz)
def get_end_date_local(self) -> datetime.date:
"""
Returns the attribute UTC end date in local time zone defined by attribute timezone.
"""
end_tmp = datetime.strptime(str(self.end_utc), fmt).astimezone(self.timezone)
return datetime.strftime(end_tmp, format=fmtz)
def specify_data(self,
start: Union[str, datetime.date],
end: Union[str, datetime.date]
) -> Union[pd.Series, pd.DataFrame]:
"""
Returns the appropriate data according to user's specifying
or not the desired start and end dates.
"""
# Prepare data
if (start is None) and (end is None):
data = self.data
elif (start is None) and (end is not None):
data = self.data[:end]
elif (start is not None) and (end is None):
data = self.data[start:]
elif (start is not None) and (end is not None):
data = self.data[start:end]
return data
def start_end_names(self,
start: Union[str, datetime.date],
end: Union[str, datetime.date]
) -> (str, str):
"""
Recasts the time series dates to 10 characters strings
if the date hasn't been re-specified (i.e. value is 'None').
"""
s = str(self.start_utc)[:10] if (start is None) else start
e = str(self.end_utc)[:10] if (end is None) else end
return s, e
def is_sampling_uniform(self) -> bool:
"""
Tests if the sampling of a time series is uniform or not.
Returns a boolean value True when the sampling is uniform, False otherwise.
"""
# Prepare data
sampling = [datetime.timestamp(x) for x in self.data.index]
assert(len(sampling)==self.nvalues)
intervals = [sampling[x] - sampling[x-1] for x in range(1,self.nvalues,1)]
# Testing
prev = intervals[0]
for i in range(1,len(intervals),1):
if intervals[i] - prev > 1.e-6:
return False
return True
#---------#---------#---------#---------#---------#---------#---------#---------#---------#
# CLASS TimeSeries
@typechecked
class TimeSeries(Series):
"""
Class defining a time series and its methods.
This class inherits from the parent class 'Series'.
Attributes
----------
data : pandas.Series or pandas.DataFrame
Contains a time-like index and for each time a single value.
start_utc : Pandas.Timestamp
Starting date.
end_utc : Pandas.Timestamp
Ending date.
nvalues : int
Number of values, i.e. also of dates.
freq : str or None
Frequency inferred from index.
name : str
Name or nickname of the series.
tz : str
Timezone name.
timezone : pytz timezone
Timezone associated with dates.
type : str
Type of the series.
unit : str or None
Unit of the time series values.
"""
def __init__(self,
data: Union[pd.Series, pd.DataFrame, None]=None,
tz: str=None,
unit: str=None,
name: str=None
) -> None:
"""
Receives a pandas.Series or pandas.DataFrame as an argument and initializes the time series.
"""
super().__init__(data=data, tz=tz, unit=unit, name=name)
# Add attributes initialization if needed
self.type = 'TimeSeries'
### Plot INFORMATION ABOUT THE TIME SERIES ###
def simple_plot(self,
figsize: (float, float) = (12, 5),
dpi: float=100
) -> None:
"""
Plots the time series in a simple way.
Parameters
----------
figsize : 2-tuple of ints
Dimensions of the figure.
dpi : int
Dots-per-inch definition of the figure.
Returns
-------
None
None
"""
# Plot
plt.figure(figsize=figsize, dpi=dpi)
plt.plot(self.data.index, self.data.values, color='k')
# Make it cute
if self.name is None:
title = "Time series from " + str(self.start_utc)[:10] \
+ " to " + str(self.end_utc)[:10]
else:
title = "Time series " + self.name + " from " + str(self.start_utc)[:10] \
+ " to " + str(self.end_utc)[:10]
if self.tz is None:
xlabel = 'Date'
else:
xlabel = 'Date (' + self.tz + ')'
if self.unit is None:
ylabel = 'Value'
else:
ylabel = 'Value (' + self.unit + ')'
plt.gca().set(title=title, xlabel=xlabel, ylabel=ylabel)
plt.show()
return None
@typechecked
def distribution(self,
start: Union[str, datetime.date]=None,
end: Union[str, datetime.date]=None,
bins: int=20,
figsize: (float, float) = (8, 4),
dpi: float=100
) -> None:
"""
Plots the distribution of values between two dates.
"""
# Prepare data
data = self.specify_data(start, end)
# Plot distribution of values
plt.figure(figsize=figsize, dpi=dpi)
data.hist(bins=bins, grid=False, color='w', lw=2, edgecolor='k')
# Make it cute
s,e = self.start_end_names(start, end)
title = "Distribution of values between " + s + " and " + e
plt.gca().set(title=title, xlabel="Value", ylabel="Hits")
plt.show()
return None
def density(self,
start: Union[str, datetime.date]=None,
end: Union[str, datetime.date]=None,
bins: int=20,
figsize: (float, float) = (8, 4),
dpi: float=100
) -> None:
"""
Plots the density of values between two dates.
"""
# Prepare data
data = self.specify_data(start, end)
s,e = self.start_end_names(start, end)
# Plot distribution of values
fig, ax = plt.subplots(figsize=figsize, dpi=dpi)
data.plot.density(color='k', ax=ax, legend=False)
# Make it cute
title = "Density plot of values between " + s + " and " + e
plt.gca().set(title=title, xlabel="Value", ylabel="Density")
plt.show()
return None
def simple_plot_distrib(self,
start: Union[str, datetime.date]=None,
end: Union[str, datetime.date]=None,
bins: int=20,
figsize: (float, float) = (10, 4),
dpi: float=100
) -> None:
"""
Plots the time series and its associated distribution of values between two dates.
"""
# Checks
assert(isinstance(bins,int))
# Prepare data
data = self.specify_data(start, end)
s,e = self.start_end_names(start, end)
# Plot
fig = plt.figure(figsize=figsize, dpi=dpi)
gs = fig.add_gridspec(1, 4)
# Plot 1 - Time Series simple plot
f_ax1 = fig.add_subplot(gs[:, 0:3])
f_ax1.plot(data.index, data.values, color='k')
if self.name is None:
title1 = "Time series from " + s + " to " + e
else:
title1 = "Time series " + self.name + " from " + s + " to " + e
if self.tz is None:
xlabel = 'Date'
else:
xlabel = 'Date (' + self.tz + ')'
if self.unit is None:
ylabel = 'Value'
else:
ylabel = 'Value (' + self.unit + ')'
plt.gca().set(title=title1, xlabel=xlabel, ylabel=ylabel)
# Plot 2 - Distribution of values
f_ax2 = fig.add_subplot(gs[:, 3:])
data.hist(bins=bins, grid=False, ax=f_ax2, orientation="horizontal", color='w', lw=2, edgecolor='k')
plt.subplots_adjust(left=0, bottom=0, right=1, top=1, wspace=0.3, hspace=0)
title2 = "Distribution"
plt.gca().set(title=title2, xlabel=ylabel, ylabel="Hits")
plt.show()
return None
def get_sampling_interval(self) -> float:
"""
Returns the sampling interval for a uniformly-sampled time series.
"""
if(self.is_sampling_uniform()==False):
raise exceptions.SamplingError("Time series is not uniformly sampled.")
else:
idx1 = self.data.index[1]
idx0 = self.data.index[0]
intv = datetime.timestamp(idx1) - datetime.timestamp(idx0)
return intv
def lag_plot(self,
lag: int=1,
figsize: (float, float) = (5, 5),
dpi: float=100,
alpha: float=0.5
) -> None:
"""
Returns the scatter plot x_t v.s. x_{t-l}.
"""
# Check
try:
assert(lag>0)
except AssertionError:
raise AssertionError("The lag must be an integer equal or more than 1.")
# Do the plot
fig = plt.figure(figsize=figsize, dpi=dpi)
pd.plotting.lag_plot(self.data, lag=lag, c='black', alpha=alpha)
# Set title
if self.name is None:
tmp_name = " "
else:
tmp_name = self.name
title = "Lag plot of time series " + tmp_name
plt.gca().set(title=title, xlabel="x(t)", ylabel="x(t+"+str(lag)+")")
plt.show()
return None
def lag_plots(self,
nlags: int=5,
figsize: (float, float) = (10, 10),
dpi: float=100,
alpha: float=0.5
) -> None:
"""
Returns a number of scatter plots x_t v.s. x_{t-l}
where l is the lag value taken from [0,...,nlags].
Notes
-----
It is required that nlags > 1.
"""
# Check
try:
assert(nlags>1)
except AssertionError:
raise AssertionError("nlags must be an integer starting from 2.")
# Rule for the number of rows/cols
ncols = int(np.sqrt(nlags))
if(nlags % ncols == 0):
nrows = nlags // ncols
else:
nrows = nlags // ncols + 1
# Do the plots
fig, axes = plt.subplots(nrows=nrows, ncols=ncols, sharex=True, sharey=True,
figsize=figsize, dpi=dpi)
for i, ax in enumerate(axes.flatten()[:nlags]):
pd.plotting.lag_plot(self.data, lag=i+1, ax=ax, c='black', alpha=alpha)
ax.set_xlabel("x(t)")
ax.set_ylabel("x(t+"+str(i+1)+")")
# Set title
if self.name is None:
tmp_name = " "
else:
tmp_name = self.name
title = "Multiple lag plots of time series " + tmp_name
fig.suptitle(title)
plt.show()
return None
### SIMPLE DATA EXTRACTION ON THE TIME SERIES ###
def hist_avg(self, start: Union[str, datetime.date]=None, end: Union[str, datetime.date]=None) -> float:
"""
Returns the historical average of the time series
between two dates (default is the whole series).
"""
data = self.specify_data(start, end)
avg = data.values.mean()
return avg
def hist_std(self, start: Union[str, datetime.date]=None, end: Union[str, datetime.date]=None) -> float:
"""
Returns the historical standard deviation of the time series
between two dates (default is the whole series).
"""
data = self.specify_data(start, end)
std = data.values.std()
return std
def hist_variance(self, start: Union[str, datetime.date]=None, end: Union[str, datetime.date]=None) -> float:
"""
Returns the historical variance of the time series
between two dates (default is the whole series).
"""
data = self.specify_data(start, end)
var = data.values.var()
return var
def hist_skewness(self, start: Union[str, datetime.date]=None, end: Union[str, datetime.date]=None) -> float:
"""
Returns the historical skew of the time series
between two dates (default is the whole series).
"""
data = self.specify_data(start, end)
skew = stats.skew(data.values)
return skew
def hist_kurtosis(self, start: Union[str, datetime.date]=None, end: Union[str, datetime.date]=None) -> float:
"""
Returns the historical (Pearson, i.e. non-excess) kurtosis of the time series
between two dates (default is the whole series).
"""
data = self.specify_data(start, end)
kurt = stats.kurtosis(data.values, fisher=False)
return kurt
def min(self, start: Union[str, datetime.date]=None, end: Union[str, datetime.date]=None) -> float:
"""
Returns the minimum of the series.
"""
data = self.specify_data(start, end)
ts_min = data.values.min()
return ts_min
def max(self, start: Union[str, datetime.date]=None, end: Union[str, datetime.date]=None) -> float:
"""
Returns the maximum of the series.
"""
data = self.specify_data(start, end)
ts_max = data.values.max()
return ts_max
def describe(self, start: Union[str, datetime.date]=None, end: Union[str, datetime.date]=None) -> None:
"""
Returns description of time series between two dates.
This uses the pandas function having same name.
"""
data = self.specify_data(start, end)
print(data.describe())
return None
### METHODS THAT ARE CLOSER TO FINANCIAL APPLICATIONS ###
def percent_change(self,
start: Union[str, datetime.date]=None,
end: Union[str, datetime.date]=None,
name: str=""
) -> 'TimeSeries':
"""
Returns the percent change of the series (in %).
Notes
-----
When computing the percent change, first date gets
NaN value and is thus removed from the time series.
"""
data = self.specify_data(start, end)
new_data = data.pct_change()
new_ts = TimeSeries(data=new_data[1:], tz=self.tz, unit='%', name=name)
return new_ts
# Alias method of percent_change()
# For people with a Finance terminology preference
net_returns = percent_change
def gross_returns(self,
start: Union[str, datetime.date]=None,
end: Union[str, datetime.date]=None,
name: str=""
) -> 'TimeSeries':
"""
Returns the gross returns of the series (in %),
i.e. percent change + 1.
Notes
-----
When computing the percent change, first date gets
NaN value and is thus removed from the time series.
"""
data = self.specify_data(start, end)
new_data = 1 + data.pct_change()
new_ts = TimeSeries(new_data[1:], tz=self.tz, name=name)
return new_ts
def hist_vol(self,
start: Union[str, datetime.date]=None,
end: Union[str, datetime.date]=None,
verbose: bool=True
) -> float:
"""
Computes the net returns of the time series and
returns their associated historical volatility
between two dates (default is the whole series).
Notes
-----
When computing the percent change, first date gets
NaN value and is thus removed from calculation.
Note that pandas.Series.pct_change() returns fractional
changes (not percentages), so no rescaling is applied here.
"""
# Initialization
data = self.specify_data(start, end)
# Warning message
if (self.is_sampling_uniform() is not True) and (verbose is True):
warnings.warn("Index not uniformly sampled. Result could be meaningless.")
# Warning message
if (0. in data.values) and (verbose is True):
warnings.warn("Zero value in time series, will generate infinite return.")
# Computing net returns
net_returns = data.pct_change()[1:]
# Compute standard deviation, i.e. volatility
std = net_returns.values.std()
return std
def annualized_vol(self,
start: Union[str, datetime.date]=None,
end: Union[str, datetime.date]=None,
verbose: bool=True
) -> float:
"""
Returns the annualized volatility of the time series
between two dates (default is the whole series),
using the frequency of the time series when usable.
"""
# Initializations
hvol = self.hist_vol(start, end, verbose=verbose)
if (self.freq is not None) and (self.freq in DPOA.keys()):
return hvol * np.sqrt(DPOA[self.freq])
else:
raise ValueError('Annualized volatility could not be evaluated.')
def annualized_return(self,
start: Union[str, datetime.date]=None,
end: Union[str, datetime.date]=None,
verbose: bool=True
) -> float:
"""
Returns the annualized return of the time series
between two dates (default is the whole series),
using the frequency of the time series when usable.
Arguments
---------
start : str or datetime
Starting date of selection.
end : str or datetime
Ending date of selection.
verbose : bool
Verbose option.
Returns
-------
float
Annualized return.
"""
# Initializations
gross_returns = self.gross_returns(start, end)
# Compute product of values
prd = gross_returns.data.prod()
# Checks
if (start is None) and (end is None):
assert(gross_returns.nvalues == self.nvalues-1)
if (gross_returns.freq != self.freq) and (verbose is True):
warning_message = "Gross_returns frequency and time series frequency do not match." \
+ " In that context, results may be meaningless."
warnings.warn(warning_message)
if (self.freq is not None) and (self.freq in DPOA.keys()):
return prd**(DPOA[self.freq]/gross_returns.nvalues) - 1
else:
raise ValueError('Annualized return could not be evaluated.')
def risk_ratio(self,
start: Union[str, datetime.date]=None,
end: Union[str, datetime.date]=None,
verbose: bool=True
) -> float:
"""
Returns the risk ratio, i.e. the ratio of annualized return
over annualized volatility.
"""
ann_return = self.annualized_return(start, end)
ann_volatility = self.annualized_vol(start, end, verbose=verbose)
return ann_return / ann_volatility
def annualized_Sharpe_ratio(self,
risk_free_rate: float=0,
start: Union[str, datetime.date]=None,
end: Union[str, datetime.date]=None,
verbose: bool=True
) -> float:
"""
Returns the Sharpe ratio, also known as risk adjusted return.
"""
ann_return = self.annualized_return(start, end)
ann_volatility = self.annualized_vol(start, end, verbose=verbose)
return (ann_return - risk_free_rate) / ann_volatility
### METHODS RELATED TO VALUE AT RISK ###
def hist_var(self,
p: float,
start: Union[str, datetime.date]=None,
end: Union[str, datetime.date]=None
) -> float:
"""
Returns the historical p-VaR (Value at Risk) between two dates.
Returns
-------
float
VaR value computed between the chosen dates.
"""
# Checks
assert(p>=0 and p<=1)
if 100 * p % 1 != 0:
warning_message = f"Probability too precise, only closest percentile computed here." \
+ f"Hence for p = {str(p)} , percentile estimation is based on p = {str(int(100 * p))} %."
warnings.warn(warning_message)
# Prepare data
data = self.specify_data(start, end)
return np.percentile(data.values, int(100*p))
def hist_cvar(self,
p: float,
start: Union[str, datetime.date]=None,
end: Union[str, datetime.date]=None
) -> float:
"""
Returns the historical CVaR (Conditional Value at Risk) between two dates.
This quantity is also known as the Expected Shortfall (ES).
Returns
-------
float
CVaR value computed between the chosen dates.
"""
# Checks
assert(p>=0 and p<=1)
if 100*p%1 != 0:
warning_message = "Probability too precise, only closest percentile computed here." \
+ "Hence for p = " + str(p) + " , percentile estimation is based on p = " + str(int(100*p)) + " %."
warnings.warn(warning_message)
# Prepare data
data = self.specify_data(start, end)
var = self.hist_var(p=p, start=start, end=end)
# Computing CVaR
tmp_sum = 0
tmp_n = 0
for val in data.values:
if val <= var:
tmp_sum += val
tmp_n += 1
return tmp_sum / tmp_n
# Alias method of hist_cvar
# For people with a Finance terminology preference
hist_expected_shortfall = hist_cvar
def cornish_fisher_var(self,
p: float,
start: Union[str, datetime.date]=None,
end: Union[str, datetime.date]=None
) -> float:
"""
Returns the VaR (Value at Risk) between two dates from
the Cornish-Fisher expansion.
Returns
-------
float
VaR value computed between the chosen dates.
"""
# Checks
assert(p>=0 and p<=1)
# Prepare data
data = self.specify_data(start, end)
# Compute z-score based on normal distribution
z = stats.norm.ppf(p)
# Compute modified z-score from expansion
s = stats.skew(data.values)
k = stats.kurtosis(data.values, fisher=False)
new_z = z + (z**2 - 1) * s/6 + (z**3 - 3*z) * (k-3)/24 \
- (2*z**3 - 5*z) * (s**2)/36
return data.values.mean() + new_z * data.values.std(ddof=0)
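    # Added note: the modified quantile above is the Cornish-Fisher expansion
    #   z_cf = z + (z^2 - 1) * s/6 + (z^3 - 3z) * (k - 3)/24 - (2z^3 - 5z) * s^2/36
    # with s the sample skewness and k the (non-excess) kurtosis, and the reported
    # VaR is then mean + z_cf * std.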
### AUTOCORRELATION COMPUTATION ###
def autocorrelation(self,
lag: int=1,
start: Union[str, datetime.date]=None,
end: Union[str, datetime.date]=None
) -> float:
"""
Returns the autocorrelation of the time series for a specified lag.
We use the function:
$\\rho_l = \\frac{Cov(x_t, x_{t-l})}{\\sqrt{Var[x_t] Var[x_{t-l}]}}$
where $x_t$ is the time series at time t.
Cov denotes the covariance and Var the variance.
We also use the properties $\\rho_0 = 1$ and $\\rho_{-l} = \\rho_l$
(using LaTeX notations here).
"""
# Initialization
l = abs(lag)
# Trivial case
if l==0:
return 1
# Prepare data
data = self.specify_data(start, end)
# General case
assert(l < data.shape[0])
shifted_data = data.shift(l)
mu = data.mean()
sigma = data.std()
numerator = np.mean((data - mu) * (shifted_data - mu))
denominator = sigma**2
return numerator / denominator
def plot_autocorrelation(self,
lag_min: int=0,
lag_max: int=25,
start: Union[str, datetime.date]=None,
end: Union[str, datetime.date]=None,
figsize: (float, float) = (8, 4),
dpi: float=100
) -> None:
"""
Uses autocorrelation method in order to return a plot
of the autocorrelation againts the lag values.
"""
# Checks
assert(lag_max > lag_min)
# Computing autocorrelation
x_range = list(range(lag_min, lag_max+1, 1))
ac = [self.autocorrelation(lag=x, start=start, end=end) for x in x_range]
# Plot
plt.figure(figsize=figsize, dpi=dpi)
plt.bar(x_range, ac, color='w', lw=2, edgecolor='k')
s,e = self.start_end_names(start, end)
title = "Autocorrelation from " + s + " to " + e + " for lags = [" \
+ str(lag_min) + "," + str(lag_max) + "]"
plt.gca().set(title=title, xlabel="Lag", ylabel="Autocorrelation Value")
plt.show()
return None
def acf_pacf(self,
lag_max: int=25,
figsize: (float, float) = (12, 3),
dpi: float=100
) -> None:
"""
Returns a plot of the AutoCorrelation Function (ACF)
and Partial AutoCorrelation Function (PACF) from statsmodels.
"""
# Plot
fig, axes = plt.subplots(1,2, figsize=figsize, dpi=dpi)
plot_acf(self.data.values.tolist(), lags=lag_max, ax=axes[0])
plot_pacf(self.data.values.tolist(), lags=lag_max, ax=axes[1])
plt.show()
return None
### SIMPLE TRANSFORMATIONS OF THE TIME SERIES TO CREATE A NEW TIME SERIES ###
def trim(self,
new_start: Union[str, datetime.date],
new_end: Union[str, datetime.date],
name: str=None
) -> 'TimeSeries':
"""
Method that trims the time series to the desired dates
and send back a new time series.
"""
new_data = self.data[new_start:new_end]
if name is None:
name = self.name
new_ts = TimeSeries(data=new_data, tz=self.tz, unit=self.unit, name=name)
return new_ts
def add_cst(self,
cst: float=0,
name: str=None
) -> 'TimeSeries':
"""
Method that adds a constant to the time series.
"""
new_data = self.data + cst
if name is None:
name = self.name
new_ts = TimeSeries(data=new_data, tz=self.tz, unit=self.unit, name=name)
return new_ts
def mult_by_cst(self,
cst: float=1,
name: str=None
) -> 'TimeSeries':
"""
Method that multiplies the time series by a constant.
"""
new_data = self.data * cst
if name is None:
name = self.name
new_ts = TimeSeries(data=new_data, tz=self.tz, unit=self.unit, name=name)
return new_ts
def linear_combination(self,
other_ts: 'TimeSeries',
factor1: float=1,
factor2: float=1,
name: str=None) -> 'TimeSeries':
"""
Method that adds a time series to the current one
according to linear combination:
factor1 * current_ts + factor2 * other_ts.
"""
# Checks
if (self.unit != other_ts.unit):
raise AssertionError("Time series to combine must have same unit.")
# Compute linear combination
new_data = factor1 * np.array(self.data.values) + factor2 * np.array(other_ts.data.values)
new_data = pd.Series(index=self.data.index, data=new_data)
new_ts = TimeSeries(data=new_data, tz=self.tz, unit=self.unit, name=name)
return new_ts
def convolve(self,
func: Callable[[float], float],
x_min: float,
x_max: float,
n_points: int,
normalize: bool=False,
name: str=None
) -> 'TimeSeries':
"""
Performs a convolution of the time series with a function 'func'.
The 'normalize' option allows to renormalize 'func' such that
the sum of its values is one.
Parameters
----------
func : function
Function we want to employ for convolution.
x_min : float
Minimum value to consider for 'func'.
x_max : float
Maximum value to consider for 'func'.
n_points : int
Number of points to consider in the function.
normalize: bool
Option to impose the sum of func values to be 1.
name : str
New name.
Returns
-------
TimeSeries
Convolved time series.
"""
# Getting the time series values
ts_vals = self.data.values
# Getting the convolving function values
X = np.linspace(x_min, x_max, n_points)
func_vals = []
for x in X:
func_vals.append(func(x))
if normalize==True:
sum_vals = np.array(func_vals).sum()
func_vals = np.array(func_vals) / sum_vals
# Dealing with name
if name is None:
name = self.name + str('-Convolved')
# Generate convolved values
convolved_vals = np.convolve(func_vals, ts_vals.flatten(), mode='same')
convolved_ts = TimeSeries(data=pd.Series(index=self.data.index, data=convolved_vals),
tz=self.tz,
unit=self.unit,
name=name)
return convolved_ts
def get_drawdowns(self,
start: Union[str, datetime.date]=None,
end: Union[str, datetime.date]=None,
name: str=""
) -> 'TimeSeries':
"""
Computes the drawdowns and returns a new time series from them.
Returns
-------
TimeSeries
Time series of the drawdowns.
"""
# Prepare data
data = self.specify_data(start, end)
# Compute drawdowns
trailing_max = data.cummax()
drawdowns = (data - trailing_max) / trailing_max
# Make a time series from them
new_ts = TimeSeries(data=drawdowns, tz=self.tz, name=name)
return new_ts
def max_drawdown(self,
start: Union[str, datetime.date]=None,
end: Union[str, datetime.date]=None
) -> float:
"""
Returns the maximum drawdown of a time series.
Returns
-------
float
Maximum drawdown.
"""
# Prepare data
data = self.specify_data(start, end)
# Compute drawdowns
trailing_max = data.cummax()
drawdowns = (data - trailing_max) / trailing_max
max_drawdowns = -drawdowns.values.min()
return max_drawdowns
def divide_by_timeseries(self,
other_ts: 'TimeSeries',
start: Union[str, datetime.date]=None,
end: Union[str, datetime.date]=None,
name: str=""
) -> 'TimeSeries':
"""
Returns a time series from the division of the current time series
with another time series (current_ts / other_ts).
Returns
-------
TimeSeries
Division time series.
"""
# Prepare data
data = self.specify_data(start, end)
# Check that data has the same index
# as the dividing time series
assert(data.index.tolist() == other_ts.data.index.tolist())
# Do the division
new_data = np.array(data.values) / np.array(other_ts.data.values)
new_data = pd.Series(index=data.index, data=new_data)
new_ts = TimeSeries(new_data, tz=self.tz, name=name)
return new_ts
def add_gaussian_noise(self,
mu: float,
sigma: float,
start: Union[str, datetime.date]=None,
end: Union[str, datetime.date]=None,
name: str=""
) -> 'TimeSeries':
"""
Adds a Gaussian noise to the current time series.
Parameters
----------
mu : float
Mean parameter of the noise.
sigma : float
Standard deviation of the noise.
start : str
Starting date.
end : str
Ending date.
name : str
Name or nickname of the series.
Returns
-------
TimeSeries
Time series with added Gaussian noise.
"""
# Prepare data
data = self.specify_data(start, end)
n = len(data.values)
# Generate noise
noise = np.random.normal(loc=mu, scale=sigma, size=n)
# Generate new time series
new_data = []
for i in range(n):
new_data.append(data.values[i] + noise[i])
new_data = pd.Series(index=data.index, data=new_data)
new_ts = TimeSeries(new_data, tz=self.tz, name=name)
return new_ts
### FITTING METHODS ###
def rolling_avg(self,
pts: int=1
) -> 'TimeSeries':
"""
Transforms the time series into a rolling window average time series.
"""
new_values = [self.data[x-pts+1:x+1].mean() for x in range(pts-1, self.nvalues, 1)]
new_data = pd.Series(index=self.data.index[pts-1:self.nvalues], data=new_values)
new_ts = TimeSeries(new_data, tz=self.tz)
return new_ts
def polyfit(self,
order: int = 1,
start: Union[str, datetime.date] = None,
end: Union[str, datetime.date] = None
) -> 'TimeSeries':
"""
Provides a polynomial fit of the time series.
"""
# Prepare data
data = self.specify_data(start, end)
new_index = [datetime.timestamp(x) for x in data.index]
new_values = [data.values.tolist()[x] for x in range(len(data))]
# Do the fit
fit_formula = np.polyfit(new_index, new_values, deg=order)
model = np.poly1d(fit_formula)
print("Evaluated model: \n", model)
yfit = [model(x) for x in new_index]
# Build series
assert(len(data.index)==len(yfit))
new_data = pd.Series(index=data.index, data=yfit)
new_ts = TimeSeries(new_data, tz=self.tz)
return new_ts
def sample_uniformly(self) -> 'TimeSeries':
"""
Returns a new time series for which the sampling is uniform.
"""
# Check if actually we need to do something
if self.is_sampling_uniform() == True:
print("Time series already has a uniform sampling. Returning the same time series.")
return self
# Prepare the new index
original_timestamps = [datetime.timestamp(x) for x in self.data.index]
original_values = self.data.values
N = len(original_values)
assert(N>2)
new_timestamps = np.linspace(original_timestamps[0], original_timestamps[-1], N)
new_index = [datetime.fromtimestamp(x) for x in new_timestamps]
# Obtaining the new values from interpolation
before = [original_timestamps[0], original_values[0]]
after = [original_timestamps[1], original_values[1]]
new_values = [0.] * N
j=0
k=0
for i in range(len(new_timestamps)):
# Move forward in original table
# Known point before interpolation point
while (before[0] <= new_timestamps[i] and j<N-1):
j+=1
before[0] = original_timestamps[j]
j-=1
before[0] = original_timestamps[j]
before[1] = original_values[j]
# Known point after interpolation point
while (after[0] <= new_timestamps[i] and k<N-1):
k+=1
after[0] = original_timestamps[k]
after[1] = original_values[k]
# Check the new date is sandwiched between the 2 original dates
assert(before[0] <= new_timestamps[i])
assert(new_timestamps[i] <= after[0])
assert(j<=k)
# Find the new value from interpolation
slope = (after[1] - before[1]) / (after[0] - before[0])
new_values[i] = before[1] + slope * (new_timestamps[i] - before[0])
# Build the time series
new_data = pd.Series(index=new_index, data=new_values)
new_ts = TimeSeries(new_data, tz=self.tz)
return new_ts
def decompose(self,
polyn_order: int=None,
start: Union[str, datetime.date]=None,
end: Union[str, datetime.date]=None,
extract_seasonality: bool=False,
period: int=None
) -> list:
"""
Performs a decomposition of the time series
and returns the different components.
Parameters
----------
polyn_order : None or int
Order of the polynomial when fitting a non-linear component.
start : str
Starting date.
end : str
Ending date.
extract_seasonality : bool
Option to extract seasonality signal.
period : int
Period of seasonality.
Returns
-------
List of TimeSeries
Content of the list depends on choices in arguments.
"""
# Check
if polyn_order is not None:
try:
assert(polyn_order>1)
except AssertionError:
raise AssertionError("polyn_order must be equal or more than 2.")
# Prepare data in the specified period
data = self.specify_data(start, end)
X = [datetime.timestamp(x) for x in data.index]
X = np.reshape(X, (len(X), 1))
y = [data.values.tolist()[x] for x in range(len(data))]
# Fit the linear component
model = LinearRegression()
model.fit(X, y)
# Extract the linear trend
lin_trend_y = model.predict(X)
lin_trend_data = pd.Series(index=data.index, data=lin_trend_y)
lin_trend_ts = TimeSeries(lin_trend_data, tz=self.tz, name=self.name+"-Linear")
# Remove the linear trend to the initial time series
nonlin_y = y - lin_trend_y
# Remove a polynomial component of a certain order
if polyn_order is not None:
polyn_model = make_pipeline(PolynomialFeatures(polyn_order), Ridge())
polyn_model.fit(X, nonlin_y)
polyn_component_y = polyn_model.predict(X)
polyn_comp_data = pd.Series(index=data.index, data=polyn_component_y)
polyn_comp_ts = TimeSeries(polyn_comp_data, tz=self.tz, name=self.name+"-Polynomial")
# Generate the resting part time series
if polyn_order is not None:
rest_y = nonlin_y - polyn_component_y
else:
rest_y = nonlin_y
rest_data = pd.Series(index=data.index, data=rest_y)
rest_ts = TimeSeries(rest_data, tz=self.tz, name=self.name+"-Rest")
# Extracting seasonality
if extract_seasonality==True:
# Receiving the period of seasonality in the residue
try:
assert(period)
assert(isinstance(period, int))
except AssertionError:
raise AssertionError("Period must be specified for 'extrac_seasonality = True' mode.")
P = period
# Cut the series into seasonality-period chunks
t = []
if int(len(rest_y))%P==0:
nchunks = int(len(rest_y))//P
else:
nchunks = int(len(rest_y))//P + 1
for i in range(nchunks):
if i == nchunks - 1:
t.append(rest_y[i*P:])
else:
t.append(rest_y[i*P:i*P+P])
# Do the average of the chunks
t_avg = []
for i in range(P):
t_avg.append(np.mean([t[x][i] for x in range(nchunks)]))
# Create a new series repeating this pattern
seasonal_y = []
for i in range(len(rest_y)):
seasonal_y.append(t_avg[i%P])
seasonal_data = pd.Series(index=data.index, data=seasonal_y)
seasonal_ts = TimeSeries(seasonal_data, tz=self.tz, name=self.name+"-Seasonal")
# Build the residue time series
residue_y = rest_y - seasonal_y
residue_data = pd.Series(index=data.index, data=residue_y)
residue_ts = TimeSeries(residue_data, tz=self.tz, name=self.name+str("-Residue"))
# Return results
if polyn_order is not None:
if extract_seasonality==True:
return [lin_trend_ts, polyn_comp_ts, seasonal_ts, residue_ts]
else:
return [lin_trend_ts, polyn_comp_ts, rest_ts]
else:
if extract_seasonality==True:
return [lin_trend_ts, seasonal_ts, residue_ts]
else:
return [lin_trend_ts, rest_ts]
def gaussian_process(self,
rbf_scale: float,
rbf_scale_bounds: (float, float),
noise: float,
noise_bounds: (float, float),
alpha: float=1e-10,
plotting: bool=False,
figsize: (float, float) = (12, 5),
dpi: float=100
) -> ['TimeSeries', 'TimeSeries', 'TimeSeries']:
"""
Employs Gaussian Process Regression (GPR) from scikit-learn to fit a time series.
Parameters
rbf_scale : float
Length scale for the RBF kernel.
rbf_scale_bounds : 2-tuple of floats
Length scale bounds for the RBF kernel.
noise : float
Noise level for the white noise kernel.
noise_bounds : 2-tuple of floats
Noise level bounds for the white noise kernel.
alpha : float
Noise added to the diagonal of the kernel matrix during fitting.
plotting : bool
Option to plot or not the result of the GPR.
figsize : 2-tuple of ints
Dimensions of the figure.
dpi : int
Dots-per-inch definition of the figure.
Returns
-------
List of 3 TimeSeries
3 time series for the mean and the envelope +sigma and -sigma of standard deviation.
"""
# Shape the data
X = np.array([float(datetime.timestamp(x)) for x in self.data.index])[:, np.newaxis]
y = self.data.values.flatten()
# Set the kernel
initial_kernel = 1 * kernels.RBF(length_scale=rbf_scale,
length_scale_bounds=rbf_scale_bounds) \
+ kernels.WhiteKernel(noise_level=noise,
noise_level_bounds=noise_bounds)
# Do regression
gpr = GaussianProcessRegressor(kernel=initial_kernel,
alpha=alpha,
optimizer='fmin_l_bfgs_b',
n_restarts_optimizer=1,
random_state=0)
gpr = gpr.fit(X,y)
print("The GPR score is: ", gpr.score(X,y))
# Create fitting time series
N = len(y)
X_ = np.linspace(min(X)[0], max(X)[0], N)
# Mean fit
y_mean, y_cov = gpr.predict(X_[:,np.newaxis], return_cov=True)
idx = self.data.index
ts_mean = TimeSeries(pd.Series(index=idx, data=y_mean), tz=self.tz, name='Mean from GPR')
# Mean - (1-sigma)
y_std_m = y_mean - np.sqrt(np.diag(y_cov))
ts_std_m = TimeSeries(pd.Series(index=idx, data=y_std_m), tz=self.tz, name='Mean-sigma from GPR')
# Mean + (1-sigma)
y_std_p = y_mean + np.sqrt(np.diag(y_cov))
ts_std_p = TimeSeries(pd.Series(index=idx, data=y_std_p), tz=self.tz, name='Mean+sigma from GPR')
# Plot the result
if plotting==True:
plt.figure(figsize=figsize, dpi=dpi)
plt.plot(self.data.index, y_mean, color='k', lw=3, label="Mean")
plt.plot(self.data.index, y_std_m, color='k', label="Mean - 1-sigma")
plt.plot(self.data.index, y_std_p, color='k', label="Mean + 1-sigma")
plt.fill_between(self.data.index, y_std_m, y_std_p, alpha=0.5, color='gray')
plt.plot(self.data.index, self.data.values, color='r', label=self.name)
title = "Gaussian Process Regression: \n Time series " \
+ " from " + str(self.start_utc)[:10] + " to " + str(self.end_utc)[:10]
plt.gca().set(title=title, xlabel="Date", ylabel="Value")
plt.legend()
plt.show()
# Returning the time series
return [ts_mean, ts_std_m, ts_std_p]
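    # --- Hedged usage sketch for gaussian_process() (illustrative comment, not library code) ---
    # Assumes the TimeSeries constructor accepts a pandas.Series with a DatetimeIndex, as used
    # elsewhere in this module, and that the scikit-learn kernels imported by the module are
    # available. The kernel hyper-parameters are rough guesses (the X axis is in epoch seconds,
    # so RBF length scales are on the order of days):
    #
    #   import numpy as np
    #   import pandas as pd
    #   idx = pd.date_range('2020-01-01', periods=200, freq='D')
    #   vals = np.sin(np.linspace(0., 12., 200)) + 0.1 * np.random.randn(200)
    #   ts = TimeSeries(pd.Series(index=idx, data=vals), tz=None, name='demo')
    #   mean_ts, lower_ts, upper_ts = ts.gaussian_process(
    #       rbf_scale=1.e6, rbf_scale_bounds=(1.e4, 1.e8),
    #       noise=0.1, noise_bounds=(1.e-3, 1.e1), plotting=False)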
def make_selection(self,
threshold: float,
mode: str
) -> 'TimeSeries':
"""
        Make a time series containing only the values at the events selected by a CUSUM filter.
Arguments
---------
threshold : float
Threshold value for selection.
mode : str
Mode to choose from (among ['run-ups', 'run-downs', 'symmetric']).
Returns
-------
TimeSeries
Time series of the selection.
Notes
-----
Function adapted from "Advances in Financial Machine Learning",
<NAME> (2018).
"""
# Checks
assert(mode in ['run-ups', 'run-downs', 'symmetric'])
# Implements the symmetric cumsum filter
t_events, s_pos, s_neg = [], 0, 0
diff = self.data.diff()
for t in diff.index[1:]:
s_pos = max(0, s_pos + diff.loc[t])
s_neg = min(0, s_neg + diff.loc[t])
if mode == 'run-downs':
if s_neg < -threshold:
s_neg = 0
t_events.append(t)
elif mode == 'run-ups':
if s_pos > threshold:
s_pos = 0
t_events.append(t)
elif mode == 'symmetric':
if s_neg < -threshold:
s_neg = 0
t_events.append(t)
elif s_pos > threshold:
s_pos = 0
t_events.append(t)
t_index = pd.DatetimeIndex(t_events)
# Get selection values into DataFrame
df_selection = self.data.loc[t_index]
# Make TimeSeries
ts_selection = TimeSeries(data=df_selection, tz=self.tz, unit=self.unit, name=self.name + "_Selection")
return ts_selection
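    # --- Hedged usage sketch for make_selection() (illustrative comment, not library code) ---
    # Assuming 'ts' is a TimeSeries built as in the sketch above, the CUSUM filter keeps only
    # the dates where cumulative moves exceed the threshold (expressed in the same units as
    # the series values):
    #
    #   selected_ts = ts.make_selection(threshold=0.5, mode='symmetric')
    #   run_ups_ts = ts.make_selection(threshold=0.5, mode='run-ups')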
#---------#---------#---------#---------#---------#---------#---------#---------#---------#
# CLASS CatTimeSeries
@typechecked
class CatTimeSeries(Series):
"""
Class defining a categoric time series and its methods.
This class inherits from the parent class 'Series'.
Attributes
----------
data : pandas.Series or pandas.DataFrame
Contains a time-like index and for each time a single value.
start_utc : Pandas.Timestamp
Starting date.
end_utc : Pandas.Timestamp
Ending date.
nvalues : int
Number of values, i.e. also of dates.
freq : str or None
Frequency inferred from index.
name : str
Name or nickname of the series.
unit : str or None
Unit of the time series values.
tz : str
Timezone name.
timezone : pytz timezone
Timezone associated with dates.
type : str
Type of the series.
"""
def __init__(self,
data: Union[pd.Series, pd.DataFrame, None]=None,
tz: str=None,
unit: str=None,
name: str=None
) -> None:
"""
Receives a pandas.Series or pandas.DataFrame as an argument and initializes the time series.
"""
super().__init__(data=data, tz=tz, unit=unit, name=name)
# Add attributes initialization if needed
self.type = 'CatTimeSeries'
def prepare_cat_plot(self) -> (list, list, dict):
"""
        Returns the dates, category values and color dictionary needed to plot a CatTimeSeries.
"""
# Initialization
set_cats = sorted(list(set(self.data.values.flatten())))
n_cats = len(set_cats)
        try:
            assert(n_cats <= 10)
        except AssertionError:
            raise ValueError("Number of categories too large for color handling (maximum is 10).")
# Dates
X = [datetime.timestamp(x) for x in self.data.index]
# Adding one more step to show last value
delta = self.data.index[-1] - self.data.index[-2]
X.append(datetime.timestamp(self.data.index[-1] + delta))
# Category values
y = self.data.values.flatten().tolist()
# Copying the last values
y.append(y[-1])
# Prepare Colors
large_color_dict = { 0: 'Red', 1: 'DeepPink', 2: 'DarkOrange', 3: 'Yellow',
                             4: 'Magenta', 5: 'Lime', 6: 'DarkGreen', 7: 'DarkCyan',
8: 'DarkTurquoise', 9:'DodgerBlue' }
restricted_keys = [int(x) for x in np.linspace(0,9,n_cats).tolist()]
restricted_colors = [large_color_dict[x] for x in restricted_keys]
keys_to_cats = [set_cats[x] for x in range(0,n_cats)]
# Create the restricted color dictionary
D = dict(zip(keys_to_cats, restricted_colors))
return X, y, D
def simple_plot(self,
figsize: (float, float) = (12, 5),
dpi: float = 100
) -> None:
"""
Plots the categorical time series in a simple way.
The number of categories is limited to 10 in order to easily handle colors.
Parameters
----------
figsize : 2-tuple of ints
Dimensions of the figure.
dpi : int
Dots-per-inch definition of the figure.
Returns
-------
None
None
"""
# Making the restricted color dictionary
X, y, D = self.prepare_cat_plot()
# Initiate figure
fig, ax = plt.subplots(figsize=figsize, dpi=dpi)
# Color block
left_X = X[0]
current_y = y[0]
for i in range(1,self.nvalues+1,1):
# For any block
if y[i] != current_y:
ax.fill_between([datetime.fromtimestamp(left_X), datetime.fromtimestamp(X[i])],
[0,0], [1,1], color=D[current_y], alpha=0.5)
left_X = X[i]
current_y = y[i]
# For the last block
if i == self.nvalues:
ax.fill_between([datetime.fromtimestamp(left_X), datetime.fromtimestamp(X[i])],
[0,0], [1,1], color=D[current_y], alpha=0.5)
# Make it cute
title = "Categorical Time series " + self.name + " from " + str(self.start_utc)[:10] \
+ " to " + str(self.end_utc)[:10]
if self.tz is None:
xlabel = 'Date'
else:
xlabel = 'Date (' + self.tz + ')'
plt.gca().set(title=title, xlabel=xlabel, ylabel="")
plt.show()
return None
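    # --- Hedged usage sketch for CatTimeSeries (illustrative comment, not library code) ---
    # A minimal categorical series with two states; category values may be strings, and
    # simple_plot() supports at most 10 distinct categories:
    #
    #   import pandas as pd
    #   idx = pd.date_range('2020-01-01', periods=6, freq='D')
    #   cts = CatTimeSeries(pd.Series(index=idx, data=['low', 'low', 'high', 'high', 'low', 'high']),
    #                       tz=None, name='regime')
    #   cts.simple_plot()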
#---------#---------#---------#---------#---------#---------#---------#---------#---------#
### FUNCTIONS HELPING TO CREATE A TIMESERIES ###
#@typechecked
def type_to_series(series_type):
"""
Returns the class TimeSeries or CatTimeSeries
    depending on whether it receives a 'TS' or 'CTS' argument.
    A value of None defaults to TimeSeries.
    """
    if series_type == 'TS':
        return TimeSeries
    elif series_type == 'CTS':
        return CatTimeSeries
    elif series_type is None:
        return TimeSeries
    else:
raise ValueError("Series type not recognized.")
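# --- Hedged usage sketch for type_to_series() (illustrative comment, not library code) ---
#
#   type_to_series('TS')    # -> the TimeSeries class
#   type_to_series('CTS')   # -> the CatTimeSeries class
#   type_to_series(None)    # -> defaults to the TimeSeries class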
#@typechecked
def build_from_csv(tz: Union[str, list, None]=None,
unit: Union[str, list, None]=None,
name: Union[str, list, None]=None,
type: Union[str, list, None]=None,
**kwargs: Any
):
"""
Returns a list of time series from the reading of a .csv file.
This function uses the function pandas.read_csv().
Arguments
---------
tz : str or list of str.
Timezone name or list of timezone names.
unit : str or list of str
Unit name or list of unit names.
name : str or list of str
Time series name or list of time series names.
type : str or list of str
Time series type or list of time series types.
**kwargs
Arbitrary keyword arguments for pandas.read_csv().
Returns
-------
List of TimeSeries
Time series built from the .csv file.
Notes
-----
To learn more about pandas.read_csv(), please refer to:
https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.read_csv.html
"""
# Import data into a DataFrame
data = pd.read_csv(**kwargs)
ncols = data.shape[1]
# Return a time series
if ncols == 1 :
if type is not None:
return type_to_series(series_type=type)(data=data, tz=tz, unit=unit, name=name)
else:
return type_to_series(series_type='TS')(data=data, tz=tz, unit=unit, name=name)
# or return a list of time series
else:
# Checks
if tz is not None:
assert(isinstance(tz, list))
assert(len(tz) == ncols)
else:
tz = [None] * ncols
if unit is not None:
assert(isinstance(unit, list))
assert(len(unit) == ncols)
else:
unit = [None] * ncols
if name is not None:
assert(isinstance(name, list))
assert(len(name) == ncols)
else:
name = [None] * ncols
if type is not None:
assert(isinstance(type, list))
assert(len(type) == ncols)
else:
type = ['TS'] * ncols
# Fill up a list with time series
ts_list = []
for i in range(ncols):
ts_list.append( type_to_series(type[i])(data=pd.Series(data.iloc[:,i]),
tz=tz[i],
unit=unit[i],
name=name[i]) )
return ts_list
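# --- Hedged usage sketch for build_from_csv() (illustrative comment, not library code) ---
# 'data.csv' is a hypothetical file with a date column followed by two value columns;
# the extra keyword arguments are forwarded verbatim to pandas.read_csv():
#
#   ts_list = build_from_csv(name=['price', 'volume'],
#                            filepath_or_buffer='data.csv',
#                            index_col=0, parse_dates=True)
#   price_ts, volume_ts = ts_list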
@typechecked
def build_from_excel(tz: Union[str, list]=None,
unit: Union[str, list]=None,
name: Union[str, list]=None,
type: Union[str, list]=None,
**kwargs: Any
) -> list:
"""
Returns a list of time series from the reading of an excel file.
This function uses the function pandas.read_excel().
Arguments
---------
tz : str or list of str.
Timezone name or list of timezone names.
unit : str or list of str
Unit name or list of unit names.
name : str or list of str
Time series name or list of time series names.
type : str or list of str
Time series type or list of time series types.
**kwargs
Arbitrary keyword arguments for pandas.read_excel().
Returns
-------
List of TimeSeries
Time series built from the excel file.
Notes
-----
To learn more about pandas.read_excel(), please refer to:
https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.read_excel.html
"""
# Import data into a DataFrame
data = pd.read_excel(**kwargs)
ncols = data.shape[1]
# Return a time series
if ncols == 1 :
return type_to_series(series_type=type)(data, tz=tz, unit=unit, name=name)
# or return a list of time series
else:
# Checks
if tz is not None:
assert(isinstance(tz, list))
assert(len(tz)==ncols)
else:
tz = [None] * ncols
if unit is not None:
assert(isinstance(unit, list))
assert(len(unit)==ncols)
else:
unit = [None] * ncols
if name is not None:
assert(isinstance(name, list))
assert(len(name)==ncols)
else:
name = [None] * ncols
if type is not None:
assert(isinstance(type, list))
assert(len(type)==ncols)
else:
type = ['TS'] * ncols
# Fill up a list with time series
ts_list = []
for i in range(ncols):
ts_list.append( type_to_series(type[i])(pd.Series(data.iloc[:,i]), tz=tz[i],
unit=unit[i],
name=name[i]) )
return ts_list
@typechecked
def build_from_dataframe(data: pd.DataFrame,
tz: Union[str, list]=None,
unit: Union[str, list]=None,
name: Union[str, list]=None,
type: Union[str, list]=None
) -> list:
"""
Returns a list of time series from the reading of Pandas DataFrame.
Arguments
---------
data : pandas.DataFrame
Data frame to build the time series from.
tz : str or list of str.
Timezone name or list of timezone names.
unit : str or list of str
Unit name or list of unit names.
name : str or list of str
Time series name or list of time series names.
type : str or list of str
Time series type or list of time series types.
Returns
-------
List of TimeSeries
Time series built from the Pandas DataFrame.
"""
# Import data into a DataFrame
ncols = data.shape[1]
# Return a time series
if ncols == 1 :
return type_to_series(series_type=type)(data, tz=tz, unit=unit, name=name)
# or return a list of time series
else:
# Checks
if tz is not None:
assert(isinstance(tz, list))
assert(len(tz)==ncols)
else:
tz = [None] * ncols
if unit is not None:
assert(isinstance(unit, list))
assert(len(unit)==ncols)
else:
unit = [None] * ncols
if name is not None:
assert(isinstance(name, list))
assert(len(name)==ncols)
else:
name = [None] * ncols
if type is not None:
assert(isinstance(type, list))
assert(len(type)==ncols)
else:
type = ['TS'] * ncols
# Fill up a list with time series
ts_list = []
for i in range(ncols):
ts_list.append( type_to_series(type[i])(pd.Series(data.iloc[:,i]), tz=tz[i],
unit=unit[i],
name=name[i]) )
return ts_list
@typechecked
def build_from_list(list_values: Union[list, np.ndarray],
tz: str=None,
unit: str=None,
name: str="",
**kwargs: Any
) -> Union[TimeSeries, CatTimeSeries]:
"""
Returns a time series or categorical time series from the reading of a list.
Parameters
----------
list_values : list of float or str
List of values to generate either a TimeSeries or a CatTimeSeries.
tz : str
Time zone of the time series.
unit : str
Unit of the time series values when generating a TimeSeries.
name : str
Name or nickname of the series.
**kwargs
Arbitrary keyword arguments for pandas.date_range().
Returns
-------
TimeSeries
Time series built from the list of values and dates.
Notes
-----
For pandas.date_range please consult the following page:
https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.date_range.html
"""
# Generate index
data_index = pd.date_range(**kwargs)
T = len(data_index)
    # Checks
    try:
        assert(len(list_values) == T)
    except AssertionError:
        raise IndexError("Size of the generated index does not equal the length of list_values.")
    # Making series
    data = pd.Series(index=data_index, data=list_values)
# If the first value is a string, make a CatTimeSeries
if type(list_values[0]) == str:
ts = CatTimeSeries(data, tz=tz, unit=unit, name=name)
# If the first value isn't a string, make a TimeSeries
else:
ts = TimeSeries(data, tz=tz, unit=unit, name=name)
return ts
@typechecked
def build_from_lists(list_dates: list,
list_values: list,
tz: str=None,
unit: str=None,
name: str=""
) -> Union['TimeSeries', 'CatTimeSeries']:
"""
Returns a time series or categorical time series from the reading of lists.
Parameters
----------
list_dates : list of datetimes or str
List of values to generate either a TimeSeries or a CatTimeSeries.
list_values : list of float or str
List of values to generate either a TimeSeries or a CatTimeSeries.
tz : str
Time zone of the time series.
unit : str
Unit of the time series values when generating a TimeSeries.
name : str
Name or nickname of the series.
Returns
-------
TimeSeries
Time series built from the lists of values and dates.
"""
# Checks
try:
assert(len(list_dates)==len(list_values))
    except AssertionError:
raise IndexError("Lengths of list_dates and list_values should be equal.")
# Making series
data = pd.Series(index=list_dates, data=list_values)
# If the first value is a string, make a CatTimeSeries
if type(list_values[0]) == str:
ts = CatTimeSeries(data, tz=tz, unit=unit, name=name)
# If the first value isn't a string, make a TimeSeries
else:
ts = TimeSeries(data, tz=tz, unit=unit, name=name)
return ts
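# --- Hedged usage sketches for build_from_list() / build_from_lists() (illustrative comments) ---
# build_from_list() generates the dates via pandas.date_range() keyword arguments, while
# build_from_lists() takes the dates explicitly; string values yield a CatTimeSeries:
#
#   ts = build_from_list([1.0, 2.0, 1.5], name='demo',
#                        start='2021-01-01', periods=3, freq='D')
#   dates = list(pd.date_range('2021-01-01', periods=3, freq='D'))
#   cts = build_from_lists(list_dates=dates, list_values=['up', 'down', 'up'], name='states')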
### FUNCTIONS USING TIMESERIES AS ARGUMENTS ###
@typechecked
def linear_tvalue(data: Union[list, np.ndarray, pd.Series]) -> float:
"""
Compute the t-value from a linear trend.
Arguments
---------
data : list of floats, numpy.array or pandas.Series
Data to compute the linear t_value from.
Returns
-------
float
Linear t-value for the provided data.
Notes
-----
Function adapted from "Machine Learning for Asset Managers",
<NAME> (2020).
"""
# Checks
if not isinstance(data, list) and not isinstance(data, np.ndarray) and not isinstance(data, pd.Series):
raise TypeError("Argument data must be a list, a numpy.ndarray or a pandas.Series.")
# Initializations
if isinstance(data, list):
N = len(data)
else:
N = data.shape[0]
# Prepare linear trend
x = np.ones((N, 2))
x[:,1] = np.arange(N)
# Fit the linear trend
ols = OLS(data, x).fit()
# Get linear tvalue
tval = ols.tvalues[1]
return tval
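# --- Hedged usage sketch for linear_tvalue() (illustrative comment, not library code) ---
# Assumes OLS is statsmodels' OLS, as imported by this module; a noisy upward trend gives a
# large positive t-value, while pure noise typically stays within |t| < 2:
#
#   linear_tvalue([1.0, 2.1, 2.9, 4.2, 5.1])    # large positive t-value
#   linear_tvalue(np.random.randn(100))         # t-value close to zero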
#@typechecked
def bins_from_trend(ts: TimeSeries,
max_span: list,
return_df: bool=False
):
"""
Derive labels from the sign of the t-value of linear trend
and return a CatTimeSeries representing the labels.
Arguments
---------
ts : TimeSeries
Time series from which we want to extract bins.
    max_span : list of 3 integers
Characteristics of the horizon used to search for maximum linear t-value.
Represents [index min, index max, step size].
return_df : bool
Option to return the data frame with bin information.
Returns
-------
CatTimeSeries
Categorical time series representing the trend sign.
pandas.DataFrame
Data frame containing information about the constructed bins.
Notes
-----
Function adapted from "Machine Learning for Asset Managers",
<NAME> (2020).
"""
# Checks
    if not isinstance(max_span, list) or (len(max_span) != 3):
raise AssertionError("max_span must be a list of 3 integers.")
# Initializations
out = pd.DataFrame(index=ts.data.index, columns=['trend_end_time', 't_value', 'trend_sign'])
horizons = range(*max_span)
# Going through time
for t0 in ts.data.index:
        s0 = pd.Series(dtype=float)
iloc0 = ts.data.index.get_loc(t0)
if iloc0 + max(horizons) > ts.data.shape[0]:
continue
for horizon in horizons:
t1 = ts.data.index[iloc0 + horizon - 1]
s1 = ts.data.loc[t0:t1]
s0.loc[t1] = linear_tvalue(s1.values)
t1 = s0.replace([-np.inf, np.inf, np.nan], 0).abs().idxmax()
out.loc[t0, ['trend_end_time','t_value','trend_sign']] = s0.index[-1], s0[t1], np.sign(s0[t1])
out['trend_end_time'] = pd.to_datetime(out['trend_end_time'])
out['trend_sign'] = pd.to_numeric(out['trend_sign'], downcast='signed')
# Make data frame to output
df = out.dropna(subset=['trend_sign'])
# Make CatTimeSeries
df_tmp = pd.DataFrame(index=df.index, data=df['trend_sign'])
cts = CatTimeSeries(data=df_tmp, tz=ts.tz, unit=ts.unit, name="Trend from " + ts.name)
# Return
if return_df is True:
return cts, df
else:
return cts
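# --- Hedged usage sketch for bins_from_trend() (illustrative comment, not library code) ---
# Assuming 'ts' is a TimeSeries with a few hundred observations, horizons of 5, 10 and 15
# observations are scanned for the strongest linear trend:
#
#   cts_trend, df_bins = bins_from_trend(ts, max_span=[5, 20, 5], return_df=True)
#   df_bins['trend_sign'].value_counts()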
@typechecked
def tick_imbalance(ts: TimeSeries, name: str=None) -> TimeSeries:
"""
Computes the tick imbalance from a time series.
Arguments
---------
ts : TimeSeries
Time series we want to extract tick imbalance from.
name : str
Name of the new time series.
Returns
-------
TimeSeries
Time series representing tick imbalance.
Notes
-----
Function adapted from "Advances in Financial Machine Learning",
<NAME> (2018).
"""
# Initialization
delta = ts.percent_change()
T = delta.nvalues
# Create the imbalance data
tick_imb_data = [np.abs(delta.data.values[0]) / delta.data.values[0]]
for t in range(1,T,1):
if delta.data.values[t] == 0.:
tick_imb_data.append(tick_imb_data[t-1])
else:
tick_imb_data.append(np.abs(delta.data.values[t]) / delta.data.values[t])
# Make DataFrame and TimeSeries
tick_imb_df = | pd.DataFrame(index=delta.data.index, data=tick_imb_data) | pandas.DataFrame |
# Necessary libraries
import pandas as pd
import re
import matplotlib.pyplot as plt
import seaborn as sns
import plotly.graph_objs as go
import plotly.offline as py
from statsmodels.stats.outliers_influence import variance_inflation_factor
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics import r2_score
from sklearn.linear_model import LinearRegression
from sklearn.svm import SVR
from sklearn.naive_bayes import GaussianNB, BernoulliNB
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.ensemble import AdaBoostRegressor
from sklearn.ensemble import GradientBoostingRegressor
import pickle
# Datafile load
datafile = | pd.read_csv("zomato.csv") | pandas.read_csv |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.4'
# jupytext_version: 1.1.5
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # s_checklist_scenariobased_step01 [<img src="https://www.arpm.co/lab/icons/icon_permalink.png" width=30 height=30 style="display: inline;">](https://www.arpm.co/lab/redirect.php?code=s_checklist_scenariobased_step01&codeLang=Python)
# For details, see [here](https://www.arpm.co/lab/redirect.php?permalink=ex-vue-1).
# +
import numpy as np
import pandas as pd
from scipy import interpolate
import matplotlib.pyplot as plt
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
from arpym.pricing import bsm_function, bootstrap_nelson_siegel, \
implvol_delta2m_moneyness
from arpym.tools import aggregate_rating_migrations, add_logo
# -
# ## [Input parameters](https://www.arpm.co/lab/redirect.php?permalink=s_checklist_scenariobased_step01-parameters)
# +
# set current time t_now
t_now = np.datetime64('2012-08-31')
# set start date for data selection
t_first = np.datetime64('2009-11-02')
# set initial portfolio construction date t_init
t_init = np.datetime64('2012-08-30')
# stocks - must include GE and JPM
stock_names = ['GE', 'JPM', 'A', 'AA', 'AAPL'] # stocks considered
# make sure stock names includes GE and JPM
stock_names = ['GE', 'JPM'] + [stock
for stock in stock_names
if stock not in ['GE', 'JPM']]
print('Stocks considered:', stock_names)
# options on S&P 500
k_strk = 1407 # strike value of options on S&P 500 (US dollars)
tend_option = np.datetime64('2013-08-26') # options expiry date
y = 0.01 # level for yield curve (assumed flat and constant)
l_ = 9 # number of points on the m-moneyness grid
# corporate bonds
# expiry date of the GE coupon bond to extract
tend_ge = np.datetime64('2013-09-16')
# expiry date of the JPM coupon bond to extract
tend_jpm = np.datetime64('2014-01-15')
# starting ratings following the table:
# "AAA" (0), "AA" (1), "A" (2), "BBB" (3), "BB" (4), "B" (5),
# "CCC" (6), "D" (7)
ratings_tnow = np.array([5, # initial credit rating for GE (corresponding to B)
3]) # initial credit rating for JPM (corresponding to BBB)
# start of period for aggregate credit risk drivers
tfirst_credit = np.datetime64('1995-01-01')
# end of period for aggregate credit risk drivers
tlast_credit = np.datetime64('2004-12-31')
# index of risk driver to plot
d_plot = 1
# -
# ## [Step 0](https://www.arpm.co/lab/redirect.php?permalink=s_checklist_scenariobased_step01-implementation-step00): Import data
# +
# upload data
# stocks
stocks_path = '../../../databases/global-databases/equities/db_stocks_SP500/'
db_stocks = pd.read_csv(stocks_path + 'db_stocks_sp.csv', skiprows=[0],
index_col=0)
db_stocks.index = pd.to_datetime(db_stocks.index)
# implied volatility of option on S&P 500 index
path = '../../../databases/global-databases/derivatives/db_implvol_optionSPX/'
db_impliedvol = pd.read_csv(path + 'data.csv',
index_col=['date'], parse_dates=['date'])
implvol_param = pd.read_csv(path + 'params.csv', index_col=False)
# corporate bonds: GE and JPM
jpm_path = \
'../../../databases/global-databases/fixed-income/db_corporatebonds/JPM/'
db_jpm = pd.read_csv(jpm_path + 'data.csv',
index_col=['date'], parse_dates=['date'])
jpm_param = pd.read_csv(jpm_path + 'params.csv',
index_col=['expiry_date'], parse_dates=['expiry_date'])
jpm_param['link'] = ['dprice_'+str(i) for i in range(1, jpm_param.shape[0]+1)]
ge_path = '../../../databases/global-databases/fixed-income/db_corporatebonds/GE/'
db_ge = pd.read_csv(ge_path + 'data.csv',
index_col=['date'], parse_dates=['date'])
ge_param = pd.read_csv(ge_path + 'params.csv',
index_col=['expiry_date'], parse_dates=['expiry_date'])
ge_param['link'] = ['dprice_'+str(i) for i in range(1, ge_param.shape[0]+1)]
# ratings
rating_path = '../../../databases/global-databases/credit/db_ratings/'
db_ratings = pd.read_csv(rating_path+'data.csv', parse_dates=['date'])
# ratings_param represents all possible ratings i.e. AAA, AA, etc.
ratings_param = pd.read_csv(rating_path+'params.csv', index_col=0)
ratings_param = np.array(ratings_param.index)
c_ = len(ratings_param)-1
# define the date range of interest
dates = db_stocks.index[(db_stocks.index >= t_first) &
(db_stocks.index <= t_now)]
dates = np.intersect1d(dates, db_impliedvol.index)
dates = dates.astype('datetime64[D]')
# the corporate bonds time series is shorter; select the bonds dates
ind_dates_bonds = np.where((db_ge.index >= dates[0]) &
(db_ge.index <= t_now))
dates_bonds = np.intersect1d(db_ge.index[ind_dates_bonds], db_jpm.index)
dates_bonds = dates_bonds.astype('datetime64[D]')
# length of the time series
t_ = len(dates)
t_bonds = len(dates_bonds)
# initialize temporary databases
db_risk_drivers = {}
v_tnow = {}
v_tinit = {}
risk_drivers_names = {}
v_tnow_names = {}
# implied volatility parametrized by time to expiry and delta-moneyness
tau_implvol = np.array(implvol_param.time2expiry)
tau_implvol = tau_implvol[~np.isnan(tau_implvol)]
delta_moneyness = np.array(implvol_param.delta)
implvol_delta_moneyness_2d = \
db_impliedvol.loc[(db_impliedvol.index.isin(dates)),
(db_impliedvol.columns != 'underlying')]
k_ = len(tau_implvol)
# unpack flattened database (from 2d to 3d)
implvol_delta_moneyness_3d = np.zeros((t_, k_, len(delta_moneyness)))
for k in range(k_):
implvol_delta_moneyness_3d[:, k, :] = \
np.r_[np.array(implvol_delta_moneyness_2d.iloc[:, k::k_])]
# -
# ## [Step 1](https://www.arpm.co/lab/redirect.php?permalink=s_checklist_scenariobased_step01-implementation-step01): Stocks
# +
n_stocks = len(stock_names) # number of stocks
d_stocks = n_stocks # one risk driver for each stock
for d in range(d_stocks):
# calculate time series of stock risk drivers
db_risk_drivers[d] = np.log(np.array(db_stocks.loc[dates, stock_names[d]]))
risk_drivers_names[d] = 'stock '+stock_names[d]+'_log_value'
# stock value
v_tnow[d] = db_stocks.loc[t_now, stock_names[d]]
v_tinit[d] = db_stocks.loc[t_init, stock_names[d]]
v_tnow_names[d] = 'stock '+stock_names[d]
# number of risk drivers, to be updated at every insertion
d_ = d_stocks
# -
# ## [Step 2](https://www.arpm.co/lab/redirect.php?permalink=s_checklist_scenariobased_step01-implementation-step02): S&P 500 Index
# +
# calculate risk driver of the S&P 500 index
db_risk_drivers[d_] = \
np.log(np.array(db_impliedvol.loc[(db_impliedvol.index.isin(dates)),
'underlying']))
risk_drivers_names[d_] = 'sp_index_log_value'
# value of the S&P 500 index
v_tnow[d_] = db_impliedvol.loc[t_now, 'underlying']
v_tinit[d_] = db_impliedvol.loc[t_init, 'underlying']
v_tnow_names[d_] = 'sp_index'
# update counter
d_ = d_+1
# -
# ## [Step 3](https://www.arpm.co/lab/redirect.php?permalink=s_checklist_scenariobased_step01-implementation-step03): Call and put options on the S&P 500 Index
# +
# from delta-moneyness to m-moneyness parametrization
implvol_m_moneyness_3d, m_moneyness = \
implvol_delta2m_moneyness(implvol_delta_moneyness_3d, tau_implvol,
delta_moneyness, y*np.ones((t_, k_)),
tau_implvol, l_)
# calculate log implied volatility
log_implvol_m_moneyness_2d = \
np.log(np.reshape(implvol_m_moneyness_3d,
(t_, k_*(l_)), 'F'))
# value of the underlying
s_tnow = v_tnow[d_stocks]
s_tinit = v_tinit[d_stocks]
# time to expiry (in years)
tau_option_tnow = np.busday_count(t_now, tend_option)/252
tau_option_tinit = np.busday_count(t_init, tend_option)/252
# moneyness
moneyness_tnow = np.log(s_tnow/k_strk)/np.sqrt(tau_option_tnow)
moneyness_tinit = np.log(s_tinit/k_strk)/np.sqrt(tau_option_tinit)
# grid points
points = list(zip(*[grid.flatten() for grid in np.meshgrid(*[tau_implvol,
m_moneyness])]))
# known values
values = implvol_m_moneyness_3d[-1, :, :].flatten('F')
# implied volatility (interpolated)
impl_vol_tnow = \
interpolate.LinearNDInterpolator(points, values)(*np.r_[tau_option_tnow,
moneyness_tnow])
impl_vol_tinit = \
interpolate.LinearNDInterpolator(points, values)(*np.r_[tau_option_tinit,
moneyness_tinit])
# compute call option value by means of Black-Scholes-Merton formula
v_call_tnow = bsm_function(s_tnow, y, impl_vol_tnow, moneyness_tnow, tau_option_tnow)
v_call_tinit = bsm_function(s_tinit, y, impl_vol_tinit, moneyness_tinit,
tau_option_tinit)
# compute put option value by means of the put-call parity
v_zcb_tnow = np.exp(-y*tau_option_tnow)
v_put_tnow = v_call_tnow - s_tnow + k_strk*v_zcb_tnow
v_zcb_tinit = np.exp(-y*tau_option_tinit)
v_put_tinit = v_call_tinit - s_tinit + k_strk*v_zcb_tinit
# store data
d_implvol = log_implvol_m_moneyness_2d.shape[1]
for d in np.arange(d_implvol):
db_risk_drivers[d_+d] = log_implvol_m_moneyness_2d[:, d]
risk_drivers_names[d_+d] = 'option_spx_logimplvol_mtau_' + str(d+1)
v_tnow[d_] = v_call_tnow
v_tinit[d_] = v_call_tinit
v_tnow_names[d_] = 'option_spx_call'
v_tnow[d_+1] = v_put_tnow
v_tinit[d_+1] = v_put_tinit
v_tnow_names[d_+1] = 'option_spx_put'
# update counter
d_ = len(db_risk_drivers)
n_ = len(v_tnow)
# -
# ## [Step 4](https://www.arpm.co/lab/redirect.php?permalink=s_checklist_scenariobased_step01-implementation-step04): Corporate bonds
# +
n_bonds = 2
# GE bond
# extract coupon
coupon_ge = ge_param.loc[tend_ge, 'coupons']/100
# rescaled dirty prices of GE bond
v_bond_ge = db_ge.loc[db_ge.index.isin(dates_bonds)]/100
# computation of Nelson-Siegel parameters for GE bond
theta_ge = np.zeros((t_bonds, 4))
theta_ge = bootstrap_nelson_siegel(v_bond_ge.values, dates_bonds,
np.array(ge_param.coupons/100),
ge_param.index.values.astype('datetime64[D]'))
# risk drivers for bonds are Nelson-Siegel parameters
for d in np.arange(4):
if d == 3:
db_risk_drivers[d_+d] = np.sqrt(theta_ge[:, d])
else:
db_risk_drivers[d_+d] = theta_ge[:, d]
risk_drivers_names[d_+d] = 'ge_bond_nel_sieg_theta_' + str(d+1)
# store dirty price of GE bond
# get column variable name in v_bond_ge that selects bond with correct expiry
ge_link = ge_param.loc[tend_ge, 'link']
v_tnow[n_] = v_bond_ge.loc[t_now, ge_link]
v_tinit[n_] = v_bond_ge.loc[t_init, ge_link]
v_tnow_names[n_] = 'ge_bond'
# update counter
d_ = len(db_risk_drivers)
n_ = len(v_tnow_names)
# JPM bond
# extract coupon
coupon_jpm = jpm_param.loc[tend_jpm, 'coupons']/100
# rescaled dirty prices of JPM bond
v_bond_jpm = db_jpm.loc[db_jpm.index.isin(dates_bonds)]/100
# computation of Nelson-Siegel parameters for JPM bond
theta_jpm = np.zeros((t_bonds, 4))
theta_jpm = bootstrap_nelson_siegel(v_bond_jpm.values, dates_bonds,
np.array(jpm_param.coupons/100),
jpm_param.index.values.astype('datetime64[D]'))
# risk drivers for bonds are Nelson-Siegel parameters
for d in np.arange(4):
if d == 3:
db_risk_drivers[d_+d] = np.sqrt(theta_jpm[:, d])
else:
db_risk_drivers[d_+d] = theta_jpm[:, d]
risk_drivers_names[d_+d] = 'jpm_bond_nel_sieg_theta_'+str(d+1)
# store dirty price of JPM bond
# get column variable name in v_bond_ge that selects bond with correct expiry
jpm_link = jpm_param.loc[tend_jpm, 'link']
v_tnow[n_] = v_bond_jpm.loc[t_now, jpm_link]
v_tinit[n_] = v_bond_jpm.loc[t_init, jpm_link]
v_tnow_names[n_] = 'jpm_bond'
# update counter
d_ = len(db_risk_drivers)
n_ = len(v_tnow)
# fill the missing values with nan's
for d in range(d_stocks+1+d_implvol,
d_stocks+1+d_implvol+n_bonds*4):
db_risk_drivers[d] = np.concatenate((np.zeros(t_-t_bonds),
db_risk_drivers[d]))
db_risk_drivers[d][:t_-t_bonds] = np.NAN
# -
# ## [Step 5](https://www.arpm.co/lab/redirect.php?permalink=s_checklist_scenariobased_step01-implementation-step05): Credit
# +
# extract aggregate credit risk drivers
dates_credit, n_obligors, n_cum_trans, *_ = \
aggregate_rating_migrations(db_ratings, ratings_param, tfirst_credit,
tlast_credit)
# number of obligors in each rating at each t
t_credit = len(dates_credit) # length of the time series
credit_types = {}
credit_series = {}
for c in np.arange(c_+1):
credit_types[c] = 'n_oblig_in_state_'+ratings_param[c]
credit_series[c] = n_obligors[:, c]
d_credit = len(credit_series)
# cumulative number of migrations up to time t for each pair of rating buckets
for i in np.arange(c_+1):
for j in np.arange(c_+1):
if i != j:
credit_types[d_credit] = \
'n_cum_trans_'+ratings_param[i]+'_'+ratings_param[j]
credit_series[d_credit] = n_cum_trans[:, i, j]
d_credit = len(credit_series)
# -
# ## [Step 6](https://www.arpm.co/lab/redirect.php?permalink=s_checklist_scenariobased_step01-implementation-step06): Save databases
# +
path = '../../../databases/temporary-databases/'
# market risk drivers
out = pd.DataFrame({risk_drivers_names[d]: db_risk_drivers[d]
for d in range(len(db_risk_drivers))}, index=dates)
out = out[list(risk_drivers_names.values())]
out.index.name = 'dates'
out.to_csv(path+'db_riskdrivers_series.csv')
del out
# aggregate credit risk drivers
out = pd.DataFrame({credit_types[d]: credit_series[d]
for d in range(d_credit)},
index=dates_credit)
out = out[list(credit_types.values())]
out.index.name = 'dates'
out.to_csv(path+'db_riskdrivers_credit.csv')
del out
# values of all instruments at t_now
out = pd.DataFrame({v_tnow_names[n]: pd.Series(v_tnow[n])
for n in range(len(v_tnow))})
out = out[list(v_tnow_names.values())]
out.to_csv(path+'db_v_tnow.csv',
index=False)
del out
# values of all instruments at t_init
out = pd.DataFrame({v_tnow_names[n]: | pd.Series(v_tinit[n]) | pandas.Series |
# Script that builds on the column diagnostic code to rename cols as needed in order to reconcile across years, then row-binds the 2014-2021 datasets.
# load necessary packages
import pandas as pd
import openpyxl
import os
from os import listdir
from pathlib import Path
import re
import numpy as np
dirname = os.path.dirname(__file__)
dropbox_general = str(Path(dirname).parents[1])
DROPBOX_DATA_PATH = os.path.join(dropbox_general,
"qss20_finalproj_rawdata/summerwork/")
DROPBOX_RAW_PATH = os.path.join(DROPBOX_DATA_PATH,
"raw/")
DROPBOX_INT_PATH = os.path.join(DROPBOX_DATA_PATH,
"intermediate/")
all_disclosure_files = [file for file in listdir(DROPBOX_RAW_PATH) if
"H_2A_Disclosure_Data" in file]
## Read in datasets (storing in dictionary with key as year)
disc_files = {}
for file in all_disclosure_files:
key_name = "df_" + re.findall("20[0-9][0-9]", file)[0]
print("Reading file: " + file)
disc_files[key_name] = | pd.read_excel(DROPBOX_RAW_PATH + file) | pandas.read_excel |
import datetime
import logging
import coloredlogs
import pandas as pd
from common import right_form_from_number
coloredlogs.install()
logging.basicConfig(level=logging.DEBUG)
tea = pd.read_csv(
open("Концерты Чайковский.csv", "r", encoding="utf-8"),
encoding="utf-8",
parse_dates=[1, 2],
)
rah = pd.read_csv(
open("Концерты_Рахманинов.csv", "r", encoding="utf-8"),
encoding="utf-8",
parse_dates=[1, 2],
)
res = pd.concat([tea, rah])
res["дата, гггг-мм-дд"] = pd.to_dateti | me(res["дата, гггг-мм-дд"]) | pandas.to_datetime |
"""The Model class is the main object for creating model in Pastas.
Examples
--------
>>> oseries = pd.Series([1,2,1], index=pd.to_datetime(range(3), unit="D"))
>>> ml = Model(oseries)
"""
from collections import OrderedDict
from copy import copy
from inspect import isclass
from logging import getLogger
from os import getlogin
import numpy as np
import pandas as pd
from .decorators import get_stressmodel
from .io.base import dump, load_model
from .modelstats import Statistics
from .noisemodels import NoiseModel
from .plots import Plotting
from .solver import LeastSquares
from .stressmodels import Constant
from .timeseries import TimeSeries
from .utils import get_dt, get_time_offset, get_sample, \
frequency_is_supported, validate_name
from .version import __version__
class Model:
"""Initiates a time series model.
Parameters
----------
oseries: pandas.Series or pastas.TimeSeries
pandas Series object containing the dependent time series. The
observation can be non-equidistant.
constant: bool, optional
Add a constant to the model (Default=True).
noisemodel: bool, optional
Add the default noisemodel to the model. A custom noisemodel can be
added later in the modelling process as well.
name: str, optional
String with the name of the model, used in plotting and saving.
metadata: dict, optional
Dictionary containing metadata of the oseries, passed on the to
oseries when creating a pastas TimeSeries object. hence,
ml.oseries.metadata will give you the metadata.
Returns
-------
ml: pastas.Model
Pastas Model instance, the base object in Pastas.
Examples
--------
>>> oseries = pd.Series([1,2,1], index=pd.to_datetime(range(3), unit="D"))
>>> ml = Model(oseries)
"""
def __init__(self, oseries, constant=True, noisemodel=True, name=None,
metadata=None):
self.logger = getLogger(__name__)
# Construct the different model components
self.oseries = TimeSeries(oseries, settings="oseries",
metadata=metadata)
if name is None:
name = self.oseries.name
if name is None:
name = 'Observations'
self.name = validate_name(name)
self.parameters = pd.DataFrame(
columns=["initial", "name", "optimal", "pmin", "pmax", "vary",
"stderr"])
# Define the model components
self.stressmodels = OrderedDict()
self.constant = None
self.transform = None
self.noisemodel = None
# Default solve/simulation settings
self.settings = {
"tmin": None,
"tmax": None,
"freq": "D",
"warmup": pd.Timedelta(days=3650),
"time_offset": pd.Timedelta(0),
"noise": noisemodel,
"solver": None,
"fit_constant": True,
}
if constant:
constant = Constant(initial=self.oseries.series.mean(),
name="constant")
self.add_constant(constant)
if noisemodel:
self.add_noisemodel(NoiseModel())
# File Information
self.file_info = self.get_file_info()
# initialize some attributes for solving and simulation
self.sim_index = None
self.oseries_calib = None
self.interpolate_simulation = None
self.normalize_residuals = False
self.fit = None
# Load other modules
self.stats = Statistics(self)
self.plots = Plotting(self)
self.plot = self.plots.plot # because we are lazy
def __repr__(self):
"""Prints a simple string representation of the model.
"""
template = ('{cls}(oseries={os}, name={name}, constant={const}, '
'noisemodel={noise})')
return template.format(cls=self.__class__.__name__,
os=self.oseries.name,
name=self.name,
                               const=self.constant is not None,
                               noise=self.noisemodel is not None)
def add_stressmodel(self, stressmodel, *args, replace=False):
"""Adds a stressmodel to the main model.
Parameters
----------
stressmodel: pastas.stressmodel.stressmodelBase
instance of a pastas.stressmodel object. Multiple stress models
can be provided (e.g., ml.add_stressmodel(sm1, sm2) in one call.
replace: bool, optional
replace the stressmodel if a stressmodel with the same name
already exists. Not recommended but useful at times. Default is
False.
Notes
-----
To obtain a list of the stressmodel names, type:
>>> ml.stressmodels.keys()
Examples
--------
>>> sm = ps.StressModel(stress, rfunc=ps.Gamma, name="stress")
>>> ml.add_stressmodel(sm)
"""
# Method can take multiple stressmodels at once through args
if args:
for arg in args:
self.add_stressmodel(arg)
if (stressmodel.name in self.stressmodels.keys()) and not replace:
self.logger.error("The name for the stressmodel you are trying "
"to add already exists for this model. Select "
"another name.")
else:
self.stressmodels[stressmodel.name] = stressmodel
self.parameters = self.get_init_parameters(initial=False)
if self.settings["freq"] is None:
self._set_freq()
stressmodel.update_stress(freq=self.settings["freq"])
# Check if stress overlaps with oseries, if not give a warning
if (stressmodel.tmin > self.oseries.series.index.max()) or \
(stressmodel.tmax < self.oseries.series.index.min()):
self.logger.warning("The stress of the stressmodel has no "
"overlap with ml.oseries.")
def add_constant(self, constant):
"""Adds a Constant to the time series Model.
Parameters
----------
constant: pastas.Constant
Pastas constant instance, possibly more things in the future.
Examples
--------
>>> d = ps.Constant()
>>> ml.add_constant(d)
"""
self.constant = constant
self.parameters = self.get_init_parameters(initial=False)
def add_transform(self, transform):
"""Adds a Transform to the time series Model.
Parameters
----------
transform: pastas.transform
instance of a pastas.transform object.
Examples
--------
>>> tt = ps.ThresholdTransform()
>>> ml.add_transform(tt)
"""
if isclass(transform):
# keep this line for backwards compatibility for now
transform = transform()
transform.set_model(self)
self.transform = transform
self.parameters = self.get_init_parameters(initial=False)
def add_noisemodel(self, noisemodel):
"""Adds a noisemodel to the time series Model.
Parameters
----------
noisemodel: pastas.noisemodels.NoiseModelBase
Instance of NoiseModelBase
Examples
--------
>>> n = ps.NoiseModel()
>>> ml.add_noisemodel(n)
"""
self.noisemodel = noisemodel
self.noisemodel.set_init_parameters(oseries=self.oseries.series)
self.parameters = self.get_init_parameters(initial=False)
# check whether noise_alpha is not smaller than ml.settings["freq"]
freq_in_days = get_dt(self.settings["freq"])
noise_alpha = self.noisemodel.parameters.initial.iloc[0]
if freq_in_days > noise_alpha:
self.set_initial("noise_alpha", freq_in_days)
@get_stressmodel
def del_stressmodel(self, name):
""" Safely delete a stressmodel from the stressmodels dict.
Parameters
----------
name: str
string with the name of the stressmodel object.
Notes
-----
To obtain a list of the stressmodel names type:
>>> ml.stressmodels.keys()
"""
self.stressmodels.pop(name, None)
self.parameters = self.get_init_parameters(initial=False)
def del_constant(self):
""" Safely delete the constant from the Model.
"""
if self.constant is None:
self.logger.warning("No constant is present in this model.")
else:
self.constant = None
self.parameters = self.get_init_parameters(initial=False)
def del_transform(self):
"""Safely delete the transform from the Model.
"""
if self.transform is None:
self.logger.warning("No transform is present in this model.")
else:
self.transform = None
self.parameters = self.get_init_parameters(initial=False)
def del_noisemodel(self):
"""Safely delete the noisemodel from the Model.
"""
if self.noisemodel is None:
self.logger.warning("No noisemodel is present in this model.")
else:
self.noisemodel = None
self.parameters = self.get_init_parameters(initial=False)
def simulate(self, parameters=None, tmin=None, tmax=None, freq=None,
warmup=None, return_warmup=False):
"""Method to simulate the time series model.
Parameters
----------
parameters: array-like, optional
Array with the parameters used in the time series model. See
Model.get_parameters() for more info if parameters is None.
tmin: str, optional
String with a start date for the simulation period (E.g. '1980').
If none is provided, the tmin from the oseries is used.
tmax: str, optional
String with an end date for the simulation period (E.g. '2010').
If none is provided, the tmax from the oseries is used.
freq: str, optional
String with the frequency the stressmodels are simulated. Must
be one of the following: (D, h, m, s, ms, us, ns) or a multiple of
that e.g. "7D".
warmup: float/int, optional
Warmup period (in Days).
return_warmup: bool, optional
Return the simulation including the the warmup period or not,
default is False.
Returns
-------
sim: pandas.Series
pandas.Series containing the simulated time series
Notes
-----
This method can be used without any parameters. When the model is
solved, the optimal parameters values are used and if not,
the initial parameter values are used. This allows the user to
get an idea of how the simulation looks with only the initial
parameters and no calibration.
"""
# Default options when tmin, tmax, freq and warmup are not provided.
if tmin is None and self.settings['tmin']:
tmin = self.settings['tmin']
else:
tmin = self.get_tmin(tmin, freq, use_oseries=False,
use_stresses=True)
if tmax is None and self.settings['tmax']:
tmax = self.settings['tmax']
else:
tmax = self.get_tmax(tmax, freq, use_oseries=False,
use_stresses=True)
if freq is None:
freq = self.settings["freq"]
if warmup is None:
warmup = self.settings["warmup"]
elif not isinstance(warmup, pd.Timedelta):
warmup = pd.Timedelta(days=warmup)
# Get the simulation index and the time step
sim_index = self.get_sim_index(tmin, tmax, freq, warmup)
dt = get_dt(freq)
# Get parameters if none are provided
if parameters is None:
parameters = self.get_parameters()
sim = pd.Series(data=np.zeros(sim_index.size, dtype=float),
index=sim_index, fastpath=True)
istart = 0 # Track parameters index to pass to stressmodel object
for sm in self.stressmodels.values():
contrib = sm.simulate(parameters[istart: istart + sm.nparam],
sim_index.min(), sim_index.max(), freq, dt)
sim = sim.add(contrib)
istart += sm.nparam
if self.constant:
sim = sim + self.constant.simulate(parameters[istart])
istart += 1
if self.transform:
sim = self.transform.simulate(sim, parameters[
istart:istart + self.transform.nparam])
# Respect provided tmin/tmax at this point, since warmup matters for
# simulation but should not be returned, unless return_warmup=True.
if not return_warmup:
sim = sim.loc[tmin:tmax]
if sim.hasnans:
sim = sim.dropna()
self.logger.warning('Nan-values were removed from the simulation.')
sim.name = 'Simulation'
return sim
def residuals(self, parameters=None, tmin=None, tmax=None, freq=None,
warmup=None):
"""Method to calculate the residual series.
Parameters
----------
parameters: list, optional
Array of the parameters used in the time series model. See
Model.get_parameters() for more info if parameters is None.
tmin: str, optional
String with a start date for the simulation period (E.g. '1980').
If none is provided, the tmin from the oseries is used.
tmax: str, optional
String with an end date for the simulation period (E.g. '2010').
If none is provided, the tmax from the oseries is used.
freq: str, optional
String with the frequency the stressmodels are simulated. Must
be one of the following: (D, h, m, s, ms, us, ns) or a multiple of
that e.g. "7D".
warmup: float/int, optional
Warmup period (in Days).
Returns
-------
res: pandas.Series
pandas.Series with the residuals series.
"""
# Default options when tmin, tmax, freq and warmup are not provided.
if tmin is None:
tmin = self.settings['tmin']
if tmax is None:
tmax = self.settings['tmax']
if freq is None:
freq = self.settings["freq"]
if warmup is None:
warmup = self.settings["warmup"]
else:
warmup = pd.Timedelta(days=warmup)
# simulate model
sim = self.simulate(parameters, tmin, tmax, freq, warmup,
return_warmup=False)
# Get the oseries calibration series
oseries_calib = self.observations(tmin, tmax, freq)
# Get simulation at the correct indices
if self.interpolate_simulation is None:
            if oseries_calib.index.difference(sim.index).size != 0:
self.interpolate_simulation = True
self.logger.info('There are observations between the '
'simulation timesteps. Linear interpolation '
'between simulated values is used.')
if self.interpolate_simulation:
# interpolate simulation to times of observations
sim_interpolated = np.interp(oseries_calib.index.asi8,
sim.index.asi8, sim.values)
else:
# all of the observation indexes are in the simulation
sim_interpolated = sim.reindex(oseries_calib.index)
# Calculate the actual residuals here
res = oseries_calib.subtract(sim_interpolated)
if res.hasnans:
res = res.dropna()
self.logger.warning('Nan-values were removed from the residuals.')
if self.normalize_residuals:
res = res - res.values.mean()
res.name = "Residuals"
return res
def noise(self, parameters=None, tmin=None, tmax=None, freq=None,
warmup=None):
"""Method to simulate the noise when a noisemodel is present.
Parameters
----------
parameters: list, optional
Array of the parameters used in the time series model. See
Model.get_parameters() for more info if parameters is None.
tmin: str, optional
String with a start date for the simulation period (E.g. '1980').
If none is provided, the tmin from the oseries is used.
tmax: str, optional
String with an end date for the simulation period (E.g. '2010').
If none is provided, the tmax from the oseries is used.
freq: str, optional
String with the frequency the stressmodels are simulated. Must
be one of the following: (D, h, m, s, ms, us, ns) or a multiple of
that e.g. "7D".
warmup: float/int, optional
Warmup period (in Days).
Returns
-------
noise : pandas.Series
Pandas series of the noise.
Notes
-----
The noise are the time series that result when applying a noise
model.
"""
if (self.noisemodel is None) or (self.settings["noise"] is False):
self.logger.error("Noise cannot be calculated if there is no "
"noisemodel present or is not used during "
"parameter estimation.")
return None
if freq is None:
freq = self.settings["freq"]
# Get parameters if none are provided
if parameters is None:
parameters = self.get_parameters()
# Calculate the residuals
res = self.residuals(parameters, tmin, tmax, freq, warmup)
# Calculate the noise
noise = self.noisemodel.simulate(res,
parameters[-self.noisemodel.nparam:])
return noise
def observations(self, tmin=None, tmax=None, freq=None,
update_observations=False):
"""Method that returns the observations series used for calibration.
Parameters
----------
tmin: str, optional
String with a start date for the simulation period (E.g. '1980').
If none is provided, the tmin from the oseries is used.
tmax: str, optional
String with an end date for the simulation period (E.g. '2010').
If none is provided, the tmax from the oseries is used.
freq: str, optional
String with the frequency the stressmodels are simulated. Must
be one of the following: (D, h, m, s, ms, us, ns) or a multiple of
that e.g. "7D".
update_observations : bool, optional
if True, force recalculation of the observations series, default
is False
Returns
-------
oseries_calib: pandas.Series
pandas series of the oseries used for calibration of the model
Notes
-----
This method makes sure the simulation is compared to the nearest
observation. It finds the index closest to sim_index, and then returns
a selection of the oseries. in the residuals method, the simulation is
interpolated to the observation-timestamps.
"""
if tmin is None and self.settings['tmin']:
tmin = self.settings['tmin']
else:
tmin = self.get_tmin(tmin, freq, use_oseries=False,
use_stresses=True)
if tmax is None and self.settings['tmax']:
tmax = self.settings['tmax']
else:
tmax = self.get_tmax(tmax, freq, use_oseries=False,
use_stresses=True)
if freq is None:
freq = self.settings["freq"]
for key, setting in zip([tmin, tmax, freq], ["tmin", "tmax", "freq"]):
if key != self.settings[setting]:
update_observations = True
if self.oseries_calib is None or update_observations:
oseries_calib = self.oseries.series.loc[tmin:tmax]
# sample measurements, so that frequency is not higher than model
# keep the original timestamps, as they will be used during
# interpolation of the simulation
sim_index = self.get_sim_index(tmin, tmax, freq,
self.settings["warmup"])
if not oseries_calib.empty:
index = get_sample(oseries_calib.index, sim_index)
oseries_calib = oseries_calib.loc[index]
else:
oseries_calib = self.oseries_calib
return oseries_calib
def initialize(self, tmin=None, tmax=None, freq=None, warmup=None,
noise=None, weights=None, initial=True, fit_constant=None):
"""Method to initialize the model.
This method is called by the solve-method, but can also be triggered
manually. See the solve-method for a description of the arguments.
"""
if noise is None and self.noisemodel:
noise = True
elif noise is True and self.noisemodel is None:
self.logger.warning("""Warning, solving with noisemodel while no
noisemodel is defined. No noisemodel is used.""")
noise = False
self.settings["noise"] = noise
self.settings["weights"] = weights
# Set the frequency & warmup
if freq:
self.settings["freq"] = frequency_is_supported(freq)
if warmup is not None:
self.settings["warmup"] = pd.Timedelta(days=warmup)
# Set the time offset from the frequency (this does not work as expected yet)
# self._set_time_offset()
# Set tmin and tmax
self.settings["tmin"] = self.get_tmin(tmin)
self.settings["tmax"] = self.get_tmax(tmax)
# set fit_constant
if fit_constant is not None:
self.settings["fit_constant"] = fit_constant
# make sure calibration data is renewed
self.sim_index = self.get_sim_index(self.settings["tmin"],
self.settings["tmax"],
self.settings["freq"],
self.settings["warmup"],
update_sim_index=True)
self.oseries_calib = self.observations(tmin=self.settings["tmin"],
tmax=self.settings["tmax"],
freq=self.settings["freq"],
update_observations=True)
self.interpolate_simulation = None
# Initialize parameters
self.parameters = self.get_init_parameters(noise, initial)
# Prepare model if not fitting the constant as a parameter
if not self.settings["fit_constant"]:
self.parameters.loc["constant_d", "vary"] = False
self.parameters.loc["constant_d", "initial"] = 0.0
self.normalize_residuals = True
def solve(self, tmin=None, tmax=None, freq=None, warmup=None, noise=True,
solver=None, report=True, initial=True, weights=None,
fit_constant=True, **kwargs):
"""Method to solve the time series model.
Parameters
----------
tmin: str, optional
String with a start date for the simulation period (E.g. '1980').
If none is provided, the tmin from the oseries is used.
tmax: str, optional
String with an end date for the simulation period (E.g. '2010').
If none is provided, the tmax from the oseries is used.
freq: str, optional
String with the frequency the stressmodels are simulated. Must
be one of the following: (D, h, m, s, ms, us, ns) or a multiple of
that e.g. "7D".
warmup: float/int, optional
Warmup period (in Days) for which the simulation is calculated,
but not used for the calibration period.
noise: bool, optional
Argument that determines if a noisemodel is used (only if
present). The default is noise=True.
solver: pastas.solver.BaseSolver class, optional
Class used to solve the model. Options are: ps.LeastSquares
(default) or ps.LmfitSolve. A class is needed, not an instance
of the class!
report: bool, optional
Print a report to the screen after optimization finished. This
can also be manually triggered after optimization by calling
print(ml.fit_report()) on the Pastas model instance.
initial: bool, optional
Reset initial parameters from the individual stressmodels.
Default is True. If False, the optimal values from an earlier
optimization are used.
weights: pandas.Series, optional
Pandas Series with values by which the residuals are multiplied,
index-based.
fit_constant: bool, optional
Argument that determines if the constant is fitted as a parameter.
If it is set to False, the constant is set equal to the mean of
the residuals.
**kwargs: dict, optional
All keyword arguments will be passed onto minimization method
from the solver. It depends on the solver used which arguments
can be used.
Notes
-----
- The solver object including some results are stored as ml.fit. From
here one can access the covariance (ml.fit.pcov) and correlation
matrix (ml.fit.pcor).
- Each solver return a number of results after optimization. These
solver specific results are stored in ml.fit.result and can be
accessed from there.
"""
# Initialize the model
self.initialize(tmin, tmax, freq, warmup, noise, weights, initial,
fit_constant)
if self.oseries_calib.empty:
raise ValueError("Calibration series 'oseries_calib' is empty! "
"Check 'tmin' or 'tmax'.")
# Store the solve instance
if solver is None:
if self.fit is None:
self.fit = LeastSquares(ml=self)
elif not issubclass(solver, self.fit.__class__):
self.fit = solver(ml=self)
self.settings["solver"] = self.fit._name
# Solve model
success, optimal, stderr = self.fit.solve(noise=noise, weights=weights,
**kwargs)
if not success:
self.logger.warning("Model parameters could not be estimated "
"well.")
if not self.settings['fit_constant']:
# Determine the residuals and set the constant to their mean
self.normalize_residuals = False
res = self.residuals(optimal).mean()
optimal[self.parameters.name == self.constant.name] = res
self.parameters.optimal = optimal
self.parameters.stderr = stderr
if report:
print(self.fit_report())
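    # --- Hedged usage sketch for solve() (illustrative comment, not part of the class) ---
    # Assuming pastas is imported as ps and 'oseries'/'prec' are pandas Series with a
    # DatetimeIndex, a minimal calibration could look like:
    #
    #   ml = ps.Model(oseries)
    #   sm = ps.StressModel(prec, rfunc=ps.Gamma, name="recharge")
    #   ml.add_stressmodel(sm)
    #   ml.solve(tmin="1990", tmax="2010", report=True)
    #   sim = ml.simulate()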
def set_initial(self, name, value, move_bounds=False):
"""Method to set the initial value of any parameter.
Parameters
----------
name: str
name of the parameter to update.
value: float
parameters value to use as initial estimate.
move_bounds: bool, optional
Reset pmin/pmax based on new initial value.
"""
if move_bounds:
factor = value / self.parameters.loc[name, 'initial']
min_new = self.parameters.loc[name, 'pmin'] * factor
self.set_parameter(name, min_new, 'pmin')
max_new = self.parameters.loc[name, 'pmax'] * factor
self.set_parameter(name, max_new, 'pmax')
self.set_parameter(name, value, "initial")
def set_vary(self, name, value):
"""Method to set if the parameter is allowed to vary.
Parameters
----------
name: str
name of the parameter to update.
value: bool
boolean to vary a parameter (True) or not (False).
"""
self.set_parameter(name, bool(value), "vary")
def set_pmin(self, name, value):
"""Method to set the minimum value of a parameter.
Parameters
----------
name: str
name of the parameter to update.
value: float
minimum value for the parameter.
"""
self.set_parameter(name, value, "pmin")
def set_pmax(self, name, value):
"""Method to set the maximum values of a parameter.
Parameters
----------
name: str
name of the parameter to update.
value: float
maximum value for the parameter.
"""
self.set_parameter(name, value, "pmax")
def set_parameter(self, name, value, kind):
"""Internal method to set the parameter value for some kind.
"""
if name not in self.parameters.index:
msg = "parameter {} is not present in the model".format(name)
self.logger.error(msg)
raise KeyError(msg)
cat = self.parameters.loc[name, "name"]
# Because either of the following is not necessarily present
noisemodel = self.noisemodel.name if self.noisemodel else "NotPresent"
constant = self.constant.name if self.constant else "NotPresent"
if cat in self.stressmodels.keys():
self.stressmodels[cat].__getattribute__("set_" + kind)(name, value)
self.parameters.loc[name, kind] = value
elif cat == noisemodel:
self.noisemodel.__getattribute__("set_" + kind)(name, value)
self.parameters.loc[name, kind] = value
elif cat == constant:
self.constant.__getattribute__("set_" + kind)(name, value)
self.parameters.loc[name, kind] = value
def _set_freq(self):
"""Internal method to set the frequency in the settings. This is
method is not yet applied and is for future development.
"""
freqs = set()
if self.oseries.freq:
# when the oseries has a constant frequency, use this frequency
freqs.add(self.oseries.freq)
else:
# otherwise determine frequency from the stressmodels
for stressmodel in self.stressmodels.values():
if stressmodel.stress:
for stress in stressmodel.stress:
if stress.settings['freq']:
# first check the frequency, and use this
freqs.add(stress.settings['freq'])
elif stress.freq_original:
# otherwise fall back to the stress's original frequency, if available
freqs.add(stress.freq_original)
if len(freqs) == 1:
# if there is only one frequency, use this frequency
self.settings["freq"] = next(iter(freqs))
elif len(freqs) > 1:
# if there are multiple frequencies, take the highest frequency (lowest dt)
freqs = list(freqs)
dt = np.array([get_dt(f) for f in freqs])
self.settings["freq"] = freqs[np.argmin(dt)]
else:
self.logger.info("Frequency of model cannot be determined. "
"Frequency is set to daily")
self.settings["freq"] = "D"
def _set_time_offset(self):
"""Internal method to set the time offset for the model class.
Notes
-----
Method to check if the StressModel timestamps match (e.g. similar hours)
"""
time_offsets = set()
for stressmodel in self.stressmodels.values():
for st in stressmodel.stress:
if st.freq_original:
# calculate the offset from the default frequency
time_offset = get_time_offset(
st.series_original.index.min(),
self.settings["freq"])
time_offsets.add(time_offset)
if len(time_offsets) > 1:
msg = ("The time offsets from the default frequency are not "
"the same for all stresses.")
self.logger.error(msg)
raise Exception(msg)
if len(time_offsets) == 1:
self.settings["time_offset"] = next(iter(time_offsets))
else:
self.settings["time_offset"] = pd.Timedelta(0)
def get_stressmodel_names(self):
"""Returns list of stressmodel names"""
return list(self.stressmodels.keys())
def get_sim_index(self, tmin, tmax, freq, warmup, update_sim_index=False):
"""Internal method to get the simulation index, including the warmup.
Parameters
----------
tmin: str
String with a start date for the simulation period (E.g. '1980').
If none is provided, the tmin from the oseries is used.
tmax: str
String with an end date for the simulation period (E.g. '2010').
If none is provided, the tmax from the oseries is used.
freq: str
String with the frequency the stressmodels are simulated. Must
be one of the following: (D, h, m, s, ms, us, ns) or a multiple of
that e.g. "7D".
warmup: float/int
Warmup period (in Days).
update_sim_index : bool, optional
if True, force recalculation of sim_index, default is False
Returns
-------
sim_index: pandas.DatetimeIndex
Pandas DatetimeIndex instance with the datetimes values for
which the model is simulated.
"""
# Check if any of the settings are updated
for key, setting in zip([tmin, tmax, freq, warmup],
["tmin", "tmax", "freq", "warmup"]):
if key != self.settings[setting]:
update_sim_index = True
if self.sim_index is None or update_sim_index:
tmin = (tmin - warmup).floor(freq) + self.settings["time_offset"]
sim_index = pd.date_range(tmin, tmax, freq=freq)
else:
sim_index = self.sim_index
return sim_index
def get_tmin(self, tmin=None, freq=None, use_oseries=True,
use_stresses=False):
"""Method that checks and returns valid values for tmin.
Parameters
----------
tmin: str, optional
string with a year or date that can be turned into a pandas
Timestamp (e.g. pd.Timestamp(tmin)).
freq: str, optional
string with the frequency.
use_oseries: bool, optional
Obtain the tmin and tmax from the oseries. Default is True.
use_stresses: bool, optional
Obtain the tmin and tmax from the stresses. The minimum/maximum
time from all stresses is taken.
Returns
-------
tmin: pandas.Timestamp
returns pandas timestamps for tmin.
Notes
-----
The parameters tmin and tmax are leading, unless use_oseries is
True, then these are checked against the oseries index. The tmin and
tmax are checked and returned according to the following rules:
A. If no value for tmin is provided:
1. If use_oseries is True, tmin is based on the oseries.
2. If use_stresses is True, tmin is based on the stressmodels.
B. If a value for tmin is provided:
1. A pandas Timestamp is made from the string.
2. If use_oseries is True, tmin is checked against the oseries.
C. In all cases an offset for the tmin is added.
A detailed description of dealing with tmin and timesteps in general
can be found in the developers section of the docs.
"""
# Get tmin from the oseries
if use_oseries:
ts_tmin = self.oseries.series.index.min()
# Get tmin from the stressmodels
elif use_stresses:
ts_tmin = pd.Timestamp.max
for stressmodel in self.stressmodels.values():
if stressmodel.tmin < ts_tmin:
ts_tmin = stressmodel.tmin
# Get tmin and tmax from user provided values
else:
ts_tmin = pd.Timestamp(tmin)
# Set tmin properly
if tmin is not None and use_oseries:
tmin = max(pd.Timestamp(tmin), ts_tmin)
elif tmin is not None:
tmin = pd.Timestamp(tmin)
else:
tmin = ts_tmin
# adjust tmin and tmax so that the time-offset is equal to the stressmodels.
if freq is None:
freq = self.settings["freq"]
tmin = tmin.floor(freq) + self.settings["time_offset"]
return tmin
def get_tmax(self, tmax=None, freq=None, use_oseries=True,
use_stresses=False):
"""Method that checks and returns valid values for tmin and tmax.
Parameters
----------
tmax: str, optional
string with a year or date that can be turned into a pandas
Timestamp (e.g. pd.Timestamp(tmax)).
freq: str, optional
string with the frequency.
use_oseries: bool, optional
Obtain the tmin and tmax from the oseries. Default is True.
use_stresses: bool, optional
Obtain the tmin and tmax from the stresses. The minimum/maximum
time from all stresses is taken.
Returns
-------
tmax: pandas.Timestamp
returns pandas timestamps for tmax.
Notes
-----
The parameters tmin and tmax are leading, unless use_oseries is
True, then these are checked against the oseries index. The tmin and
tmax are checked and returned according to the following rules:
A. If no value for tmax is provided:
1. If use_oseries is True, tmax is based on the oseries.
2. If use_stresses is True, tmax is based on the stressmodels.
B. If a value for tmax is provided:
1. A pandas Timestamp is made from the string.
2. If use_oseries is True, tmax is checked against the oseries.
C. In all cases an offset for the tmax is added.
A detailed description of dealing with tmax and timesteps
in general can be found in the developers section of the docs.
"""
# Get tmax from the oseries
if use_oseries:
ts_tmax = self.oseries.series.index.max()
# Get tmax from the stressmodels
elif use_stresses:
ts_tmax = pd.Timestamp.min
for stressmodel in self.stressmodels.values():
if stressmodel.tmax > ts_tmax:
ts_tmax = stressmodel.tmax
# Get tmax from user provided values
else:
ts_tmax = pd.Timestamp(tmax)
# Set tmax properly
if tmax is not None and use_oseries:
tmax = min(pd.Timestamp(tmax), ts_tmax)
elif tmax is not None:
tmax = pd.Timestamp(tmax)
else:
tmax = ts_tmax
# adjust tmax so that the time-offset is equal to the stressmodels.
if freq is None:
freq = self.settings["freq"]
tmax = tmax.floor(freq) + self.settings["time_offset"]
return tmax
def get_init_parameters(self, noise=None, initial=True):
"""Method to get all initial parameters from the individual objects.
Parameters
----------
noise: bool, optional
Add the parameters for the noisemodel to the parameters
Dataframe or not.
initial: bool, optional
True to get initial parameters, False to get optimized parameters.
Returns
-------
parameters: pandas.DataFrame
pandas.DataFrame with the parameters.
"""
if noise is None:
noise = self.settings['noise']
parameters = pd.DataFrame(columns=["initial", "name", "optimal",
"pmin", "pmax", "vary", "stderr"])
for sm in self.stressmodels.values():
parameters = parameters.append(sm.parameters, sort=False)
if self.constant:
parameters = parameters.append(self.constant.parameters,
sort=False)
if self.transform:
parameters = parameters.append(self.transform.parameters,
sort=False)
if self.noisemodel and noise:
parameters = parameters.append(self.noisemodel.parameters,
sort=False)
# Set initial parameters to optimal parameters from model
if not initial:
paramold = self.parameters.optimal
parameters.initial.update(paramold)
parameters.optimal.update(paramold)
return parameters
def get_parameters(self, name=None):
"""Internal method to obtain the parameters needed for calculation.
This method is used by the simulation, residuals and the noise
methods as well as other methods that need parameters values as arrays.
Parameters
----------
name: str, optional
string with the name of the pastas.stressmodel object.
Returns
-------
p: numpy.ndarray
Numpy array with the parameters used in the time series model.
"""
if name:
p = self.parameters.loc[self.parameters.name == name]
else:
p = self.parameters
if p.optimal.hasnans:
self.logger.warning(
"Model is not optimized yet, initial parameters are used.")
parameters = p.initial
else:
parameters = p.optimal
return parameters.values
@get_stressmodel
def get_contribution(self, name, tmin=None, tmax=None, freq=None,
warmup=None, istress=None, return_warmup=False,
parameters=None):
"""Method to get the contribution of a stressmodel.
Parameters
----------
name: str
String with the name of the stressmodel.
tmin: str, optional
String with a start date for the simulation period (E.g. '1980').
If none is provided, the tmin from the oseries is used.
tmax: str, optional
String with an end date for the simulation period (E.g. '2010').
If none is provided, the tmax from the oseries is used.
freq: str, optional
String with the frequency the stressmodels are simulated. Must
be one of the following: (D, h, m, s, ms, us, ns) or a multiple of
that e.g. "7D".
warmup: float/int, optional
Warmup period (in Days).
istress: int, optional
When multiple stresses are present in a stressmodel, this keyword
can be used to obtain the contribution of an individual stress.
return_warmup: bool, optional
Include warmup in contribution calculation or not.
parameters: list or numpy.ndarray
iterable with the parameters. If none, the optimal parameters are
used when available, initial otherwise.
Returns
-------
contrib: pandas.Series
Pandas Series with the contribution.
"""
if parameters is None:
parameters = self.get_parameters(name)
if tmin is None:
tmin = self.settings['tmin']
if tmax is None:
tmax = self.settings['tmax']
if freq is None:
freq = self.settings["freq"]
if warmup is None:
warmup = self.settings["warmup"]
else:
warmup = pd.Timedelta(days=warmup)
# use warmup
if tmin:
tmin_warm = pd.Timestamp(tmin)  # api: pandas.Timestamp
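# --- Hedged standalone sketch (not pastas library code): illustrates the tmin
# --- handling described in get_tmin above. A user-supplied tmin is capped by the
# --- observation index, floored to the simulation frequency, and shifted by the
# --- model time offset. All names and values below are illustrative only.
import pandas as pd
_oseries_index = pd.date_range("2001-01-05 09:00", periods=100, freq="D")
_user_tmin = pd.Timestamp("2000-12-01")            # earlier than the series start
_freq, _time_offset = "D", pd.Timedelta(hours=9)   # assumed model settings
_tmin = max(_user_tmin, _oseries_index.min())      # rule B.2: check against oseries
_tmin = _tmin.floor(_freq) + _time_offset          # rule C: align to freq + offset
# _tmin is now Timestamp('2001-01-05 09:00:00')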
import datetime
import hashlib
import os
import time
from warnings import (
catch_warnings,
simplefilter,
)
import numpy as np
import pytest
import pandas as pd
from pandas import (
DataFrame,
DatetimeIndex,
Index,
MultiIndex,
Series,
Timestamp,
concat,
date_range,
timedelta_range,
)
import pandas._testing as tm
from pandas.tests.io.pytables.common import (
_maybe_remove,
ensure_clean_path,
ensure_clean_store,
safe_close,
)
_default_compressor = "blosc"
ignore_natural_naming_warning = pytest.mark.filterwarnings(
"ignore:object name:tables.exceptions.NaturalNameWarning"
)
from pandas.io.pytables import (
HDFStore,
read_hdf,
)
pytestmark = pytest.mark.single_cpu
def test_context(setup_path):
with tm.ensure_clean(setup_path) as path:
try:
with HDFStore(path) as tbl:
raise ValueError("blah")
except ValueError:
pass
with tm.ensure_clean(setup_path) as path:
with HDFStore(path) as tbl:
tbl["a"] = tm.makeDataFrame()
assert len(tbl) == 1
assert type(tbl["a"]) == DataFrame
def test_no_track_times(setup_path):
# GH 32682
# enables to set track_times (see `pytables` `create_table` documentation)
def checksum(filename, hash_factory=hashlib.md5, chunk_num_blocks=128):
h = hash_factory()
with open(filename, "rb") as f:
for chunk in iter(lambda: f.read(chunk_num_blocks * h.block_size), b""):
h.update(chunk)
return h.digest()
def create_h5_and_return_checksum(track_times):
with ensure_clean_path(setup_path) as path:
df = DataFrame({"a": [1]})
with HDFStore(path, mode="w") as hdf:
hdf.put(
"table",
df,
format="table",
data_columns=True,
index=None,
track_times=track_times,
)
return checksum(path)
checksum_0_tt_false = create_h5_and_return_checksum(track_times=False)
checksum_0_tt_true = create_h5_and_return_checksum(track_times=True)
# sleep is necessary to create h5 with different creation time
time.sleep(1)
checksum_1_tt_false = create_h5_and_return_checksum(track_times=False)
checksum_1_tt_true = create_h5_and_return_checksum(track_times=True)
# checksums are the same if track_time = False
assert checksum_0_tt_false == checksum_1_tt_false
# checksums are NOT same if track_time = True
assert checksum_0_tt_true != checksum_1_tt_true
def test_iter_empty(setup_path):
with ensure_clean_store(setup_path) as store:
# GH 12221
assert list(store) == []
def test_repr(setup_path):
with ensure_clean_store(setup_path) as store:
repr(store)
store.info()
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeStringSeries()
store["c"] = tm.makeDataFrame()
df = tm.makeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["bool1"] = df["A"] > 0
df["bool2"] = df["B"] > 0
df["bool3"] = True
df["int1"] = 1
df["int2"] = 2
df["timestamp1"] = Timestamp("20010102")
df["timestamp2"] = Timestamp("20010103")
df["datetime1"] = datetime.datetime(2001, 1, 2, 0, 0)
df["datetime2"] = datetime.datetime(2001, 1, 3, 0, 0)
df.loc[df.index[3:6], ["obj1"]] = np.nan
df = df._consolidate()._convert(datetime=True)
with catch_warnings(record=True):
simplefilter("ignore", pd.errors.PerformanceWarning)
store["df"] = df
# make a random group in hdf space
store._handle.create_group(store._handle.root, "bah")
assert store.filename in repr(store)
assert store.filename in str(store)
store.info()
# storers
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
store.append("df", df)
s = store.get_storer("df")
repr(s)
str(s)
@pytest.mark.filterwarnings("ignore:object name:tables.exceptions.NaturalNameWarning")
def test_contains(setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeDataFrame()
store["foo/bar"] = tm.makeDataFrame()
assert "a" in store
assert "b" in store
assert "c" not in store
assert "foo/bar" in store
assert "/foo/bar" in store
assert "/foo/b" not in store
assert "bar" not in store
# gh-2694: tables.NaturalNameWarning
with catch_warnings(record=True):
store["node())"] = tm.makeDataFrame()
assert "node())" in store
def test_versioning(setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeDataFrame()
df = tm.makeTimeDataFrame()
_maybe_remove(store, "df1")
store.append("df1", df[:10])
store.append("df1", df[10:])
assert store.root.a._v_attrs.pandas_version == "0.15.2"
assert store.root.b._v_attrs.pandas_version == "0.15.2"
assert store.root.df1._v_attrs.pandas_version == "0.15.2"
# write a file and wipe its versioning
_maybe_remove(store, "df2")
store.append("df2", df)
# this is an error because its table_type is appendable, but no
# version info
store.get_node("df2")._v_attrs.pandas_version = None
msg = "'NoneType' object has no attribute 'startswith'"
with pytest.raises(Exception, match=msg):
store.select("df2")
@pytest.mark.parametrize(
"where, expected",
[
(
"/",
{
"": ({"first_group", "second_group"}, set()),
"/first_group": (set(), {"df1", "df2"}),
"/second_group": ({"third_group"}, {"df3", "s1"}),
"/second_group/third_group": (set(), {"df4"}),
},
),
(
"/second_group",
{
"/second_group": ({"third_group"}, {"df3", "s1"}),
"/second_group/third_group": (set(), {"df4"}),
},
),
],
)
def test_walk(where, expected):
# GH10143
objs = {
"df1": DataFrame([1, 2, 3]),
"df2": DataFrame([4, 5, 6]),
"df3": DataFrame([6, 7, 8]),
"df4": DataFrame([9, 10, 11]),
"s1": Series([10, 9, 8]),
# Next 3 items aren't pandas objects and should be ignored
"a1": np.array([[1, 2, 3], [4, 5, 6]]),
"tb1": np.array([(1, 2, 3), (4, 5, 6)], dtype="i,i,i"),
"tb2": np.array([(7, 8, 9), (10, 11, 12)], dtype="i,i,i"),
}
with ensure_clean_store("walk_groups.hdf", mode="w") as store:
store.put("/first_group/df1", objs["df1"])
store.put("/first_group/df2", objs["df2"])
store.put("/second_group/df3", objs["df3"])
store.put("/second_group/s1", objs["s1"])
store.put("/second_group/third_group/df4", objs["df4"])
# Create non-pandas objects
store._handle.create_array("/first_group", "a1", objs["a1"])
store._handle.create_table("/first_group", "tb1", obj=objs["tb1"])
store._handle.create_table("/second_group", "tb2", obj=objs["tb2"])
assert len(list(store.walk(where=where))) == len(expected)
for path, groups, leaves in store.walk(where=where):
assert path in expected
expected_groups, expected_frames = expected[path]
assert expected_groups == set(groups)
assert expected_frames == set(leaves)
for leaf in leaves:
frame_path = "/".join([path, leaf])
obj = store.get(frame_path)
if "df" in leaf:
tm.assert_frame_equal(obj, objs[leaf])
else:
tm.assert_series_equal(obj, objs[leaf])
def test_getattr(setup_path):
with ensure_clean_store(setup_path) as store:
s = tm.makeTimeSeries()
store["a"] = s
# test attribute access
result = store.a
tm.assert_series_equal(result, s)
result = getattr(store, "a")
tm.assert_series_equal(result, s)
df = tm.makeTimeDataFrame()
store["df"] = df
result = store.df
tm.assert_frame_equal(result, df)
# errors
for x in ["d", "mode", "path", "handle", "complib"]:
msg = f"'HDFStore' object has no attribute '{x}'"
with pytest.raises(AttributeError, match=msg):
getattr(store, x)
# not stores
for x in ["mode", "path", "handle", "complib"]:
getattr(store, f"_{x}")
def test_store_dropna(setup_path):
df_with_missing = DataFrame(
{"col1": [0.0, np.nan, 2.0], "col2": [1.0, np.nan, np.nan]},
index=list("abc"),
)
df_without_missing = DataFrame(
{"col1": [0.0, 2.0], "col2": [1.0, np.nan]}, index=list("ac")
)
# # Test to make sure defaults are to not drop.
# # Corresponding to Issue 9382
with ensure_clean_path(setup_path) as path:
df_with_missing.to_hdf(path, "df", format="table")
reloaded = read_hdf(path, "df")
tm.assert_frame_equal(df_with_missing, reloaded)
with ensure_clean_path(setup_path) as path:
df_with_missing.to_hdf(path, "df", format="table", dropna=False)
reloaded = read_hdf(path, "df")
tm.assert_frame_equal(df_with_missing, reloaded)
with ensure_clean_path(setup_path) as path:
df_with_missing.to_hdf(path, "df", format="table", dropna=True)
reloaded = read_hdf(path, "df")
tm.assert_frame_equal(df_without_missing, reloaded)
def test_to_hdf_with_min_itemsize(setup_path):
with ensure_clean_path(setup_path)  # api: pandas.tests.io.pytables.common.ensure_clean_path
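# --- Hedged sketch only: the body of test_to_hdf_with_min_itemsize is truncated
# --- above, so the following is an illustrative stand-in rather than the original
# --- test. It relies on the imports at the top of this module and shows the
# --- documented min_itemsize option, which reserves string-column width in table
# --- format so that later, longer values still fit.
def _min_itemsize_sketch(setup_path):
    df = DataFrame({"C": ["x"]})
    with ensure_clean_path(setup_path) as path:
        df.to_hdf(path, "df", format="table", min_itemsize={"C": 10})
        reloaded = read_hdf(path, "df")
        tm.assert_frame_equal(df, reloaded)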
# '''
# (c) REACT LAB, Harvard University
# Author: <NAME>
# '''
#!/usr/bin/env python3
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import json
import glob, os
import numpy as np
import argparse
SMALL_SIZE = 8
MEDIUM_SIZE = 10
BIGGER_SIZE = 12
plt.rc('font', size=MEDIUM_SIZE) # controls default text sizes
plt.rc('axes', titlesize=BIGGER_SIZE) # fontsize of the axes title
plt.rc('axes', labelsize=BIGGER_SIZE) # fontsize of the x and y labels
plt.rc('xtick', labelsize=MEDIUM_SIZE) # fontsize of the tick labels
plt.rc('ytick', labelsize=MEDIUM_SIZE) # fontsize of the tick labels
plt.rc('legend', fontsize=MEDIUM_SIZE) # legend fontsize
plt.rc('figure', titlesize=BIGGER_SIZE) # fontsize of the figure title
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--file", help="Channel data file")
args = parser.parse_args()
f = open(args.file,"r")
data_json = json.loads(f.read())
#Sort the keys numerically to preserve order.
d = data_json["channel_packets"]
data_json_sorted = json.dumps({int(x):d[x] for x in d.keys()}, sort_keys=True)
data_json = json.loads(data_json_sorted)
traj = pd.DataFrame.from_dict(data_json, orient="index")  # api: pandas.DataFrame.from_dict
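# --- Hedged standalone illustration of the key-sorting step in main() above.
# --- JSON object keys are strings, so "10" would sort before "2"; casting keys to
# --- int before re-dumping with sort_keys=True restores numeric packet order.
# --- Toy data only; the field name is illustrative.
def _sorted_packets_example():
    toy = {"10": {"rssi": -40}, "2": {"rssi": -42}, "1": {"rssi": -41}}
    ordered = json.loads(json.dumps({int(k): toy[k] for k in toy}, sort_keys=True))
    traj = pd.DataFrame.from_dict(ordered, orient="index")
    return list(traj.index)  # ['1', '2', '10']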
import pandas as pd
def merge_sets(alexa_set, dga_set):
# Concatenate the domains
merged = pd.concat([alexa_set, dga_set], ignore_index=True)
return merged
if __name__ == '__main__':
import argparse
import os
parser = argparse.ArgumentParser('merge_sets.py')
parser.add_argument('alexa_set', help='Alexa set')
parser.add_argument('dga_set', help='DGA set')
parser.add_argument('set_type', help='Set type (training or test)')
parser.add_argument('output_dir', help='Directory to save the merged set')
args = parser.parse_args()
# Load sets
alexa_set = pd.read_pickle(args.alexa_set)
    dga_set = pd.read_pickle(args.dga_set)  # api: pandas.read_pickle
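    # --- Hedged continuation (assumed): the script presumably merges the two sets
    # --- and writes the result to output_dir. The filename convention below is
    # --- illustrative only; it is not given in the original.
    merged_set = merge_sets(alexa_set, dga_set)
    output_file = os.path.join(args.output_dir, 'merged_{}_set.pkl'.format(args.set_type))
    merged_set.to_pickle(output_file)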
# coding: utf-8
# # Extract Load and Dispatch Signals
# Data from AEMO's MMSDM database are used to construct historic load and dispatch signals. To illustrate how these signals can be constructed we extract data for one month as a sample. As the schema is the same for all MMSDM files, signals from different periods can be constructed by changing the csv file imported. Also, signals with longer time horizons can be constructed by chaining together data from multiple months.
#
# ## Procedure
# 1. Import packages and declare paths to directories
# 2. Import datasets
# 3. Pivot dataframes extracting data from desired columns
# 4. Save data to file
#
# ## Import packages
# In[1]:
import os
import pandas as pd
# ## Paths to directories
# In[2]:
# Core data directory (common files)
data_dir = os.path.abspath(os.path.join(os.path.curdir, os.path.pardir, os.path.pardir, 'data'))
# MMSDM data directory
mmsdm_dir = os.path.join(data_dir, 'AEMO', 'MMSDM')
# Output directory
output_dir = os.path.abspath(os.path.join(os.path.curdir, 'output'))
# ## Datasets
# ### MMSDM
# A summary of the tables used from AEMO's MMSDM database [1] is given below:
#
# | Table | Description |
# | :----- | :----- |
# |DISPATCH_UNIT_SCADA | MW dispatch at 5 minute (dispatch) intervals for DUIDs within the NEM.|
# |TRADINGREGIONSUM | Contains load in each NEM region at 30 minute (trading) intervals.|
#
# #### Unit Dispatch
# Parse and save unit dispatch data. Note that dispatch in MW is given at 5min intervals, and that the time resolution of demand data is 30min intervals, corresponding to the length of a trading period in the NEM. To align the time resolution of these signals unit dispatch data are aggregated, with mean power output over 30min intervals computed for each DUID.
# In[3]:
# Unit dispatch data
df_DISPATCH_UNIT_SCADA = pd.read_csv(os.path.join(mmsdm_dir, 'PUBLIC_DVD_DISPATCH_UNIT_SCADA_201706010000.CSV'),
skiprows=1, skipfooter=1, engine='python')
# Convert to datetime objects
df_DISPATCH_UNIT_SCADA['SETTLEMENTDATE'] = pd.to_datetime(df_DISPATCH_UNIT_SCADA['SETTLEMENTDATE'])  # api: pandas.to_datetime
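# A hedged sketch (assumption, not part of the original notebook cell): the
# 30-minute aggregation described above, taking the mean MW per DUID over each
# trading interval. The MMSDM column names (DUID, SCADAVALUE) and the right-closed,
# right-labelled interval convention are assumptions here.
df_DISPATCH_30min = (df_DISPATCH_UNIT_SCADA
                     .set_index('SETTLEMENTDATE')
                     .groupby('DUID')['SCADAVALUE']
                     .resample('30min', label='right', closed='right')
                     .mean()
                     .reset_index())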
# AUTOGENERATED! DO NOT EDIT! File to edit: notebooks/04_Create_Acs_Indicators_Original.ipynb (unless otherwise specified).
__all__ = ['racdiv', 'pasi', 'elheat', 'empl', 'fam', 'female', 'femhhs', 'heatgas', 'hh40inc', 'hh60inc', 'hh75inc',
'hhchpov', 'hhm75', 'hhpov', 'hhs', 'hsdipl', 'lesshs', 'male', 'nilf', 'othrcom', 'p2more', 'pubtran',
'age5', 'age24', 'age64', 'age18', 'age65', 'affordm', 'affordr', 'bahigher', 'carpool', 'drvalone',
'hh25inc', 'mhhi', 'nohhint', 'novhcl', 'paa', 'ppac', 'phisp', 'pwhite', 'sclemp', 'tpop', 'trav14',
'trav29', 'trav45', 'trav44', 'unempl', 'unempr', 'walked']
# Cell
#File: racdiv.py
#Author: <NAME>
#Date: 4/16/19
#Section: Bnia
#Email: <EMAIL>
#Description:
# Uses ACS Table B02001 - Race
# Universe: Total Population
# Uses ACS Table B03002 - HISPANIC OR LATINO ORIGIN BY RACE
# Universe: Total Population
# Table Creates: racdiv, paa, pwhite, pasi, phisp, p2more, ppac
#purpose:
#input: Year
#output:
import pandas as pd
import glob
def racdiv( year ):
def getColName (df, col): return df.columns[df.columns.str.contains(pat = col)][0]
def getColByName (df, col): return df[getColName(df, col)]
def addKey(df, fi, col):
key = getColName(df, col)
val = getColByName(df, col)
fi[key] = val
return fi
def nullIfEqual(df, c1, c2):
return df.apply(lambda x:
x[getColName(df, c1)]+x[getColName(df, c2)] if x[getColName(df, c1)]+x[getColName(df, c2)] != 0 else 0, axis=1)
def sumInts(df): return df.sum(numeric_only=True)
#~~~~~~~~~~~~~~~
# Step 1)
# Fetch Tract Files w/CSA Labels by Name from the 2_cleaned folder.
#~~~~~~~~~~~~~~~
fileName = ''
for name in glob.glob('AcsDataClean/B02001*5y'+str(year)+'_est.csv'):
fileName = name
df = pd.read_csv( fileName, index_col=0 )
fileName = ''
for name in glob.glob('AcsDataClean/B03002*5y'+str(year)+'_est.csv'):
fileName = name
df_hisp = pd.read_csv( fileName, index_col=0 )
# Aggregate by CSA
# Group By CSA so that they may be operated on
df = df.groupby('CSA')
df_hisp = df_hisp.groupby('CSA')
# Aggregate Numeric Values by Sum
df = df.sum(numeric_only=True)
df_hisp = df_hisp.sum(numeric_only=True)
# Append the one column from the other ACS Table
df['B03002_012E_Total_Hispanic_or_Latino'] = df_hisp['B03002_012E_Total_Hispanic_or_Latino']
df1 = pd.DataFrame()
df1['CSA'] = df.index
df1.set_index('CSA', drop = True, inplace = True)
df1['African-American%'] = df[ 'B02001_003E_Total_Black_or_African_American_alone' ] / df[ 'B02001_001E_Total' ] * 100
df1['White%'] = df[ 'B02001_002E_Total_White_alone' ] / df[ 'B02001_001E_Total' ] * 100
df1['American Indian%'] = df[ 'B02001_004E_Total_American_Indian_and_Alaska_Native_alone' ]/ df[ 'B02001_001E_Total' ] * 100
df1['Asian%'] = df[ 'B02001_005E_Total_Asian_alone' ] / df[ 'B02001_001E_Total' ] * 100
df1['Native Hawaii/Pac Islander%'] = df[ 'B02001_006E_Total_Native_Hawaiian_and_Other_Pacific_Islander_alone'] / df[ 'B02001_001E_Total' ] * 100
df1['Hisp %'] = df['B03002_012E_Total_Hispanic_or_Latino'] / df[ 'B02001_001E_Total' ] * 100
# =1-(POWER(%AA/100,2)+POWER(%White/100,2)+POWER(%AmerInd/100,2)+POWER(%Asian/100,2) + POWER(%NativeAm/100,2))*(POWER(%Hispanic/100,2) + POWER(1-(%Hispanic/100),2))
df1['Diversity_index'] = ( 1- (
( df1['African-American%'] /100 )**2
+( df1['White%'] /100 )**2
+( df1['American Indian%'] /100 )**2
+( df1['Asian%'] /100 )**2
+( df1['Native Hawaii/Pac Islander%'] /100 )**2
)*(
( df1['Hisp %'] /100 )**2
+(1-( df1['Hisp %'] /100) )**2
) ) * 100
return df1['Diversity_index']
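# Hedged worked example of the diversity index formula above (toy shares, not
# real CSA data): 60% AA, 30% White, 0% AmerInd, 5% Asian, 5% NH/PI, 10% Hispanic.
_race_shares = [0.60, 0.30, 0.00, 0.05, 0.05]
_hisp_share = 0.10
_diversity = (1 - sum(s ** 2 for s in _race_shares)) \
    * (_hisp_share ** 2 + (1 - _hisp_share) ** 2) * 100   # ~= 44.7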
# Cell
#File: pasi.py
#Author: <NAME>
#Date: 4/16/19
#Section: Bnia
#Email: <EMAIL>
#Description:
# Uses ACS Table B03002 - HISPANIC OR LATINO ORIGIN BY RACE
# Universe: Total Population
# Table Creates: racdiv, paa, pwhite, pasi, phisp, p2more, ppac
#purpose:
#input: Year
#output:
import pandas as pd
import glob
def pasi( year ):
def getColName (df, col): return df.columns[df.columns.str.contains(pat = col)][0]
def getColByName (df, col): return df[getColName(df, col)]
def addKey(df, fi, col):
key = getColName(df, col)
val = getColByName(df, col)
fi[key] = val
return fi
def nullIfEqual(df, c1, c2):
return df.apply(lambda x:
x[getColName(df, c1)]+x[getColName(df, c2)] if x[getColName(df, c1)]+x[getColName(df, c2)] != 0 else 0, axis=1)
def sumInts(df): return df.sum(numeric_only=True)
#~~~~~~~~~~~~~~~
# Step 1)
# Fetch Tract Files w/CSA Labels by Name from the 2_cleaned folder.
#~~~~~~~~~~~~~~~
fileName = ''
for name in glob.glob('AcsDataClean/B03002*5y'+str(year)+'_est.csv'):
fileName = name
df = pd.read_csv( fileName, index_col=0 )
# Aggregate by CSA
# Group By CSA so that they may be operated on
df = df.groupby('CSA')
# Aggregate Numeric Values by Sum
df = df.sum(numeric_only=True)
# Append the one column from the other ACS Table
df['B03002_012E_Total_Hispanic_or_Latino']
df1 = pd.DataFrame()
df1['CSA'] = df.index
df1.set_index('CSA', drop = True, inplace = True)
tot = df[ 'B03002_001E_Total' ]
df1['Asian%NH'] = df[ 'B03002_006E_Total_Not_Hispanic_or_Latino_Asian_alone' ]/ tot * 100
return df1['Asian%NH']
# Cell
#File: elheat.py
#Author: <NAME>
#Date: 1/17/19
#Section: Bnia
#Email: <EMAIL>
#Description:
# Uses ACS Table B25040 - HOUSE HEATING FUEL
# Universe - Occupied housing units
# Table Creates: elheat, heatgas
#purpose: Produce Sustainability - Percent of Residences Heated by Electricity Indicator
#input: Year
#output:
import pandas as pd
import glob
def elheat( year ):
def getColName (df, col): return df.columns[df.columns.str.contains(pat = col)][0]
def getColByName (df, col): return df[getColName(df, col)]
def addKey(df, fi, col):
key = getColName(df, col)
val = getColByName(df, col)
fi[key] = val
return fi
def nullIfEqual(df, c1, c2):
return df.apply(lambda x:
x[getColName(df, c1)]+x[getColName(df, c2)] if x[getColName(df, c1)]+x[getColName(df, c2)] != 0 else 0, axis=1)
def sumInts(df): return df.sum(numeric_only=True)
#~~~~~~~~~~~~~~~
# Step 1)
# Fetch Tract Files w/CSA Labels by Name from the 2_cleaned folder.
#~~~~~~~~~~~~~~~
fileName = ''
for name in glob.glob('AcsDataClean/B25040*5y'+str(year)+'_est.csv'):
fileName = name
df = pd.read_csv( fileName, index_col=0 )
# Aggregate by CSA
# Group By CSA so that they may be operated on
df = df.groupby('CSA')
# Aggregate Numeric Values by Sum
df = sumInts(df)
# Add 'BALTIMORE' which is the SUM of all the CSAs
#~~~~~~~~~~~~~~~
# Step 2)
# Prepare the columns
#~~~~~~~~~~~~~~~
# Final Dataframe
fi = pd.DataFrame()
columns = ['B25040_004E','B25040_001E']
for col in columns:
fi = addKey(df, fi, col)
# Numerators
numerators = pd.DataFrame()
columns = ['B25040_004E']
for col in columns:
numerators = addKey(df, numerators, col)
# Denominators
denominators = pd.DataFrame()
columns = ['B25040_001E']
for col in columns:
denominators = addKey(df, denominators, col)
# construct the denominator, returns 0 iff the other two rows are equal.
#~~~~~~~~~~~~~~~
# Step 3)
# Run the Calculation + final mods
# ( value[1] / nullif(value[2],0) )*100
#~~~~~~~~~~~~~~~
fi['numerator'] = numerators.sum(axis=1)
fi['denominator'] = denominators.sum(axis=1)
fi = fi[fi['denominator'] != 0] # Delete Rows where the 'denominator' column is 0
fi['final'] = (fi['numerator'] / fi['denominator'] ) * 100
return fi['final']
"""
/* <elheat_14> */ --
WITH tbl AS (
select csa,
( value[1] / nullif(value[2],0) )*100::numeric as result
from vital_signs.get_acs_vars_csa_and_bc('2014',ARRAY['B25040_004E','B25040_001E'])
)
update vital_signs.data
set elheat = result from tbl where data2.csa = tbl.csa and update_data_year = '2014' and data_year = '2014';
"""
# Cell
#File: empl.py
#Author: <NAME>
#Date: 1/17/19
#Section: Bnia
#Email: <EMAIL>
#Description:
# Uses ACS Table B23001 - SEX BY AGE BY EMPLOYMENT STATUS FOR THE POPULATION 16 YEARS AND OVER
# Universe - Population 16 years and over
# Table Creates: empl, unempl, unempr, nilf
#purpose: Produce Workforce and Economic Development - Percent Population 16-64 Employed Indicator
#input: Year
#output:
import pandas as pd
import glob
def empl( year ):
def getColName (df, col): return df.columns[df.columns.str.contains(pat = col)][0]
def getColByName (df, col): return df[getColName(df, col)]
def addKey(df, fi, col):
key = getColName(df, col)
val = getColByName(df, col)
fi[key] = val
return fi
def nullIfEqual(df, c1, c2):
return df.apply(lambda x:
x[getColName(df, c1)]+x[getColName(df, c2)] if x[getColName(df, c1)]+x[getColName(df, c2)] != 0 else 0, axis=1)
def sumInts(df): return df.sum(numeric_only=True)
#~~~~~~~~~~~~~~~
# Step 1)
# Fetch Tract Files w/CSA Lables by Name from the 2_cleaned folder.
#~~~~~~~~~~~~~~~
fileName = ''
for name in glob.glob('AcsDataClean/B23001*5y'+str(year)+'_est.csv'):
fileName = name
df = pd.read_csv( fileName, index_col=0 )
# Aggregate by CSA
# Group By CSA so that they may be operated on
df = df.groupby('CSA')
# Aggregate Numeric Values by Sum
df = sumInts(df)
# Add 'BALTIMORE' which is the SUM of all the CSAs
#~~~~~~~~~~~~~~~
# Step 2)
# Prepare the columns
#~~~~~~~~~~~~~~~
# Final Dataframe
fi = pd.DataFrame()
columns = ['B23001_003E', 'B23001_010E', 'B23001_017E', 'B23001_024E', 'B23001_031E', 'B23001_038E', 'B23001_045E', 'B23001_052E', 'B23001_059E', 'B23001_066E', 'B23001_089E', 'B23001_096E', 'B23001_103E', 'B23001_110E', 'B23001_117E', 'B23001_124E', 'B23001_131E', 'B23001_138E', 'B23001_145E', 'B23001_152E', 'B23001_007E', 'B23001_014E', 'B23001_021E', 'B23001_028E', 'B23001_035E', 'B23001_042E', 'B23001_049E', 'B23001_056E', 'B23001_063E', 'B23001_070E', 'B23001_093E', 'B23001_100E', 'B23001_107E', 'B23001_114E', 'B23001_121E', 'B23001_128E', 'B23001_135E', 'B23001_142E', 'B23001_149E', 'B23001_156E']
for col in columns:
fi = addKey(df, fi, col)
# Numerators
numerators = pd.DataFrame()
columns = ['B23001_007E', 'B23001_014E', 'B23001_021E', 'B23001_028E', 'B23001_035E', 'B23001_042E', 'B23001_049E', 'B23001_056E', 'B23001_063E', 'B23001_070E', 'B23001_093E', 'B23001_100E', 'B23001_107E', 'B23001_114E', 'B23001_121E', 'B23001_128E', 'B23001_135E', 'B23001_142E', 'B23001_149E', 'B23001_156E']
for col in columns:
numerators = addKey(df, numerators, col)
# Denominators
denominators = pd.DataFrame()
columns = ['B23001_003E', 'B23001_010E', 'B23001_017E', 'B23001_024E', 'B23001_031E', 'B23001_038E', 'B23001_045E', 'B23001_052E', 'B23001_059E', 'B23001_066E', 'B23001_089E', 'B23001_096E', 'B23001_103E', 'B23001_110E', 'B23001_117E', 'B23001_124E', 'B23001_131E', 'B23001_138E', 'B23001_145E', 'B23001_152E']
for col in columns:
denominators = addKey(df, denominators, col)
# construct the denominator, returns 0 iff the other two rows are equal.
#~~~~~~~~~~~~~~~
# Step 3)
# Run the Calculation
# (value[21]+value[22]+value[23]+value[24]+value[25]+value[26]+value[27]+value[28]+value[29]+value[30]+value[31]+value[32]+value[33]+value[34]+value[35]+value[36]+value[37]+value[38]+value[39]+value[40]) --civil labor force empl 16-64
#/
#nullif( (value[1]+value[2]+value[3]+value[4]+value[5]+value[6]+value[7]+value[8]+value[9]+value[10]+value[11]+value[12]+value[13]+value[14]+value[15]+value[16]+value[17]+value[18]+value[19]+value[20]) -- population 16 to 64 ,0) )*100
#~~~~~~~~~~~~~~~
fi['numerator'] = numerators.sum(axis=1)
fi['denominator'] = denominators.sum(axis=1)
fi = fi[fi['denominator'] != 0] # Delete Rows where the 'denominator' column is 0
fi['final'] = (fi['numerator'] / fi['denominator'] ) * 100
return fi['final']
"""
/* <empl_14> */ --
WITH tbl AS (
select csa,
( ( value[21]+value[22]+value[23]+value[24]+value[25]+value[26]+value[27]+value[28]+value[29]+value[30]+value[31]+value[32]+value[33]+value[34]+value[35]+value[36]+value[37]+value[38]+value[39]+value[40]) --civil labor force empl 16-64 / nullif( (value[1]+value[2]+value[3]+value[4]+value[5]+value[6]+value[7]+value[8]+value[9]+value[10]+value[11]+value[12]+value[13]+value[14]+value[15]+value[16]+value[17]+value[18]+value[19]+value[20]) -- population 16 to 64 ,0) )*100::numeric
as result
from vital_signs.get_acs_vars_csa_and_bc('2014',ARRAY[ 'B23001_003E','B23001_010E','B23001_017E','B23001_024E','B23001_031E','B23001_038E','B23001_045E','B23001_052E','B23001_059E','B23001_066E','B23001_089E','B23001_096E','B23001_103E','B23001_110E','B23001_117E','B23001_124E','B23001_131E','B23001_138E','B23001_145E','B23001_152E','B23001_007E','B23001_014E','B23001_021E','B23001_028E','B23001_035E','B23001_042E','B23001_049E','B23001_056E','B23001_063E','B23001_070E','B23001_093E','B23001_100E','B23001_107E','B23001_114E','B23001_121E','B23001_128E','B23001_135E','B23001_142E','B23001_149E','B23001_156E'])
)
update vital_signs.data
set empl = result from tbl where data2.csa = tbl.csa and update_data_year = '2014' and data_year = '2014';
"""
# Cell
#File: fam.py
#Author: <NAME>
#Date: 4/16/19
#Section: Bnia
#Email: <EMAIL>
#Description:
# Uses ACS Table B11005 - HOUSEHOLDS BY PRESENCE OF PEOPLE UNDER 18 YEARS BY HOUSEHOLD TYPE
# Universe: Households
# Table Creates: hhs, fam, femhhs
#purpose:
#input: Year
#output:
import pandas as pd
import glob
def fam( year ):
def getColName (df, col): return df.columns[df.columns.str.contains(pat = col)][0]
def getColByName (df, col): return df[getColName(df, col)]
def addKey(df, fi, col):
key = getColName(df, col)
val = getColByName(df, col)
fi[key] = val
return fi
def nullIfEqual(df, c1, c2):
return df.apply(lambda x:
x[getColName(df, c1)]+x[getColName(df, c2)] if x[getColName(df, c1)]+x[getColName(df, c2)] != 0 else 0, axis=1)
def sumInts(df): return df.sum(numeric_only=True)
#~~~~~~~~~~~~~~~
# Step 1)
# Fetch Tract Files w/CSA Labels by Name from the 2_cleaned folder.
#~~~~~~~~~~~~~~~
fileName = ''
for name in glob.glob('AcsDataClean/B11005*5y'+str(year)+'_est.csv'):
fileName = name
df = pd.read_csv( fileName, index_col=0 )
# Aggregate by CSA
# Group By CSA so that they may be operated on
df = df.groupby('CSA')
# Aggregate Numeric Values by Sum
df = df.sum(numeric_only=True)
df1 = pd.DataFrame()
df1['CSA'] = df.index
df1.set_index('CSA', drop = True, inplace = True)
# DIFFERENCES IN TABLE NAMES EXIST BETWEEN 16 and 17. 17 has no comma.
rootStr = 'B11005_007E_Total_Households_with_one_or_more_people_under_18_years_Family_households_Other_family_Female_householder'
str16 = rootStr + ',_no_husband_present'
str17 = rootStr + '_no_husband_present'
# Aggregate by CSA
# Group By CSA so that they may be operated on
df = df.groupby('CSA')
# Aggregate Numeric Values by Sum
df = df.sum(numeric_only=True)
# Delete Unassigned--Jail
df = df[df.index != 'Unassigned--Jail']
# Move Baltimore to Bottom
bc = df.loc[ 'Baltimore City' ]
df = df.drop( df.index[1] )
df.loc[ 'Baltimore City' ] = bc
df1 = pd.DataFrame()
df1['CSA'] = df.index
df1.set_index('CSA', drop = True, inplace = True)
# Actually produce the data
df1['total'] = df[ 'B11005_001E_Total' ]
df1['18Under'] = df[ 'B11005_002E_Total_Households_with_one_or_more_people_under_18_years' ] / df1['total'] * 100
return df1['18Under']
# Cell
#File: female.py
#Author: <NAME>
#Date: 4/16/19
#Section: Bnia
#Email: <EMAIL>
#Description:
# Uses ACS Table B01001 - SEX BY AGE
# Universe: Total population
# Table Creates: tpop, female, male, age5, age18, age24, age64, age65
#purpose:
#input: Year
#output:
import pandas as pd
import glob
def female( year ):
def getColName (df, col): return df.columns[df.columns.str.contains(pat = col)][0]
def getColByName (df, col): return df[getColName(df, col)]
def addKey(df, fi, col):
key = getColName(df, col)
val = getColByName(df, col)
fi[key] = val
return fi
def nullIfEqual(df, c1, c2):
return df.apply(lambda x:
x[getColName(df, c1)]+x[getColName(df, c2)] if x[getColName(df, c1)]+x[getColName(df, c2)] != 0 else 0, axis=1)
def sumInts(df): return df.sum(numeric_only=True)
#~~~~~~~~~~~~~~~
# Step 1)
# Fetch Tract Files w/CSA Labels by Name from the 2_cleaned folder.
#~~~~~~~~~~~~~~~
fileName = ''
for name in glob.glob('AcsDataClean/B01001*5y'+str(year)+'_est.csv'):
fileName = name
df = pd.read_csv( fileName, index_col=0 )
# Aggregate by CSA
# Group By CSA so that they may be operated on
df = df.groupby('CSA')
# Aggregate Numeric Values by Sum
df = df.sum(numeric_only=True)
# df.columns
total = df['B01001_001E_Total']
df1 = pd.DataFrame()
df1['CSA'] = df.index
df1.set_index('CSA', drop = True, inplace = True)
df1['onlyTheLadies'] = df[ 'B01001_026E_Total_Female' ]
return df1['onlyTheLadies']
# Cell
#File: femhhs.py
#Author: <NAME>
#Date: 4/16/19
#Section: Bnia
#Email: <EMAIL>
#Description:
# Uses ACS Table B11005 - HOUSEHOLDS BY PRESENCE OF PEOPLE UNDER 18 YEARS BY HOUSEHOLD TYPE
# Universe: Households
# Table Creates: male, hhs, fam, femhhs
#purpose:
#input: Year
#output:
import pandas as pd
import glob
def femhhs( year ):
def getColName (df, col): return df.columns[df.columns.str.contains(pat = col)][0]
def getColByName (df, col): return df[getColName(df, col)]
def addKey(df, fi, col):
key = getColName(df, col)
val = getColByName(df, col)
fi[key] = val
return fi
def nullIfEqual(df, c1, c2):
return df.apply(lambda x:
x[getColName(df, c1)]+x[getColName(df, c2)] if x[getColName(df, c1)]+x[getColName(df, c2)] != 0 else 0, axis=1)
def sumInts(df): return df.sum(numeric_only=True)
#~~~~~~~~~~~~~~~
# Step 1)
# Fetch Tract Files w/CSA Labels by Name from the 2_cleaned folder.
#~~~~~~~~~~~~~~~
fileName = ''
for name in glob.glob('AcsDataClean/B11005*5y'+str(year)+'_est.csv'):
fileName = name
df = pd.read_csv( fileName, index_col=0 )
# Aggregate by CSA
# Group By CSA so that they may be operated on
df = df.groupby('CSA')
# Aggregate Numeric Values by Sum
df = df.sum(numeric_only=True)
df1 = pd.DataFrame()
df1['CSA'] = df.index
df1.set_index('CSA', drop = True, inplace = True)
# DIFFERENCES IN TABLE NAMES EXIST BETWEEN 16 and 17. 17 has no comma.
rootStr = 'B11005_007E_Total_Households_with_one_or_more_people_under_18_years_Family_households_Other_family_Female_householder'
str16 = rootStr + ',_no_husband_present'
str17 = rootStr + '_no_husband_present'
str19 = rootStr + ',_no_spouse_present'
femhh = str17 if year == '17' else str19 if year == '19' else str16
# Actually produce the data
df1['total'] = df[ 'B11005_001E_Total' ]
df1['18Under'] = df[ 'B11005_002E_Total_Households_with_one_or_more_people_under_18_years' ] / df1['total'] * 100
df1['FemaleHH'] = df[ femhh ] / df['B11005_002E_Total_Households_with_one_or_more_people_under_18_years'] * 100
df1['FamHHChildrenUnder18'] = df['B11005_003E_Total_Households_with_one_or_more_people_under_18_years_Family_households']
df1['FamHHChildrenOver18'] = df['B11005_012E_Total_Households_with_no_people_under_18_years_Family_households']
df1['FamHH'] = df1['FamHHChildrenOver18'] + df1['FamHHChildrenUnder18']
return df1['FemaleHH']
# Cell
#File: heatgas.py
#Author: <NAME>
#Date: 1/17/19
#Section: Bnia
#Email: <EMAIL>
#Description:
# Uses ACS Table B25040 - HOUSE HEATING FUEL
# Universe - Occupied housing units
# Table Creates: elheat, heatgas
#purpose: Produce Sustainability - Percent of Residences Heated by Gas Indicator
#input: Year
#output:
import pandas as pd
import glob
def heatgas( year ):
def getColName (df, col): return df.columns[df.columns.str.contains(pat = col)][0]
def getColByName (df, col): return df[getColName(df, col)]
def addKey(df, fi, col):
key = getColName(df, col)
val = getColByName(df, col)
fi[key] = val
return fi
def nullIfEqual(df, c1, c2):
return df.apply(lambda x:
x[getColName(df, c1)]+x[getColName(df, c2)] if x[getColName(df, c1)]+x[getColName(df, c2)] != 0 else 0, axis=1)
def sumInts(df): return df.sum(numeric_only=True)
#~~~~~~~~~~~~~~~
# Step 1)
# Fetch Tract Files w/CSA Labels by Name from the 2_cleaned folder.
#~~~~~~~~~~~~~~~
fileName = ''
for name in glob.glob('AcsDataClean/B25040*5y'+str(year)+'_est.csv'):
fileName = name
df = pd.read_csv( fileName, index_col=0 )
# Aggregate by CSA
# Group By CSA so that they may be operated on
df = df.groupby('CSA')
# Aggregate Numeric Values by Sum
df = sumInts(df)
# Add 'BALTIMORE' which is the SUM of all the CSAs
#~~~~~~~~~~~~~~~
# Step 2)
# Prepare the columns
#~~~~~~~~~~~~~~~
# Final Dataframe
fi = pd.DataFrame()
columns = ['B25040_002E','B25040_001E']
for col in columns:
fi = addKey(df, fi, col)
# Numerators
numerators = pd.DataFrame()
columns = ['B25040_002E']
for col in columns:
numerators = addKey(df, numerators, col)
# Denominators
denominators = pd.DataFrame()
columns = ['B25040_001E']
for col in columns:
denominators = addKey(df, denominators, col)
# construct the denominator, returns 0 iff the other two rows are equal.
#~~~~~~~~~~~~~~~
# Step 3)
# Run the Calculation
# ( value[1] / nullif(value[2],0) )*100
#~~~~~~~~~~~~~~~
fi['numerator'] = numerators.sum(axis=1)
fi['denominator'] = denominators.sum(axis=1)
fi = fi[fi['denominator'] != 0] # Delete Rows where the 'denominator' column is 0
fi['final'] = (fi['numerator'] / fi['denominator'] ) * 100
return fi['final']
"""
/* <heatgas_14> */ --
WITH tbl AS (
select csa,
( value[1] / nullif(value[2],0) )*100::numeric as result
from vital_signs.get_acs_vars_csa_and_bc('2014',ARRAY['B25040_002E','B25040_001E'])
)
update vital_signs.data
set heatgas = result from tbl where data2.csa = tbl.csa and update_data_year = '2014' and data_year = '2014';
"""
# Cell
#File: hh40inc.py
#Author: <NAME>
#Date: 1/17/19
#Section: Bnia
#Email: <EMAIL>
#Description:
# Uses ACS Table B19001 - HOUSEHOLD INCOME V
# HOUSEHOLD INCOME IN THE PAST 12 MONTHS (IN 2017 INFLATION-ADJUSTED DOLLARS)
# Table Creates: hh25 hh40 hh60 hh75 hhm75, mhhi
#purpose: Produce Household Income 25K-40K Indicator
#input: Year
#output:
import pandas as pd
import glob
def hh40inc( year ):
def getColName (df, col): return df.columns[df.columns.str.contains(pat = col)][0]
def getColByName (df, col): return df[getColName(df, col)]
def nullIfEqual(df, c1, c2):
return df.apply(lambda x:
x[getColName(df, c1)]+x[getColName(df, c2)] if x[getColName(df, c1)]+x[getColName(df, c2)] != 0 else 0, axis=1)
def sumInts(df): return df.sum(numeric_only=True)
#~~~~~~~~~~~~~~~
# Step 1)
# Fetch Tract Files w/CSA Labels by Name from the 2_cleaned folder.
#~~~~~~~~~~~~~~~
fileName = ''
for name in glob.glob('AcsDataClean/B19001*5y'+str(year)+'_est.csv'):
fileName = name
df = pd.read_csv( fileName, index_col=0 )
# Aggregate by CSA
# Group By CSA so that they may be operated on
df = df.groupby('CSA')
# Aggregate Numeric Values by Sum
df = sumInts(df)
# Add 'BALTIMORE' which is the SUM of all the CSAs
#~~~~~~~~~~~~~~~
# Step 2)
# Prepare the columns
#~~~~~~~~~~~~~~~
# val1.__class__.__name__
#
# create a new dataframe for giggles
    fi = pd.DataFrame()  # api: pandas.DataFrame
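    # --- Hedged continuation (assumed): the rest of hh40inc is truncated above.
    # --- Mirroring the elheat/heatgas pattern, hh40inc is read here as the share of
    # --- households earning $25,000-$39,999, i.e. B19001 brackets 006E-008E over
    # --- total households 001E. The exact bracket columns are an assumption.
    fi['B19001_001E'] = getColByName(df, 'B19001_001E')
    for col in ['B19001_006E', 'B19001_007E', 'B19001_008E']:
        fi[col] = getColByName(df, col)
    fi = fi[fi['B19001_001E'] != 0]   # drop rows with zero households
    fi['final'] = (fi['B19001_006E'] + fi['B19001_007E'] + fi['B19001_008E']) \
        / fi['B19001_001E'] * 100
    return fi['final']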
import json
import os
import subprocess
import h5py
import uuid
from installed_clients.KBaseReportClient import KBaseReport
from installed_clients.DataFileUtilClient import DataFileUtil
from pprint import pprint
from shutil import copy
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import random
import re
class ReactiveTransportSimulatorUtil:
PREPDE_TOOLKIT_PATH = '/kb/module/lib/ReactiveTransportSimulator/Utils'
def _generate_html_report(self):
report = "<html> <head> ReactiveTransportSimulator-KBase report </head> <body> </body> </html>"
return report
class ReactiveTransportSimulatorRunBatchUtil:
def __init__(self,params):
self.params = params
self.callback_url = os.environ['SDK_CALLBACK_URL']
self.dfu = DataFileUtil(self.callback_url)
self.output_files = []
self.html_files = []
self.data_folder = os.path.abspath('./data/')
self.shared_folder = params['shared_folder']
self.scratch_folder = os.path.join(params['shared_folder'],"scratch")
def run_batch_model(self):
print('params:',self.params)
try:
os.mkdir(self.scratch_folder)
except OSError:
print ("Creation of the directory %s failed" % self.scratch_folder)
else:
print ("Successfully created the directory %s " % self.scratch_folder)
# move file templates from data folder to scratch folder
pflotran_input_temp = os.path.join(self.data_folder,'batch_template.in')
pflotran_db_temp = os.path.join(self.data_folder,'database_template.dat')
pflotran_input = os.path.join(self.scratch_folder,'batch.in')
pflotran_db = os.path.join(self.scratch_folder,'database.dat')
stoi_csv_fba = os.path.join(self.scratch_folder,'rxn_fba.csv')
cpd_csv_fba = os.path.join(self.scratch_folder,'cpd_fba.csv')
# read inputs
print("Input FBA model: ",self.params['input_FBA_model'])
dfu = DataFileUtil(self.callback_url)
fba_model = dfu.get_objects({'object_refs': [self.params['input_FBA_model']]})['data'][0]
print("FBA model name :",fba_model['data']['name'])
nrxn = int(self.params['number_simulated_reactions'])
tot_time = float(self.params['simulation_time'])
timestep = float(self.params['snapshot_period'])
temperature = float(self.params['temperature'])
# collect the compound info
cpdid2formula = dict()
df_cpd = pd.DataFrame({'formula':[None]})
for compound in fba_model['data']['modelcompounds']:
cpdid2formula[compound['id']] = compound['formula']
if 'biom' in compound['id']:
df_cpd = df_cpd.append({'formula':'BIOMASS'}, ignore_index=True)
else:
df_cpd = df_cpd.append({'formula':compound['formula']}, ignore_index=True)
df_cpd.insert(len(df_cpd.columns),'initial_concentration(mol/L)',1,True)
df_cpd['formula'].replace('', np.nan, inplace=True)
df_cpd = df_cpd.dropna()
df_cpd.to_csv(cpd_csv_fba,index=False)
print("Compounds saved. \n")
# collect donor, acceptor, biom from reactions
"""
donor : "~/modelcompounds/id/xcpd2_c0"
acceptor : "~/modelcompounds/id/acceptor_c0"
biom : "~/modelcompounds/id/biom_c0"
"""
rxn_ref = ['r'+str(i+1) for i in range(nrxn)]
df_rxn = pd.DataFrame({'rxn_ref':rxn_ref,'rxn_id':None,'DOC_formula':None})
# selected_reactions = random.choices(fba_model['data']['modelreactions'],k=nrxn)
selected_reactions = []
selected_cpd = []
i = 0
while i < nrxn:
irxn = random.choice(fba_model['data']['modelreactions'])
acceptor_flag = False
for reagent in irxn['modelReactionReagents']:
cpdid = reagent['modelcompound_ref'].split('/id/')[1]
if 'acceptor' in cpdid:
acceptor_flag = True
if 'xcpd' in cpdid:
doc = cpdid2formula[cpdid]
selected_cpd.append(doc)
if acceptor_flag and selected_cpd.count(doc) == 1:
selected_reactions.append(irxn)
i += 1
for reaction_idx,reaction_val in enumerate(selected_reactions):
df_rxn['rxn_id'].iloc[reaction_idx] = reaction_val['id']
for reagent in reaction_val['modelReactionReagents']:
cpdid = reagent['modelcompound_ref'].split('/id/')[1]
formula = cpdid2formula[cpdid]
coef = reagent['coefficient']
if "xcpd" in cpdid:
df_rxn['DOC_formula'].iloc[reaction_idx] = formula
if "biom" in cpdid:
formula = 'BIOMASS'
if not formula in df_rxn.columns:
temp = ['0']*df_rxn.shape[0]
df_rxn.insert(len(df_rxn.columns),formula,temp,True)
df_rxn[formula].iloc[reaction_idx] = coef
else:
df_rxn[formula].iloc[reaction_idx] = coef
print(df_rxn.columns)
print(df_rxn.head())
df_rxn.to_csv(stoi_csv_fba,index=False)
print("Selected reactions saved. \n")
# read initial condition from /bin/module/data
init_cond = cpd_csv_fba
# generate sandbox file
sb_file = os.path.join(self.scratch_folder,'reaction_sandbox_pnnl_cyber.F90')
var = ['mu_max','vh','k_deg','cc','activation_energy','reference_temperature']
var_unit = ['1/sec','m^3','1/sec','M','J/mol','K']
generate_sandbox_code(nrxn,var,var_unit,sb_file,stoi_csv_fba)
print("Sandbox file generated.")
# format sandbox fortran code
fmt_sb_cmd = 'fprettify ' + sb_file
process = subprocess.Popen(fmt_sb_cmd.split(), stdout=subprocess.PIPE)
output, error = process.communicate()
print("Sandbox file formatted.")
# copy sandbox file to src dir and recompile pflotran
src_dir = '/bin/pflotran/src/pflotran'
copy(sb_file,src_dir)
print(os.getcwd())
compile_pflotran_cmd = 'sh ./data/compile.sh'
process = subprocess.Popen(compile_pflotran_cmd.split(), stdout=subprocess.PIPE)
output, error = process.communicate()
print("Compile PFLOTRAN output:",output[-300:])
print("Complile PFLOTRAN err:",error)
pprint(os.listdir(self.scratch_folder))
# generate batch input deck
self.generate_pflotran_input_batch(pflotran_input_temp,stoi_csv_fba,cpd_csv_fba,pflotran_input,tot_time,timestep,temperature)
print("Batch input deck generated.")
# generate database
update_pflotran_database(stoi_csv_fba,pflotran_db_temp,pflotran_db)
print("Database generated.")
# running pflotran
exepath = '/bin/pflotran/src/pflotran/pflotran'
run_pflotran_cmd = exepath + ' -n 1 -pflotranin ' + pflotran_input
process = subprocess.Popen(run_pflotran_cmd.split(), stdout=subprocess.PIPE)
output, error = process.communicate()
print("Running PFLOTRAN output:",output[-300:])
print("Running PFLOTRAN err:",error)
pprint(os.listdir(self.scratch_folder))
h5_file = os.path.join(self.scratch_folder,'batch.h5')
if os.path.isfile(h5_file):
print ("Successfully run PFLOTRAN")
else:
print ("Fail to run PFLOTRAN")
# generate plots in /kb/module/work/tmp/scratch/
self.plot_time_series_batch(h5_file)
# Attach output
self.output_files.append(
{'path': cpd_csv_fba,
'name': os.path.basename(cpd_csv_fba),
'label': os.path.basename(cpd_csv_fba),
'description': 'compounds'}
)
self.output_files.append(
{'path': stoi_csv_fba,
'name': os.path.basename(stoi_csv_fba),
'label': os.path.basename(stoi_csv_fba),
'description': 'reactions stoichiometry table'}
)
self.output_files.append(
{'path': sb_file,
'name': os.path.basename(sb_file),
'label': os.path.basename(sb_file),
'description': 'Sandbox source code'}
)
self.output_files.append(
{'path': pflotran_input,
'name': os.path.basename(pflotran_input),
'label': os.path.basename(pflotran_input),
'description': 'Batch reaction input deck for PFLOTRAN'}
)
self.output_files.append(
{'path': pflotran_db,
'name': os.path.basename(pflotran_db),
'label': os.path.basename(pflotran_db),
'description': 'Batch reaction input deck for PFLOTRAN'}
)
self.output_files.append(
{'path': h5_file,
'name': os.path.basename(h5_file),
'label': os.path.basename(h5_file),
'description': 'H5 file generated by PFLOTRAN batch reaction'}
)
fig_name = 'time_series_plot.png'
fig_file = os.path.join(self.scratch_folder,fig_name)
self.output_files.append(
{'path': fig_file,
'name': os.path.basename(fig_file),
'label': os.path.basename(fig_file),
'description': 'Plots of breakthrough curves generated by PFLOTRAN batch reaction'}
)
# Return the report
return self._generate_html_report()
def generate_pflotran_input_batch(self,batch_file,stoi_file,init_file,output_file,tot_time,timestep,temp):
file = open(batch_file,'r')
rxn_df = pd.read_csv(stoi_file)
init_df = pd.read_csv(init_file)
primary_species_charge = []
primary_species_nocharge = []
for spec in list(rxn_df.columns):
if spec in ['rxn_id','DOC_formula','rxn_ref','H2O','BIOMASS']:
continue
primary_species_nocharge.append(spec)
if spec=='NH4':
primary_species_charge.append('NH4+')
continue
if spec=='HCO3':
primary_species_charge.append('HCO3-')
continue
if spec=='H':
primary_species_charge.append('H+')
continue
if spec=='HS':
primary_species_charge.append('HS-')
continue
if spec=='HPO4':
primary_species_charge.append('HPO4-')
continue
primary_species_charge.append(spec)
init_cond = [init_df.loc[init_df['formula']==i,'initial_concentration(mol/L)'].iloc[0] for i in primary_species_nocharge]
init_biom = init_df.loc[init_df['formula']=='BIOMASS','initial_concentration(mol/L)'].iloc[0]
for idx,val in enumerate(primary_species_nocharge):
print("The initial concentration of {} is {} mol/L \n".format(val,init_cond[idx]))
pri_spec = ""
pri_spec_init = ""
new_file_content = ""
for line in file:
if 'PRIMARY_SPECIES' in line:
new_file_content += line
for i in primary_species_charge:
pri_spec += " " + i + "\n"
new_file_content += " " + pri_spec + "\n"
elif 'CONSTRAINT initial' in line:
new_file_content += line
new_file_content += " CONCENTRATIONS" + "\n"
for j in range(len(primary_species_charge)):
new_file_content += " {} {} T".format(primary_species_charge[j],init_cond[j])+ "\n"
new_file_content += " /" + "\n"
new_file_content += " IMMOBILE" + "\n"
new_file_content += " BIOMASS {} ".format(init_biom) + "\n"
new_file_content += " /"
elif 'FINAL_TIME' in line:
new_file_content += " FINAL_TIME {} h".format(tot_time) + "\n"
elif 'MAXIMUM_TIMESTEP_SIZE' in line:
new_file_content += " MAXIMUM_TIMESTEP_SIZE {} h".format(timestep) + "\n"
elif 'PERIODIC TIME' in line:
new_file_content += " PERIODIC TIME {} h".format(timestep) + "\n"
elif 'REFERENCE_TEMPERATURE' in line:
new_file_content += " REFERENCE_TEMPERATURE {} ! degrees C".format(temp) + "\n"
else:
new_file_content += line
writing_file = open(output_file, "w")
writing_file.write(new_file_content)
writing_file.close()
print('The batch input deck is updated.')
return
def plot_time_series_batch(self,h5_file):
obs_coord = [0.5,0.5,0.5]
file = h5py.File(h5_file,'r+')
time_str = [list(file.keys())[i] for i in range(len(list(file.keys()))) if list(file.keys())[i][0:4] == "Time"]
time_unit = time_str[0][-1]
time = sorted([float(time_str[i].split()[1]) for i in range(len(time_str))])
bound = []
bound.append(file['Coordinates']['X [m]'][0])
bound.append(file['Coordinates']['X [m]'][-1])
bound.append(file['Coordinates']['Y [m]'][0])
bound.append(file['Coordinates']['Y [m]'][-1])
bound.append(file['Coordinates']['Z [m]'][0])
bound.append(file['Coordinates']['Z [m]'][-1])
nxyz = []
nxyz.append(len(file['Coordinates']['X [m]'])-1)
nxyz.append(len(file['Coordinates']['Y [m]'])-1)
nxyz.append(len(file['Coordinates']['Z [m]'])-1)
x_coord = (np.linspace(bound[0],bound[1],nxyz[0]+1)[:-1]+np.linspace(bound[0],bound[1],nxyz[0]+1)[1:])/2
y_coord = (np.linspace(bound[2],bound[3],nxyz[1]+1)[:-1]+np.linspace(bound[2],bound[3],nxyz[1]+1)[1:])/2
z_coord = (np.linspace(bound[4],bound[5],nxyz[2]+1)[:-1]+np.linspace(bound[4],bound[5],nxyz[2]+1)[1:])/2
x_idx = np.argmin(np.absolute(x_coord-obs_coord[0]))
y_idx = np.argmin(np.absolute(y_coord-obs_coord[1]))
z_idx = np.argmin(np.absolute(z_coord-obs_coord[2]))
time_zero = "Time:"+str(" %12.5E" % 0)+str(" %s" % time_unit)
var_name = [x for x in list(file[time_zero].keys()) if 'Total' in x]
var_value = np.zeros((len(var_name),len(time)))
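        # var_value[j, i] holds the 'Total ...' concentration of species j at the
        # observation cell for snapshot i, read from the "Time: <value> <unit>"
        # groups of the HDF5 output.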
for i, itime in enumerate(time):
time_slice = "Time:"+str(" %12.5E" % itime)+str(" %s" % time_unit)
# print(file[time_slice][var_name].keys())
for j in range(len(var_name)):
var_value[j,i] = file[time_slice][var_name[j]][x_idx][y_idx][z_idx]
fig = plt.figure(num=1,dpi=150)
first_doc = True
for i in range(len(var_name)):
if var_name[i][6] == 'C':
if first_doc == True:
plt.plot(time,var_value[i,:],label='DOCs',color='k')[0]
first_doc = False
else:
plt.plot(time,var_value[i,:],color='k')[0]
else:
plt.plot(time,var_value[i,:],label=var_name[i])[0]
plt.ioff()
plt.xlabel("Time (%s)" %time_unit)
ylabel = 'Concentration [M]'
plt.ylabel(ylabel)
plt.legend(frameon=False,loc='upper center', bbox_to_anchor=(0.5, -0.15),ncol=3)
fig_name = 'time_series_plot.png'
fig_path = os.path.join(self.scratch_folder,fig_name)
plt.savefig(fig_path,dpi=150,bbox_inches='tight')
if os.path.isfile(fig_path):
print ("Successfully generated time series plot")
else:
print ("Fail to generate time series plot")
return
def visualize_hdf_in_html(self):
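        # Bundle the PFLOTRAN text output and the time-series figure into a small
        # HTML summary page, then upload the output directory to Shock through
        # DataFileUtil so it can be attached to the KBase report.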
output_directory = os.path.join(self.shared_folder,'output')
os.makedirs(output_directory)
print("output dir:", output_directory)
html_file = os.path.join(output_directory,'summary.html')
fig_name = 'time_series_plot.png'
pflotran_out_name = 'batch.out'
fig_path = os.path.join(self.scratch_folder,fig_name)
pflotran_out_path = os.path.join(self.scratch_folder,pflotran_out_name)
if os.path.isfile(fig_path):
print ("Time series plot exists")
else:
print ("Time series plot does not exist")
print("figpath:",fig_path)
if os.path.isfile(pflotran_out_path):
print ("PFLOTRAN output exists")
else:
print ("PFLOTRAN output does not exist")
print("figpath:",pflotran_out_path)
copy(fig_path,'/kb/module/work/tmp/output')
copy(pflotran_out_path,'/kb/module/work/tmp/output')
with open(html_file, 'w') as f:
f.write("""
<!DOCTYPE html>
<html>
<body>
<h1>PFLOTRAN-KBbase</h1>
<p>PFLOTRAN output</p>
<embed src="batch.out" width="480" height="960">
            <p>Visualize PFLOTRAN output</p>
            <img src="{}" alt="Time series plot" height="360" width="480">
</body>
</html>
""".format(fig_name))
with open(html_file, 'r') as f:
print("html_file:",f.readlines())
report_shock_id = self.dfu.file_to_shock({'file_path': output_directory,
'pack': 'zip'})['shock_id']
return {'shock_id': report_shock_id,
'name': os.path.basename(html_file),
'label': os.path.basename(html_file),
'description': 'HTML summary report for run_batch_model App'}
def _generate_html_report(self):
# Get the workspace name from the parameters
ws_name = self.params["workspace"]
# Visualize the result in html
html_report_viz_file = self.visualize_hdf_in_html()
self.html_files.append(html_report_viz_file)
# Save the html to the report dictionary
report_params = {
# message is an optional field.
# A string that appears in the summary section of the result page
'message': "Say something...",
# A list of typed objects created during the execution
# of the App. This can only be used to refer to typed
# objects in the workspace and is separate from any files
# generated by the app.
# See a working example here:
# https://github.com/kbaseapps/kb_deseq/blob/586714d/lib/kb_deseq/Utils/DESeqUtil.py#L262-L264
# 'objects_created': objects_created_in_app,
# A list of strings that can be used to alert the user
# 'warnings': warnings_in_app,
# The workspace name or ID is included in every report
'workspace_name': ws_name,
# A list of paths or Shock IDs pointing to
# a single flat file. They appear in Files section
'file_links': self.output_files,
# HTML files that appear in “Links”
'html_links': self.html_files,
'direct_html_link_index': 0,
'html_window_height': 333,
} # end of report_params
# Make the client, generate the report
kbase_report_client = KBaseReport(self.callback_url)
output = kbase_report_client.create_extended_report(report_params)
# Return references which will allow inline display of
# the report in the Narrative
report_output = {'report_name': output['name'],
'report_ref': output['ref']}
return report_output
class ReactiveTransportSimulatorRun1DUtil:
def __init__(self,params):
self.params = params
self.callback_url = os.environ['SDK_CALLBACK_URL']
self.dfu = DataFileUtil(self.callback_url)
self.output_files = []
self.html_files = []
self.data_folder = os.path.abspath('./data/')
self.shared_folder = params['shared_folder']
self.scratch_folder = os.path.join(params['shared_folder'],"scratch")
def run_1d_model(self):
print('params:',self.params)
try:
os.mkdir(self.scratch_folder)
except OSError:
print ("Creation of the directory %s failed" % self.scratch_folder)
else:
print ("Successfully created the directory %s " % self.scratch_folder)
# move file templates from data folder to scratch folder
pflotran_input_temp = os.path.join(self.data_folder,'column_template.in')
pflotran_db_temp = os.path.join(self.data_folder,'database_template.dat')
pflotran_input = os.path.join(self.scratch_folder,'column.in')
pflotran_db = os.path.join(self.scratch_folder,'database.dat')
stoi_csv_fba = os.path.join(self.scratch_folder,'rxn_fba.csv')
cpd_csv_fba = os.path.join(self.scratch_folder,'cpd_fba.csv')
# read inputs
print("Input FBA model: ",self.params['input_FBA_model'])
dfu = DataFileUtil(self.callback_url)
fba_model = dfu.get_objects({'object_refs': [self.params['input_FBA_model']]})['data'][0]
print("FBA model name :",fba_model['data']['name'])
nrxn = int(self.params['number_simulated_reactions'])
velocity = float(self.params['velocity'])
length = float(self.params['length'])
ngrid = int(self.params['number_grids'])
tot_time = float(self.params['simulation_time'])
timestep = float(self.params['snapshot_period'])
temperature = float(self.params['temperature'])
# collect the compound info
cpdid2formula = dict()
df_cpd = pd.DataFrame({'formula':[None]})
for compound in fba_model['data']['modelcompounds']:
cpdid2formula[compound['id']] = compound['formula']
if 'biom' in compound['id']:
df_cpd = df_cpd.append({'formula':'BIOMASS'}, ignore_index=True)
else:
df_cpd = df_cpd.append({'formula':compound['formula']}, ignore_index=True)
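        # Assign default concentrations to every compound: 0.01 mol/L initially
        # (0.001 mol/L for BIOMASS) and 1 mol/L at the inlet (0 for BIOMASS).
        # The values are written to cpd_fba.csv and read back when the 1D input
        # deck is generated.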
df_cpd.insert(len(df_cpd.columns),'initial_concentration(mol/L)',0.01,True)
df_cpd.loc[df_cpd.formula == 'BIOMASS', 'initial_concentration(mol/L)'] = 0.001
df_cpd.insert(len(df_cpd.columns),'inlet_concentration(mol/L)',1,True)
df_cpd.loc[df_cpd.formula == 'BIOMASS', 'inlet_concentration(mol/L)'] = 0
df_cpd['formula'].replace('', np.nan, inplace=True)
df_cpd = df_cpd.dropna()
df_cpd.to_csv(cpd_csv_fba,index=False)
print("Compounds saved. \n")
# collect donor, acceptor, biom from reactions
"""
donor : "~/modelcompounds/id/xcpd2_c0"
acceptor : "~/modelcompounds/id/acceptor_c0"
biom : "~/modelcompounds/id/biom_c0"
"""
rxn_ref = ['r'+str(i+1) for i in range(nrxn)]
df_rxn = pd.DataFrame({'rxn_ref':rxn_ref,'rxn_id':None,'DOC_formula':None})
# selected_reactions = random.choices(fba_model['data']['modelreactions'],k=nrxn)
selected_reactions = []
selected_cpd = []
i = 0
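        # Randomly sample reactions from the FBA model until nrxn are collected;
        # a reaction is kept only if it involves an electron acceptor compound
        # ('acceptor') and a DOC compound ('xcpd') that has not been selected
        # before, so each selected reaction consumes a distinct DOC.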
while i < nrxn:
irxn = random.choice(fba_model['data']['modelreactions'])
acceptor_flag = False
for reagent in irxn['modelReactionReagents']:
cpdid = reagent['modelcompound_ref'].split('/id/')[1]
if 'acceptor' in cpdid:
acceptor_flag = True
if 'xcpd' in cpdid:
doc = cpdid2formula[cpdid]
selected_cpd.append(doc)
if acceptor_flag and selected_cpd.count(doc) == 1:
selected_reactions.append(irxn)
i += 1
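        # Build the stoichiometry table: one row per selected reaction and one
        # column per reagent formula holding its stoichiometric coefficient; the
        # biomass compound is renamed to 'BIOMASS' and the DOC formula is stored
        # in its own column.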
for reaction_idx,reaction_val in enumerate(selected_reactions):
df_rxn['rxn_id'].iloc[reaction_idx] = reaction_val['id']
for reagent in reaction_val['modelReactionReagents']:
cpdid = reagent['modelcompound_ref'].split('/id/')[1]
formula = cpdid2formula[cpdid]
coef = reagent['coefficient']
if "xcpd" in cpdid:
df_rxn['DOC_formula'].iloc[reaction_idx] = formula
if "biom" in cpdid:
formula = 'BIOMASS'
if not formula in df_rxn.columns:
temp = ['0']*df_rxn.shape[0]
df_rxn.insert(len(df_rxn.columns),formula,temp,True)
df_rxn[formula].iloc[reaction_idx] = coef
else:
df_rxn[formula].iloc[reaction_idx] = coef
print(df_rxn.columns)
print(df_rxn.head())
df_rxn.to_csv(stoi_csv_fba,index=False)
print("Selected reactions saved. \n")
# read initial and boundary conditions from /bin/module/data
init_cond = cpd_csv_fba
# generate sandbox file
sb_file = os.path.join(self.scratch_folder,'reaction_sandbox_pnnl_cyber.F90')
var = ['mu_max','vh','k_deg','cc','activation_energy','reference_temperature']
var_unit = ['1/sec','m^3','1/sec','M','J/mol','K']
generate_sandbox_code(nrxn,var,var_unit,sb_file,stoi_csv_fba)
print("Sandbox file generated.")
# format sandbox fortran code
fmt_sb_cmd = 'fprettify ' + sb_file
process = subprocess.Popen(fmt_sb_cmd.split(), stdout=subprocess.PIPE)
output, error = process.communicate()
print("Sandbox file formatted.")
# copy sandbox file to src dir and recompile pflotran
src_dir = '/bin/pflotran/src/pflotran'
copy(sb_file,src_dir)
print(os.getcwd())
compile_pflotran_cmd = 'sh ./data/compile.sh'
process = subprocess.Popen(compile_pflotran_cmd.split(), stdout=subprocess.PIPE)
output, error = process.communicate()
print("Compile PFLOTRAN output:",output[-300:])
print("Complile PFLOTRAN err:",error)
pprint(os.listdir(self.scratch_folder))
# generate 1d input deck
self.generate_pflotran_input_1d(pflotran_input_temp,stoi_csv_fba,cpd_csv_fba,
pflotran_input,velocity,length,ngrid,tot_time,timestep,temperature)
print("Batch input deck generated.")
# generate database
update_pflotran_database(stoi_csv_fba,pflotran_db_temp,pflotran_db)
print("Database generated.")
# running pflotran
exepath = '/bin/pflotran/src/pflotran/pflotran'
run_pflotran_cmd = exepath + ' -n 1 -pflotranin ' + pflotran_input
process = subprocess.Popen(run_pflotran_cmd.split(), stdout=subprocess.PIPE)
output, error = process.communicate()
print("Running PFLOTRAN output:",output[-300:])
print("Running PFLOTRAN err:",error)
pprint(os.listdir(self.scratch_folder))
h5_file = os.path.join(self.scratch_folder,'column.h5')
if os.path.isfile(h5_file):
print ("Successfully run PFLOTRAN")
else:
print ("Fail to run PFLOTRAN")
# generate plots in /kb/module/work/tmp/scratch/
# self.plot_time_series_batch(h5_file)
# Attach output
self.output_files.append(
{'path': cpd_csv_fba,
'name': os.path.basename(cpd_csv_fba),
'label': os.path.basename(cpd_csv_fba),
'description': 'compounds'}
)
self.output_files.append(
{'path': stoi_csv_fba,
'name': os.path.basename(stoi_csv_fba),
'label': os.path.basename(stoi_csv_fba),
'description': 'reactions stoichiometry table'}
)
self.output_files.append(
{'path': sb_file,
'name': os.path.basename(sb_file),
'label': os.path.basename(sb_file),
'description': 'Sandbox source code'}
)
self.output_files.append(
{'path': pflotran_input,
'name': os.path.basename(pflotran_input),
'label': os.path.basename(pflotran_input),
'description': '1d column reaction input deck for PFLOTRAN'}
)
self.output_files.append(
{'path': pflotran_db,
'name': os.path.basename(pflotran_db),
'label': os.path.basename(pflotran_db),
            'description': 'reaction database for PFLOTRAN 1d column simulation'}
)
self.output_files.append(
{'path': h5_file,
'name': os.path.basename(h5_file),
'label': os.path.basename(h5_file),
'description': 'H5 file generated by PFLOTRAN 1d column reaction'}
)
# fig_name = 'time_series_plot.png'
# fig_file = os.path.join(self.scratch_folder,fig_name)
# self.output_files.append(
# {'path': fig_file,
# 'name': os.path.basename(fig_file),
# 'label': os.path.basename(fig_file),
# 'description': 'Plots of breakthrough curves generated by PFLOTRAN batch reaction'}
# )
# Return the report
return self._generate_html_report()
def generate_pflotran_input_1d(self,template,stoi_file,icbc_file,output_file,
velocity,length,ngrid,tot_time,timestep,temp):
file = open(template,'r')
rxn_df = pd.read_csv(stoi_file)
init_df = pd.read_csv(icbc_file)
primary_species_charge = []
primary_species_nocharge = []
for spec in list(rxn_df.columns):
if spec in ['rxn_id','DOC_formula','rxn_ref','H2O','BIOMASS']:
continue
primary_species_nocharge.append(spec)
if spec=='NH4':
primary_species_charge.append('NH4+')
continue
if spec=='HCO3':
primary_species_charge.append('HCO3-')
continue
if spec=='H':
primary_species_charge.append('H+')
continue
if spec=='HS':
primary_species_charge.append('HS-')
continue
if spec=='HPO4':
primary_species_charge.append('HPO4-')
continue
primary_species_charge.append(spec)
init_cond = [init_df.loc[init_df['formula']==i,'initial_concentration(mol/L)'].iloc[0] for i in primary_species_nocharge]
init_biom = init_df.loc[init_df['formula']=='BIOMASS','initial_concentration(mol/L)'].iloc[0]
inlet_cond = [init_df.loc[init_df['formula']==i,'inlet_concentration(mol/L)'].iloc[0] for i in primary_species_nocharge]
inlet_biom = init_df.loc[init_df['formula']=='BIOMASS','inlet_concentration(mol/L)'].iloc[0]
for idx,val in enumerate(primary_species_nocharge):
print("The initial concentration of {} is {} mol/L \n".format(val,init_cond[idx]))
print("The inlet concentration of {} is {} mol/L \n".format(val,inlet_cond[idx]))
pri_spec = ""
pri_spec_init = ""
new_file_content = ""
for line in file:
if 'DATASET' in line:
new_file_content += ' DATASET {} 0 0 m/h'.format(velocity) + "\n"
elif 'NXYZ' in line:
new_file_content += ' NXYZ {} 1 1'.format(ngrid) + "\n"
elif 'PRIMARY_SPECIES' in line:
new_file_content += line
for i in primary_species_charge:
pri_spec += " " + i + "\n"
new_file_content += " " + pri_spec + "\n"
elif 'BOUNDS' in line:
new_file_content += line
new_file_content += " 0.d0 -1.d20 -1.d20" + "\n"
new_file_content += " {} 1.d20 1.d20".format(length) + "\n"
elif 'REGION outlet' in line:
new_file_content += line
new_file_content += " COORDINATES" + "\n"
new_file_content += " {} -1.d20 -1.d20".format(length) + "\n"
new_file_content += " {} -1.d20 -1.d20".format(length) + "\n"
new_file_content += " /" + "\n"
new_file_content += " FACE EAST" + "\n"
elif 'CONSTRAINT initial' in line:
new_file_content += line
new_file_content += " CONCENTRATIONS" + "\n"
for j in range(len(primary_species_charge)):
new_file_content += " {} {} T".format(primary_species_charge[j],init_cond[j])+ "\n"
new_file_content += " /" + "\n"
new_file_content += " IMMOBILE" + "\n"
new_file_content += " BIOMASS {} ".format(init_biom) + "\n"
new_file_content += " /"
elif 'CONSTRAINT inlet' in line:
new_file_content += line
new_file_content += " CONCENTRATIONS" + "\n"
for j in range(len(primary_species_charge)):
new_file_content += " {} {} T".format(primary_species_charge[j],inlet_cond[j])+ "\n"
new_file_content += " /" + "\n"
new_file_content += " IMMOBILE" + "\n"
new_file_content += " BIOMASS {} ".format(inlet_biom) + "\n"
new_file_content += " /"
elif 'FINAL_TIME' in line:
new_file_content += " FINAL_TIME {} h".format(tot_time) + "\n"
elif 'MAXIMUM_TIMESTEP_SIZE' in line:
new_file_content += " MAXIMUM_TIMESTEP_SIZE {} h".format(timestep) + "\n"
elif 'PERIODIC TIME' in line:
new_file_content += " PERIODIC TIME {} h".format(timestep) + "\n"
elif 'REFERENCE_TEMPERATURE' in line:
new_file_content += " REFERENCE_TEMPERATURE {} ! degrees C".format(temp) + "\n"
else:
new_file_content += line
writing_file = open(output_file, "w")
writing_file.write(new_file_content)
writing_file.close()
        print('The 1D column input deck is updated.')
return
def plot_time_series_batch(self,h5_file):
obs_coord = [0.5,0.5,0.5]
file = h5py.File(h5_file,'r+')
time_str = [list(file.keys())[i] for i in range(len(list(file.keys()))) if list(file.keys())[i][0:4] == "Time"]
time_unit = time_str[0][-1]
time = sorted([float(time_str[i].split()[1]) for i in range(len(time_str))])
bound = []
bound.append(file['Coordinates']['X [m]'][0])
bound.append(file['Coordinates']['X [m]'][-1])
bound.append(file['Coordinates']['Y [m]'][0])
bound.append(file['Coordinates']['Y [m]'][-1])
bound.append(file['Coordinates']['Z [m]'][0])
bound.append(file['Coordinates']['Z [m]'][-1])
nxyz = []
nxyz.append(len(file['Coordinates']['X [m]'])-1)
nxyz.append(len(file['Coordinates']['Y [m]'])-1)
nxyz.append(len(file['Coordinates']['Z [m]'])-1)
x_coord = (np.linspace(bound[0],bound[1],nxyz[0]+1)[:-1]+np.linspace(bound[0],bound[1],nxyz[0]+1)[1:])/2
y_coord = (np.linspace(bound[2],bound[3],nxyz[1]+1)[:-1]+np.linspace(bound[2],bound[3],nxyz[1]+1)[1:])/2
z_coord = (np.linspace(bound[4],bound[5],nxyz[2]+1)[:-1]+np.linspace(bound[4],bound[5],nxyz[2]+1)[1:])/2
x_idx = np.argmin(np.absolute(x_coord-obs_coord[0]))
y_idx = np.argmin(np.absolute(y_coord-obs_coord[1]))
z_idx = np.argmin(np.absolute(z_coord-obs_coord[2]))
time_zero = "Time:"+str(" %12.5E" % 0)+str(" %s" % time_unit)
var_name = [x for x in list(file[time_zero].keys()) if 'Total' in x]
var_value = np.zeros((len(var_name),len(time)))
for i, itime in enumerate(time):
time_slice = "Time:"+str(" %12.5E" % itime)+str(" %s" % time_unit)
# print(file[time_slice][var_name].keys())
for j in range(len(var_name)):
var_value[j,i] = file[time_slice][var_name[j]][x_idx][y_idx][z_idx]
fig = plt.figure(num=1,dpi=150)
first_doc = True
for i in range(len(var_name)):
if var_name[i][6] == 'C':
if first_doc == True:
plt.plot(time,var_value[i,:],label='DOCs',color='k')[0]
first_doc = False
else:
plt.plot(time,var_value[i,:],color='k')[0]
else:
plt.plot(time,var_value[i,:],label=var_name[i])[0]
plt.ioff()
plt.xlabel("Time (%s)" %time_unit)
ylabel = 'Concentration [M]'
plt.ylabel(ylabel)
plt.legend(frameon=False,loc='upper center', bbox_to_anchor=(0.5, -0.15),ncol=3)
fig_name = 'time_series_plot.png'
fig_path = os.path.join(self.scratch_folder,fig_name)
plt.savefig(fig_path,dpi=150,bbox_inches='tight')
if os.path.isfile(fig_path):
print ("Successfully generated time series plot")
else:
print ("Fail to generate time series plot")
return
def visualize_hdf_in_html(self):
output_directory = os.path.join(self.shared_folder,'output')
os.makedirs(output_directory)
print("output dir:", output_directory)
html_file = os.path.join(output_directory,'summary.html')
fig_name = 'time_series_plot.png'
        pflotran_out_name = 'column.out'
fig_path = os.path.join(self.scratch_folder,fig_name)
pflotran_out_path = os.path.join(self.scratch_folder,pflotran_out_name)
if os.path.isfile(fig_path):
print ("Time series plot exists")
else:
print ("Time series plot does not exist")
print("figpath:",fig_path)
if os.path.isfile(pflotran_out_path):
print ("PFLOTRAN output exists")
else:
print ("PFLOTRAN output does not exist")
print("figpath:",pflotran_out_path)
# copy(fig_path,'/kb/module/work/tmp/output')
# copy(pflotran_out_path,'/kb/module/work/tmp/output')
# with open(html_file, 'w') as f:
# f.write("""
# <!DOCTYPE html>
# <html>
# <body>
# <h1>PFLOTRAN-KBbase</h1>
# <p>PFLOTRAN output</p>
# <embed src="batch.out" width="480" height="960">
# <p>Visulize PFLOTRAN output</p>
# <img src="{}" alt="Time series plot" height="360" width="480"></img>
# </body>
# </html>
# """.format(fig_name))
# test
with open(html_file, 'w') as f:
f.write("""
<!DOCTYPE html>
<html>
<body>
<h1>PFLOTRAN-KBbase</h1>
<p>PFLOTRAN output</p>
<embed src="batch.out" width="480" height="960">
            <p>Visualize PFLOTRAN output</p>
            <img src="" alt="Time series plot" height="360" width="480">
</body>
</html>
""")
with open(html_file, 'r') as f:
print("html_file:",f.readlines())
report_shock_id = self.dfu.file_to_shock({'file_path': output_directory,
'pack': 'zip'})['shock_id']
return {'shock_id': report_shock_id,
'name': os.path.basename(html_file),
'label': os.path.basename(html_file),
                'description': 'HTML summary report for run_1d_model App'}
def _generate_html_report(self):
# Get the workspace name from the parameters
ws_name = self.params["workspace"]
# Visualize the result in html
html_report_viz_file = self.visualize_hdf_in_html()
self.html_files.append(html_report_viz_file)
# Save the html to the report dictionary
report_params = {
# message is an optional field.
# A string that appears in the summary section of the result page
'message': "Say something...",
# A list of typed objects created during the execution
# of the App. This can only be used to refer to typed
# objects in the workspace and is separate from any files
# generated by the app.
# See a working example here:
# https://github.com/kbaseapps/kb_deseq/blob/586714d/lib/kb_deseq/Utils/DESeqUtil.py#L262-L264
# 'objects_created': objects_created_in_app,
# A list of strings that can be used to alert the user
# 'warnings': warnings_in_app,
# The workspace name or ID is included in every report
'workspace_name': ws_name,
# A list of paths or Shock IDs pointing to
# a single flat file. They appear in Files section
'file_links': self.output_files,
# HTML files that appear in “Links”
'html_links': self.html_files,
'direct_html_link_index': 0,
'html_window_height': 333,
} # end of report_params
# Make the client, generate the report
kbase_report_client = KBaseReport(self.callback_url)
output = kbase_report_client.create_extended_report(report_params)
# Return references which will allow inline display of
# the report in the Narrative
report_output = {'report_name': output['name'],
'report_ref': output['ref']}
return report_output
def generate_sandbox_code(nrxn,var,var_unit,sb_file,stoi_file):
rxn_name = 'cyber'
rxn_df = pd.read_csv(stoi_file)
primary_species_charge = []
primary_species_nocharge = []
for spec in list(rxn_df.columns):
if spec in ['rxn_id','DOC_formula','rxn_ref','H2O']:
continue
primary_species_nocharge.append(spec)
if spec=='NH4':
primary_species_charge.append('NH4+')
continue
if spec=='HCO3':
primary_species_charge.append('HCO3-')
continue
if spec=='H':
primary_species_charge.append('H+')
continue
if spec=='HS':
primary_species_charge.append('HS-')
continue
if spec=='HPO4':
primary_species_charge.append('HPO4-')
continue
primary_species_charge.append(spec)
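    # The sandbox module is assembled as one long Fortran source string: the
    # module header, the reaction_sandbox_<name>_type definition and the
    # Create/Read/Setup/AuxiliaryPlotVariables/React/Destroy routines are
    # appended piece by piece via str.format substitutions and written to sb_file.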
sandbox_file = open(sb_file,'w+')
sb = '''
module Reaction_Sandbox_{}_class
use Reaction_Sandbox_Base_class
use Global_Aux_module
use Reactive_Transport_Aux_module
use PFLOTRAN_Constants_module
implicit none
private
#include "petsc/finclude/petscsys.h"
'''
sb = sb.format(rxn_name.capitalize())
for idx,item in enumerate(primary_species_nocharge):
sb = sb+" PetscInt, parameter :: {}_MASS_STORAGE_INDEX = {}\n".format(item,idx+1)
sb = sb+'''
type, public, &
extends(reaction_sandbox_base_type) :: reaction_sandbox_{}_type
'''.format(rxn_name)
for idx,item in enumerate(primary_species_nocharge):
sb = sb+" PetscInt :: {}_id \n".format(item.lower())
for i in var:
sb = sb+" PetscReal :: {} \n".format(i)
sb = sb+'''
PetscReal :: nrxn
PetscBool :: store_cumulative_mass
PetscInt :: offset_auxiliary
contains
procedure, public :: ReadInput => {}Read
procedure, public :: Setup => {}Setup
procedure, public :: Evaluate => {}React
procedure, public :: Destroy => {}Destroy
end type reaction_sandbox_{}_type
public :: {}Create
contains
! ************************************************************************** !
'''.format(rxn_name.capitalize(),rxn_name.capitalize(),rxn_name.capitalize(),rxn_name.capitalize(),
rxn_name,rxn_name.capitalize())
#----------------------------------------------------------------------------
#
# function create()
#
#----------------------------------------------------------------------------
sb = sb+'''
function {}Create()
#include "petsc/finclude/petscsys.h"
use petscsys
implicit none
class(reaction_sandbox_{}_type), pointer :: {}Create
allocate({}Create)
'''.format(rxn_name.capitalize(),rxn_name,rxn_name.capitalize(),rxn_name.capitalize())
for i in primary_species_nocharge:
sb = sb+" {}Create%{}_id = UNINITIALIZED_INTEGER \n".format(rxn_name.capitalize(),i.lower())
for i in var:
if i.lower() == 'reference_temperature':
            sb = sb + " {}Create%reference_temperature = 298.15d0 ! 25 C\n".format(rxn_name.capitalize())
else:
sb = sb+" {}Create%{} = UNINITIALIZED_DOUBLE \n".format(rxn_name.capitalize(),i)
sb = sb+'''
{}Create%nrxn = UNINITIALIZED_INTEGER
{}Create%store_cumulative_mass = PETSC_FALSE
nullify({}Create%next)
  print *, '{}Create Done'
end function {}Create
! ************************************************************************** !
'''.format(rxn_name.capitalize(),rxn_name.capitalize(),rxn_name.capitalize(),
rxn_name.capitalize(),rxn_name.capitalize())
#----------------------------------------------------------------------------
#
# function read()
#
#----------------------------------------------------------------------------
sb = sb+'''
! ************************************************************************** !
subroutine {}Read(this,input,option)
use Option_module
use String_module
use Input_Aux_module
implicit none
class(reaction_sandbox_{}_type) :: this
type(input_type), pointer :: input
type(option_type) :: option
PetscInt :: i
character(len=MAXWORDLENGTH) :: word, internal_units, units
character(len=MAXSTRINGLENGTH) :: error_string
error_string = 'CHEMISTRY,REACTION_SANDBOX,{}'
call InputPushBlock(input,option)
do
call InputReadPflotranString(input,option)
if (InputError(input)) exit
if (InputCheckExit(input,option)) exit
call InputReadCard(input,option,word)
call InputErrorMsg(input,option,'keyword',error_string)
call StringToUpper(word)
select case(trim(word))
'''.format(rxn_name.capitalize(),rxn_name.lower(),rxn_name.upper())
for idx,item in enumerate(var):
if item!='reference_temperature':
sb = sb+'''
case('{}')
call InputReadDouble(input,option,this%{})
call InputErrorMsg(input,option,'{}',error_string)
call InputReadAndConvertUnits(input,this%{},'{}', &
trim(error_string)//',{}',option)
'''.format(item.upper(),item.lower(),item.lower(),item.lower(),
var_unit[idx],item.lower())
else:
sb = sb+'''
case('REFERENCE_TEMPERATURE')
call InputReadDouble(input,option,this%reference_temperature)
call InputErrorMsg(input,option,'reference temperature [C]', &
error_string)
this%reference_temperature = this%reference_temperature + 273.15d0
'''
sb = sb+'''
case default
call InputKeywordUnrecognized(input,word,error_string,option)
end select
enddo
call InputPopBlock(input,option)
end subroutine {}Read
! ************************************************************************** !
'''.format(rxn_name.capitalize())
#----------------------------------------------------------------------------
#
# function setup()
#
#----------------------------------------------------------------------------
sb = sb+'''
subroutine {}Setup(this,reaction,option)
use Reaction_Aux_module, only : reaction_rt_type, GetPrimarySpeciesIDFromName
use Reaction_Immobile_Aux_module, only : GetImmobileSpeciesIDFromName
use Reaction_Mineral_Aux_module, only : GetKineticMineralIDFromName
use Option_module
implicit none
class(reaction_sandbox_{}_type) :: this
class(reaction_rt_type) :: reaction
type(option_type) :: option
character(len=MAXWORDLENGTH) :: word
PetscInt :: irxn
PetscReal, parameter :: per_day_to_per_sec = 1.d0 / 24.d0 / 3600.d0
'''.format(rxn_name.capitalize(),rxn_name.lower())
for idx,item in enumerate(primary_species_charge):
if item.upper()!='BIOMASS':
sb = sb+'''
word = '{}'
this%{}_id = &
GetPrimarySpeciesIDFromName(word,reaction,option)
'''.format(item.upper(),primary_species_nocharge[idx].lower())
else:
sb = sb+'''
word = 'BIOMASS'
this%biomass_id = &
GetImmobileSpeciesIDFromName(word,reaction%immobile,option) + reaction%offset_immobile
'''
sb = sb+'''
if (this%store_cumulative_mass) then
this%offset_auxiliary = reaction%nauxiliary
reaction%nauxiliary = reaction%nauxiliary + {}
endif
end subroutine {}Setup
! ************************************************************************** !
'''.format(len(primary_species_charge)*2,rxn_name.capitalize())
#----------------------------------------------------------------------------
#
# function PlotVariables()
#
#----------------------------------------------------------------------------
sb = sb+'''
subroutine {}AuxiliaryPlotVariables(this,list,reaction,option)
use Option_module
use Reaction_Aux_module
use Output_Aux_module
use Variables_module, only : REACTION_AUXILIARY
implicit none
class(reaction_sandbox_{}_type) :: this
type(output_variable_list_type), pointer :: list
type(option_type) :: option
class(reaction_rt_type) :: reaction
character(len=MAXWORDLENGTH) :: names({})
character(len=MAXWORDLENGTH) :: word
character(len=MAXWORDLENGTH) :: units
PetscInt :: indices({})
PetscInt :: i
'''.format(rxn_name.capitalize(),rxn_name.lower(),len(primary_species_charge),len(primary_species_charge))
for idx,item in enumerate(primary_species_charge):
sb = sb+" names({}) = '{}'\n".format(idx+1,item.upper())
for idx,item in enumerate(primary_species_nocharge):
sb = sb+" indices({}) = {}_MASS_STORAGE_INDEX\n".format(idx+1,item.upper())
sb = sb+'''
if (this%store_cumulative_mass) then
do i = 1, {}
word = trim(names(i)) // ' Rate'
units = 'mol/m^3-sec'
call OutputVariableAddToList(list,word,OUTPUT_RATE,units, &
REACTION_AUXILIARY, &
this%offset_auxiliary+indices(i))
enddo
do i = 1, {}
word = trim(names(i)) // ' Cum. Mass'
units = 'mol/m^3'
call OutputVariableAddToList(list,word,OUTPUT_GENERIC,units, &
REACTION_AUXILIARY, &
this%offset_auxiliary+{}+indices(i))
enddo
endif
end subroutine {}AuxiliaryPlotVariables
! ************************************************************************** !
'''.format(len(primary_species_charge),len(primary_species_charge),len(primary_species_charge),rxn_name.capitalize())
#----------------------------------------------------------------------------
#
# function react()
#
#----------------------------------------------------------------------------
sb = sb+'''
subroutine {}React(this,Residual,Jacobian,compute_derivative, &
rt_auxvar,global_auxvar,material_auxvar,reaction, &
option)
use Option_module
use Reaction_Aux_module
use Material_Aux_class
implicit none
class(reaction_sandbox_{}_type) :: this
type(option_type) :: option
class(reaction_rt_type) :: reaction
! the following arrays must be declared after reaction
PetscReal :: Residual(reaction%ncomp)
PetscReal :: Jacobian(reaction%ncomp,reaction%ncomp)
type(reactive_transport_auxvar_type) :: rt_auxvar
type(global_auxvar_type) :: global_auxvar
class(material_auxvar_type) :: material_auxvar
PetscInt, parameter :: iphase = 1
PetscReal :: L_water
PetscReal :: kg_water
PetscInt :: i, j, irxn
'''.format(rxn_name.capitalize(),rxn_name.lower())
for idx, item in enumerate(primary_species_nocharge):
sb = sb+" PetscReal :: C_{} \n".format(item.lower())
for i in range(nrxn):
sb = sb+" PetscReal :: r{}doc,r{}o2 \n".format(i+1,i+1)
for i in range(nrxn):
sb = sb+" PetscReal :: r{}kin \n".format(i+1)
sb = sb+" PetscReal :: sumkin \n"
for i in range(nrxn):
sb = sb+" PetscReal :: u{} \n".format(i+1)
sb = sb+" PetscReal :: molality_to_molarity\n PetscReal :: temperature_scaling_factor\n PetscReal :: mu_max_scaled\n"
for i in range(nrxn):
sb = sb+" PetscReal :: k{}_scaled \n".format(i+1)
sb = sb+" PetscReal :: k_deg_scaled"
sb = sb+'''
PetscReal :: volume, rate_scale
PetscBool :: compute_derivative
PetscReal :: rate({})
volume = material_auxvar%volume
L_water = material_auxvar%porosity*global_auxvar%sat(iphase)* &
volume*1.d3 ! m^3 -> L
kg_water = material_auxvar%porosity*global_auxvar%sat(iphase)* &
global_auxvar%den_kg(iphase)*volume
molality_to_molarity = global_auxvar%den_kg(iphase)*1.d-3
if (reaction%act_coef_update_frequency /= ACT_COEF_FREQUENCY_OFF) then
option%io_buffer = 'Activity coefficients not currently supported in &
&{}React().'
call printErrMsg(option)
endif
temperature_scaling_factor = 1.d0
if (Initialized(this%activation_energy)) then
temperature_scaling_factor = &
exp(this%activation_energy/IDEAL_GAS_CONSTANT* &
(1.d0/this%reference_temperature-1.d0/(global_auxvar%temp+273.15d0)))
endif
'''.format(nrxn,rxn_name.capitalize())
sb = sb +" ! concentrations are molarities [M]"
for i in primary_species_nocharge:
if i.upper()!='BIOMASS':
sb = sb+'''
C_{} = rt_auxvar%pri_molal(this%{}_id)* &
rt_auxvar%pri_act_coef(this%{}_id)*molality_to_molarity
'''.format(i.lower(),i.lower(),i.lower())
else:
sb = sb+'''
C_biomass = rt_auxvar%immobile(this%biomass_id-reaction%offset_immobile)
'''
sb = sb +'''
mu_max_scaled = this%mu_max * temperature_scaling_factor
k_deg_scaled = this%k_deg * temperature_scaling_factor
'''
sb = sb+generate_rate_expression(primary_species_nocharge, stoi_file, rxn_name)
sb = sb+'''
end subroutine {}React
! ************************************************************************** !
subroutine {}Destroy(this)
use Utility_module
implicit none
class(reaction_sandbox_{}_type) :: this
print *, '{}Destroy Done'
end subroutine {}Destroy
end module Reaction_Sandbox_{}_class
'''.format(rxn_name.capitalize(),rxn_name.capitalize(),rxn_name.lower(),
rxn_name.capitalize(),rxn_name.capitalize(),rxn_name.capitalize())
sandbox_file.write(sb)
print('Sandbox code is generated at {}.'.format(sb_file))
return
def generate_rate_expression(primary_species_nocharge, stoi_file, rxn_name):
rxn_df = pd.read_csv(stoi_file)
rxn_df = rxn_df.set_index('rxn_ref')
rkin = {}
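    # For each reaction i the generated Fortran computes r{i}doc and r{i}o2 as
    # exponential limitation terms from the DOC and O2 stoichiometries, r{i}kin
    # as their product scaled by mu_max, the weights u{i} = r{i}kin/sumkin, and
    # rate(i) damped by the carrying-capacity factor (1 - C_biomass/this%cc).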
for i in range(len(rxn_df)):
# doc_name = rxn_df.iloc[i,0]
# doc_name = re.sub('[-+)]','',doc_name)
doc_name = rxn_df['DOC_formula'].iloc[i]
doc_name = doc_name.lower()
print(doc_name)
doc_sto = rxn_df[rxn_df['DOC_formula'].loc['r'+str(i+1)]].loc['r'+str(i+1)]
o2_sto = rxn_df['O2'].loc['r'+str(i+1)]
rdoc_i = ' r'+str(i+1)+'doc = '+'exp('+str(doc_sto)+'/(this%vh * C_' + doc_name+'))'
ro2_i = ' r'+str(i+1)+'o2 = '+'exp('+str(o2_sto)+'/(this%vh * C_o2))'
rkin_i = ' r'+str(i+1)+'kin = ' + 'mu_max_scaled * '+'r'+str(i+1)+'doc'+' * ' + 'r'+str(i+1)+'o2'
rkin[doc_name] = [rdoc_i,ro2_i,rkin_i]
sumkin = ' sumkin = '
for i in range(len(rxn_df)):
if i == len(rxn_df)-1:
sumkin = sumkin + ' r' + str(i+1) + 'kin '
elif i == 0:
sumkin = sumkin + 'r' + str(i+1) + 'kin + & \n'
else:
sumkin = sumkin + ' r' + str(i+1) + 'kin + & \n'
u = []
for i in range(len(rxn_df)):
u.append(' u' + str(i+1) + ' = 0.d0')
u.append(' if (r' + str(i+1) + 'kin > 0.d0) u' + str(i+1) + ' = r' + str(i+1) + 'kin/sumkin' )
rate = []
for i in range(len(rxn_df)):
rate.append(' rate(' + str(i+1) + ') = u' + str(i+1) + '*r' + str(i+1) + 'kin*(1-C_biomass/this%cc)')
res = {}
for i in primary_species_nocharge:
icol = rxn_df.columns.get_loc(i)
i = i.lower()
i_id = 'this%'+i+'_id'
res_i = [' Residual(' + i_id + ') = Residual(' + i_id +') &']
space_idx = res_i[0].find('=')
# first_rate_flag = True
for irow in range(len(rxn_df)):
            if pd.isnull(rxn_df.iloc[irow,icol]):
# -*- coding: utf-8 -*-
"""
Created on Fri Aug 14 13:52:36 2020
@author: diego
"""
import os
import sqlite3
import numpy as np
import pandas as pd
import plots as _plots
import update_prices
import update_companies_info
pd.set_option("display.width", 400)
pd.set_option("display.max_columns", 10)
pd.options.mode.chained_assignment = None
update_prices.update_prices()
update_companies_info.update_db()
cwd = os.getcwd()
conn = sqlite3.connect(os.path.join(cwd, "data", "finance.db"))
cur = conn.cursor()
# %% Functions
class Ticker:
"""
    Attributes and methods to analyse stocks traded on B3 - Brasil, Bolsa, Balcão.
"""
def __init__(self, ticker, group="consolidated"):
"""
Creates a Ticker Class Object
Args:
ticker: string
string of the ticker
group: string
Financial statements group. Can be 'consolidated' or 'individual'
"""
self.ticker = ticker.upper()
df = pd.read_sql(
f"""SELECT cnpj, type, sector, subsector, segment, denom_comerc
FROM tickers
WHERE ticker = '{self.ticker}'""",
conn,
)
if len(df) == 0:
print('unknown ticker')
return
self.cnpj = df["cnpj"][0]
self.type = df["type"][0]
self.sector = df["sector"][0]
self.subsector = df["subsector"][0]
self.segment = df["segment"][0]
self.denom_comerc = df["denom_comerc"][0]
Ticker.set_group(self, group)
on_ticker = pd.read_sql(
f"SELECT ticker FROM tickers WHERE cnpj = '{self.cnpj}' AND type = 'ON'",
conn,
)
on_ticker = on_ticker[on_ticker["ticker"].str[-1] == "3"]
self.on_ticker = on_ticker.values[0][0]
try:
self.pn_ticker = pd.read_sql(
f"SELECT ticker FROM tickers WHERE cnpj = '{self.cnpj}' AND type = 'PN'",
conn,
).values[0][0]
except:
pass
def set_group(self, new_group):
"""
To change the financial statement group attribute of a object
Args:
new_group: string
can be 'consolidated' or 'individual'
"""
if new_group in ["individual", "consolidado", "consolidated"]:
if new_group == "individual":
self.grupo = "Individual"
else:
self.grupo = "Consolidado"
# Infer the frequency of the reports
dates = pd.read_sql(
f"""SELECT DISTINCT dt_fim_exerc as date
FROM dre
WHERE cnpj = '{self.cnpj}'
AND grupo_dfp = '{self.grupo}'
ORDER BY dt_fim_exerc""",
conn,
)
if len(dates) == 0:
self.grupo = "Individual"
print(
f"The group of {self.ticker} was automatically switched to individual due to the lack of consolidated statements."
)
dates = pd.read_sql(
f"""SELECT DISTINCT dt_fim_exerc as date
FROM dre
WHERE cnpj = '{self.cnpj}'
AND grupo_dfp = '{self.grupo}'
ORDER BY dt_fim_exerc""",
conn,
)
try:
freq = pd.infer_freq(dates["date"])
self.freq = freq[0]
except ValueError:
self.freq = "Q"
except TypeError:
dates["date"] = pd.to_datetime(dates["date"])
number_of_observations = len(dates)
period_of_time = (
dates.iloc[-1, 0] - dates.iloc[0, 0]
) / np.timedelta64(1, "Y")
if number_of_observations / period_of_time > 1:
self.freq = "Q"
else:
self.freq = "A"
if self.freq == "A":
print(
f"""
The {self.grupo} statements of {self.ticker} are only available on an annual basis.
Only YTD values will be available in the functions and many functions will not work.
Try setting the financial statements to individual:
Ticker.set_group(Ticker object, 'individual')
"""
)
else:
print("new_group needs to be 'consolidated' or 'individual'.")
def get_begin_period(self, function, start_period):
"""
Support method for other methods of the Class
"""
if start_period == "all":
begin_period = pd.to_datetime("1900-01-01")
return begin_period.date()
elif start_period not in ["all", "last"]:
try:
pd.to_datetime(start_period)
except:
print(
"start_period must be 'last', 'all', or date formated as 'YYYY-MM-DD'."
)
return
if start_period == "last":
if function in ["prices", "total_shares", "market_value"]:
last_date = pd.read_sql(
f"SELECT date FROM prices WHERE ticker = '{self.ticker}' ORDER BY date DESC LIMIT(1)",
conn,
)
else:
last_date = pd.read_sql(
f"SELECT dt_fim_exerc FROM dre WHERE cnpj = '{self.cnpj}' AND grupo_dfp = '{self.grupo}' ORDER BY dt_fim_exerc DESC LIMIT(1)",
conn,
)
begin_period = pd.to_datetime(last_date.values[0][0])
else:
begin_period = pd.to_datetime(start_period)
return begin_period.date()
def create_pivot_table(df):
"""
Support method for other methods of the Class
"""
        ##### Creates a pivot table and adds % change columns #####
# create columns with % change of the values
# value_types: ytd, quarter_value, ttm_value
first_type = df.columns.get_loc('ds_conta') + 1
value_types = list(df.columns[first_type:])
new_columns = [i + " % change" for i in value_types]
df[new_columns] = df[value_types].div(
df.groupby("cd_conta")[value_types].shift(1))
# the calculation of %change from ytd is different:
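        # The YTD figure accumulates within the fiscal year, so its % change is
        # taken against the same period of the previous year: the comparison frame
        # is re-indexed with dt_fim_exerc shifted forward by one year and aligned
        # on (date, cd_conta) before dividing.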
if 'ytd' in value_types:
shifted_values = df[['dt_fim_exerc', 'cd_conta', 'ytd']]
shifted_values = shifted_values.set_index(
[(pd.to_datetime(shifted_values['dt_fim_exerc']) + pd.DateOffset(years=1)), shifted_values['cd_conta']])
df = df.set_index([df['dt_fim_exerc'], df['cd_conta']])
df['ytd % change'] = df['ytd'] / shifted_values['ytd']
df[new_columns] = (df[new_columns] - 1) * 100
# reshape
df = df.pivot(
index=["cd_conta", "ds_conta"],
columns=["dt_fim_exerc"],
values=value_types + new_columns
)
# rename multiIndex column levels
df.columns = df.columns.rename("value", level=0)
df.columns = df.columns.rename("date", level=1)
# sort columns by date
df = df.sort_values([("date"), ("value")], axis=1, ascending=False)
        # Sometimes the description of the same account has small differences
        # across periods (e.g. punctuation). The purpose of df_index is to keep
        # a single description per account, avoiding duplicated rows.
df_index = df.reset_index().iloc[:, 0:2]
df_index.columns = df_index.columns.droplevel(1)
df_index = df_index.groupby("cd_conta").first()
# This groupby adds the duplicated rows
df = df.groupby(level=0, axis=0).sum()
# The next two lines add the account description to the dataframe multiIndex
df["ds_conta"] = df_index["ds_conta"]
df = df.set_index("ds_conta", append=True)
# Reorder the multiIndex column levels
df = df.reorder_levels(order=[1, 0], axis=1)
        # Because of 'df = df.sort_values([("date"), ("value")], axis=1, ascending=False)'
        # above, the columns are ordered by date descending and value descending.
        # The purpose here is to reorder them as date descending and value ascending.
df_columns = df.columns.to_native_types()
new_order = []
for i in range(1, len(df_columns), 2):
new_order.append(df_columns[i])
new_order.append(df_columns[i - 1])
new_order = pd.MultiIndex.from_tuples(
new_order, names=("date", "value"))
df = df[new_order]
return df
def income_statement(self, quarter=True, ytd=True, ttm=True, start_period="all"):
"""
Creates a dataframe with the income statement of the object.
Args:
quarter: boolean
includes or not quarter values
ytd: boolean
includes or not year to date values
ttm: boolean
includes or not trailing twelve months value
start_period: string
Returns: pandas dataframe
"""
if self.freq == "A":
quarter = False
ttm = False
begin_period = Ticker.get_begin_period(
self, function="income_statement", start_period=start_period
)
begin_period = begin_period + pd.DateOffset(months=-12)
query = f"""SELECT dt_fim_exerc, fiscal_quarter, cd_conta, ds_conta, vl_conta AS ytd
FROM dre
WHERE cnpj = '{self.cnpj}'
AND grupo_dfp = '{self.grupo}'
AND dt_fim_exerc >= '{begin_period.date()}'
ORDER BY dt_fim_exerc"""
df = pd.read_sql(query, conn)
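        # vl_conta in the dre table is year-to-date; quarterly values come from
        # differencing the YTD series within each account (cd_conta), except in
        # Q1 where the YTD value already equals the quarter value. TTM is the
        # rolling sum of the last four quarters.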
df["quarter_value"] = df[["cd_conta", "ytd"]
].groupby("cd_conta").diff()
df["quarter_value"][df["fiscal_quarter"] == 1] = df["ytd"][
df["fiscal_quarter"] == 1
]
if ttm == True:
df["ttm_value"] = (
df[["dt_fim_exerc", "cd_conta", "quarter_value"]]
.groupby("cd_conta")
.rolling(window=4, min_periods=4)
.sum()
.reset_index(0, drop=True)
)
if quarter == False:
df = df.drop(["quarter_value"], axis=1)
if ytd == False:
df = df.drop(["ytd"], axis=1)
df["dt_fim_exerc"] = pd.to_datetime(df["dt_fim_exerc"])
df = df[df["dt_fim_exerc"] >= begin_period + pd.DateOffset(months=12)]
df = df.drop(columns=["fiscal_quarter"])
df = Ticker.create_pivot_table(df)
return df
def balance_sheet(self, start_period="all", plot=False):
"""
Creates a dataframe with the balance sheet statement of the object.
Args:
start_period: string
plot: boolean
Returns: pandas dataframe
"""
begin_period = Ticker.get_begin_period(
self, function="bp", start_period=start_period
)
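        # The balance sheet is the union of the asset accounts (table bpa) and
        # the liability/equity accounts (table bpp) for the chosen statement group.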
query = f"""SELECT dt_fim_exerc, cd_conta, ds_conta, vl_conta
FROM bpa
WHERE cnpj = '{self.cnpj}'
AND grupo_dfp = '{self.grupo}'
AND dt_fim_exerc >= '{begin_period}'
UNION ALL
SELECT dt_fim_exerc, cd_conta, ds_conta, vl_conta
FROM bpp
WHERE cnpj = '{self.cnpj}'
AND grupo_dfp = '{self.grupo}'
AND dt_fim_exerc >= '{begin_period}'
ORDER BY dt_fim_exerc"""
df = pd.read_sql(query, conn, parse_dates=['dt_fim_exerc'])
df = Ticker.create_pivot_table(df)
if plot:
_plots.bs_plot(df, self.ticker, self.grupo)
return df
def cash_flow(self, quarter=True, ytd=True, ttm=True, start_period="all"):
"""
Creates a dataframe with the cash flow statement of the object.
Args:
quarter: boolean
includes or not quarter values
ytd: boolean
includes or not year to date values
ttm: boolean
includes or not trailing twelve months value
start_period: string
Returns: pandas dataframe
"""
if self.freq == "A":
quarter = False
ttm = False
begin_period = Ticker.get_begin_period(
self, function="dfc", start_period=start_period
)
begin_period = begin_period + pd.DateOffset(months=-12)
query = f"""SELECT dt_fim_exerc, fiscal_quarter, cd_conta, ds_conta, vl_conta AS ytd
FROM dfc
WHERE cnpj = '{self.cnpj}'
AND grupo_dfp = '{self.grupo}'
AND dt_fim_exerc >= '{begin_period.date()}'
ORDER BY dt_fim_exerc"""
df = pd.read_sql(query, conn)
df["quarter_value"] = df[["cd_conta", "ytd"]
].groupby("cd_conta").diff()
df["quarter_value"][df["fiscal_quarter"] == 1] = df["ytd"][
df["fiscal_quarter"] == 1
]
if ttm:
df["ttm_value"] = (
df[["dt_fim_exerc", "cd_conta", "quarter_value"]]
.groupby("cd_conta")
.rolling(window=4, min_periods=4)
.sum()
.reset_index(0, drop=True)
)
if not quarter:
df = df.drop(["quarter_value"], axis=1)
if not ytd:
df = df.drop(["ytd"], axis=1)
df["dt_fim_exerc"] = pd.to_datetime(df["dt_fim_exerc"])
df = df[df["dt_fim_exerc"] >= begin_period + pd.DateOffset(months=12)]
df = df.drop(columns=["fiscal_quarter"])
df = Ticker.create_pivot_table(df)
return df
def prices(self, start_period="all"):
"""
Support method for other methods of the Class
"""
begin_period = Ticker.get_begin_period(
self, function="prices", start_period=start_period
)
prices = pd.read_sql(
f"""SELECT date, preult AS price
FROM prices
WHERE ticker = '{self.ticker}' AND date >= '{begin_period}'
ORDER BY date""",
conn,
index_col="date", parse_dates=['date']
)
return prices
def total_shares(self, start_period="all"):
"""
Support method for other methods of the Class
"""
begin_period = Ticker.get_begin_period(
self, function="total_shares", start_period=start_period
)
query = f"""SELECT date, number_shares AS on_shares
FROM prices
WHERE ticker = '{self.on_ticker}' AND date >= '{begin_period}'
ORDER BY date"""
nshares_on = pd.read_sql(query, conn)
try:
query = f"""SELECT date, number_shares AS pn_shares
FROM prices
WHERE ticker = '{self.pn_ticker}' AND date >= '{begin_period}'
ORDER BY date"""
nshares_pn = pd.read_sql(query, conn)
shares = nshares_on.merge(nshares_pn, how="left")
shares["total_shares"] = shares["on_shares"] + \
shares["pn_shares"].fillna(0)
except:
shares = nshares_on.rename({"on_shares": "total_shares"}, axis=1)
shares.index = shares["date"]
shares.index = pd.to_datetime(shares.index)
return shares[["total_shares"]]
def net_income(self, quarter=True, ytd=True, ttm=True, start_period="all", plot=False):
"""
Creates a dataframe with the net income information of the object.
Args:
quarter: boolean
includes or not quarter values
ytd: boolean
includes or not year to date values
ttm: boolean
includes or not trailing twelve months value
start_period: string
plot: boolean
Returns: pandas dataframe
"""
if self.freq == "A":
quarter = False
ttm = False
begin_period = Ticker.get_begin_period(
self, function="net_income", start_period=start_period
)
begin_period = begin_period + pd.DateOffset(months=-12)
query = f"""SELECT dt_fim_exerc AS date, fiscal_quarter, ds_conta, vl_conta AS ytd_net_income
FROM dre
WHERE cnpj = '{self.cnpj}'
AND grupo_dfp = '{self.grupo}'
AND dt_fim_exerc >= '{begin_period.date()}'
AND (ds_conta = 'Resultado Líquido das Operações Continuadas' OR ds_conta = 'Lucro/Prejuízo do Período')
ORDER BY dt_fim_exerc"""
income_statement = pd.read_sql(
query, conn, index_col="date", parse_dates=['date'])
df = income_statement[
income_statement["ds_conta"]
== "Resultado Líquido das Operações Continuadas"
]
if len(df) == 0:
df = income_statement[
income_statement["ds_conta"] == "Lucro/Prejuízo do Período"
]
df = df.drop(["ds_conta"], axis=1)
df["quarter_net_income"] = df["ytd_net_income"] - \
df["ytd_net_income"].shift(1)
df["quarter_net_income"][df["fiscal_quarter"] == 1] = df["ytd_net_income"][
df["fiscal_quarter"] == 1
]
if ttm == True:
df["ttm_net_income"] = (
df["quarter_net_income"].rolling(window=4, min_periods=4).sum()
)
if quarter == False:
df = df.drop(["quarter_net_income"], axis=1)
if ytd == False:
df = df.drop(["ytd_net_income"], axis=1)
df = df[df.index >= begin_period + pd.DateOffset(months=12)]
df = df.drop(columns=["fiscal_quarter"])
if plot:
_plots.bar_plot(df, self.ticker, self.grupo,
bars=' Net Income (R$,000) ')
return df
def ebit(self, quarter=True, ytd=True, ttm=True, start_period="all", plot=False):
"""
Creates a dataframe with the ebit information of the object.
Args:
quarter: boolean
includes or not quarter values
ytd: boolean
includes or not year to date values
ttm: boolean
includes or not trailing twelve months value
start_period: string
plot: boolean
Returns: pandas dataframe
"""
if self.freq == "A":
quarter = False
ttm = False
begin_period = Ticker.get_begin_period(
self, function="ebit", start_period=start_period
)
begin_period = begin_period + pd.DateOffset(months=-12)
query = f"""SELECT dt_fim_exerc AS date, fiscal_quarter, ds_conta, vl_conta AS ytd_ebit
FROM dre
WHERE cnpj = '{self.cnpj}'
AND grupo_dfp = '{self.grupo}'
AND dt_fim_exerc >= '{begin_period.date()}'
AND (ds_conta = 'Resultado Antes do Resultado Financeiro e dos Tributos' OR ds_conta = 'Resultado Operacional')
ORDER BY dt_fim_exerc"""
income_statement = pd.read_sql(
query, conn, index_col="date", parse_dates=['date'])
df = income_statement[
income_statement["ds_conta"]
== "Resultado Antes do Resultado Financeiro e dos Tributos"
]
if len(df) == 0:
df = income_statement[
income_statement["ds_conta"] == "Resultado Operacional"
]
df = df.drop(["ds_conta"], axis=1)
df["quarter_ebit"] = df["ytd_ebit"] - df["ytd_ebit"].shift(1)
df["quarter_ebit"][df["fiscal_quarter"] == 1] = df["ytd_ebit"][
df["fiscal_quarter"] == 1
]
if ttm == True:
df["ttm_ebit"] = df["quarter_ebit"].rolling(
window=4, min_periods=4).sum()
if quarter == False:
df = df.drop(["quarter_ebit"], axis=1)
if ytd == False:
df = df.drop(["ytd_ebit"], axis=1)
df = df[df.index >= begin_period + pd.DateOffset(months=12)]
df = df.drop(columns=["fiscal_quarter"])
if plot:
_plots.bar_plot(df, self.ticker, self.grupo,
bars=' EBIT (R$,000) ')
return df
def depre_amort(self, quarter=True, ytd=True, ttm=True, start_period="all", plot=False):
"""
Creates a dataframe with the depreciationa and amortization information of the object.
Args:
quarter: boolean
includes or not quarter values
ytd: boolean
includes or not year to date values
ttm: boolean
includes or not trailing twelve months value
start_period: string
plot: boolean
Returns: pandas dataframe
"""
if self.freq == "A":
quarter = False
ttm = False
begin_period = Ticker.get_begin_period(
self, function="depre_amort", start_period=start_period
)
begin_period = begin_period + pd.DateOffset(months=-12)
query = f"""SELECT dt_fim_exerc AS date, fiscal_quarter, vl_conta AS ytd_d_a
FROM dva
WHERE cnpj = '{self.cnpj}'
AND grupo_dfp = '{self.grupo}'
AND ds_conta = 'Depreciação, Amortização e Exaustão'
AND dt_fim_exerc >= '{begin_period.date()}'
ORDER BY dt_fim_exerc"""
df = pd.read_sql(query, conn, index_col="date", parse_dates=['date'])
df["quarter_d_a"] = df["ytd_d_a"] - df["ytd_d_a"].shift(1)
df["quarter_d_a"][df["fiscal_quarter"] ==
1] = df["ytd_d_a"][df["fiscal_quarter"] == 1]
if ttm == True:
df["ttm_d_a"] = df["quarter_d_a"].rolling(
window=4, min_periods=4).sum()
if quarter == False:
df = df.drop(["quarter_d_a"], axis=1)
if ytd == False:
df = df.drop(["ytd_d_a"], axis=1)
df = df[df.index >= begin_period + pd.DateOffset(months=12)]
df = df.drop(columns=["fiscal_quarter"])
if plot:
_plots.bar_plot(df, self.ticker, self.grupo, bars=' D&A (R$,000)')
return df
def ebitda(self, quarter=True, ytd=True, ttm=True, start_period="all", plot=False):
"""
Creates a dataframe with the ebitda information of the object.
Args:
quarter: boolean
includes or not quarter values
ytd: boolean
includes or not year to date values
ttm: boolean
includes or not trailing twelve months value
start_period: string
plot: boolean
Returns: pandas dataframe
"""
if self.freq == "A":
quarter = False
ttm = False
begin_period = Ticker.get_begin_period(
self, function="ebitda", start_period=start_period
)
begin_period = begin_period + pd.DateOffset(months=-12)
query = f"""SELECT dre.dt_fim_exerc AS date,
dre.fiscal_quarter,
dre.ds_conta,
dre.vl_conta AS ytd_ebit,
dva.vl_conta AS ytd_d_a
FROM dre
LEFT JOIN dva ON (dre.dt_fim_exerc=dva.dt_fim_exerc AND dre.grupo_dfp=dva.grupo_dfp AND dre.cnpj=dva.cnpj)
WHERE dre.cnpj = '{self.cnpj}'
AND dre.grupo_dfp = '{self.grupo}'
AND dre.dt_fim_exerc >= '{begin_period.date()}'
AND (dre.ds_conta = 'Resultado Antes do Resultado Financeiro e dos Tributos' OR dre.ds_conta = 'Resultado Operacional')
AND dva.ds_conta = 'Depreciação, Amortização e Exaustão'
ORDER BY dre.dt_fim_exerc"""
income_statement = pd.read_sql(
query, conn, index_col="date", parse_dates=['date'])
df = income_statement[
income_statement["ds_conta"]
== "Resultado Antes do Resultado Financeiro e dos Tributos"
]
if len(df) == 0:
df = income_statement[
income_statement["ds_conta"] == "Resultado Operacional"
]
df["ebit"] = df["ytd_ebit"] - df["ytd_ebit"].shift(1)
df["ebit"][df["fiscal_quarter"] == 1] = df["ytd_ebit"][
df["fiscal_quarter"] == 1
]
df["d_a"] = df["ytd_d_a"] - df["ytd_d_a"].shift(1)
df["d_a"][df["fiscal_quarter"] ==
1] = df["ytd_d_a"][df["fiscal_quarter"] == 1]
df["quarter_ebitda"] = df["ebit"] - df["d_a"]
if ttm == True:
df["ttm_ebitda"] = df["quarter_ebitda"].rolling(
window=4, min_periods=4).sum()
if quarter == False:
df = df.drop(["quarter_ebitda"], axis=1)
if ytd == True:
df["ytd_ebitda"] = df["ytd_ebit"] - df["ytd_d_a"]
df = df[df.index >= begin_period + pd.DateOffset(months=12)]
df = df.drop(
columns=["fiscal_quarter", "ds_conta",
"ytd_ebit", "ytd_d_a", "d_a", "ebit"]
)
if plot:
_plots.bar_plot(df, self.ticker, self.grupo,
bars=' EBITDA (R$,000) ')
return df
def revenue(self, quarter=True, ytd=True, ttm=True, start_period="all", plot=False):
"""
Creates a dataframe with the revenue information of the object.
Args:
quarter: boolean
includes or not quarter values
ytd: boolean
includes or not year to date values
ttm: boolean
includes or not trailing twelve months value
start_period: string
plot: boolean
Returns: pandas dataframe
"""
if self.freq == "A":
quarter = False
ttm = False
begin_period = Ticker.get_begin_period(
self, function="revenue", start_period=start_period
)
begin_period = begin_period + pd.DateOffset(months=-12)
query = f"""SELECT dt_fim_exerc AS date, fiscal_quarter, vl_conta AS ytd_revenue
FROM dre
WHERE cnpj = '{self.cnpj}'
AND grupo_dfp = '{self.grupo}'
AND dt_fim_exerc >= '{begin_period.date()}'
AND cd_conta = '3.01'
ORDER BY dt_fim_exerc"""
df = pd.read_sql(query, conn, index_col="date", parse_dates=['date'])
df["quarter_revenue"] = df["ytd_revenue"] - df["ytd_revenue"].shift(1)
df["quarter_revenue"][df["fiscal_quarter"] == 1] = df["ytd_revenue"][
df["fiscal_quarter"] == 1
]
if ttm == True:
df["ttm_revenue"] = df["quarter_revenue"].rolling(
window=4, min_periods=4).sum()
if quarter == False:
df = df.drop(["quarter_revenue"], axis=1)
if ytd == False:
df = df.drop(["ytd_revenue"], axis=1)
df = df[df.index >= begin_period + pd.DateOffset(months=12)]
df = df.drop(columns=["fiscal_quarter"])
if plot:
_plots.bar_plot(df, self.ticker, self.grupo,
bars=' Revenue (R$,000) ')
return df
def cash_equi(self, start_period="all", plot=False):
"""
Creates a dataframe with the cash and cash equivalents information of the object.
Args:
start_period: string
plot: boolean
Returns: pandas dataframe
"""
begin_period = Ticker.get_begin_period(
self, function="cash_equi", start_period=start_period
)
query = f"""SELECT dt_fim_exerc AS date, SUM(vl_conta) AS cash_equi
FROM bpa
WHERE (cnpj = '{self.cnpj}' AND grupo_dfp = '{self.grupo}')
AND (ds_conta = 'Caixa e Equivalentes de Caixa' OR ds_conta = 'Aplicações Financeiras' )
AND (cd_conta != '1.02.01.03.01')
AND dt_fim_exerc >= '{begin_period}'
GROUP BY dt_fim_exerc
ORDER BY dt_fim_exerc"""
df = pd.read_sql(query, conn, index_col="date", parse_dates=['date'])
if plot:
_plots.bar_plot(df, self.ticker, self.grupo,
bars=' Cash & Equivalents (R$,000) ')
return df
def total_debt(self, start_period="all", plot=False):
"""
Creates a dataframe with the total debt information of the object.
Args:
start_period: string
plot: boolean
Returns: pandas dataframe
"""
begin_period = Ticker.get_begin_period(
self, function="total_debt", start_period=start_period
)
query = f"""SELECT dt_fim_exerc AS date, SUM(vl_conta) AS total_debt
FROM bpp
WHERE (cnpj = '{self.cnpj}' AND grupo_dfp = '{self.grupo}' AND ds_conta = 'Empréstimos e Financiamentos')
AND (cd_conta = '2.01.04' OR cd_conta = '2.02.01')
AND dt_fim_exerc >= '{begin_period}'
GROUP BY dt_fim_exerc
ORDER BY dt_fim_exerc"""
df = pd.read_sql(query, conn, index_col="date", parse_dates=['date'])
if plot:
_plots.bar_plot(df, self.ticker, self.grupo,
bars=' Total Debt (R$,000) ')
return df
def market_value(self, start_period="all", plot=False):
"""
Creates a dataframe with the market value information of the object.
Args:
start_period: string
plot: boolean
Returns: pandas dataframe
"""
begin_period = Ticker.get_begin_period(
self, function="market_value", start_period=start_period
)
try:
self.pn_ticker
except:
query = f"""SELECT date, (preult * number_shares) AS market_value
FROM prices
WHERE ticker = '{self.on_ticker}' AND date >= '{begin_period}'
ORDER BY date"""
else:
query = f"""SELECT date, SUM(preult * number_shares) AS market_value
FROM prices
WHERE (ticker = '{self.on_ticker}' OR ticker ='{self.pn_ticker}')
AND date >= '{begin_period}'
GROUP BY date
ORDER BY date"""
df = pd.read_sql(query, conn, index_col="date", parse_dates=['date'])
if plot:
_plots.line_plot(df, self.ticker, self.grupo,
line=' Market Value (R$,000) ')
return df
def net_debt(self, start_period="all", plot=False):
"""
Creates a dataframe with the net debt information of the object.
Args:
start_period: string
plot: boolean
Returns: pandas dataframe
"""
total_debt = Ticker.total_debt(self, start_period=start_period)
cash = Ticker.cash_equi(self, start_period=start_period)
net_debt = total_debt["total_debt"] - cash["cash_equi"]
        net_debt.rename("net_debt", inplace=True)
if plot:
_plots.bar_plot(pd.DataFrame(net_debt), self.ticker,
self.grupo, bars=' Net Debt (R$,000) ')
return net_debt
def eps(self, start_period="all"):
"""
Creates a dataframe with the earnings per share(ttm) information of the object.
Args:
start_period: string
Returns: pandas dataframe
"""
begin_period = Ticker.get_begin_period(
self, function="eps", start_period=start_period
)
begin_period = begin_period + pd.DateOffset(months=-7)
ni = Ticker.net_income(
self, quarter=False, ytd=False, start_period=begin_period)
shares = Ticker.total_shares(self, start_period=begin_period)
eps = shares.merge(
ni[["ttm_net_income"]], how="outer", left_index=True, right_index=True
)
eps = eps.ffill()
eps["eps"] = (eps["ttm_net_income"] * 1000) / eps["total_shares"]
eps = eps[["eps"]]
if start_period == "last":
eps = eps.iloc[-1:, :]
else:
eps = eps[eps.index >= begin_period + pd.DateOffset(months=7)]
eps = eps.dropna()
return eps
def price_earnings(self, start_period="all", plot=False):
"""
Creates a dataframe with the price earnings(ttm) information of the object.
Args:
start_period: string
plot: boolean
Returns: pandas dataframe
"""
prices = Ticker.prices(self, start_period=start_period)
eps = Ticker.eps(self, start_period=start_period)
pe = prices["price"] / eps["eps"]
pe.rename("p_e", inplace=True)
if plot:
_plots.line_plot(pd.DataFrame(pe), self.ticker,
self.grupo, line=' Price/Earnings ')
return pe
def total_equity(self, start_period="all", plot=False):
"""
Creates a dataframe with the total equity information of the object.
Args:
start_period: string
plot: boolean
Returns: pandas dataframe
"""
begin_period = Ticker.get_begin_period(
self, function="total_equity", start_period=start_period
)
query = f"""SELECT dt_fim_exerc AS date, vl_conta AS total_equity
FROM bpp
WHERE (cnpj = '{self.cnpj}' AND grupo_dfp = '{self.grupo}')
AND (ds_conta = 'Patrimônio Líquido' OR ds_conta = 'Patrimônio Líquido Consolidado')
AND dt_fim_exerc >= '{begin_period}'
ORDER BY dt_fim_exerc"""
df = pd.read_sql(query, conn, index_col="date", parse_dates=['date'])
if plot:
_plots.bar_plot(df, self.ticker, self.grupo,
bars=' Total Equity (R$,000) ')
return df
def total_assets(self, start_period="all", plot=False):
"""
Creates a dataframe with the total assets information of the object.
Args:
start_period: string
plot: boolean
Returns: pandas dataframe
"""
begin_period = Ticker.get_begin_period(
self, function="total_assets", start_period=start_period
)
query = f"""SELECT dt_fim_exerc AS date, vl_conta AS total_assets
FROM bpa
WHERE (cnpj = '{self.cnpj}' AND grupo_dfp = '{self.grupo}')
AND cd_conta = '1'
AND dt_fim_exerc >= '{begin_period}'
ORDER BY dt_fim_exerc"""
df = pd.read_sql(query, conn, index_col="date", parse_dates=['date'])
if plot:
_plots.bar_plot(df, self.ticker, self.grupo,
bars=' Total Assets (R$,000) ')
return df
def roe(self, start_period="all", plot=False):
"""
Creates a dataframe with the return on equity information of the object.
Args:
start_period: string
plot: boolean
Returns: pandas dataframe
"""
ni = Ticker.net_income(
self, quarter=False, ytd=False, start_period=start_period
)
tequity = Ticker.total_equity(self, start_period=start_period)
roe = (ni["ttm_net_income"] / tequity["total_equity"]) * 100
roe.rename("roe", inplace=True)
roe = roe.dropna()
if plot:
_plots.bar_plot(pd.DataFrame(roe), self.ticker,
self.grupo, bars=' ROE (%) ')
return roe
def roa(self, start_period="all", plot=False):
"""
Creates a dataframe with the return on assets information of the object.
Args:
start_period: string
plot: boolean
Returns: pandas dataframe
"""
ni = Ticker.net_income(
self, quarter=False, ytd=False, start_period=start_period
)
tassets = Ticker.total_assets(self, start_period=start_period)
roa = (ni["ttm_net_income"] / tassets["total_assets"]) * 100
roa.rename("roa", inplace=True)
roa = roa.dropna()
if plot:
_plots.bar_plot(pd.DataFrame(roa), self.ticker,
self.grupo, bars=' ROA (%) ')
return roa
def debt_to_equity(self, start_period="all"):
"""
Creates a dataframe with the debt to equity information of the object.
Args:
start_period: string
Returns: pandas dataframe
"""
debt = Ticker.total_debt(self, start_period=start_period)
equity = Ticker.total_equity(self, start_period=start_period)
debt_to_equity = debt["total_debt"] / equity["total_equity"]
debt_to_equity.rename("debt_to_equity", inplace=True)
return debt_to_equity
def financial_leverage(self, start_period="all"):
"""
Creates a dataframe with the financial leverage (total assets / total equity)
information of the object.
Args:
start_period: string
Returns: pandas dataframe
"""
assets = Ticker.total_assets(self, start_period=start_period)
equity = Ticker.total_equity(self, start_period=start_period)
financial_leverage = assets["total_assets"] / equity["total_equity"]
financial_leverage.rename("financial_leverage", inplace=True)
return financial_leverage
def current_ratio(self, start_period="all"):
"""
Creates a dataframe with the current ratio information of the object.
Args:
start_period: string
Returns: pandas dataframe
"""
begin_period = Ticker.get_begin_period(
self, function="current_ratio", start_period=start_period
)
current_ratio = pd.read_sql(
f"""SELECT bpa.dt_fim_exerc AS date, (CAST(bpa.vl_conta AS float) / CAST(bpp.vl_conta AS float)) AS current_ratio
FROM bpa
LEFT JOIN bpp ON (bpa.dt_fim_exerc=bpp.dt_fim_exerc AND bpa.cnpj=bpp.cnpj AND bpa.grupo_dfp=bpp.grupo_dfp)
WHERE
bpa.cnpj = '{self.cnpj}' AND
bpa.grupo_dfp = '{self.grupo}' AND
bpa.ds_conta = 'Ativo Circulante' AND
bpa.dt_fim_exerc >= '{begin_period}' AND
bpp.ds_conta = 'Passivo Circulante'
ORDER BY bpa.dt_fim_exerc""",
conn,
index_col="date", parse_dates=['date']
)
return current_ratio
def gross_profit_margin(self, quarter=True, ytd=True, ttm=True, start_period="all"):
"""
        Creates a dataframe with the gross profit margin information of the object.
Args:
quarter: boolean
includes or not quarter values
ytd: boolean
includes or not year to date values
ttm: boolean
includes or not trailing twelve months value
start_period: string
Returns: pandas dataframe
"""
if self.freq == "A":
quarter = False
ttm = False
begin_period = Ticker.get_begin_period(
self, function="gross_profit_margin", start_period=start_period
)
begin_period = begin_period + pd.DateOffset(months=-12)
# This query uses a self join on dre
query = f"""SELECT a.dt_fim_exerc AS date, a.fiscal_quarter, a.vl_conta AS ytd_gross_profit, b.vl_conta AS ytd_revenue
FROM dre AS a
LEFT JOIN dre AS b ON (a.dt_fim_exerc=b.dt_fim_exerc AND a.grupo_dfp=b.grupo_dfp AND a.cnpj=b.cnpj)
WHERE a.cnpj = '{self.cnpj}' AND
a.grupo_dfp = '{self.grupo}' AND
a.dt_fim_exerc >= '{begin_period.date()}' AND
a.cd_conta = '3.03' AND
b.cd_conta = '3.01'
ORDER BY a.dt_fim_exerc"""
df = pd.read_sql(query, conn, index_col="date")
df["ytd_gross_profit_margin"] = df["ytd_gross_profit"] / df["ytd_revenue"]
df["revenue"] = df["ytd_revenue"] - df["ytd_revenue"].shift(1)
df["gross_profit"] = df["ytd_gross_profit"] - \
df["ytd_gross_profit"].shift(1)
        first_quarter = df["fiscal_quarter"] == 1
        df.loc[first_quarter, "revenue"] = df.loc[first_quarter, "ytd_revenue"]
        df.loc[first_quarter, "gross_profit"] = df.loc[
            first_quarter, "ytd_gross_profit"
        ]
        df["gross_profit_margin"] = df["gross_profit"] / df["revenue"]
        if ttm:
            df["ttm_revenue"] = df["revenue"].rolling(
                window=4, min_periods=4).sum()
            df["ttm_gross_profit"] = (
                df["gross_profit"].rolling(window=4, min_periods=4).sum()
            )
            df["ttm_gross_profit_margin"] = df["ttm_gross_profit"] / \
                df["ttm_revenue"]
            df = df.drop(["ttm_revenue", "ttm_gross_profit"], axis=1)
        if not quarter:
            df = df.drop(["gross_profit_margin"], axis=1)
        if not ytd:
            df = df.drop(["ytd_gross_profit_margin"], axis=1)
df = df.drop(
["ytd_gross_profit", "ytd_revenue", "revenue", "gross_profit"], axis=1
)
df.index = pd.to_datetime(df.index)
df = df[df.index >= begin_period + pd.DateOffset(months=12)]
df = df.drop(columns=["fiscal_quarter"])
df = df * 100
return df
def net_profit_margin(self, quarter=True, ytd=True, ttm=True, start_period="all"):
"""
Creates a dataframe with the net profit margin information of the object.
Args:
quarter: boolean
includes or not quarter values
ytd: boolean
includes or not year to date values
ttm: boolean
includes or not trailing twelve months value
start_period: string
Returns: pandas dataframe
"""
if self.freq == "A":
quarter = False
ttm = False
begin_period = Ticker.get_begin_period(
self, function="net_profit_margin", start_period=start_period
)
begin_period = begin_period + pd.DateOffset(months=-12)
# This query uses a self join on dre
query = f"""SELECT a.dt_fim_exerc AS date, a.fiscal_quarter, a.ds_conta, a.vl_conta AS ytd_net_income, b.vl_conta AS ytd_revenue, b.cd_conta
FROM dre a
LEFT JOIN dre b ON (a.dt_fim_exerc=b.dt_fim_exerc AND a.grupo_dfp=b.grupo_dfp AND a.cnpj=b.cnpj)
WHERE a.cnpj = '{self.cnpj}' AND
a.grupo_dfp = '{self.grupo}' AND
a.dt_fim_exerc >= '{begin_period.date()}' AND
b.cd_conta = '3.01' AND
(a.ds_conta = 'Resultado Líquido das Operações Continuadas' OR a.ds_conta = 'Lucro/Prejuízo do Período')
ORDER BY a.dt_fim_exerc"""
df = pd.read_sql(query, conn, index_col="date")
net_income = df["ytd_net_income"][
df["ds_conta"] == "Resultado Líquido das Operações Continuadas"
]
        if len(net_income) == 0:
net_income = df["ytd_net_income"][
df["ds_conta"] == "Lucro/Prejuízo do Período"
]
df["ytd_net_profit_margin"] = net_income / df["ytd_revenue"]
df["revenue"] = df["ytd_revenue"] - df["ytd_revenue"].shift(1)
df["net_income"] = df["ytd_net_income"] - df["ytd_net_income"].shift(1)
        first_quarter = df["fiscal_quarter"] == 1
        df.loc[first_quarter, "revenue"] = df.loc[first_quarter, "ytd_revenue"]
        df.loc[first_quarter, "net_income"] = df.loc[
            first_quarter, "ytd_net_income"
        ]
        df["net_profit_margin"] = df["net_income"] / df["revenue"]
        if ttm:
            df["ttm_revenue"] = df["revenue"].rolling(
                window=4, min_periods=4).sum()
            df["ttm_net_income"] = (
                df["net_income"].rolling(window=4, min_periods=4).sum()
            )
            df["ttm_net_profit_margin"] = df["ttm_net_income"] / df["ttm_revenue"]
            df = df.drop(["ttm_revenue", "ttm_net_income"], axis=1)
        if not quarter:
            df = df.drop(["net_profit_margin"], axis=1)
        if not ytd:
            df = df.drop(["ytd_net_profit_margin"], axis=1)
df = df.drop(
[
"ds_conta",
"cd_conta",
"ytd_net_income",
"ytd_revenue",
"revenue",
"net_income",
],
axis=1,
)
df.index = pd.to_datetime(df.index)
df = df[df.index >= begin_period + pd.DateOffset(months=12)]
df = df.drop(columns=["fiscal_quarter"])
df = df * 100
return df
def ebitda_margin(self, quarter=True, ytd=True, ttm=True, start_period="all"):
"""
Creates a dataframe with the ebitda margin information of the object.
Args:
quarter: boolean
includes or not quarter values
ytd: boolean
includes or not year to date values
ttm: boolean
includes or not trailing twelve months value
start_period: string
Returns: pandas dataframe
"""
if self.freq == "A":
quarter = False
ttm = False
begin_period = Ticker.get_begin_period(
self, function="ebitda_margin", start_period=start_period
)
begin_period = begin_period + pd.DateOffset(months=-12)
query = f"""SELECT dre_ebit.dt_fim_exerc AS date,
dre_ebit.fiscal_quarter,
dre_ebit.ds_conta,
dre_ebit.vl_conta AS ytd_ebit,
dva.vl_conta AS ytd_d_a,
dre_revenue.vl_conta AS ytd_revenue
FROM dre AS dre_ebit
LEFT JOIN dva ON (
dre_ebit.dt_fim_exerc = dva.dt_fim_exerc AND
dre_ebit.grupo_dfp = dva.grupo_dfp AND
dre_ebit.cnpj = dva.cnpj)
LEFT JOIN dre AS dre_revenue ON(
dre_ebit.dt_fim_exerc = dre_revenue.dt_fim_exerc AND
dre_ebit.grupo_dfp = dre_revenue.grupo_dfp AND
dre_ebit.cnpj = dre_revenue.cnpj)
WHERE dre_ebit.cnpj = '{self.cnpj}'
AND dre_ebit.grupo_dfp = '{self.grupo}'
AND dre_ebit.dt_fim_exerc >= '{begin_period.date()}'
AND (dre_ebit.ds_conta = 'Resultado Antes do Resultado Financeiro e dos Tributos' OR dre_ebit.ds_conta = 'Resultado Operacional')
AND dva.ds_conta = 'Depreciação, Amortização e Exaustão'
AND dre_revenue.cd_conta = '3.01'
ORDER BY dre_ebit.dt_fim_exerc"""
income_statement = pd.read_sql(
query, conn, index_col="date", parse_dates=['date'])
df = income_statement[
income_statement["ds_conta"]
== "Resultado Antes do Resultado Financeiro e dos Tributos"
]
if len(df) == 0:
df = income_statement[
income_statement["ds_conta"] == "Resultado Operacional"
]
        df = df.copy()  # avoid chained assignment on a slice of income_statement
        df["revenue"] = df["ytd_revenue"] - df["ytd_revenue"].shift(1)
        df["ebit"] = df["ytd_ebit"] - df["ytd_ebit"].shift(1)
        df["d_a"] = df["ytd_d_a"] - df["ytd_d_a"].shift(1)
        first_quarter = df["fiscal_quarter"] == 1
        df.loc[first_quarter, "revenue"] = df.loc[first_quarter, "ytd_revenue"]
        df.loc[first_quarter, "ebit"] = df.loc[first_quarter, "ytd_ebit"]
        df.loc[first_quarter, "d_a"] = df.loc[first_quarter, "ytd_d_a"]
        df["ebitda"] = df["ebit"] - df["d_a"]
        if ttm:
df["ttm_ebitda"] = df["ebitda"].rolling(
window=4, min_periods=4).sum()
df["ttm_revenue"] = df["revenue"].rolling(
window=4, min_periods=4).sum()
df["ttm_ebitda_margin"] = df["ttm_ebitda"] / df["ttm_revenue"]
df.drop(columns=["ttm_ebitda", "ttm_revenue"], inplace=True)
        if quarter:
df["ebitda_margin"] = df["ebitda"] / df["revenue"]
        if ytd:
df["ytd_ebitda"] = df["ytd_ebit"] - df["ytd_d_a"]
df["ytd_ebitda_margin"] = df["ytd_ebitda"] / df["ytd_revenue"]
df.drop(columns=["ytd_ebitda"], inplace=True)
df = df[df.index >= begin_period + pd.DateOffset(months=12)]
df = df.drop(
columns=[
"fiscal_quarter",
"ds_conta",
"ytd_ebit",
"ytd_d_a",
"d_a",
"ebit",
"ytd_revenue",
"revenue",
"ebitda",
]
)
return df * 100
def enterprise_value(self, start_period="all", plot=False):
"""
Creates a dataframe with the enterprise value information of the object.
Args:
start_period: string
plot: boolean
Returns: pandas dataframe
"""
mv = Ticker.market_value(self, start_period=start_period)
if start_period not in ["last", "all"]:
start_period = pd.to_datetime(
start_period) + pd.DateOffset(months=-7)
nd = Ticker.net_debt(self, start_period=start_period)
df = mv.merge(nd, how="outer", left_index=True, right_index=True)
df = df.ffill()
df["ev"] = df["market_value"] + (df["net_debt"] * 1000)
df = df[['ev']].dropna()
if plot:
_plots.line_plot(df, self.ticker, self.grupo,
line=' Enterprise Value (R$,000) ')
return df
def ev_ebitda(self, start_period="all", plot=False):
"""
Creates a dataframe with the enterprise value / ebitda
information of the object.
Args:
start_period: string
plot: boolean
Returns: pandas dataframe
"""
ev = Ticker.enterprise_value(self, start_period=start_period)
if start_period not in ["last", "all"]:
start_period = pd.to_datetime(
start_period) + pd.DateOffset(months=-7)
ebitda = Ticker.ebitda(
self, quarter=False, ytd=False, ttm=True, start_period=start_period
)
df = ev.merge(ebitda, how="outer", left_index=True, right_index=True)
df = df.ffill()
df["ev_ebitda"] = (df["ev"] / df["ttm_ebitda"]) / 1000
df = df[['ev_ebitda']].dropna()
if plot:
_plots.line_plot(df, self.ticker, self.grupo, line=' EV/EBITDA ')
return df
def ev_ebit(self, start_period="all", plot=False):
"""
Creates a dataframe with the enterprise value / ebit
information of the object.
Args:
start_period: string
plot: boolean
Returns: pandas dataframe
"""
ev = Ticker.enterprise_value(self, start_period=start_period)
if start_period not in ["last", "all"]:
start_period = pd.to_datetime(
start_period) + pd.DateOffset(months=-7)
ebit = Ticker.ebit(
self, quarter=False, ytd=False, ttm=True, start_period=start_period
)
df = ev.merge(ebit, how="outer", left_index=True, right_index=True)
df = df.ffill()
df["ev_ebit"] = (df["ev"] / df["ttm_ebit"]) / 1000
df = df[['ev_ebit']].dropna()
if plot:
_plots.line_plot(df, self.ticker, self.grupo, line=' EV/EBIT ')
return df
def bv_share(self, start_period="all"):
"""
Creates a dataframe with the book value per share information of the object.
Args:
start_period: string
Returns: pandas dataframe
"""
shares = Ticker.total_shares(self, start_period=start_period)
if start_period not in ["last", "all"]:
start_period = pd.to_datetime(
start_period) + pd.DateOffset(months=-7)
equity = Ticker.total_equity(self, start_period=start_period)
df = shares.merge(equity, how="outer",
left_index=True, right_index=True)
df = df.ffill()
df["bv_share"] = (df["total_equity"] / df["total_shares"]) * 1000
df = df[['bv_share']].dropna()
return df
def price_bv(self, start_period="all", plot=False):
"""
Creates a dataframe with the price / book value
information of the object.
Args:
start_period: string
plot: boolean
Returns: pandas dataframe
"""
prices = Ticker.prices(self, start_period=start_period)
bv = Ticker.bv_share(self, start_period=start_period)
p_bv = prices["price"] / bv["bv_share"]
p_bv.rename("p_bv", inplace=True)
if plot:
_plots.line_plot(pd.DataFrame(p_bv), self.ticker,
self.grupo, line=' Price/BV ')
return p_bv
def cagr_net_income(self, n_years=5):
"""
Return the compound annual growth rate of the net income of the object.
Args:
n_years: int
number of years to consider when calculating
Returns: float
"""
final_date = pd.read_sql(
f"""SELECT dt_fim_exerc
FROM dre
WHERE cnpj = '{self.cnpj}' AND
grupo_dfp = '{self.grupo}'
ORDER BY dt_fim_exerc DESC
LIMIT(1)""",
conn,
).values[0][0]
begin_date = pd.to_datetime(final_date) + pd.DateOffset(years=-n_years)
df = Ticker.net_income(
self, quarter=False, ytd=False, ttm=True, start_period=begin_date
)
cagr = (((df.iloc[-1][0] / df.iloc[0][0]) ** (1 / n_years)) - 1) * 100
return cagr
def cagr_revenue(self, n_years=5):
"""
Return the compound annual growth rate of the revenue of the object.
Args:
n_years: int
number of years to consider when calculating
Returns: float
"""
final_date = pd.read_sql(
f"""SELECT dt_fim_exerc
FROM dre
WHERE cnpj = '{self.cnpj}' AND
grupo_dfp = '{self.grupo}'
ORDER BY dt_fim_exerc DESC
LIMIT(1)""",
conn,
).values[0][0]
begin_date = pd.to_datetime(final_date) + pd.DateOffset(years=-n_years)
df = Ticker.revenue(
self, quarter=False, ytd=False, ttm=True, start_period=begin_date
)
cagr = (((df.iloc[-1][0] / df.iloc[0][0]) ** (1 / n_years)) - 1) * 100
return cagr
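    # Worked example of the CAGR formula above (illustrative numbers only): if TTM
    # revenue grows from 1,000 to 1,610 over n_years = 5, then
    #
    #   cagr = ((1610 / 1000) ** (1 / 5) - 1) * 100   # ~10.0 (% per year)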
def cfo(self, quarter=True, ytd=True, ttm=True, start_period="all", plot=False):
"""
Creates a dataframe with the cash flow from operations information of the object.
Args:
quarter: boolean
includes or not quarter values
ytd: boolean
includes or not year to date values
ttm: boolean
includes or not trailing twelve months value
start_period: string
plot: boolean
Returns: pandas dataframe
"""
if self.freq == "A":
quarter = False
ttm = False
begin_period = Ticker.get_begin_period(
self, function="cfo", start_period=start_period
)
begin_period = begin_period + pd.DateOffset(months=-12)
query = f"""SELECT dt_fim_exerc AS date, fiscal_quarter, vl_conta AS ytd_cfo
FROM dfc
WHERE cnpj = '{self.cnpj}'
AND grupo_dfp = '{self.grupo}'
AND dt_fim_exerc >= '{begin_period.date()}'
AND cd_conta = '6.01'
ORDER BY dt_fim_exerc"""
df = pd.read_sql(query, conn, index_col="date", parse_dates=['date'])
        df["quarter_cfo"] = df["ytd_cfo"] - df["ytd_cfo"].shift(1)
        df.loc[df["fiscal_quarter"] == 1, "quarter_cfo"] = df.loc[
            df["fiscal_quarter"] == 1, "ytd_cfo"
        ]
        if ttm:
            df["ttm_cfo"] = df["quarter_cfo"].rolling(
                window=4, min_periods=4).sum()
        if not quarter:
            df = df.drop(["quarter_cfo"], axis=1)
        if not ytd:
            df = df.drop(["ytd_cfo"], axis=1)
df = df[df.index >= begin_period + pd.DateOffset(months=12)]
df = df.drop(columns=["fiscal_quarter"])
if plot:
_plots.bar_plot(df, self.ticker, self.grupo, bars=' CFO (R$,000) ')
return df
def get_peers(self):
"""
Returns the peer companies of the company calling the method.
Based on sector, subsector and segment.
Returns: list
"""
query = f"""SELECT ticker
FROM tickers
WHERE
sector = '{self.sector}' AND
subsector = '{self.subsector}' AND
segment = '{self.segment}'
ORDER BY ticker"""
df = pd.read_sql(query, conn)
return df["ticker"].to_list()
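    # Example usage (hypothetical ticker; assumes the local database behind `conn`
    # has been populated):
    #
    #   itub = Ticker('ITUB4')
    #   peers = itub.get_peers()           # tickers sharing sector/subsector/segment
    #   table = Ticker.statistics(peers)   # one row of valuation/profitability metrics per peer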
    @staticmethod
    def statistics(tickers):
"""
Returns a dataframe with several measures for each ticker in the list.
Args:
tickers: list
list with the tickers to compute the metrics.
In this list can be passed strings or Ticker Class objects
Returns: pandas dataframe
"""
to_compare = {}
for i in range(len(tickers)):
if isinstance(tickers[i], str):
to_compare[i] = {"obj": Ticker(tickers[i])}
else:
to_compare[i] = {"obj": tickers[i]}
statistics = pd.DataFrame()
for i in range(len(to_compare)):
p_e = Ticker.price_earnings(
to_compare[i]["obj"], start_period="last")
ev_ebitda = Ticker.ev_ebitda(
to_compare[i]["obj"], start_period="last")
p_bv = Ticker.price_bv(to_compare[i]["obj"], "last")
ev_ebit = Ticker.ev_ebit(to_compare[i]["obj"], start_period="last")
bv_share = Ticker.bv_share(
to_compare[i]["obj"], start_period="last")
eps = Ticker.eps(to_compare[i]["obj"], start_period="last")
gross_profit_margin = Ticker.gross_profit_margin(
to_compare[i]["obj"],
quarter=False,
ytd=False,
ttm=True,
start_period="last",
)
net_profit_margin = Ticker.net_profit_margin(
to_compare[i]["obj"],
quarter=False,
ytd=False,
ttm=True,
start_period="last",
)
roe = Ticker.roe(to_compare[i]["obj"], start_period="last")
roa = Ticker.roa(to_compare[i]["obj"], start_period="last")
debt_to_equity = Ticker.debt_to_equity(
to_compare[i]["obj"], start_period="last"
)
equity = Ticker.total_equity(
to_compare[i]["obj"], start_period="last")
assets = Ticker.total_assets(
to_compare[i]["obj"], start_period="last")
total_debt = Ticker.total_debt(
to_compare[i]["obj"], start_period="last")
cash_equi = Ticker.cash_equi(
to_compare[i]["obj"], start_period="last")
net_debt = Ticker.net_debt(
to_compare[i]["obj"], start_period="last")
mv = Ticker.market_value(to_compare[i]["obj"], start_period="last")
ev = Ticker.enterprise_value(
to_compare[i]["obj"], start_period="last")
ebitda = Ticker.ebitda(
to_compare[i]["obj"],
quarter=False,
ytd=False,
ttm=True,
start_period="last",
)
depre_amort = Ticker.depre_amort(
to_compare[i]["obj"],
quarter=False,
ytd=False,
ttm=True,
start_period="last",
)
ebit = Ticker.ebit(
to_compare[i]["obj"],
quarter=False,
ytd=False,
ttm=True,
start_period="last",
)
revenue = Ticker.revenue(
to_compare[i]["obj"],
quarter=False,
ytd=False,
ttm=True,
start_period="last",
)
ni = Ticker.net_income(
to_compare[i]["obj"],
quarter=False,
ytd=False,
ttm=True,
start_period="last",
)
cfo = Ticker.cfo(
to_compare[i]["obj"],
quarter=False,
ytd=False,
ttm=True,
start_period="last",
)
current_ratio = Ticker.current_ratio(
to_compare[i]["obj"], start_period="last"
)
ebitda_margin = Ticker.ebitda_margin(
to_compare[i]["obj"],
quarter=False,
ytd=False,
ttm=True,
start_period="last",
)
financial_leverage = Ticker.financial_leverage(
to_compare[i]["obj"], start_period="last"
)
df = pd.concat(
[
p_e,
ev_ebitda,
p_bv,
ev_ebit,
bv_share,
eps,
gross_profit_margin,
net_profit_margin,
roe,
roa,
debt_to_equity,
equity,
assets,
total_debt,
cash_equi,
net_debt,
mv,
ev,
ebitda,
ebitda_margin,
financial_leverage,
depre_amort,
ebit,
revenue,
ni,
cfo,
current_ratio,
],
axis=1,
)
df = df.reset_index()
df["cagr_net_income"] = Ticker.cagr_net_income(
to_compare[i]["obj"], n_years=5
)
df["cagr_revenue"] = Ticker.cagr_revenue(
to_compare[i]["obj"], n_years=5)
df["date"] = max(df["date"])
df = df.groupby("date").max()
df = df.reset_index()
df.index = [to_compare[i]["obj"].ticker]
df["sector"] = to_compare[i]["obj"].sector
df["subsector"] = to_compare[i]["obj"].subsector
df["segment"] = to_compare[i]["obj"].segment
            statistics = pd.concat([statistics, df], axis=0)
# License: Apache-2.0
from gators.encoders.target_encoder import TargetEncoder
from pandas.testing import assert_frame_equal
import pytest
import numpy as np
import pandas as pd
import databricks.koalas as ks
ks.set_option('compute.default_index_type', 'distributed-sequence')
@pytest.fixture
def data():
X = pd.DataFrame({
'A': ['Q', 'Q', 'Q', 'W', 'W', 'W'],
'B': ['Q', 'Q', 'W', 'W', 'W', 'W'],
'C': ['Q', 'Q', 'Q', 'Q', 'W', 'W'],
'D': [1, 2, 3, 4, 5, 6]})
y = pd.Series([0, 0, 0, 1, 1, 0], name='TARGET')
X_expected = pd.DataFrame({
'A': {0: 0.0,
1: 0.0,
2: 0.0,
3: 0.6666666666666666,
4: 0.6666666666666666,
5: 0.6666666666666666},
'B': {0: 0.0, 1: 0.0, 2: 0.5, 3: 0.5, 4: 0.5, 5: 0.5},
'C': {0: 0.25, 1: 0.25, 2: 0.25, 3: 0.25, 4: 0.5, 5: 0.5},
'D': {0: 1.0, 1: 2.0, 2: 3.0, 3: 4.0, 4: 5.0, 5: 6.0}})
obj = TargetEncoder().fit(X, y)
return obj, X, X_expected
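# How the expected values above follow from target (mean) encoding, e.g. for column
# 'A': rows with value 'W' have targets [1, 1, 0], so 'W' is encoded as 2/3 ≈ 0.6667,
# while rows with value 'Q' have targets [0, 0, 0] and are encoded as 0.0. Column 'D'
# is numerical and is therefore left unchanged (only cast to float).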
@pytest.fixture
def data_float32():
X = pd.DataFrame({
'A': ['Q', 'Q', 'Q', 'W', 'W', 'W'],
'B': ['Q', 'Q', 'W', 'W', 'W', 'W'],
'C': ['Q', 'Q', 'Q', 'Q', 'W', 'W'],
'D': [1, 2, 3, 4, 5, 6]})
y = pd.Series([0, 0, 0, 1, 1, 0], name='TARGET')
X_expected = pd.DataFrame({
'A': {0: 0.0,
1: 0.0,
2: 0.0,
3: 0.6666666666666666,
4: 0.6666666666666666,
5: 0.6666666666666666},
'B': {0: 0.0, 1: 0.0, 2: 0.5, 3: 0.5, 4: 0.5, 5: 0.5},
'C': {0: 0.25, 1: 0.25, 2: 0.25, 3: 0.25, 4: 0.5, 5: 0.5},
'D': {0: 1.0, 1: 2.0, 2: 3.0, 3: 4.0, 4: 5.0, 5: 6.0}}).astype(np.float32)
obj = TargetEncoder(dtype=np.float32).fit(X, y)
return obj, X, X_expected
@pytest.fixture
def data_no_cat():
X = pd.DataFrame(
np.zeros((6, 3)),
columns=list('ABC'),
)
y = pd.Series([0, 0, 0, 1, 1, 0], name='TARGET')
obj = TargetEncoder().fit(X, y)
return obj, X, X.copy()
@pytest.fixture
def data_ks():
X = ks.DataFrame({
'A': ['Q', 'Q', 'Q', 'W', 'W', 'W'],
'B': ['Q', 'Q', 'W', 'W', 'W', 'W'],
'C': ['Q', 'Q', 'Q', 'Q', 'W', 'W'],
'D': [1, 2, 3, 4, 5, 6]})
y = ks.Series([0, 0, 0, 1, 1, 0], name='TARGET')
X_expected = pd.DataFrame({
'A': {0: 0.0,
1: 0.0,
2: 0.0,
3: 0.6666666666666666,
4: 0.6666666666666666,
5: 0.6666666666666666},
'B': {0: 0.0, 1: 0.0, 2: 0.5, 3: 0.5, 4: 0.5, 5: 0.5},
'C': {0: 0.25, 1: 0.25, 2: 0.25, 3: 0.25, 4: 0.5, 5: 0.5},
'D': {0: 1.0, 1: 2.0, 2: 3.0, 3: 4.0, 4: 5.0, 5: 6.0}})
obj = TargetEncoder().fit(X, y)
return obj, X, X_expected
@pytest.fixture
def data_float32_ks():
X = ks.DataFrame({
'A': ['Q', 'Q', 'Q', 'W', 'W', 'W'],
'B': ['Q', 'Q', 'W', 'W', 'W', 'W'],
'C': ['Q', 'Q', 'Q', 'Q', 'W', 'W'],
'D': [1, 2, 3, 4, 5, 6]})
y = ks.Series([0, 0, 0, 1, 1, 0], name='TARGET')
X_expected = pd.DataFrame({
'A': {0: 0.0,
1: 0.0,
2: 0.0,
3: 0.6666666666666666,
4: 0.6666666666666666,
5: 0.6666666666666666},
'B': {0: 0.0, 1: 0.0, 2: 0.5, 3: 0.5, 4: 0.5, 5: 0.5},
'C': {0: 0.25, 1: 0.25, 2: 0.25, 3: 0.25, 4: 0.5, 5: 0.5},
'D': {0: 1.0, 1: 2.0, 2: 3.0, 3: 4.0, 4: 5.0, 5: 6.0}}).astype(np.float32)
obj = TargetEncoder(dtype=np.float32).fit(X, y)
return obj, X, X_expected
@pytest.fixture
def data_no_cat_ks():
X = ks.DataFrame(
np.zeros((6, 3)),
columns=list('ABC'),
)
y = ks.Series([0, 0, 0, 1, 1, 0], name='TARGET')
obj = TargetEncoder().fit(X, y)
return obj, X, X.to_pandas().copy()
def test_pd(data):
obj, X, X_expected = data
X_new = obj.transform(X)
assert_frame_equal(X_new, X_expected)
@pytest.mark.koalas
def test_ks(data_ks):
obj, X, X_expected = data_ks
X_new = obj.transform(X)
assert_frame_equal(X_new.to_pandas(), X_expected)
def test_pd_np(data):
obj, X, X_expected = data
X_numpy = X.to_numpy()
X_numpy_new = obj.transform_numpy(X_numpy)
X_new = pd.DataFrame(X_numpy_new, columns=X_expected.columns)
assert_frame_equal(X_new, X_expected)
@pytest.mark.koalas
def test_ks_np(data_ks):
obj, X, X_expected = data_ks
X_numpy = X.to_numpy()
X_numpy_new = obj.transform_numpy(X_numpy)
X_new = pd.DataFrame(X_numpy_new, columns=X_expected.columns)
assert_frame_equal(X_new, X_expected)
def test_float32_pd(data_float32):
obj, X, X_expected = data_float32
X_new = obj.transform(X)
assert_frame_equal(X_new, X_expected)
@pytest.mark.koalas
def test_float32_ks(data_float32_ks):
obj, X, X_expected = data_float32_ks
X_new = obj.transform(X)
assert_frame_equal(X_new.to_pandas(), X_expected)
def test_float32_pd_np(data_float32):
obj, X, X_expected = data_float32
X_numpy = X.to_numpy()
X_numpy_new = obj.transform_numpy(X_numpy)
X_new = pd.DataFrame(X_numpy_new, columns=X_expected.columns)
assert_frame_equal(X_new, X_expected)
@pytest.mark.koalas
def test_float32_ks_np(data_float32_ks):
obj, X, X_expected = data_float32_ks
X_numpy = X.to_numpy()
X_numpy_new = obj.transform_numpy(X_numpy)
X_new = pd.DataFrame(X_numpy_new, columns=X_expected.columns)
assert_frame_equal(X_new, X_expected)
def test_no_cat_pd(data_no_cat):
obj, X, X_expected = data_no_cat
X_new = obj.transform(X)
assert_frame_equal(X_new, X_expected)
@pytest.mark.koalas
def test_no_cat_ks(data_no_cat_ks):
obj, X, X_expected = data_no_cat_ks
X_new = obj.transform(X)
assert_frame_equal(X_new.to_pandas(), X_expected)
def test_no_cat_pd_np(data_no_cat):
obj, X, X_expected = data_no_cat
X_numpy = X.to_numpy()
X_numpy_new = obj.transform_numpy(X_numpy)
X_new = pd.DataFrame(X_numpy_new, columns=X_expected.columns)
    assert_frame_equal(X_new, X_expected)
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
from sklearn.preprocessing import StandardScaler
from mabwiser.mab import MAB, LearningPolicy, NeighborhoodPolicy
from tests.test_base import BaseTest
class MABTest(BaseTest):
#################################################
# Test context free predict() method
################################################
def test_arm_list_int(self):
for lp in MABTest.lps:
self.predict(arms=[1, 2, 3],
decisions=[1, 1, 1, 2, 2, 2, 3, 3, 3],
rewards=[0, 0, 0, 0, 0, 0, 1, 1, 1],
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=True)
for lp in MABTest.para_lps:
self.predict(arms=[1, 2, 3],
decisions=[1, 1, 1, 2, 2, 2, 3, 3, 3],
rewards=[0, 0, 0, 0, 0, 0, 1, 1, 1],
learning_policy=lp,
context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],
[0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],
[0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3]],
contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],
seed=123456,
num_run=1,
is_predict=True)
def test_arm_list_str(self):
for lp in MABTest.lps:
self.predict(arms=["A", "B", "C"],
decisions=["A", "A", "A", "B", "B", "B", "C", "C", "C"],
rewards=[0, 0, 0, 0, 0, 0, 1, 1, 1],
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=True)
for lp in MABTest.para_lps:
self.predict(arms=["A", "B", "C"],
decisions=["A", "A", "A", "B", "B", "B", "C", "C", "C"],
rewards=[0, 0, 0, 0, 0, 0, 1, 1, 1],
learning_policy=lp,
context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],
[0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],
[0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3]],
contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],
seed=123456,
num_run=1,
is_predict=True)
def test_decision_series(self):
for lp in MABTest.lps:
self.predict(arms=[1, 2, 3],
decisions=pd.Series([1, 1, 1, 2, 2, 2, 3, 3, 3]),
rewards=[0, 0, 0, 0, 0, 0, 1, 1, 1],
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=True)
for lp in MABTest.para_lps:
self.predict(arms=[1, 2, 3],
decisions=pd.Series([1, 1, 1, 2, 2, 2, 3, 3, 3]),
rewards=[0, 0, 0, 0, 0, 0, 1, 1, 1],
context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],
[0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],
[0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3]],
contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=True)
def test_reward_series(self):
for lp in MABTest.lps:
self.predict(arms=[1, 2, 3],
decisions=[1, 1, 1, 2, 2, 2, 3, 3, 3],
                         rewards=pd.Series([0, 0, 0, 0, 0, 0, 1, 1, 1]),
# Package import
from __future__ import print_function, division
from warnings import warn
from nilmtk.disaggregate import Disaggregator
import os
import pickle
import pandas as pd
import numpy as np
from collections import OrderedDict
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
import random
import sys
import torch
from torchsummary import summary
import torch.nn as nn
import torch.utils.data as tud
from torch.utils.data.dataset import TensorDataset
from torch.utils.tensorboard import SummaryWriter
import time
# Fix the random seed to ensure the reproducibility of the experiment
random_seed = 10
random.seed(random_seed)
np.random.seed(random_seed)
torch.manual_seed(random_seed)
torch.cuda.manual_seed_all(random_seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
# Use cuda or not
USE_CUDA = torch.cuda.is_available()
class seq2point_Pytorch(nn.Module):
def __init__(self, sequence_length):
# Refer to "<NAME>, <NAME>, <NAME>, et al. Sequence-to-point learning with neural networks for non-intrusive load monitoring[C].The 32nd AAAI Conference on Artificial Intelligence"
super(seq2point_Pytorch, self).__init__()
self.seq_length = sequence_length
self.conv = nn.Sequential(
nn.ConstantPad1d((4, 5), 0),
nn.Conv1d(1, 30, 10, stride=1),
nn.ReLU(True),
nn.ConstantPad1d((3, 4), 0),
nn.Conv1d(30, 30, 8, stride=1),
nn.ReLU(True),
nn.ConstantPad1d((2, 3), 0),
nn.Conv1d(30, 40, 6, stride=1),
nn.ReLU(True),
nn.ConstantPad1d((2, 2), 0),
nn.Conv1d(40, 50, 5, stride=1),
nn.ReLU(True),
nn.ConstantPad1d((2, 2), 0),
nn.Conv1d(50, 50, 5, stride=1),
nn.ReLU(True)
)
self.dense = nn.Sequential(
nn.Linear(50 * sequence_length, 1024),
nn.ReLU(),
nn.Linear(1024, 1)
)
def forward(self, x):
x = self.conv(x)
x = self.dense(x.view(-1, 50 * self.seq_length))
return x.view(-1, 1)
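# Quick shape check for the network above (illustrative only; not part of the package):
#
#   net = seq2point_Pytorch(sequence_length=99)
#   x = torch.randn(16, 1, 99)        # (batch, channels, window)
#   net(x).shape                      # -> torch.Size([16, 1])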
def initialize(layer):
# Xavier_uniform will be applied to conv1d and dense layer, to be sonsistent with Keras and Tensorflow
if isinstance(layer,nn.Conv1d) or isinstance(layer, nn.Linear):
torch.nn.init.xavier_uniform_(layer.weight.data)
if layer.bias is not None:
torch.nn.init.constant_(layer.bias.data, val = 0.0)
def train(appliance_name, model, mains, appliance, epochs, batch_size, pretrain = False,checkpoint_interval = None, train_patience = 3):
# Model configuration
if USE_CUDA:
model = model.cuda()
if not pretrain:
model.apply(initialize)
summary(model, (1, mains.shape[1]))
# Split the train and validation set
train_mains,valid_mains,train_appliance,valid_appliance = train_test_split(mains, appliance, test_size=.2, random_state = random_seed)
# Create optimizer, loss function, and dataloader
optimizer = torch.optim.Adam(model.parameters(), lr = 1e-3)
loss_fn = torch.nn.MSELoss(reduction = 'mean')
train_dataset = TensorDataset(torch.from_numpy(train_mains).float().permute(0,2,1), torch.from_numpy(train_appliance).float())
train_loader = tud.DataLoader(train_dataset, batch_size = batch_size, shuffle = True, num_workers = 0, drop_last = True)
valid_dataset = TensorDataset(torch.from_numpy(valid_mains).float().permute(0,2,1), torch.from_numpy(valid_appliance).float())
valid_loader = tud.DataLoader(valid_dataset, batch_size = batch_size, shuffle = True, num_workers = 0, drop_last = True)
writer = SummaryWriter(comment='train_visual')
patience, best_loss = 0, None
for epoch in range(epochs):
# Earlystopping
if(patience == train_patience):
print("val_loss did not improve after {} Epochs, thus Earlystopping is calling".format(train_patience))
break
# train the model
model.train()
st = time.time()
for i, (batch_mains, batch_appliance) in enumerate(train_loader):
if USE_CUDA:
batch_mains = batch_mains.cuda()
batch_appliance = batch_appliance.cuda()
batch_pred = model(batch_mains)
loss = loss_fn(batch_appliance, batch_pred)
model.zero_grad()
loss.backward()
optimizer.step()
ed = time.time()
# Evaluate the model
model.eval()
with torch.no_grad():
cnt, loss_sum = 0, 0
for i, (batch_mains, batch_appliance) in enumerate(valid_loader):
if USE_CUDA:
batch_mains = batch_mains.cuda()
batch_appliance = batch_appliance.cuda()
batch_pred = model(batch_mains)
loss = loss_fn(batch_appliance, batch_pred)
loss_sum += loss
cnt += 1
            final_loss = loss_sum / cnt
# Save best only
if best_loss is None or final_loss < best_loss:
best_loss = final_loss
patience = 0
net_state_dict = model.state_dict()
path_state_dict = "./"+appliance_name+"_seq2point_best_state_dict.pt"
torch.save(net_state_dict, path_state_dict)
else:
patience = patience + 1
print("Epoch: {}, Valid_Loss: {}, Time consumption: {}s.".format(epoch, final_loss, ed - st))
# For the visualization of training process
for name,param in model.named_parameters():
writer.add_histogram(name + '_grad', param.grad, epoch)
writer.add_histogram(name + '_data', param, epoch)
writer.add_scalars("MSELoss", {"Valid":final_loss}, epoch)
# Save checkpoint
if (checkpoint_interval != None) and ((epoch + 1) % checkpoint_interval == 0):
checkpoint = {"model_state_dict": model.state_dict(),
"optimizer_state_dict": optimizer.state_dict(),
"epoch": epoch}
path_checkpoint = "./"+appliance_name+"_seq2point_{}_epoch.pkl".format(epoch)
torch.save(checkpoint, path_checkpoint)
def test(model, test_mains, batch_size = 512):
# Model test
st = time.time()
model.eval()
# Create test dataset and dataloader
batch_size = test_mains.shape[0] if batch_size > test_mains.shape[0] else batch_size
test_dataset = TensorDataset(torch.from_numpy(test_mains).float().permute(0,2,1))
test_loader = tud.DataLoader(test_dataset, batch_size = batch_size, shuffle = False, num_workers = 0)
with torch.no_grad():
for i, batch_mains in enumerate(test_loader):
batch_pred = model(batch_mains[0])
if i == 0:
res = batch_pred
else:
res = torch.cat((res, batch_pred), dim = 0)
ed = time.time()
print("Inference Time consumption: {}s.".format(ed - st))
return res.numpy()
class Seq2Point(Disaggregator):
def __init__(self, params):
self.MODEL_NAME = "Seq2Point"
self.models = OrderedDict()
self.chunk_wise_training = params.get('chunk_wise_training',False)
self.sequence_length = params.get('sequence_length',129)
self.n_epochs = params.get('n_epochs', 10 )
self.batch_size = params.get('batch_size',512)
self.appliance_params = params.get('appliance_params',{})
self.mains_mean = params.get('mains_mean',None)
self.mains_std = params.get('mains_std',None)
if self.sequence_length % 2 == 0:
            # The original raised an undefined SequenceLengthError; raise ValueError instead.
            raise ValueError("Sequence length should be odd!")
def partial_fit(self,train_main,train_appliances,pretrain = False, do_preprocessing=True, **load_kwargs):
# Seq2Point version
        # If no appliance-wise parameters are provided, compute them using the first chunk
if len(self.appliance_params) == 0:
self.set_appliance_params(train_appliances)
print("...............Seq2Point partial_fit running...............")
# Preprocess the data and bring it to a valid shape
if do_preprocessing:
train_main, train_appliances = self.call_preprocessing(
train_main, train_appliances, 'train')
train_main = pd.concat(train_main,axis=0)
train_main = train_main.values.reshape((-1,self.sequence_length,1))
new_train_appliances = []
for app_name, app_df in train_appliances:
app_df = pd.concat(app_df,axis=0)
app_df_values = app_df.values.reshape((-1,1))
new_train_appliances.append((app_name, app_df_values))
train_appliances = new_train_appliances
for appliance_name, power in train_appliances:
if appliance_name not in self.models:
print("First model training for ", appliance_name)
self.models[appliance_name] = seq2point_Pytorch(self.sequence_length)
# Load pretrain dict or not
if pretrain is True:
self.models[appliance_name].load_state_dict(torch.load("./"+appliance_name+"_seq2point_pre_state_dict.pt"))
model = self.models[appliance_name]
train(appliance_name, model, train_main, power, self.n_epochs, self.batch_size,pretrain,checkpoint_interval = 3)
# Model test will be based on the best model
self.models[appliance_name].load_state_dict(torch.load("./"+appliance_name+"_seq2point_best_state_dict.pt"))
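    # Rough usage sketch (appliance name and dataframes are placeholders; in practice
    # this class is driven through NILMTK's experiment API):
    #
    #   s2p = Seq2Point({'sequence_length': 99, 'n_epochs': 10, 'batch_size': 512})
    #   s2p.partial_fit([mains_df], [('fridge', [fridge_df])])
    #   predictions = s2p.disaggregate_chunk([test_mains_df])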
def disaggregate_chunk(self,test_main_list,model=None,do_preprocessing=True):
# Disaggregate (test process)
if do_preprocessing:
test_main_list = self.call_preprocessing(test_main_list, submeters_lst=None, method='test')
test_predictions = []
for test_main in test_main_list:
test_main = test_main.values
test_main = test_main.reshape((-1, self.sequence_length, 1))
            disaggregation_dict = {}
for appliance in self.models:
# Move the model to cpu, and then test it
model = self.models[appliance].to('cpu')
prediction = test(model, test_main)
prediction = self.appliance_params[appliance]['mean'] + prediction * self.appliance_params[appliance]['std']
valid_predictions = prediction.flatten()
valid_predictions = np.where(valid_predictions > 0, valid_predictions, 0)
df = pd.Series(valid_predictions)
                disaggregation_dict[appliance] = df
            results = pd.DataFrame(disaggregation_dict, dtype='float32')
test_predictions.append(results)
return test_predictions
def call_preprocessing(self, mains_lst, submeters_lst, method):
# Seq2Point Version
if method == 'train':
# Preprocess the main and appliance data, the parameter 'overlapping' will be set 'True'
mains_df_list = []
for mains in mains_lst:
new_mains = mains.values.flatten()
self.mains_mean, self.mains_std = new_mains.mean(), new_mains.std()
n = self.sequence_length
units_to_pad = n // 2
new_mains = np.pad(new_mains,(units_to_pad,units_to_pad),'constant',constant_values=(0,0))
new_mains = np.array([new_mains[i:i + n] for i in range(len(new_mains) - n + 1)])
new_mains = (new_mains - self.mains_mean) / self.mains_std
                mains_df_list.append(pd.DataFrame(new_mains))
# -*- coding: utf-8 -*-
import unittest
import pandas as pd
import pandas.testing as tm
import numpy as np
from pandas_xyz import algorithms as algs
class TestAlgorithms(unittest.TestCase):
def test_displacement(self):
"""Test out my distance algorithm with hand calcs."""
lon = pd.Series([0.0, 0.0, 0.0])
lon_ew = pd.Series([0.0, 1.0, 2.0])
lat = pd.Series([0.0, 0.0, 0.0])
lat_ns = pd.Series([0.0, 1.0, 2.0])
disp_ew = algs.ds_from_xy(lat, lon_ew)
self.assertIsInstance(disp_ew, pd.Series)
tm.assert_series_equal(
disp_ew,
            6371000 * 1.0 * np.pi / 180 * pd.Series([0, 1, 1]))
import os
import time
from collections import OrderedDict, defaultdict
import numpy as np
from scipy.stats import entropy as KL
import matplotlib
matplotlib.use('Agg')
matplotlib.rcParams['agg.path.chunksize'] = 100000
import matplotlib.pyplot as plt
from matplotlib import cm
import seaborn as sns
import pandas as pd
import torch
from torch import nn, optim
from torch.nn import functional as F
from torchvision import transforms
from utils import display_progress
import warnings
warnings.simplefilter("ignore")
def normalize(influences):
"""Normalize influences to [-1,1]"""
maximum = influences.max()
minimum = influences.min()
assert maximum > 0
if minimum < -maximum:
scale = -minimum
else:
scale = maximum
return influences / scale
def cos(a, b):
return np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b))
def get_auc_score(inds, flip_ind, rescale=False):
"""
Compute AUC score of outlier detector
Arguments:
inds: list, indices of data in outlier score order (e.g. self-inf)
flip_ind: list, indices of ground-truth outliers
rescale: bool, whether to rescale to perfect 0-1
Returns:
AUC: float between 0 and 1, the larger the better
rates: list of discover rates at each fraction of training data checked
# positions: list of ranks of ground-truth outliers
"""
set_flip_ind = set(flip_ind)
N = len(inds) # number of training samples
Nflip = len(flip_ind) # number of flipped samples
rates = [0.0 for _ in range(N)]
for k in range(N):
rates[k] = (inds[k] in set_flip_ind) / float(Nflip) + (rates[k-1] if k > 0 else 0)
if rescale:
for k in range(Nflip):
rates[k] *= Nflip / (k + 1.0)
# positions = [i for i, ind_x in enumerate(inds) if ind_x in set_flip_ind]
# auc = 1 - sum(positions) / (len(flip_ind) * len(inds))
auc = np.mean(rates)
return auc, rates
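# Small illustration of the discovery-rate bookkeeping above (made-up ranking):
#
#   inds = [3, 0, 2, 1]      # training indices sorted by decreasing self-influence
#   flip_ind = [3, 1]        # ground-truth outliers
#   # -> rates = [0.5, 0.5, 0.5, 1.0] and auc = np.mean(rates) = 0.625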
def self_inf_distribution_by_elbo(path, dataset, data_loader, influences, loss_fn):
"""
Plot distribution of self influences vs elbo
Arguments:
path: str, figure output path
dataset: str, dataset name
data_loader: pytorch dataloader
influences: np array (n_train, )
loss_fn: loss function
"""
print("Computing self influence distribution by ELBO")
influences = normalize(influences)
influences = influences.reshape((-1,))
# get losses
losses = []
for i, z in enumerate(data_loader.dataset):
if dataset[:5] in ['mnist', 'cifar']:
z = z[0]
with torch.no_grad():
losses.append(-loss_fn(z))
display_progress('Computing loss:', i, len(data_loader))
losses = np.array(losses)
# plot scatter
from sklearn.preprocessing import minmax_scale
from sklearn.linear_model import LinearRegression
model = LinearRegression()
model.fit(losses.reshape((-1,1)), influences.reshape((-1,1)))
R2 = model.score(losses.reshape((-1,1)), influences.reshape((-1,1)))
x_new = np.linspace(min(losses), max(losses), 100)
y_new = model.predict(x_new[:, np.newaxis])
fig, ax = plt.subplots(figsize=(4,2))
cmap = getattr(cm, 'plasma_r', cm.hot_r)
s = 0.5 if len(data_loader) <= 5000 else 0.1
ax.scatter(losses, influences, s=s, alpha=0.8, c=cmap(minmax_scale(losses)))
ax.plot(x_new, y_new, linestyle='--', label=r'linear reg ($R^2=${:.2f})'.format(R2))
ax.set_xlabel(r'$-\ell_{\beta}(x_i)$')
ax.set_ylabel(r'VAE-TracIn($x_i$, $x_i$)')
plt.legend(loc=1)
plt.locator_params(axis='x', nbins=2)
plt.tight_layout()
plt.savefig(os.path.join(path, 'selfinf_byELBO.jpg'), dpi=400)
# plot density
    df = pd.DataFrame({'x': losses, 'y': influences})
import numpy as np
import pandas as pd
from multiprocessing import Pool
class Neighborhood:
_names = {}
_namemap = {}
# We don't expect for there to be a large quantity of Neighborhoods.
# Therefore, this class isn't designed with efficiency in mind
# per se.
def __init__(self):
pass
@staticmethod
def update(dictionary: dict):
for key, items in dictionary.items():
if key not in Neighborhood._names:
# TODO: It may be necessary to check if `item`
# is unique for `_get_name_map()` to function
# properly.
Neighborhood._names[key] = set()
#elif item not in Neighborhood._names[key]:
#for item in items:
Neighborhood._names[key] |= set(items)
Neighborhood._namemap = Neighborhood._get_name_map()
@staticmethod
def _get_name_map():
d = {}
for key, items in Neighborhood._names.items():
for item in items:
d[item] = key
d[key] = key
return d
@staticmethod
def translate(series):
        # TODO: Reimplement the `missing` functionality here.
missing = [x for x in series if x not in Neighborhood._namemap]
if len(missing) > 0:
print(
"Could not find: ",
missing
)
return [Neighborhood._namemap[x] if x in Neighborhood._namemap else None for x in series]
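    # Example (hypothetical names):
    #
    #   Neighborhood.update({'Downtown': ['CBD', 'City Centre']})
    #   Neighborhood.translate(pd.Series(['CBD', 'Downtown', 'Uptown']))
    #   # -> ['Downtown', 'Downtown', None] (and 'Uptown' is reported as missing)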
@staticmethod
def translate_matrix(matrix):
matrix.index = Neighborhood.translate(matrix.index)
matrix.columns = Neighborhood.translate(matrix.columns)
if None in matrix.index:
matrix = matrix.drop([None])
if None in matrix.columns:
matrix = matrix.drop([None], axis=1)
return matrix
class World:
"""
    World is used to store and process information about the world
and an array of generated agents.
"""
def __init__(self, processes=1, index=[]):
self._index = set(index)
        self.data = pd.DataFrame(index=index)
import numpy as np
import pandas as pd
import pdb
import os
import shutil
import argparse
from matplotlib import pyplot as plt
from scipy.io import loadmat, savemat
from sklearn.metrics import roc_curve
from sklearn.metrics import roc_auc_score
from utils import metric_scores
from utils import load_config_file, ucf_crime_old_cls_names
def prepare_gt(gtpth):
gt_list = []
fps = 30
f = open(gtpth, 'r')
for line in f.readlines():
line2 = []
line = line.replace('.mp4', '')
line = line.split(' ')
# Skip Normal videos
if line[0].startswith('Normal'):
continue
gt_list.append(line)
    df = pd.DataFrame(gt_list)
# Standard libraries
import pandas as pd
from statsmodels.distributions.empirical_distribution import ECDF
from scipy.stats import gamma
import numpy as np
import pystan
import pickle
from matplotlib import pyplot as plt
import arviz as az
from datetime import datetime
from datetime import timedelta
# Project-specific helper functions
from services import getDataCol
from services import getDataWorld
from methods import poly
from methods import ecdf
def findIndex(d1):
#Dia en el que se encontro el primer caso de contagio.
index = d1.query('cases>0').index.tolist()[0]
#Encontrar el primer dia en el que las muertes acumuladas suman 10;
d1['cumdeaths'] = d1['deaths'].cumsum()
index1 = d1.query('cumdeaths>10').index.tolist()[0]
index2 = index1 - 30
return index, index1, index2
#----------------------------------------------------------------------
countries = [
"Denmark",
"Italy",
"Germany",
"Spain",
"United_Kingdom",
"France",
"Norway",
"Belgium",
"Austria",
"Sweden",
"Switzerland",
"Colombia"
]
measurements = ['schools_universities', 'travel_restrictions',
'public_events', 'sport', 'lockdown',
'social_distancing_encouraged', 'self_isolating_if_ill']
# Number of elements
nMeasurements = len(measurements)
nCountries = len(countries)
# ## Reading all data
d = getDataWorld()  # Read the data from the internet.
## get CFR
cfrByCountry = pd.read_csv("data/weighted_fatality.csv")
cfrByCountry['country'] = cfrByCountry.iloc[:,1]
cfrByCountry.loc[cfrByCountry.country == 'United Kingdom', 'country'] = 'United_Kingdom'  # Replace the space with an underscore.
# Get Serial interval distribution g with a mean of 6.5 days
serialInterval = pd.read_csv("data/serial_interval.csv")
#Get
covariates = pd.read_csv('data/interventionsMod.csv')  # Content was cleaned beforehand to remove irrelevant information.
# Converts strings to dates
for measi in measurements:
covariates[measi] = pd.to_datetime(covariates[measi])
## make all interventions that happen after lockdown share the lockdown date
for measi in measurements:
idx = covariates[measi] > covariates['lockdown']
covariates.loc[idx, measi] = covariates.loc[idx, 'lockdown']
p = covariates.shape[1] - 1
forecast = 0
# ------------------------------------------------------------------
#Forecaste length: This number include the days with data and the days to forecast.
N2 = 80 # Increase this for a further forecast
# ------------------------------------------------------------------
dates = {}
reported_cases = {}
deaths_by_country = {}
# Compute orthogonal polynomials.
x1x2 = poly(np.arange(1,N2+1), p=2)
# dict like data for stan model
stan_data = {
'M': len(countries),
'N': [],
'p': p,
'x1': x1x2[:,0],
'x2': x1x2[:,1],
'y': [],
'covariate1': [],
'covariate2': [],
'covariate3': [],
'covariate4': [],
'covariate5': [],
'covariate6': [],
'covariate7': [],
'deaths': [],
'f': [],
'N0': 6, # N0 = 6 to make it consistent with Rayleigh
'cases': [],
'LENGTHSCALE': 7,
'SI': serialInterval['fit'][:N2].values,
'EpidemicStart': []
}
for country in countries:
# Get CFR by country, in case there's no value use the average
CFR = cfrByCountry.weighted_fatality[cfrByCountry.country == country].values
if CFR.shape[0] == 0:
        print('%s has no CFR, the average value will be used' % country)
CFR = cfrByCountry.weighted_fatality.mean()
else:
CFR = CFR[0]
covariates1 = covariates[covariates.Country == country].loc[:, measurements]
    # Find the first day with at least one case.
d1 = pd.DataFrame(d[d['countriesAndTerritories'] == country])
d1['date'] = pd.to_datetime(d1['dateRep'], format = '%d/%m/%Y')
d1 = d1.sort_values(by='date').reset_index()
# First day with cases, first day with more than 10 deaths, start of the Epidemic
index, index1, index2 = findIndex(d1)
if index2 < 0:
oneMonth = pd.DataFrame({'date': [d1.loc[index, 'date'] - timedelta(days = i) for i in range(1,16)],
'cases': 15 * [0],
'deaths': 15 * [0],
'cumdeaths': 15 * [0],
'countriesAndTerritories': 15 * [country] })
d1 = (d1.loc[index:, ].append(oneMonth, ignore_index=True))
d1 = d1.sort_values(by='date').reset_index()
index, index1, index2 = findIndex(d1)
    print("First non-zero case is on day %d, and 30 days before the 10-death threshold is day %d" % (index + 1, index2 + 1))
d1 = d1.iloc[index2:, ]
stan_data['EpidemicStart'].append(index1+1-index2)
for covariatei in measurements:
d1[covariatei] = 1 * (pd.to_datetime(d1.dateRep, format= '%d/%m/%Y') >= pd.to_datetime(covariates1[covariatei].values[0], format= '%d/%m/%Y'))
    # Store the dates.
dates[country] = d1.date.values
# hazard estimation
N = d1.shape[0]
print("%s has %d days of data" %(country, N))
forecast = N2 - N
if forecast < 0:
print("ERROR!!!! increasing N2")
N2 = N
forecast = N2 - N
h = np.zeros(N2) # discrete hazard rate from time t = 1, ..., 100
# The infection-to-onset distribution is Gamma distributed with mean 5.1 days and coefficient of variation 0.86.
mean1 = 5.1
cv1 = 0.86
shape1 = cv1**(-2)
scale1 = mean1/shape1
x1 = np.random.gamma(shape1, scale = scale1, size = int(5e6))
# The onset-to-death distribution is also Gamma distributed with a mean of 18.8 days and a coefficient of variation 0.45
mean2 = 18.8
cv2 = 0.45
shape2 = cv2**(-2)
scale2 = mean2/shape2
x2 = np.random.gamma(shape2, scale = scale2, size = int(5e6))
# The infection-to-death distribution is therefore given by:
# π𝑚∼𝑖f𝑟𝑚⋅(Gamma(5.1,0.86)+Gamma(18.8,0.45))
f = ECDF(x1+x2)
convolution = lambda u: (CFR * f(u))
h[0] = (convolution(1.5) - convolution(0))
for i in range(1, h.size):
        h[i] = (convolution((i + 1)+.5) - convolution((i+1)-.5)) / (1-convolution((i+1)-.5))  # add 1 to i because of Python's 0-based indexing.
s = np.zeros(N2)
s[0] = 1
for i in range(1, N2):
s[i] = s[i-1]*(1-h[i-1])
f = s * h
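    # s[i] is the probability of surviving (not dying) through day i and f[i] = s[i] * h[i]
    # the probability mass of dying on day i, so f is the discretised, CFR-scaled
    # infection-to-death distribution passed to the Stan model. As a rough sanity check
    # (illustrative, not executed here): f.sum() should be close to CFR once N2 is much
    # larger than the ~24-day mean infection-to-death delay.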
y = np.hstack([d1['cases'].to_numpy(), -1 * np.ones(forecast)])
reported_cases[country] = d1.cases.to_numpy()
deaths = np.hstack([d1['deaths'].to_numpy(), -1 * np.ones(forecast)])
cases = np.hstack([d1['cases'].to_numpy(), -1 * np.ones(forecast)])
deaths_by_country[country] = d1['deaths'].to_numpy()
covariates2 = pd.DataFrame(d1.loc[:, measurements])
    covariates2 = pd.concat([covariates2, covariates2.tail(1).iloc[np.full(forecast,0)]], ignore_index = True)  # pad up to N2 by repeating the last row.
# append data
stan_data['N'].append(N)
stan_data['y'].append(y[0]) # just the index case!
# Store data
stan_data['covariate1'].append(covariates2.iloc[:,0].values.tolist())
stan_data['covariate2'].append(covariates2.iloc[:,1].values.tolist())
stan_data['covariate3'].append(covariates2.iloc[:,2].values.tolist())
stan_data['covariate4'].append(covariates2.iloc[:,3].values.tolist())
stan_data['covariate5'].append(covariates2.iloc[:,4].values.tolist())
stan_data['covariate6'].append(covariates2.iloc[:,5].values.tolist())
stan_data['covariate7'].append(covariates2.iloc[:,6].values.tolist())
stan_data['f'].append(f.tolist())
stan_data['deaths'].append(deaths.tolist())
stan_data['cases'].append(cases.tolist())
stan_data['N2'] = N2
stan_data['x']=list(range(1,N2+1))
# The data must have shape N2 x M
for i in range(1,8):
stan_data['covariate'+str(i)] = (np.array(stan_data['covariate'+str(i)]).T)
stan_data['cases'] = (np.array(stan_data['cases'], dtype= 'int').T)
stan_data['deaths'] = (np.array(stan_data['deaths'], dtype= 'int').T)
stan_data['f'] = np.array(stan_data['f']).T
stan_data['N'] = np.array(stan_data['N']).T
stan_data['covariate2'] = 0*stan_data['covariate2'] # remove travel bans
stan_data['covariate4'] = 0*stan_data['covariate5'] # remove sport
stan_data['covariate2'] = 1* stan_data['covariate7'] # self-isolating if ill
# create the `any intervention` covariate
stan_data['covariate4'] = 1*((stan_data['covariate1'] +
stan_data['covariate3'] +
stan_data['covariate5'] +
stan_data['covariate6'] +
stan_data['covariate7'])>0)
stan_data['covariate5'] = stan_data['covariate5']
stan_data['covariate6'] = stan_data['covariate6']
stan_data['covariate7'] = 0 # models should only take 6 covariates
# Load model
sm = pickle.load(open('stan-models/base.pkl', 'rb'))
# Fit models
fit = sm.sampling(data = stan_data, iter=200, warmup=100,chains=4, thin=4, control= {'adapt_delta': 0.90})
# fit = sm.sampling(data = stan_data, iter=10, warmup=2,chains=2, thin=2, control= {'adapt_delta': 0.90})
# Extract information from fited model
out = fit.extract()
prediction = out['prediction']
estimateddeaths = out['E_deaths']
estimateddeathsCF = out['E_deaths0']
plot_labels = ["School Closure",
"Self Isolation",
"Public Events",
"First Intervention",
"Lockdown", 'Social distancing']
alpha = pd.DataFrame(out['alpha'], columns=plot_labels)
##############################################################################################################
# This script reads in the two gold standards Litbank and Dekker et al and compares them to
# the .token files created by booknlp
# The data used here consists of only the 12 overlapping novels with their respective overlapping
# parts of the text.
#
# Output:
# The script appends a csv with the Precision, Recall, and F1 for the respective book and respective tool
# and stores the false positives, false negatives, and correct detections
#
#
# BookNLP recognises the following NER tags/types
# (PERSON, NUMBER, DATE, DURATION, MISC, TIME, LOCATION, ORDINAL, MONEY, ORGANIZATION, SET, O)
# Dekker et al.'s collection covers the entity person (i.e. I-PERSON)
# LitBank covers six of the ACE 2005 categories:
# People (PER), Facilities (FAC), Geo-political entities (GPE), Locations (LOC), Vehicles (VEH), Organizations (ORG)
#
# Therefore we map the BookNLP entities to those of Dekker et al. in the following way:
# O stays O and PERSON turns to PER. We ignore the rest, since we focus on character detection in particular.
##############################################################################################################
import pandas as pd
import csv
import sys
import re
# import own script
from hyphens import *
from calculate_metrics import *
books_mapping = {'AliceInWonderland': '11_alices_adventures_in_wonderland',
'DavidCopperfield': '766_david_copperfield',
'Dracula': '345_dracula',
'Emma': '158_emma',
'Frankenstein': '84_frankenstein_or_the_modern_prometheus',
'HuckleberryFinn': '76_adventures_of_huckleberry_finn',
'MobyDick': '2489_moby_dick',
'OliverTwist': '730_oliver_twist',
'PrideAndPrejudice': '1342_pride_and_prejudice',
'TheCallOfTheWild': '215_the_call_of_the_wild',
'Ulysses': '4300_ulysses',
'VanityFair': '599_vanity_fair'}
passed_variable = sys.argv[1]
booknlp_filepath = "/mnt/book-nlp/data/tokens/overlap/" + str(passed_variable) + ".tokens"
dekker_filepath = "/mnt/data/gold_standard/overlap/dekker_et_al/" + str(passed_variable) + ".gs"
litbank_filepath = "/mnt/data/gold_standard/overlap/litbank/" + books_mapping.get(str(passed_variable)) + ".tsv"
#######################################
# get current annotated book - BookNLP
#######################################
current_file = pd.read_csv(booknlp_filepath, sep='\t', quoting=csv.QUOTE_NONE, usecols=["originalWord","ner"])
current_file = current_file.rename(columns={"originalWord": "original_word", "ner": "booknlp"})
# alternatively convert all PERSON to PER
current_file["booknlp"].replace('PERSON', 'PER', inplace = True)
# replace rest of entities with O
current_file.loc[~current_file["booknlp"].isin(['PER']), "booknlp"] = "O"
# correct hyphened words from booknlp (note: stanford CoreNLP only splits on "most hyphens")
current_file = correct_hyphened(current_file)
# reset the index to avoid all parts of hyphened words having same index
current_file = current_file.reset_index()
del current_file['index']
# remove chapter separation with stars
if str(passed_variable) == "AliceInWonderland":
current_file = current_file.drop(current_file.index[1911:1931])
current_file = current_file.reset_index(drop=True)
#####################################
# get gold standard - Dekker
#####################################
gs_d = pd.read_csv(dekker_filepath, sep=' ', quoting=csv.QUOTE_NONE, usecols=[0,1], names=["original_word", "gs"])
gs_d = correct_hyphened(gs_d)
gs_d.loc[~gs_d["gs"].isin(['I-PERSON']), "gs"] = "O"
# compare if the output file and the gold standard are the same
try:
for index, word, ner in current_file.itertuples(index=True):
if word != gs_d["original_word"].loc[index]:
if (word == '(' and gs_d["original_word"].loc[index] == '-LRB-') or (word == ')' and gs_d["original_word"].loc[index] == '-RRB-') or (word == '[' and gs_d["original_word"].loc[index] == '-LSB-') or (word == ']' and gs_d["original_word"].loc[index] == '-RSB-'):
pass
elif (word in ["‘","-","' \" '",'"',"“",'-',"”","'",",","’"]) and (gs_d["original_word"].loc[index] in ['`',"``","--","''","'",'--']):
pass
elif (word == "—") and (gs_d["original_word"].loc[index] == '--'):
#print("Warning ", index, " '", word, "' in current is not the same as '", gs_d["original_word"].loc[index], "'in gs")
pass
elif (word == "'t" and gs_d["original_word"].loc[index] == "`") or (word == "is" and gs_d["original_word"].loc[index] == "tis") or (word == "honorable" and gs_d["original_word"].loc[index] == "honourable") or (word == "honor" and gs_d["original_word"].loc[index] == "honour"):
pass
elif (re.match(r"[a-zA-Z]*’[a-zA-Z]+", word)) and (re.match(r"[a-zA-Z]*'[a-zA-Z]+", gs_d["original_word"].loc[index])):
pass
elif (re.match(r"[a-zA-Z]*'[a-zA-Z]+", word)) and (re.match(r"[a-zA-Z]*’[a-zA-Z]+", gs_d["original_word"].loc[index])):
pass
else:
print("Position ", index, " '", word, "' in current is not the same as '", gs_d["original_word"].loc[index], "'in gs")
break
# Note: some original texts are longer than the annotated files; we stop the comparison at that length
except KeyError:
print("Reached end of annotated file. Cropped currect_file.")
print("Last word ", word, " in line ", index)
current_file = current_file.truncate(after=index-1)
pass
# merge BookNLP and Dekker et al.
merged_booknlp_dekkeretal = pd.merge(current_file, gs_d, left_index=True, right_index=True)
########################################################
# run evaluation using gold standard Dekker et al.
########################################################
# holds the line range of the currently detected named entity
range_ne = []
# set booleans to keep track of false positives/negatives for entities spanning multiple rows
false_negative_booknlp = False
false_positive_booknlp = False
# lists to hold mistakes in the detection (used for the recognition of challenges)
list_false_negatives = []
list_false_positives = []
list_correct = []
for index, original_word_x, booknlp, original_word_y, gs in merged_booknlp_dekkeretal.itertuples(index=True):
'''
if original_word_x != original_word_y:
print ("Mismatch in row ", index, ": ", original_word_x , " is not the same as ", original_word_y)
break
'''
if original_word_x != original_word_y:
if (original_word_y == '-LRB-' and original_word_x == '(') or (original_word_y == '-RRB-' and original_word_x == ')') or (original_word_y == '-LSB-' and original_word_x == '[') or (original_word_y == '-RSB-' and original_word_x == ']'):
pass
elif (original_word_y in ['`',"``","--","''","'",'--']) and (original_word_x in ["‘","' \" '",'"',"“",'-',"”","'",",","’","—","_"]):
pass
elif (re.match(r"[a-zA-Z]*'[a-zA-Z]+", original_word_y)) and (re.match(r"[a-zA-Z]*’[a-zA-Z]+", original_word_x)):
pass
elif (re.match(r"[a-zA-Z]*’[a-zA-Z]+", original_word_y)) and (re.match(r"[a-zA-Z]*'[a-zA-Z]+", original_word_x)):
pass
elif (original_word_y == "`" and original_word_x == "'t") or (original_word_y == "tis" and original_word_x == "is") or (original_word_y == "honourable" and original_word_x == "honorable") or (original_word_y == "honour" and original_word_x == "honor"):
pass
else:
print ("Mismatch in row ", index, ": ", original_word_x , " is not the same as ", original_word_y)
break
if gs == 'I-PERSON':
if false_positive_booknlp == True:
list_false_positives.append(range_ne)
range_ne = []
false_positive_booknlp = False
range_ne.append(index)
continue
else:
range_ne.append(index)
elif gs == 'O':
if booknlp == 'PER':
            if false_positive_booknlp == False: # first occurrence of a wrong detection
                if len(range_ne) > 0 and false_negative_booknlp == False: # there was a correct detection immediately before
list_correct.append(range_ne)
range_ne = []
false_positive_booknlp = True
range_ne.append(index)
continue
elif false_positive_booknlp == True:
range_ne.append(index)
continue
elif booknlp == 'O' and false_positive_booknlp == True:
list_false_positives.append(range_ne)
range_ne = []
false_positive_booknlp = False
continue
elif len(range_ne) > 0 and false_positive_booknlp == False: #if it is the end of a gold standard entity
for line in range_ne:
if merged_booknlp_dekkeretal.iloc[line]['booknlp'] == 'PER':
continue
else: # if booknlp didn't detect it
false_negative_booknlp = True
if false_negative_booknlp == True:
list_false_negatives.append(range_ne)
false_negative_booknlp = False
else:
list_correct.append(range_ne)
range_ne = []
elif booknlp == 'O' and false_negative_booknlp == True:
list_false_negatives.append(range_ne)
false_negative_booknlp = False
range_ne = []
elif booknlp == 'O' and false_negative_booknlp == False and false_positive_booknlp == False:
continue
else:
# add error handling in case of a mistake
print ("1. Semantical mistake in analysing line ", index)
break
else:
# add error handling in case of a mistake
print ("Semantical mistake in analysing line ", index)
break
print("list_false_negatives",list_false_negatives)
for i in list_false_negatives:
print(merged_booknlp_dekkeretal.iloc[i[0]-1:i[-1]+2])
print("list_false_positives",list_false_positives)
for i in list_false_positives:
print(merged_booknlp_dekkeretal.iloc[i[0]-1:i[-1]+2])
print("list_correct",list_correct)
for i in list_correct:
print(merged_booknlp_dekkeretal.iloc[i[0]-1:i[-1]+2])
#####################################################################
# get evaluation metrics and save to files (BookNLP & Dekker et al.)
#####################################################################
path_evaluation = '/mnt/Git/results/overlap/booknlp_dekkeretal_evaluation.csv'
path_fp = '/mnt/Git/results/overlap/booknlp_dekkeretal_false_positive.csv'
path_fn = '/mnt/Git/results/overlap/booknlp_dekkeretal_false_negative.csv'
# TODO: comment this out in the end
get_metrics(merged_booknlp_dekkeretal, list_correct, list_false_positives, list_false_negatives, path_evaluation, path_fp, path_fn, passed_variable)
########################################################################################################################################################################
##################################
# get gold standard - Litbank
##################################
gs_lb = pd.read_csv(litbank_filepath, sep='\t', quoting=csv.QUOTE_NONE, usecols=[0,1], names=["original_word", "gs"])
"""
@name: syn_size_violoin.py
@description:
plot distribution of synapse sizes
@author: <NAME>
@email: "cabrittin"+ <at>+ "gmail"+ "."+ "com"
@date: 2020-03
"""
import os
import argparse
from configparser import ConfigParser,ExtendedInterpolation
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib as mpl
from scipy import stats
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
from matplotlib.ticker import MultipleLocator
from connectome.load import from_db
from connectome.format_graphs import *
#from networks.classification_measures import *
import ioaux
mpl.rcParams['xtick.labelsize'] = 5
mpl.rcParams['ytick.labelsize'] = 5
#CONFIG = os.environ['CONFIG']
CONFIG = 'configs/config.ini'
def clean_graph(Ref,A,idx=4):
H = nx.Graph()
if Ref.is_directed(): H = nx.DiGraph()
for (a,b) in Ref.edges():
if A.has_edge(a,b) and Ref[a][b]['id'] <= idx:
H.add_edge(a,b,weight=Ref[a][b]['weight'],id=Ref[a][b]['id'])
return H
def build_data(Ref,label):
data = []
for (a,b) in Ref.edges():
data.append([Ref[a][b]['id'],Ref[a][b]['weight'],label])
return data
def cohend(x,y):
mu1 = np.mean(x)
mu2 = np.mean(y)
n1 = len(x) - 1
n2 = len(y) - 1
sig1 = np.std(x,ddof=1)
sig2 = np.std(y,ddof=1)
s = np.sqrt((n1*sig1**2 + n2*sig2**2)/(n1+n2))
return abs(mu1 - mu2) / s
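# Worked example with hypothetical numbers (not data from this analysis): both groups
# below have sample sd 1.0 and their means differ by 1.0, so Cohen's d is 1.0.
assert abs(cohend([1, 2, 3], [2, 3, 4]) - 1.0) < 1e-9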
def run(_cfg,fout=None,source_data=None):
cfg = ConfigParser(interpolation=ExtendedInterpolation())
cfg.read(_cfg)
A,C,E,W,Z,Wg = {},{},{},{},{},{}
for i in range(1,5):
A[i] = nx.read_graphml(cfg['refgraphs']['adj']%i)
C[i] = nx.read_graphml(cfg['refgraphs']['chem']%i)
E[i] = nx.read_graphml(cfg['refgraphs']['gap']%i)
W[i] = nx.read_graphml(cfg['zhen']['white_chem']%i)
Wg[i] = nx.read_graphml(cfg['zhen']['white_gap']%i)
Z[i] = nx.read_graphml(cfg['zhen']['zhen_chem']%i)
D = nx.DiGraph()
for (a,b) in C[4].edges():
if A[4].has_edge(a,b): D.add_edge(a,b)
print(D.number_of_edges())
print(C[4].number_of_edges(),C[4].number_of_nodes())
#print(sorted(C[4].nodes()))
print(A[4].number_of_edges())
Ref = make_reference_graphs(C)#,remove=axon_nogo)
print(Ref.number_of_edges())
Ref = clean_graph(Ref,A[4])
print(Ref.number_of_edges())
chem = build_data(Ref,'Cook2019')
#for c in chem: if c[1] < 0: print(c)
#cf = pd.DataFrame(data=chem,columns=['delta','# EM sections'])
wRef = make_reference_graphs(W)
ref = nx.DiGraph()
for (a,b) in wRef.edges():
if not Ref.has_edge(a,b): continue
ref.add_edge(a,b,weight=Ref[a][b]['weight'],id=wRef[a][b]['id'])
    ## Count synapses with only 1 EM section in Cook that are absent from White
bool_count = 0
not_white = 0
for (a,b) in Ref.edges():
notwhite = not ref.has_edge(a,b)
em1 = (Ref[a][b]['weight'] == 1)
if notwhite: not_white += 1
if notwhite and em1: bool_count += 1
print(f'Number of cook~white 1em: {bool_count}/{not_white}')
wchem = build_data(ref,'White1986')
#wf = pd.DataFrame(data=wchem,columns=['delta','# EM sections'])
zRef = make_reference_graphs(Z)
ref = nx.DiGraph()
for (a,b) in wRef.edges():
if not Ref.has_edge(a,b): continue
ref.add_edge(a,b,weight=Ref[a][b]['weight'],id=wRef[a][b]['id'])
zchem = build_data(ref,'Witvliet2020')
zf = pd.DataFrame(data=zchem,columns=['delta','# EM sections','Data source'])
chem = chem + wchem
    cf = pd.DataFrame(data=chem,columns=['delta','# EM sections','Data source'])
#!/usr/bin/env python3
import subprocess, os,time,gzip
import pandas as pd
import numpy as np
from functools import reduce
from .convertor import mergeToSample, calTNzcore, rmEntrez, tpmToFpkm, mapEm2Gene, formatClin, pick,formatDrug
from .outformat import storeData
import requests,json,re,io
from .setting import CLIN_INFO, Biospecimen_INFO, Biospecimen_MAP, CLIN_MAP, PAM50_PATH, DRUG_MAP
class GdcApi(object):
'''
    API for downloading files from the GDC
'''
    __slots__ = ["files_endpt", "data_endpt", "cancer", "parental_dir", "cases_endpt"]
def __init__(self, cancer, parental_dir, cases_endpt='https://api.gdc.cancer.gov/cases', data_endpt="https://api.gdc.cancer.gov/data", files_endpt="https://api.gdc.cancer.gov/files", **kwargs):
        ''' Initialize instance parameters
        Parameters
        ----------
        cancer : str
            Cancer type
        parental_dir : str
            Path to store data
        data_endpt : str, optional
            Endpoint used to download file content (the default is "https://api.gdc.cancer.gov/data")
        files_endpt : str, optional
            Endpoint used to search for file IDs (the default is "https://api.gdc.cancer.gov/files")
        '''
self.files_endpt = files_endpt
self.data_endpt = data_endpt
self.cancer = cancer
self.parental_dir = parental_dir
self.cases_endpt = cases_endpt
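    # Hypothetical usage sketch (illustrative only; the cancer code and target directory
    # are assumptions, not values used elsewhere in this module):
    #   api = GdcApi(cancer='brca', parental_dir='/tmp/gdc')
    #   clinical, err = api.getTableFromFiles('patient', sep='\t')
    #   if err is None:
    #       print(clinical.head())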
def _projFilter(self, data_type,method=None):
dtype_dict = {
"cnv_segment_somatic": "Masked Copy Number Segment",
"cnv_segment_all": "Copy Number Segment",
"masked_somatic_mutation":"Masked Somatic Mutation",
}
filters = {
"op": "and",
"content":[
{
"op": "in",
"content": {
"field": "files.data_type",
"value": [
dtype_dict[data_type]
]
}
},
{
"op": "in",
"content": {
"field": "cases.project.project_id",
"value": [
"TCGA-"+self.cancer.upper()
]
}
},
]
}
# specific for SNV on TCGA (Calling by four different tools)
if method != None:
filters['content'].append({
"op":"in",
"content":{
"field": "files.analysis.workflow_type",
"value":[
"{} Variant Aggregation and Masking".format(method)
]
}
})
params = {
"filters": json.dumps(filters),
"format": "JSON",
"size": "3000"
}
return params
def _nameFilter(self, data_type):
dtype_dict = {
'drug': "nationwidechildrens.org_clinical_drug_{}.txt".format(self.cancer.lower()),
'gistic': '{}.focal_score_by_genes.txt'.format(self.cancer.upper()),
# 'survival': "nationwidechildrens.org_clinical_follow_up_v{0}_{1}.txt".format(CLIN_VERSION[self.cancer], self.cancer.lower()),
'patient': "nationwidechildrens.org_clinical_patient_{}.txt".format(self.cancer.lower()),
'aliquot': "nationwidechildrens.org_biospecimen_aliquot_{}.txt".format(self.cancer.lower()),
'slide': "nationwidechildrens.org_biospecimen_slide_{}.txt".format(self.cancer.lower()),
'sample': "nationwidechildrens.org_biospecimen_sample_{}.txt".format(self.cancer.lower()),
'auxilary': "nationwidechildrens.org_auxiliary_{}.txt".format(self.cancer.lower()),
}
filters = {
"op": "in",
"content": {
"field": "files.file_name",
"value": [
dtype_dict[data_type]
]
}
}
params = {
"filters": json.dumps(filters),
"format": "JSON",
"size": "1"
}
return params
def _fetchFileID(self, data_type, by_name=True,method=None):
''' Get files id by upstream filter parameters
Parameters
----------
data_type : str
Data type to be download. eg. gistic
by_name : bool, optional
Whether getting files id by matching file names (the default is True).
If not, we will use project filtering options to get file id list.
Returns
-------
list
A list contains file ids.
'''
if by_name is True:
file_uuid_list = []
params = self._nameFilter(data_type)
response = requests.get(self.files_endpt, params=params)
for file_entry in json.loads(response.content.decode("utf-8"))["data"]["hits"]:
file_uuid_list.append(file_entry["file_id"])
else:
file_uuid_list = []
params = self._projFilter(data_type,method=method)
response = requests.get(self.files_endpt, params=params)
if "message" in json.loads(response.content.decode("utf-8")).keys():
return None, 'Not found'
for file_entry in json.loads(response.content.decode("utf-8"))["data"]["hits"]:
file_uuid_list.append(file_entry["file_id"])
if len(file_uuid_list) == 0:
return None,'Not found'
else:
return file_uuid_list,None
def getTableFromFiles(self, data_type, by_name=True,method=None,**kwargs):
'''
Merging tables downloaded by a list of file ids
'''
try:
file_uuid_list, error = self._fetchFileID(
data_type=data_type, by_name=by_name,method=method)
except requests.exceptions.SSLError:
time.sleep(10)
file_uuid_list, error = self._fetchFileID(
data_type=data_type, by_name=by_name,method=method)
if error != None:
return None, error
ready_to_merge = []
if len(file_uuid_list) == 0 :
return None, 'Cannot find any file.'
for ids in file_uuid_list:
params = {"ids": [ids]}
try:
response = requests.post(self.data_endpt, data=json.dumps(
params), headers={"Content-Type": "application/json"})
except requests.exceptions.SSLError:
time.sleep(10)
response = requests.post(self.data_endpt, data=json.dumps(
params), headers={"Content-Type": "application/json"})
if method != None:
temp_file = self.cancer+'_'+method+"_snv_tmp.gz"
file = open(temp_file, "wb")
file.write(response.content)
file.close()
                df = pd.read_table(temp_file, **kwargs)
# Numpy Arrays;
# Numpy arrays are great alternatives to Python Lists. Some of the key advantages of Numpy arrays are that
# they are fast, easy to work with, and give users the opportunity to perform calculations across entire arrays.
import numpy as np
height = [1.87, 1.87, 1.82, 1.91, 1.90, 1.85]
weight = [81.65, 97.52, 95.25, 92.98, 86.18, 88.45]
np_height = np.array(height)
np_weight = np.array(weight)
print(type(np_height))
# Element-wise calculations;
# We can perform element-wise calculations on height and weight. For example, you could take all 6 of the height
# and weight observations above, and calculate the BMI for each observation with a single equation.
# These operations are very fast and computationally efficient.
# They are particularly helpful when you have 1000s of observations in your data.
bmi = np_weight / np_height ** 2
print(bmi)
# Subsetting;
# Another great feature of Numpy arrays is the ability to subset. For instance, if you wanted to know
# which observations in our BMI array are above 23, we could quickly subset it to find out.
print(bmi[bmi > 23])
# Pandas Basics;
# Pandas DataFrames;
# Pandas is a high-level data manipulation tool. It is built on the Numpy package and its key data structure
# is called the DataFrame. DataFrames allow you to store and manipulate tabular data in rows of observations and
# columns of variables. There are several ways to create a DataFrame. One way is to use a dictionary.
dict = {"country": ["Brazil", "Russia", "India", "China", "South Africa"],
"capital": ["Brasilia", "Moscow", "New Dehli", "Beijing", "Pretoria"],
"area": [8.516, 17.10, 3.286, 9.597, 1.221],
"population": [200.4, 143.5, 1252, 1357, 52.98]}
import pandas as pd
brics = | pd.DataFrame(dict) | pandas.DataFrame |
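# A short extension for illustration (the row labels below are assumptions): give the
# rows meaningful labels so columns and rows can be selected by name.
brics.index = ["BR", "RU", "IN", "CH", "SA"]
print(brics["capital"])   # select a single column as a Series
print(brics.loc["IN"])    # select a single row by its label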
import pytest
import pandas as pd
import pandas._testing as tm
from pandas.tests.extension.base.base import BaseExtensionTests
class BaseGroupbyTests(BaseExtensionTests):
"""Groupby-specific tests."""
def test_grouping_grouper(self, data_for_grouping):
df = pd.DataFrame(
{"A": ["B", "B", None, None, "A", "A", "B", "C"], "B": data_for_grouping}
)
gr1 = df.groupby("A").grouper.groupings[0]
gr2 = df.groupby("B").grouper.groupings[0]
tm.assert_numpy_array_equal(gr1.grouping_vector, df.A.values)
tm.assert_extension_array_equal(gr2.grouping_vector, data_for_grouping)
@pytest.mark.parametrize("as_index", [True, False])
def test_groupby_extension_agg(self, as_index, data_for_grouping):
df = pd.DataFrame({"A": [1, 1, 2, 2, 3, 3, 1, 4], "B": data_for_grouping})
result = df.groupby("B", as_index=as_index).A.mean()
_, index = pd.factorize(data_for_grouping, sort=True)
index = pd.Index(index, name="B")
expected = pd.Series([3.0, 1.0, 4.0], index=index, name="A")
if as_index:
self.assert_series_equal(result, expected)
else:
expected = expected.reset_index()
self.assert_frame_equal(result, expected)
def test_groupby_agg_extension(self, data_for_grouping):
# GH#38980 groupby agg on extension type fails for non-numeric types
df = pd.DataFrame({"A": [1, 1, 2, 2, 3, 3, 1, 4], "B": data_for_grouping})
expected = df.iloc[[0, 2, 4, 7]]
expected = expected.set_index("A")
result = df.groupby("A").agg({"B": "first"})
self.assert_frame_equal(result, expected)
result = df.groupby("A").agg("first")
self.assert_frame_equal(result, expected)
result = df.groupby("A").first()
self.assert_frame_equal(result, expected)
def test_groupby_extension_no_sort(self, data_for_grouping):
df = pd.DataFrame({"A": [1, 1, 2, 2, 3, 3, 1, 4], "B": data_for_grouping})
result = df.groupby("B", sort=False).A.mean()
_, index = pd.factorize(data_for_grouping, sort=False)
index = pd.Index(index, name="B")
expected = pd.Series([1.0, 3.0, 4.0], index=index, name="A")
self.assert_series_equal(result, expected)
def test_groupby_extension_transform(self, data_for_grouping):
valid = data_for_grouping[~data_for_grouping.isna()]
df = pd.DataFrame({"A": [1, 1, 3, 3, 1, 4], "B": valid})
result = df.groupby("B").A.transform(len)
expected = pd.Series([3, 3, 2, 2, 3, 1], name="A")
self.assert_series_equal(result, expected)
def test_groupby_extension_apply(self, data_for_grouping, groupby_apply_op):
df = pd.DataFrame({"A": [1, 1, 2, 2, 3, 3, 1, 4], "B": data_for_grouping})
df.groupby("B").apply(groupby_apply_op)
df.groupby("B").A.apply(groupby_apply_op)
df.groupby("A").apply(groupby_apply_op)
df.groupby("A").B.apply(groupby_apply_op)
def test_groupby_apply_identity(self, data_for_grouping):
df = pd.DataFrame({"A": [1, 1, 2, 2, 3, 3, 1, 4], "B": data_for_grouping})
result = df.groupby("A").B.apply(lambda x: x.array)
expected = pd.Series(
[
df.B.iloc[[0, 1, 6]].array,
df.B.iloc[[2, 3]].array,
df.B.iloc[[4, 5]].array,
df.B.iloc[[7]].array,
],
index=pd.Index([1, 2, 3, 4], name="A"),
name="B",
)
self.assert_series_equal(result, expected)
def test_in_numeric_groupby(self, data_for_grouping):
df = pd.DataFrame(
{
"A": [1, 1, 2, 2, 3, 3, 1, 4],
"B": data_for_grouping,
"C": [1, 1, 1, 1, 1, 1, 1, 1],
}
)
result = df.groupby("A").sum().columns
if data_for_grouping.dtype._is_numeric:
expected = pd.Index(["B", "C"])
else:
expected = pd.Index(["C"])
        tm.assert_index_equal(result, expected)
import matplotlib
matplotlib.use('pdf')
import matplotlib.pyplot as plt
import overhang.tree as tree
import overhang.reaction_node as node
import logging
from overhang.dnastorage_utils.system.dnafile import *
import os
import sys
import shutil
import math
import numpy as np
import overhang.plot_utils.plot_utils as plt_util
import time
import pickle as pi
import gc
import pandas as pd
import scipy
tlogger=logging.getLogger('dna.overhang.tools.tree_analysis')
tlogger.addHandler(logging.NullHandler())
def _sweep_overhangs_1_bit(self,data_buffer,workloadID,debug_fname=None): #workloadID is a tuple indicating the complete name of the workload
output_filename=debug_fname
overhang_list=[3,5,9,17,65]
#overhang_list=[9,17,65]
    strand_length_bytes=509 # 509 data bytes; together with the 3 index bytes this gives 512-byte strands
index_bytes=3
root_prefix=os.path.normpath(self._out_dir[workloadID[0]]["root"])+'/'
#set up result dictionary for the file we are analyzing
if workloadID[0] not in self._1_bit_results:
self._1_bit_results[workloadID[0]]={}
if workloadID[1] not in self._1_bit_results[workloadID[0]]:
self._1_bit_results[workloadID[0]][workloadID[1]]={}
else:
if workloadID[1] not in self._1_bit_results[workloadID[0]]:
self._1_bit_results[workloadID[0]][workloadID[1]]={}
self._1_bit_results[workloadID[0]][workloadID[1]]["opt_reaction_count"]=[] #array of integers
self._1_bit_results[workloadID[0]][workloadID[1]]["transform_reaction_count"]=[]
self._1_bit_results[workloadID[0]][workloadID[1]]["ideal_reaction_count"]=[] #array of integers
self._1_bit_results[workloadID[0]][workloadID[1]]["overhang_array"]=overhang_list
self._1_bit_results[workloadID[0]][workloadID[1]]["no_opt_reaction_count"]=[] #array of integers
self._1_bit_results[workloadID[0]][workloadID[1]]["rotate_reaction_count"]=[]
self._1_bit_results[workloadID[0]][workloadID[1]]["ideal_height_map"]=np.zeros((len(overhang_list),int(math.ceil(math.log((strand_length_bytes+index_bytes)*8,2)))),dtype=np.uint) #build np array to be used as heat map
self._1_bit_results[workloadID[0]][workloadID[1]]["opt_height_map"]=np.zeros((len(overhang_list),int(math.ceil(math.log((strand_length_bytes+index_bytes)*8,2)))),dtype=np.uint)
for overhang_index,overhang_count in enumerate(overhang_list):
print("{} {}".format(workloadID,overhang_count))
dna_file=OverhangBitStringWriteDNAFile(primer5=self._primer5, formatid=self._format_ID, primer3=self._primer3, out_fd=output_filename,fsmd_abbrev='OH_BITSTRING_XXX',\
bits_per_block=1,strand_length=strand_length_bytes*8,num_overhangs=overhang_count)
dna_file.write(data_buffer)
dna_file.header_flush()
strand_list=dna_file.get_strands() #get strands after encoding
start_time=time.time()
transform_tree=tree.construct_tree_transform_lite(strand_list,overhang_count,int(math.ceil(math.log(overhang_count,4))),1,
(index_bytes+strand_length_bytes)*8,h_array=None) #+4 for the number of bytes used for indexing
self._1_bit_results[workloadID[0]][workloadID[1]]["transform_reaction_count"].append(transform_tree.order())
#print transform_tree.order()
del transform_tree
print("---- transform tree build on {} took {} seconds ---".format(workloadID[1],time.time()-start_time))
start_time=time.time()
optimized_tree=tree.construct_tree_baseopt_lite_w(strand_list,overhang_count,int(math.ceil(math.log(overhang_count,4))),1,
(index_bytes+strand_length_bytes)*8,h_array=self._1_bit_results[workloadID[0]][workloadID[1]]["opt_height_map"][overhang_index][:]) #+4 for the number of bytes used for indexing
self._1_bit_results[workloadID[0]][workloadID[1]]["opt_reaction_count"].append(optimized_tree.order())
#print optimized_tree.order()
opt_hash=optimized_tree.strand_hash_table #grab the hash table
del optimized_tree
print("---- optimized tree build on {} took {} seconds ---".format(workloadID[1],time.time()-start_time))
start_time=time.time()
rotate_tree=tree.construct_tree_rotate_lite(strand_list,overhang_count,int(math.ceil(math.log(overhang_count,4))),1,
(index_bytes+strand_length_bytes)*8,h_array=None,opt_dictionary=opt_hash) #+4 for the number of bytes used for indexing
self._1_bit_results[workloadID[0]][workloadID[1]]["rotate_reaction_count"].append(rotate_tree.order())
#print transform_tree.order()
del rotate_tree
print("---- rotate tree build on {} took {} seconds ---".format(workloadID[1],time.time()-start_time))
start_time=time.time()
unoptimized_tree=tree.construct_tree_unoptimized_lite(strand_list,overhang_count,int(math.ceil(math.log(overhang_count,4))),1,
(index_bytes+strand_length_bytes)*8)
self._1_bit_results[workloadID[0]][workloadID[1]]["no_opt_reaction_count"].append(unoptimized_tree.order())
del unoptimized_tree
gc.collect()
print("---- no optimized tree build on {} took {} seconds ---".format(workloadID[1],time.time()-start_time))
start_time=time.time()
ideal_tree=tree.construct_tree_ideal_lite(strand_list,overhang_count,int(math.ceil(math.log(overhang_count,4))),1,
(index_bytes+strand_length_bytes)*8,h_array=self._1_bit_results[workloadID[0]][workloadID[1]]["ideal_height_map"][overhang_index][:])
self._1_bit_results[workloadID[0]][workloadID[1]]["ideal_reaction_count"].append(ideal_tree.order())
del ideal_tree
gc.collect()
print("---- ideal tree build on {} took {} seconds ---".format(workloadID[1],time.time()-start_time))
tlogger.debug('Finished building trees for '+output_filename)
    #collect the number of nodes in the constructed graphs; this equals the number of reactions that have to be performed
sys.stdout.flush()
#checkpoint results by pickling data for each file
picklefile=open(root_prefix+'1_bit_results','wb')
pi.dump(self._1_bit_results,picklefile)
picklefile.close()#store the ultimate results file
def analyze_1_bit(self):
#analyze all workloads across different overhangs and data-in-block sizes
for category in self._workloadDict:
for work_file in self._workloadDict[category]:
data_buffer=self._workloadDict[category][work_file]
output_filename=category+'_'+work_file+'.output'
self._sweep_overhangs_1_bit(data_buffer,(category,work_file),output_filename)
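# Hypothetical driver sketch (assumes an analyzer instance whose _workloadDict, _out_dir,
# primer and format fields were populated elsewhere in this module):
#   analyzer.analyze_1_bit()   # builds the trees and pickles self._1_bit_results
#   analyzer.draw_1_bit()      # renders the figures and writes the CSV dumps below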
#draw figures and dump results to csv
def draw_1_bit(self):
assert len(self._1_bit_results)>0
#figure font settings
font = {'family' : 'serif',
'weight' : 'normal',
'size' : 6}
matplotlib.rc('font',**font)
#draw results from sweeping the number of overhangs for 1 bit building blocks
#create line graphs for optimized and unoptimized reaction counts
for category in self._1_bit_results:
#self._out_dir and self._1_bit_results should both have the same keys and key structure
root_prefix=os.path.normpath(self._out_dir[category]["root"])+'/'
#create plots and axes for each category of data
opt_react_norm_fig,opt_react_norm_axes=plt.subplots(nrows=1,ncols=1,figsize=(5.2,2.5),constrained_layout=True)
no_opt_react_norm_fig,no_opt_react_norm_axes=plt.subplots(nrows=1,ncols=1,figsize=(6,2.5), constrained_layout=True)
opt_to_no_opt_normalized_fig, opt_to_no_opt_normalized_axes=plt.subplots(nrows=1,ncols=1,figsize=(5.2,2), constrained_layout=True)
ideal_to_no_opt_fig, ideal_to_no_opt_axes=plt.subplots(nrows=1,ncols=1,figsize=(9,2.5),constrained_layout=True)
opt_react_raw_fig,opt_react_raw_axes=plt.subplots(nrows=1,ncols=1,figsize=(3,2.5), constrained_layout=True)
no_opt_react_raw_fig,no_opt_react_raw_axes=plt.subplots(nrows=1,ncols=1,figsize=(6,2.5), constrained_layout=True)
ideal_react_norm_fig,ideal_react_norm_axes=plt.subplots(nrows=1,ncols=1,figsize=(5.2,2.5), constrained_layout=True)
ideal_react_raw_fig,ideal_react_raw_axes=plt.subplots(nrows=1,ncols=1,figsize=(6,2.5), constrained_layout=True)
#transform optimization graphs
transform_react_norm_fig,transform_react_norm_axes=plt.subplots(nrows=1,ncols=1,figsize=(5.2,2.5), constrained_layout=True)
transform_react_raw_fig,transform_react_raw_axes=plt.subplots(nrows=1,ncols=1,figsize=(6,2.5), constrained_layout=True)
transform_to_no_opt_fig, transform_to_no_opt_axes=plt.subplots(nrows=1,ncols=1,figsize=(5.2,2.5),constrained_layout=True)
#rotate optimization graphs
rotate_react_norm_fig,rotate_react_norm_axes=plt.subplots(nrows=1,ncols=1,figsize=(5.2,2.5), constrained_layout=True)
rotate_react_raw_fig,rotate_react_raw_axes=plt.subplots(nrows=1,ncols=1,figsize=(6,2.5), constrained_layout=True)
rotate_to_no_opt_fig, rotate_to_no_opt_axes=plt.subplots(nrows=1,ncols=1,figsize=(5.2,2.5),constrained_layout=True)
#dump files
opt_react_raw_dump=open(root_prefix+'1_bit_opt_raw_'+category+'.csv','w+')
no_opt_react_raw_dump=open(root_prefix+'1_bit_no_opt_raw_'+category+'.csv','w+')
opt_react_norm_dump=open(root_prefix+'1_bit_opt_react_norm_'+category+'.csv','w+')
ideal_react_norm_dump=open(root_prefix+'1_bit_ideal_react_norm_'+category+'.csv','w+')
no_opt_react_norm_dump=open(root_prefix+'1_bit_no_opt_react_norm_'+category+'.csv','w+')
opt_to_no_opt_dump=open(root_prefix+'1_bit_opt_to_no_opt_'+category+'.csv','w+')
ideal_to_no_opt_dump=open(root_prefix+'1_bit_ideal_to_no_opt_'+category+'.csv','w+')
ideal_react_raw_dump=open(root_prefix+'1_bit_ideal_raw_'+category+'.csv','w+')
#transform dump files
transform_to_no_opt_dump=open(root_prefix+'1_bit_transform_to_no_opt_'+category+'.csv','w+')
transform_react_raw_dump=open(root_prefix+'1_bit_transform_raw_'+category+'.csv','w+')
transform_react_norm_dump=open(root_prefix+'1_bit_transform_react_norm_'+category+'.csv','w+')
#rotate dump files
rotate_to_no_opt_dump=open(root_prefix+'1_bit_rotate_to_no_opt_'+category+'.csv','w+')
rotate_react_raw_dump=open(root_prefix+'1_bit_rotate_raw_'+category+'.csv','w+')
rotate_react_norm_dump=open(root_prefix+'1_bit_rotate_react_norm_'+category+'.csv','w+')
#arrays to hold data to be plotted
file_name_array=[]
opt_react_norm_data_array=[]
no_opt_react_norm_data_array=[]
opt_to_no_opt_data_array=[]
ideal_react_norm_data_array=[]
ideal_to_no_opt_data_array=[]
opt_react_raw_data_array=[]
ideal_react_raw_data_array=[]
no_opt_react_raw_data_array=[]
#transform arrays
transform_react_norm_data_array=[]
transform_to_no_opt_data_array=[]
transform_react_raw_data_array=[]
#transform arrays
rotate_react_norm_data_array=[]
rotate_to_no_opt_data_array=[]
rotate_react_raw_data_array=[]
#gather together data for each category and normalize results when necessary
for _file in self._1_bit_results[category]:
file_prefix=os.path.normpath(self._out_dir[category][_file])+'/'
opt_heat_map_dump=open(file_prefix+'opt_heat_map'+category+"_"+_file+'.csv','w+')
ideal_heat_map_dump=open(file_prefix+'ideal_heat_map'+category+'_'+_file+'.csv','w+')
#heat maps
ideal_heat_fig,ideal_heat_axes=plt.subplots(nrows=1,ncols=1,figsize=(6,2.5), constrained_layout=True)
opt_heat_fig,opt_heat_axes=plt.subplots(nrows=1,ncols=1,figsize=(6,2.5),constrained_layout=True)
resultsDict=self._1_bit_results[category][_file]
#get data for heat maps
ideal_heat_data=resultsDict["ideal_height_map"]
opt_heat_data=resultsDict["opt_height_map"]
            #get data for the line charts and group them into arrays
overhang_array=resultsDict["overhang_array"]
opt_react_raw_data_array.append(resultsDict["opt_reaction_count"])
no_opt_react_raw_data_array.append(resultsDict["no_opt_reaction_count"])
ideal_react_raw_data_array.append(resultsDict["ideal_reaction_count"])
opt_react_norm_data=[float(_)/float(resultsDict["opt_reaction_count"][0]) for _ in resultsDict["opt_reaction_count"]]
ideal_react_norm_data=[float(_)/float(resultsDict["ideal_reaction_count"][0]) for _ in resultsDict["ideal_reaction_count"]]
no_opt_react_norm_data=[float(_)/float(resultsDict["no_opt_reaction_count"][0]) for _ in resultsDict["no_opt_reaction_count"]]
opt_to_no_opt_data=[float(opt)/float(no_opt) for opt,no_opt in zip(resultsDict["opt_reaction_count"],resultsDict["no_opt_reaction_count"])]
ideal_to_no_opt_data=[float(ideal)/float(no_opt) for ideal,no_opt in zip(resultsDict["ideal_reaction_count"],resultsDict["no_opt_reaction_count"])]
#transform calcs
transform_react_raw_data_array.append(resultsDict["transform_reaction_count"])
transform_react_norm_data=[float(_)/float(resultsDict["transform_reaction_count"][0]) for _ in resultsDict["transform_reaction_count"]]
transform_to_no_opt_data=[float(ideal)/float(no_opt) for ideal,no_opt in zip(resultsDict["transform_reaction_count"],resultsDict["no_opt_reaction_count"])]
#rotate calcs
rotate_react_raw_data_array.append(resultsDict["rotate_reaction_count"])
rotate_react_norm_data=[float(_)/float(resultsDict["rotate_reaction_count"][0]) for _ in resultsDict["rotate_reaction_count"]]
rotate_to_no_opt_data=[float(ideal)/float(no_opt) for ideal,no_opt in zip(resultsDict["rotate_reaction_count"],resultsDict["no_opt_reaction_count"])]
if "." in _file:
file_name_array.append(_file.split('.')[0])
else:
file_name_array.append(_file)
opt_react_norm_data_array.append(opt_react_norm_data)
ideal_react_norm_data_array.append(ideal_react_norm_data)
no_opt_react_norm_data_array.append(no_opt_react_norm_data)
opt_to_no_opt_data_array.append(opt_to_no_opt_data)
ideal_to_no_opt_data_array.append(ideal_to_no_opt_data)
#append transform data
transform_to_no_opt_data_array.append(transform_to_no_opt_data)
transform_react_norm_data_array.append(transform_react_norm_data)
#append rotate data
rotate_to_no_opt_data_array.append(rotate_to_no_opt_data)
rotate_react_norm_data_array.append(rotate_react_norm_data)
#plot the heat maps, 1 for each file and 1 for ideal/opt (may want to compress opt and ideal into one: 1 array or 2 subplots)
heat_fig_label=category+" "+_file+" "
heat_xLabel="height in tree"
heat_yLabel="number of overhangs"
cbar_label="match count"
plt_util.plot_heatmap(ideal_heat_data,ideal_heat_axes,overhang_array,range(1,len(ideal_heat_data[0][:])+1),heat_fig_label+" ideal",heat_xLabel,heat_yLabel,cbar_label,dumpFile=ideal_heat_map_dump, fontsize=4)
plt_util.plot_heatmap(opt_heat_data,opt_heat_axes,overhang_array,range(1,len(ideal_heat_data[0][:])+1),heat_fig_label+" opt",heat_xLabel,heat_yLabel,cbar_label,dumpFile=opt_heat_map_dump,fontsize=4)
#save a heat map for each file studied
opt_heat_fig.savefig(file_prefix+'opt_heat_map_'+category+"_"+_file+'.eps',format='eps')
ideal_heat_fig.savefig(file_prefix+'ideal_heat_map_'+category+"_"+_file+'.eps',format='eps')
opt_heat_map_dump.close()
ideal_heat_map_dump.close()
markerSet=(None,None,'o','^','x','D',None,None,None,'H','+','X')
linestyleSet=('-','-','-','-','--','-','-','-','-','--','--','--')
markeverySet=[1]*12
#draw line charts
#optimized reactions raw graph
plt_util.plot_components_wrapper(overhang_array,opt_react_raw_axes,opt_react_raw_data_array,category+" (opt raw reaction count)","Number of Overhangs","Number of Reactions",labelSet=file_name_array,linestyleSet=linestyleSet,markerSet=markerSet,dumpFile=opt_react_raw_dump,linewidth_=0.7,markeverySet=markeverySet)
chartBox = opt_react_raw_axes.get_position()
opt_react_raw_axes.set_position([chartBox.x0+0.1, chartBox.y0+0.1, chartBox.width*0.6, chartBox.height*0.9])
opt_react_raw_axes.get_legend().remove()
opt_react_raw_fig.legend(fontsize=4,loc='center',bbox_to_anchor=(0.835,0.7),ncol=1)
#ideal reactions raw graph
plt_util.plot_components_wrapper(overhang_array,ideal_react_raw_axes,ideal_react_raw_data_array,category+" (ideal raw reaction count)","Number of Overhangs","Number of Reactions",labelSet=file_name_array,linestyleSet=linestyleSet,markerSet=markerSet,dumpFile=ideal_react_raw_dump,linewidth_=0.7,markeverySet=markeverySet)
chartBox = opt_react_raw_axes.get_position()
ideal_react_raw_axes.set_position([chartBox.x0+0.1, chartBox.y0+0.1, chartBox.width*0.6, chartBox.height*0.9])
ideal_react_raw_axes.get_legend().remove()
ideal_react_raw_fig.legend(fontsize=4,loc='center',bbox_to_anchor=(0.835,0.7),ncol=1)
            #transform raw graph
plt_util.plot_components_wrapper(overhang_array,transform_react_raw_axes,transform_react_raw_data_array,category+" (transform raw reaction count)","Number of Overhangs","Number of Reactions",labelSet=file_name_array,linestyleSet=linestyleSet,markerSet=markerSet,dumpFile=transform_react_raw_dump,linewidth_=0.7,markeverySet=markeverySet)
chartBox = transform_react_raw_axes.get_position()
transform_react_raw_axes.set_position([chartBox.x0+0.1, chartBox.y0+0.1, chartBox.width*0.6, chartBox.height*0.9])
transform_react_raw_axes.get_legend().remove()
transform_react_raw_fig.legend(fontsize=4,loc='center',bbox_to_anchor=(0.835,0.7),ncol=1)
            #rotate raw graph
plt_util.plot_components_wrapper(overhang_array,rotate_react_raw_axes,rotate_react_raw_data_array,category+" (rotate raw reaction count)","Number of Overhangs","Number of Reactions",labelSet=file_name_array,linestyleSet=linestyleSet,markerSet=markerSet,dumpFile=rotate_react_raw_dump,linewidth_=0.7,markeverySet=markeverySet)
chartBox = rotate_react_raw_axes.get_position()
rotate_react_raw_axes.set_position([chartBox.x0+0.1, chartBox.y0+0.1, chartBox.width*0.6, chartBox.height*0.9])
rotate_react_raw_axes.get_legend().remove()
rotate_react_raw_fig.legend(fontsize=4,loc='center',bbox_to_anchor=(0.835,0.7),ncol=1)
#No optimized reactions raw graph
plt_util.plot_components_wrapper(overhang_array,no_opt_react_raw_axes,no_opt_react_raw_data_array,category+" (no-opt raw reaction count)","Number of Overhangs","Number of Reactions",labelSet=file_name_array,linestyleSet=linestyleSet,markerSet=markerSet,dumpFile=no_opt_react_raw_dump,linewidth_=0.7,markeverySet=markeverySet)
chartBox = no_opt_react_raw_axes.get_position()
no_opt_react_raw_axes.set_position([chartBox.x0+0.1, chartBox.y0+0.1, chartBox.width*0.6, chartBox.height*0.9])
no_opt_react_raw_axes.get_legend().remove()
no_opt_react_raw_fig.legend(fontsize=4,loc='center',bbox_to_anchor=(0.4,0.7),ncol=1)
#opt self normalized graph
plt_util.plot_components_wrapper(overhang_array,opt_react_norm_axes,opt_react_norm_data_array,category+" (opt normalized)","Number of Overhangs","Normalized Reactions",labelSet=file_name_array,linestyleSet=linestyleSet,markerSet=markerSet,dumpFile=opt_react_norm_dump,linewidth_=0.7,markeverySet=markeverySet)
chartBox = opt_react_norm_axes.get_position()
opt_react_norm_axes.set_position([chartBox.x0+0.1, chartBox.y0+0.1, chartBox.width*0.6, chartBox.height*0.9])
opt_react_norm_axes.get_legend().remove()
opt_react_norm_fig.legend(fontsize=4,loc='center',bbox_to_anchor=(0.5,0.68),ncol=1)
#no opt self normalized graph
plt_util.plot_components_wrapper(overhang_array,no_opt_react_norm_axes,no_opt_react_norm_data_array,category+" (no opt normalized)","Number of Overhangs","Normalized Reactions",labelSet=file_name_array,linestyleSet=linestyleSet,markerSet=markerSet,dumpFile=no_opt_react_norm_dump,linewidth_=0.7,markeverySet=markeverySet)
chartBox = no_opt_react_norm_axes.get_position()
no_opt_react_norm_axes.set_position([chartBox.x0+0.1, chartBox.y0+0.1, chartBox.width*0.6, chartBox.height*0.9])
no_opt_react_norm_axes.get_legend().remove()
no_opt_react_norm_fig.legend(fontsize=4,loc='center',bbox_to_anchor=(0.61,0.43),ncol=1)
#ideal self normalized graph
plt_util.plot_components_wrapper(overhang_array,ideal_react_norm_axes,ideal_react_norm_data_array,category+" (ideal normalized)","Number of Overhangs","Normalized Reactions",labelSet=file_name_array,linestyleSet=linestyleSet,markerSet=markerSet,dumpFile=ideal_react_norm_dump,linewidth_=0.7,markeverySet=markeverySet)
chartBox = ideal_react_norm_axes.get_position()
ideal_react_norm_axes.set_position([chartBox.x0+0.1, chartBox.y0+0.1, chartBox.width*0.6, chartBox.height*0.9])
ideal_react_norm_axes.get_legend().remove()
#ideal_react_norm_fig.legend(fontsize=4,loc='center',bbox_to_anchor=(0.835,0.7),ncol=1)
#transform self normalized
plt_util.plot_components_wrapper(overhang_array,transform_react_norm_axes,transform_react_norm_data_array,category+" (transform normalized)","Number of Overhangs","Normalized Reactions",labelSet=file_name_array,linestyleSet=linestyleSet,markerSet=markerSet,dumpFile=transform_react_norm_dump,linewidth_=0.7,markeverySet=markeverySet)
chartBox = transform_react_norm_axes.get_position()
transform_react_norm_axes.set_position([chartBox.x0+0.1, chartBox.y0+0.1, chartBox.width*0.6, chartBox.height*0.9])
transform_react_norm_axes.get_legend().remove()
#ideal_react_norm_fig.legend(fontsize=4,loc='center',bb
#rotate self normalized
plt_util.plot_components_wrapper(overhang_array,rotate_react_norm_axes,rotate_react_norm_data_array,category+" (rotate normalized)","Number of Overhangs","Normalized Reactions",labelSet=file_name_array,linestyleSet=linestyleSet,markerSet=markerSet,dumpFile=rotate_react_norm_dump,linewidth_=0.7,markeverySet=markeverySet)
chartBox = rotate_react_norm_axes.get_position()
rotate_react_norm_axes.set_position([chartBox.x0+0.1, chartBox.y0+0.1, chartBox.width*0.6, chartBox.height*0.9])
rotate_react_norm_axes.get_legend().remove()
#ideal_react_norm_fig.legend(fontsize=4,loc='center',bb
#opt reactions normalized to no-opt reactions
plt_util.plot_components_wrapper(overhang_array,opt_to_no_opt_normalized_axes,opt_to_no_opt_data_array,category + " (opt/no-opt)","Number of Overhangs","Normalized Reactions",labelSet=file_name_array,linestyleSet=linestyleSet,markerSet=markerSet,dumpFile=opt_to_no_opt_dump,linewidth_=0.7,markeverySet=markeverySet)
chartBox = opt_to_no_opt_normalized_axes.get_position()
opt_to_no_opt_normalized_axes.set_position([chartBox.x0+0.1, chartBox.y0+0.1, chartBox.width*0.6, chartBox.height*0.9])
opt_to_no_opt_normalized_axes.get_legend().remove()
#opt_to_no_opt_normalized_fig.legend(fontsize=4.3,loc='center',bbox_to_anchor=(0.3,0.68),ncol=1)
opt_to_no_opt_normalized_fig.legend(fontsize=4.3,loc='center',bbox_to_anchor=(0.61,0.43),ncol=1)
#ideal reactions normalized to no-opt reactions
plt_util.plot_components_wrapper(overhang_array,ideal_to_no_opt_axes,ideal_to_no_opt_data_array,category + " (ideal/no-opt)","Number of Overhangs","Normalized Reactions",labelSet=file_name_array,linestyleSet=linestyleSet,markerSet=markerSet,dumpFile=ideal_to_no_opt_dump,linewidth_=0.7,markeverySet=markeverySet)
chartBox = ideal_to_no_opt_axes.get_position()
ideal_to_no_opt_axes.set_position([chartBox.x0+0.1, chartBox.y0+0.1, chartBox.width*0.6, chartBox.height*0.9])
ideal_to_no_opt_axes.get_legend().remove()
#ideal_to_no_opt_fig.legend(fontsize=4,loc='center',bbox_to_anchor=(0.729,0.7),ncol=1)
#transform reactions normalized to no-opt reactions
plt_util.plot_components_wrapper(overhang_array,transform_to_no_opt_axes,transform_to_no_opt_data_array,category + " (transform/no-opt)","Number of Overhangs","Normalized Reactions",labelSet=file_name_array,linestyleSet=linestyleSet,markerSet=markerSet,dumpFile=transform_to_no_opt_dump,linewidth_=0.7,markeverySet=markeverySet)
chartBox = transform_to_no_opt_axes.get_position()
transform_to_no_opt_axes.set_position([chartBox.x0+0.1, chartBox.y0+0.1, chartBox.width*0.6, chartBox.height*0.9])
transform_to_no_opt_axes.get_legend().remove()
#ideal_to_no_opt_fig.legend(fontsize=4,loc='center',bbox_to_anchor=(0.729,0.7),ncol=1)
#rotate reactions normalized to no-opt reactions
plt_util.plot_components_wrapper(overhang_array,rotate_to_no_opt_axes,rotate_to_no_opt_data_array,category + " (rotate/no-opt)","Number of Overhangs","Normalized Reactions",labelSet=file_name_array,linestyleSet=linestyleSet,markerSet=markerSet,dumpFile=rotate_to_no_opt_dump,linewidth_=0.7,markeverySet=markeverySet)
chartBox = rotate_to_no_opt_axes.get_position()
rotate_to_no_opt_axes.set_position([chartBox.x0+0.1, chartBox.y0+0.1, chartBox.width*0.6, chartBox.height*0.9])
rotate_to_no_opt_axes.get_legend().remove()
#ideal_to_no_opt_fig.legend(fontsize=4,loc='center',bbox_to_anchor=(0.729,0.7),ncol=1)
#close dump files
opt_react_norm_dump.close()
no_opt_react_norm_dump.close()
opt_to_no_opt_dump.close()
ideal_to_no_opt_dump.close()
opt_react_raw_dump.close()
no_opt_react_raw_dump.close()
ideal_react_raw_dump.close()
ideal_react_norm_dump.close()
#close transform dumps
transform_react_raw_dump.close()
transform_react_norm_dump.close()
transform_to_no_opt_dump.close()
#close rotate dumps
rotate_react_raw_dump.close()
rotate_react_norm_dump.close()
rotate_to_no_opt_dump.close()
#ideal to no opt bar graph
df=pd.read_csv(root_prefix+'1_bit_ideal_to_no_opt_'+category+'.csv',index_col=0)
#calculate the geomean on the dataframe
geomean=[_ for _ in scipy.stats.gmean(df.iloc[:,:],axis=1)]
_dict={"geomean":geomean}
_df=pd.DataFrame(_dict,index=df.index)
df_ideal=pd.concat([df, _df],axis=1, sort=False)
ideal_to_no_opt_bar_fig=plt_util.pandas_barchart(df_ideal.transpose(),[3,5,9,17,65],"(ideal/no-opt)","Normalized Reactions",True,True,True,(0.007,1.1))
ideal_to_no_opt_bar_fig.axes[0].get_legend().remove()
legend=ideal_to_no_opt_bar_fig.legend(fontsize=8,loc='center',bbox_to_anchor=(0.935,0.5),ncol=1,title=r'$|\mathcal{O}|$',markerscale=1.2)
legend.get_title().set_fontsize('10') #legend 'Title' fontsize
#transform to no opt bar graph
df=pd.read_csv(root_prefix+'1_bit_transform_to_no_opt_'+category+'.csv',index_col=0)
#calculate the geomean on the dataframe
geomean=[_ for _ in scipy.stats.gmean(df.iloc[:,:],axis=1)]
_dict={"geomean":geomean}
_df=pd.DataFrame(_dict,index=df.index)
df_transform=pd.concat([df, _df],axis=1, sort=False)
if 'zpaq' in category:
transform_to_no_opt_bar_fig=plt_util.pandas_barchart(df_transform.transpose(),[3,5,9,17,65],"(transform/no-opt) zpaq","Normalized Reactions",True,True,True,(0.007,1.1))
else:
transform_to_no_opt_bar_fig=plt_util.pandas_barchart(df_transform.transpose(),[3,5,9,17,65],"transform/no-opt","Normalized Reactions",True,True,True,(0.007,1.1))
transform_to_no_opt_bar_fig.axes[0].get_legend().remove()
transform_to_no_opt_bar_fig.legend(fontsize=6,loc='center',bbox_to_anchor=(0.28,0.92),ncol=len(df.index))
#rotate to no opt bar graph
df=pd.read_csv(root_prefix+'1_bit_rotate_to_no_opt_'+category+'.csv',index_col=0)
#calculate the geomean on the dataframe
geomean=[_ for _ in scipy.stats.gmean(df.iloc[:,:],axis=1)]
_dict={"geomean":geomean}
_df=pd.DataFrame(_dict,index=df.index)
df_rotate=pd.concat([df, _df],axis=1, sort=False)
if 'zpaq' in category:
rotate_to_no_opt_bar_fig=plt_util.pandas_barchart(df_rotate.transpose(),[3,5,9,17,65],"(rotate/no-opt) zpaq","Normalized Reactions",True,True,True,(0.007,1.1))
else:
rotate_to_no_opt_bar_fig=plt_util.pandas_barchart(df_rotate.transpose(),[3,5,9,17,65],"rotate/no-opt","Normalized Reactions",True,True,True,(0.007,1.1))
rotate_to_no_opt_bar_fig.axes[0].get_legend().remove()
rotate_to_no_opt_bar_fig.legend(fontsize=6,loc='center',bbox_to_anchor=(0.28,0.92),ncol=len(df.index))
#opt to no opt bar graph
            df=pd.read_csv(root_prefix+'1_bit_opt_to_no_opt_'+category+'.csv',index_col=0)
import string
import random
import pathlib
import numpy as np
import pandas as pd
from scipy import stats
path = pathlib.Path(
'~/dev/python/python1024/data/dataproc/006analysis/case').expanduser()
shop_path = path.joinpath('店铺基本数据.xlsx')
# Product list
product_list = [f'产品{c}' for c in string.ascii_uppercase]
# Product price/cost list
product_price_list = np.random.randint(12, 31, len(product_list))
product_dict = dict(zip(product_list, product_price_list))
# Payment methods
pay_p = [0.5, 0.2, 0.1, 0.06, 0.1, 0.03, 0.01]
pay_list = ['微信', '支付宝', '银行卡', '饿了么', '美团', 'POS', '现金']
# Dining type
dining_p = [0.4, 0.2, 0.4]
dining_list = ['堂食', '打包', '外卖']
# Discount rates
discount_p = [0.4, 0.1, 0.1, 0.2, 0.1, 0.05, 0.05]
discount_list = [1, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4]
# Order notes
comment_p = [0.7, 0.05, 0.05, 0.03, 0.02, 0.01,
             0.02, 0.02, 0.03, 0.03, 0.02, 0.02]  # probability distribution
comment_list = ['', '少糖', '加糖', '少冰', '去冰',
'加冰', '加奶', '无奶', '少珍珠', '多珍珠', '去柠檬', '加柠檬']
# Order line items: mostly single-product orders, with some 2-, 3-, 4- and 5-product orders (probability distribution below)
productn_p = [0.8, 0.1, 0.06, 0.02, 0.02]
def init_shops(n_shop=100):
"""
初始化基础数据
:param n_shop: 默认初始化100个门店
:return : df_shop, DataFrame
"""
shop_list = [f'SP{i:04d}' for i in range(n_shop)]
    # Each shop has [800, 15000) users
shop_user_size = np.random.randint(800, 15000, n_shop)
    # User ID range of shop x: shop_user_list[x-1] ~ shop_user_list[x], starting from 0
shop_user_list = shop_user_size.cumsum()
    # Starting user ID of each shop; users are numbered in ascending order by default
shop_user_start = np.insert(shop_user_list[:-1], 0, 0)
    # Product list of each shop; every shop carries [5, 26) products
shop_product_list = [np.sort(np.random.choice(
product_list, np.random.randint(5, 26))) for i in range(n_shop)]
    # Shop opening date
shop_start_dates = np.random.choice(pd.period_range(
'2015-01-01', '2018-12-31', freq='M'), size=n_shop)
    # Build the shop master data table
df_shop = pd.DataFrame({'门店ID': shop_list,
'成立时间': shop_start_dates,
'用户规模': shop_user_size,
'用户起点ID': shop_user_start,
'产品': shop_product_list})
return df_shop
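# Hypothetical usage sketch (not part of the original pipeline):
#   df_shop = init_shops(n_shop=10)
#   print(df_shop.head())   # one row per shop: ID, opening month, user range, product list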
def init_orders(shop):
"""
开始生成订单
shop: Series
:return (df_order, df_order_x), DataFrame
"""
df_order_all = []
df_order_x_all = []
    # All users of this shop
user_list = np.arange(shop['用户起点ID'], shop['用户起点ID']+shop['用户规模'])
for day in pd.date_range(shop['成立时间'].to_timestamp('D', 'start'), '2020-06-30', freq='D'):
        # Generate 96-1440 orders per day, randomly spread across the shop's users
        # TODO: at some shops, a share of old users should be churned for a more accurate probability distribution
        # freq is chosen randomly between 40S and 600S
time_freq = np.random.randint(40, 601)
        # Business hours run from 6 am to 10 pm
ot_list = pd.date_range(start=day+pd.Timedelta('6H'),
end=day+pd.Timedelta('22H'),
freq=f'{time_freq}S')
        # Order ID list for the day
        n_order = ot_list.size # number of orders for the day
order_id_list = ot_list.to_series().apply(
lambda x: f'{shop["门店ID"]}X{x.timestamp():.0f}')
order_id_list.index = np.arange(n_order)
        # Binomial probability distribution over users
user_p = stats.binom.pmf(np.arange(user_list.size), n_order, p=0.5)
order_user = np.random.choice(user_list, p=user_p, size=n_order)
        # Compute how many products each order has
order_product_nlist = np.random.choice(
np.arange(1, 6), p=productn_p, size=n_order)
        # Build the order line-item table for the day
x_order_prod = np.random.choice(
shop['产品'], size=order_product_nlist.sum())
x_orderid = order_id_list.loc[order_id_list.index.repeat(
order_product_nlist)]
x_order_prodn = np.random.choice(
[1, 2, 3], size=order_product_nlist.sum())
df_order_x = pd.DataFrame({'订单ID': x_orderid,
'产品': x_order_prod,
'数量': x_order_prodn})
df_order_x['单价'] = pd.Series(x_order_prod).map(
lambda x: product_dict[x])
        # Bug: product_dict[x['产品']] may not return the correct unit price?
# df_order_x['原价'] = df_order_x.apply(
# lambda x: x['数量'] * product_dict[x['产品']], axis=1)
df_order_x['原价'] = df_order_x['数量']*df_order_x['单价']
        # Build the day's orders
df_order = pd.DataFrame({'门店ID': [shop['门店ID']]*n_order,
'订单ID': order_id_list,
'用户ID': pd.Series(order_user).map(lambda x: f'U{x:08d}'),
'订单日期': ot_list,
'折扣': np.random.choice(discount_list, size=n_order, p=discount_p),
'付款方式': np.random.choice(pay_list, size=n_order, p=pay_p),
'就餐形式': np.random.choice(dining_list, size=n_order, p=dining_p),
'订单备注': np.random.choice(comment_list, size=n_order, p=comment_p)},
index=np.arange(n_order))
        # Update orders: original price ('原价') and amount actually paid ('实付')
df_order = df_order.merge(df_order_x.groupby('订单ID')[
'原价'].sum(), on='订单ID')
df_order['实付'] = df_order['原价'] * df_order['折扣']
df_order_all.append(df_order)
df_order_x_all.append(df_order_x)
    df_all = pd.concat(df_order_all, ignore_index=True)
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import pandas
from pandas.compat import string_types
from pandas.core.dtypes.cast import find_common_type
from pandas.core.dtypes.common import (
is_list_like,
is_numeric_dtype,
is_datetime_or_timedelta_dtype,
)
from pandas.core.index import ensure_index
from pandas.core.base import DataError
from modin.engines.base.frame.partition_manager import BaseFrameManager
from modin.error_message import ErrorMessage
from modin.backends.base.query_compiler import BaseQueryCompiler
class PandasQueryCompiler(BaseQueryCompiler):
"""This class implements the logic necessary for operating on partitions
with a Pandas backend. This logic is specific to Pandas."""
def __init__(
self, block_partitions_object, index, columns, dtypes=None, is_transposed=False
):
assert isinstance(block_partitions_object, BaseFrameManager)
self.data = block_partitions_object
self.index = index
self.columns = columns
if dtypes is not None:
self._dtype_cache = dtypes
self._is_transposed = int(is_transposed)
# Index, columns and dtypes objects
_dtype_cache = None
def _get_dtype(self):
if self._dtype_cache is None:
def dtype_builder(df):
return df.apply(lambda row: find_common_type(row.values), axis=0)
map_func = self._prepare_method(
self._build_mapreduce_func(lambda df: df.dtypes)
)
reduce_func = self._build_mapreduce_func(dtype_builder)
# For now we will use a pandas Series for the dtypes.
if len(self.columns) > 0:
self._dtype_cache = (
self._full_reduce(0, map_func, reduce_func).to_pandas().iloc[0]
)
else:
self._dtype_cache = pandas.Series([])
# reset name to None because we use "__reduced__" internally
self._dtype_cache.name = None
return self._dtype_cache
dtypes = property(_get_dtype)
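    # For intuition (illustrative example, not used by the code): pandas' find_common_type
    # picks a dtype that can hold every partition's column dtype, e.g.
    #   find_common_type([np.dtype('int64'), np.dtype('float64')]) -> dtype('float64')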
def compute_index(self, axis, data_object, compute_diff=True):
"""Computes the index after a number of rows have been removed.
Note: In order for this to be used properly, the indexes must not be
changed before you compute this.
Args:
axis: The axis to extract the index from.
data_object: The new data object to extract the index from.
compute_diff: True to use `self` to compute the index from self
rather than data_object. This is used when the dimension of the
index may have changed, but the deleted rows/columns are
unknown.
Returns:
A new pandas.Index object.
"""
def pandas_index_extraction(df, axis):
if not axis:
return df.index
else:
try:
return df.columns
except AttributeError:
return pandas.Index([])
index_obj = self.index if not axis else self.columns
old_blocks = self.data if compute_diff else None
new_indices = data_object.get_indices(
axis=axis,
index_func=lambda df: pandas_index_extraction(df, axis),
old_blocks=old_blocks,
)
return index_obj[new_indices] if compute_diff else new_indices
def _validate_set_axis(self, new_labels, old_labels):
new_labels = ensure_index(new_labels)
old_len = len(old_labels)
new_len = len(new_labels)
if old_len != new_len:
raise ValueError(
"Length mismatch: Expected axis has %d elements, "
"new values have %d elements" % (old_len, new_len)
)
return new_labels
_index_cache = None
_columns_cache = None
def _get_index(self):
return self._index_cache
def _get_columns(self):
return self._columns_cache
def _set_index(self, new_index):
if self._index_cache is None:
self._index_cache = ensure_index(new_index)
else:
new_index = self._validate_set_axis(new_index, self._index_cache)
self._index_cache = new_index
def _set_columns(self, new_columns):
if self._columns_cache is None:
self._columns_cache = ensure_index(new_columns)
else:
new_columns = self._validate_set_axis(new_columns, self._columns_cache)
self._columns_cache = new_columns
columns = property(_get_columns, _set_columns)
index = property(_get_index, _set_index)
# END Index, columns, and dtypes objects
# Internal methods
# These methods are for building the correct answer in a modular way.
# Please be careful when changing these!
def _prepare_method(self, pandas_func, **kwargs):
"""Prepares methods given various metadata.
Args:
pandas_func: The function to prepare.
Returns
Helper function which handles potential transpose.
"""
if self._is_transposed:
def helper(df, internal_indices=[]):
if len(internal_indices) > 0:
return pandas_func(
df.T, internal_indices=internal_indices, **kwargs
)
return pandas_func(df.T, **kwargs)
else:
def helper(df, internal_indices=[]):
if len(internal_indices) > 0:
return pandas_func(df, internal_indices=internal_indices, **kwargs)
return pandas_func(df, **kwargs)
return helper
def numeric_columns(self, include_bool=True):
"""Returns the numeric columns of the Manager.
Returns:
List of index names.
"""
columns = []
for col, dtype in zip(self.columns, self.dtypes):
if is_numeric_dtype(dtype) and (
include_bool or (not include_bool and dtype != np.bool_)
):
columns.append(col)
return columns
def numeric_function_clean_dataframe(self, axis):
"""Preprocesses numeric functions to clean dataframe and pick numeric indices.
Args:
axis: '0' if columns and '1' if rows.
Returns:
Tuple with return value(if any), indices to apply func to & cleaned Manager.
"""
result = None
query_compiler = self
# If no numeric columns and over columns, then return empty Series
if not axis and len(self.index) == 0:
result = pandas.Series(dtype=np.int64)
nonnumeric = [
col
for col, dtype in zip(self.columns, self.dtypes)
if not is_numeric_dtype(dtype)
]
if len(nonnumeric) == len(self.columns):
# If over rows and no numeric columns, return this
if axis:
result = pandas.Series([np.nan for _ in self.index])
else:
result = pandas.Series([0 for _ in self.index])
else:
query_compiler = self.drop(columns=nonnumeric)
return result, query_compiler
# END Internal methods
# Metadata modification methods
def add_prefix(self, prefix, axis=1):
if axis == 1:
new_columns = self.columns.map(lambda x: str(prefix) + str(x))
if self._dtype_cache is not None:
new_dtype_cache = self._dtype_cache.copy()
new_dtype_cache.index = new_columns
else:
new_dtype_cache = None
new_index = self.index
else:
new_index = self.index.map(lambda x: str(prefix) + str(x))
new_columns = self.columns
new_dtype_cache = self._dtype_cache
return self.__constructor__(
self.data, new_index, new_columns, new_dtype_cache, self._is_transposed
)
def add_suffix(self, suffix, axis=1):
if axis == 1:
new_columns = self.columns.map(lambda x: str(x) + str(suffix))
if self._dtype_cache is not None:
new_dtype_cache = self._dtype_cache.copy()
new_dtype_cache.index = new_columns
else:
new_dtype_cache = None
new_index = self.index
else:
new_index = self.index.map(lambda x: str(x) + str(suffix))
new_columns = self.columns
new_dtype_cache = self._dtype_cache
return self.__constructor__(
self.data, new_index, new_columns, new_dtype_cache, self._is_transposed
)
# END Metadata modification methods
# Copy
# For copy, we don't want a situation where we modify the metadata of the
# copies if we end up modifying something here. We copy all of the metadata
# to prevent that.
def copy(self):
return self.__constructor__(
self.data.copy(),
self.index.copy(),
self.columns.copy(),
self._dtype_cache,
self._is_transposed,
)
# END Copy
# Append/Concat/Join (Not Merge)
# The append/concat/join operations should ideally never trigger remote
# compute. These operations should only ever be manipulations of the
# metadata of the resulting object. It should just be a simple matter of
# appending the other object's blocks and adding np.nan columns for the new
# columns, if needed. If new columns are added, some compute may be
# required, though it can be delayed.
#
# Currently this computation is not delayed, and it may make a copy of the
# DataFrame in memory. This can be problematic and should be fixed in the
# future. TODO (devin-petersohn): Delay reindexing
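# A hedged usage sketch of the row-wise case described above (qc / other_qc are
# illustrative names): appending another compiler stitches blocks and labels
# together (though, per the TODO above, the reindexing is not yet delayed):
#
#   appended = qc.concat(0, [other_qc], ignore_index=True)
#   # appended.index is a fresh RangeIndex covering both inputs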
def _join_index_objects(self, axis, other_index, how, sort=True):
"""Joins a pair of index objects (columns or rows) by a given strategy.
Args:
axis: The axis index object to join (0 for columns, 1 for index).
other_index: The other_index to join on.
how: The type of join to join to make (e.g. right, left).
Returns:
Joined indices.
"""
if isinstance(other_index, list):
joined_obj = self.columns if not axis else self.index
# TODO: revisit for performance
for obj in other_index:
joined_obj = joined_obj.join(obj, how=how)
return joined_obj
if not axis:
return self.columns.join(other_index, how=how, sort=sort)
else:
return self.index.join(other_index, how=how, sort=sort)
def join(self, other, **kwargs):
"""Joins a list or two objects together.
Args:
other: The other object(s) to join on.
Returns:
Joined objects.
"""
if not isinstance(other, list):
other = [other]
return self._join_list_of_managers(other, **kwargs)
def concat(self, axis, other, **kwargs):
"""Concatenates two objects together.
Args:
axis: The axis index object to join (0 for columns, 1 for index).
other: The other_index to concat with.
Returns:
Concatenated objects.
"""
return self._append_list_of_managers(other, axis, **kwargs)
def _append_list_of_managers(self, others, axis, **kwargs):
if not isinstance(others, list):
others = [others]
if self._is_transposed:
# If others are transposed, we handle that behavior correctly in
# `copartition`, but it is not handled correctly in the case that `self` is
# transposed.
return (
self.transpose()
._append_list_of_managers(
[o.transpose() for o in others], axis ^ 1, **kwargs
)
.transpose()
)
assert all(
isinstance(other, type(self)) for other in others
), "Different Manager objects are being used. This is not allowed"
sort = kwargs.get("sort", None)
join = kwargs.get("join", "outer")
ignore_index = kwargs.get("ignore_index", False)
new_self, to_append, joined_axis = self.copartition(
axis ^ 1,
others,
join,
sort,
force_repartition=any(obj._is_transposed for obj in [self] + others),
)
new_data = new_self.concat(axis, to_append)
if axis == 0:
# The indices will be appended to form the final index.
# If `ignore_index` is true, we create a RangeIndex that is the
# length of all of the index objects combined. This is the same
# behavior as pandas.
new_index = (
self.index.append([other.index for other in others])
if not ignore_index
else pandas.RangeIndex(
len(self.index) + sum(len(other.index) for other in others)
)
)
return self.__constructor__(new_data, new_index, joined_axis)
else:
# The columns will be appended to form the final columns.
new_columns = self.columns.append([other.columns for other in others])
return self.__constructor__(new_data, joined_axis, new_columns)
def _join_list_of_managers(self, others, **kwargs):
assert isinstance(
others, list
), "This method is for lists of QueryCompiler objects only"
assert all(
isinstance(other, type(self)) for other in others
), "Different Manager objects are being used. This is not allowed"
# Uses join's default value (though should not revert to default)
how = kwargs.get("how", "left")
sort = kwargs.get("sort", False)
lsuffix = kwargs.get("lsuffix", "")
rsuffix = kwargs.get("rsuffix", "")
new_self, to_join, joined_index = self.copartition(
0,
others,
how,
sort,
force_repartition=any(obj._is_transposed for obj in [self] + others),
)
new_data = new_self.concat(1, to_join)
# This stage is to efficiently get the resulting columns, including the
# suffixes.
if len(others) == 1:
others_proxy = pandas.DataFrame(columns=others[0].columns)
else:
others_proxy = [pandas.DataFrame(columns=other.columns) for other in others]
self_proxy = pandas.DataFrame(columns=self.columns)
new_columns = self_proxy.join(
others_proxy, lsuffix=lsuffix, rsuffix=rsuffix
).columns
return self.__constructor__(new_data, joined_index, new_columns)
# END Append/Concat/Join
# Copartition
def copartition(self, axis, other, how_to_join, sort, force_repartition=False):
"""Copartition two QueryCompiler objects.
Args:
axis: The axis to copartition along.
other: The other Query Compiler(s) to copartition against.
how_to_join: How to manage joining the index object ("left", "right", etc.)
sort: Whether or not to sort the joined index.
force_repartition: Whether or not to force the repartitioning. By default,
this method will skip repartitioning if it is possible. This is because
reindexing is extremely inefficient. Because this method is used to
`join` or `append`, it is vital that the internal indices match.
Returns:
A tuple (left query compiler, right query compiler list, joined index).
"""
if isinstance(other, type(self)):
other = [other]
index_obj = (
[o.index for o in other] if axis == 0 else [o.columns for o in other]
)
joined_index = self._join_index_objects(
axis ^ 1, index_obj, how_to_join, sort=sort
)
# We have to set these because otherwise when we perform the functions it may
# end up serializing this entire object.
left_old_idx = self.index if axis == 0 else self.columns
right_old_idxes = index_obj
# Start with this and we'll repartition the first time, and then not again.
reindexed_self = self.data
reindexed_other_list = []
def compute_reindex(old_idx):
"""Create a function based on the old index and axis.
Args:
old_idx: The old index/columns
Returns:
A function that will be run in each partition.
"""
def reindex_partition(df):
if axis == 0:
df.index = old_idx
new_df = df.reindex(index=joined_index)
new_df.index = pandas.RangeIndex(len(new_df.index))
else:
df.columns = old_idx
new_df = df.reindex(columns=joined_index)
new_df.columns = pandas.RangeIndex(len(new_df.columns))
return new_df
return reindex_partition
for i in range(len(other)):
# If the indices are equal we can skip partitioning so long as we are not
# forced to repartition. See note above about `force_repartition`.
if i != 0 or (left_old_idx.equals(joined_index) and not force_repartition):
reindex_left = None
else:
reindex_left = self._prepare_method(compute_reindex(left_old_idx))
if right_old_idxes[i].equals(joined_index) and not force_repartition:
reindex_right = None
else:
reindex_right = compute_reindex(right_old_idxes[i])
reindexed_self, reindexed_other = reindexed_self.copartition_datasets(
axis,
other[i].data,
reindex_left,
reindex_right,
other[i]._is_transposed,
)
reindexed_other_list.append(reindexed_other)
return reindexed_self, reindexed_other_list, joined_index
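# Hedged usage sketch: callers unpack the result as
#
#   new_self, others_reindexed, joined_index = qc.copartition(0, other_qc, "outer", sort=False)
#
# where others_reindexed is always a list with one entry per compiler passed in
# `other`, even when a single compiler was given.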
# Data Management Methods
def free(self):
"""In the future, this will hopefully trigger a cleanup of this object.
"""
# TODO create a way to clean up this object.
return
# END Data Management Methods
# To/From Pandas
def to_pandas(self):
"""Converts Modin DataFrame to Pandas DataFrame.
Returns:
Pandas DataFrame of the QueryCompiler.
"""
df = self.data.to_pandas(is_transposed=self._is_transposed)
if df.empty:
if len(self.columns) != 0:
df = pandas.DataFrame(columns=self.columns).astype(self.dtypes)
else:
df = pandas.DataFrame(columns=self.columns, index=self.index)
else:
ErrorMessage.catch_bugs_and_request_email(
len(df.index) != len(self.index) or len(df.columns) != len(self.columns)
)
df.index = self.index
df.columns = self.columns
return df
@classmethod
def from_pandas(cls, df, block_partitions_cls):
"""Improve simple Pandas DataFrame to an advanced and superior Modin DataFrame.
Args:
cls: DataManager object to convert the DataFrame to.
df: Pandas DataFrame object.
block_partitions_cls: BlockPartitions object used to store the partitions
Returns:
Returns QueryCompiler containing data from the Pandas DataFrame.
"""
new_index = df.index
new_columns = df.columns
new_dtypes = df.dtypes
new_data = block_partitions_cls.from_pandas(df)
return cls(new_data, new_index, new_columns, dtypes=new_dtypes)
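# Hedged round-trip sketch (the block partitions class is backend specific and
# SomeBlockPartitionsCls is a hypothetical placeholder):
#
#   qc = PandasQueryCompiler.from_pandas(some_df, SomeBlockPartitionsCls)
#   assert qc.to_pandas().equals(some_df)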
# END To/From Pandas
# To NumPy
def to_numpy(self):
"""Converts Modin DataFrame to NumPy Array.
Returns:
NumPy Array of the QueryCompiler.
"""
arr = self.data.to_numpy(is_transposed=self._is_transposed)
ErrorMessage.catch_bugs_and_request_email(
len(arr) != len(self.index) or len(arr[0]) != len(self.columns)
)
return arr
# END To NumPy
# Inter-Data operations (e.g. add, sub)
# These operations require two DataFrames and will change the shape of the
# data if the index objects don't match. An outer join + op is performed,
# such that columns/rows that don't have an index on the other DataFrame
# result in NaN values.
def _inter_manager_operations(self, other, how_to_join, func):
"""Inter-data operations (e.g. add, sub).
Args:
other: The other Manager for the operation.
how_to_join: The type of join to join to make (e.g. right, outer).
Returns:
New QueryCompiler with new data and index.
"""
reindexed_self, reindexed_other_list, joined_index = self.copartition(
0, other, how_to_join, sort=False
)
# unwrap list returned by `copartition`.
reindexed_other = reindexed_other_list[0]
new_columns = self._join_index_objects(
0, other.columns, how_to_join, sort=False
)
# There is an interesting serialization anomaly that happens if we do
# not use the columns in `inter_data_op_builder` from here (e.g. if we
# pass them in). Passing them in can cause problems, so we will just
# use them from here.
self_cols = self.columns
other_cols = other.columns
def inter_data_op_builder(left, right, func):
left.columns = self_cols
right.columns = other_cols
# We reset here to make sure that the internal indexes match. We aligned
# them in the previous step, so this step is to prevent mismatches.
left.index = pandas.RangeIndex(len(left.index))
right.index = pandas.RangeIndex(len(right.index))
result = func(left, right)
result.columns = pandas.RangeIndex(len(result.columns))
return result
new_data = reindexed_self.inter_data_operation(
1, lambda l, r: inter_data_op_builder(l, r, func), reindexed_other
)
return self.__constructor__(new_data, joined_index, new_columns)
def _inter_df_op_handler(self, func, other, **kwargs):
"""Helper method for inter-manager and scalar operations.
Args:
func: The function to use on the Manager/scalar.
other: The other Manager/scalar.
Returns:
New QueryCompiler with new data and index.
"""
axis = kwargs.get("axis", 0)
axis = pandas.DataFrame()._get_axis_number(axis) if axis is not None else 0
if isinstance(other, type(self)):
# If this QueryCompiler is transposed, copartition can sometimes fail to
# properly co-locate the data. It does not fail if other is transposed, so
# if this object is transposed, we will transpose both and do the operation,
# then transpose at the end.
if self._is_transposed:
return (
self.transpose()
._inter_manager_operations(
other.transpose(), "outer", lambda x, y: func(x, y, **kwargs)
)
.transpose()
)
return self._inter_manager_operations(
other, "outer", lambda x, y: func(x, y, **kwargs)
)
else:
return self._scalar_operations(
axis, other, lambda df: func(df, other, **kwargs)
)
def binary_op(self, op, other, **kwargs):
"""Perform an operation between two objects.
Note: The list of operations is as follows:
- add
- eq
- floordiv
- ge
- gt
- le
- lt
- mod
- mul
- ne
- pow
- rfloordiv
- rmod
- rpow
- rsub
- rtruediv
- sub
- truediv
- __and__
- __or__
- __xor__
Args:
op: The operation. See list of operations above
other: The object to operate against.
Returns:
A new QueryCompiler object.
"""
func = getattr(pandas.DataFrame, op)
return self._inter_df_op_handler(func, other, **kwargs)
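# Hedged usage sketch: the op name is resolved against pandas.DataFrame, so
#
#   qc.binary_op("add", other_qc)   # outer-joins labels, then DataFrame.add
#   qc.binary_op("ge", 0)           # scalars route through _scalar_operations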
def clip(self, lower, upper, **kwargs):
kwargs["upper"] = upper
kwargs["lower"] = lower
axis = kwargs.get("axis", 0)
func = self._prepare_method(pandas.DataFrame.clip, **kwargs)
if is_list_like(lower) or is_list_like(upper):
df = self._map_across_full_axis(axis, func)
return self.__constructor__(df, self.index, self.columns)
return self._scalar_operations(axis, lower or upper, func)
def update(self, other, **kwargs):
"""Uses other manager to update corresponding values in this manager.
Args:
other: The other manager.
Returns:
New QueryCompiler with updated data and index.
"""
assert isinstance(
other, type(self)
), "Must have the same QueryCompiler subclass to perform this operation"
def update_builder(df, other, **kwargs):
# This is because of a requirement in Arrow
df = df.copy()
df.update(other, **kwargs)
return df
return self._inter_df_op_handler(update_builder, other, **kwargs)
def where(self, cond, other, **kwargs):
"""Gets values from this manager where cond is true else from other.
Args:
cond: Condition on which to evaluate values.
Returns:
New QueryCompiler with updated data and index.
"""
assert isinstance(
cond, type(self)
), "Must have the same QueryCompiler subclass to perform this operation"
if isinstance(other, type(self)):
# Note: Currently we are doing this with two maps across the entire
# data. This can be done with a single map, but it will take a
# modification in the `BlockPartition` class.
# If this were in one pass it would be ~2x faster.
# TODO (devin-petersohn) rewrite this to take one pass.
def where_builder_first_pass(cond, other, **kwargs):
return cond.where(cond, other, **kwargs)
def where_builder_second_pass(df, new_other, **kwargs):
return df.where(new_other.eq(True), new_other, **kwargs)
first_pass = cond._inter_manager_operations(
other, "left", where_builder_first_pass
)
final_pass = self._inter_manager_operations(
first_pass, "left", where_builder_second_pass
)
return self.__constructor__(final_pass.data, self.index, self.columns)
else:
axis = kwargs.get("axis", 0)
# Rather than serializing and passing in the index/columns, we will
# just change this index to match the internal index.
if isinstance(other, pandas.Series):
other.index = pandas.RangeIndex(len(other.index))
def where_builder_series(df, cond):
if axis == 0:
df.index = pandas.RangeIndex(len(df.index))
cond.index = pandas.RangeIndex(len(cond.index))
else:
df.columns = pandas.RangeIndex(len(df.columns))
cond.columns = pandas.RangeIndex(len(cond.columns))
return df.where(cond, other, **kwargs)
reindexed_self, reindexed_cond, a = self.copartition(
axis, cond, "left", False
)
# Unwrap from list given by `copartition`
reindexed_cond = reindexed_cond[0]
new_data = reindexed_self.inter_data_operation(
axis, lambda l, r: where_builder_series(l, r), reindexed_cond
)
return self.__constructor__(new_data, self.index, self.columns)
# END Inter-Data operations
# Single Manager scalar operations (e.g. add to scalar, list of scalars)
def _scalar_operations(self, axis, scalar, func):
"""Handler for mapping scalar operations across a Manager.
Args:
axis: The axis index object to execute the function on.
scalar: The scalar value to map.
func: The function to use on the Manager with the scalar.
Returns:
A new QueryCompiler with updated data and new index.
"""
if isinstance(scalar, (list, np.ndarray, pandas.Series)):
new_index = self.index if axis == 0 else self.columns
def list_like_op(df):
if axis == 0:
df.index = new_index
else:
df.columns = new_index
return func(df)
new_data = self._map_across_full_axis(
axis, self._prepare_method(list_like_op)
)
if axis == 1 and isinstance(scalar, pandas.Series):
new_columns = self.columns.union(
[label for label in scalar.index if label not in self.columns]
)
else:
new_columns = self.columns
return self.__constructor__(new_data, self.index, new_columns)
else:
return self._map_partitions(self._prepare_method(func))
# END Single Manager scalar operations
# Reindex/reset_index (may shuffle data)
def reindex(self, axis, labels, **kwargs):
"""Fits a new index for this Manager.
Args:
axis: The axis index object to target the reindex on.
labels: New labels to conform 'axis' on to.
Returns:
A new QueryCompiler with updated data and new index.
"""
if self._is_transposed:
return (
self.transpose()
.reindex(axis=axis ^ 1, labels=labels, **kwargs)
.transpose()
)
# To reindex, we need a function that will be shipped to each of the
# partitions.
def reindex_builder(df, axis, old_labels, new_labels, **kwargs):
if axis:
while len(df.columns) < len(old_labels):
df[len(df.columns)] = np.nan
df.columns = old_labels
new_df = df.reindex(columns=new_labels, **kwargs)
# reset the internal columns back to a RangeIndex
new_df.columns = pandas.RangeIndex(len(new_df.columns))
return new_df
else:
while len(df.index) < len(old_labels):
df.loc[len(df.index)] = np.nan
df.index = old_labels
new_df = df.reindex(index=new_labels, **kwargs)
# reset the internal index back to a RangeIndex
new_df.reset_index(inplace=True, drop=True)
return new_df
old_labels = self.columns if axis else self.index
new_index = self.index if axis else labels
new_columns = labels if axis else self.columns
func = self._prepare_method(
lambda df: reindex_builder(df, axis, old_labels, labels, **kwargs)
)
# The reindex can just be mapped over the axis we are modifying. This
# is for simplicity in implementation. We specify num_splits here
# because if we are repartitioning we should (in the future).
# Additionally this operation is often followed by an operation that
# assumes identical partitioning. Internally, we *may* change the
# partitioning during a map across a full axis.
new_data = self._map_across_full_axis(axis, func)
return self.__constructor__(new_data, new_index, new_columns)
def reset_index(self, **kwargs):
"""Removes all levels from index and sets a default level_0 index.
Returns:
A new QueryCompiler with updated data and reset index.
"""
drop = kwargs.get("drop", False)
new_index = pandas.RangeIndex(len(self.index))
if not drop:
if isinstance(self.index, pandas.MultiIndex):
# TODO (devin-petersohn) ensure partitioning is properly aligned
new_column_names = pandas.Index(self.index.names)
new_columns = new_column_names.append(self.columns)
index_data = pandas.DataFrame(list(zip(*self.index))).T
result = self.data.from_pandas(index_data).concat(1, self.data)
return self.__constructor__(result, new_index, new_columns)
else:
new_column_name = (
self.index.name
if self.index.name is not None
else "index"
if "index" not in self.columns
else "level_0"
)
new_columns = self.columns.insert(0, new_column_name)
result = self.insert(0, new_column_name, self.index)
return self.__constructor__(result.data, new_index, new_columns)
else:
# The copies here are to ensure that we do not give references to
# this object for the purposes of updates.
return self.__constructor__(
self.data.copy(), new_index, self.columns.copy(), self._dtype_cache
)
# END Reindex/reset_index
# Transpose
# For transpose, we aren't going to immediately copy everything. Since the
# actual transpose operation is very fast, we will just do it before any
# operation that gets called on the transposed data. See _prepare_method
# for how the transpose is applied.
#
# Our invariants assume that the blocks are transposed, but not the
# data inside. Sometimes we have to reverse this transposition of blocks
# for simplicity of implementation.
def transpose(self, *args, **kwargs):
"""Transposes this QueryCompiler.
Returns:
Transposed new QueryCompiler.
"""
new_data = self.data.transpose(*args, **kwargs)
# Switch the index and columns and transpose the data within the blocks.
new_manager = self.__constructor__(
new_data, self.columns, self.index, is_transposed=self._is_transposed ^ 1
)
return new_manager
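# Hedged note: because the transpose is lazy, the block grid is flipped here
# but the data inside each partition is not transposed until a method such as
# _prepare_method applies df.T. Applying transpose twice simply flips the flag
# back, for example:
#
#   assert qc.transpose().transpose()._is_transposed == qc._is_transposed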
# END Transpose
# Full Reduce operations
#
# These operations result in a reduced dimensionality of data.
# This will return a new QueryCompiler, which will be handled in the front end.
def _full_reduce(self, axis, map_func, reduce_func=None):
"""Apply function that will reduce the data to a Pandas Series.
Args:
axis: 0 for columns and 1 for rows. Default is 0.
map_func: Callable function to map the dataframe.
reduce_func: Callable function to reduce the dataframe. If none,
then apply map_func twice.
Return:
A new QueryCompiler object containing the results from map_func and
reduce_func.
"""
if reduce_func is None:
reduce_func = map_func
mapped_parts = self.data.map_across_blocks(map_func)
full_frame = mapped_parts.map_across_full_axis(axis, reduce_func)
if axis == 0:
columns = self.columns
return self.__constructor__(
full_frame, index=["__reduced__"], columns=columns
)
else:
index = self.index
return self.__constructor__(
full_frame, index=index, columns=["__reduced__"]
)
def _build_mapreduce_func(self, func, **kwargs):
def _map_reduce_func(df):
series_result = func(df, **kwargs)
if kwargs.get("axis", 0) == 0 and isinstance(series_result, pandas.Series):
# In the case of axis=0, we need to keep the shape of the data
# consistent with what we have done. In the case of a reduction, the
# data for axis=0 should be a single value for each column. By
# transposing the data after we convert to a DataFrame, we ensure that
# the columns of the result line up with the columns from the data.
# axis=1 does not have this requirement because the index already will
# line up with the index of the data based on how pandas creates a
# DataFrame from a Series.
return pandas.DataFrame(series_result).T
return | pandas.DataFrame(series_result) | pandas.DataFrame |
# -*- coding: utf-8 -*-
import pandas as pd
import numpy as np
from datetime import datetime, timedelta
from functools import reduce
import pickle
import os
import pymssql
from virgo import market
startDate_default = '20060101'
endDate_default = (datetime.now() + timedelta(days=-1)).strftime('%Y%m%d')
# endDate_default = datetime.now().strftime('%Y%m%d')
indexTickerUnivSR_default = np.array(['000300.SH', '000016.SH', '000905.SH'])
indexTickerNameUnivSR_default = np.array(['沪深300', '上证50', '中证500'])
# Global DB connections
conn243 = pymssql.connect(server='192.168.1.243', user="yuman.hu", password="<PASSWORD>")
conn247 = pymssql.connect(server='192.168.1.247', user="yuman.hu", password="<PASSWORD>")
# daily data download
class dailyQuant(object):
def __init__(self, startDate=startDate_default, endDate=endDate_default,
indexTickerUnivSR=indexTickerUnivSR_default, indexTickerNameUnivSR=indexTickerNameUnivSR_default):
self.startDate = startDate
self.endDate = endDate
self.rawData_path = '../data/rawData/'
self.fundamentalData_path = '../data/fundamentalData/'
self.indexTickerUnivSR = indexTickerUnivSR
self.indexTickerNameUnivSR = indexTickerNameUnivSR
self.tradingDateV, self.timeSeries = self.get_dateData()
self.tickerUnivSR, self.stockTickerUnivSR, self.tickerNameUnivSR, self.stockTickerNameUnivSR, self.tickerUnivTypeR = self.get_tickerData()
def get_dateData(self):
sql = '''
SELECT [tradingday]
FROM [Group_General].[dbo].[TradingDayList]
where tradingday>='20060101'
order by tradingday asc
'''
dateSV = pd.read_sql(sql, conn247)
tradingdays = dateSV.tradingday.unique()
tradingDateV = np.array([x.replace('-', '') for x in tradingdays])
timeSeries = pd.Series(pd.to_datetime(tradingDateV))
pd.Series(tradingDateV).to_csv(self.rawData_path+ 'tradingDateV.csv', index=False)
return tradingDateV, timeSeries
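# Hedged note: tradingDateV holds trading days as 'YYYYMMDD' strings while
# timeSeries holds the same dates as a pandas datetime Series; the string
# vector is also cached to rawData_path/tradingDateV.csv above.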
def get_tickerData(self):
# and B.[SecuAbbr] not like '%%ST%%'
# where ChangeDate>='%s'
sql = '''
SELECT A.[ChangeDate],A.[ChangeType],B.[SecuCode],B.[SecuMarket],B.[SecuAbbr]
FROM [JYDB].[dbo].[LC_ListStatus] A
inner join [JYDB].[dbo].SecuMain B
on A.[InnerCode]=B.[InnerCode]
and B.SecuMarket in (83,90)
and B.SecuCategory=1
order by SecuCode asc
'''
dataV = pd.read_sql(sql, conn243)
flagMarket = dataV.SecuMarket == 83
dataV['SecuCode'][flagMarket] = dataV['SecuCode'].map(lambda x: x + '.SH')
dataV['SecuCode'][~flagMarket] = dataV['SecuCode'].map(lambda x: x + '.SZ')
# dataV.ChangeDate = pd.Series([x.strftime('%Y%m%d') for x in dataV.ChangeDate.values])
dataV.ChangeDate = dataV.ChangeDate.map(lambda x: x.strftime('%Y%m%d'))
flagV = np.full(len(dataV), True)
flagList = []
for i in range(len(dataV)):
if dataV.iat[i, 1] == 4:
if dataV.iat[i, 0] < self.tradingDateV[0]:
flagList.append(dataV.iat[i, 2])
for i in range(len(dataV)):
if dataV.iat[i, 2] in flagList:
flagV[i] = False
dataV = dataV[flagV]
stockTickerUnivSR = dataV.SecuCode.unique()
tickerUnivSR = np.append(self.indexTickerUnivSR, stockTickerUnivSR)
stockTickerNameUnivSR = dataV.SecuAbbr.unique()
tickerNameUnivSR = np.append(self.indexTickerNameUnivSR, stockTickerNameUnivSR)
tickerUnivTypeR = np.append(np.full(len(self.indexTickerUnivSR), 3), np.ones(len(dataV)))
pd.DataFrame(self.indexTickerUnivSR).T.to_csv(self.rawData_path+'indexTickerUnivSR.csv', header=False, index=False)
pd.DataFrame(stockTickerUnivSR).T.to_csv(self.rawData_path+'stockTickerUnivSR.csv', header=False, index=False)
pd.DataFrame(tickerUnivSR).T.to_csv(self.rawData_path+'tickerUnivSR.csv', header=False, index=False)
pd.DataFrame(self.indexTickerNameUnivSR).T.to_csv(self.rawData_path+'indexTickerNameUnivSR.csv', header=False, index=False)
pd.DataFrame(stockTickerNameUnivSR).T.to_csv(self.rawData_path+'stockTickerNameUnivSR.csv', header=False, index=False)
pd.DataFrame(tickerNameUnivSR).T.to_csv(self.rawData_path+'tickerNameUnivSR.csv', header=False, index=False)
pd.DataFrame(tickerUnivTypeR).T.to_csv(self.rawData_path+'tickerUnivTypeR.csv', header=False, index=False)
return tickerUnivSR, stockTickerUnivSR, tickerNameUnivSR, stockTickerNameUnivSR, tickerUnivTypeR
def __tradingData(self,tradingDay):
sql = '''
SELECT A.[TradingDay], B.[SecuMarket], B.[SecuCode], A.[PrevClosePrice],
A.[OpenPrice],A.[HighPrice],A.[LowPrice],A.[ClosePrice], A.[TurnoverVolume],A.[TurnoverValue]
FROM [JYDB].[dbo].[QT_DailyQuote] A
inner join [JYDB].[dbo].[SecuMain] B
on A.[InnerCode]=B.[InnerCode]
and B.SecuMarket in (83,90)
and B.SecuCategory=1
where A.tradingday='%s'
''' % tradingDay
dataStock = pd.read_sql_query(sql, conn243)
sql = '''
SELECT A.[TradingDay], B.[SecuMarket], B.[SecuCode], A.[PrevClosePrice],
A.[OpenPrice],A.[HighPrice],A.[LowPrice],A.[ClosePrice], A.[TurnoverVolume],A.[TurnoverValue]
FROM [JYDB].[dbo].[QT_IndexQuote] A
inner join [JYDB].[dbo].[SecuMain] B
on A.[InnerCode]=B.[InnerCode]
and B.SecuMarket in (83,90)
and (B.SecuCode = '000300' or B.SecuCode = '000016' or B.SecuCode = '000905')
and B.SecuCategory=4
where A.tradingday='%s'
''' % tradingDay
dataIndex = pd.read_sql_query(sql, conn243)
dataV = pd.concat([dataIndex,dataStock])
sql = '''
SELECT [TradingDay], [SecuCode], [StockReturns]
FROM [Group_General].[dbo].[DailyQuote]
where tradingday='%s'
''' % tradingDay
dataStock = pd.read_sql_query(sql, conn247)
sql = '''
SELECT A.[TradingDay], B.[SecuCode], A.[ChangePCT]
FROM [JYDB].[dbo].[QT_IndexQuote] A
inner join [JYDB].[dbo].[SecuMain] B
on A.[InnerCode]=B.[InnerCode]
and B.SecuMarket in (83,90)
and (B.SecuCode = '000300' or B.SecuCode = '000016' or B.SecuCode = '000905')
and B.SecuCategory=4
where A.tradingday='%s'
''' % tradingDay
dataIndex = pd.read_sql_query(sql, conn243)
dataIndex.ChangePCT = dataIndex.ChangePCT / 100
dataIndex = dataIndex.rename({'ChangePCT': 'StockReturns'}, axis='columns')
dataR = pd.concat([dataIndex, dataStock])
data = pd.merge(dataV,dataR)
flagMarket = data.SecuMarket==83
data['SecuCode'][flagMarket] = data['SecuCode'].map(lambda x: x + '.SH')
data['SecuCode'][~flagMarket] = data['SecuCode'].map(lambda x: x + '.SZ')
data.TradingDay = data.TradingDay.map(lambda x: x.strftime('%Y%m%d'))
preCloseM = pd.DataFrame(pd.pivot_table(data,values='PrevClosePrice',index='TradingDay',columns='SecuCode'),index=[str(tradingDay)],columns=self.tickerUnivSR)
openM = pd.DataFrame(pd.pivot_table(data,values='OpenPrice',index='TradingDay',columns='SecuCode'),index=[str(tradingDay)],columns=self.tickerUnivSR)
highM = pd.DataFrame(pd.pivot_table(data,values='HighPrice',index='TradingDay',columns='SecuCode'),index=[str(tradingDay)],columns=self.tickerUnivSR)
lowM =pd.DataFrame(pd.pivot_table(data,values='LowPrice',index='TradingDay',columns='SecuCode'),index=[str(tradingDay)],columns=self.tickerUnivSR)
closeM = pd.DataFrame(pd.pivot_table(data,values='ClosePrice',index='TradingDay',columns='SecuCode'),index=[str(tradingDay)],columns=self.tickerUnivSR)
volumeM = pd.DataFrame(pd.pivot_table(data,values='TurnoverVolume',index='TradingDay',columns='SecuCode'),index=[str(tradingDay)],columns=self.tickerUnivSR)
amountM = pd.DataFrame(pd.pivot_table(data,values='TurnoverValue',index='TradingDay',columns='SecuCode'),index=[str(tradingDay)],columns=self.tickerUnivSR)
retM = pd.DataFrame(pd.pivot_table(data,values='StockReturns',index='TradingDay',columns='SecuCode'),index=[str(tradingDay)], columns=self.tickerUnivSR)
sql = '''
SELECT A.[ExDiviDate], B.[SecuMarket], B.[SecuCode], A.[AdjustingFactor]
FROM [JYDB].[dbo].[QT_AdjustingFactor] A
inner join [JYDB].[dbo].[SecuMain] B
on A.[InnerCode]=B.[InnerCode]
and B.SecuMarket in (83,90)
and B.SecuCategory=1
'''
dataAF = pd.read_sql_query(sql, conn243)
dataAF = dataAF.rename({'ExDiviDate':'TradingDay'},axis=1)
flagMarket = dataAF.SecuMarket == 83
dataAF['SecuCode'][flagMarket] = dataAF['SecuCode'].map(lambda x: x + '.SH')
dataAF['SecuCode'][~flagMarket] = dataAF['SecuCode'].map(lambda x: x + '.SZ')
dataAF.TradingDay = dataAF.TradingDay.map(lambda x: x.strftime('%Y%m%d'))
adjFactorM = pd.pivot_table(dataAF, values='AdjustingFactor', index='TradingDay', columns='SecuCode')
adjFactorM.fillna(method='pad', inplace=True)
adjFactorM = pd.DataFrame(adjFactorM ,index=self.tradingDateV, columns=self.tickerUnivSR)
adjFactorM.fillna(method='pad', inplace=True)
adjFactorM =pd.DataFrame(adjFactorM ,index=[str(tradingDay)])
sql = '''
SELECT A.[ChangeDate],A.[ChangeType],B.[SecuCode],B.[SecuMarket]
FROM [JYDB].[dbo].[LC_ListStatus] A
inner join [JYDB].[dbo].SecuMain B
on A.[InnerCode]=B.[InnerCode]
and B.SecuMarket in (83,90)
and B.SecuCategory=1
where (A.ChangeType = 1 or A.ChangeType = 4)
'''
dataStock = pd.read_sql_query(sql, conn243)
sql = '''
SELECT A.[PubDate],B.[SecuCode],B.[SecuMarket]
FROM [JYDB].[dbo].[LC_IndexBasicInfo] A
inner join [JYDB].[dbo].[SecuMain] B
on A.[IndexCode]=B.[InnerCode]
and B.SecuMarket in (83,90)
and (B.SecuCode = '000300' or B.SecuCode = '000016' or B.SecuCode = '000905')
and B.SecuCategory=4
'''
dataIndex = pd.read_sql_query(sql, conn243)
dataIndex['ChangeType'] = 1
dataIndex = dataIndex.rename({'PubDate': 'ChangeDate'}, axis='columns')
dataV = pd.concat([dataIndex, dataStock])
flagMarket = dataV.SecuMarket == 83
dataV['SecuCode'][flagMarket] = dataV['SecuCode'].map(lambda x: x + '.SH')
dataV['SecuCode'][~flagMarket] = dataV['SecuCode'].map(lambda x: x + '.SZ')
# dataV.ChangeDate = pd.Series([x.strftime('%Y%m%d') for x in dataV.ChangeDate.values])
dataV.ChangeDate = dataV.ChangeDate.map(lambda x: x.strftime('%Y%m%d'))
listedM = pd.pivot_table(dataV, values='ChangeType', index='ChangeDate', columns='SecuCode')
dateTotal = np.union1d(listedM.index.values, [str(tradingDay)])
listedM = pd.DataFrame(listedM, index=dateTotal, columns=self.tickerUnivSR)
listedM[listedM == 4] = 0
listedM.fillna(method='pad', inplace=True)
listedM = pd.DataFrame(listedM,index= [str(tradingDay)])
listedM = listedM.fillna(0)
sql = '''
SELECT A.[SuspendDate],A.[ResumptionDate],A.[SuspendTime], A.[ResumptionTime], B.[SecuCode],B.[SecuMarket]
FROM [JYDB].[dbo].[LC_SuspendResumption] A
inner join [JYDB].[dbo].[SecuMain] B
on A.[InnerCode]=B.[InnerCode]
and B.SecuMarket in (83,90)
and B.SecuCategory=1
where A.[SuspendDate] = '%s'
'''%tradingDay
if tradingDay == self.tradingDateV[0]:
sql = sql.replace('A.[SuspendDate] = ','A.[SuspendDate] <= ')
dataSusp = pd.read_sql_query(sql, conn243)
flagMarket = dataSusp.SecuMarket == 83
dataSusp['SecuCode'][flagMarket] = dataSusp['SecuCode'].map(lambda x: x + '.SH')
dataSusp['SecuCode'][~flagMarket] = dataSusp['SecuCode'].map(lambda x: x + '.SZ')
dataSusp.SuspendDate = dataSusp.SuspendDate.map(lambda x: x.strftime('%Y%m%d'))
dataSusp['flag'] = 1
startFlag = pd.pivot_table(dataSusp, values='flag',index='SuspendDate', columns='SecuCode')
try:
startFlag = pd.DataFrame(startFlag, index=[str(tradingDay)], columns=self.tickerUnivSR)
except:
startFlag = pd.DataFrame(index=[str(tradingDay)], columns=self.tickerUnivSR)
endFlag = pd.DataFrame(index=[str(tradingDay)], columns=self.tickerUnivSR)
amount = amountM.fillna(0)
flag = (amount == 0)
endFlag[startFlag == 1] = 1
endFlag[flag] = 1
suspM = endFlag.fillna(0)
suspM[(listedM==0)] = 1
else:
dataSusp = pd.read_sql_query(sql, conn243)
flagMarket = dataSusp.SecuMarket == 83
dataSusp['SecuCode'][flagMarket] = dataSusp['SecuCode'].map(lambda x: x + '.SH')
dataSusp['SecuCode'][~flagMarket] = dataSusp['SecuCode'].map(lambda x: x + '.SZ')
dataSusp.SuspendDate = dataSusp.SuspendDate.map(lambda x: x.strftime('%Y%m%d'))
file2 = open('../data/rawData/{}.pkl'.format(self.tradingDateV[self.tradingDateV.tolist().index(tradingDay)-1]), 'rb')
suspPre = pickle.load(file2)['suspM']
file2.close()
dataSusp['flag'] = 1
startFlag = pd.pivot_table(dataSusp, values='flag',index='SuspendDate', columns='SecuCode')
try:
startFlag = pd.DataFrame(startFlag, index=[str(tradingDay)], columns=self.tickerUnivSR)
except:
startFlag = pd.DataFrame(index=[str(tradingDay)], columns=self.tickerUnivSR)
endFlag = pd.DataFrame(index=[str(tradingDay)], columns=self.tickerUnivSR)
amount = amountM.fillna(0)
flag = (amount == 0)
endFlag[startFlag == 1] = 1
endFlag[~flag] = 0
suspM = pd.concat([suspPre,endFlag]).fillna(method='pad')
suspM = pd.DataFrame(suspM,index=[str(tradingDay)])
suspM[(listedM==0)] = 1
sql='''
SELECT A.[SpecialTradeTime],A.[SpecialTradeType],B.[SecuCode],B.[SecuMarket]
FROM [JYDB].[dbo].[LC_SpecialTrade] A
inner join [JYDB].[dbo].[SecuMain] B
on A.[InnerCode]=B.[InnerCode]
and B.SecuMarket in (83,90)
and B.SecuCategory=1
where (A.[SpecialTradeType]=1 or A.[SpecialTradeType] = 2 or A.[SpecialTradeType] = 5 or A.[SpecialTradeType] = 6)
and A.[SpecialTradeTime] = '%s'
'''% tradingDay
if tradingDay == self.tradingDateV[0]:
sql = sql.replace('A.[SpecialTradeTime] = ','A.[SpecialTradeTime] <= ')
dataV = pd.read_sql_query(sql, conn243)
flagMarket = dataV.SecuMarket == 83
dataV['SecuCode'][flagMarket] = dataV['SecuCode'].map(lambda x: x + '.SH')
dataV['SecuCode'][~flagMarket] = dataV['SecuCode'].map(lambda x: x + '.SZ')
dataV.SpecialTradeTime = dataV.SpecialTradeTime.map(lambda x: x.strftime('%Y%m%d'))
dataV['SpecialTradeType'][dataV['SpecialTradeType'] == 5] = 1
dataV['SpecialTradeType'][dataV['SpecialTradeType'] == 2] = 0
dataV['SpecialTradeType'][dataV['SpecialTradeType'] == 6] = 0
stStateM = pd.pivot_table(dataV, values='SpecialTradeType', index='SpecialTradeTime', columns='SecuCode')
dateTotal = np.union1d(stStateM.index.values, [str(tradingDay)])
stStateM = pd.DataFrame(stStateM, index=dateTotal, columns=self.tickerUnivSR)
stStateM = stStateM.fillna(method='pad')
stStateM = pd.DataFrame(stStateM, index=[str(tradingDay)])
stStateM = stStateM.fillna(0)
else:
try:
file2 = open('../data/rawData/{}.pkl'.format(self.tradingDateV[self.tradingDateV.tolist().index(tradingDay)-1]), 'rb')
stStatePre = pickle.load(file2)['stStateM']
file2.close()
dataV = pd.read_sql_query(sql, conn243)
flagMarket = dataV.SecuMarket == 83
dataV['SecuCode'][flagMarket] = dataV['SecuCode'].map(lambda x: x + '.SH')
dataV['SecuCode'][~flagMarket] = dataV['SecuCode'].map(lambda x: x + '.SZ')
dataV.SpecialTradeTime = dataV.SpecialTradeTime.map(lambda x: x.strftime('%Y%m%d'))
dataV['SpecialTradeType'][dataV['SpecialTradeType'] == 5] = 1
dataV['SpecialTradeType'][dataV['SpecialTradeType'] == 2] = 0
dataV['SpecialTradeType'][dataV['SpecialTradeType'] == 6] = 0
stStateM = pd.pivot_table(dataV, values='SpecialTradeType', index='SpecialTradeTime', columns='SecuCode')
stStateM = pd.concat([stStatePre,stStateM]).fillna(method='pad')
except:
file2 = open('../data/rawData/{}.pkl'.format(self.tradingDateV[self.tradingDateV.tolist().index(tradingDay)-1]), 'rb')
stStatePre = pickle.load(file2)['stStateM']
file2.close()
stStateM = pd.DataFrame(index=[str(tradingDay)], columns=self.tickerUnivSR)
stStateM = pd.concat([stStatePre,stStateM]).fillna(method='pad')
# stStateM = pd.DataFrame(stStatePre,index=np.concatenate([stStatePre.index.values,str(tradingDay)]))
# stStateM = stStateM.fillna(method='pad')
finally:
stStateM = pd.DataFrame(stStateM, index=[str(tradingDay)])
stStateM = stStateM.fillna(0).astype(int)
sql = '''
SELECT A.[InDate],A.[OutDate],B.[SecuCode] as IndexCode,B.[SecuMarket] as IndexMarket,C.[SecuCode],C.[SecuMarket]
FROM [JYDB].[dbo].[LC_IndexComponent] A
inner join [JYDB].[dbo].[SecuMain] B
on A.[IndexInnerCode]=B.[InnerCode]
and B.SecuMarket in (83,90)
and (B.SecuCode = '000300' or B.SecuCode = '000016' or B.SecuCode = '000905')
and B.SecuCategory=4
inner join [JYDB].[dbo].[SecuMain] C
on A.[SecuInnerCode]=C.[InnerCode]
and C.SecuMarket in (83,90)
and C.SecuCategory=1
where A.[InDate] = '%s' or A.[OutDate] = '%s'
'''%(tradingDay,tradingDay)
if tradingDay == self.tradingDateV[0]:
sql = sql.replace('A.[InDate] = ','A.[InDate] <= ').replace('A.[OutDate] = ','A.[OutDate] <= ')
data = pd.read_sql_query(sql, conn243)
flagMarket = data.SecuMarket==83
data['SecuCode'][flagMarket] = data['SecuCode'].map(lambda x: x+'.SH')
data['SecuCode'][~flagMarket] = data['SecuCode'].map(lambda x: x+'.SZ')
flagMarket = data.IndexMarket==83
data['IndexCode'][flagMarket] = data['IndexCode'].map(lambda x: x+'.SH')
data['IndexCode'][~flagMarket] = data['IndexCode'].map(lambda x: x+'.SZ')
data.InDate = data.InDate.map(lambda x: x.strftime('%Y%m%d'))
flagDate = pd.notnull(data.OutDate)
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
Date: 2021/4/29 14:30
Desc: Fund data - newly issued funds - newly established funds
http://fund.eastmoney.com/data/xinfound.html
"""
import pandas as pd
import requests
from akshare.utils import demjson
def fund_em_new_found() -> pd.DataFrame:
"""
Fund data - newly issued funds - newly established funds
http://fund.eastmoney.com/data/xinfound.html
:return: newly established funds
:rtype: pandas.DataFrame
"""
url = "http://fund.eastmoney.com/data/FundNewIssue.aspx"
params = {
"t": "xcln",
"sort": "jzrgq,desc",
"y": "",
"page": "1,50000",
"isbuy": "1",
"v": "0.4069919776543214",
}
r = requests.get(url, params=params)
data_text = r.text
data_json = demjson.decode(data_text.strip("var newfunddata="))
temp_df = pd.DataFrame(data_json["datas"])
import decimal
import math
import warnings
from concurrent.futures import ProcessPoolExecutor, ThreadPoolExecutor
from decimal import Decimal, localcontext
from itertools import repeat
from pathlib import Path
from time import time
from typing import List, Optional, Union
import numpy as np
import pandas as pd
from tqdm.notebook import tqdm
from .config import get_global_config
from .types import FilenameType
def python_hash(SSN: int) -> int:
"""
A pythonic implementation of COBOL code using floating-point arithmetic. Note that
this will differ ever-so-slightly from the cobol_hash due to the differing rounding
conventions.
"""
# Constants determined by DoIT
L_SD = SSN
C_Q = 127773 # 3^2 * 14197
C_A = 16807 # 7^5
C_R = 2836 # 2^2 * 709
C_M = 2147483647 # prime (In fact, 2^{2^5 - 1} - 1, double Mersenne)
# Translated
W_HI = L_SD // C_Q
W_LO = L_SD % C_Q
# Recombine the quotient and remainder mod a medium-sized almost-prime with two
# coprime factors. (N.B. Not sure exactly why C_A is a power of 7 whereas C_R is
# almost prime. Would be curious to read the history of this algorithm.)
L_SD = C_A * W_LO - C_R * W_HI
# Note that C_M is _almost_ 2^31, but not quite. Also, note that
# C_A * W_LO - C_R * W_HI is maximized when SSN = C_Q - 1
# and it is minimized when SSN is the largest social security number which is
# exactly divisible by C_Q, i.e., (999_99_9999 // C_Q) * C_Q = 999_95_1498.
#
# In either case, C_A * W_LO - C_R * W_HI \in (-C_M, C_M) and so the following
# block guarantees that L_SD will be in [0, C_M).
#
# We also note that the _smallest negative_ value that C_A * W_LO - C_R * W_HI can
# achieve in theory is -1 (since C_A and C_R are coprime) but I haven't done the
# computation to determine whether it's actually possible in this range of numbers
if L_SD <= 0:
warnings.warn("L_SD is negative")
L_SD += C_M
# And so by the above comment, L_RAND is in [0, 1) and this rounding gives us the
# top 10 digits of the mantissa
L_RAND = math.floor(L_SD / C_M * 1e10) / 1e10
return L_RAND
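# Hedged note: the constants above are those of the Park-Miller "minimal
# standard" Lehmer generator (a = 7**5, m = 2**31 - 1), and the W_HI / W_LO
# split is Schrage's method for computing (a * seed) % m without overflow, so
# the SSN is effectively the seed of a single generator step. Illustrative use:
#
#   draw = python_hash(123_45_6789)   # hypothetical SSN
#   # draw is a deterministic value in [0, 1), per the range argument above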
def cobol_hash(SSN: int) -> float:
"""
A python implementation of COBOL's fixed-point arithmetic
"""
with localcontext() as ctx:
# Constants determined by DoIT
ctx.prec = 10
ctx.rounding = decimal.ROUND_DOWN
L_SD = Decimal(SSN)
C_A = Decimal("0000016807")
C_M = Decimal("2147483647")
C_Q = Decimal("0000127773")
C_R = Decimal("0000002836")
# Translated
W_HI = (L_SD / C_Q).quantize(Decimal("1E0")) # L_SD // C_Q
W_LO = L_SD - C_Q * W_HI # L_SD % C_Q
L_SD = C_A * W_LO - C_R * W_HI
if L_SD <= 0:
L_SD += C_M
L_RAND = (L_SD / C_M).quantize(Decimal("1E-10"))
if L_RAND == 0:
warnings.warn("L_RAND is zero")
L_SD += C_M
return L_RAND
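# Hedged comparison sketch: both implementations truncate L_SD / C_M to ten
# decimal digits -- one via binary floating point, one via fixed-point Decimal
# with ROUND_DOWN -- so for a given SSN they should agree except possibly in
# the last retained digit:
#
#   ssn = 123_45_6789                                     # hypothetical SSN
#   gap = abs(python_hash(ssn) - float(cobol_hash(ssn)))  # expected ~1e-10 or less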
def generate_outcomes(
input_list: Optional[List[int]] = None,
process_type: str = "cobol",
low: Optional[int] = None,
high: Optional[int] = None,
size: Optional[int] = None,
all_values: Optional[bool] = False,
generate_rand_whole: Optional[bool] = False,
) -> pd.DataFrame:
"""
Helper function that generates L_RAND outcomes with the option for pythonic or cobol implmentations.
"""
# Generate a random sample of SSNs to test, and sort to verify monotonicity of relationship
if input_list is not None:
ssn_pool = input_list
elif not all_values:
# Setting seed to ensure replicability
np.random.seed(0)
ssn_pool = np.random.randint(low=low, high=high, size=size)
ssn_pool.sort()
elif all_values:
ssn_pool = np.arange(low, high)
# apply random number generator to SSN pool
if process_type == "python":
with ThreadPoolExecutor() as executor:
ssn_outcomes = list(
tqdm(executor.map(python_hash, ssn_pool), total=len(ssn_pool))
)
if process_type == "cobol":
with ThreadPoolExecutor() as executor:
ssn_outcomes = list(
tqdm(
executor.map(cobol_hash, ssn_pool.astype(str)), total=len(ssn_pool)
)
)
df = pd.DataFrame(ssn_outcomes, columns=["L_RAND"])
'''Original implementation at https://github.com/wangtongada/BOA
'''
import itertools
import operator
import os
import warnings
from os.path import join as oj
from bisect import bisect_left
from collections import defaultdict
from copy import deepcopy
from itertools import combinations
from random import sample
import numpy as np
import pandas as pd
from mlxtend.frequent_patterns import fpgrowth
from numpy.random import random
from pandas import read_csv
from scipy.sparse import csc_matrix
from sklearn.base import BaseEstimator, ClassifierMixin
from sklearn.ensemble import RandomForestClassifier
from sklearn.utils.multiclass import check_classification_targets
from sklearn.utils.validation import check_X_y, check_is_fitted
from imodels.rule_set.rule_set import RuleSet
class BayesianRuleSetClassifier(RuleSet, BaseEstimator, ClassifierMixin):
'''Bayesian or-of-and algorithm.
Generates patterns that satisfy the minimum support and maximum length, then selects the n_rules rules with the highest entropy.
In SA_patternbased, each local maximum is stored in maps and the best BOA is returned.
Note that the BOA holds only the indices of the selected rules among the n_rules candidates in self.rules_.
'''
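# Hedged usage sketch (binarized / one-hot features appear to be assumed --
# note the `1 - df` negation used in predict below):
#
#   brs = BayesianRuleSetClassifier(n_rules=500, supp=5, maxlen=3)
#   brs.fit(X_binary, y, feature_names=list(X_binary.columns))
#   y_hat = brs.predict(X_binary)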
def __init__(self, n_rules: int = 2000,
supp=5, maxlen: int = 10,
num_iterations=5000, num_chains=3, q=0.1,
alpha_pos=100, beta_pos=1,
alpha_neg=100, beta_neg=1,
alpha_l=None, beta_l=None,
discretization_method='randomforest', random_state=0):
'''
Params
------
n_rules
number of rules to be used in SA_patternbased and also the output of generate_rules
supp
The higher this supp, the 'larger' a pattern is. 5% is a generally good number
maxlen
maximum length of a pattern
num_iterations
number of iterations in each chain
num_chains
number of chains in the simulated annealing search algorithm
q
alpha_pos
$\rho = alpha/(alpha+beta)$. Make sure $\rho$ is close to one when choosing alpha and beta
The alpha and beta parameters alter the prior distributions for different rules
beta_pos
alpha_neg
beta_neg
alpha_l
beta_l
discretization_method
discretization method
'''
self.n_rules = n_rules
self.supp = supp
self.maxlen = maxlen
self.num_iterations = num_iterations
self.num_chains = num_chains
self.q = q
self.alpha_pos = alpha_pos
self.beta_pos = beta_pos
self.alpha_neg = alpha_neg
self.beta_neg = beta_neg
self.discretization_method = discretization_method
self.alpha_l = alpha_l
self.beta_l = beta_l
self.random_state = 0
def fit(self, X, y, feature_names: list = None, init=[], verbose=False):
'''
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training data
y : array_like, shape = [n_samples]
Labels
feature_names : array_like, shape = [n_features], optional (default: [])
String labels for each feature.
If empty and X is a DataFrame, column labels are used.
If empty and X is not a DataFrame, then features are simply enumerated
'''
# check inputs
self.attr_level_num = defaultdict(int) # any missing value defaults to 0
self.attr_names = []
# get feature names
if feature_names is None:
if isinstance(X, pd.DataFrame):
feature_names = X.columns
else:
feature_names = ['X' + str(i) for i in range(X.shape[1])]
# checks
X, y = check_X_y(X, y) # converts df to ndarray
check_classification_targets(y)
assert len(feature_names) == X.shape[1], 'feature_names should be same size as X.shape[1]'
np.random.seed(self.random_state)
# convert to pandas DataFrame
X = pd.DataFrame(X, columns=feature_names)
for i, name in enumerate(X.columns):
self.attr_level_num[name] += 1
self.attr_names.append(name)
self.attr_names_orig = deepcopy(self.attr_names)
self.attr_names = list(set(self.attr_names))
# set up patterns
self._set_pattern_space()
# parameter checking
if self.alpha_l is None or self.beta_l is None or len(self.alpha_l) != self.maxlen or len(
self.beta_l) != self.maxlen:
if verbose:
print('No or wrong input for alpha_l and beta_l - the model will use default parameters.')
self.C = [1.0 / self.maxlen] * self.maxlen
self.C.insert(0, -1)
self.alpha_l = [10] * (self.maxlen + 1)
self.beta_l = [10 * self.pattern_space[i] / self.C[i] for i in range(self.maxlen + 1)]
else:
self.alpha_l = [1] + list(self.alpha_l)
self.beta_l = [1] + list(self.beta_l)
# setup
self._generate_rules(X, y, verbose)
n_rules_current = len(self.rules_)
self.rules_len_list = [len(rule) for rule in self.rules_]
maps = defaultdict(list)
T0 = 1000 # initial temperature for simulated annealing
split = 0.7 * self.num_iterations
# run simulated annealing
for chain in range(self.num_chains):
# initialize with a random pattern set
if init != []:
rules_curr = init.copy()
else:
assert n_rules_current > 1, f'Only {n_rules_current} potential rules found, change hyperparams to allow for more'
N = sample(range(1, min(8, n_rules_current), 1), 1)[0]
rules_curr = sample(range(n_rules_current), N)
rules_curr_norm = self._normalize(rules_curr)
pt_curr = -100000000000
maps[chain].append(
[-1, [pt_curr / 3, pt_curr / 3, pt_curr / 3], rules_curr, [self.rules_[i] for i in rules_curr]])
for iter in range(self.num_iterations):
if iter >= split:
p = np.array(range(1 + len(maps[chain])))
p = np.array(list(_accumulate(p)))
p = p / p[-1]
index = _find_lt(p, random())
rules_curr = maps[chain][index][2].copy()
rules_curr_norm = maps[chain][index][2].copy()
# propose new rules
rules_new, rules_norm = self._propose(rules_curr.copy(), rules_curr_norm.copy(), self.q, y)
# compute probability of new rules
cfmatrix, prob = self._compute_prob(rules_new, y)
T = T0 ** (1 - iter / self.num_iterations) # temperature for simulated annealing
pt_new = sum(prob)
with warnings.catch_warnings():
if not verbose:
warnings.simplefilter("ignore")
alpha = np.exp(float(pt_new - pt_curr) / T)
if pt_new > sum(maps[chain][-1][1]):
maps[chain].append([iter, prob, rules_new, [self.rules_[i] for i in rules_new]])
if verbose:
print((
'\n** chain = {}, max at iter = {} ** \n accuracy = {}, TP = {},FP = {}, TN = {}, FN = {}'
'\n pt_new is {}, prior_ChsRules={}, likelihood_1 = {}, likelihood_2 = {}\n').format(
chain, iter, (cfmatrix[0] + cfmatrix[2] + 0.0) / len(y), cfmatrix[0], cfmatrix[1],
cfmatrix[2], cfmatrix[3], sum(prob), prob[0], prob[1], prob[2])
)
self._print_rules(rules_new)
print(rules_new)
if random() <= alpha:
rules_curr_norm, rules_curr, pt_curr = rules_norm.copy(), rules_new.copy(), pt_new
pt_max = [sum(maps[chain][-1][1]) for chain in range(self.num_chains)]
index = pt_max.index(max(pt_max))
self.rules_ = maps[index][-1][3]
return self
def __str__(self):
return ' '.join(str(r) for r in self.rules_)
def predict(self, X):
check_is_fitted(self)
if isinstance(X, np.ndarray):
df = pd.DataFrame(X, columns=self.attr_names_orig)
else:
df = X
Z = [[]] * len(self.rules_)
dfn = 1 - df # df has negative associations
dfn.columns = [name.strip() + '_neg' for name in df.columns]
df = pd.concat([df, dfn], axis=1)
from services.SingletonMeta import SingletonMeta
import pandas as pd
import numpy as np
import jenkspy
import joblib
from sklearn import linear_model
import os
class EtlService(metaclass=SingletonMeta):
def setEda(self, eda):
self.eda = eda
def readDataSource(self):
filename = os.path.dirname(__file__) + "../../../../data/data_source.csv"
filename = os.path.abspath(filename)
properties = self.eda.getProperties()
data = pd.read_csv(filename, usecols=properties, dtype={
'user_verification_level': str,
'email_valid': str,
'ip_vpn': str,
'phone_valid': str
}, low_memory=False)
# used only class, clean data
data = data[((data['fraud_state'] == 'APPROVE') | (data['fraud_state'] == 'DECLINE'))]
print('<<< EtlService:EtlService: The shape of data:', data.shape)
return data
def getFilterData(self, data):
properties = self.eda.getProperties()
return pd.DataFrame(data, columns=properties )
def featureEngineering(self, data, action='train'):
# Missing value
data = self.missingValue(data)
print('<<< EtlService:featureEngineering: missingValue:', data)
# Same format
data = self.sameFormat(data)
print('<<< EtlService:featureEngineering: sameFormat:', data)
# Checking outlier values
outlierFields = self.eda.getOutlierFields()
if action == 'train':
data = self.iqrChekOutlierValues(data, outlierFields)
else:
data = self.jenksBreakMethodClasify(data, outlierFields)
# Featurization data
print('<<< EtlService:featureEngineering: jenksBreakMethodClasify:', data)
data = self.featurizationData(data)
print('<<< EtlService:featureEngineering: featurizationData:', data)
return data
def generate(self):
data = self.readDataSource()
data = self.featureEngineering(data)
path = os.path.dirname(__file__) + "../../../../data/"
data.to_csv(path + 'datamining_view.csv')
return data.shape
# Fill missing values: 'none' for object columns and 0 for numeric columns
def missingValue(self, data):
dataObj = data.select_dtypes(include=object).columns.tolist()
# data[dataObj] = data[dataObj].astype('string')
data[dataObj] = data[dataObj].fillna('none')
obj_columnsFloat = data.select_dtypes(include=np.float64).columns.tolist()
data[obj_columnsFloat] = data[obj_columnsFloat].fillna(0)
return data
# Replace value labels so equivalent categories share the same format
def sameFormat(self, data):
# Select columns which contains any value feature: 'OTHER', 'CURL', 'NONE'
filter = ((data == 'OTHER') | (data == 'CURL') | (data == 'NONE')).any()
obj_columnsReplace = data.loc[:, filter].columns.tolist()
# unify feature value
data[obj_columnsReplace] = data[obj_columnsReplace].replace(['OTHER'], 'Other')
data[obj_columnsReplace] = data[obj_columnsReplace].replace(['NONE'], 'none')
data[obj_columnsReplace] = data[obj_columnsReplace].replace(['CURL'], 'curl')
filterComprobation = ((data == 'OTHER') | (data == 'CURL') | (data == 'NONE')).any()
print(len(data.loc[:, filterComprobation].columns.tolist()))
return data
# For variables with outlier problems: compute IQR-based limits from the quartiles, drop values outside them, then bin the remainder into labeled ranges
def iqrChekOutlierValues(self, data, outlierFields):
for item in outlierFields:
# create nominal intervals
print('>>> EtlService:iqrChekOutlierValues >>>')
# checking outlier values
q1_amount = data[item['name']].quantile(.25)
q3_amount = data[item['name']].quantile(.75)
IQR_amount = q3_amount - q1_amount
print('transaction amount IQR: ', IQR_amount)
# defining limits
sup_amount = q3_amount + 1.5 * IQR_amount
inf_amount = q1_amount - 1.5 * IQR_amount
print('transaction amount Upper limit: ', sup_amount)
print('transaction amount Lower limit: ', inf_amount)
# cleaning the outliers in 'transaction amount' values
data_clean_transaction = data.copy()
data_clean_transaction.drop( data_clean_transaction[data_clean_transaction[item['name']] > sup_amount].index, axis=0, inplace=True)
data_clean_transaction.drop( data_clean_transaction[data_clean_transaction[item['name']] < inf_amount].index, axis=0, inplace=True)
data = self.jenksBreakMethodTrain(item, data_clean_transaction, data)
return data
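# Worked example with illustrative numbers: if Q1 = 10 and Q3 = 30 then
# IQR = 20, so rows above 30 + 1.5 * 20 = 60 or below 10 - 1.5 * 20 = -20 are
# dropped before the Jenks breaks are fitted on the cleaned column.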
# generate Outlier
def generateOutlierModel(self, feature, dataByFeature, data):
labels = feature['labels']
breaks = jenkspy.jenks_breaks(dataByFeature[feature['name']], nb_class=len(labels))
minValue = data[feature['name']].min()
maxvalue = data[feature['name']].max()
if breaks[0] != minValue:
breaks.insert(0, minValue)
labels.insert(0, 'outlier-left')
if breaks[len(breaks)-1] != maxvalue:
breaks.append(maxvalue)
labels.append('outlier-right')
numb_Bins = len(breaks) - 1
outlierData = {
"breaks": breaks,
"labels": labels
}
print('>>> EtlService:generateOutlierModel:breaks >>>', breaks)
print('>>> EtlService:generateOutlierModel:numb_Bins >>>', numb_Bins)
filename = feature['name'].replace(" ", "_")
self.save_object("data/datamining_outlier_" + filename, outlierData)
print('>>> EtlService:generateOutlierModel!')
return outlierData
# avoid Outlier
def avoidOutlier(self, feature, data, outlierData):
return pd.cut(data[feature['name']], bins=outlierData['breaks'], labels=outlierData["labels"], include_lowest=True)
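# Illustrative sketch (assumed example, not from the original code): how pd.cut
# maps values onto the Jenks break intervals saved by generateOutlierModel.
# The breaks, labels and values below are invented for the demo.
#
#     outlierData = {"breaks": [0, 10, 50, 200], "labels": ["low", "mid", "high"]}
#     values = pd.Series([3, 12, 75, 199])
#     pd.cut(values, bins=outlierData["breaks"], labels=outlierData["labels"],
#            include_lowest=True)
#     # -> ['low', 'mid', 'high', 'high']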
import os # isort:skip
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
import numpy as np
import pandas as pd
import tensorflow as tf
from scipy import stats
from tensorflow.keras.layers import (
Activation,
Conv2D,
Dense,
Dropout,
Flatten,
MaxPooling2D,
)
from tensorflow.keras.models import Sequential
# from tensorflow.keras.optimizers import Adam
class RutherfordNet:
"""The convolutional neural network (CNN) described in Rutherford et al. 2020."""
def __init__(self, **kwargs):
self.name = kwargs.get("name", None)
self.input_shape = kwargs.get("input_shape", [142, 139, 1])
self.output_dense_units = kwargs.get("output_dense_units", 3)
self.compile_kws = kwargs.get("compile_kws", {})
self.model = self.create_model(
name=self.name,
input_shape=self.input_shape,
output_dense_units=self.output_dense_units,
compile_kws=self.compile_kws,
)
def create_model(
self,
name="rutherfordnet",
input_shape=[142, 139, 1],
output_dense_units=3,
compile_kws={},
):
"""Builds and compiles the CNN.
Args:
name (str, optional): The name of the model. Defaults to "rutherfordnet".
input_shape (list, optional): Shape of the input data. Defaults to [142, 139, 1].
output_dense_units (int, optional): Number of units in the output layer. Defaults to 3.
compile_kws (dict, optional): Additional keyword arguments which
will be passed to tensorflow.keras.Model.compile(). Defaults to {}.
Returns:
tensorflow.keras.Model: The compiled CNN model.
"""
model = Sequential(name=name)
# Convolution layers
# first layer
model.add(
Conv2D(
20, (5, 5), padding="same", input_shape=input_shape, activation="elu"
)
)
model.add(MaxPooling2D(pool_size=(3, 3)))
model.add(Dropout(0.2))
# second layer
model.add(Conv2D(10, (10, 10), padding="same", activation="elu"))
model.add(MaxPooling2D(pool_size=(3, 3)))
model.add(Dropout(0.2))
# third layer
model.add(Conv2D(10, (15, 15), padding="same", activation="elu"))
model.add(MaxPooling2D(pool_size=(3, 3)))
model.add(Flatten()) # converts 3D feature maps to 1D feature maps
model.add(Dropout(0.2))
# Dense Layers
model.add(Dense(512, activation="elu"))
model.add(Dropout(0.2))
model.add(Dense(256, activation="elu"))
model.add(Dropout(0.2))
model.add(Dense(256, activation="elu"))
# Output layer
model.add(Dropout(0.2))
model.add(Dense(output_dense_units, activation="linear"))
default_compile_kws = dict(
loss="mean_squared_error", optimizer="adam", metrics=["accuracy"]
)
"""
opt = Adam(learning_rate=0.0001)
default_compile_kws = dict(
loss="mean_squared_error", optimizer=opt, metrics=["accuracy"]
)
"""
compile_kws = dict(default_compile_kws, **compile_kws)
model.compile(**compile_kws)
return model
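# Illustrative usage sketch (assumption, not part of the published code):
# building the network with its defaults and inspecting the layer stack.
#
#     net = RutherfordNet()                  # input_shape defaults to [142, 139, 1]
#     net.model.summary()                    # prints the Conv2D/Dense stack
#     net = RutherfordNet(output_dense_units=5,
#                         compile_kws={"optimizer": "rmsprop"})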
def get_training_data(self, dataset, ss_results_df, mix_results_df):
"""Assembles a training data in a format that is able to be ingested by the
Keras CNN model.
Args:
dataset (pyeem.datasets.Dataset): The PyEEM dataset being used to
generate training data.
ss_results_df (pandas.DataFrame): The augmented single source spectra results.
mix_results_df (pandas.DataFrame): The augmented mixture spectra results.
Returns:
tuple of numpy.ndarray: The formatted training data to be used in
pyeem.analysis.models.RutherfordNet.train()
"""
sources = list(dataset.calibration_sources.keys())
aug_results_df = pd.concat([ss_results_df, mix_results_df])
aug_df = []
for p in aug_results_df.index.get_level_values("hdf_path").unique().to_list():
aug_df.append(pd.read_hdf(dataset.hdf, key=p))
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for Metrics."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from meterstick import metrics
from meterstick import operations
import mock
import numpy as np
import pandas as pd
from pandas import testing
import unittest
class MetricTest(unittest.TestCase):
"""Tests general features of Metric."""
df = pd.DataFrame({'X': [0, 1, 2, 3], 'Y': [0, 1, 1, 2]})
def test_precompute(self):
metric = metrics.Metric(
'foo',
precompute=lambda df, split_by: df[split_by],
compute=lambda x: x.sum().values[0])
output = metric.compute_on(self.df, 'Y')
expected = pd.DataFrame({'foo': [0, 2, 2]}, index=range(3))
expected.index.name = 'Y'
testing.assert_frame_equal(output, expected)
def test_compute(self):
metric = metrics.Metric('foo', compute=lambda x: x['X'].sum())
output = metric.compute_on(self.df)
expected = metrics.Sum('X', 'foo').compute_on(self.df)
testing.assert_frame_equal(output, expected)
def test_postcompute(self):
def postcompute(values, split_by):
del split_by
return values / values.sum()
output = metrics.Sum('X', postcompute=postcompute).compute_on(self.df, 'Y')
expected = operations.Distribution('Y',
metrics.Sum('X')).compute_on(self.df)
expected.columns = ['sum(X)']
testing.assert_frame_equal(output.astype(float), expected)
def test_compute_slices(self):
def _sum(df, split_by):
if split_by:
df = df.groupby(split_by)
return df['X'].sum()
metric = metrics.Metric('foo', compute_slices=_sum)
output = metric.compute_on(self.df)
expected = metrics.Sum('X', 'foo').compute_on(self.df)
testing.assert_frame_equal(output, expected)
def test_final_compute(self):
metric = metrics.Metric(
'foo', compute=lambda x: x, final_compute=lambda *_: 2)
output = metric.compute_on(None)
self.assertEqual(output, 2)
def test_pipeline_operator(self):
m = metrics.Count('X')
testing.assert_frame_equal(
m.compute_on(self.df), m | metrics.compute_on(self.df))
class SimpleMetricTest(unittest.TestCase):
df = pd.DataFrame({
'X': [1, 1, 1, 2, 2, 3, 4],
'Y': [3, 1, 1, 4, 4, 3, 5],
'grp': ['A'] * 3 + ['B'] * 4
})
def test_list_where(self):
metric = metrics.Mean('X', where=['grp == "A"'])
output = metric.compute_on(self.df, return_dataframe=False)
expected = self.df.query('grp == "A"')['X'].mean()
self.assertEqual(output, expected)
def test_single_list_where(self):
metric = metrics.Mean('X', where=['grp == "A"', 'Y < 2'])
output = metric.compute_on(self.df, return_dataframe=False)
expected = self.df.query('grp == "A" and Y < 2')['X'].mean()
self.assertEqual(output, expected)
def test_count_not_df(self):
metric = metrics.Count('X')
output = metric.compute_on(self.df, return_dataframe=False)
self.assertEqual(output, 7)
def test_count_split_by_not_df(self):
metric = metrics.Count('X')
output = metric.compute_on(self.df, 'grp', return_dataframe=False)
expected = self.df.groupby('grp')['X'].count()
expected.name = 'count(X)'
testing.assert_series_equal(output, expected)
def test_count_where(self):
metric = metrics.Count('X', where='grp == "A"')
output = metric.compute_on(self.df, return_dataframe=False)
self.assertEqual(output, 3)
def test_count_with_nan(self):
df = pd.DataFrame({'X': [1, 1, np.nan, 2, 2, 3, 4]})
metric = metrics.Count('X')
output = metric.compute_on(df, return_dataframe=False)
self.assertEqual(output, 6)
def test_count_unmelted(self):
metric = metrics.Count('X')
output = metric.compute_on(self.df)
expected = pd.DataFrame({'count(X)': [7]})
testing.assert_frame_equal(output, expected)
def test_count_melted(self):
metric = metrics.Count('X')
output = metric.compute_on(self.df, melted=True)
expected = pd.DataFrame({'Value': [7]}, index=['count(X)'])
expected.index.name = 'Metric'
testing.assert_frame_equal(output, expected)
def test_count_split_by_unmelted(self):
metric = metrics.Count('X')
output = metric.compute_on(self.df, 'grp')
expected = pd.DataFrame({'count(X)': [3, 4]}, index=['A', 'B'])
expected.index.name = 'grp'
testing.assert_frame_equal(output, expected)
def test_count_split_by_melted(self):
metric = metrics.Count('X')
output = metric.compute_on(self.df, 'grp', melted=True)
expected = pd.DataFrame({
'Value': [3, 4],
'grp': ['A', 'B']
},
index=['count(X)', 'count(X)'])
expected.index.name = 'Metric'
expected.set_index('grp', append=True, inplace=True)
testing.assert_frame_equal(output, expected)
def test_count_distinct(self):
df = pd.DataFrame({'X': [1, 1, np.nan, 2, 2, 3]})
metric = metrics.Count('X', distinct=True)
output = metric.compute_on(df, return_dataframe=False)
self.assertEqual(output, 3)
def test_sum_not_df(self):
metric = metrics.Sum('X')
output = metric.compute_on(self.df, return_dataframe=False)
self.assertEqual(output, 14)
def test_sum_split_by_not_df(self):
metric = metrics.Sum('X')
output = metric.compute_on(self.df, 'grp', return_dataframe=False)
expected = self.df.groupby('grp')['X'].sum()
expected.name = 'sum(X)'
testing.assert_series_equal(output, expected)
def test_sum_where(self):
metric = metrics.Sum('X', where='grp == "A"')
output = metric.compute_on(self.df, return_dataframe=False)
expected = self.df.query('grp == "A"')['X'].sum()
self.assertEqual(output, expected)
def test_sum_unmelted(self):
metric = metrics.Sum('X')
output = metric.compute_on(self.df)
expected = pd.DataFrame({'sum(X)': [14]})
testing.assert_frame_equal(output, expected)
def test_sum_melted(self):
metric = metrics.Sum('X')
output = metric.compute_on(self.df, melted=True)
expected = pd.DataFrame({'Value': [14]}, index=['sum(X)'])
expected.index.name = 'Metric'
testing.assert_frame_equal(output, expected)
def test_sum_split_by_unmelted(self):
metric = metrics.Sum('X')
output = metric.compute_on(self.df, 'grp')
expected = pd.DataFrame({'sum(X)': [3, 11]}, index=['A', 'B'])
expected.index.name = 'grp'
testing.assert_frame_equal(output, expected)
def test_sum_split_by_melted(self):
metric = metrics.Sum('X')
output = metric.compute_on(self.df, 'grp', melted=True)
expected = pd.DataFrame({
'Value': [3, 11],
'grp': ['A', 'B']
},
index=['sum(X)', 'sum(X)'])
expected.index.name = 'Metric'
expected.set_index('grp', append=True, inplace=True)
testing.assert_frame_equal(output, expected)
def test_dot_not_df(self):
metric = metrics.Dot('X', 'Y')
output = metric.compute_on(self.df, return_dataframe=False)
self.assertEqual(output, sum(self.df.X * self.df.Y))
def test_dot_split_by_not_df(self):
metric = metrics.Dot('X', 'Y')
output = metric.compute_on(self.df, 'grp', return_dataframe=False)
self.df['X * Y'] = self.df.X * self.df.Y
expected = self.df.groupby('grp')['X * Y'].sum()
expected.name = 'sum(X * Y)'
testing.assert_series_equal(output, expected)
def test_dot_where(self):
metric = metrics.Dot('X', 'Y', where='grp == "A"')
output = metric.compute_on(self.df, return_dataframe=False)
d = self.df.query('grp == "A"')
self.assertEqual(output, sum(d.X * d.Y))
def test_dot_unmelted(self):
metric = metrics.Dot('X', 'Y')
output = metric.compute_on(self.df)
expected = pd.DataFrame({'sum(X * Y)': [sum(self.df.X * self.df.Y)]})
testing.assert_frame_equal(output, expected)
def test_dot_normalized(self):
metric = metrics.Dot('X', 'Y', True)
output = metric.compute_on(self.df)
expected = pd.DataFrame({'mean(X * Y)': [(self.df.X * self.df.Y).mean()]})
testing.assert_frame_equal(output, expected)
def test_dot_melted(self):
metric = metrics.Dot('X', 'Y')
output = metric.compute_on(self.df, melted=True)
expected = pd.DataFrame({'Value': [sum(self.df.X * self.df.Y)]},
index=['sum(X * Y)'])
expected.index.name = 'Metric'
testing.assert_frame_equal(output, expected)
def test_dot_split_by_unmelted(self):
metric = metrics.Dot('X', 'Y')
output = metric.compute_on(self.df, 'grp')
expected = pd.DataFrame({'sum(X * Y)': [5, 45]}, index=['A', 'B'])
expected.index.name = 'grp'
testing.assert_frame_equal(output, expected)
def test_dot_split_by_melted(self):
metric = metrics.Dot('X', 'Y')
output = metric.compute_on(self.df, 'grp', melted=True)
expected = pd.DataFrame({
'Value': [5, 45],
'grp': ['A', 'B']
},
index=['sum(X * Y)', 'sum(X * Y)'])
expected.index.name = 'Metric'
expected.set_index('grp', append=True, inplace=True)
testing.assert_frame_equal(output, expected)
def test_mean_not_df(self):
metric = metrics.Mean('X')
output = metric.compute_on(self.df, return_dataframe=False)
self.assertEqual(output, 2)
def test_mean_split_by_not_df(self):
metric = metrics.Mean('X')
output = metric.compute_on(self.df, 'grp', return_dataframe=False)
expected = self.df.groupby('grp')['X'].mean()
expected.name = 'mean(X)'
testing.assert_series_equal(output, expected)
def test_mean_where(self):
metric = metrics.Mean('X', where='grp == "A"')
output = metric.compute_on(self.df, return_dataframe=False)
expected = self.df.query('grp == "A"')['X'].mean()
self.assertEqual(output, expected)
def test_mean_unmelted(self):
metric = metrics.Mean('X')
output = metric.compute_on(self.df)
expected = pd.DataFrame({'mean(X)': [2.]})
testing.assert_frame_equal(output, expected)
def test_mean_melted(self):
metric = metrics.Mean('X')
output = metric.compute_on(self.df, melted=True)
expected = pd.DataFrame({'Value': [2.]}, index=['mean(X)'])
expected.index.name = 'Metric'
testing.assert_frame_equal(output, expected)
def test_mean_split_by_unmelted(self):
metric = metrics.Mean('X')
output = metric.compute_on(self.df, 'grp')
expected = pd.DataFrame({'mean(X)': [1, 2.75]}, index=['A', 'B'])
expected.index.name = 'grp'
testing.assert_frame_equal(output, expected)
def test_mean_split_by_melted(self):
metric = metrics.Mean('X')
output = metric.compute_on(self.df, 'grp', melted=True)
expected = pd.DataFrame({
'Value': [1, 2.75],
'grp': ['A', 'B']
},
index=['mean(X)', 'mean(X)'])
expected.index.name = 'Metric'
expected.set_index('grp', append=True, inplace=True)
testing.assert_frame_equal(output, expected)
def test_max(self):
metric = metrics.Max('X')
output = metric.compute_on(self.df)
expected = pd.DataFrame({'max(X)': [4]})
testing.assert_frame_equal(output, expected)
def test_min(self):
metric = metrics.Min('X')
output = metric.compute_on(self.df)
expected = pd.DataFrame({'min(X)': [1]})
testing.assert_frame_equal(output, expected)
def test_weighted_mean_not_df(self):
df = pd.DataFrame({'X': [1, 2], 'Y': [3, 1]})
metric = metrics.Mean('X', 'Y')
output = metric.compute_on(df, return_dataframe=False)
self.assertEqual(output, 1.25)
def test_weighted_mean_split_by_not_df(self):
df = pd.DataFrame({
'X': [1, 2, 1, 3],
'Y': [3, 1, 0, 1],
'grp': ['A', 'A', 'B', 'B']
})
metric = metrics.Mean('X', 'Y')
output = metric.compute_on(df, 'grp', return_dataframe=False)
output.sort_index(level='grp', inplace=True) # For Py2
expected = pd.Series((1.25, 3.), index=['A', 'B'])
expected.index.name = 'grp'
expected.name = 'Y-weighted mean(X)'
testing.assert_series_equal(output, expected)
def test_weighted_mean_unmelted(self):
df = pd.DataFrame({'X': [1, 2], 'Y': [3, 1]})
metric = metrics.Mean('X', 'Y')
output = metric.compute_on(df)
expected = pd.DataFrame({'Y-weighted mean(X)': [1.25]})
testing.assert_frame_equal(output, expected)
def test_weighted_mean_melted(self):
df = pd.DataFrame({'X': [1, 2], 'Y': [3, 1]})
metric = metrics.Mean('X', 'Y')
output = metric.compute_on(df, melted=True)
expected = pd.DataFrame({'Value': [1.25]}, index=['Y-weighted mean(X)'])
expected.index.name = 'Metric'
testing.assert_frame_equal(output, expected)
def test_weighted_mean_split_by_unmelted(self):
df = pd.DataFrame({
'X': [1, 2, 1, 3],
'Y': [3, 1, 0, 1],
'grp': ['A', 'A', 'B', 'B']
})
metric = metrics.Mean('X', 'Y')
output = metric.compute_on(df, 'grp')
output.sort_index(level='grp', inplace=True) # For Py2
expected = pd.DataFrame({'Y-weighted mean(X)': [1.25, 3.]},
index=['A', 'B'])
expected.index.name = 'grp'
testing.assert_frame_equal(output, expected)
def test_weighted_mean_split_by_melted(self):
df = pd.DataFrame({
'X': [1, 2, 1, 3],
'Y': [3, 1, 0, 1],
'grp': ['A', 'A', 'B', 'B']
})
metric = metrics.Mean('X', 'Y')
output = metric.compute_on(df, 'grp', melted=True)
output.sort_index(level='grp', inplace=True) # For Py2
expected = pd.DataFrame({
'Value': [1.25, 3.],
'grp': ['A', 'B']
},
index=['Y-weighted mean(X)', 'Y-weighted mean(X)'])
expected.index.name = 'Metric'
expected.set_index('grp', append=True, inplace=True)
testing.assert_frame_equal(output, expected)
def test_quantile_raise(self):
with self.assertRaises(ValueError) as cm:
metrics.Quantile('X', 2)
self.assertEqual(str(cm.exception), 'quantiles must be in [0, 1].')
def test_quantile_multiple_quantiles_raise(self):
with self.assertRaises(ValueError) as cm:
metrics.Quantile('X', [0.1, 2])
self.assertEqual(str(cm.exception), 'quantiles must be in [0, 1].')
def test_quantile_not_df(self):
metric = metrics.Quantile('X', 0.5)
output = metric.compute_on(self.df, return_dataframe=False)
self.assertEqual(output, 2)
def test_quantile_where(self):
metric = metrics.Quantile('X', where='grp == "B"')
output = metric.compute_on(self.df, return_dataframe=False)
self.assertEqual(output, 2.5)
def test_quantile_interpolation(self):
metric = metrics.Quantile('X', 0.5, interpolation='lower')
output = metric.compute_on(
pd.DataFrame({'X': [1, 2]}), return_dataframe=False)
self.assertEqual(output, 1)
def test_quantile_split_by_not_df(self):
metric = metrics.Quantile('X', 0.5)
output = metric.compute_on(self.df, 'grp', return_dataframe=False)
expected = self.df.groupby('grp')['X'].quantile(0.5)
expected.name = 'quantile(X, 0.5)'
testing.assert_series_equal(output, expected)
def test_quantile_unmelted(self):
metric = metrics.Quantile('X', 0.5)
output = metric.compute_on(self.df)
expected = pd.DataFrame({'quantile(X, 0.5)': [2.]})
testing.assert_frame_equal(output, expected)
def test_quantile_melted(self):
metric = metrics.Quantile('X', 0.5)
output = metric.compute_on(self.df, melted=True)
expected = pd.DataFrame({'Value': [2.]}, index=['quantile(X, 0.5)'])
expected.index.name = 'Metric'
testing.assert_frame_equal(output, expected)
def test_quantile_split_by_unmelted(self):
metric = metrics.Quantile('X', 0.5)
output = metric.compute_on(self.df, 'grp')
expected = pd.DataFrame({'quantile(X, 0.5)': [1, 2.5]}, index=['A', 'B'])
expected.index.name = 'grp'
testing.assert_frame_equal(output, expected)
def test_quantile_split_by_melted(self):
metric = metrics.Quantile('X', 0.5)
output = metric.compute_on(self.df, 'grp', melted=True)
expected = pd.DataFrame({
'Value': [1, 2.5],
'grp': ['A', 'B']
},
index=['quantile(X, 0.5)'] * 2)
expected.index.name = 'Metric'
expected.set_index('grp', append=True, inplace=True)
testing.assert_frame_equal(output, expected)
def test_quantile_multiple_quantiles(self):
df = pd.DataFrame({'X': [0, 1]})
metric = metrics.MetricList(
[metrics.Quantile('X', [0.1, 0.5]),
metrics.Count('X')])
output = metric.compute_on(df)
expected = pd.DataFrame(
[[0.1, 0.5, 2]],
columns=['quantile(X, 0.1)', 'quantile(X, 0.5)', 'count(X)'])
testing.assert_frame_equal(output, expected)
def test_quantile_multiple_quantiles_melted(self):
df = pd.DataFrame({'X': [0, 1]})
metric = metrics.MetricList(
[metrics.Quantile('X', [0.1, 0.5]),
metrics.Count('X')])
output = metric.compute_on(df, melted=True)
expected = pd.DataFrame(
{'Value': [0.1, 0.5, 2]},
index=['quantile(X, 0.1)', 'quantile(X, 0.5)', 'count(X)'])
expected.index.name = 'Metric'
testing.assert_frame_equal(output, expected)
import h5py
import numpy as np
import pandas as pd
from PIL import Image
from sklearn.datasets import make_blobs
from sklearn.metrics import log_loss
from sklearn.preprocessing import MinMaxScaler
# ----------------------------------------------------------------------
# Preprocess data
# ----------------------------------------------------------------------
def get_data(debug=False):
train_dataset = h5py.File('./data/train_cat_vs_noncat.h5', 'r')
train_x_orig = np.array(train_dataset['train_set_x'][:])
train_y_orig = np.array(train_dataset['train_set_y'][:])
test_dataset = h5py.File('./data/test_cat_vs_noncat.h5', 'r')
test_x_orig = np.array(test_dataset['test_set_x'][:])
test_y_orig = np.array(test_dataset['test_set_y'][:])
if debug:
Image.fromarray(train_x_orig[2]).show()
classes = np.array(test_dataset['list_classes'][:])
# reshape from (209,) to row vectors (1, 209)
train_y = train_y_orig.reshape((1, train_y_orig.shape[0]))
test_y = test_y_orig.reshape((1, test_y_orig.shape[0]))
num_px = train_x_orig.shape[1]
print('Dataset dimensions:')
print('Number of training examples:', train_x_orig.shape[0])
print('Number of testing examples:', test_x_orig.shape[0])
print('Images height and width:', num_px)
print('Image size: (%s, %s, 3)' % (num_px, num_px))
print('train_x shape:', train_x_orig.shape)
print('train_y shape:', train_y.shape)
print('test_x shape:', test_x_orig.shape)
print('test_y shape:', test_y.shape)
print('classes:', classes)
# reshape images from (num_px, num_px, 3) to (num_px * num_px * 3, 1)
train_x_flatten = train_x_orig.reshape(train_x_orig.shape[0], -1).T
test_x_flatten = test_x_orig.reshape(test_x_orig.shape[0], -1).T
print('train_x_flatten shape:', train_x_flatten.shape)
print('train_y shape:', train_y.shape)
print('test_x_flatten shape:', test_x_flatten.shape)
print('test_y shape:', test_y.shape)
print('sanity check after reshaping:', train_x_flatten[0:5, 0])
# standardize data
train_x = train_x_flatten / 255.
test_x = test_x_flatten / 255.
return train_x, train_y, test_x, test_y
# ----------------------------------------------------------------------
# Define model
# ----------------------------------------------------------------------
def init_params(layers_dims):
"""
Arguments:
layers_dims -- list with layers dimensions
Returns:
parameters -- dictionary with "w1", "b1", ..., "wn", "bn":
wi -- weight matrix of shape (l_dims[i], l_dims[i-1])
bi -- bias vector of shape (layer_dims[i], 1)
"""
params = {}
for n in range(1, len(layers_dims)):
w = 'w%s' % n
params[w] = np.random.randn(
layers_dims[n], layers_dims[n-1])
params[w] /= np.sqrt(layers_dims[n-1])
b = 'b%s' % n
params[b] = np.zeros((layers_dims[n], 1))
assert params[w].shape == (layers_dims[n], layers_dims[n - 1])
assert params[b].shape == (layers_dims[n], 1)
return params
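# Illustrative sketch (not part of the original script): shape bookkeeping for
# init_params(). The layer sizes below are arbitrary.
def _demo_init_params_shapes():
    params = init_params([5, 4, 3, 1])
    for key, value in sorted(params.items()):
        print(key, value.shape)
    # b1 (4, 1)
    # b2 (3, 1)
    # b3 (1, 1)
    # w1 (4, 5)
    # w2 (3, 4)
    # w3 (1, 3)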
# ----------------------------------------------------------------------
# Forward propagation
# ----------------------------------------------------------------------
def sigmoid(z):
"""
Implements sigmoid activation
Arguments:
z -- numpy array, shape (k, 1)
Returns:
a -- output of sigmoid(z), same shape as z
cache -- contains z for efficient backprop
"""
a = 1 / (1 + np.exp(-z))
assert a.shape == z.shape
return a, z
def relu(z):
"""
Implements ReLU activation.
Arguments:
z -- output of a dense layer, shape (k, 1)
Returns:
a -- output of relu(z), same shape as z
cache -- contains z for efficient backprop
"""
a = np.maximum(0, z)
assert a.shape == z.shape
return a, z
def softmax(z):
"""Computes softmax for array of scores.
Arguments:
z -- output of a dense layer, shape (k, 1)
Returns:
a -- post-activation vector, same shape as z
cache -- contains z for efficient backprop
Theory:
e^y_i / sum(e^y_j), for j = 0..(len(z)-1)
https://stackoverflow.com/questions/34968722
Example:
z = np.array([[5], [2], [-1], [3]])
a = np.exp(z) / np.exp(z).sum()
[[0.84203357], [0.04192238], [0.00208719], [0.11395685]]
assert np.isclose(a.sum(), 1)
"""
a = np.exp(z) / np.exp(z).sum(axis=0)
assert z.shape[1] == sum(np.isclose(a.sum(axis=0), 1))
# to predict use
# a = (a >= 0.5).astype(np.int)
return a, z
def dense_layer_propagate(a, w, b):
"""
Implements dense layer forward propagation.
Arguments:
a -- activations from previous layer (or input data):
(size of previous layer, number of examples)
w -- weights matrix: (size of current layer, size of previous layer)
b -- bias vector (size of the current layer, 1)
Returns:
z -- the input of the activation function, aka pre-activation parameter
cache -- dictionary with "a", "w" and "b"
stored for computing the backward pass efficiently
"""
z = np.dot(w, a) + b
assert z.shape == (w.shape[0], a.shape[1])
return z, (a, w, b)
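# Illustrative sketch (assumed example): the shape contract of
# dense_layer_propagate() for a layer of 3 units fed by 5 inputs over a batch
# of 2 examples.
def _demo_dense_layer_shapes():
    a = np.random.randn(5, 2)   # (size of previous layer, number of examples)
    w = np.random.randn(3, 5)   # (size of current layer, size of previous layer)
    b = np.zeros((3, 1))        # (size of current layer, 1)
    z, cache = dense_layer_propagate(a, w, b)
    print(z.shape)              # (3, 2)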
def dense_activation_propagate(a_prev, w, b, activation):
"""
Implements forward propagation for a dense-activation layer
Arguments:
a_prev -- activations from previous layer:
(size of previous layer, number of examples)
w -- weights (size of curr layer, size of prev layer)
b -- bias vector (size of the current layer, 1)
activation -- 'sigmoid', 'relu', 'softmax'
Returns:
a -- also called the post-activation value
cache -- for computing the backward pass efficiently
"""
z, dense_cache = dense_layer_propagate(a_prev, w, b)
if activation == 'sigmoid':
a, activation_cache = sigmoid(z)
elif activation == 'relu':
a, activation_cache = relu(z)
elif activation == 'softmax':
a, activation_cache = softmax(z)
# a_prev.shape[1] gives the number of examples
assert (a.shape == (w.shape[0], a_prev.shape[1]))
return a, (dense_cache, activation_cache)
def foreword_propagate(x, params, activation, y_dim):
"""
Implements forward propagation for dense-relu * (n-1) -> dense-sigmoid
Arguments:
x -- data, array of shape (input size, number of examples)
params -- output of init_params()
activation -- activation function for the last layer
y_dim -- dimension of the output layer (number of classes, or 1 for binary)
Returns:
al -- last post-activation value
caches -- list containing:
caches of dense-relu with size n-1 indexed from 0 to n-2
cache of dense-sigmoid indexed n-1
"""
caches = []
a = x
n_layers = len(params) // 2 # number of layers
print('-' * 40)
# implements linear-relu * (l-1)
# adds cache to the caches list
for i in range(1, n_layers):
a_prev = a
wi = params['w' + str(i)]
bi = params['b' + str(i)]
a, cache = dense_activation_propagate(a_prev, wi, bi, activation='relu')
print('layer:', i)
print('z:', cache)
print('a:', a)
print('-' * 40)
caches.append(cache)
# implements linear-sigmoid or linear-softmax
# adds cache to the caches list
wi = params['w%s' % n_layers]
bi = params['b%s' % n_layers]
y_hat, cache = dense_activation_propagate(a, wi, bi, activation=activation)
print('output layer:')
print('z:', cache)
print('a:', y_hat)
print('-' * 40)
caches.append(cache)
assert (y_hat.shape == (y_dim, x.shape[1]))
return y_hat, caches
# ----------------------------------------------------------------------
# Compute cost -- log_loss
# ----------------------------------------------------------------------
def comp_cost(y_hat, y, activation, epsilon=1e-15):
"""
Computes x-entropy cost function.
Arguments:
y_hat -- probability vector (model predictions), shape: (1, # examples)
y -- true "label" vector
activation -- activation function for last layer
Returns:
cost -- cross-entropy cost
Note: experimental, use sklearn.metrics.log_loss instead
"""
if activation == 'sigmoid':
m = y.shape[1]
cost = np.dot(y, np.log(y_hat).T) + np.dot((1 - y), np.log(1 - y_hat).T)
cost = (-1. / m) * cost
cost = np.squeeze(cost) # turns [[17]] into 17).
assert (cost.shape == ())
elif activation == 'softmax':
"""
Computes x-entropy between y (encoded as one-hot vectors) and y_hat.
Arguments:
y_hat -- predictions, array (n, k), (# of examples, # of categories)
y -- true 'label' np.array (n, k) (# of examples, # of categories)
Returns:
cost -- categorical cross entropy cost
Algorithm:
-1./N * sum_i(sum_j t_ij * log(p_ij)), i=1..len(y), j=1..k
y_hat = np.clip(y_hat, epsilon, 1. - epsilon)
-np.sum(y * np.log(y_hat + epsilon)) / y_hat.shape[0]
"""
cost = log_loss(y, y_hat)
else:
raise AttributeError('Unexpected activation function:', activation)
return cost
# ----------------------------------------------------------------------
# Back propagate
# ----------------------------------------------------------------------
def sigmoid_back_propagate(da, cache):
"""
Implements back propagation for a single sigmoid unit.
Arguments:
da -- post-activation gradient, of any shape
cache -- (z,) from the forward propagate of curr layer
Returns:
dz -- gradient of cost wrt z
"""
z = cache
s = 1 / (1 + np.exp(-z))
dz = da * s * (1 - s)
assert (dz.shape == z.shape)
assert (da.shape == z.shape)
return dz
def softmax_back_propagate(da, cache):
"""
Implements back propagation for a softmax unit.
Arguments:
da -- post-activation gradient, of any shape
cache -- (z,) from the forward propagate of curr layer
Returns:
dz -- gradient of cost wrt z
"""
z = cache
y_hat = np.exp(z) / np.exp(z).sum()
dz = da * (1 - y_hat)
assert (dz.shape == z.shape)
return dz
def relu_back_propagate(da, cache):
"""
Implements back propagation for a single relu unit.
Arguments:
da -- post-activation gradient, of any shape
cache -- (z,) from forward propagation of curr layer
Returns:
dz -- gradient cost wrt z
"""
z = cache
dz = np.array(da, copy=True) # converting dz to correct type
# when z <= 0, set dz to 0
dz[z <= 0] = 0.
assert (dz.shape == z.shape)
return dz
def dense_back_propagate(dz, cache):
"""
Implements dense layer back propagation.
Arguments:
dz -- gradient of cost wrt output of curr layer
cache -- (a_prev, w, b) from forward propagate in current layer
Returns:
da_prev -- gradient of cost wrt prev layer activation, shape as a_prev
dw -- gradient of cost wrt curr layer w, shape as w
db -- gradient of cost wrt b, shape as b
"""
a_prev, w, b = cache
m = a_prev.shape[1]
dw = (1. / m) * np.dot(dz, a_prev.T)
db = (1. / m) * np.sum(dz, axis=1, keepdims=True)
da_prev = np.dot(w.T, dz)
assert (da_prev.shape == a_prev.shape)
assert (dw.shape == w.shape)
assert (db.shape == b.shape)
return da_prev, dw, db
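# Illustrative sketch (not in the original): the backward pass returns gradients
# with the same shapes as the forward-pass inputs, which the asserts above check.
def _demo_dense_back_propagate_shapes():
    a_prev = np.random.randn(5, 2)
    w = np.random.randn(3, 5)
    b = np.zeros((3, 1))
    dz = np.random.randn(3, 2)                  # gradient of cost wrt z
    da_prev, dw, db = dense_back_propagate(dz, (a_prev, w, b))
    print(da_prev.shape, dw.shape, db.shape)    # (5, 2) (3, 5) (3, 1)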
def dense_activation_back_propagate(da, cache, activation):
"""
Back propagation for a dense-activation layer.
Arguments:
da -- post-activation gradient for current layer l
cache -- tuple of values (linear_cache, activation_cache)
a -- activation as string: 'sigmoid', 'relu', or 'softmax'
Returns:
da_prev -- gradient of cost wrt the activation
of the previous layer l-1, same shape as a_prev
dw -- gradient of cost wrt w (current layer l), same shape as w
db -- Gradient of cost wrt b (current layer l), same shape as b
"""
dense_cache, a_cache = cache
if activation == 'relu':
dz = relu_back_propagate(da, a_cache)
elif activation == 'sigmoid':
dz = sigmoid_back_propagate(da, a_cache)
elif activation == 'softmax':
dz = da # softmax_back_propagate(da, a_cache)
da_prev, dw, db = dense_back_propagate(dz, dense_cache)
return da_prev, dw, db
def back_propagate(y_hat, y, caches, activation):
"""
Implements backprop for linear-relu * (n-1) -> linear-sigmoid model.
Arguments:
y_hat -- probability prediction vector, output of foreword_propagate()
y -- true "label" vector
caches -- list of caches containing:
every cache from foreword_propagate
Returns:
grads -- dictionary with the gradients:
grads['dai'], grads['dwi'], grads['dbi'] for i in (n-1..0)
"""
y = y.reshape(y_hat.shape)
grads = {}
if activation == 'sigmoid':
# derivative of cost wrt output activation for binary classifier
da = - (np.divide(y, y_hat) - np.divide(1 - y, 1 - y_hat))
elif activation == 'softmax':
# for multi class classifier, unlike sigmoid,
# do not compute the derivative of cost
# wrt output activation
# but the derivative of cost wrt input of softmax
da = y_hat - y
else:
raise ValueError('Unexpected activation function:', activation)
# i-th layer sigmoid-dense gradients
# inputs: ai, y, caches
# outputs: grads['dai'], grads['dwi'], grads['dbi']
n = len(caches)
c = caches[n-1]
grads['da%s' % n], grads['dw%s' % n], grads['db%s' % n] = (
dense_activation_back_propagate(da, c, activation=activation))
for i in reversed(range(n - 1)):
c = caches[i]
da_prev_temp, dw_temp, db_temp = dense_activation_back_propagate(
grads['da%s' % (i+2)], c, activation="relu")
grads['da%s' % (i+1)] = da_prev_temp
grads['dw%s' % (i+1)] = dw_temp
grads['db%s' % (i+1)] = db_temp
return grads
def update_parameters(params, grads, alpha):
"""
Updates model parameters using gradient descent.
Arguments:
params -- dictionary containing model parameters
grads -- dictionary with gradients, output of back_propagate()
alpha -- learning rate
Returns:
params -- dictionary with updated parameters
params['w' + str(l)] = ...
params['b' + str(l)] = ...
"""
n_layers = len(params) // 2
for i in range(n_layers):
params['w%s' % (i+1)] = (
params['w%s' % (i+1)] - alpha * grads['dw%s' % (i+1)])
params['b%s' % (i+1)] = (
params['b%s' % (i+1)] - alpha * grads['db%s' % (i+1)])
return params
def sequential_model(
x, y, layers_dims, alpha=0.0075, n_iters=3000, debug=False):
"""
Implements a multilayer NN: linear-relu*(l-1)->linear-sigmoid.
Arguments:
x -- input data, shape (num_px * num_px * 3, number of examples)
y -- true "label" vector, shape (1, number of examples)
layers_dims -- list with input and layer sizes of length
(# of layers + 1).
alpha -- learning rate of the gradient descent update rule
n_iters -- number of iterations of the optimization loop
debug -- if True, prints cost every 100 steps
Returns:
params -- learned parameters used for prediction
"""
costs = []
params = init_params(layers_dims)
activation = 'sigmoid' if y.shape[0] == 1 else 'softmax'
# gradient descent loop
for i in range(0, n_iters):
ai, caches = foreword_propagate(x, params, activation, layers_dims[-1])
cost = comp_cost(ai, y, activation)
grads = back_propagate(ai, y, caches, activation)
params = update_parameters(params, grads, alpha)
if debug and i % 100 == 0:
print('Cost after iteration %i: %f' % (i, cost))
if debug and i % 100 == 0:
costs.append(cost)
def plot_cost():
return True
# plt.plot(np.squeeze(costs))
# plt.ylabel('cost')
# plt.xlabel('iterations (per tens)')
# plt.title('Learning rate =' + str(learning_rate))
# plt.show()
if debug:
plot_cost()
return params
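# Illustrative sketch (assumed example): training the network on a tiny
# synthetic binary-classification problem. Layer sizes and hyperparameters are
# arbitrary, and the run is verbose because foreword_propagate() prints every layer.
def _demo_sequential_model():
    x_blob, y_blob = make_blobs(n_samples=20, n_features=4, centers=2, random_state=0)
    x_demo = MinMaxScaler().fit_transform(x_blob).T   # shape (4, 20)
    y_demo = y_blob.reshape((1, -1))                  # shape (1, 20)
    return sequential_model(x_demo, y_demo, [4, 3, 1], alpha=0.05, n_iters=50)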
def test_dnn():
layers_dims = [10, 4, 2, 1]
np.random.seed(42)
x = np.random.randn(30).reshape((10, 3))
scaler = MinMaxScaler()
x = scaler.fit_transform(x)
print('x shape:', x.shape)
# (10, 3)
y = np.random.randint(0, 2, 3)
y = y.reshape((1, 3))
print('y shape:', y.shape)
# (1, 3)
params = init_params(layers_dims)
activation = 'sigmoid'
y_hat, caches = foreword_propagate(x, params, activation, layers_dims[-1])
print(y_hat)
'''
x = array([[ 0.49671415, -0.1382643 , 0.64768854],
[ 1.52302986, -0.23415337, -0.23413696],
[ 1.57921282, 0.76743473, -0.46947439],
[ 0.54256004, -0.46341769, -0.46572975],
[ 0.24196227, -1.91328024, -1.72491783],
[-0.56228753, -1.01283112, 0.31424733],
[-0.90802408, -1.4123037 , 1.46564877],
[-0.2257763 , 0.0675282 , -1.42474819],
[-0.54438272, 0.11092259, -1.15099358],
[ 0.37569802, -0.60063869, -0.29169375]])
y = array([[1, 0, 1]])
params = {
'b1': array([[0.],
[0.],
[0.],
[0.]]),
'b2': array([[0.],
[0.]]),
'b3': array([[0.]]),
'w1': array([[ 0.17511345, -0.47971962, -0.30251271, -0.32758364, -0.15845926,
0.13971159, -0.25937964, 0.21091907, 0.04563044, 0.23632542],
[-0.10095298, -0.19570727, 0.34871516, -0.58248266, 0.12900959,
0.29941416, 0.1690164 , -0.06477899, -0.08915248, 0.00968901],
[-0.22156274, 0.21357835, 0.02842162, -0.19919548, 0.33684907,
-0.21418677, 0.44400973, -0.39859007, -0.13523984, -0.05911348],
[-0.72570658, 0.19094223, -0.05694645, 0.05892507, 0.04916247,
-0.04978276, -0.14645337, 0.20778173, -0.4079519 , -0.04742307]]),
'w2': array([[-0.32146246, 0.11706767, -0.18786398, 0.20685326],
[ 0.61687454, -0.21195547, 0.51735934, -0.35066345]]),
'w3': array([[ 0.6328142 , -1.27748553]])}
layer: 1
z: ((array([[ 0.49671415, -0.1382643 , 0.64768854],
[ 1.52302986, -0.23415337, -0.23413696],
[ 1.57921282, 0.76743473, -0.46947439],
[ 0.54256004, -0.46341769, -0.46572975],
[ 0.24196227, -1.91328024, -1.72491783],
[-0.56228753, -1.01283112, 0.31424733],
[-0.90802408, -1.4123037 , 1.46564877],
[-0.2257763 , 0.0675282 , -1.42474819],
[-0.54438272, 0.11092259, -1.15099358],
[ 0.37569802, -0.60063869, -0.29169375]]), array([[ 0.17511345, -0.47971962, -0.30251271, -0.32758364, -0.15845926,
0.13971159, -0.25937964, 0.21091907, 0.04563044, 0.23632542],
[-0.10095298, -0.19570727, 0.34871516, -0.58248266, 0.12900959,
0.29941416, 0.1690164 , -0.06477899, -0.08915248, 0.00968901],
[-0.22156274, 0.21357835, 0.02842162, -0.19919548, 0.33684907,
-0.21418677, 0.44400973, -0.39859007, -0.13523984, -0.05911348],
[-0.72570658, 0.19094223, -0.05694645, 0.05892507, 0.04916247,
-0.04978276, -0.14645337, 0.20778173, -0.4079519 , -0.04742307]]), array([[0.],
[0.],
[0.],
[0.]])), array([[-1.16416195, 0.41311912, 0.03543866],
[-0.33736273, -0.2115404 , 0.39936218],
[ 0.09221453, -0.96629303, 0.62912924],
[ 0.20260571, 0.14508069, -0.64319486]]))
a: [[0. 0.41311912 0.03543866]
[0. 0. 0.39936218]
[0.09221453 0. 0.62912924]
[0.20260571 0.14508069 0. ]]
----------------------------------------
layer: 2
z: ((array([[0. , 0.41311912, 0.03543866],
[0. , 0. , 0.39936218],
[0.09221453, 0. , 0.62912924],
[0.20260571, 0.14508069, 0. ]]), array([[-0.32146246, 0.11706767, -0.18786398, 0.20685326],
[ 0.61687454, -0.21195547, 0.51735934, -0.35066345]]), array([[0.],
[0.]])), array([[ 0.02458586, -0.10279187, -0.08283052],
[-0.02333837, 0.20396817, 0.26270009]]))
a: [[0.02458586 0. 0. ]
[0. 0.20396817 0.26270009]]
----------------------------------------
output layer:
z: ((array([[0.02458586, 0. , 0. ],
[0. , 0.20396817, 0.26270009]]), array([[ 0.6328142 , -1.27748553]]), array([[0.]])), array([[ 0.01555828, -0.26056638, -0.33559556]]))
a: [[0.50388949 0.43522448 0.41687976]]
----------------------------------------
y_hat = array([[0.50388949, 0.43522448, 0.41687976]])
'''
if __name__ == '__main__':
np.random.seed(1)
train_x, train_y, test_x, test_y = get_data()
if False:
layers_dims = [12288, 20, 7, 5, 2]
df = pd.DataFrame(data=train_y[0], columns=['yt'])
from io import StringIO
from copy import deepcopy
import numpy as np
import pandas as pd
import re
from glypnirO_GUI.get_uniprot import UniprotParser
from sequal.sequence import Sequence
from sequal.resources import glycan_block_dict
# Defining important column names within the dataset
sequence_column_name = "Peptide\n< ProteinMetrics Confidential >"
glycans_column_name = "Glycans\nNHFAGNa"
starting_position_column_name = "Starting\nposition"
modifications_column = "Modification Type(s)"
observed_mz_column_name = "Calc.\nmass (M+H)"
protein_column_name = "Protein Name"
rt = "Scan Time"
selected_aa = {"N", "S", "T"}
tmt_mod_regex = re.compile("\w(\d+)\((.+)\)")
# Defining important regular expression pattern to parse the dataset
regex_glycan_number_pattern = "\d+"
glycan_number_regex = re.compile(regex_glycan_number_pattern)
regex_pattern = "\.[\[\]\w\.\+\-]*\."
sequence_regex = re.compile(regex_pattern)
uniprot_regex = re.compile("(?P<accession>[OPQ][0-9][A-Z0-9]{3}[0-9]|[A-NR-Z][0-9]([A-Z][A-Z0-9]{2}[0-9]){1,2})(?P<isoform>-\d)?")
glycan_regex = re.compile("(\w+)\((\d+)\)")
# Function to keep only PSM collections that do not contain solely unglycosylated peptides
def filter_U_only(df):
unique_glycan = df["Glycans"].unique()
if len(unique_glycan) > 1 or True not in np.isin(unique_glycan, "U"):
# print(unique_glycan)
return True
return False
# Filter for PSM collections that contain unglycosylated PSMs alongside glycosylated ones
def filter_with_U(df):
unique_glycan = df["Glycans"].unique()
if len(unique_glycan) > 1 \
and \
True in np.isin(unique_glycan, "U"):
return True
return False
# parse modification mass and convert it from string to float
def get_mod_value(amino_acid):
if amino_acid.mods:
if amino_acid.mods[0].value.startswith("+"):
return float(amino_acid.mods[0].value[1:])
else:
return -float(amino_acid.mods[0].value[1:])
else:
return 0
# load fasta file into a dictionary
def load_fasta(fasta_file_path, selected=None, selected_prefix=""):
with open(fasta_file_path, "rt") as fasta_file:
result = {}
current_seq = ""
for line in fasta_file:
line = line.strip()
if line.startswith(">"):
if selected:
if selected_prefix + line[1:] in selected:
result[line[1:]] = ""
current_seq = line[1:]
else:
result[line[1:]] = ""
current_seq = line[1:]
else:
result[current_seq] += line
return result
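# Illustrative usage sketch (assumed example): load_fasta() on a two-entry FASTA
# file written to a temporary path. The headers and sequences are invented.
def _demo_load_fasta():
    import os
    import tempfile
    fasta_text = ">sp|P00001|DEMO1\nMKTAYIAKQR\nQISFVKSHFS\n>sp|P00002|DEMO2\nMGSSHHHHHH\n"
    with tempfile.NamedTemporaryFile("wt", suffix=".fasta", delete=False) as handle:
        handle.write(fasta_text)
        path = handle.name
    try:
        records = load_fasta(path)
        # {'sp|P00001|DEMO1': 'MKTAYIAKQRQISFVKSHFS', 'sp|P00002|DEMO2': 'MGSSHHHHHH'}
        print(records)
    finally:
        os.remove(path)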
# Storing analysis result for each protein
class Result:
def __init__(self, df):
self.df = df
self.empty = df.empty
def separate_result(self):
normal_header = []
df = self.df
for c in df.columns:
if c in {"Protein", "Peptides", "Position", "Glycans"}:
normal_header.append(c)
else:
yield Result(df[normal_header+[c]])
def calculate_proportion(self, occupancy=True, separate_sample_df=False):
"""
calculate proportion of each glycoform from the dataset
:type occupancy: bool
whether or not to calculate the proportion as occupancy which would includes unglycosylated form.
"""
df = self.df.copy()
#print(df)
grouping_peptides = [# "Isoform",
"Peptides", "Position"]
grouping_position = [# "Isoform",
"Position"]
if "Protein" in df.columns:
grouping_peptides = ["Protein"] + grouping_peptides
grouping_position = ["Protein"] + grouping_position
if not occupancy:
df = df[df["Glycans"] != "U"]
if "Peptides" in df.columns:
gr = grouping_peptides
else:
gr = grouping_position
for _, g in df.groupby(gr):
if "Value" in g.columns:
total = g["Value"].sum()
for i, r in g.iterrows():
df.at[i, "Value"] = r["Value"] / total
else:
for c in g.columns:
if c not in {"Protein", "Peptides", "Position", "Glycans"}:
total = g[c].sum()
for i, r in g.iterrows():
df.at[i, c] = r[c] / total
if separate_sample_df:
return [df[gr + [c]] for c in df.columns]
return df
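# Illustrative sketch (assumed example, not from the original code): what
# calculate_proportion() does to a minimal single-sample result table.
#
#     df = pd.DataFrame({
#         "Position": ["N45", "N45", "N71"],
#         "Glycans":  ["HexNAc(2)Hex(5)", "U", "U"],
#         "Value":    [30.0, 10.0, 20.0],
#     })
#     Result(df).calculate_proportion()
#     # N45 rows become 0.75 and 0.25 (share within the site); N71 becomes 1.0
#     Result(df).calculate_proportion(occupancy=False)
#     # "U" rows are dropped first, so N45 / HexNAc(2)Hex(5) becomes 1.0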
def to_summary(self, df=None, name="", trust_byonic=False, occupancy=True):
"""
:type trust_byonic: bool
whether or not to calculate raw values for each individual position assigned by byonic
:type occupancy: bool
whether or not to calculate the proportion as occupancy which would includes unglycosylated form.
:type df: pd.DataFrame
"""
grouping_peptides = [# "Isoform",
"Peptides", "Position", "Glycans"]
grouping_position = [# "Isoform",
"Position", "Glycans"]
if df is None:
df = self.df
if "Protein" in df.columns:
grouping_peptides = ["Protein"] + grouping_peptides
grouping_position = ["Protein"] + grouping_position
if not occupancy:
df = df[df["Glycans"] != "U"]
if trust_byonic:
temp = df.set_index(grouping_position)
else:
temp = df.set_index(grouping_peptides)
if "Value" in temp.columns:
temp.rename(columns={"Value": name}, inplace=True)
else:
temp = temp.rename(columns={k: name for k in temp.columns if k not in {"Protein", "Peptides", "Position", "Glycans"}})
#print(temp)
return temp
# Object containing each individual protein. Most of the methods involved in the analysis are contained within this object.
# Each protein is assigned a GlypnirOComponent object with a subset of its PD and Byonic data
class GlypnirOComponent:
def __init__(self, filename, area_filename=None, replicate_id=None, condition_id=None, protein_name=None, protein_column=protein_column_name, minimum_score=0, trust_byonic=False, legacy=False, mode=1):
sequence_column_name = "Peptide\n< ProteinMetrics Confidential >"
glycans_column_name = "Glycans\nNHFAGNa"
starting_position_column_name = "Starting\nposition"
modifications_column = "Modification Type(s)"
observed_mz_column_name = "Calc.\nmass (M+H)"
protein_column_name = "Protein Name"
self.protein_column = protein_column
self.sequence_column = None
self.glycans_column = None
self.starting_position_column = None
self.modifications_column = None
self.observed_mz_column = None
if type(filename) == pd.DataFrame:
data = filename.copy()
else:
if filename.endswith(".xlsx"):
data = pd.read_excel(filename, sheet_name="Spectra")
elif filename.endswith(".txt"):
data = pd.read_csv(filename, sep="\t")
if mode == 1:
self.protein_column = protein_column_name
self.sequence_column = sequence_column_name
self.glycans_column = glycans_column_name
self.starting_position_column = starting_position_column_name
self.modifications_column = modifications_column
self.observed_mz_column = observed_mz_column_name
if area_filename is not None:
if type(area_filename) == pd.DataFrame:
file_with_area = area_filename
else:
if area_filename.endswith("xlsx"):
file_with_area = pd.read_excel(area_filename)
else:
file_with_area = pd.read_csv(area_filename, sep="\t")
# Joining of area and glycan data for each PSM using scan number as merging point
data["Scan number"] = pd.to_numeric(data["Scan #"].str.extract("scan=(\d+)", expand=False))
data = pd.merge(data, file_with_area, left_on="Scan number", right_on="First Scan")
# Subset and filter the data to rows with a non-blank area value that pass the minimum score cutoff
self.protein_name = protein_name
self.data = data.sort_values(by=['Area'], ascending=False)
self.replicate_id = replicate_id
self.condition_id = condition_id
self.data = data[data["Area"].notnull()]
self.data = self.data[(self.data["Score"] >= minimum_score) &
((self.data[protein_column].str.contains(protein_name, regex=False)) | (self.data[protein_column].str.startswith(protein_name)))
# (data["Protein Name"] == ">"+protein_name) &
]
self.data = self.data[~self.data[protein_column].str.contains(">Reverse")]
elif mode == 2:
self.data, self.tmt_sample_info = self.process_tmt_pd_byonic(data)
if len(self.data.index) > 0:
self.empty = False
else:
self.empty = True
self.row_to_glycans = {}
self.glycan_to_row = {}
self.trust_byonic = trust_byonic
self.legacy = legacy
self.sequon_glycosites = set()
self.glycosylated_seq = set()
if mode == 2:
self.sequon_glycosites = {}
self.glycosylated_seq = {}
self.unique_rows = []
# Method to calculate glycan mass from its string representation using a regular expression and a dictionary of glycan blocks and their masses
def calculate_glycan(self, glycan):
current_mass = 0
current_string = ""
for i in glycan:
current_string += i
if i == ")":
s = glycan_regex.search(current_string)
if s:
name = s.group(1)
amount = s.group(2)
current_mass += glycan_block_dict[name]*int(amount)
current_string = ""
return current_mass
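# Illustrative sketch (assumed example): how calculate_glycan() walks a
# Byonic-style composition string. The exact keys and masses come from
# sequal.resources.glycan_block_dict, so the numbers here are indicative only.
#
#     comp = "HexNAc(2)Hex(3)"
#     # glycan_regex captures ("HexNAc", "2") and ("Hex", "3") block by block,
#     # so the method returns
#     #     2 * glycan_block_dict["HexNAc"] + 3 * glycan_block_dict["Hex"]
#     # roughly 892.3 Da, assuming residue masses of ~203.08 and ~162.05.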
#process tmt_pd_byonic dataframe
def process_tmt_pd_byonic(self, df):
pattern = re.compile("\((\w+), (\w+)\)/\((\w+), (\w+)\)")
samples = {}
df = df[(df["Search Engine Rank"] == 1) & (df["Quan Usage"] == "Use")]
for c in df.columns:
s = pattern.search(c)
if s:
if s.group(4) not in samples:
samples[s.group(4)] = set()
samples[s.group(4)].add(s.group(3))
if s.group(2) not in samples:
samples[s.group(2)] = set()
samples[s.group(2)].add(s.group(1))
return df, samples
# process the protein data
def process(self, mode=1, tmt_info=None, tmt_minimum=2, **kwargs):
for k in kwargs:
if k in self.__dict__:
setattr(self, k, kwargs[k])
for i, r in self.data.iterrows():
glycan_dict = {}
if mode == 1:
search = sequence_regex.search(r[self.sequence_column])
# get peptide sequence without flanking prefix and suffix amino acids then create a Sequence object from the string
seq = Sequence(search.group(0))
# get unformatted string from the Sequence object. This unformatted string contains a "." at both ends
elif mode == 2:
seq = Sequence(r[self.sequence_column].upper())
stripped_seq = seq.to_stripped_string()
origin_seq = r[self.starting_position_column] - 1
self.data.at[i, "origin_start"] = origin_seq
# Parse glycans from the glycan column into a list
glycans = []
if mode == 2:
assert tmt_info is not None
score = 0
tmt_pass = False
for c in tmt_info:
tmt_data = r.loc[tmt_info[c]]
if tmt_data.count() >= tmt_minimum:
score += 1
if score >= len(tmt_info):
tmt_pass = True
self.data.at[i, "tmt_pass"] = tmt_pass
if not tmt_pass:
pass
if pd.notnull(r[self.glycans_column]):
glycans = r[self.glycans_column].split(",")
if mode == 1:
if search:
# store the unformatted sequence without "." at both ends into the dataframe
self.data.at[i, "stripped_seq"] = stripped_seq.rstrip(".").lstrip(".")
# calculate the programmatic starting position of the sequence
glycan_reordered = []
#calculate the programmatic stopping position of the sequence
self.data.at[i, "Ending Position"] = r[self.starting_position_column] + len(self.data.at[i, "stripped_seq"])
self.data.at[i, "position_to_glycan"] = ""
if self.trust_byonic:
n_site_status = {}
p_n = r[self.protein_column].lstrip(">")
# current_glycan = 0
max_glycans = len(glycans)
glycosylation_count = 1
# creating dictionary storing the glycan and its mass
if max_glycans:
self.row_to_glycans[i] = np.sort(glycans)
for g in glycans:
data_gly = self.calculate_glycan(g)
glycan_dict[str(round(data_gly, 3))] = g
self.glycan_to_row[g] = i
glycosylated_site = []
# iterating through the unformatted sequence and assigning glycans to modified positions based on the modified mass
for aa in range(1, len(seq) - 1):
if seq[aa].mods:
try:
mod_value = float(seq[aa].mods[0].value)
round_mod_value = round(mod_value)
round_3 = round(mod_value, 3)
# if the glycan is identified, store the position of the glycosylated amino acid in the protein sequence for later reference
if str(round_3) in glycan_dict:
seq[aa].extra = "Glycosylated"
pos = int(r[self.starting_position_column]) + aa - 2
self.sequon_glycosites.add(pos + 1)
position = "{}_position".format(str(glycosylation_count))
self.data.at[i, position] = seq[aa].value + str(pos + 1)
glycosylated_site.append(self.data.at[i, position] + "_" + str(round_mod_value))
glycosylation_count += 1
glycan_reordered.append(glycan_dict[str(round_3)])
except ValueError:
pass
if glycan_reordered:
self.data.at[i, "position_to_glycan"] = ",".join(glycan_reordered)
self.data.at[i, "glycoprofile"] = ";".join(glycosylated_site)
else:
# if the analysis is only done at the peptide and glycan combination level, we only need to flag whether the peptide is glycosylated and store the unformatted sequence of the glycosylated peptide for later reference
if pd.notnull(r[self.glycans_column]):
glycans = r[self.glycans_column].split(",")
glycans.sort()
self.data.at[i, self.glycans_column] = ",".join(glycans)
self.data.at[i, "glycosylation_status"] = True
self.glycosylated_seq.add(self.data.at[i, "stripped_seq"])
#print(self.glycosylated_seq)
elif mode == 2:
self.data.at[i, "stripped_seq"] = stripped_seq
glycan_reordered = []
# calculate the programmatic stopping position of the sequence
self.data.at[i, "Ending Position"] = r[self.starting_position_column] + len(
self.data.at[i, "stripped_seq"])
if self.trust_byonic:
# current_glycan = 0
max_glycans = len(glycans)
glycosylation_count = 1
# creating dictionary storing the glycan and its mass
if max_glycans:
self.row_to_glycans[i] = np.sort(glycans)
for g in glycans:
data_gly = self.calculate_glycan(g)
glycan_dict[str(round(data_gly, 3))] = g
if r[self.protein_column] not in self.glycan_to_row:
self.glycan_to_row[r[self.protein_column]] = {}
self.glycan_to_row[r[self.protein_column]][g] = i
glycosylated_site = []
# iterating through the unformatted sequence and assigning glycans to modified positions based on
# the modified mass
if pd.notnull(r["Modifications"]):
mod_list = r["Modifications"].split(";")
for mod in mod_list:
search_mod = tmt_mod_regex.search(mod.strip())
if search_mod:
if search_mod.group(2) in glycans:
if r[self.protein_column] not in self.sequon_glycosites:
self.sequon_glycosites[r[self.protein_column]] = set()
self.sequon_glycosites[r[self.protein_column]].add(int(search_mod.group(1))-1 + r[self.starting_position_column])
position = "{}_position".format(str(glycosylation_count))
self.data.at[i, position] = stripped_seq[int(search_mod.group(1))-1].upper() + str(int(search_mod.group(1))-1 + r[self.starting_position_column])
glycosylated_site.append(self.data.at[i, position] + "_" + search_mod.group(2))
glycosylation_count += 1
glycan_reordered.append(search_mod.group(2))
if glycan_reordered:
if len(glycan_reordered) > 1:
self.data.at[i, "position_to_glycan"] = ",".join(glycan_reordered)
else:
self.data.at[i, "position_to_glycan"] = glycan_reordered[0]
if glycosylated_site:
if len(glycosylated_site) > 1:
self.data.at[i, "glycoprofile"] = ";".join(glycosylated_site)
else:
self.data.at[i, "glycoprofile"] = glycosylated_site[0]
else:
# if the analysis is only done at the peptide and glycan combination level, we only need to flag whether the peptide is glycosylated and store the unformatted sequence of the glycosylated peptide for later reference
if pd.notnull(r[self.glycans_column]):
glycans = r[self.glycans_column].split(",")
glycans.sort()
self.data.at[i, self.glycans_column] = ",".join(glycans)
self.data.at[i, "glycosylation_status"] = True
if r[self.protein_column] not in self.glycosylated_seq:
self.glycosylated_seq[r[self.protein_column]] = set()
self.glycosylated_seq[r[self.protein_column]].add(self.data.at[i, "stripped_seq"])
# print(self.glycosylated_seq)
# analyze the compiled data by identifying unique PSMs and calculating the cumulative raw area under the curve
def analyze(self, max_sites=0, combine_d_u=True, splitting_sites=False, debug=False, protein_column=protein_column_name, glycans_column=glycans_column_name, starting_position_column=starting_position_column_name, observed_mz_column=observed_mz_column_name, mode=1, tmt_info=None):
result = []
# sort the data first by area then score in descending order.
if mode == 1:
temp = self.data.sort_values(["Area", "Score"], ascending=False)
if self.trust_byonic:
grouping = ["stripped_seq", "glycoprofile", observed_mz_column]
else:
grouping = ["stripped_seq", glycans_column, starting_position_column, observed_mz_column]
elif mode == 2:
from scipy.stats import rankdata
temp = self.data[self.data["tmt_pass"] == True]
if self.trust_byonic:
grouping = [protein_column, "stripped_seq", "Modifications", "glycoprofile", "Charge"]
else:
grouping = [protein_column, "stripped_seq", glycans_column, starting_position_column, observed_mz_column]
area_columns = []
for a in tmt_info.values():
for aa in a:
area_columns.append(aa)
area_columns.sort(key=int)
# cells in glycan column with no glycan will be assigned a string "None"
temp[glycans_column] = temp[glycans_column].fillna("None")
if "glycoprofile" in temp.columns:
temp["glycoprofile"] = temp["glycoprofile"].fillna("U")
out = []
if self.trust_byonic:
# if trust byonic we would analyze by grouping the data at unformatted sequence, glycosylated positions and calculated m/z
if mode == 1:
seq_glycosites = list(self.sequon_glycosites)
elif mode == 2:
seq_glycosites = {}
for i in self.sequon_glycosites:
seq_glycosites[i] = list(self.sequon_glycosites[i])
seq_glycosites[i].sort()
for i, g in temp.groupby(grouping):
seq_within = []
# select row with highest area value in a group
if mode == 1:
max_area_row = g["Area"].idxmax()
elif mode == 2:
channel_ranks = []
for channel in area_columns:
g[channel+"_rank"] = pd.Series(rankdata(g[channel].values), index=g[channel].index)
channel_ranks.append(channel+"_rank")
g["score_rank"] = g.apply(lambda row: np.sum(row[channel_ranks]), axis=1)
max_area_row = g["score_rank"].idxmax()
unique_row = g.loc[max_area_row]
#print(unique_row)
#print(seq_glycosites, i)
if mode == 1:
for n in seq_glycosites:
#print(n, unique_row[starting_position_column], unique_row["Ending Position"])
# create a list of n glycosylation sites that can be found within the peptide sequence
if unique_row[starting_position_column] <= n < unique_row["Ending Position"]:
# print(unique_row["stripped_seq"], n, unique_row[starting_position_column_name])
seq_within.append(
unique_row["stripped_seq"][
int(n - unique_row[starting_position_column])].upper() + str(n))
if mode == 2:
if i[0] in seq_glycosites:
if seq_glycosites[i[0]]:
for n in seq_glycosites[i[0]]:
#print(n, unique_row[starting_position_column], unique_row["Ending Position"])
# create a list of n glycosylation sites that can be found within the peptide sequence
if unique_row[starting_position_column] <= n < unique_row["Ending Position"]:
# print(unique_row["stripped_seq"], n, unique_row[starting_position_column_name])
seq_within.append(
unique_row["stripped_seq"][int(n - unique_row[starting_position_column])].upper() + str(n))
glycosylation_count = 0
glycans = []
if pd.notnull(unique_row["position_to_glycan"]):
glycans = unique_row["position_to_glycan"].split(",")
# create a dataset of positions, the glycans associated with each position, and their area under the curve
if debug:
if seq_within:
self.unique_rows.append(unique_row)
for c in range(len(unique_row.index)):
if unique_row.index[c].endswith("_position"):
if pd.notnull(unique_row[unique_row.index[c]]):
pos = unique_row[unique_row.index[c]]
#print(pos)
#print(seq_within)
if glycans:
if mode == 1:
result.append({"Position": pos, "Glycans": glycans[glycosylation_count], "Value": unique_row["Area"]})
elif mode == 2:
dic = {"Protein": i[0], "Position": pos, "Glycans": glycans[glycosylation_count]}
for column in area_columns:
dic[column] = unique_row[column]
result.append(dic)
ind = seq_within.index(pos)
seq_within.pop(ind)
glycosylation_count += 1
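# any sequon position within the peptide that was not matched to a glycan above is recorded with glycan "U"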
if seq_within:
for s in seq_within:
if mode == 1:
result.append({"Position": s, "Glycans": "U", "Value": unique_row["Area"]})
elif mode == 2:
dic = {"Protein": i[0], "Position": pos, "Glycans": "U"}
for column in area_columns:
dic[column] = unique_row[column]
result.append(dic)
if result:
result = pd.DataFrame(result)
# sum area under the curve of those with the same glycosylation position and glycan composition
if mode == 1:
group = result.groupby(["Position", "Glycans"])
elif mode == 2:
group = result.groupby(["Protein", "Position", "Glycans"])
out = group.agg(np.sum).reset_index()
else:
if mode == 1:
out = pd.DataFrame([], columns=["Position", "Glycans", "Values"])
elif mode == 2:
out = pd.DataFrame([], columns=["Protein", "Position", "Glycans", "Values"])
else:
# if a peptide-level analysis was done, the grouping is on unformatted sequence, glycan combination, position of the peptide N-terminus, and calculated m/z
for i, g in temp.groupby(grouping):
# select and create a dataset of unique PSMs consisting of the unformatted sequence, glycans, area under the curve and position of the peptide N-terminus
if mode == 1:
max_area_row = g["Area"].idxmax()
elif mode == 2:
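# TMT mode: per-channel rank columns are computed, and the representative row is
# the one with the largest summed channel intensity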
for channel in area_columns:
g[channel+"_rank"] = pd.Series(rankdata(g[channel]), index=g[channel].index)
g["score_rank"] = g.apply(lambda row: np.sum(row[area_columns]), axis=1)
max_area_row = g["score_rank"].idxmax()
#print(g)
unique_row = g.loc[max_area_row]
if debug:
self.unique_rows.append(unique_row)
if unique_row[glycans_column] != "None":
if mode == 1:
result.append(
{"Peptides": i[0], "Glycans": i[1], "Value": unique_row["Area"], "Position": i[2]})
elif mode == 2:
dic = {"Protein": i[0], "Peptides": i[1], "Position": i[3], "Glycans": i[2]}
for column in area_columns:
dic[column] = unique_row[column]
result.append(dic)
else:
if mode == 1:
result.append({"Peptides": i[0], "Glycans": "U", "Value": unique_row["Area"], "Position": i[2]})
elif mode == 2:
dic = {"Protein": i[0], "Peptides": i[1], "Position": i[3], "Glycans": "U"}
for column in area_columns:
dic[column] = unique_row[column]
result.append(dic)
result = pd.DataFrame(result)
# sum the areas under the curve that share the same peptide, position and glycans
if mode == 1:
group = result.groupby(["Peptides", "Position", "Glycans"])
elif mode == 2:
group = result.groupby(["Protein", "Peptides", "Position", "Glycans"])
out = group.agg(np.sum, axis=0).reset_index()
#print(out)
return Result(out)
class GlypnirO:
def __init__(self, trust_byonic=False, get_uniprot=False, debug=False, parse_uniprot=False):
self.trust_byonic = trust_byonic
self.components = None
self.uniprot_parsed_data = pd.DataFrame([])
self.get_uniprot = get_uniprot
self.unique_dict = {}
self.debug = debug
self.parse_uniprot = parse_uniprot
def add_component(self, filename, area_filename, replicate_id, sample_id):
component = GlypnirOComponent(filename, area_filename, replicate_id, sample_id)
# loading of input experiment file
def add_batch_component(self, component_list, minimum_score, protein=None, combine_uniprot_isoform=True, legacy=False, protein_column=protein_column_name, starting_position_column=starting_position_column_name):
self.load_dataframe(component_list)
protein_list = []
if protein is not None:
self.components["Protein"] = pd.Series([protein]*len(self.components.index), index=self.components.index)
for i, r in self.components.iterrows():
comp = GlypnirOComponent(r["filename"], r["area_filename"], r["replicate_id"], condition_id=r["condition_id"], protein_name=protein, minimum_score=minimum_score, trust_byonic=self.trust_byonic, legacy=legacy)
self.components.at[i, "component"] = comp
print("{} - {}, {} peptides has been successfully loaded".format(r["condition_id"], r["replicate_id"], str(len(comp.data.index))))
else:
components = []
for i, r in self.components.iterrows():
data = | pd.read_excel(r["filename"], sheet_name="Spectra") | pandas.read_excel |
import argparse
import os
import pickle
import random
import time
import matplotlib.pyplot as plt
import numpy as np
import ot
import pandas as pd
import pyabc
import utils
from scipy.stats import invgamma
np.random.seed(1)
random.seed(1)
def distance_fn(type, k=2, m=32):
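# build a two-argument distance callable; both variants compare the arrays stored
# under the "data" key, with k and m forwarded to the OT helpers in utils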
if type == "bombOT":
return lambda x, y: utils.BoMbOT(x["data"], y["data"], k=k, m=m)
elif type == "mOT":
return lambda x, y: utils.mOT(x["data"], y["data"], k=k, m=m)
else:
raise ValueError("Distance type should be bombOT or mOT")
def save_results(history, dirname):
# Create directory that will contain the results
if not os.path.exists(dirname):
os.makedirs(dirname)
for it in range(history.max_t + 1):
# Save the posterior distribution at each ABC iteration
filename = "posterior_it=" + str(it) + ".csv"
df, w = history.get_distribution(m=0, t=it)
df["weight"] = w
df.to_csv(os.path.join(dirname, filename))
# Save extended information at each iteration, including weighted distances that the parameter samples achieve
filename = "info_it=" + str(it) + ".csv"
df = history.get_population_extended(m=0, t=it)
df.to_csv(os.path.join(dirname, filename))
# Save information on the evolution of epsilon, the number of sample attempts per iteration and the iteration times
filename = "all_populations.csv"
df = history.get_all_populations()
# df['times'] = np.insert(times, 0, 0)
df.to_csv(os.path.join(dirname, filename))
def plot_posterior(param, dim, n_obs, n_it, n_particles, types, labels, k, m):
# Matplotlib settings
plt.rcParams["lines.linewidth"] = 1
directory = os.path.join(
    "results",
    "{}_dim={}_n_obs={}_n_particles={}_n_it={}_k={}_m={}".format(
        param, dim, n_obs, n_particles, n_it, k, m
    ),
)
# Plot true posterior pdf
fig = plt.figure(0, figsize=(4, 2))
with open(os.path.join(directory, "true_posterior"), "rb") as f:
post_samples = pickle.load(f)
pyabc.visualization.plot_kde_1d(
| pd.DataFrame({"post_samples": post_samples}) | pandas.DataFrame |
#!/usr/bin/env python3
# compare event timing from adult raters with event timing from children raters
import glob
import numpy as np
import pandas as pd
from settings import *
def get_boundaries(df,agedf):
desc_counts = df.loc[df['Screen Name'] == 'Desc_Me']['Participant Public ID'].value_counts()
PartIDs = np.array(desc_counts[desc_counts > 1].index)
df=df[df['Participant Public ID'].isin(PartIDs)]
agedf = agedf[agedf['Participant Public ID'].isin(PartIDs)]
boundaries = pd.to_numeric(df.loc[df['Screen Name'] == 'Desc_Me']['Reaction Time']).values
spike_boundaries = np.round(boundaries/1000/TR,0).astype(int)
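# bin the boundary presses by TR and convolve with the HRF to get an
# HRF-weighted event time course truncated to the run length (nTR)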
counts = np.append(np.bincount(spike_boundaries)[:-2],np.bincount(spike_boundaries)[-1])
ev_conv = np.convolve(counts,hrf)[:nTR]
# Subject ages:
Ages = []
for sub in PartIDs:
subdf = agedf[agedf['Participant Public ID'].isin([sub])]
Ages.append(pd.to_numeric(subdf[subdf['Question Key']=='age-year']['Response'].values)[0] + pd.to_numeric(subdf[subdf['Question Key']=='age-month']['Response'].values)[0] / 12)
return spike_boundaries,ev_conv,Ages,df,agedf
def xcorr(a,b):
# This helped convince me I'm doing the right thing:
# https://currents.soest.hawaii.edu/ocn_data_analysis/_static/SEM_EDOF.html
a = (a - np.mean(a)) / (np.std(a))
b = (b - np.mean(b)) / (np.std(b))
c = np.correlate(a, b, 'full')/max(len(a),len(b))
return c
segpath = codedr + 'HBN_fmriprep_code/video_segmentation/'
ev_figpath = figurepath+'event_annotations/'
nTR = 750
TR = 0.8
# HRF (from AFNI)
dt = np.arange(0, 15,TR)
p = 8.6
q = 0.547
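# gamma-variate HRF sampled at the TR; with these AFNI defaults it peaks at t = p*q (~4.7 s) with unit amplitude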
hrf = np.power(dt / (p * q), p) * np.exp(p - dt / q)
eventdict = {key:{} for key in ['timing','annotation']}
for csv in glob.glob(segpath+'*csv'):
initials = csv.split('/')[-1].split('-')[0]
df = pd.read_csv(csv)
if not any('TR' in c for c in df.columns):
df.columns = df.iloc[0]
df = df.iloc[1:]
df = df.loc[:, df.columns.notnull()]
TRstr = [t for t in df.columns if 'TR' in t][0]
if TRstr != 'TR':
df = df[(df['Scene Title '].notna()) & (df['Start TR'].notna())]
df = df.rename(columns={'Scene Title ': 'Segment details'})
eventdict['timing'][initials] = [int(tr) for tr in list(df[TRstr]) if not pd.isnull(tr)]
eventdict['annotation'][initials] = list(df['Segment details'])
nsubj = len(eventdict['timing'])
nevent = []
ev_annot = []
for v in eventdict['timing'].values():
ev_annot.extend(v)
nevent.append(len(v))
ev_annot = np.asarray(ev_annot, dtype=int)
counts = np.append(np.bincount(ev_annot)[:-2],np.bincount(ev_annot)[-1])
ev_conv = np.convolve(counts,hrf)[:nTR]
Prolificdf = pd.read_csv('data_exp_68194-v4/data_exp_68194-v4_task-1t2b.csv')
Prolificagedf = | pd.read_csv('data_exp_68194-v4/data_exp_68194-v4_questionnaire-xtqr.csv') | pandas.read_csv |
# Copyright 1999-2018 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pandas as pd
import numpy as np
from ...serialize import ValueType, ListField, StringField, BoolField, AnyField
from ... import opcodes as OperandDef
from ...utils import lazy_import
from ..operands import DataFrameOperand, DataFrameOperandMixin, ObjectType
cudf = lazy_import('cudf')
class DataFrameConcat(DataFrameOperand, DataFrameOperandMixin):
_op_type_ = OperandDef.CONCATENATE
_axis = AnyField('axis')
_join = StringField('join')
_join_axes = ListField('join_axes', ValueType.key)
_ignore_index = BoolField('ignore_index')
_keys = ListField('keys')
_levels = ListField('levels')
_names = ListField('names')
_verify_integrity = BoolField('verify_integrity')
_sort = BoolField('sort')
_copy = BoolField('copy')
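# the operand fields above mirror the keyword arguments of pandas.concat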
def __init__(self, axis=None, join=None, join_axes=None, ignore_index=None,
keys=None, levels=None, names=None, verify_integrity=None,
sort=None, copy=None, sparse=None, object_type=None, **kw):
super(DataFrameConcat, self).__init__(
_axis=axis, _join=join, _join_axes=join_axes, _ignore_index=ignore_index,
_keys=keys, _levels=levels, _names=names,
_verify_integrity=verify_integrity, _sort=sort, _copy=copy,
_sparse=sparse, _object_type=object_type, **kw)
@property
def axis(self):
return self._axis
@property
def join(self):
return self._join
@property
def join_axes(self):
return self._join_axes
@property
def ignore_index(self):
return self._ignore_index
@property
def keys(self):
return self._keys
@property
def level(self):
return self._levels
@property
def name(self):
return self._names
@property
def verify_integrity(self):
return self._verify_integrity
@property
def sort(self):
return self._sort
@property
def copy_(self):
return self._copy
@classmethod
def execute(cls, ctx, op):
def _base_concat(chunk, inputs):
# auto generated concat when executing a DataFrame, Series or Index
if chunk.op.object_type == ObjectType.dataframe:
return _auto_concat_dataframe_chunks(chunk, inputs)
elif chunk.op.object_type == ObjectType.series:
return _auto_concat_series_chunks(chunk, inputs)
else:
raise TypeError('Only DataFrameChunk, SeriesChunk and IndexChunk '
'can be automatically concatenated')
def _auto_concat_dataframe_chunks(chunk, inputs):
if chunk.op.axis is not None:
return | pd.concat(inputs, axis=op.axis) | pandas.concat |
import os
import pickle
import pandas as pd
from . import feature_selection
PATRIC_FILE_EXTENSION_TO_PGFAM_COL = {'.txt' : 'pgfam', '.tab' : 'pgfam_id'}
GENOME_ID = 'Genome ID'
LABEL = 'Label'
HP = 'HP'
NHP = 'NHP'
def read_merged_file(file_path):
"""
Reads genomes merged file into pd.Series object
Parameters
----------
file_path - path to a merged input *.fasta file
Returns
----------
pd.Series object that represents all the input genomes of the merged file
"""
genomes_order = []
genome_to_pgfams = {}
with open(file_path) as f:
genome_id = ''
for line in f:
if line.startswith('>'):
genome_id = line.strip()[1:]
genomes_order.append(genome_id)
else:
pgfam_id = line.strip()
genome_to_pgfams.setdefault(genome_id, []).append(pgfam_id)
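# join each genome's pgfam IDs into one space-separated string, preserving the order in which genomes appeared in the merged file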
genomes_pgfams = [' '.join(genome_to_pgfams[genome]) for genome in genomes_order]
return pd.Series(genomes_pgfams, index=genomes_order, dtype="string")
def read_genome_file(file_entry, pgfam_col):
"""
Reads a single genome file and returns its contained pgfams
Parameters
----------
file_entry - directory entry of an input genome file
pgfam_col - name of the pgfam column for this PATRIC file type
Returns
----------
str of space-separated pgfam IDs contained in the genome file
"""
pgfams = pd.read_csv(file_entry, usecols=[pgfam_col], sep='\t').dropna()
pgfams = ' '.join(list(pgfams[pgfam_col]))
return pgfams
def read_files_in_dir(dir_path):
"""
Reads all genomes *.txt/*.tab files in a directory into pd.Series object
Parameters
----------
dir_path - a path to an input directory with genome *.txt/*.tab files
Returns
----------
pd.Series object that represents all the input genomes in the directory
"""
genomes_ids = []
genomes_pgfams = []
with os.scandir(dir_path) as entries:
for entry in entries:
if entry.is_file():
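# pick the pgfam column name based on the PATRIC file extension (.txt vs .tab)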
for extension, pgfam_col in PATRIC_FILE_EXTENSION_TO_PGFAM_COL.items():
if entry.name.endswith(extension):
genome_id = entry.name.split(extension)[0]
pgfams = read_genome_file(entry, pgfam_col)
genomes_ids.append(genome_id)
genomes_pgfams.append(pgfams)
break
return | pd.Series(genomes_pgfams, index=genomes_ids, dtype="string") | pandas.Series |
"""This module containts the Universe class.
It can be used to group and manage descriptive, fundamental, and price data
for a number of equities.
"""
import pathlib
import tablib
import pandas as pd
import equities.utils
class Universe:
"""Represents a universe of equities."""
SUPPORTED_STORAGE_FORMATS = ['xlsx', 'json']
def __init__(self, filename=None, prices_helper=None, prices_yr_period=5):
"""Creates a Universe object.
filename -- the filename of the universe to load; xlsx or json.
prices_helper -- helper function for fetching price data.
prices_yr_period -- how many years of price data to request.
"""
self._id_column_name = 'Ticker'
self._prices_yr_period = prices_yr_period
self._prices_index = 'date'
self.filename = filename
if self.filename:
self.load(self.filename)
else:
self._universe_file = None
self.equities = tablib.Dataset()
self.prices = pd.DataFrame()
if prices_helper is None:
self._prices_helper = equities.utils.download_iex_eod_prices
else:
self._prices_helper = prices_helper
def __len__(self):
"""Returns the number of equities in the universe."""
return len(self.tickers)
def import_file(self, filename, id_column_name=None):
"""Import a CSV, XLS/XLSX, JSON file with descriptive stock data."""
old_equities = self.equities
self.equities.load(open(filename).read())
col_names = self.columns
if id_column_name:
if id_column_name not in col_names:
# User is asking for a column name that doesn't exist
# in the imported file.
self.equities = old_equities
msg = "Column name '{}' not found in file".\
format(id_column_name)
raise IdColumnError(msg)
else:
self._id_column_name = id_column_name
else:
if self._id_column_name not in col_names:
if self._id_column_name.lower() in col_names:
self._id_column_name = self._id_column_name.lower()
else:
# Give up at this point.
self.equities = old_equities
msg = "No column named '{}' or '{}' in data.".\
format(self._id_column_name,
self._id_column_name.lower())
raise IdColumnError(msg)
@property
def columns(self):
"""Return a list of equity column names."""
if not self.equities.dict:
return []
return self.equities.headers
@property
def tickers(self):
"""Return a list of tickers in the universe."""
if not self.equities.dict:
return []
return self.equities[self._id_column_name]
def equity(self, ticker_symbol):
"""Return a dict with data for given ticker symbol."""
if not self.equities.dict:
raise TickerSymbolNotFound
try:
row_index = \
self.equities[self._id_column_name].index(ticker_symbol)
except ValueError:
raise TickerSymbolNotFound from None
# Replace 'None' strings with real None objects.
data = [None if v == 'None' else v for v in
self.equities[row_index]]
return dict(zip(self.columns, data))
def save(self, filename=None):
"""Save the current universe to a file."""
if not filename and not self.filename:
raise UniverseFilenameNotSet
elif filename:
self.filename = filename
book = tablib.Databook()
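# always write the equities sheet; add a prices sheet only when price data exists, then save the workbook as xlsx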
book.add_sheet(self.equities)
if not self.prices.empty:
prices = tablib.Dataset().load(self.prices.to_csv())
book.add_sheet(prices)
with open(self.filename, 'wb') as fname:
fname.write(book.xlsx)
def load(self, universe_file):
"""Load a universe from a file."""
book = tablib.Databook()
file_format = self._detect_file_format(universe_file)
with open(universe_file, 'rb') as filename:
book.load(filename.read(), file_format)
self._universe_file = book
self.equities = book.sheets()[0]
if self._universe_file.size > 1:
# We assume the second sheet is prices.
self.prices = book.sheets()[1].df
# Ensure prices index is Datetime.
try:
self.prices.set_index(
pd.DatetimeIndex(self.prices[self._prices_index]),
inplace=True
)
except KeyError:
pass
else:
self.prices = | pd.DataFrame() | pandas.DataFrame |
# -*- coding: utf-8 -*-
# pylint: disable-msg=W0612,E1101
import itertools
import warnings
from warnings import catch_warnings
from datetime import datetime
from pandas.types.common import (is_integer_dtype,
is_float_dtype,
is_scalar)
from pandas.compat import range, lrange, lzip, StringIO, lmap
from pandas.tslib import NaT
from numpy import nan
from numpy.random import randn
import numpy as np
import pandas as pd
from pandas import option_context
from pandas.core.indexing import _non_reducing_slice, _maybe_numeric_slice
from pandas.core.api import (DataFrame, Index, Series, Panel, isnull,
MultiIndex, Timestamp, Timedelta, UInt64Index)
from pandas.formats.printing import pprint_thing
from pandas import concat
from pandas.core.common import PerformanceWarning
from pandas.tests.indexing.common import _mklbl
import pandas.util.testing as tm
from pandas import date_range
_verbose = False
# ------------------------------------------------------------------------
# Indexing test cases
def _generate_indices(f, values=False):
""" generate the indicies
if values is True , use the axis values
is False, use the range
"""
axes = f.axes
if values:
axes = [lrange(len(a)) for a in axes]
return itertools.product(*axes)
def _get_value(f, i, values=False):
""" return the value for the location i """
# check against values
if values:
return f.values[i]
# this is equiv of f[col][row].....
# v = f
# for a in reversed(i):
# v = v.__getitem__(a)
# return v
with catch_warnings(record=True):
return f.ix[i]
def _get_result(obj, method, key, axis):
""" return the result for this obj with this key and this axis """
if isinstance(key, dict):
key = key[axis]
# use an artificial conversion to map the key as integers to the labels
# so ix can work for comparisons
if method == 'indexer':
method = 'ix'
key = obj._get_axis(axis)[key]
# in case we actually want 0 index slicing
try:
xp = getattr(obj, method).__getitem__(_axify(obj, key, axis))
except:
xp = getattr(obj, method).__getitem__(key)
return xp
def _axify(obj, key, axis):
# create a tuple accessor
axes = [slice(None)] * obj.ndim
axes[axis] = key
return tuple(axes)
class TestIndexing(tm.TestCase):
_objs = set(['series', 'frame', 'panel'])
_typs = set(['ints', 'uints', 'labels', 'mixed',
'ts', 'floats', 'empty', 'ts_rev'])
def setUp(self):
self.series_ints = Series(np.random.rand(4), index=lrange(0, 8, 2))
self.frame_ints = DataFrame(np.random.randn(4, 4),
index=lrange(0, 8, 2),
columns=lrange(0, 12, 3))
self.panel_ints = Panel(np.random.rand(4, 4, 4),
items=lrange(0, 8, 2),
major_axis=lrange(0, 12, 3),
minor_axis=lrange(0, 16, 4))
self.series_uints = Series(np.random.rand(4),
index=UInt64Index(lrange(0, 8, 2)))
self.frame_uints = DataFrame(np.random.randn(4, 4),
index=UInt64Index(lrange(0, 8, 2)),
columns=UInt64Index(lrange(0, 12, 3)))
self.panel_uints = Panel(np.random.rand(4, 4, 4),
items=UInt64Index(lrange(0, 8, 2)),
major_axis=UInt64Index(lrange(0, 12, 3)),
minor_axis=UInt64Index(lrange(0, 16, 4)))
self.series_labels = Series(np.random.randn(4), index=list('abcd'))
self.frame_labels = DataFrame(np.random.randn(4, 4),
index=list('abcd'), columns=list('ABCD'))
self.panel_labels = Panel(np.random.randn(4, 4, 4),
items=list('abcd'),
major_axis=list('ABCD'),
minor_axis=list('ZYXW'))
self.series_mixed = Series(np.random.randn(4), index=[2, 4, 'null', 8])
self.frame_mixed = DataFrame(np.random.randn(4, 4),
index=[2, 4, 'null', 8])
self.panel_mixed = Panel(np.random.randn(4, 4, 4),
items=[2, 4, 'null', 8])
self.series_ts = Series(np.random.randn(4),
index=date_range('20130101', periods=4))
self.frame_ts = DataFrame(np.random.randn(4, 4),
index=date_range('20130101', periods=4))
self.panel_ts = Panel(np.random.randn(4, 4, 4),
items=date_range('20130101', periods=4))
dates_rev = (date_range('20130101', periods=4)
.sort_values(ascending=False))
self.series_ts_rev = Series(np.random.randn(4),
index=dates_rev)
self.frame_ts_rev = DataFrame(np.random.randn(4, 4),
index=dates_rev)
self.panel_ts_rev = Panel(np.random.randn(4, 4, 4),
items=dates_rev)
self.frame_empty = DataFrame({})
self.series_empty = Series({})
self.panel_empty = Panel({})
# form agglomerates
for o in self._objs:
d = dict()
for t in self._typs:
d[t] = getattr(self, '%s_%s' % (o, t), None)
setattr(self, o, d)
def check_values(self, f, func, values=False):
if f is None:
return
axes = f.axes
indicies = itertools.product(*axes)
for i in indicies:
result = getattr(f, func)[i]
# check against values
if values:
expected = f.values[i]
else:
expected = f
for a in reversed(i):
expected = expected.__getitem__(a)
tm.assert_almost_equal(result, expected)
def check_result(self, name, method1, key1, method2, key2, typs=None,
objs=None, axes=None, fails=None):
def _eq(t, o, a, obj, k1, k2):
""" compare equal for these 2 keys """
if a is not None and a > obj.ndim - 1:
return
def _print(result, error=None):
if error is not None:
error = str(error)
v = ("%-16.16s [%-16.16s]: [typ->%-8.8s,obj->%-8.8s,"
"key1->(%-4.4s),key2->(%-4.4s),axis->%s] %s" %
(name, result, t, o, method1, method2, a, error or ''))
if _verbose:
pprint_thing(v)
try:
rs = getattr(obj, method1).__getitem__(_axify(obj, k1, a))
try:
xp = _get_result(obj, method2, k2, a)
except:
result = 'no comp'
_print(result)
return
detail = None
try:
if is_scalar(rs) and is_scalar(xp):
self.assertEqual(rs, xp)
elif xp.ndim == 1:
tm.assert_series_equal(rs, xp)
elif xp.ndim == 2:
tm.assert_frame_equal(rs, xp)
elif xp.ndim == 3:
tm.assert_panel_equal(rs, xp)
result = 'ok'
except AssertionError as e:
detail = str(e)
result = 'fail'
# reverse the checks
if fails is True:
if result == 'fail':
result = 'ok (fail)'
_print(result)
if not result.startswith('ok'):
raise AssertionError(detail)
except AssertionError:
raise
except Exception as detail:
# if we are in fails, the ok, otherwise raise it
if fails is not None:
if isinstance(detail, fails):
result = 'ok (%s)' % type(detail).__name__
_print(result)
return
result = type(detail).__name__
raise AssertionError(_print(result, error=detail))
if typs is None:
typs = self._typs
if objs is None:
objs = self._objs
if axes is not None:
if not isinstance(axes, (tuple, list)):
axes = [axes]
else:
axes = list(axes)
else:
axes = [0, 1, 2]
# check
for o in objs:
if o not in self._objs:
continue
d = getattr(self, o)
for a in axes:
for t in typs:
if t not in self._typs:
continue
obj = d[t]
if obj is not None:
obj = obj.copy()
k2 = key2
_eq(t, o, a, obj, key1, k2)
def test_ix_deprecation(self):
# GH 15114
df = DataFrame({'A': [1, 2, 3]})
with tm.assert_produces_warning(DeprecationWarning,
check_stacklevel=False):
df.ix[1, 'A']
def test_indexer_caching(self):
# GH5727
# make sure that indexers are in the _internal_names_set
n = 1000001
arrays = [lrange(n), lrange(n)]
index = MultiIndex.from_tuples(lzip(*arrays))
s = Series(np.zeros(n), index=index)
str(s)
# setitem
expected = Series(np.ones(n), index=index)
s = Series(np.zeros(n), index=index)
s[s == 0] = 1
tm.assert_series_equal(s, expected)
def test_at_and_iat_get(self):
def _check(f, func, values=False):
if f is not None:
indicies = _generate_indices(f, values)
for i in indicies:
result = getattr(f, func)[i]
expected = _get_value(f, i, values)
tm.assert_almost_equal(result, expected)
for o in self._objs:
d = getattr(self, o)
# iat
for f in [d['ints'], d['uints']]:
_check(f, 'iat', values=True)
for f in [d['labels'], d['ts'], d['floats']]:
if f is not None:
self.assertRaises(ValueError, self.check_values, f, 'iat')
# at
for f in [d['ints'], d['uints'], d['labels'],
d['ts'], d['floats']]:
_check(f, 'at')
def test_at_and_iat_set(self):
def _check(f, func, values=False):
if f is not None:
indicies = _generate_indices(f, values)
for i in indicies:
getattr(f, func)[i] = 1
expected = _get_value(f, i, values)
tm.assert_almost_equal(expected, 1)
for t in self._objs:
d = getattr(self, t)
# iat
for f in [d['ints'], d['uints']]:
_check(f, 'iat', values=True)
for f in [d['labels'], d['ts'], d['floats']]:
if f is not None:
self.assertRaises(ValueError, _check, f, 'iat')
# at
for f in [d['ints'], d['uints'], d['labels'],
d['ts'], d['floats']]:
_check(f, 'at')
def test_at_iat_coercion(self):
# as timestamp is not a tuple!
dates = date_range('1/1/2000', periods=8)
df = DataFrame(randn(8, 4), index=dates, columns=['A', 'B', 'C', 'D'])
s = df['A']
result = s.at[dates[5]]
xp = s.values[5]
self.assertEqual(result, xp)
# GH 7729
# make sure we are boxing the returns
s = Series(['2014-01-01', '2014-02-02'], dtype='datetime64[ns]')
expected = Timestamp('2014-02-02')
for r in [lambda: s.iat[1], lambda: s.iloc[1]]:
result = r()
self.assertEqual(result, expected)
s = Series(['1 days', '2 days'], dtype='timedelta64[ns]')
expected = Timedelta('2 days')
for r in [lambda: s.iat[1], lambda: s.iloc[1]]:
result = r()
self.assertEqual(result, expected)
def test_iat_invalid_args(self):
pass
def test_imethods_with_dups(self):
# GH6493
# iat/iloc with dups
s = Series(range(5), index=[1, 1, 2, 2, 3], dtype='int64')
result = s.iloc[2]
self.assertEqual(result, 2)
result = s.iat[2]
self.assertEqual(result, 2)
self.assertRaises(IndexError, lambda: s.iat[10])
self.assertRaises(IndexError, lambda: s.iat[-10])
result = s.iloc[[2, 3]]
expected = Series([2, 3], [2, 2], dtype='int64')
tm.assert_series_equal(result, expected)
df = s.to_frame()
result = df.iloc[2]
expected = Series(2, index=[0], name=2)
tm.assert_series_equal(result, expected)
result = df.iat[2, 0]
expected = 2
self.assertEqual(result, 2)
def test_repeated_getitem_dups(self):
# GH 5678
# repeated getitems on a dup index returning an ndarray
df = DataFrame(
np.random.random_sample((20, 5)),
index=['ABCDE' [x % 5] for x in range(20)])
expected = df.loc['A', 0]
result = df.loc[:, 0].loc['A']
tm.assert_series_equal(result, expected)
def test_iloc_exceeds_bounds(self):
# GH6296
# iloc should allow indexers that exceed the bounds
df = DataFrame(np.random.random_sample((20, 5)), columns=list('ABCDE'))
expected = df
# lists of positions should raise IndexError!
with tm.assertRaisesRegexp(IndexError,
'positional indexers are out-of-bounds'):
df.iloc[:, [0, 1, 2, 3, 4, 5]]
self.assertRaises(IndexError, lambda: df.iloc[[1, 30]])
self.assertRaises(IndexError, lambda: df.iloc[[1, -30]])
self.assertRaises(IndexError, lambda: df.iloc[[100]])
s = df['A']
self.assertRaises(IndexError, lambda: s.iloc[[100]])
self.assertRaises(IndexError, lambda: s.iloc[[-100]])
# still raise on a single indexer
msg = 'single positional indexer is out-of-bounds'
with tm.assertRaisesRegexp(IndexError, msg):
df.iloc[30]
self.assertRaises(IndexError, lambda: df.iloc[-30])
# GH10779
# single positive/negative indexer exceeding Series bounds should raise
# an IndexError
with tm.assertRaisesRegexp(IndexError, msg):
s.iloc[30]
self.assertRaises(IndexError, lambda: s.iloc[-30])
# slices are ok
result = df.iloc[:, 4:10] # 0 < start < len < stop
expected = df.iloc[:, 4:]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, -4:-10] # stop < 0 < start < len
expected = df.iloc[:, :0]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 10:4:-1] # 0 < stop < len < start (down)
expected = df.iloc[:, :4:-1]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 4:-10:-1] # stop < 0 < start < len (down)
expected = df.iloc[:, 4::-1]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, -10:4] # start < 0 < stop < len
expected = df.iloc[:, :4]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 10:4] # 0 < stop < len < start
expected = df.iloc[:, :0]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, -10:-11:-1] # stop < start < 0 < len (down)
expected = df.iloc[:, :0]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 10:11] # 0 < len < start < stop
expected = df.iloc[:, :0]
tm.assert_frame_equal(result, expected)
# slice bounds exceeding is ok
result = s.iloc[18:30]
expected = s.iloc[18:]
tm.assert_series_equal(result, expected)
result = s.iloc[30:]
expected = s.iloc[:0]
tm.assert_series_equal(result, expected)
result = s.iloc[30::-1]
expected = s.iloc[::-1]
tm.assert_series_equal(result, expected)
# doc example
def check(result, expected):
str(result)
result.dtypes
tm.assert_frame_equal(result, expected)
dfl = DataFrame(np.random.randn(5, 2), columns=list('AB'))
check(dfl.iloc[:, 2:3], DataFrame(index=dfl.index))
check(dfl.iloc[:, 1:3], dfl.iloc[:, [1]])
check(dfl.iloc[4:6], dfl.iloc[[4]])
self.assertRaises(IndexError, lambda: dfl.iloc[[4, 5, 6]])
self.assertRaises(IndexError, lambda: dfl.iloc[:, 4])
def test_iloc_getitem_int(self):
# integer
self.check_result('integer', 'iloc', 2, 'ix',
{0: 4, 1: 6, 2: 8}, typs=['ints', 'uints'])
self.check_result('integer', 'iloc', 2, 'indexer', 2,
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_neg_int(self):
# neg integer
self.check_result('neg int', 'iloc', -1, 'ix',
{0: 6, 1: 9, 2: 12}, typs=['ints', 'uints'])
self.check_result('neg int', 'iloc', -1, 'indexer', -1,
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_list_int(self):
# list of ints
self.check_result('list int', 'iloc', [0, 1, 2], 'ix',
{0: [0, 2, 4], 1: [0, 3, 6], 2: [0, 4, 8]},
typs=['ints', 'uints'])
self.check_result('list int', 'iloc', [2], 'ix',
{0: [4], 1: [6], 2: [8]}, typs=['ints', 'uints'])
self.check_result('list int', 'iloc', [0, 1, 2], 'indexer', [0, 1, 2],
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
# array of ints (GH5006), make sure that a single indexer is returning
# the correct type
self.check_result('array int', 'iloc', np.array([0, 1, 2]), 'ix',
{0: [0, 2, 4],
1: [0, 3, 6],
2: [0, 4, 8]}, typs=['ints', 'uints'])
self.check_result('array int', 'iloc', np.array([2]), 'ix',
{0: [4], 1: [6], 2: [8]}, typs=['ints', 'uints'])
self.check_result('array int', 'iloc', np.array([0, 1, 2]), 'indexer',
[0, 1, 2],
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_neg_int_can_reach_first_index(self):
# GH10547 and GH10779
# negative integers should be able to reach index 0
df = DataFrame({'A': [2, 3, 5], 'B': [7, 11, 13]})
s = df['A']
expected = df.iloc[0]
result = df.iloc[-3]
tm.assert_series_equal(result, expected)
expected = df.iloc[[0]]
result = df.iloc[[-3]]
tm.assert_frame_equal(result, expected)
expected = s.iloc[0]
result = s.iloc[-3]
self.assertEqual(result, expected)
expected = s.iloc[[0]]
result = s.iloc[[-3]]
tm.assert_series_equal(result, expected)
# check the length 1 Series case highlighted in GH10547
expected = pd.Series(['a'], index=['A'])
result = expected.iloc[[-1]]
tm.assert_series_equal(result, expected)
def test_iloc_getitem_dups(self):
# no dups in panel (bug?)
self.check_result('list int (dups)', 'iloc', [0, 1, 1, 3], 'ix',
{0: [0, 2, 2, 6], 1: [0, 3, 3, 9]},
objs=['series', 'frame'], typs=['ints', 'uints'])
# GH 6766
df1 = DataFrame([{'A': None, 'B': 1}, {'A': 2, 'B': 2}])
df2 = DataFrame([{'A': 3, 'B': 3}, {'A': 4, 'B': 4}])
df = concat([df1, df2], axis=1)
# cross-sectional indexing
result = df.iloc[0, 0]
self.assertTrue(isnull(result))
result = df.iloc[0, :]
expected = Series([np.nan, 1, 3, 3], index=['A', 'B', 'A', 'B'],
name=0)
tm.assert_series_equal(result, expected)
def test_iloc_getitem_array(self):
# array like
s = Series(index=lrange(1, 4))
self.check_result('array like', 'iloc', s.index, 'ix',
{0: [2, 4, 6], 1: [3, 6, 9], 2: [4, 8, 12]},
typs=['ints', 'uints'])
def test_iloc_getitem_bool(self):
# boolean indexers
b = [True, False, True, False, ]
self.check_result('bool', 'iloc', b, 'ix', b, typs=['ints', 'uints'])
self.check_result('bool', 'iloc', b, 'ix', b,
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_slice(self):
# slices
self.check_result('slice', 'iloc', slice(1, 3), 'ix',
{0: [2, 4], 1: [3, 6], 2: [4, 8]},
typs=['ints', 'uints'])
self.check_result('slice', 'iloc', slice(1, 3), 'indexer',
slice(1, 3),
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_slice_dups(self):
df1 = DataFrame(np.random.randn(10, 4), columns=['A', 'A', 'B', 'B'])
df2 = DataFrame(np.random.randint(0, 10, size=20).reshape(10, 2),
columns=['A', 'C'])
# axis=1
df = concat([df1, df2], axis=1)
tm.assert_frame_equal(df.iloc[:, :4], df1)
tm.assert_frame_equal(df.iloc[:, 4:], df2)
df = concat([df2, df1], axis=1)
tm.assert_frame_equal(df.iloc[:, :2], df2)
tm.assert_frame_equal(df.iloc[:, 2:], df1)
exp = concat([df2, df1.iloc[:, [0]]], axis=1)
tm.assert_frame_equal(df.iloc[:, 0:3], exp)
# axis=0
df = concat([df, df], axis=0)
tm.assert_frame_equal(df.iloc[0:10, :2], df2)
tm.assert_frame_equal(df.iloc[0:10, 2:], df1)
tm.assert_frame_equal(df.iloc[10:, :2], df2)
tm.assert_frame_equal(df.iloc[10:, 2:], df1)
def test_iloc_setitem(self):
df = self.frame_ints
df.iloc[1, 1] = 1
result = df.iloc[1, 1]
self.assertEqual(result, 1)
df.iloc[:, 2:3] = 0
expected = df.iloc[:, 2:3]
result = df.iloc[:, 2:3]
tm.assert_frame_equal(result, expected)
# GH5771
s = Series(0, index=[4, 5, 6])
s.iloc[1:2] += 1
expected = Series([0, 1, 0], index=[4, 5, 6])
tm.assert_series_equal(s, expected)
def test_loc_setitem_slice(self):
# GH10503
# assigning the same type should not change the type
df1 = DataFrame({'a': [0, 1, 1],
'b': Series([100, 200, 300], dtype='uint32')})
ix = df1['a'] == 1
newb1 = df1.loc[ix, 'b'] + 1
df1.loc[ix, 'b'] = newb1
expected = DataFrame({'a': [0, 1, 1],
'b': Series([100, 201, 301], dtype='uint32')})
tm.assert_frame_equal(df1, expected)
# assigning a new type should get the inferred type
df2 = DataFrame({'a': [0, 1, 1], 'b': [100, 200, 300]},
dtype='uint64')
ix = df1['a'] == 1
newb2 = df2.loc[ix, 'b']
df1.loc[ix, 'b'] = newb2
expected = DataFrame({'a': [0, 1, 1], 'b': [100, 200, 300]},
dtype='uint64')
tm.assert_frame_equal(df2, expected)
def test_ix_loc_setitem_consistency(self):
# GH 5771
# loc with slice and series
s = Series(0, index=[4, 5, 6])
s.loc[4:5] += 1
expected = Series([1, 1, 0], index=[4, 5, 6])
tm.assert_series_equal(s, expected)
# GH 5928
# chained indexing assignment
df = DataFrame({'a': [0, 1, 2]})
expected = df.copy()
with catch_warnings(record=True):
expected.ix[[0, 1, 2], 'a'] = -expected.ix[[0, 1, 2], 'a']
with catch_warnings(record=True):
df['a'].ix[[0, 1, 2]] = -df['a'].ix[[0, 1, 2]]
tm.assert_frame_equal(df, expected)
df = DataFrame({'a': [0, 1, 2], 'b': [0, 1, 2]})
with catch_warnings(record=True):
df['a'].ix[[0, 1, 2]] = -df['a'].ix[[0, 1, 2]].astype(
'float64') + 0.5
expected = DataFrame({'a': [0.5, -0.5, -1.5], 'b': [0, 1, 2]})
tm.assert_frame_equal(df, expected)
# GH 8607
# ix setitem consistency
df = DataFrame({'timestamp': [1413840976, 1413842580, 1413760580],
'delta': [1174, 904, 161],
'elapsed': [7673, 9277, 1470]})
expected = DataFrame({'timestamp': pd.to_datetime(
[1413840976, 1413842580, 1413760580], unit='s'),
'delta': [1174, 904, 161],
'elapsed': [7673, 9277, 1470]})
df2 = df.copy()
df2['timestamp'] = pd.to_datetime(df['timestamp'], unit='s')
tm.assert_frame_equal(df2, expected)
df2 = df.copy()
df2.loc[:, 'timestamp'] = pd.to_datetime(df['timestamp'], unit='s')
tm.assert_frame_equal(df2, expected)
df2 = df.copy()
with catch_warnings(record=True):
df2.ix[:, 2] = pd.to_datetime(df['timestamp'], unit='s')
tm.assert_frame_equal(df2, expected)
def test_ix_loc_consistency(self):
# GH 8613
# some edge cases where ix/loc should return the same
# this is not an exhaustive case
def compare(result, expected):
if is_scalar(expected):
self.assertEqual(result, expected)
else:
self.assertTrue(expected.equals(result))
# failure cases for .loc, but these work for .ix
df = pd.DataFrame(np.random.randn(5, 4), columns=list('ABCD'))
for key in [slice(1, 3), tuple([slice(0, 2), slice(0, 2)]),
tuple([slice(0, 2), df.columns[0:2]])]:
for index in [tm.makeStringIndex, tm.makeUnicodeIndex,
tm.makeDateIndex, tm.makePeriodIndex,
tm.makeTimedeltaIndex]:
df.index = index(len(df.index))
with catch_warnings(record=True):
df.ix[key]
self.assertRaises(TypeError, lambda: df.loc[key])
df = pd.DataFrame(np.random.randn(5, 4), columns=list('ABCD'),
index=pd.date_range('2012-01-01', periods=5))
for key in ['2012-01-03',
'2012-01-31',
slice('2012-01-03', '2012-01-03'),
slice('2012-01-03', '2012-01-04'),
slice('2012-01-03', '2012-01-06', 2),
slice('2012-01-03', '2012-01-31'),
tuple([[True, True, True, False, True]]), ]:
# getitem
# if the expected raises, then compare the exceptions
try:
with catch_warnings(record=True):
expected = df.ix[key]
except KeyError:
self.assertRaises(KeyError, lambda: df.loc[key])
continue
result = df.loc[key]
compare(result, expected)
# setitem
df1 = df.copy()
df2 = df.copy()
with catch_warnings(record=True):
df1.ix[key] = 10
df2.loc[key] = 10
compare(df2, df1)
# edge cases
s = Series([1, 2, 3, 4], index=list('abde'))
result1 = s['a':'c']
with catch_warnings(record=True):
result2 = s.ix['a':'c']
result3 = s.loc['a':'c']
tm.assert_series_equal(result1, result2)
tm.assert_series_equal(result1, result3)
# now work rather than raising KeyError
s = Series(range(5), [-2, -1, 1, 2, 3])
with catch_warnings(record=True):
result1 = s.ix[-10:3]
result2 = s.loc[-10:3]
tm.assert_series_equal(result1, result2)
with catch_warnings(record=True):
result1 = s.ix[0:3]
result2 = s.loc[0:3]
tm.assert_series_equal(result1, result2)
def test_loc_setitem_dups(self):
# GH 6541
df_orig = DataFrame(
{'me': list('rttti'),
'foo': list('aaade'),
'bar': np.arange(5, dtype='float64') * 1.34 + 2,
'bar2': np.arange(5, dtype='float64') * -.34 + 2}).set_index('me')
indexer = tuple(['r', ['bar', 'bar2']])
df = df_orig.copy()
df.loc[indexer] *= 2.0
tm.assert_series_equal(df.loc[indexer], 2.0 * df_orig.loc[indexer])
indexer = tuple(['r', 'bar'])
df = df_orig.copy()
df.loc[indexer] *= 2.0
self.assertEqual(df.loc[indexer], 2.0 * df_orig.loc[indexer])
indexer = tuple(['t', ['bar', 'bar2']])
df = df_orig.copy()
df.loc[indexer] *= 2.0
tm.assert_frame_equal(df.loc[indexer], 2.0 * df_orig.loc[indexer])
def test_iloc_setitem_dups(self):
# GH 6766
# iloc with a mask aligning from another iloc
df1 = DataFrame([{'A': None, 'B': 1}, {'A': 2, 'B': 2}])
df2 = DataFrame([{'A': 3, 'B': 3}, {'A': 4, 'B': 4}])
df = concat([df1, df2], axis=1)
expected = df.fillna(3)
expected['A'] = expected['A'].astype('float64')
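# the NaN positions in column 0 double as iloc positions here because the frame has a default RangeIndex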
inds = np.isnan(df.iloc[:, 0])
mask = inds[inds].index
df.iloc[mask, 0] = df.iloc[mask, 2]
tm.assert_frame_equal(df, expected)
# del a dup column across blocks
expected = DataFrame({0: [1, 2], 1: [3, 4]})
expected.columns = ['B', 'B']
del df['A']
tm.assert_frame_equal(df, expected)
# assign back to self
df.iloc[[0, 1], [0, 1]] = df.iloc[[0, 1], [0, 1]]
tm.assert_frame_equal(df, expected)
# reversed x 2
df.iloc[[1, 0], [0, 1]] = df.iloc[[1, 0], [0, 1]].reset_index(
drop=True)
df.iloc[[1, 0], [0, 1]] = df.iloc[[1, 0], [0, 1]].reset_index(
drop=True)
tm.assert_frame_equal(df, expected)
def test_chained_getitem_with_lists(self):
# GH6394
# Regression in chained getitem indexing with embedded list-like from
# 0.12
def check(result, expected):
tm.assert_numpy_array_equal(result, expected)
tm.assertIsInstance(result, np.ndarray)
df = DataFrame({'A': 5 * [np.zeros(3)], 'B': 5 * [np.ones(3)]})
expected = df['A'].iloc[2]
result = df.loc[2, 'A']
check(result, expected)
result2 = df.iloc[2]['A']
check(result2, expected)
result3 = df['A'].loc[2]
check(result3, expected)
result4 = df['A'].iloc[2]
check(result4, expected)
def test_loc_getitem_int(self):
# int label
self.check_result('int label', 'loc', 2, 'ix', 2,
typs=['ints', 'uints'], axes=0)
self.check_result('int label', 'loc', 3, 'ix', 3,
typs=['ints', 'uints'], axes=1)
self.check_result('int label', 'loc', 4, 'ix', 4,
typs=['ints', 'uints'], axes=2)
self.check_result('int label', 'loc', 2, 'ix', 2,
typs=['label'], fails=KeyError)
def test_loc_getitem_label(self):
# label
self.check_result('label', 'loc', 'c', 'ix', 'c', typs=['labels'],
axes=0)
self.check_result('label', 'loc', 'null', 'ix', 'null', typs=['mixed'],
axes=0)
self.check_result('label', 'loc', 8, 'ix', 8, typs=['mixed'], axes=0)
self.check_result('label', 'loc', Timestamp('20130102'), 'ix', 1,
typs=['ts'], axes=0)
self.check_result('label', 'loc', 'c', 'ix', 'c', typs=['empty'],
fails=KeyError)
def test_loc_getitem_label_out_of_range(self):
# out of range label
self.check_result('label range', 'loc', 'f', 'ix', 'f',
typs=['ints', 'uints', 'labels', 'mixed', 'ts'],
fails=KeyError)
self.check_result('label range', 'loc', 'f', 'ix', 'f',
typs=['floats'], fails=TypeError)
self.check_result('label range', 'loc', 20, 'ix', 20,
typs=['ints', 'uints', 'mixed'], fails=KeyError)
self.check_result('label range', 'loc', 20, 'ix', 20,
typs=['labels'], fails=TypeError)
self.check_result('label range', 'loc', 20, 'ix', 20, typs=['ts'],
axes=0, fails=TypeError)
self.check_result('label range', 'loc', 20, 'ix', 20, typs=['floats'],
axes=0, fails=TypeError)
def test_loc_getitem_label_list(self):
# list of labels
self.check_result('list lbl', 'loc', [0, 2, 4], 'ix', [0, 2, 4],
typs=['ints', 'uints'], axes=0)
self.check_result('list lbl', 'loc', [3, 6, 9], 'ix', [3, 6, 9],
typs=['ints', 'uints'], axes=1)
self.check_result('list lbl', 'loc', [4, 8, 12], 'ix', [4, 8, 12],
typs=['ints', 'uints'], axes=2)
self.check_result('list lbl', 'loc', ['a', 'b', 'd'], 'ix',
['a', 'b', 'd'], typs=['labels'], axes=0)
self.check_result('list lbl', 'loc', ['A', 'B', 'C'], 'ix',
['A', 'B', 'C'], typs=['labels'], axes=1)
self.check_result('list lbl', 'loc', ['Z', 'Y', 'W'], 'ix',
['Z', 'Y', 'W'], typs=['labels'], axes=2)
self.check_result('list lbl', 'loc', [2, 8, 'null'], 'ix',
[2, 8, 'null'], typs=['mixed'], axes=0)
self.check_result('list lbl', 'loc',
[Timestamp('20130102'), Timestamp('20130103')], 'ix',
[Timestamp('20130102'), Timestamp('20130103')],
typs=['ts'], axes=0)
self.check_result('list lbl', 'loc', [0, 1, 2], 'indexer', [0, 1, 2],
typs=['empty'], fails=KeyError)
self.check_result('list lbl', 'loc', [0, 2, 3], 'ix', [0, 2, 3],
typs=['ints', 'uints'], axes=0, fails=KeyError)
self.check_result('list lbl', 'loc', [3, 6, 7], 'ix', [3, 6, 7],
typs=['ints', 'uints'], axes=1, fails=KeyError)
self.check_result('list lbl', 'loc', [4, 8, 10], 'ix', [4, 8, 10],
typs=['ints', 'uints'], axes=2, fails=KeyError)
def test_loc_getitem_label_list_fails(self):
# fails
self.check_result('list lbl', 'loc', [20, 30, 40], 'ix', [20, 30, 40],
typs=['ints', 'uints'], axes=1, fails=KeyError)
self.check_result('list lbl', 'loc', [20, 30, 40], 'ix', [20, 30, 40],
typs=['ints', 'uints'], axes=2, fails=KeyError)
def test_loc_getitem_label_array_like(self):
# array like
self.check_result('array like', 'loc', Series(index=[0, 2, 4]).index,
'ix', [0, 2, 4], typs=['ints', 'uints'], axes=0)
self.check_result('array like', 'loc', Series(index=[3, 6, 9]).index,
'ix', [3, 6, 9], typs=['ints', 'uints'], axes=1)
self.check_result('array like', 'loc', Series(index=[4, 8, 12]).index,
'ix', [4, 8, 12], typs=['ints', 'uints'], axes=2)
def test_loc_getitem_bool(self):
# boolean indexers
b = [True, False, True, False]
self.check_result('bool', 'loc', b, 'ix', b,
typs=['ints', 'uints', 'labels',
'mixed', 'ts', 'floats'])
self.check_result('bool', 'loc', b, 'ix', b, typs=['empty'],
fails=KeyError)
def test_loc_getitem_int_slice(self):
# ok
self.check_result('int slice2', 'loc', slice(2, 4), 'ix', [2, 4],
typs=['ints', 'uints'], axes=0)
self.check_result('int slice2', 'loc', slice(3, 6), 'ix', [3, 6],
typs=['ints', 'uints'], axes=1)
self.check_result('int slice2', 'loc', slice(4, 8), 'ix', [4, 8],
typs=['ints', 'uints'], axes=2)
# GH 3053
# loc should treat integer slices like label slices
from itertools import product
index = MultiIndex.from_tuples([t for t in product(
[6, 7, 8], ['a', 'b'])])
df = DataFrame(np.random.randn(6, 6), index, index)
result = df.loc[6:8, :]
with catch_warnings(record=True):
expected = df.ix[6:8, :]
tm.assert_frame_equal(result, expected)
index = MultiIndex.from_tuples([t
for t in product(
[10, 20, 30], ['a', 'b'])])
df = DataFrame(np.random.randn(6, 6), index, index)
result = df.loc[20:30, :]
with catch_warnings(record=True):
expected = df.ix[20:30, :]
tm.assert_frame_equal(result, expected)
# doc examples
result = df.loc[10, :]
with catch_warnings(record=True):
expected = df.ix[10, :]
tm.assert_frame_equal(result, expected)
result = df.loc[:, 10]
# expected = df.ix[:,10] (this fails)
expected = df[10]
tm.assert_frame_equal(result, expected)
def test_loc_to_fail(self):
# GH3449
df = DataFrame(np.random.random((3, 3)),
index=['a', 'b', 'c'],
columns=['e', 'f', 'g'])
# raise a KeyError?
self.assertRaises(KeyError, df.loc.__getitem__,
tuple([[1, 2], [1, 2]]))
# GH 7496
# loc should not fallback
s = Series()
s.loc[1] = 1
s.loc['a'] = 2
self.assertRaises(KeyError, lambda: s.loc[-1])
self.assertRaises(KeyError, lambda: s.loc[[-1, -2]])
self.assertRaises(KeyError, lambda: s.loc[['4']])
s.loc[-1] = 3
result = s.loc[[-1, -2]]
expected = Series([3, np.nan], index=[-1, -2])
tm.assert_series_equal(result, expected)
s['a'] = 2
self.assertRaises(KeyError, lambda: s.loc[[-2]])
del s['a']
def f():
s.loc[[-2]] = 0
self.assertRaises(KeyError, f)
# inconsistency between .loc[values] and .loc[values,:]
# GH 7999
df = DataFrame([['a'], ['b']], index=[1, 2], columns=['value'])
def f():
df.loc[[3], :]
self.assertRaises(KeyError, f)
def f():
df.loc[[3]]
self.assertRaises(KeyError, f)
def test_at_to_fail(self):
# at should not fallback
# GH 7814
s = Series([1, 2, 3], index=list('abc'))
result = s.at['a']
self.assertEqual(result, 1)
self.assertRaises(ValueError, lambda: s.at[0])
df = DataFrame({'A': [1, 2, 3]}, index=list('abc'))
result = df.at['a', 'A']
self.assertEqual(result, 1)
self.assertRaises(ValueError, lambda: df.at['a', 0])
s = Series([1, 2, 3], index=[3, 2, 1])
result = s.at[1]
self.assertEqual(result, 3)
self.assertRaises(ValueError, lambda: s.at['a'])
df = DataFrame({0: [1, 2, 3]}, index=[3, 2, 1])
result = df.at[1, 0]
self.assertEqual(result, 3)
self.assertRaises(ValueError, lambda: df.at['a', 0])
# GH 13822, incorrect error string with non-unique columns when missing
# column is accessed
df = DataFrame({'x': [1.], 'y': [2.], 'z': [3.]})
df.columns = ['x', 'x', 'z']
# Check that we get the correct value in the KeyError
self.assertRaisesRegexp(KeyError, r"\['y'\] not in index",
lambda: df[['x', 'y', 'z']])
def test_loc_getitem_label_slice(self):
# label slices (with ints)
self.check_result('lab slice', 'loc', slice(1, 3),
'ix', slice(1, 3),
typs=['labels', 'mixed', 'empty', 'ts', 'floats'],
fails=TypeError)
# real label slices
self.check_result('lab slice', 'loc', slice('a', 'c'),
'ix', slice('a', 'c'), typs=['labels'], axes=0)
self.check_result('lab slice', 'loc', slice('A', 'C'),
'ix', slice('A', 'C'), typs=['labels'], axes=1)
self.check_result('lab slice', 'loc', slice('W', 'Z'),
'ix', slice('W', 'Z'), typs=['labels'], axes=2)
self.check_result('ts slice', 'loc', slice('20130102', '20130104'),
'ix', slice('20130102', '20130104'),
typs=['ts'], axes=0)
self.check_result('ts slice', 'loc', slice('20130102', '20130104'),
'ix', slice('20130102', '20130104'),
typs=['ts'], axes=1, fails=TypeError)
self.check_result('ts slice', 'loc', slice('20130102', '20130104'),
'ix', slice('20130102', '20130104'),
typs=['ts'], axes=2, fails=TypeError)
# GH 14316
self.check_result('ts slice rev', 'loc', slice('20130104', '20130102'),
'indexer', [0, 1, 2], typs=['ts_rev'], axes=0)
self.check_result('mixed slice', 'loc', slice(2, 8), 'ix', slice(2, 8),
typs=['mixed'], axes=0, fails=TypeError)
self.check_result('mixed slice', 'loc', slice(2, 8), 'ix', slice(2, 8),
typs=['mixed'], axes=1, fails=KeyError)
self.check_result('mixed slice', 'loc', slice(2, 8), 'ix', slice(2, 8),
typs=['mixed'], axes=2, fails=KeyError)
self.check_result('mixed slice', 'loc', slice(2, 4, 2), 'ix', slice(
2, 4, 2), typs=['mixed'], axes=0, fails=TypeError)
def test_loc_general(self):
df = DataFrame(
np.random.rand(4, 4), columns=['A', 'B', 'C', 'D'],
index=['A', 'B', 'C', 'D'])
# want this to work
result = df.loc[:, "A":"B"].iloc[0:2, :]
self.assertTrue((result.columns == ['A', 'B']).all())
self.assertTrue((result.index == ['A', 'B']).all())
# mixed type
result = DataFrame({'a': [Timestamp('20130101')], 'b': [1]}).iloc[0]
expected = Series([Timestamp('20130101'), 1], index=['a', 'b'], name=0)
tm.assert_series_equal(result, expected)
self.assertEqual(result.dtype, object)
def test_loc_setitem_consistency(self):
# GH 6149
# coerce similarly for setitem and loc when rows have a null-slice
expected = DataFrame({'date': Series(0, index=range(5),
dtype=np.int64),
'val': Series(range(5), dtype=np.int64)})
df = DataFrame({'date': date_range('2000-01-01', '2000-01-5'),
'val': Series(
range(5), dtype=np.int64)})
df.loc[:, 'date'] = 0
tm.assert_frame_equal(df, expected)
df = DataFrame({'date': date_range('2000-01-01', '2000-01-5'),
'val': Series(range(5), dtype=np.int64)})
df.loc[:, 'date'] = np.array(0, dtype=np.int64)
tm.assert_frame_equal(df, expected)
df = DataFrame({'date': date_range('2000-01-01', '2000-01-5'),
'val': Series(range(5), dtype=np.int64)})
df.loc[:, 'date'] = np.array([0, 0, 0, 0, 0], dtype=np.int64)
tm.assert_frame_equal(df, expected)
expected = DataFrame({'date': Series('foo', index=range(5)),
'val': Series(range(5), dtype=np.int64)})
df = DataFrame({'date': date_range('2000-01-01', '2000-01-5'),
'val': Series(range(5), dtype=np.int64)})
df.loc[:, 'date'] = 'foo'
tm.assert_frame_equal(df, expected)
expected = DataFrame({'date': Series(1.0, index=range(5)),
'val': Series(range(5), dtype=np.int64)})
df = DataFrame({'date': date_range('2000-01-01', '2000-01-5'),
'val': Series(range(5), dtype=np.int64)})
df.loc[:, 'date'] = 1.0
tm.assert_frame_equal(df, expected)
def test_loc_setitem_consistency_empty(self):
# empty (essentially noops)
expected = DataFrame(columns=['x', 'y'])
expected['x'] = expected['x'].astype(np.int64)
df = DataFrame(columns=['x', 'y'])
df.loc[:, 'x'] = 1
tm.assert_frame_equal(df, expected)
df = DataFrame(columns=['x', 'y'])
df['x'] = 1
tm.assert_frame_equal(df, expected)
def test_loc_setitem_consistency_slice_column_len(self):
# .loc[:,column] setting with slice == len of the column
# GH10408
data = """Level_0,,,Respondent,Respondent,Respondent,OtherCat,OtherCat
Level_1,,,Something,StartDate,EndDate,Yes/No,SomethingElse
Region,Site,RespondentID,,,,,
Region_1,Site_1,3987227376,A,5/25/2015 10:59,5/25/2015 11:22,Yes,
Region_1,Site_1,3980680971,A,5/21/2015 9:40,5/21/2015 9:52,Yes,Yes
Region_1,Site_2,3977723249,A,5/20/2015 8:27,5/20/2015 8:41,Yes,
Region_1,Site_2,3977723089,A,5/20/2015 8:33,5/20/2015 9:09,Yes,No"""
df = pd.read_csv(StringIO(data), header=[0, 1], index_col=[0, 1, 2])
df.loc[:, ('Respondent', 'StartDate')] = pd.to_datetime(df.loc[:, (
'Respondent', 'StartDate')])
df.loc[:, ('Respondent', 'EndDate')] = pd.to_datetime(df.loc[:, (
'Respondent', 'EndDate')])
df.loc[:, ('Respondent', 'Duration')] = df.loc[:, (
'Respondent', 'EndDate')] - df.loc[:, ('Respondent', 'StartDate')]
df.loc[:, ('Respondent', 'Duration')] = df.loc[:, (
'Respondent', 'Duration')].astype('timedelta64[s]')
expected = Series([1380, 720, 840, 2160.], index=df.index,
name=('Respondent', 'Duration'))
tm.assert_series_equal(df[('Respondent', 'Duration')], expected)
def test_loc_setitem_frame(self):
df = self.frame_labels
result = df.iloc[0, 0]
df.loc['a', 'A'] = 1
result = df.loc['a', 'A']
self.assertEqual(result, 1)
result = df.iloc[0, 0]
self.assertEqual(result, 1)
df.loc[:, 'B':'D'] = 0
expected = df.loc[:, 'B':'D']
with catch_warnings(record=True):
result = df.ix[:, 1:]
tm.assert_frame_equal(result, expected)
# GH 6254
# setting issue
df = DataFrame(index=[3, 5, 4], columns=['A'])
df.loc[[4, 3, 5], 'A'] = np.array([1, 2, 3], dtype='int64')
expected = DataFrame(dict(A=Series(
[1, 2, 3], index=[4, 3, 5]))).reindex(index=[3, 5, 4])
tm.assert_frame_equal(df, expected)
# GH 6252
# setting with an empty frame
keys1 = ['@' + str(i) for i in range(5)]
val1 = np.arange(5, dtype='int64')
keys2 = ['@' + str(i) for i in range(4)]
val2 = np.arange(4, dtype='int64')
index = list(set(keys1).union(keys2))
df = DataFrame(index=index)
df['A'] = nan
df.loc[keys1, 'A'] = val1
df['B'] = nan
df.loc[keys2, 'B'] = val2
expected = DataFrame(dict(A=Series(val1, index=keys1), B=Series(
val2, index=keys2))).reindex(index=index)
tm.assert_frame_equal(df, expected)
# GH 8669
# invalid coercion of nan -> int
df = DataFrame({'A': [1, 2, 3], 'B': np.nan})
df.loc[df.B > df.A, 'B'] = df.A
expected = DataFrame({'A': [1, 2, 3], 'B': np.nan})
tm.assert_frame_equal(df, expected)
# GH 6546
# setting with mixed labels
df = DataFrame({1: [1, 2], 2: [3, 4], 'a': ['a', 'b']})
result = df.loc[0, [1, 2]]
expected = Series([1, 3], index=[1, 2], dtype=object, name=0)
tm.assert_series_equal(result, expected)
expected = DataFrame({1: [5, 2], 2: [6, 4], 'a': ['a', 'b']})
df.loc[0, [1, 2]] = [5, 6]
tm.assert_frame_equal(df, expected)
def test_loc_setitem_frame_multiples(self):
# multiple setting
df = DataFrame({'A': ['foo', 'bar', 'baz'],
'B': Series(
range(3), dtype=np.int64)})
rhs = df.loc[1:2]
rhs.index = df.index[0:2]
df.loc[0:1] = rhs
expected = DataFrame({'A': ['bar', 'baz', 'baz'],
'B': Series(
[1, 2, 2], dtype=np.int64)})
tm.assert_frame_equal(df, expected)
# multiple setting with frame on rhs (with M8)
df = DataFrame({'date': date_range('2000-01-01', '2000-01-5'),
'val': Series(
range(5), dtype=np.int64)})
expected = DataFrame({'date': [Timestamp('20000101'), Timestamp(
'20000102'), Timestamp('20000101'), Timestamp('20000102'),
Timestamp('20000103')],
'val': Series(
[0, 1, 0, 1, 2], dtype=np.int64)})
rhs = df.loc[0:2]
rhs.index = df.index[2:5]
df.loc[2:4] = rhs
tm.assert_frame_equal(df, expected)
def test_iloc_getitem_frame(self):
df = DataFrame(np.random.randn(10, 4), index=lrange(0, 20, 2),
columns=lrange(0, 8, 2))
result = df.iloc[2]
with catch_warnings(record=True):
exp = df.ix[4]
tm.assert_series_equal(result, exp)
result = df.iloc[2, 2]
with catch_warnings(record=True):
exp = df.ix[4, 4]
self.assertEqual(result, exp)
# slice
result = df.iloc[4:8]
with catch_warnings(record=True):
expected = df.ix[8:14]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 2:3]
with catch_warnings(record=True):
expected = df.ix[:, 4:5]
tm.assert_frame_equal(result, expected)
# list of integers
result = df.iloc[[0, 1, 3]]
with catch_warnings(record=True):
expected = df.ix[[0, 2, 6]]
tm.assert_frame_equal(result, expected)
result = df.iloc[[0, 1, 3], [0, 1]]
with catch_warnings(record=True):
expected = df.ix[[0, 2, 6], [0, 2]]
tm.assert_frame_equal(result, expected)
# neg indices
result = df.iloc[[-1, 1, 3], [-1, 1]]
with catch_warnings(record=True):
expected = df.ix[[18, 2, 6], [6, 2]]
tm.assert_frame_equal(result, expected)
# dup indices
result = df.iloc[[-1, -1, 1, 3], [-1, 1]]
with catch_warnings(record=True):
expected = df.ix[[18, 18, 2, 6], [6, 2]]
tm.assert_frame_equal(result, expected)
# with index-like
s = Series(index=lrange(1, 5))
result = df.iloc[s.index]
with catch_warnings(record=True):
expected = df.ix[[2, 4, 6, 8]]
tm.assert_frame_equal(result, expected)
def test_iloc_getitem_labelled_frame(self):
# try with labelled frame
df = DataFrame(np.random.randn(10, 4),
index=list('abcdefghij'), columns=list('ABCD'))
result = df.iloc[1, 1]
exp = df.loc['b', 'B']
self.assertEqual(result, exp)
result = df.iloc[:, 2:3]
expected = df.loc[:, ['C']]
tm.assert_frame_equal(result, expected)
# negative indexing
result = df.iloc[-1, -1]
exp = df.loc['j', 'D']
self.assertEqual(result, exp)
# out-of-bounds exception
self.assertRaises(IndexError, df.iloc.__getitem__, tuple([10, 5]))
# trying to use a label
self.assertRaises(ValueError, df.iloc.__getitem__, tuple(['j', 'D']))
def test_iloc_getitem_doc_issue(self):
# multi axis slicing issue with single block
# surfaced in GH 6059
arr = np.random.randn(6, 4)
index = date_range('20130101', periods=6)
columns = list('ABCD')
df = DataFrame(arr, index=index, columns=columns)
# defines ref_locs
df.describe()
result = df.iloc[3:5, 0:2]
str(result)
result.dtypes
expected = DataFrame(arr[3:5, 0:2], index=index[3:5],
columns=columns[0:2])
tm.assert_frame_equal(result, expected)
# for dups
df.columns = list('aaaa')
result = df.iloc[3:5, 0:2]
str(result)
result.dtypes
expected = DataFrame(arr[3:5, 0:2], index=index[3:5],
columns=list('aa'))
tm.assert_frame_equal(result, expected)
# related
arr = np.random.randn(6, 4)
index = list(range(0, 12, 2))
columns = list(range(0, 8, 2))
df = DataFrame(arr, index=index, columns=columns)
df._data.blocks[0].mgr_locs
result = df.iloc[1:5, 2:4]
str(result)
result.dtypes
expected = DataFrame(arr[1:5, 2:4], index=index[1:5],
columns=columns[2:4])
tm.assert_frame_equal(result, expected)
def test_setitem_ndarray_1d(self):
# GH5508
# len of indexer vs length of the 1d ndarray
df = DataFrame(index=Index(lrange(1, 11)))
df['foo'] = np.zeros(10, dtype=np.float64)
df['bar'] = np.zeros(10, dtype=np.complex)
# invalid
def f():
with catch_warnings(record=True):
df.ix[2:5, 'bar'] = np.array([2.33j, 1.23 + 0.1j, 2.2])
self.assertRaises(ValueError, f)
def f():
df.loc[df.index[2:5], 'bar'] = np.array([2.33j, 1.23 + 0.1j,
2.2, 1.0])
self.assertRaises(ValueError, f)
# valid
df.loc[df.index[2:6], 'bar'] = np.array([2.33j, 1.23 + 0.1j,
2.2, 1.0])
result = df.loc[df.index[2:6], 'bar']
expected = Series([2.33j, 1.23 + 0.1j, 2.2, 1.0], index=[3, 4, 5, 6],
name='bar')
tm.assert_series_equal(result, expected)
# dtype getting changed?
df = DataFrame(index=Index(lrange(1, 11)))
df['foo'] = np.zeros(10, dtype=np.float64)
df['bar'] = np.zeros(10, dtype=np.complex)
def f():
df[2:5] = np.arange(1, 4) * 1j
self.assertRaises(ValueError, f)
def test_iloc_setitem_series(self):
df = DataFrame(np.random.randn(10, 4), index=list('abcdefghij'),
columns=list('ABCD'))
df.iloc[1, 1] = 1
result = df.iloc[1, 1]
self.assertEqual(result, 1)
df.iloc[:, 2:3] = 0
expected = df.iloc[:, 2:3]
result = df.iloc[:, 2:3]
tm.assert_frame_equal(result, expected)
s = Series(np.random.randn(10), index=lrange(0, 20, 2))
s.iloc[1] = 1
result = s.iloc[1]
self.assertEqual(result, 1)
s.iloc[:4] = 0
expected = s.iloc[:4]
result = s.iloc[:4]
tm.assert_series_equal(result, expected)
s = Series([-1] * 6)
s.iloc[0::2] = [0, 2, 4]
s.iloc[1::2] = [1, 3, 5]
result = s
expected = Series([0, 1, 2, 3, 4, 5])
tm.assert_series_equal(result, expected)
def test_iloc_setitem_list_of_lists(self):
# GH 7551
# list-of-list is set incorrectly in mixed vs. single dtyped frames
df = DataFrame(dict(A=np.arange(5, dtype='int64'),
B=np.arange(5, 10, dtype='int64')))
df.iloc[2:4] = [[10, 11], [12, 13]]
expected = DataFrame(dict(A=[0, 1, 10, 12, 4], B=[5, 6, 11, 13, 9]))
tm.assert_frame_equal(df, expected)
df = DataFrame(
dict(A=list('abcde'), B=np.arange(5, 10, dtype='int64')))
df.iloc[2:4] = [['x', 11], ['y', 13]]
expected = DataFrame(dict(A=['a', 'b', 'x', 'y', 'e'],
B=[5, 6, 11, 13, 9]))
tm.assert_frame_equal(df, expected)
def test_ix_general(self):
# ix general issues
# GH 2817
data = {'amount': {0: 700, 1: 600, 2: 222, 3: 333, 4: 444},
'col': {0: 3.5, 1: 3.5, 2: 4.0, 3: 4.0, 4: 4.0},
'year': {0: 2012, 1: 2011, 2: 2012, 3: 2012, 4: 2012}}
df = DataFrame(data).set_index(keys=['col', 'year'])
key = 4.0, 2012
# emits a PerformanceWarning, ok
with self.assert_produces_warning(PerformanceWarning):
tm.assert_frame_equal(df.loc[key], df.iloc[2:])
# this is ok
df.sort_index(inplace=True)
res = df.loc[key]
# col has float dtype, result should be Float64Index
index = MultiIndex.from_arrays([[4.] * 3, [2012] * 3],
names=['col', 'year'])
expected = DataFrame({'amount': [222, 333, 444]}, index=index)
tm.assert_frame_equal(res, expected)
def test_ix_weird_slicing(self):
# http://stackoverflow.com/q/17056560/1240268
df = DataFrame({'one': [1, 2, 3, np.nan, np.nan],
'two': [1, 2, 3, 4, 5]})
df.loc[df['one'] > 1, 'two'] = -df['two']
expected = DataFrame({'one': {0: 1.0,
1: 2.0,
2: 3.0,
3: nan,
4: nan},
'two': {0: 1,
1: -2,
2: -3,
3: 4,
4: 5}})
tm.assert_frame_equal(df, expected)
def test_loc_coerceion(self):
# 12411
df = DataFrame({'date': [pd.Timestamp('20130101').tz_localize('UTC'),
pd.NaT]})
expected = df.dtypes
result = df.iloc[[0]]
tm.assert_series_equal(result.dtypes, expected)
result = df.iloc[[1]]
tm.assert_series_equal(result.dtypes, expected)
# 12045
import datetime
df = DataFrame({'date': [datetime.datetime(2012, 1, 1),
datetime.datetime(1012, 1, 2)]})
expected = df.dtypes
result = df.iloc[[0]]
tm.assert_series_equal(result.dtypes, expected)
result = df.iloc[[1]]
tm.assert_series_equal(result.dtypes, expected)
# 11594
df = DataFrame({'text': ['some words'] + [None] * 9})
expected = df.dtypes
result = df.iloc[0:2]
tm.assert_series_equal(result.dtypes, expected)
result = df.iloc[3:]
tm.assert_series_equal(result.dtypes, expected)
def test_setitem_dtype_upcast(self):
# GH3216
df = DataFrame([{"a": 1}, {"a": 3, "b": 2}])
df['c'] = np.nan
self.assertEqual(df['c'].dtype, np.float64)
df.loc[0, 'c'] = 'foo'
expected = DataFrame([{"a": 1, "c": 'foo'},
{"a": 3, "b": 2, "c": np.nan}])
tm.assert_frame_equal(df, expected)
# GH10280
df = DataFrame(np.arange(6, dtype='int64').reshape(2, 3),
index=list('ab'),
columns=['foo', 'bar', 'baz'])
for val in [3.14, 'wxyz']:
left = df.copy()
left.loc['a', 'bar'] = val
right = DataFrame([[0, val, 2], [3, 4, 5]], index=list('ab'),
columns=['foo', 'bar', 'baz'])
tm.assert_frame_equal(left, right)
self.assertTrue(is_integer_dtype(left['foo']))
self.assertTrue(is_integer_dtype(left['baz']))
left = DataFrame(np.arange(6, dtype='int64').reshape(2, 3) / 10.0,
index=list('ab'),
columns=['foo', 'bar', 'baz'])
left.loc['a', 'bar'] = 'wxyz'
right = DataFrame([[0, 'wxyz', .2], [.3, .4, .5]], index=list('ab'),
columns=['foo', 'bar', 'baz'])
tm.assert_frame_equal(left, right)
self.assertTrue(is_float_dtype(left['foo']))
self.assertTrue(is_float_dtype(left['baz']))
def test_setitem_iloc(self):
# setitem with an iloc list
df = DataFrame(np.arange(9).reshape((3, 3)), index=["A", "B", "C"],
columns=["A", "B", "C"])
df.iloc[[0, 1], [1, 2]]
df.iloc[[0, 1], [1, 2]] += 100
expected = DataFrame(
np.array([0, 101, 102, 3, 104, 105, 6, 7, 8]).reshape((3, 3)),
index=["A", "B", "C"], columns=["A", "B", "C"])
tm.assert_frame_equal(df, expected)
def test_dups_fancy_indexing(self):
# GH 3455
from pandas.util.testing import makeCustomDataframe as mkdf
df = mkdf(10, 3)
df.columns = ['a', 'a', 'b']
result = df[['b', 'a']].columns
expected = Index(['b', 'a', 'a'])
self.assert_index_equal(result, expected)
# across dtypes
df = DataFrame([[1, 2, 1., 2., 3., 'foo', 'bar']],
columns=list('aaaaaaa'))
df.head()
str(df)
result = DataFrame([[1, 2, 1., 2., 3., 'foo', 'bar']])
result.columns = list('aaaaaaa')
# TODO(wesm): unused?
df_v = df.iloc[:, 4] # noqa
res_v = result.iloc[:, 4] # noqa
tm.assert_frame_equal(df, result)
# GH 3561, dups not in selected order
df = DataFrame(
{'test': [5, 7, 9, 11],
'test1': [4., 5, 6, 7],
'other': list('abcd')}, index=['A', 'A', 'B', 'C'])
rows = ['C', 'B']
expected = DataFrame(
{'test': [11, 9],
'test1': [7., 6],
'other': ['d', 'c']}, index=rows)
result = df.loc[rows]
tm.assert_frame_equal(result, expected)
result = df.loc[Index(rows)]
tm.assert_frame_equal(result, expected)
rows = ['C', 'B', 'E']
expected = DataFrame(
{'test': [11, 9, np.nan],
'test1': [7., 6, np.nan],
'other': ['d', 'c', np.nan]}, index=rows)
result = df.loc[rows]
tm.assert_frame_equal(result, expected)
# see GH5553, make sure we use the right indexer
rows = ['F', 'G', 'H', 'C', 'B', 'E']
expected = DataFrame({'test': [np.nan, np.nan, np.nan, 11, 9, np.nan],
'test1': [np.nan, np.nan, np.nan, 7., 6, np.nan],
'other': [np.nan, np.nan, np.nan,
'd', 'c', np.nan]},
index=rows)
result = df.loc[rows]
tm.assert_frame_equal(result, expected)
# inconsistent returns for unique/duplicate indices when values are
# missing
df = DataFrame(randn(4, 3), index=list('ABCD'))
expected = df.ix[['E']]
dfnu = DataFrame(randn(5, 3), index=list('AABCD'))
result = dfnu.ix[['E']]
tm.assert_frame_equal(result, expected)
# ToDo: check_index_type can be True after GH 11497
# GH 4619; duplicate indexer with missing label
df = DataFrame({"A": [0, 1, 2]})
result = df.ix[[0, 8, 0]]
expected = DataFrame({"A": [0, np.nan, 0]}, index=[0, 8, 0])
tm.assert_frame_equal(result, expected, check_index_type=False)
df = DataFrame({"A": list('abc')})
result = df.ix[[0, 8, 0]]
expected = DataFrame({"A": ['a', np.nan, 'a']}, index=[0, 8, 0])
tm.assert_frame_equal(result, expected, check_index_type=False)
# non unique with non unique selector
df = DataFrame({'test': [5, 7, 9, 11]}, index=['A', 'A', 'B', 'C'])
expected = DataFrame(
{'test': [5, 7, 5, 7, np.nan]}, index=['A', 'A', 'A', 'A', 'E'])
result = df.ix[['A', 'A', 'E']]
tm.assert_frame_equal(result, expected)
# GH 5835
# dups on index and missing values
df = DataFrame(
np.random.randn(5, 5), columns=['A', 'B', 'B', 'B', 'A'])
expected = pd.concat(
[df.ix[:, ['A', 'B']], DataFrame(np.nan, columns=['C'],
index=df.index)], axis=1)
result = df.ix[:, ['A', 'B', 'C']]
tm.assert_frame_equal(result, expected)
# GH 6504, multi-axis indexing
df = DataFrame(np.random.randn(9, 2),
index=[1, 1, 1, 2, 2, 2, 3, 3, 3], columns=['a', 'b'])
expected = df.iloc[0:6]
result = df.loc[[1, 2]]
tm.assert_frame_equal(result, expected)
expected = df
result = df.loc[:, ['a', 'b']]
tm.assert_frame_equal(result, expected)
expected = df.iloc[0:6, :]
result = df.loc[[1, 2], ['a', 'b']]
tm.assert_frame_equal(result, expected)
def test_indexing_mixed_frame_bug(self):
# GH3492
df = DataFrame({'a': {1: 'aaa', 2: 'bbb', 3: 'ccc'},
'b': {1: 111, 2: 222, 3: 333}})
# this works, new column is created correctly
df['test'] = df['a'].apply(lambda x: '_' if x == 'aaa' else x)
# this does not work, ie column test is not changed
idx = df['test'] == '_'
temp = df.ix[idx, 'a'].apply(lambda x: '-----' if x == 'aaa' else x)
df.ix[idx, 'test'] = temp
self.assertEqual(df.iloc[0, 2], '-----')
# if I look at df, then element [0,2] equals '_'. If instead I type
# df.ix[idx,'test'], I get '-----', finally by typing df.iloc[0,2] I
# get '_'.
def test_multitype_list_index_access(self):
# GH 10610
df = pd.DataFrame(np.random.random((10, 5)),
columns=["a"] + [20, 21, 22, 23])
with self.assertRaises(KeyError):
df[[22, 26, -8]]
self.assertEqual(df[21].shape[0], df.shape[0])
def test_set_index_nan(self):
# GH 3586
df = DataFrame({'PRuid': {17: 'nonQC',
18: 'nonQC',
19: 'nonQC',
20: '10',
21: '11',
22: '12',
23: '13',
24: '24',
25: '35',
26: '46',
27: '47',
28: '48',
29: '59',
30: '10'},
'QC': {17: 0.0,
18: 0.0,
19: 0.0,
20: nan,
21: nan,
22: nan,
23: nan,
24: 1.0,
25: nan,
26: nan,
27: nan,
28: nan,
29: nan,
30: nan},
'data': {17: 7.9544899999999998,
18: 8.0142609999999994,
19: 7.8591520000000008,
20: 0.86140349999999999,
21: 0.87853110000000001,
22: 0.8427041999999999,
23: 0.78587700000000005,
24: 0.73062459999999996,
25: 0.81668560000000001,
26: 0.81927080000000008,
27: 0.80705009999999999,
28: 0.81440240000000008,
29: 0.80140849999999997,
30: 0.81307740000000006},
'year': {17: 2006,
18: 2007,
19: 2008,
20: 1985,
21: 1985,
22: 1985,
23: 1985,
24: 1985,
25: 1985,
26: 1985,
27: 1985,
28: 1985,
29: 1985,
30: 1986}}).reset_index()
result = df.set_index(['year', 'PRuid', 'QC']).reset_index().reindex(
columns=df.columns)
tm.assert_frame_equal(result, df)
def test_multi_nan_indexing(self):
# GH 3588
df = DataFrame({"a": ['R1', 'R2', np.nan, 'R4'],
'b': ["C1", "C2", "C3", "C4"],
"c": [10, 15, np.nan, 20]})
result = df.set_index(['a', 'b'], drop=False)
expected = DataFrame({"a": ['R1', 'R2', np.nan, 'R4'],
'b': ["C1", "C2", "C3", "C4"],
"c": [10, 15, np.nan, 20]},
index=[Index(['R1', 'R2', np.nan, 'R4'],
name='a'),
Index(['C1', 'C2', 'C3', 'C4'], name='b')])
tm.assert_frame_equal(result, expected)
def test_multi_assign(self):
        # GH 3626, an assignment of a sub-df to a df
df = DataFrame({'FC': ['a', 'b', 'a', 'b', 'a', 'b'],
'PF': [0, 0, 0, 0, 1, 1],
'col1': lrange(6),
'col2': lrange(6, 12)})
df.ix[1, 0] = np.nan
df2 = df.copy()
mask = ~df2.FC.isnull()
cols = ['col1', 'col2']
dft = df2 * 2
dft.ix[3, 3] = np.nan
expected = DataFrame({'FC': ['a', np.nan, 'a', 'b', 'a', 'b'],
'PF': [0, 0, 0, 0, 1, 1],
'col1': Series([0, 1, 4, 6, 8, 10]),
'col2': [12, 7, 16, np.nan, 20, 22]})
# frame on rhs
df2.ix[mask, cols] = dft.ix[mask, cols]
tm.assert_frame_equal(df2, expected)
df2.ix[mask, cols] = dft.ix[mask, cols]
tm.assert_frame_equal(df2, expected)
# with an ndarray on rhs
df2 = df.copy()
df2.ix[mask, cols] = dft.ix[mask, cols].values
tm.assert_frame_equal(df2, expected)
df2.ix[mask, cols] = dft.ix[mask, cols].values
tm.assert_frame_equal(df2, expected)
# broadcasting on the rhs is required
df = DataFrame(dict(A=[1, 2, 0, 0, 0], B=[0, 0, 0, 10, 11], C=[
0, 0, 0, 10, 11], D=[3, 4, 5, 6, 7]))
expected = df.copy()
mask = expected['A'] == 0
for col in ['A', 'B']:
expected.loc[mask, col] = df['D']
df.loc[df['A'] == 0, ['A', 'B']] = df['D']
tm.assert_frame_equal(df, expected)
def test_ix_assign_column_mixed(self):
# GH #1142
df = DataFrame(tm.getSeriesData())
df['foo'] = 'bar'
orig = df.ix[:, 'B'].copy()
df.ix[:, 'B'] = df.ix[:, 'B'] + 1
tm.assert_series_equal(df.B, orig + 1)
# GH 3668, mixed frame with series value
df = DataFrame({'x': lrange(10), 'y': lrange(10, 20), 'z': 'bar'})
expected = df.copy()
for i in range(5):
indexer = i * 2
v = 1000 + i * 200
expected.ix[indexer, 'y'] = v
self.assertEqual(expected.ix[indexer, 'y'], v)
df.ix[df.x % 2 == 0, 'y'] = df.ix[df.x % 2 == 0, 'y'] * 100
tm.assert_frame_equal(df, expected)
# GH 4508, making sure consistency of assignments
df = DataFrame({'a': [1, 2, 3], 'b': [0, 1, 2]})
df.ix[[0, 2, ], 'b'] = [100, -100]
expected = DataFrame({'a': [1, 2, 3], 'b': [100, 1, -100]})
tm.assert_frame_equal(df, expected)
df = pd.DataFrame({'a': lrange(4)})
df['b'] = np.nan
df.ix[[1, 3], 'b'] = [100, -100]
expected = DataFrame({'a': [0, 1, 2, 3],
'b': [np.nan, 100, np.nan, -100]})
tm.assert_frame_equal(df, expected)
# ok, but chained assignments are dangerous
        # if we turn off chained assignment it will work
with option_context('chained_assignment', None):
df = pd.DataFrame({'a': lrange(4)})
df['b'] = np.nan
df['b'].ix[[1, 3]] = [100, -100]
tm.assert_frame_equal(df, expected)
def test_ix_get_set_consistency(self):
# GH 4544
# ix/loc get/set not consistent when
# a mixed int/string index
df = DataFrame(np.arange(16).reshape((4, 4)),
columns=['a', 'b', 8, 'c'],
index=['e', 7, 'f', 'g'])
self.assertEqual(df.ix['e', 8], 2)
self.assertEqual(df.loc['e', 8], 2)
df.ix['e', 8] = 42
self.assertEqual(df.ix['e', 8], 42)
self.assertEqual(df.loc['e', 8], 42)
df.loc['e', 8] = 45
self.assertEqual(df.ix['e', 8], 45)
self.assertEqual(df.loc['e', 8], 45)
def test_setitem_list(self):
# GH 6043
# ix with a list
df = DataFrame(index=[0, 1], columns=[0])
df.ix[1, 0] = [1, 2, 3]
df.ix[1, 0] = [1, 2]
result = DataFrame(index=[0, 1], columns=[0])
result.ix[1, 0] = [1, 2]
tm.assert_frame_equal(result, df)
# ix with an object
class TO(object):
def __init__(self, value):
self.value = value
def __str__(self):
return "[{0}]".format(self.value)
__repr__ = __str__
def __eq__(self, other):
return self.value == other.value
def view(self):
return self
df = DataFrame(index=[0, 1], columns=[0])
df.ix[1, 0] = TO(1)
df.ix[1, 0] = TO(2)
result = DataFrame(index=[0, 1], columns=[0])
result.ix[1, 0] = TO(2)
tm.assert_frame_equal(result, df)
# remains object dtype even after setting it back
df = DataFrame(index=[0, 1], columns=[0])
df.ix[1, 0] = TO(1)
df.ix[1, 0] = np.nan
result = DataFrame(index=[0, 1], columns=[0])
tm.assert_frame_equal(result, df)
def test_iloc_mask(self):
# GH 3631, iloc with a mask (of a series) should raise
df = DataFrame(lrange(5), list('ABCDE'), columns=['a'])
mask = (df.a % 2 == 0)
self.assertRaises(ValueError, df.iloc.__getitem__, tuple([mask]))
mask.index = lrange(len(mask))
self.assertRaises(NotImplementedError, df.iloc.__getitem__,
tuple([mask]))
# ndarray ok
result = df.iloc[np.array([True] * len(mask), dtype=bool)]
tm.assert_frame_equal(result, df)
# the possibilities
locs = np.arange(4)
nums = 2 ** locs
reps = lmap(bin, nums)
df = DataFrame({'locs': locs, 'nums': nums}, reps)
expected = {
(None, ''): '0b1100',
(None, '.loc'): '0b1100',
(None, '.iloc'): '0b1100',
('index', ''): '0b11',
('index', '.loc'): '0b11',
('index', '.iloc'): ('iLocation based boolean indexing '
'cannot use an indexable as a mask'),
('locs', ''): 'Unalignable boolean Series provided as indexer '
'(index of the boolean Series and of the indexed '
'object do not match',
('locs', '.loc'): 'Unalignable boolean Series provided as indexer '
'(index of the boolean Series and of the '
'indexed object do not match',
('locs', '.iloc'): ('iLocation based boolean indexing on an '
'integer type is not available'),
}
# UserWarnings from reindex of a boolean mask
with warnings.catch_warnings(record=True):
result = dict()
for idx in [None, 'index', 'locs']:
mask = (df.nums > 2).values
if idx:
mask = Series(mask, list(reversed(getattr(df, idx))))
for method in ['', '.loc', '.iloc']:
try:
if method:
accessor = getattr(df, method[1:])
else:
accessor = df
ans = str(bin(accessor[mask]['nums'].sum()))
except Exception as e:
ans = str(e)
key = tuple([idx, method])
r = expected.get(key)
if r != ans:
raise AssertionError(
"[%s] does not match [%s], received [%s]"
% (key, ans, r))
def test_ix_slicing_strings(self):
# GH3836
data = {'Classification':
['SA EQUITY CFD', 'bbb', 'SA EQUITY', 'SA SSF', 'aaa'],
'Random': [1, 2, 3, 4, 5],
'X': ['correct', 'wrong', 'correct', 'correct', 'wrong']}
df = DataFrame(data)
x = df[~df.Classification.isin(['SA EQUITY CFD', 'SA EQUITY', 'SA SSF'
])]
df.ix[x.index, 'X'] = df['Classification']
expected = DataFrame({'Classification': {0: 'SA EQUITY CFD',
1: 'bbb',
2: 'SA EQUITY',
3: 'SA SSF',
4: 'aaa'},
'Random': {0: 1,
1: 2,
2: 3,
3: 4,
4: 5},
'X': {0: 'correct',
1: 'bbb',
2: 'correct',
3: 'correct',
4: 'aaa'}}) # bug was 4: 'bbb'
tm.assert_frame_equal(df, expected)
def test_non_unique_loc(self):
# GH3659
# non-unique indexer with loc slice
# https://groups.google.com/forum/?fromgroups#!topic/pydata/zTm2No0crYs
        # these are going to raise because we are non-monotonic
df = DataFrame({'A': [1, 2, 3, 4, 5, 6],
'B': [3, 4, 5, 6, 7, 8]}, index=[0, 1, 0, 1, 2, 3])
self.assertRaises(KeyError, df.loc.__getitem__,
tuple([slice(1, None)]))
self.assertRaises(KeyError, df.loc.__getitem__,
tuple([slice(0, None)]))
self.assertRaises(KeyError, df.loc.__getitem__, tuple([slice(1, 2)]))
# monotonic are ok
df = DataFrame({'A': [1, 2, 3, 4, 5, 6],
'B': [3, 4, 5, 6, 7, 8]},
index=[0, 1, 0, 1, 2, 3]).sort_index(axis=0)
result = df.loc[1:]
expected = DataFrame({'A': [2, 4, 5, 6], 'B': [4, 6, 7, 8]},
index=[1, 1, 2, 3])
tm.assert_frame_equal(result, expected)
result = df.loc[0:]
tm.assert_frame_equal(result, df)
result = df.loc[1:2]
expected = DataFrame({'A': [2, 4, 5], 'B': [4, 6, 7]},
index=[1, 1, 2])
tm.assert_frame_equal(result, expected)
def test_loc_name(self):
# GH 3880
df = DataFrame([[1, 1], [1, 1]])
df.index.name = 'index_name'
result = df.iloc[[0, 1]].index.name
self.assertEqual(result, 'index_name')
result = df.ix[[0, 1]].index.name
self.assertEqual(result, 'index_name')
result = df.loc[[0, 1]].index.name
self.assertEqual(result, 'index_name')
def test_iloc_non_unique_indexing(self):
# GH 4017, non-unique indexing (on the axis)
df = DataFrame({'A': [0.1] * 3000, 'B': [1] * 3000})
idx = np.array(lrange(30)) * 99
expected = df.iloc[idx]
df3 = pd.concat([df, 2 * df, 3 * df])
result = df3.iloc[idx]
tm.assert_frame_equal(result, expected)
df2 = DataFrame({'A': [0.1] * 1000, 'B': [1] * 1000})
df2 = pd.concat([df2, 2 * df2, 3 * df2])
sidx = df2.index.to_series()
expected = df2.iloc[idx[idx <= sidx.max()]]
new_list = []
for r, s in expected.iterrows():
new_list.append(s)
new_list.append(s * 2)
new_list.append(s * 3)
expected = DataFrame(new_list)
expected = pd.concat([expected, DataFrame(index=idx[idx > sidx.max()])
])
result = df2.loc[idx]
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_string_slice(self):
# GH 14424
# string indexing against datetimelike with object
        # dtype should properly raise KeyError
df = pd.DataFrame([1], pd.Index([pd.Timestamp('2011-01-01')],
dtype=object))
self.assertTrue(df.index.is_all_dates)
with tm.assertRaises(KeyError):
df['2011']
with tm.assertRaises(KeyError):
df.loc['2011', 0]
df = pd.DataFrame()
self.assertFalse(df.index.is_all_dates)
with tm.assertRaises(KeyError):
df['2011']
with tm.assertRaises(KeyError):
df.loc['2011', 0]
def test_mi_access(self):
# GH 4145
data = """h1 main h3 sub h5
0 a A 1 A1 1
1 b B 2 B1 2
2 c B 3 A1 3
3 d A 4 B2 4
4 e A 5 B2 5
5 f B 6 A2 6
"""
df = pd.read_csv(StringIO(data), sep=r'\s+', index_col=0)
df2 = df.set_index(['main', 'sub']).T.sort_index(1)
index = Index(['h1', 'h3', 'h5'])
columns = MultiIndex.from_tuples([('A', 'A1')], names=['main', 'sub'])
expected = DataFrame([['a', 1, 1]], index=columns, columns=index).T
result = df2.loc[:, ('A', 'A1')]
tm.assert_frame_equal(result, expected)
result = df2[('A', 'A1')]
tm.assert_frame_equal(result, expected)
# GH 4146, not returning a block manager when selecting a unique index
# from a duplicate index
# as of 4879, this returns a Series (which is similar to what happens
# with a non-unique)
expected = Series(['a', 1, 1], index=['h1', 'h3', 'h5'], name='A1')
result = df2['A']['A1']
tm.assert_series_equal(result, expected)
# selecting a non_unique from the 2nd level
expected = DataFrame([['d', 4, 4], ['e', 5, 5]],
index=Index(['B2', 'B2'], name='sub'),
columns=['h1', 'h3', 'h5'], ).T
result = df2['A']['B2']
tm.assert_frame_equal(result, expected)
def test_non_unique_loc_memory_error(self):
# GH 4280
# non_unique index with a large selection triggers a memory error
columns = list('ABCDEFG')
def gen_test(l, l2):
return pd.concat([DataFrame(randn(l, len(columns)),
index=lrange(l), columns=columns),
DataFrame(np.ones((l2, len(columns))),
index=[0] * l2, columns=columns)])
def gen_expected(df, mask):
l = len(mask)
return pd.concat([df.take([0], convert=False),
DataFrame(np.ones((l, len(columns))),
index=[0] * l,
columns=columns),
df.take(mask[1:], convert=False)])
df = gen_test(900, 100)
self.assertFalse(df.index.is_unique)
mask = np.arange(100)
result = df.loc[mask]
expected = gen_expected(df, mask)
tm.assert_frame_equal(result, expected)
df = gen_test(900000, 100000)
self.assertFalse(df.index.is_unique)
mask = np.arange(100000)
result = df.loc[mask]
expected = gen_expected(df, mask)
tm.assert_frame_equal(result, expected)
def test_astype_assignment(self):
# GH4312 (iloc)
df_orig = DataFrame([['1', '2', '3', '.4', 5, 6., 'foo']],
columns=list('ABCDEFG'))
df = df_orig.copy()
df.iloc[:, 0:2] = df.iloc[:, 0:2].astype(np.int64)
expected = DataFrame([[1, 2, '3', '.4', 5, 6., 'foo']],
columns=list('ABCDEFG'))
tm.assert_frame_equal(df, expected)
df = df_orig.copy()
df.iloc[:, 0:2] = df.iloc[:, 0:2]._convert(datetime=True, numeric=True)
expected = DataFrame([[1, 2, '3', '.4', 5, 6., 'foo']],
columns=list('ABCDEFG'))
tm.assert_frame_equal(df, expected)
# GH5702 (loc)
df = df_orig.copy()
df.loc[:, 'A'] = df.loc[:, 'A'].astype(np.int64)
expected = DataFrame([[1, '2', '3', '.4', 5, 6., 'foo']],
columns=list('ABCDEFG'))
tm.assert_frame_equal(df, expected)
df = df_orig.copy()
df.loc[:, ['B', 'C']] = df.loc[:, ['B', 'C']].astype(np.int64)
expected = DataFrame([['1', 2, 3, '.4', 5, 6., 'foo']],
columns=list('ABCDEFG'))
tm.assert_frame_equal(df, expected)
# full replacements / no nans
df = DataFrame({'A': [1., 2., 3., 4.]})
df.iloc[:, 0] = df['A'].astype(np.int64)
expected = DataFrame({'A': [1, 2, 3, 4]})
tm.assert_frame_equal(df, expected)
df = DataFrame({'A': [1., 2., 3., 4.]})
df.loc[:, 'A'] = df['A'].astype(np.int64)
expected = DataFrame({'A': [1, 2, 3, 4]})
tm.assert_frame_equal(df, expected)
def test_astype_assignment_with_dups(self):
# GH 4686
# assignment with dups that has a dtype change
cols = pd.MultiIndex.from_tuples([('A', '1'), ('B', '1'), ('A', '2')])
df = DataFrame(np.arange(3).reshape((1, 3)),
columns=cols, dtype=object)
index = df.index.copy()
df['A'] = df['A'].astype(np.float64)
self.assert_index_equal(df.index, index)
# TODO(wesm): unused variables
# result = df.get_dtype_counts().sort_index()
# expected = Series({'float64': 2, 'object': 1}).sort_index()
def test_dups_loc(self):
# GH4726
# dup indexing with iloc/loc
df = DataFrame([[1, 2, 'foo', 'bar', Timestamp('20130101')]],
columns=['a', 'a', 'a', 'a', 'a'], index=[1])
expected = Series([1, 2, 'foo', 'bar', Timestamp('20130101')],
index=['a', 'a', 'a', 'a', 'a'], name=1)
result = df.iloc[0]
tm.assert_series_equal(result, expected)
result = df.loc[1]
tm.assert_series_equal(result, expected)
def test_partial_setting(self):
# GH2578, allow ix and friends to partially set
# series
s_orig = Series([1, 2, 3])
s = s_orig.copy()
s[5] = 5
expected = Series([1, 2, 3, 5], index=[0, 1, 2, 5])
tm.assert_series_equal(s, expected)
s = s_orig.copy()
s.loc[5] = 5
expected = Series([1, 2, 3, 5], index=[0, 1, 2, 5])
tm.assert_series_equal(s, expected)
s = s_orig.copy()
s[5] = 5.
expected = Series([1, 2, 3, 5.], index=[0, 1, 2, 5])
tm.assert_series_equal(s, expected)
s = s_orig.copy()
s.loc[5] = 5.
expected = Series([1, 2, 3, 5.], index=[0, 1, 2, 5])
tm.assert_series_equal(s, expected)
# iloc/iat raise
s = s_orig.copy()
def f():
s.iloc[3] = 5.
self.assertRaises(IndexError, f)
def f():
s.iat[3] = 5.
self.assertRaises(IndexError, f)
# ## frame ##
df_orig = DataFrame(
np.arange(6).reshape(3, 2), columns=['A', 'B'], dtype='int64')
# iloc/iat raise
df = df_orig.copy()
def f():
df.iloc[4, 2] = 5.
self.assertRaises(IndexError, f)
def f():
df.iat[4, 2] = 5.
self.assertRaises(IndexError, f)
# row setting where it exists
expected = DataFrame(dict({'A': [0, 4, 4], 'B': [1, 5, 5]}))
df = df_orig.copy()
df.iloc[1] = df.iloc[2]
tm.assert_frame_equal(df, expected)
expected = DataFrame(dict({'A': [0, 4, 4], 'B': [1, 5, 5]}))
df = df_orig.copy()
df.loc[1] = df.loc[2]
tm.assert_frame_equal(df, expected)
# like 2578, partial setting with dtype preservation
expected = DataFrame(dict({'A': [0, 2, 4, 4], 'B': [1, 3, 5, 5]}))
df = df_orig.copy()
df.loc[3] = df.loc[2]
tm.assert_frame_equal(df, expected)
# single dtype frame, overwrite
expected = DataFrame(dict({'A': [0, 2, 4], 'B': [0, 2, 4]}))
df = df_orig.copy()
df.ix[:, 'B'] = df.ix[:, 'A']
tm.assert_frame_equal(df, expected)
# mixed dtype frame, overwrite
expected = DataFrame(dict({'A': [0, 2, 4], 'B': Series([0, 2, 4])}))
df = df_orig.copy()
df['B'] = df['B'].astype(np.float64)
df.ix[:, 'B'] = df.ix[:, 'A']
tm.assert_frame_equal(df, expected)
# single dtype frame, partial setting
expected = df_orig.copy()
expected['C'] = df['A']
df = df_orig.copy()
df.ix[:, 'C'] = df.ix[:, 'A']
tm.assert_frame_equal(df, expected)
# mixed frame, partial setting
expected = df_orig.copy()
expected['C'] = df['A']
df = df_orig.copy()
df.ix[:, 'C'] = df.ix[:, 'A']
tm.assert_frame_equal(df, expected)
# ## panel ##
p_orig = Panel(np.arange(16).reshape(2, 4, 2),
items=['Item1', 'Item2'],
major_axis=pd.date_range('2001/1/12', periods=4),
minor_axis=['A', 'B'], dtype='float64')
# panel setting via item
p_orig = Panel(np.arange(16).reshape(2, 4, 2),
items=['Item1', 'Item2'],
major_axis=pd.date_range('2001/1/12', periods=4),
minor_axis=['A', 'B'], dtype='float64')
expected = p_orig.copy()
expected['Item3'] = expected['Item1']
p = p_orig.copy()
p.loc['Item3'] = p['Item1']
tm.assert_panel_equal(p, expected)
# panel with aligned series
expected = p_orig.copy()
expected = expected.transpose(2, 1, 0)
expected['C'] = DataFrame({'Item1': [30, 30, 30, 30],
'Item2': [32, 32, 32, 32]},
index=p_orig.major_axis)
expected = expected.transpose(2, 1, 0)
p = p_orig.copy()
p.loc[:, :, 'C'] = Series([30, 32], index=p_orig.items)
tm.assert_panel_equal(p, expected)
# GH 8473
dates = date_range('1/1/2000', periods=8)
df_orig = DataFrame(np.random.randn(8, 4), index=dates,
columns=['A', 'B', 'C', 'D'])
expected = pd.concat([df_orig, DataFrame(
{'A': 7}, index=[dates[-1] + 1])])
df = df_orig.copy()
df.loc[dates[-1] + 1, 'A'] = 7
tm.assert_frame_equal(df, expected)
df = df_orig.copy()
df.at[dates[-1] + 1, 'A'] = 7
tm.assert_frame_equal(df, expected)
exp_other = DataFrame({0: 7}, index=[dates[-1] + 1])
expected = pd.concat([df_orig, exp_other], axis=1)
df = df_orig.copy()
df.loc[dates[-1] + 1, 0] = 7
tm.assert_frame_equal(df, expected)
df = df_orig.copy()
df.at[dates[-1] + 1, 0] = 7
tm.assert_frame_equal(df, expected)
def test_partial_setting_mixed_dtype(self):
# in a mixed dtype environment, try to preserve dtypes
# by appending
df = DataFrame([[True, 1], [False, 2]], columns=["female", "fitness"])
s = df.loc[1].copy()
s.name = 2
expected = df.append(s)
df.loc[2] = df.loc[1]
tm.assert_frame_equal(df, expected)
# columns will align
df = DataFrame(columns=['A', 'B'])
df.loc[0] = Series(1, index=range(4))
tm.assert_frame_equal(df, DataFrame(columns=['A', 'B'], index=[0]))
# columns will align
df = DataFrame(columns=['A', 'B'])
df.loc[0] = Series(1, index=['B'])
exp = DataFrame([[np.nan, 1]], columns=['A', 'B'],
index=[0], dtype='float64')
tm.assert_frame_equal(df, exp)
# list-like must conform
df = DataFrame(columns=['A', 'B'])
def f():
df.loc[0] = [1, 2, 3]
self.assertRaises(ValueError, f)
        # these are coerced to float unavoidably
        # (as it's a list-like to begin with)
df = DataFrame(columns=['A', 'B'])
df.loc[3] = [6, 7]
exp = DataFrame([[6, 7]], index=[3], columns=['A', 'B'],
dtype='float64')
tm.assert_frame_equal(df, exp)
def test_series_partial_set(self):
# partial set with new index
# Regression from GH4825
ser = Series([0.1, 0.2], index=[1, 2])
# loc
expected = Series([np.nan, 0.2, np.nan], index=[3, 2, 3])
result = ser.loc[[3, 2, 3]]
tm.assert_series_equal(result, expected, check_index_type=True)
expected = Series([np.nan, 0.2, np.nan, np.nan], index=[3, 2, 3, 'x'])
result = ser.loc[[3, 2, 3, 'x']]
tm.assert_series_equal(result, expected, check_index_type=True)
expected = Series([0.2, 0.2, 0.1], index=[2, 2, 1])
result = ser.loc[[2, 2, 1]]
tm.assert_series_equal(result, expected, check_index_type=True)
expected = Series([0.2, 0.2, np.nan, 0.1], index=[2, 2, 'x', 1])
result = ser.loc[[2, 2, 'x', 1]]
tm.assert_series_equal(result, expected, check_index_type=True)
        # raises as nothing is in the index
self.assertRaises(KeyError, lambda: ser.loc[[3, 3, 3]])
expected = Series([0.2, 0.2, np.nan], index=[2, 2, 3])
result = ser.loc[[2, 2, 3]]
tm.assert_series_equal(result, expected, check_index_type=True)
expected = Series([0.3, np.nan, np.nan], index=[3, 4, 4])
result = Series([0.1, 0.2, 0.3], index=[1, 2, 3]).loc[[3, 4, 4]]
tm.assert_series_equal(result, expected, check_index_type=True)
expected = Series([np.nan, 0.3, 0.3], index=[5, 3, 3])
result = Series([0.1, 0.2, 0.3, 0.4],
index=[1, 2, 3, 4]).loc[[5, 3, 3]]
tm.assert_series_equal(result, expected, check_index_type=True)
expected = Series([np.nan, 0.4, 0.4], index=[5, 4, 4])
result = Series([0.1, 0.2, 0.3, 0.4],
index=[1, 2, 3, 4]).loc[[5, 4, 4]]
tm.assert_series_equal(result, expected, check_index_type=True)
expected = Series([0.4, np.nan, np.nan], index=[7, 2, 2])
result = Series([0.1, 0.2, 0.3, 0.4],
index=[4, 5, 6, 7]).loc[[7, 2, 2]]
tm.assert_series_equal(result, expected, check_index_type=True)
expected = Series([0.4, np.nan, np.nan], index=[4, 5, 5])
result = Series([0.1, 0.2, 0.3, 0.4],
index=[1, 2, 3, 4]).loc[[4, 5, 5]]
tm.assert_series_equal(result, expected, check_index_type=True)
# iloc
expected = Series([0.2, 0.2, 0.1, 0.1], index=[2, 2, 1, 1])
result = ser.iloc[[1, 1, 0, 0]]
tm.assert_series_equal(result, expected, check_index_type=True)
def test_series_partial_set_with_name(self):
# GH 11497
idx = Index([1, 2], dtype='int64', name='idx')
ser = Series([0.1, 0.2], index=idx, name='s')
# loc
exp_idx = Index([3, 2, 3], dtype='int64', name='idx')
expected = Series([np.nan, 0.2, np.nan], index=exp_idx, name='s')
result = ser.loc[[3, 2, 3]]
tm.assert_series_equal(result, expected, check_index_type=True)
exp_idx = Index([3, 2, 3, 'x'], dtype='object', name='idx')
expected = Series([np.nan, 0.2, np.nan, np.nan], index=exp_idx,
name='s')
result = ser.loc[[3, 2, 3, 'x']]
tm.assert_series_equal(result, expected, check_index_type=True)
exp_idx = Index([2, 2, 1], dtype='int64', name='idx')
expected = Series([0.2, 0.2, 0.1], index=exp_idx, name='s')
result = ser.loc[[2, 2, 1]]
tm.assert_series_equal(result, expected, check_index_type=True)
exp_idx = Index([2, 2, 'x', 1], dtype='object', name='idx')
expected = Series([0.2, 0.2, np.nan, 0.1], index=exp_idx, name='s')
result = ser.loc[[2, 2, 'x', 1]]
tm.assert_series_equal(result, expected, check_index_type=True)
        # raises as nothing is in the index
self.assertRaises(KeyError, lambda: ser.loc[[3, 3, 3]])
exp_idx = Index([2, 2, 3], dtype='int64', name='idx')
expected = Series([0.2, 0.2, np.nan], index=exp_idx, name='s')
result = ser.loc[[2, 2, 3]]
tm.assert_series_equal(result, expected, check_index_type=True)
exp_idx = Index([3, 4, 4], dtype='int64', name='idx')
expected = Series([0.3, np.nan, np.nan], index=exp_idx, name='s')
idx = Index([1, 2, 3], dtype='int64', name='idx')
result = Series([0.1, 0.2, 0.3], index=idx, name='s').loc[[3, 4, 4]]
tm.assert_series_equal(result, expected, check_index_type=True)
exp_idx = Index([5, 3, 3], dtype='int64', name='idx')
expected = Series([np.nan, 0.3, 0.3], index=exp_idx, name='s')
idx = Index([1, 2, 3, 4], dtype='int64', name='idx')
result = Series([0.1, 0.2, 0.3, 0.4], index=idx,
name='s').loc[[5, 3, 3]]
tm.assert_series_equal(result, expected, check_index_type=True)
exp_idx = Index([5, 4, 4], dtype='int64', name='idx')
expected = Series([np.nan, 0.4, 0.4], index=exp_idx, name='s')
idx = Index([1, 2, 3, 4], dtype='int64', name='idx')
result = Series([0.1, 0.2, 0.3, 0.4], index=idx,
name='s').loc[[5, 4, 4]]
tm.assert_series_equal(result, expected, check_index_type=True)
exp_idx = Index([7, 2, 2], dtype='int64', name='idx')
expected = Series([0.4, np.nan, np.nan], index=exp_idx, name='s')
idx = Index([4, 5, 6, 7], dtype='int64', name='idx')
result = Series([0.1, 0.2, 0.3, 0.4], index=idx,
name='s').loc[[7, 2, 2]]
tm.assert_series_equal(result, expected, check_index_type=True)
exp_idx = Index([4, 5, 5], dtype='int64', name='idx')
expected = Series([0.4, np.nan, np.nan], index=exp_idx, name='s')
idx = Index([1, 2, 3, 4], dtype='int64', name='idx')
result = Series([0.1, 0.2, 0.3, 0.4], index=idx,
name='s').loc[[4, 5, 5]]
tm.assert_series_equal(result, expected, check_index_type=True)
# iloc
exp_idx = Index([2, 2, 1, 1], dtype='int64', name='idx')
expected = Series([0.2, 0.2, 0.1, 0.1], index=exp_idx, name='s')
result = ser.iloc[[1, 1, 0, 0]]
tm.assert_series_equal(result, expected, check_index_type=True)
def test_partial_set_invalid(self):
# GH 4940
# allow only setting of 'valid' values
orig = tm.makeTimeDataFrame()
df = orig.copy()
        # don't allow non-string inserts
def f():
df.loc[100.0, :] = df.ix[0]
self.assertRaises(TypeError, f)
def f():
df.loc[100, :] = df.ix[0]
self.assertRaises(TypeError, f)
def f():
df.ix[100.0, :] = df.ix[0]
self.assertRaises(TypeError, f)
def f():
df.ix[100, :] = df.ix[0]
self.assertRaises(ValueError, f)
# allow object conversion here
df = orig.copy()
df.loc['a', :] = df.ix[0]
exp = orig.append(pd.Series(df.ix[0], name='a'))
tm.assert_frame_equal(df, exp)
tm.assert_index_equal(df.index,
pd.Index(orig.index.tolist() + ['a']))
self.assertEqual(df.index.dtype, 'object')
def test_partial_set_empty_series(self):
# GH5226
# partially set with an empty object series
s = Series()
s.loc[1] = 1
tm.assert_series_equal(s, Series([1], index=[1]))
s.loc[3] = 3
tm.assert_series_equal(s, Series([1, 3], index=[1, 3]))
s = Series()
s.loc[1] = 1.
tm.assert_series_equal(s, Series([1.], index=[1]))
s.loc[3] = 3.
tm.assert_series_equal(s, Series([1., 3.], index=[1, 3]))
s = Series()
s.loc['foo'] = 1
tm.assert_series_equal(s, Series([1], index=['foo']))
s.loc['bar'] = 3
tm.assert_series_equal(s, Series([1, 3], index=['foo', 'bar']))
s.loc[3] = 4
tm.assert_series_equal(s, Series([1, 3, 4], index=['foo', 'bar', 3]))
def test_partial_set_empty_frame(self):
# partially set with an empty object
# frame
df = DataFrame()
def f():
df.loc[1] = 1
self.assertRaises(ValueError, f)
def f():
df.loc[1] = Series([1], index=['foo'])
self.assertRaises(ValueError, f)
def f():
df.loc[:, 1] = 1
self.assertRaises(ValueError, f)
# these work as they don't really change
# anything but the index
# GH5632
expected = DataFrame(columns=['foo'], index=pd.Index(
[], dtype='int64'))
def f():
df = DataFrame()
df['foo'] = Series([], dtype='object')
return df
tm.assert_frame_equal(f(), expected)
def f():
df = DataFrame()
df['foo'] = Series(df.index)
return df
tm.assert_frame_equal(f(), expected)
def f():
df = DataFrame()
df['foo'] = df.index
return df
tm.assert_frame_equal(f(), expected)
expected = DataFrame(columns=['foo'],
index=pd.Index([], dtype='int64'))
expected['foo'] = expected['foo'].astype('float64')
def f():
df = | DataFrame() | pandas.core.api.DataFrame |
import os
import copy
import datetime
import numpy as np
import xarray as xr
import pandas as pd
from collections import Counter
from ahh.ext import (round_to, get_order_mag, report_err, lonw2e)
from ahh.sci import get_stats, get_norm_anom, get_anom, get_norm
from ahh.era import td2dict
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.ticker as mticker
import matplotlib.patches as mpatches
from matplotlib.colors import LinearSegmentedColormap
from matplotlib.dates import YearLocator, MonthLocator, DayLocator,\
HourLocator, MinuteLocator, AutoDateLocator, \
DateFormatter, AutoDateFormatter
from matplotlib.ticker import MultipleLocator, \
FormatStrFormatter
import matplotlib.dates as mdates
__author__ = '<EMAIL>'
__copyright__ = '<NAME>'
class MissingInput(Exception):
pass
class Unsupported(Exception):
pass
THIS_DIR = os.path.dirname(os.path.realpath(__file__))
DEFAULT = {
'scale': 1,
'projection': None,
'dpi': 105,
'sizes': {
'figure': {'smallest': 6,
'smaller': 9,
'small': 12,
'medium': 14,
'large': 16,
'larger': 20,
'largest': 24
},
'text': {'smallest': 5.5,
'smaller': 7.5,
'small': 12,
'medium': 14,
'large': 16,
'larger': 20,
'largest': 24
},
'line': {'smallest': 0.4,
'smaller': 0.65,
'small': 1,
'medium': 1.15,
'large': 1.3,
'larger': 1.5,
'largest': 2
},
'tick': {'smallest': 0.05,
'smaller': 0.15,
'small': 0.2,
'medium': 0.55,
'large': 1.0,
'larger': 1.25,
'largest': 1.5
},
'bar': {'smallest': 6,
'smaller': 9,
'small': 12,
'medium': 14,
'large': 16,
'larger': 20,
'largest': 24
},
'marker': {'smallest': 6,
'smaller': 9,
'small': 12,
'medium': 14,
'large': 16,
'larger': 20,
'largest': 24
},
'title pad': {'smallest': 0.985,
'smaller': 0.995,
'small': 1.0,
'medium': 1.01,
'large': 1.03,
'larger': 1.05,
'largest': 1.07
},
'pad': {'smallest': 0.15,
'smaller': 0.2,
'small': 0.3,
'medium': 0.45,
'large': 0.6,
'larger': 0.85,
'largest': 1.0
}
},
'styles': {
'color': {'green': '#145222',
'red': '#DF0909',
'orange': '#E68D00',
'pink': '#CE5F5F',
'magenta': '#9E005D',
'teal': '#66A7C5',
'yellow': '#E0D962',
'stone': '#6462E0',
'blue': '#2147B1',
'purple': '#630460',
'black': '#202020',
'light gray': '#DADADA',
'gray': '#5B5B5B',
'white': '#FFFFFF',
},
'tc_color': {'dep': '#7EC6FF',
'storm': '#00F9F3',
'one': '#FFFFC6',
'two': '#FFFF5A',
'three': '#FFD97E',
'four': '#FF9C00',
'five': '#FF5454'
},
'alpha': {'transparent': 0.2,
'translucid': 0.3,
'translucent': 0.5,
'semi opaque': 0.75,
'opaque': 0.95,
}
},
'figtext': {'loc': 'bottom right',
'center bottom': {
'xy_loc': (0.5, 0.05),
'ha': 'center',
'va': 'center',
'lef_marg': 0.05,
'rig_marg': 0.95,
'bot_marg': 0.15,
'top_marg': 0.95
},
'center left': {'xy_loc': (0.1, 0.5),
'ha': 'right',
'va': 'center',
'lef_marg': 0.175,
'rig_marg': 0.95,
'bot_marg': 0.15,
'top_marg': 0.95
},
'center right': {'xy_loc': (0.9, 0.5),
'ha': 'left',
'va': 'center',
'lef_marg': 0.05,
'rig_marg': 0.85,
'bot_marg': 0.05,
'top_marg': 0.95
},
'bottom left': {'xy_loc': (0.1, 0.075),
'ha': 'right',
'va': 'bottom',
'lef_marg': 0.175,
'rig_marg': 0.95,
'bot_marg': 0.05,
'top_marg': 0.95
},
'bottom right': {'xy_loc': (0.9, 0.075),
'ha': 'left',
'va': 'bottom',
'lef_marg': 0.05,
'rig_marg': 0.85,
'bot_marg': 0.05,
'top_marg': 0.95
},
'upper left': {'xy_loc': (0.1, 0.925),
'ha': 'right',
'va': 'top',
'lef_marg': 0.175,
'rig_marg': 0.95,
'bot_marg': 0.05,
'top_marg': 0.95
},
'upper right': {'xy_loc': (0.9, 0.925),
'ha': 'left',
'va': 'top',
'lef_marg': 0.05,
'rig_marg': 0.85,
'bot_marg': 0.05,
'top_marg': 0.95
},
}
}
SIZES = DEFAULT['sizes']
STYLES = DEFAULT['styles']
COLORS = STYLES['color']
ALPHAS = STYLES['alpha']
COLOR_LIST = [COLORS['red'], COLORS['teal'], COLORS['magenta'],
COLORS['stone'], COLORS['green'], COLORS['purple'],
COLORS['blue'], COLORS['light gray'], COLORS['pink'],
COLORS['orange'], COLORS['gray'], COLORS['yellow'],
COLORS['black']]
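# Hypothetical usage sketch (not part of the original module): the DEFAULT
# dict above is a nested lookup table, so style values are pulled out by
# category key and size/color name; the names below come from that dict.
def _default_lookup_sketch():
    text_size = SIZES['text']['medium']   # 14
    line_width = SIZES['line']['small']   # 1
    red = COLORS['red']                   # '#DF0909'
    return text_size, line_width, red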
MISC_COLOR_LIST = [
'#fb2424',
'#24d324',
'#2139d5',
'#21bdbd',
'#cf0974',
'#f96710',
'#ccc506',
'#780e96',
'#32a26e',
'#f89356'
]
WARM_COLOR_LIST = [
'#82050b',
'#d50303',
'#f33f00',
'#f38f00',
'#f0d073'
]
COOL_COLOR_LIST = [
'#b9ddb4',
'#65c2a5',
'#3287bd',
'#4f32bd',
'#84038c'
]
HOT_COLOR_LIST = [
'#641502',
'#ab0b0b',
'#c03210',
'#e27123',
'#ffbb3e',
'#f6cb7b'
]
WET_COLOR_LIST = [
'#badbee',
'#6cb8d0',
'#59ba85',
'#3d9e3a',
'#008000',
'#003333'
]
DRY_COLOR_LIST = [
'#480505',
'#7d3e14',
'#ac6434',
'#cf9053',
'#c9c85b',
'#ebe696'
]
NEON_COLOR_LIST = [
'#7bfc73',
'#b0cd42',
'#cd7842',
'#9a3d5a',
'#46224b'
]
DIV_COLOR_LIST = (WARM_COLOR_LIST + COOL_COLOR_LIST)[::-1]
# https://www.ncl.ucar.edu/Document/Graphics/color_tables.shtml
NCL_CMAPS = pd.read_pickle(os.path.join(THIS_DIR, 'data', 'ncl_cmaps.pkl'))
NCL_CMAP_NAMES = NCL_CMAPS.columns.tolist()
def prettify_ax(ax,
alpha=0.75,
xlabel=None,
ylabel=None,
title=None,
suptitle=False,
matchcolor=True,
legend='best',
title_pad=1.025,
length_scale=False,
ticks=True):
"""
Beautify a plot axis.
:param ax: (matplotlib.axes) - original axis
:param alpha: (float) - how transparent it is
:param xlabel: (str) - label of x axis
:param ylabel: (str) - label of y axis
:param title: (str) - title of subplot
:param suptitle: (boolean) - whether to make a figure title
:param matchcolor: (boolean) - whether to match edgecolor with facecolor
:param legend: (str) - location of legend
:param title_pad: (scalar) - distance between box and title
    :param length_scale: (boolean) - whether to scale the labels based on length
:param ticks: (boolean) - whether to modify ticks
:return ax: (matplotlib.axes) - prettified axis
"""
if xlabel is None:
xlabel = plt.getp(ax, 'xlabel')
if ylabel is None:
ylabel = plt.getp(ax, 'ylabel')
if title is None:
title = plt.getp(ax, 'title')
set_labels(ax, xlabel=xlabel, ylabel=ylabel, suptitle=suptitle,
title=title, title_pad=title_pad, length_scale=length_scale)
plots = plt.getp(ax, 'children')
for plot in plots:
if plot.axes is not None:
try:
if matchcolor:
edgecolor = plt.getp(plot, 'facecolor')
plt.setp(plot,
edgecolor=edgecolor,
alpha=alpha)
except:
plt.setp(plot, alpha=alpha)
set_legend(ax, loc=legend)
set_borders(ax)
if ticks:
set_major_grid(ax)
set_major_ticks(ax)
set_major_tick_labels(ax)
set_minor_grid(ax)
set_minor_ticks(ax)
set_minor_tick_labels(ax)
return ax
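# Hypothetical usage sketch (not part of the original module): assumes a plain
# matplotlib axis with a single labelled line; the data and labels below are
# invented for illustration only.
def _prettify_ax_usage_sketch():
    fig, ax = plt.subplots()
    ax.plot([0, 1, 2, 3], [1, 2, 4, 8], label='demo')
    # apply the house styling: labels, legend, borders, grids and ticks
    ax = prettify_ax(ax, xlabel='step', ylabel='value',
                     title='prettify_ax sketch', legend='best')
    return ax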
def prettify_bokeh(p,
title_size=15,
xlabel_size=15,
ylabel_size=15,
ytick_label_size=10,
xtick_label_size=10,
legend_size=10,
font='century gothic'):
"""
Scales bokeh plot's label sizes based on figure size
:param p: (bokeh.figure) - bokeh figure
:param title_size: (scalar) - title size
:param xlabel_size: (scalar) - x label size
:param ylabel_size: (scalar) - y label size
:param xtick_label_size: (scalar) - x tick label size
:param ytick_label_size: (scalar) - y tick label size
    :param legend_size: (scalar) - size of legend labels
:param font: (str) - font of labels
:return p: (bokeh.figure) - bokeh figure
"""
title_size = str(scale_it_bokeh(p, title_size, 1)) + 'pt'
xlabel_size = str(scale_it_bokeh(p, xlabel_size, 1)) + 'pt'
ylabel_size = str(scale_it_bokeh(p, ylabel_size, 1)) + 'pt'
xtick_label_size = str(scale_it_bokeh(p, xtick_label_size, 1)) + 'pt'
ytick_label_size = str(scale_it_bokeh(p, ytick_label_size, 1)) + 'pt'
legend_size = str(scale_it_bokeh(p, legend_size, 1)) + 'pt'
p.title.text_font_size = title_size
p.title.text_font_style = 'normal'
p.title.text_font = font
p.title.align = 'left'
p.title.offset = 5
p.xaxis.axis_label_text_font_style = 'normal'
p.xaxis.axis_label_text_font = font
p.xaxis.axis_label_text_font_size = xlabel_size
p.xaxis.major_tick_line_color = 'white'
p.xaxis.major_label_text_font_size = xtick_label_size
p.xaxis.axis_line_width = 0.01
p.xaxis.minor_tick_line_color = 'white'
p.yaxis.axis_label_standoff = 16
p.yaxis.axis_label_text_font_style = 'normal'
p.yaxis.axis_label_text_font = font
p.yaxis.axis_label_text_font_size = ylabel_size
p.yaxis.major_tick_line_color = 'white'
p.yaxis.major_label_text_font_size = ytick_label_size
p.yaxis.minor_tick_line_color = 'white'
p.yaxis.axis_line_width = 0.01
p.grid.grid_line_dash = 'solid'
p.legend.location = 'top_left'
p.legend.background_fill_alpha = 0
p.legend.border_line_alpha = 0
p.legend.label_text_font_size = legend_size
return p
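# Hypothetical usage sketch (not part of the original module): assumes bokeh is
# installed; the figure and data are invented, and exact keyword support may
# vary with the installed bokeh version.
def _prettify_bokeh_usage_sketch():
    from bokeh.plotting import figure
    p = figure(title='prettify_bokeh sketch')
    p.line([1, 2, 3, 4], [4, 2, 5, 3])
    # rescale title, axis label and tick label sizes relative to figure size
    p = prettify_bokeh(p, title_size=15, xlabel_size=15, ylabel_size=15)
    return p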
def plot_map(data, lats=None, lons=None, figsize=None, ax=None, stipple=None,
cmap='BlueWhiteOrangeRed', orientation='horizontal', wrap=True,
data_lim=None, vmin=None, vmax=None, balance=True,
lat1=-90, lat2=90, lon1=-180, lon2=180,
latlim=None, lonlim=None, region=None,
title='', title_pad=1.025, suptitle=False,
lat_labels='auto', lon_labels='auto', length_scale=True,
rows=1, cols=1, pos=1, fmt=None,
cbar=True, cbar_label='', shrink=0.25,
contourf=True, interval=None, tick_locs=None,
data2=None, lats2=None, lons2=None,
contour=None, contour2=None,
clabel=True, clabel2=True,
mask_land=False, mask_ocean=False,
land=False, ocean=False, coastlines=True, rivers=False,
countries=False, states=False, lakes=False,
projection=None, central_longitude=0, tight_layout='auto',
dpi=DEFAULT['dpi'], save='', close=True, returnplot=False,
**kwargs
):
"""
Makes a map on a subplot.
:param data: (array) - data to be mapped
:param lats: (array) - array of latitudes
:param lons: (array) - array of longitudes
:param figsize: (str/tup) - wide/tall/auto or tuple width x height of fig
:param ax: (mpl.axes) - plot axis
:param stipple: (array) - array of values to be stippled
:param cmap: (str) - color map
:param orientation: (str) - orientation of color bar
:param wrap: (boolean) - fill missing data at prime meridian
:param data_lim: (tup) - shortcut for vmin and vmax
:param vmin: (scalar) - lower limit of color bar
:param vmax: (scalar) - upper limit of color bar
    :param lat1: (scalar) - lower limit of latitude
    :param lat2: (scalar) - upper limit of latitude
    :param lon1: (scalar) - left limit of longitude
    :param lon2: (scalar) - right limit of longitude
    :param latlim: (tuple) - shortcut for lat1 and lat2
    :param lonlim: (tuple) - shortcut for lon1 and lon2
    :param region: (str) - region to quickly subset lat and lon extent (na or us)
:param title: (str) - title of subplot
:param title_pad: (scalar) - distance between box and title
:param suptitle: (boolean) - whether to make a figure title
:param lat_labels: (array) - list of latitudes to show on map
:param lon_labels: (array) - list of longitudes to show on map
    :param length_scale: (boolean) - whether to scale the labels based on length
:param rows: (int) - number of rows for subplots
:param cols: (int) - number of columns for subplots
:param pos: (int) - position of current subplot
:param fmt: (str) - format of color bar labels
:param cbar: (boolean) - whether to show color bar
:param cbar_label: (str) - label of color bar
:param shrink: (scalar) - how much to shrink the color bar
:param contourf: (boolean) - whether to cartoonize colormap
:param interval: (scalar) - interval of tick marks on color bar
:param tick_locs: (array) - input own tick marks on color bar
:param data2: (array) - contours to be mapped
:param lats2: (array) - array of contour latitudes
:param lons2: (array) - array of contour longitudes
:param contour: (array) - list of values to contour with solid line
:param contour2: (array) - list of values to contour with dashed line
:param clabel: (boolean) - whether to show value on solid contours
:param clabel2: (boolean) - whether to show value on dashed contours
:param mask_land: (boolean) - whether to mask land
:param mask_ocean: (boolean) - whether to mask ocean
:param land: (boolean) - whether to color fill land
    :param ocean: (boolean) - whether to color fill ocean
:param coastlines: (boolean) - whether to draw coastline
:param rivers: (boolean) - whether to draw rivers
:param countries: (boolean) - whether to draw country borders
:param states: (boolean) - whether to draw state borders
:param lakes: (boolean) - whether to color fill lakes
:param projection: (cartopy.crs) - projection of map
:param central_longitude: (scalar) - longitude to center the map on
:param tight_layout: (str) - on or auto adjust layout of subplots
:param dpi: (int) - dots per inch to save the figure
:param save: (str) - if filename is input, will save an image file
:param close: (boolean) - whether to close figure after saving
:param returnplot: (boolean) - whether to return plotted line
:param kwargs: (kwargs) - additional keyword arguments
:return ax: (mpl.axes) - plot axis
:return plot: (mpl.axes) - optional image plot
"""
from ahh.ext import get_ocean_mask
import cartopy.util
if isinstance(data, xr.Dataset):
raise Exception('Please subselect a variable from xr.Dataset!')
if isinstance(data, xr.DataArray):
if lats is None:
lats = data.lat.values
if lons is None:
lons = data.lon.values
data = data.to_masked_array()
if isinstance(lons, xr.DataArray):
lons = lons.values
    if isinstance(lats, xr.DataArray):
        lats = lats.values
if lons is None or lats is None:
raise Exception('Missing lats and lons!')
if data2 is None:
data2 = data
ndim = data.ndim
if ndim > 2:
raise Exception('Data must be 2D, {0}D data was input!'.format(ndim))
if mask_ocean:
data, lons = get_ocean_mask(data, lats, lons, apply_mask=True)
elif mask_land:
data, lons = get_ocean_mask(data, lats, lons,
reverse=True, apply_mask=True)
projection = _get_projection_logic(projection, lons, central_longitude)
if lons2 is None and lats2 is None:
lats2, lons2 = lats, lons
else:
lons2 -= central_longitude
lat1, lat2, lon1, lon2 = _get_lat_lon_lim_logic(latlim, lonlim,
lat1, lat2, lon1, lon2,
region=region,
central_longitude=
central_longitude)
_set_figsize_logic(figsize=figsize, rows=rows,
cols=cols, pos=pos, dpi=dpi)
if ax is None:
ax = plt.subplot(rows, cols, pos, projection=projection)
if wrap:
try:
data, lons = cartopy.util.add_cyclic_point(data, coord=lons)
except:
print('Unable to wrap!')
ax.set_extent([lon1, lon2, lat1, lat2], projection)
_add_features(ax, land, ocean, coastlines,
states, countries, lakes, rivers)
set_latlons(ax, central_longitude=central_longitude,
lat_labels=lat_labels, lon_labels=lon_labels)
if contourf:
try:
contourf[0]
base, base2 = _get_bases_logic(contourf)
vmin, vmax = _get_vmin_vmax_logic(data=contourf,
base=base2,
vmin=vmin,
vmax=vmax,
data_lim=data_lim)
if tick_locs is None:
tick_locs = contourf
except:
base, base2 = _get_bases_logic(data)
vmin, vmax = _get_vmin_vmax_logic(data=data,
base=base2,
vmin=vmin,
vmax=vmax,
data_lim=data_lim)
vmin, vmax = _balance_logic(balance, vmin, vmax)
if interval is None:
interval = base
oom = get_order_mag(np.abs(vmax) - np.abs(vmin))
interval = _get_interval_logic(interval=interval,
vmin=vmin, vmax=vmax,
base=base, oom=oom)
try:
contourf[0]
except:
contourf = np.arange(vmin, vmax + interval, interval)
vmin, vmax = _fix_vmin_vmax_logic(vmin=vmin,
vmax=vmax,
data=contourf,
interval=interval)
contourf, interval = _fix_contourf_logic(contourf=contourf,
interval=interval,
vmin=vmin,
vmax=vmax)
fmt = _get_fmt_logic(fmt=fmt, interval=interval)
cmap = get_cmap(cmap, n=len(contourf))
(tick_locs,
cbar_count) = _get_tick_locs_cbar_count_logic(tick_locs=tick_locs,
vmin=vmin,
vmax=vmax,
interval=interval)
im = ax.contourf(lons, lats, data, levels=contourf, extend='both',
transform=projection, cmap=cmap,
vmin=vmin, vmax=vmax, **kwargs)
drawedges = True
else:
base, base2 = _get_bases_logic(data)
vmin, vmax = _get_vmin_vmax_logic(data=data,
base=base2,
vmin=vmin,
vmax=vmax,
data_lim=data_lim)
vmin, vmax = _balance_logic(balance, vmin, vmax)
cmap = get_cmap(cmap, n=100)
im = ax.pcolormesh(lons, lats, data, transform=projection,
cmap=cmap, vmin=vmin, vmax=vmax, **kwargs)
drawedges = False
if cbar:
set_cbar(ax, im, label=cbar_label, drawedges=drawedges,
shrink=shrink, orientation=orientation,
fmt=fmt, tick_locs=tick_locs)
if stipple:
ax.contourf(lons2, lats2, data2, stipple, colors='none',
hatches=['.', '.', ' '],
transform=projection, **kwargs)
_set_contour_logic(ax, lons2, lats2, data2, contour,
projection, fmt, clabel)
_set_contour_logic(ax, lons2, lats2, data2, contour2,
projection, fmt, clabel2)
set_labels(ax, title=title, title_pad=title_pad,
length_scale=length_scale, suptitle=suptitle)
set_borders(ax)
_save_logic(save=save, tight_layout=tight_layout, close=close,
dpi=dpi, pos=pos, rows=rows, cols=cols)
if returnplot:
return ax, im
else:
return ax
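# Hypothetical usage sketch (not part of the original module): requires
# cartopy; the lat/lon grid and the field plotted on it are synthetic.
def _plot_map_usage_sketch():
    lats = np.arange(-90, 91, 10)
    lons = np.arange(-180, 180, 10)
    data = np.random.randn(len(lats), len(lons))
    # filled-contour global map with a labelled colorbar
    ax = plot_map(data, lats=lats, lons=lons,
                  title='plot_map sketch', cbar_label='synthetic field')
    return ax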
def plot_bounds(ax, lat1=-90, lat2=90, lon1=-180, lon2=180,
latlim=None, lonlim=None,
color='k', linestyle='solid', linewidth=1.25,
fill=False, alpha=0.75, projection=None,
tight_layout='on', dpi=DEFAULT['dpi'], save='',
close=True, **kwargs):
"""
Plot a bounded region on a map. Default is a rectangle with black outlines.
:param ax: (matplotlib.axes) - original axis
:param lat1: (float) - a latitudinal bound (can be any order)
:param lat2: (float) - another latitudinal bound (can be any order)
:param lon1: (float) - a longitudinal bound (can be any order)
:param lon2: (float) - another longitudinal bound (can be any order)
    :param latlim: (tuple) - shortcut for lat1 and lat2
    :param lonlim: (tuple) - shortcut for lon1 and lon2
:param color: (str) - matplotlib abbrieviation of color
:param linestyle: (str) - solid, dashed, dashdot, or dotted linestyle
:param linewidth: (scalar) - how thick line is
:param fill: (boolean) - whether to color in the region
:param alpha: (float) - how transparent it is
:param projection: (cartopy.crs) - map projection
:param tight_layout: (str) - on or auto adjust layout of subplots
:param dpi: (int) - dots per inch to save the figure
:param save: (str) - save figure if string is specified
:param kwargs: (kwargs) - additional keyword arguments
:param close: (boolean) - whether to close figure after saving
"""
projection = _get_projection_logic(projection)
lat1, lat2, lon1, lon2 = _get_lat_lon_lim_logic(latlim, lonlim,
lat1, lat2, lon1, lon2)
width = lon2 - lon1
height = lat2 - lat1
ax.add_patch(mpatches.Rectangle(xy=[lon1, lat1],
width=width,
height=height,
facecolor=color,
edgecolor=color,
linestyle=linestyle,
linewidth=linewidth,
alpha=alpha,
transform=projection,
fill=fill, **kwargs
)
)
_save_logic(save=save, tight_layout=tight_layout, close=close,
dpi=dpi, pos=1, rows=1, cols=1)
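# Hypothetical usage sketch (not part of the original module): assumes `ax` is
# a cartopy map axis, e.g. the one returned by plot_map above; the box corners
# roughly outline the contiguous United States and are illustrative only.
def _plot_bounds_usage_sketch(ax):
    plot_bounds(ax, lat1=25, lat2=50, lon1=-125, lon2=-65,
                color=COLORS['black'], linestyle='dashed', linewidth=1.25)
    return ax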
def plot_line(x, y=None, figsize=None,
ax=None, xlim=None, ylim=None,
stats=False,
norm=False, anom=False, norm_anom=False, cumsum=False,
color=COLORS['red'], alpha=ALPHAS['translucent'],
inherit=True, label='', xlabel='', ylabel='', title='',
suptitle=False,
title_pad=0.965, length_scale=True, linewidth=1, linestyle='-',
xscale='linear', yscale='linear', minor_date_ticks=True,
rows=1, cols=1, pos=1, label_inline=False,
sharex=None, sharey=None,
twinx=None, twiny=None, aligned=True,
xinvert=False, yinvert=False, legend=None,
projection=DEFAULT['projection'],
tight_layout='auto', dpi=DEFAULT['dpi'],
save='', close=True, returnplot=False, **kwargs):
"""
Draw a line on a subplot. Use other functions for full customizability.
:param x: (arr) - input x array
:param y: (arr) - input y array
:param figsize: (str/tup) - wide/tall/auto or tuple width x height of fig
:param ax: (mpl.axes) - plot axis
:param xlim: (tup) - left and right x axis limit in a tuple, respectively
:param ylim: (tup) - left and right y axis limit in a tuple, respectively
:param stats: (boolean/str) - whether to show stats and if str, the loc
:param norm: (boolean) - whether to normalize the y
:param anom: (boolean) - whether to subtract the average of y from y
:param norm_anom: (boolean) - whether to get the normalized anomaly of y
:param cumsum: (boolean) - whether to take the cumulative sum of y
:param color: (str) - color of the plotted line
:param alpha: (scalar/str) - transparency of the plotted line
:param inherit: (boolean) - whether to inherit previous labels
:param label: (str) - label of line to be used in legend
:param xlabel: (str) - label of x axis
:param ylabel: (str) - label of y axis
:param title: (str) - title of subplot
:param title_pad: (scalar) - distance between box and title
:param suptitle: (boolean) - whether to make a figure title
    :param length_scale: (boolean) - whether to scale the labels based on length
:param linewidth: (scalar) - width of the plotted line
:param linestyle: (str) - style of the plotted line
:param xscale: (str) - linear or log scale of x axis
:param yscale: (str) - linear or log scale of y axis
:param minor_date_ticks: (str) - whether to have date ticks on top axis
:param rows: (int) - number of rows for subplots
:param cols: (int) - number of columns for subplots
:param pos: (int) - position of current subplot
    :param label_inline: (boolean/scalar) - whether to label along the line; a scalar gives the x-value of the label
:param sharex: (mpl.axes) - share x axis ticks with another subplot
:param sharey: (mpl.axes) - share y axis ticks with another subplot
:param twinx: (mpl.axes) - share x axis and have another y axis
:param twiny: (mpl.axes) - share x axis and have another x axis
:param aligned: (boolean) - whether to keep left and right ticks aligned
:param xinvert: (boolean) - whether to flip x axis
:param yinvert: (boolean) - whether to flip y axis
:param legend: (str) - location of legend
:param projection: (cartopy.crs) - projection of plotted line
:param tight_layout: (str) - on or auto adjust layout of subplots
:param dpi: (int) - dots per inch to save the figure
:param save: (str) - if filename is input, will save an image file
:param close: (boolean) - whether to close figure after saving
:param returnplot: (boolean) - whether to return plotted line
:param kwargs: (kwargs) - additional keyword arguments
:return ax: (mpl.axes) - plot axis
:return plot: (mpl.axes) - optional line plot
"""
_set_figsize_logic(figsize=figsize, rows=rows,
cols=cols, pos=pos, dpi=dpi)
x = _get_dt_from_pd_logic(x)
x, xtext, xticklabels = _get_xtext_logic(x=x)
x, y = _get_x_to_y_logic(x=x, y=y)
y = _get_stats_logic(ax, y, norm=norm, anom=anom,
norm_anom=norm_anom, cumsum=cumsum)
origin_xlim, xlim = _get_xlim_logic(x, xlim)
origin_ylim, ylim = _get_ylim_logic(y, ylim)
ax, rows, cols = _get_ax_logic(ax=ax, twinx=twinx, twiny=twiny,
rows=rows, cols=cols, pos=pos,
projection=projection)
plot = ax.plot(x, y, **kwargs)
if inherit:
ax, xlabel, ylabel, title, xlim, ylim = \
set_inherited(ax, xlabel, ylabel, title,
xlim, ylim, origin_xlim, origin_ylim)
linewidth = scale_it(ax, linewidth, 0.2)
plt.setp(plot, color=color, alpha=alpha, label=label,
linewidth=linewidth, linestyle=linestyle,
solid_capstyle='round', solid_joinstyle='round',
dash_capstyle='round', dash_joinstyle='round')
# must be after label
if label is not None and label_inline:
if not isinstance(label_inline, bool):
set_inline_label(ax, plot, xval=label_inline)
else:
set_inline_label(ax, plot)
if projection is not None:
plt.setp(plot, transform=projection)
set_axes(ax, xlim=xlim, ylim=ylim,
xscale=xscale, yscale=yscale,
xinvert=xinvert, yinvert=yinvert)
# need ax and ylim set
_show_stats_logic(ax, y, stats)
_settings_logic(ax=ax,
x=x,
twinx=twinx,
twiny=twiny,
xticks=None,
xlabel=xlabel,
ylabel=ylabel,
title=title,
title_pad=title_pad,
suptitle=suptitle,
aligned=aligned,
length_scale=length_scale,
xtext=xtext,
xticklabels=xticklabels,
minor_date_ticks=minor_date_ticks)
set_legend(ax, loc=legend)
rows, cols = _set_share_logic(ax=ax, rows=rows, cols=cols,
sharex=sharex, sharey=sharey,
xlabel=xlabel, ylabel=ylabel)
_save_logic(save=save, tight_layout=tight_layout, close=close,
dpi=dpi, pos=pos, rows=rows, cols=cols)
if returnplot:
return ax, plot
else:
return ax
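# Illustrative usage sketch (not part of the original API surface): draw a
# single red line from a pandas time series with plot_line() as defined above.
# All names below are local to this example.
def _example_plot_line():
    idx = pd.date_range('2000-01-01', periods=120, freq='MS')
    vals = np.linspace(0, 5, 120) + np.random.randn(120)
    # labels and colors map directly onto plot_line's keyword arguments
    return plot_line(idx, vals, label='trend', color=COLORS['red'],
                     xlabel='time', ylabel='value', title='example line')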
def plot_bar(x, y=None, figsize=None, ax=None, xlim=None, ylim=None,
stats=False,
norm=False, anom=False, norm_anom=False, cumsum=False,
matchcolor=True, color=None, facecolor=COLORS['red'],
edgecolor=COLORS['red'], alpha=ALPHAS['semi opaque'],
linewidth=0.25, linestyle='-', title_pad=0.965, length_scale=True,
inherit=True, label='', xlabel='', ylabel='', title='',
suptitle=False,
width='auto', height=None, align='edge',
xscale='linear', yscale='linear', minor_date_ticks=True,
rows=1, cols=1, pos=1, orientation='vertical',
sidebar_count=0, sidebar_pos=1, bar_vals=None,
sharex=None, sharey=None,
twinx=None, twiny=None, aligned=True,
xinvert=False, yinvert=False, legend=None,
tight_layout='auto', dpi=DEFAULT['dpi'],
save='', close=True, returnplot=False, **kwargs):
"""
Draw bars on a subplot. Use other functions for full customizability.
:param x: (arr) - input x array
:param y: (arr) - input y array
:param xlim: (tup) - left and right x axis limit in a tuple, respectively
:param ylim: (tup) - left and right y axis limit in a tuple, respectively
:param figsize: (str/tup) - wide/tall/auto or tuple width x height of fig
:param ax: (mpl.axes) - plot axis
:param stats: (boolean/str) - whether to show stats and if str, the loc
:param norm: (boolean) - whether to normalize the y
:param anom: (boolean) - whether to subtract the average of y from y
:param norm_anom: (boolean) - whether to get the normalized anomaly of y
:param cumsum: (boolean) - whether to take the cumulative sum of y
:param matchcolor: (boolean) - whether to match edgecolor with facecolor
:param color: (str) - facecolor and edgecolor of plotted bar
:param facecolor: (str) - facecolor of plotted bar
:param edgecolor: (str) - edgecolor of plotted bar
:param alpha: (scalar/str) - transparency of the plotted bar
:param linewidth: (scalar) - width of plotted bar edges
:param linestyle: (str) - style of the plotted bar edges
:param title_pad: (scalar) - distance between box and title
:param suptitle: (boolean) - whether to make a figure title
:param inherit: (boolean) - whether to inherit previous labels
:param length_scale: (scalar) - whether to scale the labels based on length
:param label: (str) - label of line to be used in legend
:param xlabel: (str) - label of x axis
:param ylabel: (str) - label of y axis
:param title: (str) - title of subplot
:param width: (str/scalar) - width of plotted bars when vertical
:param height: (str/scalar) - height of plotted bars when horizontal
:param align: (str) - whether to align plotted bar on center or edge
:param xscale: (str) - linear or log scale of x axis
:param yscale: (str) - linear or log scale of y axis
:param minor_date_ticks: (str) - whether to have date ticks on top axis
:param rows: (int) - number of rows for subplots
:param cols: (int) - number of columns for subplots
:param pos: (int) - position of current subplot
:param orientation: (str) - whether to have horizontal or vertical bars
:param sidebar_count: (int) - how many bars per x
:param sidebar_pos: (int) - the location of the side bar
:param bar_vals: (str) - format of bar vals
:param sharex: (mpl.axes) - share x axis ticks with another subplot
:param sharey: (mpl.axes) - share y axis ticks with another subplot
:param twinx: (mpl.axes) - share x axis and have another y axis
:param twiny: (mpl.axes) - share x axis and have another x axis
:param aligned: (boolean) - whether to keep left and right ticks aligned
:param xinvert: (boolean) - whether to flip x axis
:param yinvert: (boolean) - whether to flip y axis
:param legend: (str) - location of legend
:param tight_layout: (str) - on or auto adjust layout of subplots
:param dpi: (int) - dots per inch to save the figure
:param save: (str) - if filename is input, will save an image file
:param close: (boolean) - whether to close figure after saving
    :param returnplot: (boolean) - whether to return plotted bar
    :param kwargs: (kwargs) - additional keyword arguments
:return ax: (mpl.axes) - plot axis
:return plot: (mpl.axes) - optional bar plot
"""
_set_figsize_logic(figsize=figsize, rows=rows,
cols=cols, pos=pos, dpi=dpi,
sidebar_pos=sidebar_pos)
x = _get_dt_from_pd_logic(x)
x, xtext, xticklabels = _get_xtext_logic(x=x)
x, y = _get_x_to_y_logic(x=x, y=y)
y = _get_stats_logic(ax, y, norm=norm, anom=anom,
norm_anom=norm_anom, cumsum=cumsum)
origin_ylim, ylim = _get_ylim_logic(y, ylim)
facecolor, edgecolor = _get_color_logic(color,
facecolor,
edgecolor,
matchcolor)
if width == 'auto':
width = _get_width_logic(x)
if sidebar_count > 1:
        if facecolor != COLORS['red']:
(width, align, x_list) = get_side_bars_recs(x,
sidebar_count,
colors=False)
else:
(width, align,
x_list, colors) = get_side_bars_recs(x,
sidebar_count,
colors=True)
            if facecolor == COLORS['red']:
color = colors[sidebar_pos - 1]
x = x_list[sidebar_pos - 1]
ax, rows, cols = _get_ax_logic(ax=ax, twinx=twinx, twiny=twiny,
rows=rows, cols=cols, pos=pos)
# set width first
if xtext:
align = 'center'
origin_xlim, xlim = _get_xlim_logic(x, xlim, pad=width, align=align)
if sidebar_count > 1 and sidebar_count % 2 == 0:
xlim = (xlim[0] - width * sidebar_count,
xlim[1] + width * (sidebar_count - 1))
elif sidebar_count > 1 and sidebar_count % 2 != 0:
xlim = (xlim[0] - width * sidebar_count,
xlim[1])
if 'vertical' in orientation:
plot = ax.bar(x, y, align=align, label=label, **kwargs)
elif 'horizontal' in orientation:
plot = ax.barh(x, y, height=height, align=align,
label=label, **kwargs)
if inherit:
ax, xlabel, ylabel, title, xlim, ylim = \
set_inherited(ax, xlabel, ylabel, title,
xlim, ylim, origin_xlim, origin_ylim)
linewidth = scale_it(ax, linewidth, 0.2)
plt.setp(plot, facecolor=facecolor, edgecolor=edgecolor, alpha=alpha,
linestyle=linestyle, width=width, linewidth=linewidth)
set_axes(ax,
xlim=xlim,
ylim=ylim,
xscale=xscale,
yscale=yscale,
xinvert=xinvert,
yinvert=yinvert)
    if bar_vals is not False:
if sidebar_count == 0:
sidebar_count = 1
if (len(x) < (50 / sidebar_count * 1.7) and
sidebar_pos == sidebar_count):
            if bar_vals is None:
                interval = np.median(y)
                bar_vals = _get_fmt_logic(fmt=bar_vals,
                                          interval=interval)
set_bar_vals(ax, fmt=bar_vals, orientation='auto',
yinvert=yinvert)
_settings_logic(ax=ax,
x=x,
twinx=twinx,
twiny=twiny,
xticks=None,
xlabel=xlabel,
ylabel=ylabel,
title=title,
title_pad=title_pad,
suptitle=suptitle,
aligned=aligned,
length_scale=length_scale,
xtext=xtext,
xticklabels=xticklabels,
minor_date_ticks=minor_date_ticks)
rows, cols = _set_share_logic(ax=ax, rows=rows, cols=cols,
sharex=sharex, sharey=sharey,
xlabel=xlabel, ylabel=ylabel)
set_legend(ax, loc=legend)
# need ax and ylim set and bar vals shifted
_show_stats_logic(ax, y, stats)
_save_logic(save=save, tight_layout=tight_layout, close=close,
dpi=dpi, pos=pos, rows=rows, cols=cols)
if returnplot:
return ax, plot
else:
return ax
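# Illustrative usage sketch (not part of the original API surface): two bar
# series drawn side by side with sidebar_count/sidebar_pos, reusing the axis
# returned by the first call.
def _example_plot_bar_side_by_side():
    x = np.arange(2000, 2010)
    y1 = np.random.rand(10) * 10
    y2 = np.random.rand(10) * 10
    ax = plot_bar(x, y1, label='series one', sidebar_count=2, sidebar_pos=1)
    # any matplotlib color string works for the second series
    return plot_bar(x, y2, label='series two', color='blue', ax=ax,
                    sidebar_count=2, sidebar_pos=2)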
def plot_scatter(x, y=None, figsize=None, ax=None,
xlim=None, ylim=None,
stats=False,
norm=False, anom=False, norm_anom=False, cumsum=False,
matchcolor=True,
data_lim=None, vmin=None, vmax=None,
color=None, facecolor=COLORS['red'], edgecolor=COLORS['red'],
alpha=ALPHAS['translucent'],
linewidth=0.25, size=5, marker='o', s=None,
c=None, cbar=True, cbar_label='', shrink=0.35, cmap=None,
orientation='horizontal', interval=None, tick_locs=None,
inherit=True, label='', xlabel='', ylabel='',
title='', title_pad=0.965, suptitle=False, length_scale=True,
xscale='linear', yscale='linear', minor_date_ticks=True,
rows=1, cols=1, pos=1, fmt=None, pad=0.225,
sharex=None, sharey=None,
twinx=None, twiny=None, aligned=True,
xinvert=False, yinvert=False, legend=None,
projection=DEFAULT['projection'],
tight_layout='auto', dpi=DEFAULT['dpi'],
save='', close=True, returnplot=False, **kwargs):
"""
Draw markers on a subplot. Use other functions for full customizability.
:param x: (arr) - input x array
:param y: (arr) - input y array
:param figsize: (str/tup) - wide/tall/auto or tuple width x height of fig
:param ax: (mpl.axes) - plot axis
:param stats: (boolean/str) - whether to show stats and if str, the loc
:param xlim: (tup) - left and right x axis limit in a tuple, respectively
:param ylim: (tup) - left and right y axis limit in a tuple, respectively
:param norm: (boolean) - whether to normalize the y
:param anom: (boolean) - whether to subtract the average of y from y
:param norm_anom: (boolean) - whether to get the normalized anomaly of y
:param cumsum: (boolean) - whether to take the cumulative sum of y
:param data_lim: (tup) - shortcut for vmin and vmax
:param vmin: (scalar) - lower limit of color bar
:param vmax: (scalar) - upper limit of color bar
:param matchcolor: (boolean) - whether to match edgecolor with facecolor
:param color: (str) - facecolor and edgecolor of plotted scatter marker
:param facecolor: (str) - facecolor of plotted scatter marker
:param edgecolor: (str) - edgecolor of plotted scatter marker
:param alpha: (scalar/str) - transparency of the plotted scatter marker
:param linewidth: (scalar) - width of plotted scatter marker edges
:param size: (scalar) - size of plotted scatter marker
:param marker: (scalar) - style of plotted scatter marker
:param s: (arr) - array to map size to
:param c: (arr) - array to map color to
:param cbar: (boolean) - whether to show color bar
:param cbar_label: (str) - label of color bar
:param shrink: (scalar) - size of color bar
:param cmap: (str) - color map
:param orientation: (str) - orientation of color bar
:param interval: (scalar) - interval of tick marks on color bar
:param tick_locs: (array) - input own tick marks on color bar
:param inherit: (boolean) - whether to inherit previous labels
:param label: (str) - label of line to be used in legend
:param xlabel: (str) - label of x axis
:param ylabel: (str) - label of y axis
:param title: (str) - title of subplot
:param title_pad: (scalar) - distance between box and title
:param suptitle: (boolean) - whether to make a figure title
:param length_scale: (scalar) - whether to scale the labels based on length
:param xscale: (str) - linear or log scale of x axis
:param yscale: (str) - linear or log scale of y axis
:param minor_date_ticks: (str) - whether to have date ticks on top axis
:param rows: (int) - number of rows for subplots
:param cols: (int) - number of columns for subplots
:param pos: (int) - position of current subplot
:param fmt: (str) - format of color bar labels
:param pad: (scalar) - padding of color bar from plot
:param sharex: (mpl.axes) - share x axis ticks with another subplot
:param sharey: (mpl.axes) - share y axis ticks with another subplot
:param twinx: (mpl.axes) - share x axis and have another y axis
:param twiny: (mpl.axes) - share x axis and have another x axis
:param aligned: (boolean) - whether to keep left and right ticks aligned
:param xinvert: (boolean) - whether to flip x axis
:param yinvert: (boolean) - whether to flip y axis
:param legend: (str) - location of legend
:param projection: (cartopy.crs) - projection of plotted scatter
:param tight_layout: (str) - on or auto adjust layout of subplots
:param dpi: (int) - dots per inch to save the figure
:param save: (str) - if filename is input, will save an image file
:param close: (boolean) - whether to close figure after saving
:param returnplot: (boolean) - whether to return plotted scatter
:param kwargs: (kwargs) - additional keyword arguments
:return ax: (mpl.axes) - plot axis
:return plot: (mpl.axes) - optional scatter plot
"""
_set_figsize_logic(figsize=figsize, rows=rows,
cols=cols, pos=pos, dpi=dpi)
x = _get_dt_from_pd_logic(x)
x, xtext, xticklabels = _get_xtext_logic(x=x)
x, y = _get_x_to_y_logic(x, y)
y = _get_stats_logic(ax, y, norm=norm, anom=anom,
norm_anom=norm_anom, cumsum=cumsum)
origin_ylim, ylim = _get_ylim_logic(y, ylim)
origin_xlim, xlim = _get_xlim_logic(x, xlim)
ax, rows, cols = _get_ax_logic(ax=ax, twinx=twinx, twiny=twiny,
rows=rows, cols=cols, pos=pos,
projection=projection)
if c is not None:
base, base2 = _get_bases_logic(c)
vmin, vmax = _get_vmin_vmax_logic(data=c, base=base2,
vmin=vmin, vmax=vmax,
data_lim=data_lim)
oom = get_order_mag(vmax - vmin)
interval = _get_interval_logic(interval=interval,
vmin=vmin, vmax=vmax,
base=base, oom=oom)
fmt = _get_fmt_logic(fmt=fmt, interval=interval)
vmin, vmax = _fix_vmin_vmax_logic(vmin=vmin, vmax=vmax, data=c,
interval=interval)
(tick_locs,
cbar_count) = _get_tick_locs_cbar_count_logic(tick_locs=tick_locs,
vmin=vmin, vmax=vmax,
interval=interval)
if cmap is None:
cmap = 'viridis'
cmap = get_cmap(cmap, cbar_count)
edgecolor = None
facecolor = COLORS['gray']
if s is not None:
size = np.abs(s)
else:
size = scale_it(ax, np.abs(size), 25, exp=False)
plot = ax.scatter(x, y, marker=marker,
linewidths=linewidth,
s=size, c=c, cmap=cmap,
vmin=vmin, vmax=vmax,
**kwargs
)
if cbar and cmap is not None:
set_cbar(ax, plot, label=cbar_label, fmt=fmt,
pad=pad, shrink=shrink,
tick_size=8, label_size=10,
orientation=orientation,
tick_locs=tick_locs)
else:
if color is not None:
facecolor = color
edgecolor = color
if matchcolor:
edgecolor = facecolor
if inherit:
ax, xlabel, ylabel, title, xlim, ylim = \
set_inherited(ax, xlabel, ylabel, title,
xlim, ylim, origin_xlim, origin_ylim)
linewidth = scale_it(ax, linewidth, 0.2)
if projection is not None:
plt.setp(plot, transform=projection)
plt.setp(plot, facecolor=facecolor, edgecolor=edgecolor,
alpha=alpha, label=label)
set_axes(ax, xlim=xlim, ylim=ylim,
xscale=xscale, yscale=yscale,
xinvert=xinvert, yinvert=yinvert)
# need ax and ylim set
_show_stats_logic(ax, y, stats)
_settings_logic(ax=ax,
x=x,
twinx=twinx,
twiny=twiny,
xticks=None,
xlabel=xlabel,
ylabel=ylabel,
title=title,
title_pad=title_pad,
suptitle=suptitle,
aligned=aligned,
length_scale=length_scale,
xtext=xtext,
xticklabels=xticklabels,
minor_date_ticks=minor_date_ticks)
rows, cols = _set_share_logic(ax=ax, rows=rows, cols=cols,
sharex=sharex, sharey=sharey,
xlabel=xlabel, ylabel=ylabel)
set_legend(ax, loc=legend)
_save_logic(save=save, tight_layout=tight_layout, close=close,
dpi=dpi, pos=pos, rows=rows, cols=cols)
if returnplot:
return ax, plot
else:
return ax
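# Illustrative usage sketch (not part of the original API surface): markers
# colored by a third array via c=, which triggers plot_scatter's color bar
# branch above.
def _example_plot_scatter_colored():
    x = np.random.rand(50)
    y = np.random.rand(50)
    z = x * y  # values mapped onto the colormap
    return plot_scatter(x, y, c=z, cmap='viridis', cbar=True,
                        cbar_label='x * y', xlabel='x', ylabel='y',
                        title='example scatter')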
def plot(*plot_args, **plot_kwargs):
"""
    Plot multiple line/bar/scatter plots at once using this syntax:
    x, y, 'label', 'ptype/color/linestyle/marker'
Example - plot a red dashed line with circle marker and a black bar plot
plot(x, y, 'line plot', 'line/red/--/o', x2, y2, 'bar plot', 'bar/black')
Equivalent shorthand
plot(x, y, 'line plot', 'l/r/--/o', x2, y2, 'bar plot', 'b/k')
Example 2 - plot a green solid line, blue bar plot, yellow scatter plot
with a title, ylabel, and xlabel
plot(x, y, 'labl', 'l/r', x2, y2, 'labl2', 'b/b', x3, y3, 'labl3', 's/y',
title='title', ylabel='a ylabel', xlabel='one xlabel')
Example 3 - adjust figsize while still stacking all the plots
plot(x, y, 'labl', 'l', x2, y2, 'labl2', 'b', figsize=(8, 5), stack=True)
Example 4 - plot two separate figures
plot(x, y, 'labl', 'l', x2, y2, 'labl2', 'b', stack=False)
    :param stack: (bool) - whether to keep stacking if a figsize input is provided
:return ax_list: (list) - list of axes
"""
plot_inputs = zip(plot_args[::4],
plot_args[1::4],
plot_args[2::4],
plot_args[3::4])
figsize = plot_kwargs.get('figsize', 'na')
stack = plot_kwargs.get('stack', True)
if figsize == 'na':
set_figsize()
ax_list = []
for i, plot_input in enumerate(plot_inputs):
if stack and i > 0:
plot_kwargs['figsize'] = 'na'
x, y, label, style = plot_input
ptype, color, linestyle, marker = _parse_style(style)
vis_dict = dict(label=label, color=color,
linestyle=linestyle, marker=marker,
**plot_kwargs)
if ptype in ['b', 'bar']:
_pop_keys(vis_dict, 'bar')
ax = plot_bar(x, y, **vis_dict)
elif ptype in ['s', 'scatter']:
_pop_keys(vis_dict, 'scatter')
if vis_dict['marker'] == '':
vis_dict['marker'] = 'o'
ax = plot_scatter(x, y, **vis_dict)
else:
_pop_keys(vis_dict, 'line')
ax = plot_line(x, y, **vis_dict)
ax_list.append(ax)
return ax_list
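# Illustrative usage sketch (not part of the original API surface): the
# shorthand style strings documented in plot()'s docstring, combining a line
# and a bar series in one call.
def _example_plot_shorthand():
    x = np.arange(12)
    y = np.random.rand(12)
    y2 = np.random.rand(12) * 3
    # red dashed line with circle markers plus black bars on the same axis
    return plot(x, y, 'line plot', 'l/r/--/o',
                x, y2, 'bar plot', 'b/k',
                title='example', ylabel='value')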
def plot_hist(x=None, y=None, ptype='bar', align='edge', bar_vals=None,
width='auto', norm=False, cumsum=False, **kwargs):
"""
Plot histogram using plot line/bar/scatter.
:param x: (int/arr) - number of bins or array of bin edges
:param y: (arr) - array of items
:param ptype: (str) - whether to plot line, bar, or scatter
:param align: (str) - whether to align bars on edge or center
:param bar_vals: (str) - format of bar vals
:param width: (str/scalar) - width of plotted bars when vertical
:param norm: (boolean) - whether to normalize the y
:param cumsum: (boolean) - whether to take the cumulative sum of y
:param kwargs: (kwargs) - additional keyword arguments
:return ax: (mpl.axes) - plot axis
"""
if y is None:
y = x
x = None
try:
int(x)
refresh_x = x + 1
    except (TypeError, ValueError):
refresh_x = 0
    if norm:
        # normalize by weighting each sample by 1/N; np.histogram's removed
        # `normed` argument is no longer used
        weights = np.ones_like(y) / float(len(y))
    else:
        weights = None
try:
if x is None or refresh_x:
if not refresh_x:
ymin = np.min(y)
ymax = np.max(y)
oom = get_order_mag(ymax - ymin)
base = np.power(5, oom)
ymin = round_to(ymin, base=base)
ymax = round_to(ymax, base=base)
x = np.arange(ymin, ymax, base)
if ymin == ymax or refresh_x:
ymin = np.min(y) # refresh it
ymax = np.max(y)
if refresh_x == 0:
refresh_x += 7
x = np.linspace(ymin, ymax, refresh_x)
y = np.clip(y, np.min(x), np.max(x))
        hist_counts, bin_edges = np.histogram(y, x, weights=weights)
x, y = bin_edges[:-1], hist_counts
if width == 'auto':
width = np.average(np.diff(x))
    except Exception:
text_hist = Counter(y)
y = list(text_hist.values())
x = list(text_hist.keys())
align = 'center'
if bar_vals is None:
if not norm:
bar_vals = '%1d'
else:
bar_vals = '%.2f'
    if ptype == 'bar':
        return plot_bar(x, y, align=align, width=width, bar_vals=bar_vals,
                        cumsum=cumsum, **kwargs)
    elif ptype == 'scatter':
        return plot_scatter(x, y, cumsum=cumsum, **kwargs)
    else:
        return plot_line(x, y, cumsum=cumsum, **kwargs)
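# Illustrative usage sketch (not part of the original API surface): histogram
# of a normal sample, first as raw counts and then normalized to fractions.
def _example_plot_hist():
    data = np.random.randn(500)
    plot_hist(y=data)                    # automatic bin edges, raw counts
    return plot_hist(y=data, norm=True)  # same data, fraction per bin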
def plot_heatmap(df, figsize=None, ax=None, mask=None, mask2=None,
size=12, cmap='RdBu_r', orientation='vertical',
edgecolor=COLORS['black'],
xrotation=0, yrotation=0,
data_lim=None, vmin=None, vmax=None,
inherit=True, label='', xlabel='', ylabel='',
title='', title_pad=1.025, suptitle=False, length_scale=True,
xticklabels=None, yticklabels=None,
rows=1, cols=1, pos=1, fmt=None, pad=0.3,
cbar=True, cbar_label='', shrink=0.2,
interval=None, tick_locs=None,
xinvert=False, yinvert=True,
tight_layout='auto', dpi=DEFAULT['dpi'],
save='', close=True, returnplot=False, **kwargs):
"""
Draw a heatmap on a subplot. Use other functions for full customizability.
:param df: (pd.DataFrame) - dataframe to be converted into heatmap
:param mask: (pd.DataFrame) - dataframe containing booleans to show text
:param mask2: (pd.DataFrame) - dataframe containing booleans to show text
:param size: (scalar) - size of text over masks
:param figsize: (str/tup) - wide/tall/auto or tuple width x height of fig
:param ax: (mpl.axes) - plot axis
:param cmap: (str) - color map
:param orientation: (str) - orientation of color bar
:param data_lim: (tup) - shortcut for vmin and vmax
:param vmin: (scalar) - lower limit of color bar
:param vmax: (scalar) - upper limit of color bar
:param xrotation: (scalar) - degrees to rotate x major tick labels
:param yrotation: (scalar) - degrees to rotate y major tick labels
:param inherit: (boolean) - whether to inherit previous labels
:param label: (str) - label of line to be used in legend
:param xlabel: (str) - label of x axis
:param ylabel: (str) - label of y axis
:param title: (str) - title of subplot
:param title_pad: (scalar) - distance between box and title
:param suptitle: (boolean) - whether to make a figure title
:param length_scale: (scalar) - whether to scale the labels based on length
:param xticklabels: (list) - manually set x major tick labels
:param yticklabels: (list) - manually set y major tick labels
:param rows: (int) - number of rows for subplots
:param cols: (int) - number of columns for subplots
:param pos: (int) - position of current subplot
:param fmt: (str) - format of color bar labels
:param pad: (scalar) - padding of color bar
:param cbar: (boolean) - whether to show color bar
:param cbar_label: (str) - label of color bar
:param shrink: (scalar) - size of color bar
:param interval: (scalar) - interval of tick marks on color bar
:param tick_locs: (array) - input own tick marks on color bar
:param xinvert: (boolean) - whether to flip x axis
:param yinvert: (boolean) - whether to flip y axis
:param tight_layout: (str) - on or auto adjust layout of subplots
:param dpi: (int) - dots per inch to save the figure
:param save: (str) - if filename is input, will save an image file
:param close: (boolean) - whether to close figure after saving
:param returnplot: (boolean) - whether to return plotted heatmap
:param kwargs: (kwargs) - additional keyword arguments
:return ax: (mpl.axes) - plot axis
:return plot: (mpl.axes) - optional line plot
"""
_set_figsize_logic(figsize=figsize, rows=rows,
cols=cols, pos=pos, dpi=dpi)
if ax is None:
ax = plt.subplot(rows, cols, pos)
base, base2 = _get_bases_logic(df)
vmin, vmax = _get_vmin_vmax_logic(data=df,
base=base2,
vmin=vmin,
vmax=vmax,
data_lim=data_lim)
oom = get_order_mag(vmax - vmin)
interval = _get_interval_logic(interval=interval,
vmin=vmin, vmax=vmax,
base=base, oom=oom)
fmt = _get_fmt_logic(fmt=fmt, interval=interval)
vmin, vmax = _fix_vmin_vmax_logic(vmin=vmin, vmax=vmax, data=df,
interval=interval)
(tick_locs,
cbar_count) = _get_tick_locs_cbar_count_logic(tick_locs=tick_locs,
vmin=vmin, vmax=vmax,
interval=interval)
cmap = get_cmap(cmap, cbar_count)
im = ax.pcolor(df,
cmap=cmap,
vmin=vmin,
vmax=vmax,
edgecolors=edgecolor,
**kwargs)
ax.set_yticks(np.arange(df.shape[0]) + 0.5, minor=False)
ax.set_xticks(np.arange(df.shape[1]) + 0.5, minor=False)
ax.patch.set(hatch='+',
edgecolor=COLORS['gray'],
color=COLORS['gray'],
alpha=0.45, lw=0.25)
    if xinvert:
        ax.invert_xaxis()
if yinvert:
ax.invert_yaxis()
if xticklabels is None:
xticklabels = df.columns
if yticklabels is None:
yticklabels = df.index
set_major_tick_labels(ax,
xticklabels=xticklabels,
yticklabels=yticklabels,
xrotation=xrotation,
yrotation=yrotation)
set_labels(ax, xlabel=xlabel, ylabel=ylabel, suptitle=suptitle,
title=title, title_pad=title_pad, length_scale=length_scale)
ax.grid(False)
if cbar:
set_cbar(ax, im, label=cbar_label, fmt=fmt,
pad=pad, shrink=shrink,
tick_size=8, label_size=10,
orientation=orientation,
tick_locs=tick_locs)
df_nan = np.ma.masked_invalid(df)
if mask is not None:
_set_heatmap_mask(ax, df_nan, mask, size)
if mask2 is not None:
_set_heatmap_mask(ax, df_nan, mask2, size)
_save_logic(save=save, tight_layout=tight_layout, close=close,
dpi=dpi, pos=pos, rows=rows, cols=cols)
if returnplot:
return ax, im
else:
return ax
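# Illustrative usage sketch (not part of the original API surface): heatmap of
# a small labeled DataFrame; tick labels default to the index and columns.
def _example_plot_heatmap():
    df = pd.DataFrame(np.random.rand(4, 5),
                      index=['a', 'b', 'c', 'd'],
                      columns=['v', 'w', 'x', 'y', 'z'])
    return plot_heatmap(df, cmap='RdBu_r', cbar_label='random value',
                        title='example heatmap')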
def plot_cbar(cmap,
fig=None,
left=0.05,
bottom=0.95,
width=0.95,
height=0.05,
label='',
fmt='%1.0f',
label_size=12,
drawedges=True,
label_color=COLORS['gray'],
ticks=None,
boundaries=None,
tick_size=8,
tick_color=COLORS['gray'],
color=COLORS['black'],
pad=0.075,
aspect=25.5,
shrink=0.2,
length=0,
tick_width=0.25,
direction='out',
orientation='horizontal',
cax=None,
**kwargs):
"""
Plot lone color bar.
:param cmap: (list/str) - a list containing RGB or Python/NCL cmap name
    :param fig: (mpl.figure) - figure to draw the color bar on; created if None
:param left: (scalar) - left padding from figure edge
:param bottom: (scalar) - bottom padding from figure left edge
:param width: (scalar) - percent width of figure
:param height: (scalar) - percent height of figure
    :param label: (str) - label of color bar
    :param fmt: (str) - format of color bar labels
:param label_size: (scalar) - size of color bar label
:param label_color: (scalar) - color of color bar label
    :param ticks: (array) - input own tick marks on color bar
    :param boundaries: (array) - boundaries between color segments (defaults to ticks)
:param tick_size: (scalar) - size of color bar tick labels
:param tick_color: (scalar) - color of color bar tick labels
:param color: (scalar) - color of color bar tick marks
:param drawedges: (scalar) - whether to draw color edges
:param pad: (scalar) - padding of color bar from plot
:param aspect: (int) - aspect ratio of color bar
:param shrink: (scalar) - size of color bar
:param length: (scalar) - length of color bar tick marks
:param tick_width: (scalar) - width of color bar tick marks
:param direction: (str) - direction of color bar tick marks
:param orientation: (str) - orientation of color bar
:param cax: (mpl.axes) - plot axis to attach to
:param kwargs: (kwargs) - additional keyword arguments
:return cbar: (mpl.ColorBar) - matplotlib color bar
"""
if fig is None:
fig = set_figsize(8, 4)
if boundaries is None and ticks is not None:
boundaries = ticks
ax = fig.add_axes([left, bottom, width, height])
cmap = get_cmap(cmap)
cbar = mpl.colorbar.ColorbarBase(ax, ticks=ticks,
boundaries=boundaries,
cmap=cmap,
orientation=orientation)
cbar.ax.tick_params(labelsize=tick_size,
direction=direction,
length=length,
width=tick_width,
tick2On=True,
labelcolor=label_color,
color=color)
cbar.set_label(label,
size=label_size,
color=label_color)
return cbar
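# Illustrative usage sketch (not part of the original API surface): a lone,
# horizontal color bar built from a named matplotlib colormap.
def _example_plot_cbar():
    ticks = np.arange(0, 1.1, 0.1)
    return plot_cbar('viridis', ticks=ticks, label='fraction', fmt='%.1f')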
def init_map(lat1=-90, lat2=90, lon1=-180, lon2=180,
latlim=None, lonlim=None, region=None,
rows=1, cols=1, pos=1, figsize=None, ax=None,
title='', suptitle=False,
length_scale=True, lat_labels='auto', lon_labels='auto',
projection=DEFAULT['projection'], central_longitude=0,
land=False, ocean=False, lakes=True,
coastlines=True, states=True, countries=True, rivers=False,
tight_layout='auto', dpi=DEFAULT['dpi'], save='', close=True):
"""
Initialize a projected map.
    :param lat1: (scalar) - lower limit of latitude
    :param lat2: (scalar) - upper limit of latitude
    :param lon1: (scalar) - left limit of longitude
    :param lon2: (scalar) - right limit of longitude
    :param latlim: (tuple) - shortcut for lat1 and lat2
    :param lonlim: (tuple) - shortcut for lon1 and lon2
    :param region: (str) - region to quickly subset lat and lon extent (na or us)
:param rows: (int) - number of rows for subplots
:param cols: (int) - number of columns for subplots
:param pos: (int) - position of current subplot
:param figsize: (str/tup) - wide/tall/auto or tuple width x height of fig
:param ax: (mpl.axes) - plot axis
:param title: (str) - title of subplot
:param length_scale: (scalar) - whether to scale the labels based on length
:param lat_labels: (array) - list of latitudes to show on map
:param lon_labels: (array) - list of longitudes to show on map
:param projection: (cartopy.crs) - projection of map
:param central_longitude: (scalar) - longitude to center the map on
:param land: (boolean) - whether to color fill land
    :param ocean: (boolean) - whether to color fill ocean
:param lakes: (boolean) - whether to color fill lakes
:param coastlines: (boolean) - whether to draw coastline
:param states: (boolean) - whether to draw state borders
:param countries: (boolean) - whether to draw country borders
:param rivers: (boolean) - whether to draw rivers
:param tight_layout: (str) - on or auto adjust layout of subplots
:param dpi: (int) - dots per inch to save the figure
:param save: (str) - if filename is input, will save an image file
:param close: (boolean) - whether to close figure after saving
:return ax: (mpl.axes) - plot axis
"""
_set_figsize_logic(figsize=figsize, rows=rows,
cols=cols, pos=pos, dpi=dpi)
projection = _get_projection_logic(projection)
if ax is None:
ax = plt.subplot(rows, cols, pos, projection=projection)
lat1, lat2, lon1, lon2 = _get_lat_lon_lim_logic(latlim, lonlim,
lat1, lat2, lon1, lon2,
region=region,
central_longitude=
central_longitude)
ax.set_extent([lon1, lon2, lat1, lat2], projection)
_add_features(ax, land, ocean, coastlines,
states, countries, lakes, rivers)
set_latlons(ax,
lat_labels=lat_labels, lon_labels=lon_labels,
central_longitude=central_longitude)
set_labels(ax, title=title, length_scale=length_scale)
_save_logic(save=save, tight_layout=tight_layout, close=close,
dpi=dpi, pos=pos, rows=rows, cols=cols)
return ax
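# Illustrative usage sketch (not part of the original API surface, requires
# cartopy): set up a map over a lat/lon box and return the projected axis for
# further plotting.
def _example_init_map():
    return init_map(latlim=(20, 55), lonlim=(-130, -60),
                    title='example map', states=True, coastlines=True)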
def get_side_bars_recs(x, sidebar_count, colors=True):
"""
Output some recommended values to show side by side bars.
:param x: (arr) - input x array
:param sidebar_count: (int) - how many bars side by side
:param colors: (boolean) - whether to return colors
    :return width: (scalar) - adjusted width of the bars
:return align: (str) - edge or center based on sidebar_count
:return x_list: (list) - adjusted x values
:return colors: (list) - list of colors
"""
if sidebar_count == 0:
        raise ValueError('Unable to have 0 side bars per x!')
if sidebar_count == 1:
if colors:
return 0.833333333, 'center', [x], [COLOR_LIST[0]]
else:
return 0.833333333, 'center', [x]
if sidebar_count % 2 == 0:
align = 'edge'
else:
align = 'center'
width = _get_width_logic(x) / sidebar_count
x_shift_end = sidebar_count // 2
x_shift_start = -(sidebar_count - x_shift_end)
x_shifts = np.arange(x_shift_start, x_shift_end)
    if align == 'center':
extra_x_shift = len(x_shifts) // 2 + 1
x_shifts += extra_x_shift
x_list = []
for x_shift in x_shifts:
try:
x_list.append(mdates.date2num(x) + width * x_shift)
        except Exception:
x_list.append(x + width * x_shift)
if colors:
colors = COLOR_LIST[0:sidebar_count]
return width, align, x_list, colors
else:
return width, align, x_list
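# Illustrative usage sketch (not part of the original API surface): unpack the
# recommended width, alignment, shifted x positions, and colors for three
# side-by-side bar series.
def _example_get_side_bars_recs():
    x = np.arange(10)
    width, align, x_list, colors = get_side_bars_recs(x, 3, colors=True)
    # one shifted x array and one color per series
    return list(zip(x_list, colors)), width, align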
def set_bar_vals(ax, size=7.5,
color=COLORS['black'],
alpha=ALPHAS['translucent'],
orientation='auto',
inherit_color=False,
pad_remover=1,
fmt='%d',
yinvert=False):
"""
Label the rectangles in bar plots with its respective values.
Adaptation of: "http://composition.al/blog/2015/11/29/a-better-way-to-\
add-labels-to-bar-charts-with-matplotlib/"
:param ax: (mpl.axes) - plot axis
:param size: (scalar) - size of bar labels
:param color: (str) - color of bar labels
:param alpha: (scalar/str) - transparency of bar labels
:param orientation: (str) - orientation of the labels
:param inherit_color: (boolean) - whether to inherit color for labels
:param pad_remover: (scalar): - space to remove between ylim and labels
:param fmt: (str) - format of color bar labels
    :param yinvert: (boolean) - whether to invert the y values of labels
:return ax: (mpl.axes) - plot axis
"""
try:
pad_remover = scale_it(ax, pad_remover, 0.1, exp=True)
xmin, xmax = ax.get_xlim()
ymin, ymax = ax.get_ylim()
if xmin > xmax:
xmin, xmax = xmax, xmin
if ymin > ymax:
ymin, ymax = ymax, ymin
y_height = ymax - ymin
rects = ax.patches
size = scale_it(ax, size, 1, exp=True) / np.log(len(rects))
if len(rects) > 5:
size *= 3
        if orientation == 'auto':
if len(str(int(ymax))) > 2:
orientation = 'vertical'
else:
orientation = 'horizontal'
        if orientation == 'vertical':
rotation = 90
height_mult = 0.02
limit_mult = 2
else:
rotation = 0
height_mult = 0.015
limit_mult = 1
pos_ct = 1 # to dampen future
neg_ct = 1 # ylim increases
orient_add = 0
for rect in rects:
x = plt.getp(rect, 'x')
y = rect.get_height()
            if plt.getp(ax, 'yscale') == 'log':
label_position = y
                if orientation == 'vertical':
label_position += y / 50
else:
label_position = y + (y_height * height_mult)
if y < 0:
va = 'top'
                if orientation == 'horizontal':
orient_add = label_position / 60
label_position += (y_height * -2 * height_mult)
else:
va = 'bottom'
if label_position >= (ymax - ymax / 5):
ymax += (ymax * pad_remover / 6.5 /
pos_ct * limit_mult + orient_add)
pos_ct += 15
if label_position <= (ymin - ymin / 5):
ymin += (ymin * pad_remover / 8 /
neg_ct * limit_mult + orient_add)
neg_ct += 15
if inherit_color:
color = plt.getp(rect, 'facecolor')
ax.set_ylim(ymin, ymax)
if yinvert:
label_position *= -1
if (ymin <= y < ymax) and (xmin < x < xmax):
ax.text(rect.get_x() + rect.get_width() / 2., label_position,
fmt % y, size=size, alpha=alpha, color=color,
ha='center', va=va, rotation=rotation)
    except Exception:
print('Unable to set bar vals!')
return ax
def set_inline_label(ax, line, label=None,
xval=None, size=6, alpha=ALPHAS['translucent'],
color=None, ha='center', va='center',
bbox=dict(facecolor=COLORS['white'],
edgecolor=COLORS['white'],
alpha=ALPHAS['transparent']),
**kwargs):
"""
Automatically adds an inline label to line
https://github.com/cphyc/matplotlib-label-lines
:param ax: (mpl.axes) - plot axis
:param line: (mpl.Line2D) - line to be labeled
:param label: (str) - label of line
:param xval: (scalar) - x value of label; defaults to median
:param size: (scalar) - size of label
:param alpha: (scalar) - opacity of label
:param ha: (str) - horizontal alignment of label
:param va: (str) - vertical alignment of label
:param bbox: (dict) - dictionary of box surrounding label
:param kwargs: (kwargs) - additional keyword arguments
"""
if isinstance(line, list):
line = line[0]
xdata = line.get_xdata()
ydata = line.get_ydata()
try:
if xval is None:
xval = np.median(xdata)
    except Exception:
xval = xdata[int(len(xdata) / 2)]
if isinstance(xval, datetime.datetime):
xdata = pd.to_datetime(xdata).to_pydatetime()
elif isinstance(xval, str):
xval = pd.to_datetime(xval).to_pydatetime()
xdata = pd.to_datetime(xdata).to_pydatetime()
    x_idx = np.where(xdata == xval)[0]
    if x_idx.size == 0:
        print('xval outside range of x in set_inline_label!')
        return
    yval = ydata[x_idx][0]
if not label:
label = line.get_label()
size = scale_it(ax, size, 2, exp=True)
if color is None:
color = plt.getp(line, 'color')
ax.text(xval, yval, label,
color=color,
alpha=alpha,
size=size,
ha=ha,
va=va,
bbox=bbox,
**kwargs
)
def annotate_point(ax, x, y, label='', xytext=(0, 0),
size=SIZES['marker']['smaller'],
textcoords='offset points', transform=False,
projection=DEFAULT['projection'],
bbox=dict(boxstyle='round, pad=0.3',
facecolor=COLORS['black'],
alpha=ALPHAS['transparent']),
**kwargs
):
"""
Annotate a point on a subplot.
:param ax: (mpl.axes) - plot axis
:param x: (scalar) - input x location to annotate
:param y: (scalar) - input y location to annotate
:param label: (str) - label of line to be used in legend
:param xytext: (tup) - x, y offset from input x and y for annotation
:param size: (scalar) - size of annotation
:param textcoords: (str) - type of coordinates
:param transform: (boolean) - whether to use input projection
:param projection: (cartopy.crs) - projection of plotted scatter
:param bbox: (dict) - dictionary of boxstyle, facecolor, and alpha of box
:param kwargs: (kwargs) - additional keyword arguments
:return ax: (mpl.axes) - plot axis
"""
if transform:
x, y = ax.projection.transform_point(x, y, src_crs=projection)
ax.annotate(label, xy=(x, y), xytext=xytext, ha='left', va='center',
textcoords=textcoords, size=size, bbox=bbox, **kwargs)
return ax
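# Illustrative usage sketch (not part of the original API surface): mark and
# label a single point on an axis returned by plot_line().
def _example_annotate_point():
    x = np.arange(10)
    y = x ** 2
    ax = plot_line(x, y, label='squares')
    return annotate_point(ax, 5, 25, label='x=5', xytext=(5, 5))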
def set_figsize(width=None, height=None, figsize='wide',
rows=1, cols=1, pos=1, dpi=DEFAULT['dpi'], **kwargs):
"""
Set figure size; can be wide, tall, auto, or input tuple.
:param width: (scalar) - width of figure
:param height: (scalar) - height of figure
:param figsize: (str/tup) - wide/tall/auto or tuple width x height of fig
:param rows: (int) - number of rows for subplots
:param cols: (int) - number of columns for subplots
:param pos: (int) - position of current subplot
:param dpi: (int) - dots per inch to save the figure
:param kwargs: (kwargs) - additional keyword arguments
"""
if width is not None and height is not None:
figsize = (width, height)
else:
        if figsize == 'wide' and pos == 1:
fig_width = 10 + rows * 1.75
fig_height = 3.5 + cols * 1.25
figsize = (fig_width, fig_height)
        elif figsize == 'tall' and pos == 1:
fig_width = 3.5 + rows * 1.25
fig_height = 12 + cols * 1.75
figsize = (fig_width, fig_height)
        elif figsize == 'auto' and pos == 1:
fig_width = 8 + rows * 1.5
fig_height = 4.5 + cols * 1.5
figsize = (fig_width, fig_height)
if isinstance(figsize, tuple):
fig = plt.figure(figsize=figsize, dpi=dpi, **kwargs)
return fig
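# Illustrative usage sketch (not part of the original API surface): an explicit
# width and height take precedence over the 'wide'/'tall'/'auto' presets.
def _example_set_figsize():
    fig = set_figsize(width=12, height=4)   # 12 x 4 inch figure
    wide_fig = set_figsize(figsize='wide')  # preset sized from rows/cols
    return fig, wide_fig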
def set_ax(rows=1, cols=1, pos=1, **kwargs):
"""
Create plot axis
:param rows: (int) - number of rows for subplots
:param cols: (int) - number of columns for subplots
:param pos: (int) - position of current subplot
:param kwargs: (kwargs) - additional keyword arguments
:return ax: (mpl.axes) - plot axis
"""
return plt.subplot(rows, cols, pos, **kwargs)
def set_date_ticks(ax, minor_date_ticks=True):
"""
Use logic on the length of date range to decide the tick marks.
:param ax: (mpl.axes) - plot axis
:param minor_date_ticks: (boolean) - whether to show the top date ticks
:return major_xlocator: (str) - locator of major tick
:return major_xinterval: (str) - interval between each major tick
:return major_xformatter: (str) - formatter of major tick
:return minor_xlocator: (str) - locator of minor tick
:return minor_xinterval: (str) - interval between each minor tick
:return minor_xformatter: (str) - formatter of minor tick
:return dt_bool: (boolean) - whether the x axis is datetimes
"""
geom = plt.getp(ax, 'geometry')
nrows = geom[0]
ncols = geom[1]
xlim = plt.getp(ax, 'xlim')
if xlim[0] < 700000:
dt_bool = False
return [None] * 6 + [dt_bool]
else:
dt_bool = True
xlim_dts = mdates.num2date(xlim)
dt_dict = td2dict(xlim_dts[-1] - xlim_dts[0])
ndays = dt_dict['days']
if ndays < 0:
dt_dict = td2dict(xlim_dts[0] - xlim_dts[-1])
ndays = dt_dict['days']
if ndays > 10950:
major_xlocator = 'years'
major_xformatter = '%Y'
major_xinterval = int(ndays / 2000)
major_xlocator2 = None
major_xformatter2 = None
major_xinterval2 = None
minor_xlocator = 'years'
minor_xformatter = '\'%y'
minor_xinterval = int(ndays / 8000)
minor_xshow = int(ndays / 8000)
for i in range(0, 10):
if major_xinterval % minor_xinterval != 0:
major_xinterval += 1
else:
break
if minor_xshow >= minor_xinterval / 2:
minor_xshow -= int(minor_xinterval / 1.75)
if minor_xshow <= minor_xinterval:
minor_xshow += 1
elif 3000 < ndays <= 10950:
major_xlocator = 'years'
major_xformatter = '%Y'
major_xinterval = 1 + int(ndays / 3000)
major_xlocator2 = None
major_xformatter2 = None
major_xinterval2 = None
minor_xlocator = 'years'
minor_xformatter = '\'%y'
minor_xinterval = 1 + int(ndays / 3300)
minor_xshow = 1 + int(ndays / 3300)
if major_xinterval >= minor_xinterval:
minor_xinterval -= 1
for i in range(0, 10):
if major_xinterval % minor_xinterval != 0:
major_xinterval += 1
else:
break
if minor_xshow >= minor_xinterval / 2:
minor_xshow -= int(minor_xshow / 1.3)
if minor_xshow == 0:
minor_xshow = 1
elif 1825 < ndays <= 3000:
major_xlocator = 'months'
major_xformatter = '%B'
major_xinterval = 10 + int(ndays / 1850)
major_xlocator2 = 'months'
major_xformatter2 = '%Y'
major_xinterval2 = 8
minor_xlocator = 'months'
minor_xformatter = '%b'
minor_xinterval = 1 + int(ndays / 600)
minor_xshow = 1 + int(ndays / 725)
if minor_xshow >= minor_xinterval / 2:
minor_xshow -= int(minor_xshow / 1.25)
for i in range(0, 10):
if major_xinterval % minor_xinterval != 0:
major_xinterval += 1
else:
break
for i in range(0, 10):
if (major_xinterval2 % major_xinterval != 0
or major_xinterval2 == 0):
major_xinterval2 += 1
else:
break
elif 217 < ndays <= 1825:
major_xlocator = 'months'
major_xformatter = '%b %d'
major_xinterval = 3 + int(ndays / 1000) * 2
major_xlocator2 = 'months'
major_xformatter2 = '%Y'
major_xinterval2 = 4 + int(ndays / 800)
minor_xlocator = 'months'
minor_xformatter = '%b'
minor_xinterval = 1 + int(ndays / 600)
minor_xshow = 1 + int(ndays / 725)
if minor_xshow >= minor_xinterval / 2:
minor_xshow -= int(minor_xshow / 1.5)
for i in range(0, 10):
if major_xinterval % minor_xinterval != 0:
major_xinterval += 1
else:
break
for i in range(0, 10):
if (major_xinterval2 % major_xinterval != 0
or major_xinterval2 == 0):
major_xinterval2 += 1
else:
break
elif 6 < ndays <= 217:
major_xlocator = 'days'
major_xformatter = '%b %d'
major_xinterval = 2 + int(ndays / 15) * 2
major_xlocator2 = None
major_xformatter2 = None
major_xinterval2 = None
minor_xlocator = 'days'
minor_xformatter = '%d'
minor_xinterval = 1 + int(ndays / 50)
minor_xshow = 1 + int(ndays / 35)
if minor_xshow >= minor_xinterval:
minor_xshow -= int(minor_xshow / 2.25)
for i in range(0, 10):
if major_xinterval % minor_xinterval != 0:
major_xinterval += 1
else:
break
elif 1 < ndays <= 6:
major_xlocator = 'hours'
major_xformatter = '%H:%M'
major_xinterval = ndays * 5
major_xlocator2 = 'hours'
major_xformatter2 = '%m/%d'
major_xinterval2 = 24
minor_xlocator = 'hours'
minor_xformatter = '%H'
minor_xinterval = int(ndays / 1.5)
minor_xshow = 1 + int(minor_xinterval / 2)
if minor_xshow >= minor_xinterval:
minor_xshow -= int(minor_xshow / 2.25)
for i in range(0, 10):
if major_xinterval % minor_xinterval != 0:
major_xinterval += 1
else:
break
for i in range(0, 25):
if (major_xinterval2 % major_xinterval != 0
or major_xinterval2 == 0):
major_xinterval2 -= 1
else:
break
if minor_xshow <= minor_xinterval:
minor_xshow += 1
elif 0 <= ndays <= 1:
nminutes = (dt_dict['days'] * 1440
+ dt_dict['hours'] * 60
+ dt_dict['minutes']
)
major_xlocator = 'minutes'
major_xformatter = '%I:%M %p'
major_xinterval = int(nminutes / 3)
major_xlocator2 = 'minutes'
major_xformatter2 = '%b %d'
major_xinterval2 = int(nminutes / 1.5)
minor_xlocator = 'minutes'
minor_xformatter = '%H:%M'
minor_xinterval = int(nminutes / 12)
minor_xshow = 1
if minor_xshow >= 3 and major_xlocator != 'years':
minor_xshow = int(minor_xshow / 1.5)
elif minor_xshow >= 3 and major_xlocator == 'years':
minor_xshow -= int(minor_xshow / 1.5)
if nminutes > 360:
major_xinterval = round_to(major_xinterval, base=15)
minor_xinterval = round_to(minor_xinterval, base=15)
major_xinterval2 = round_to(major_xinterval2, base=15)
if major_xinterval % minor_xinterval != 0:
minor_xinterval = int(major_xinterval / 3)
for i in range(0, 60):
if major_xinterval % minor_xinterval != 0:
minor_xinterval += 1
else:
break
if major_xinterval2 % major_xinterval != 0:
major_xinterval2 = major_xinterval
if minor_xshow <= 0:
minor_xshow = 1
if major_xinterval2 is not None:
if major_xinterval2 <= 0:
major_xinterval2 = major_xinterval
set_major_ticks(ax,
xlocator=major_xlocator,
xformatter=major_xformatter,
xinterval=major_xinterval)
set_major_tick_labels(ax, size=8)
ax2 = ax.twiny()
ax2.set_xlim(ax.get_xlim())
prettify_ax(ax2, ticks=False)
if major_xlocator2 is not None and nrows == 1:
set_major_ticks(ax2,
xlocator=major_xlocator2,
xformatter=major_xformatter2,
xinterval=major_xinterval2)
set_major_tick_labels(ax2, bottom=True, top=False,
pad=24, size=6)
else:
set_major_tick_labels(ax2, xticklabels=[])
set_major_ticks(ax2, xticks=[])
if minor_date_ticks:
set_minor_ticks(ax2,
xlocator=minor_xlocator,
xformatter=minor_xformatter,
xinterval=minor_xinterval,
top=True, bottom=False)
set_minor_tick_labels(ax2, top=True, size=7.5)
set_minor_grid(ax2, xalpha=0.25)
for label in ax2.get_xminorticklabels():
label.set_visible(False) # find a better way?
for label in ax2.get_xminorticklabels()[0::minor_xshow * ncols]:
label.set_visible(True)
return (major_xlocator, major_xinterval, major_xformatter,
minor_xlocator, minor_xinterval, minor_xformatter, dt_bool)
def set_cbar(ax, im,
fig=False,
label='',
fmt='%1.0f',
label_size=7.5,
drawedges=True,
label_color=COLORS['gray'],
tick_locs=None,
tick_size=5,
tick_color=COLORS['gray'],
color=COLORS['black'],
pad=0.1,
aspect=25.5,
shrink=0.2,
length=0,
width=0.25,
direction='out',
orientation='horizontal',
cax=None,
**kwargs):
"""
Set color bar for a map.
:param ax: (mpl.axes) - plot axis
:param im: (mpl.collections/contour) - plotted map
:param fig: (boolean) - whether to plot a figure wide colorbar
:param fmt: (str) - format of color bar labels
:param label_size: (scalar) - size of color bar label
:param label_color: (scalar) - color of color bar label
:param tick_locs: (array) - input own tick marks on color bar
:param tick_size: (scalar) - size of color bar tick labels
:param tick_color: (scalar) - color of color bar tick labels
:param color: (scalar) - color of color bar tick marks
:param drawedges: (scalar) - whether to draw color edges
:param pad: (scalar) - padding of color bar from plot
:param aspect: (int) - aspect ratio of color bar
:param shrink: (scalar) - size of color bar
:param length: (scalar) - length of color bar tick marks
:param width: (scalar) - width of color bar tick marks
:param direction: (str) - direction of color bar tick marks
:param orientation: (str) - orientation of color bar
:param cax: (mpl.axes) - plot axis to attach to
:param kwargs: (kwargs) - additional keyword arguments
:return cbar: (mpl.ColorBar) - matplotlib color bar
"""
try:
pad = scale_it(ax, pad, 0.00075, exp=True)
label_size = scale_it(ax, label_size, 1.25, exp=True)
tick_size = scale_it(ax, tick_size, 1.25, exp=True)
width = scale_it(ax, width, 0.05, exp=True)
shrink = scale_it(ax, shrink, 0.075)
aspect = scale_it(ax, aspect, 1.25)
geom = plt.getp(plt.getp(ax, 'subplotspec'), 'geometry')
nrows = geom[0]
ncols = geom[1]
shrink *= (nrows + 0.5) / 1.5
tick_size += (nrows + ncols)
if orientation == 'vertical':
shrink *= 2
pad /= 3
if fmt == '%.2f':
rotation = 45
else:
rotation = 0
try:
if not fig:
cbar = plt.colorbar(im, orientation=orientation,
pad=pad,
drawedges=drawedges,
shrink=shrink,
format=fmt,
ticks=tick_locs,
aspect=aspect,
cax=cax,
**kwargs)
else:
figure = plt.getp(ax, 'figure')
cbar = figure.colorbar(im, ax=plt.getp(figure, 'axes'),
orientation=orientation,
pad=pad,
drawedges=drawedges,
shrink=shrink * 1.75,
format=fmt,
ticks=tick_locs,
aspect=aspect,
cax=cax,
**kwargs)
        except Exception:
cbar = plt.colorbar(im,
orientation=orientation,
drawedges=drawedges,
format=fmt,
ticks=tick_locs,
cax=cax,
**kwargs)
cbar.ax.tick_params(labelsize=tick_size,
rotation=rotation,
direction=direction,
length=length,
width=width,
tick2On=True,
labelcolor=label_color,
color=color)
cbar.set_label(label, size=label_size, color=label_color)
return cbar
    except Exception:
report_err(comment='Could not set color bar; please set manually!')
def get_cmap(colors, n=None, r=False, start=0, stop=1, **kwargs):
"""
Converts a list of colors into a color map or discretizes a registered cmap
http://matplotlib.org/examples/color/colormaps_reference.html
http://www.ncl.ucar.edu/Document/Graphics/color_table_gallery.shtml
:param colors: (list/str) - a list containing RGB or Python/NCL cmap name
:param n: (int) - number of colors in cmap
:param r: (boolean) - reverse colormap
:param start: (scalar) - value to start on the cmap between 0 and 1
:param stop: (scalar) - value to end on the cmap between 0 and 1
:param kwargs: (kwargs) - additional keyword arguments
:return cmap: (mpl.cmap) - color map
"""
try:
if '_r' in colors:
colors = colors[:-2]
r = True
    except TypeError:
pass
if colors in NCL_CMAP_NAMES:
if r:
color_list = get_color_list(NCL_CMAPS[colors].values[0])[::-1]
cmap = LinearSegmentedColormap.from_list('cmap',
colors=color_list)
else:
cmap = NCL_CMAPS[colors].values[0]
if n is None:
n = NCL_CMAPS[colors].values[1]
else:
if isinstance(colors, str):
if r:
colors += '_r'
if n is None:
n = 10
cmap = plt.get_cmap(colors, **kwargs)
elif isinstance(colors, mpl.colors.LinearSegmentedColormap):
return colors
else:
if r:
colors = colors[::-1]
if n is None and len(colors) > 2:
n = len(colors)
elif n is None:
n = 10
if not isinstance(colors[0], str):
if (np.array(colors) > 1).any():
for i, tup in enumerate(colors):
colors[i] = np.array(tup) / 255.
cmap = LinearSegmentedColormap.from_list('mycmap', colors=colors,
**kwargs)
colors = cmap(np.linspace(start, stop, cmap.N))
return LinearSegmentedColormap.from_list('mycmap', colors=colors, N=n)
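# Illustrative usage sketch (not part of the original API surface): discretize
# a named matplotlib colormap and build another from a short RGB list.
def _example_get_cmap():
    discrete = get_cmap('RdBu_r', n=11)  # 11-color diverging map
    from_rgb = get_cmap([(255, 0, 0), (255, 255, 255), (0, 0, 255)])
    return discrete, from_rgb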
def get_color_list(cmap, hexcodes=False, **kwargs):
"""
Converts a registered colormap into a list of RGB tuples or hexcodes
    :param cmap: (mpl.cmap/str) - colormap object or name of a colormap
:param hexcodes: (boolean) - whether to return a list of hexcodes
:param kwargs: (kwargs) - additional keyword arguments
    :return color_list: (list) - list of RGB tuples or hexcodes
"""
if isinstance(cmap, str):
if cmap in NCL_CMAP_NAMES:
cmap = NCL_CMAPS[cmap].values[0]
else:
cmap = plt.get_cmap(cmap)
if not hexcodes:
color_list = [cmap(i)[:3] for i in range(cmap.N)]
else:
color_list = [mpl.colors.rgb2hex(cmap(i)[:3])
for i in range(cmap.N)]
return color_list
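# Illustrative usage sketch (not part of the original API surface): pull the
# discrete colors back out of a colormap as RGB tuples or hex codes.
def _example_get_color_list():
    cmap = get_cmap('viridis', n=5)
    rgb = get_color_list(cmap)                      # five RGB tuples
    hexcodes = get_color_list(cmap, hexcodes=True)  # five hex strings
    return rgb, hexcodes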
def set_latlons(ax,
color=COLORS['black'],
alpha=ALPHAS['semi opaque'],
size=4,
top=False,
bottom=True,
left=True,
right=False,
lat_labels='auto',
lon_labels='auto',
central_longitude=0,
**kwargs):
"""
Set lat lon labels for a map.
:param ax: (mpl.axes) - plot axis
:param color: (scalar) - color of lat lon labels
:param alpha: (scalar/str) - transparency of lat lon labels
:param size: (scalar) - size of lat lon labels
:param bottom: (boolean) - whether to show bottom lon labels
:param top: (boolean) - whether to show top lon labels
:param left: (boolean) - whether to show left lat labels
:param right: (boolean) - whether to show right lat labels
:param lat_labels: (array) - list of latitudes to show on map
:param lon_labels: (array) - list of longitudes to show on map
:param kwargs: (kwargs) - additional keyword arguments
:return gl: (ax.gridlines) - gridlines
"""
from cartopy.mpl.gridliner import (LONGITUDE_FORMATTER,
LATITUDE_FORMATTER
)
size = scale_it(ax, size, 1, exp=True)
geom = plt.getp(plt.getp(ax, 'subplotspec'), 'geometry')
nplots = geom[0] * geom[1]
size += nplots
linewidth = np.log(nplots + 1) / 85 + 0.35
gl = ax.gridlines(draw_labels=True,
linewidth=linewidth,
color=COLORS['black'],
alpha=ALPHAS['translucid'],
linestyle=(0, (16, 4)), **kwargs) # length, how often
    if lon_labels is not None and not (isinstance(lon_labels, str) and
                                       lon_labels == 'auto'):
        gl.xlocator = mticker.FixedLocator(lon_labels)
elif not lon_labels:
gl.xlabels_top = False
gl.xlabels_bottom = False
    if lat_labels is not None and not (isinstance(lat_labels, str) and
                                       lat_labels == 'auto'):
        gl.ylocator = mticker.FixedLocator(lat_labels)
elif not lat_labels:
gl.ylabels_left = False
gl.ylabels_right = False
else:
if central_longitude != 0:
base_range = np.arange(-360, 420, 60)
base_range -= central_longitude
base_range = np.delete(base_range,
np.where(base_range == -180)[0])
gl.xlocator = mticker.FixedLocator(base_range)
gl.xformatter = LONGITUDE_FORMATTER
gl.yformatter = LATITUDE_FORMATTER
    gl.xlabels_top = top
    gl.xlabels_bottom = bottom
    gl.ylabels_left = left
    gl.ylabels_right = right
gl.xlabel_style = {'size': size, 'color': color, 'alpha': alpha}
gl.ylabel_style = {'size': size, 'color': color, 'alpha': alpha}
return gl
def set_figtext(ax, text, size=12, pad=0,
loc='bottom center',
color=COLORS['black'],
alpha=ALPHAS['translucent'],
fha=None, fva=None, **kwargs):
"""
Add text to the side of a figure.
loc choices - center, center bottom, center left, center right,
upper left, upper right, bottom left, bottom right.
:param ax: (mpl.axes) - plot axis
:param text: (str) - text to put on the figure
:param loc: (str) - location of the text
:param size: (int) - size in points
:param color: (str) - color of text
:param alpha: (scalar/str) - transparency of text
    :param fha: (str) - force the horizontal alignment to the input string
    :param fva: (str) - force the vertical alignment to the input string
:param kwargs: (kwargs) - additional keyword arguments
"""
size = scale_it(ax, size, 1, exp=True)
pad = scale_it(ax, pad, 0.005, exp=True)
loc_keywords = get_loc_keywords(loc)
if 'lower' in loc_keywords:
if 'center' in loc_keywords: # lower center
ha = 'center'
va = 'top'
x = 0.5
y = -0.09 + pad
elif 'right' in loc_keywords:
ha = 'left'
if 'corner' in loc_keywords: # lower corner right
va = 'center'
x = 0.925
y = -0.04 + pad
else: # lower right
va = 'bottom'
x = 0.925 + pad
y = 0.125
elif 'left' in loc_keywords:
ha = 'right'
if 'corner' in loc_keywords: # lower corner left
va = 'center'
x = 0.855
y = -0.04 + pad
else: # lower left
va = 'bottom'
x = 0.05
y = 0.125
elif 'upper' in loc_keywords:
if 'center' in loc_keywords:
ha = 'center'
va = 'center'
x = 0.5
y = 0.975 - pad
elif 'right' in loc_keywords:
ha = 'left'
if 'corner' in loc_keywords:
va = 'center'
x = 0.925
y = 0.975 - pad
else:
va = 'top'
x = 0.925 + pad
y = 0.9
elif 'left' in loc_keywords:
ha = 'right'
if 'corner' in loc_keywords:
va = 'center'
x = 0.855
y = 0.975 - pad
else:
va = 'top'
x = 0.05
y = 0.9
else:
va = 'center'
if 'right' in loc_keywords:
x = 0.925 + pad
y = 0.5
ha = 'left'
elif 'left' in loc_keywords:
x = 0.05
y = 0.5
ha = 'right'
else:
x = 0.5
y = 0.5
ha = 'center'
if fva is not None:
va = fva
if fha is not None:
ha = fha
plt.figtext(x, y, text,
ha=ha, va=va,
wrap=True,
size=size,
color=color,
alpha=alpha,
**kwargs)
def set_axtext(ax, text, loc='bottom center', xy=None,
size=12, color=COLORS['black'],
xpad=None, ypad=None,
alpha=ALPHAS['translucent'],
fha=None, fva=None,
**kwargs):
"""
    Add text to a subplot, either at a named location or at an explicit xy.
    :param ax: (mpl.axes) - plot axis
:param text: (str) - text to put on the subplot
:param loc: (str) - location of the text
:param xy: (tup) - coordinate to set text
:param size: (int) - size in points
:param color: (str) - color of text
:param xpad: (scalar) - padding in the x axis direction
:param ypad: (scalar) - padding in the y axis direction
:param alpha: (scalar/str) - transparency of text
    :param fha: (str) - force the horizontal alignment to the input string
    :param fva: (str) - force the vertical alignment to the input string
:param kwargs: (kwargs) - additional keyword arguments
"""
size = scale_it(ax, size, 1, exp=True)
if xy is None:
loc_keywords = get_loc_keywords(loc)
xtick_diff = np.average(np.diff(plt.getp(ax, 'xticks')))
ytick_diff = np.average(np.diff(plt.getp(ax, 'yticks')))
if ax.get_xlim()[0] > 700000:
if 'lower' in loc_keywords:
loc_keywords.remove('lower')
va = 'bottom'
ha = ''.join(loc_keywords)
                if ha == 'left':
xy = (ax.get_xlim()[0] + xtick_diff * 0.025,
ax.get_ylim()[0] + ytick_diff * 0.025)
                elif ha == 'right':
xy = (ax.get_xlim()[1] - xtick_diff * 0.025,
ax.get_ylim()[0] + ytick_diff * 0.025)
else:
xy = ((ax.get_xlim()[0] + ax.get_xlim()[1]) / 2,
ax.get_ylim()[0] + ytick_diff * 0.025)
elif 'upper' in loc_keywords:
loc_keywords.remove('upper')
va = 'top'
ha = ''.join(loc_keywords)
                if ha == 'left':
xy = (ax.get_xlim()[0] + xtick_diff * 0.025,
ax.get_ylim()[1])
                elif ha == 'right':
xy = (ax.get_xlim()[1] - xtick_diff * 0.025,
ax.get_ylim()[1])
else:
xy = ((ax.get_xlim()[0] + ax.get_xlim()[1]) / 2,
ax.get_ylim()[1])
else:
loc_keywords.remove('center')
va = 'center'
ha = ''.join(loc_keywords)
                if ha == 'left':
xy = (ax.get_xlim()[0] + xtick_diff * 0.025,
ax.get_ylim()[1] / 2)
                elif ha == 'right':
xy = (ax.get_xlim()[1] - xtick_diff * 0.025,
ax.get_ylim()[1] / 2)
else:
ha = 'center'
xy = ((ax.get_xlim()[0] + ax.get_xlim()[1]) / 2,
ax.get_ylim()[1] / 2)
xy = (mdates.num2date(xy[0]), xy[1])
else:
if 'lower' in loc_keywords:
loc_keywords.remove('lower')
va = 'bottom'
ha = ''.join(loc_keywords)
if ha == 'left':
xy = (ax.get_xlim()[0] + ax.get_xlim()[1] * 0.025,
ax.get_ylim()[0] + ytick_diff * 0.025)
elif ha == 'right':
xy = (ax.get_xlim()[1] * 0.985,
ax.get_ylim()[0] + ytick_diff * 0.025)
else:
xy = (ax.get_xlim()[1] / 2,
ax.get_ylim()[0] + ytick_diff * 0.025)
elif 'upper' in loc_keywords:
loc_keywords.remove('upper')
va = 'top'
ha = ''.join(loc_keywords)
if ha == 'left':
xy = (ax.get_xlim()[0] + ax.get_xlim()[1] * 0.025,
ax.get_ylim()[1])
elif ha == 'right':
xy = (ax.get_xlim()[1] * 0.985,
ax.get_ylim()[1])
else:
xy = (ax.get_xlim()[1] / 2,
ax.get_ylim()[1])
else:
loc_keywords.remove('center')
va = 'center'
ha = ''.join(loc_keywords)
if ha == 'left':
xy = (ax.get_xlim()[0] + ax.get_xlim()[1] * 0.025,
ax.get_ylim()[1] / 2)
elif ha == 'right':
xy = (ax.get_xlim()[1] * 0.985,
ax.get_ylim()[1] / 2)
else:
ha = 'center'
xy = (ax.get_xlim()[1] / 2,
ax.get_ylim()[1] / 2)
else:
ha = 'left'
va = 'center'
if isinstance(xy[0], str):
xy = (pd.to_datetime(xy[0]).to_pydatetime(), xy[1])
if fva is not None:
va = fva
if fha is not None:
ha = fha
if xpad is not None:
xy = (xy[0] + xpad, xy[1])
if ypad is not None:
xy = (xy[0], xy[1] + ypad)
ax.annotate(text, xy=xy, size=size,
color=color, alpha=alpha,
ha=ha, va=va, **kwargs)
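# Illustrative usage sketch (not part of the original module; assumes a simple line plot):
#   fig, ax = plt.subplots()
#   ax.plot(range(10))
#   set_axtext(ax, 'note', loc='upper left')           # placed relative to the axis limits
#   set_axtext(ax, 'pinned', xy=(5, 2), color='red')   # placed at explicit data coordinates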
def get_loc_keywords(loc):
"""
Return the location keywords based on input loc.
:param loc: (str) - location of the text
:return loc_keywords: (list) - list of the location keywords
"""
loc = loc.lower()
loc_keywords = []
if 'top' in loc or 'upper' in loc or 'north' in loc:
loc_keywords.append('upper')
elif 'bottom' in loc or 'lower' in loc or 'south' in loc:
loc_keywords.append('lower')
if 'right' in loc or 'east' in loc:
loc_keywords.append('right')
elif 'left' in loc or 'west' in loc:
loc_keywords.append('left')
if 'center' in loc or 'middle' in loc:
loc_keywords.append('center')
if 'corner' in loc:
loc_keywords.append('corner')
return loc_keywords
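# Illustrative examples of how location strings are parsed (values follow from the checks above):
#   get_loc_keywords('upper right')         -> ['upper', 'right']
#   get_loc_keywords('bottom left corner')  -> ['lower', 'left', 'corner']
#   get_loc_keywords('middle')              -> ['center']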
def set_share(ax1, ax2, axis='x', xlabel='', ylabel=''):
"""
Match the tick locations of another axis and hide the current tick labels.
:param ax1: (mpl.axes) - plot axis to adapt
:param ax2: (mpl.axes) - plot axis to mimic ticks
:param axis: (str) - share x or y axis
:param xlabel: (str) - label of x axis
:param ylabel: (str) - label of y axis
:return ax1, ax2: (mpl.axes) - plot axes
"""
if 'x' in axis:
xlim = plt.getp(ax2, 'xlim')
ax1.set_xlim(xlim)
plt.setp(ax1.get_xticklabels(), visible=False)
ax1.set_xlabel(xlabel, labelpad=12)
if 'y' in axis:
ylim = plt.getp(ax2, 'ylim')
ax1.set_ylim(ylim)
plt.setp(ax1.get_yticklabels(), visible=False)
ax1.set_ylabel(ylabel, labelpad=12)
return ax1, ax2
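# Illustrative usage sketch (not part of the original module; assumes two stacked subplots
# and hypothetical x, y data):
#   fig, (ax_top, ax_bot) = plt.subplots(2, 1)
#   ax_bot.plot(x, y)
#   set_share(ax_top, ax_bot, axis='x')   # ax_top adopts ax_bot's x-limits and hides
#                                         # its own x tick labels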
def get_region_latlim(region, lat1=-90, lat2=90, lon1=-180, lon2=180,
tup=False, sliceit=False, w2e=False):
"""
Get latitudinal and longitudinal extents of select regions.
:param region: (str) - acronym of region [us/na/nino34/nh/sh/wh/eh]
:param lat1: (scalar) - lower limit of latitude
:param lat2: (scalar) - upper limit of latitude
:param lon1: (scalar) - left limit of longitude
:param lon2: (scalar) - right limit of longitude
:param tup: (bool) - whether to return a tuple of extents
:param sliceit: (bool) - whether to return a slice type of extents
:param w2e: (bool) - whether to convert longitudes to east-based values via lonw2e
:return lat1, lat2, lon1, lon2: (scalar) - individual extents
:return latlim, lonlim: (tuple) - tuple extents
:return lat_slice, lon_slice: (slice) - slice extents
"""
if region == 'us':
lat1 = 50
lat2 = 22
lon1 = -128
lon2 = -65
elif region == 'na':
lat1 = 73
lat2 = 10
lon1 = -176
lon2 = -65
elif region == 'nino34':
lat1 = -5
lat2 = 5
lon1 = -120
lon2 = -170
elif region == 'nh':
lat1 = 0
lat2 = 90
elif region == 'sh':
lat1 = -90
lat2 = 0
elif region == 'wh':
lon1 = -180
lon2 = 0
elif region == 'eh':
lon1 = 0
lon2 = 180
elif region is None or region == '':
pass
else:
print('Region not found!')
if w2e:
lon1 = lonw2e(lon1)
lon2 = lonw2e(lon2)
if tup:
return (lat1, lat2), (lon1, lon2)
elif sliceit:
return slice(lat1, lat2), slice(lon1, lon2)
else:
return lat1, lat2, lon1, lon2
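# Illustrative examples (values follow directly from the region branches above):
#   get_region_latlim('nh')                -> (0, 90, -180, 180)
#   get_region_latlim('us', tup=True)      -> ((50, 22), (-128, -65))
#   get_region_latlim('sh', sliceit=True)  -> (slice(-90, 0), slice(-180, 180))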
def set_twin(ax1, ax2, axis='x', title_pad=1.09,
xlabel='', ylabel='', title='', suptitle=False,
aligned=True, length_scale=False):
"""
Create another y axis on the same subplot.
:param ax1: (mpl.axes) - plot axis on the right to adapt
:param ax2: (mpl.axes) - plot axis on the left or the one to mimic
:param axis: (str) - twin x or y axis
:param title_pad: (scalar) - padding scale for the title (passed to set_labels)
:param xlabel: (str) - label of x axis
:param ylabel: (str) - label of y axis
:param title: (str) - title of subplot
:param suptitle: (boolean) - whether to make a figure title
:param aligned: (boolean) - whether to keep left and right ticks aligned
:param length_scale: (scalar) - scaling exponent passed to set_labels
:return ax1, ax2: (mpl.axes) - plot axes
"""
children1 = ax1.get_children()
children2 = ax2.get_children()
ylabel2 = plt.getp(ax2, 'ylabel')
xlabel = plt.getp(ax2, 'xlabel')
title = plt.getp(ax2, 'title')
try:
plotlist1 = list(filter(lambda x:
isinstance(x, mpl.lines.Line2D),
children1
)
)
if len(plotlist1) == 1:
plot1 = plotlist1[0]
color1 = plt.getp(plot1, 'color')
else:
plot1 = list(filter(lambda x:
isinstance(x, mpl.patches.Rectangle),
children1)
)[0]
color1 = plt.getp(plot1, 'facecolor')
plotlist2 = list(filter(lambda x:
isinstance(x, mpl.lines.Line2D),
children2)
)
if len(plotlist2) == 1:
plot2 = plotlist2[0]
color2 = plt.getp(plot2, 'color')
else:
plot2 = list(filter(lambda x:
isinstance(x, mpl.patches.Rectangle),
children2)
)[0]
color2 = plt.getp(plot2, 'facecolor')
except Exception:
color1 = COLORS['gray']
color2 = COLORS['gray']
print('Unable to get color for twinx.')
if 'x' in axis:
set_borders(ax2, spines=['left'], color=color2)
set_borders(ax2, spines=['right'], color=color1)
if aligned:
yticks2 = np.linspace(ax2.get_yticks()[0],
ax2.get_yticks()[-1],
len(ax2.get_yticks())
)
yticks1 = np.linspace(ax1.get_yticks()[0],
ax1.get_yticks()[-1],
len(ax2.get_yticks())
)
set_major_grid(ax2)
else:
yticks2 = None
yticks1 = None
set_major_grid(ax1, ycolor=color1, yalpha=ALPHAS['translucent'])
set_major_grid(ax2, ycolor=color2, yalpha=ALPHAS['translucent'])
set_borders(ax1, all_=False)
set_major_ticks(ax2,
yticks=yticks2,
axes=['y'],
bottom=True,
left=True,
right=False,
top=True,
color=color2)
set_minor_ticks(ax2,
axes=['y'],
bottom=True,
left=True,
right=False,
top=True,
color=color2)
set_major_tick_labels(ax2,
axes=['y'],
left=True,
right=False,
color=color2)
set_minor_tick_labels(ax2,
axes=['y'],
left=True,
right=False,
color=color2)
set_labels(ax2, xlabel=xlabel, ylabel=ylabel2, title_pad=title_pad,
title=title, suptitle=suptitle, ylabel_color=color2)
set_major_ticks(ax1,
yticks=yticks1,
axes=['y'],
bottom=False,
left=False,
right=True,
top=False,
color=color1)
set_minor_ticks(ax1,
axes=['y'],
bottom=False,
left=False,
right=True,
top=False,
color=color1)
set_major_tick_labels(ax1,
axes=['y'],
left=False,
right=True,
color=color1)
set_minor_tick_labels(ax1,
axes=['y'],
left=False,
right=True,
color=color1)
set_labels(ax1, ylabel=ylabel, ylabel_color=color1,
length_scale=length_scale)
return ax1, ax2
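# Illustrative usage sketch (not part of the original module; ax2 holds the primary plot
# and ax1 is its twinx counterpart, with hypothetical x, y1, y2 data):
#   fig, ax2 = plt.subplots()
#   ax2.plot(x, y1)
#   ax1 = ax2.twinx()
#   ax1.plot(x, y2, color='tab:red')
#   set_twin(ax1, ax2, ylabel='secondary units')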
def set_axes(ax, xlim=None, ylim=None,
xscale=None, yscale=None,
xinvert=False, yinvert=False, **kwargs):
"""
Modify subplot axes settings.
:param ax: (mpl.axes) - plot axis
:param xlim: (tup) - left and right x axis limit in a tuple, respectively
:param ylim: (tup) - bottom and top y axis limit in a tuple, respectively
:param xscale: (str) - linear or log scale of x axis
:param yscale: (str) - linear or log scale of y axis
:param xinvert: (boolean) - whether to flip x axis
:param yinvert: (boolean) - whether to flip y axis
:param kwargs: (kwargs) - additional keyword arguments
:return ax: (mpl.axes) - plot axis
"""
if xlim is None:
xlim = plt.getp(ax, 'xlim')
if ylim is None:
ylim = plt.getp(ax, 'ylim')
if xscale is None:
xscale = plt.getp(ax, 'xscale')
if yscale is None:
yscale = plt.getp(ax, 'yscale')
if isinstance(xlim[0], str):
xlim = pd.to_datetime(xlim)
import pandas as pd
import numpy as np
def raw_to_no_missing():
df = pd.read_csv("compas-scores-two-years.csv")
# We will add two new features
df['in_custody'] = pd.to_datetime(df['in_custody'])
df['out_custody'] = pd.to_datetime(df['out_custody'])
"""
Collection of tests asserting things that should be true for
any index subclass. Makes use of the `indices` fixture defined
in pandas/tests/indexes/conftest.py.
"""
import re
import numpy as np
import pytest
from pandas._libs.tslibs import iNaT
from pandas.core.dtypes.common import is_period_dtype, needs_i8_conversion
import pandas as pd
from pandas import (
CategoricalIndex,
DatetimeIndex,
MultiIndex,
PeriodIndex,
RangeIndex,
TimedeltaIndex,
)
import pandas._testing as tm
class TestCommon:
def test_droplevel(self, index):
# GH 21115
if isinstance(index, MultiIndex):
# Tested separately in test_multi.py
return
assert index.droplevel([]).equals(index)
for level in index.name, [index.name]:
if isinstance(index.name, tuple) and level is index.name:
# GH 21121 : droplevel with tuple name
continue
with pytest.raises(ValueError):
index.droplevel(level)
for level in "wrong", ["wrong"]:
with pytest.raises(
KeyError,
match=r"'Requested level \(wrong\) does not match index name \(None\)'",
):
index.droplevel(level)
def test_constructor_non_hashable_name(self, index):
# GH 20527
if isinstance(index, MultiIndex):
pytest.skip("multiindex handled in test_multi.py")
message = "Index.name must be a hashable type"
renamed = [["1"]]
# With .rename()
with pytest.raises(TypeError, match=message):
index.rename(name=renamed)
# With .set_names()
with pytest.raises(TypeError, match=message):
index.set_names(names=renamed)
def test_constructor_unwraps_index(self, index):
if isinstance(index, pd.MultiIndex):
pytest.skip("MultiIndex has no ._data")
a = index
b = type(a)(a)
tm.assert_equal(a._data, b._data)
@pytest.mark.parametrize("itm", [101, "no_int"])
# FutureWarning from non-tuple sequence of nd indexing
@pytest.mark.filterwarnings("ignore::FutureWarning")
def test_getitem_error(self, index, itm):
with pytest.raises(IndexError):
index[itm]
@pytest.mark.parametrize(
"fname, sname, expected_name",
[
("A", "A", "A"),
("A", "B", None),
("A", None, None),
(None, "B", None),
(None, None, None),
],
)
def test_corner_union(self, index, fname, sname, expected_name):
# GH 9943 9862
# Test unions with various name combinations
# Do not test MultiIndex or repeats
if isinstance(index, MultiIndex) or not index.is_unique:
pytest.skip("Not for MultiIndex or repeated indices")
# Test copy.union(copy)
first = index.copy().set_names(fname)
second = index.copy().set_names(sname)
union = first.union(second)
expected = index.copy().set_names(expected_name)
tm.assert_index_equal(union, expected)
# Test copy.union(empty)
first = index.copy().set_names(fname)
second = index.drop(index).set_names(sname)
union = first.union(second)
expected = index.copy().set_names(expected_name)
tm.assert_index_equal(union, expected)
# Test empty.union(copy)
first = index.drop(index).set_names(fname)
second = index.copy().set_names(sname)
union = first.union(second)
expected = index.copy().set_names(expected_name)
tm.assert_index_equal(union, expected)
# Test empty.union(empty)
first = index.drop(index).set_names(fname)
second = index.drop(index).set_names(sname)
union = first.union(second)
expected = index.drop(index).set_names(expected_name)
tm.assert_index_equal(union, expected)
@pytest.mark.parametrize(
"fname, sname, expected_name",
[
("A", "A", "A"),
("A", "B", None),
("A", None, None),
(None, "B", None),
(None, None, None),
],
)
def test_union_unequal(self, index, fname, sname, expected_name):
if isinstance(index, MultiIndex) or not index.is_unique:
pytest.skip("Not for MultiIndex or repeated indices")
# test copy.union(subset) - need sort for unicode and string
first = index.copy().set_names(fname)
second = index[1:].set_names(sname)
union = first.union(second).sort_values()
expected = index.set_names(expected_name).sort_values()
tm.assert_index_equal(union, expected)
@pytest.mark.parametrize(
"fname, sname, expected_name",
[
("A", "A", "A"),
("A", "B", None),
("A", None, None),
(None, "B", None),
(None, None, None),
],
)
def test_corner_intersect(self, index, fname, sname, expected_name):
# GH35847
# Test intersections with various name combinations
if isinstance(index, MultiIndex) or not index.is_unique:
pytest.skip("Not for MultiIndex or repeated indices")
# Test copy.intersection(copy)
first = index.copy().set_names(fname)
second = index.copy().set_names(sname)
intersect = first.intersection(second)
expected = index.copy().set_names(expected_name)
tm.assert_index_equal(intersect, expected)
# Test copy.intersection(empty)
first = index.copy().set_names(fname)
second = index.drop(index).set_names(sname)
intersect = first.intersection(second)
expected = index.drop(index).set_names(expected_name)
tm.assert_index_equal(intersect, expected)
# Test empty.intersection(copy)
first = index.drop(index).set_names(fname)
second = index.copy().set_names(sname)
intersect = first.intersection(second)
expected = index.drop(index).set_names(expected_name)
tm.assert_index_equal(intersect, expected)
# Test empty.intersection(empty)
first = index.drop(index).set_names(fname)
second = index.drop(index).set_names(sname)
intersect = first.intersection(second)
expected = index.drop(index).set_names(expected_name)
tm.assert_index_equal(intersect, expected)
@pytest.mark.parametrize(
"fname, sname, expected_name",
[
("A", "A", "A"),
("A", "B", None),
("A", None, None),
(None, "B", None),
(None, None, None),
],
)
def test_intersect_unequal(self, index, fname, sname, expected_name):
if isinstance(index, MultiIndex) or not index.is_unique:
pytest.skip("Not for MultiIndex or repeated indices")
# test copy.intersection(subset) - need sort for unicode and string
first = index.copy().set_names(fname)
second = index[1:].set_names(sname)
intersect = first.intersection(second).sort_values()
expected = index[1:].set_names(expected_name).sort_values()
tm.assert_index_equal(intersect, expected)
def test_to_flat_index(self, index):
# 22866
if isinstance(index, MultiIndex):
pytest.skip("Separate expectation for MultiIndex")
result = index.to_flat_index()
tm.assert_index_equal(result, index)
def test_set_name_methods(self, index):
new_name = "This is the new name for this index"
# don't test a MultiIndex here (as it's tested separately)
if isinstance(index, MultiIndex):
pytest.skip("Skip check for MultiIndex")
original_name = index.name
new_ind = index.set_names([new_name])
assert new_ind.name == new_name
assert index.name == original_name
res = index.rename(new_name, inplace=True)
# should return None
assert res is None
assert index.name == new_name
assert index.names == [new_name]
# FIXME: dont leave commented-out
# with pytest.raises(TypeError, match="list-like"):
# # should still fail even if it would be the right length
# ind.set_names("a")
with pytest.raises(ValueError, match="Level must be None"):
index.set_names("a", level=0)
# rename in place just leaves tuples and other containers alone
name = ("A", "B")
index.rename(name, inplace=True)
assert index.name == name
assert index.names == [name]
def test_copy_and_deepcopy(self, index):
from copy import copy, deepcopy
if isinstance(index, MultiIndex):
pytest.skip("Skip check for MultiIndex")
for func in (copy, deepcopy):
idx_copy = func(index)
assert idx_copy is not index
assert idx_copy.equals(index)
new_copy = index.copy(deep=True, name="banana")
assert new_copy.name == "banana"
def test_unique(self, index):
# don't test a MultiIndex here (as it's tested separately)
# don't test a CategoricalIndex because categories change (GH 18291)
if isinstance(index, (MultiIndex, CategoricalIndex)):
pytest.skip("Skip check for MultiIndex/CategoricalIndex")
# GH 17896
expected = index.drop_duplicates()
for level in 0, index.name, None:
result = index.unique(level=level)
tm.assert_index_equal(result, expected)
msg = "Too many levels: Index has only 1 level, not 4"
with pytest.raises(IndexError, match=msg):
index.unique(level=3)
msg = (
fr"Requested level \(wrong\) does not match index name "
fr"\({re.escape(index.name.__repr__())}\)"
)
with pytest.raises(KeyError, match=msg):
index.unique(level="wrong")
def test_get_unique_index(self, index):
# MultiIndex tested separately
if not len(index) or isinstance(index, MultiIndex):
pytest.skip("Skip check for empty Index and MultiIndex")
idx = index[[0] * 5]
idx_unique = index[[0]]
# We test against `idx_unique`, so first we make sure it's unique
# and doesn't contain nans.
assert idx_unique.is_unique is True
try:
assert idx_unique.hasnans is False
except NotImplementedError:
pass
for dropna in [False, True]:
result = idx._get_unique_index(dropna=dropna)
tm.assert_index_equal(result, idx_unique)
# nans:
if not index._can_hold_na:
pytest.skip("Skip na-check if index cannot hold na")
if is_period_dtype(index.dtype):
vals = index[[0] * 5]._data
vals[0] = pd.NaT
elif needs_i8_conversion(index.dtype):
vals = index.asi8[[0] * 5]
vals[0] = iNaT
else:
vals = index.values[[0] * 5]
vals[0] = np.nan
vals_unique = vals[:2]
if index.dtype.kind in ["m", "M"]:
# i.e. needs_i8_conversion but not period_dtype, as above
vals = type(index._data)._simple_new(vals, dtype=index.dtype)
vals_unique = type(index._data)._simple_new(vals_unique, dtype=index.dtype)
idx_nan = index._shallow_copy(vals)
idx_unique_nan = index._shallow_copy(vals_unique)
assert idx_unique_nan.is_unique is True
assert idx_nan.dtype == index.dtype
assert idx_unique_nan.dtype == index.dtype
for dropna, expected in zip([False, True], [idx_unique_nan, idx_unique]):
for i in [idx_nan, idx_unique_nan]:
result = i._get_unique_index(dropna=dropna)
tm.assert_index_equal(result, expected)
def test_mutability(self, index):
if not len(index):
pytest.skip("Skip check for empty Index")
msg = "Index does not support mutable operations"
with pytest.raises(TypeError, match=msg):
index[0] = index[0]
def test_view(self, index):
assert index.view().name == index.name
def test_searchsorted_monotonic(self, index):
# GH17271
# not implemented for tuple searches in MultiIndex
# or Intervals searches in IntervalIndex
if isinstance(index, (MultiIndex, pd.IntervalIndex)):
pytest.skip("Skip check for MultiIndex/IntervalIndex")
# nothing to test if the index is empty
if index.empty:
pytest.skip("Skip check for empty Index")
value = index[0]
# determine the expected results (handle dupes for 'right')
expected_left, expected_right = 0, (index == value).argmin()
if expected_right == 0:
# all values are the same, expected_right should be length
expected_right = len(index)
# test _searchsorted_monotonic in all cases
# test searchsorted only for increasing
if index.is_monotonic_increasing:
ssm_left = index._searchsorted_monotonic(value, side="left")
assert expected_left == ssm_left
ssm_right = index._searchsorted_monotonic(value, side="right")
assert expected_right == ssm_right
ss_left = index.searchsorted(value, side="left")
assert expected_left == ss_left
ss_right = index.searchsorted(value, side="right")
assert expected_right == ss_right
elif index.is_monotonic_decreasing:
ssm_left = index._searchsorted_monotonic(value, side="left")
assert expected_left == ssm_left
ssm_right = index._searchsorted_monotonic(value, side="right")
assert expected_right == ssm_right
else:
# non-monotonic should raise.
with pytest.raises(ValueError):
index._searchsorted_monotonic(value, side="left")
def test_pickle(self, index):
original_name, index.name = index.name, "foo"
unpickled = tm.round_trip_pickle(index)
assert index.equals(unpickled)
index.name = original_name
def test_drop_duplicates(self, index, keep):
if isinstance(index, MultiIndex):
pytest.skip("MultiIndex is tested separately")
if isinstance(index, RangeIndex):
pytest.skip(
"RangeIndex is tested in test_drop_duplicates_no_duplicates "
"as it cannot hold duplicates"
)
if len(index) == 0:
pytest.skip(
"empty index is tested in test_drop_duplicates_no_duplicates "
"as it cannot hold duplicates"
)
# make unique index
holder = type(index)
unique_values = list(set(index))
unique_idx = holder(unique_values)
# make duplicated index
n = len(unique_idx)
duplicated_selection = np.random.choice(n, int(n * 1.5))
idx = holder(unique_idx.values[duplicated_selection])
# Series.duplicated is tested separately
expected_duplicated = (
pd.Series(duplicated_selection).duplicated(keep=keep).values
)
tm.assert_numpy_array_equal(idx.duplicated(keep=keep), expected_duplicated)
# Series.drop_duplicates is tested separately
expected_dropped = holder(pd.Series(idx).drop_duplicates(keep=keep))
tm.assert_index_equal(idx.drop_duplicates(keep=keep), expected_dropped)
def test_drop_duplicates_no_duplicates(self, index):
if isinstance(index, MultiIndex):
pytest.skip("MultiIndex is tested separately")
# make unique index
if isinstance(index, RangeIndex):
# RangeIndex cannot have duplicates
unique_idx = index
else:
holder = type(index)
unique_values = list(set(index))
unique_idx = holder(unique_values)
# check on unique index
expected_duplicated = np.array([False] * len(unique_idx), dtype="bool")
tm.assert_numpy_array_equal(unique_idx.duplicated(), expected_duplicated)
result_dropped = unique_idx.drop_duplicates()
tm.assert_index_equal(result_dropped, unique_idx)
"""scr_get_work_details
This script exports one or several works from senscritique to a csv file.
The -f option will use the "URL" field of a csv file.
Launch the script with the -h flag to see available options.
"""
import logging
import time
import argparse
import pandas as pd
from senscritiquescraper import Senscritique
logger = logging.getLogger()
temps_debut = time.time()
def main():
args = parse_args()
if args.main_argument:
file = args.main_argument
elif args.file:
file = args.file
else:
logger.error("No file entered. Exiting.")
exit()
df = pd.read_csv(file, sep="\t")
import pandas as pd
import no_transfer_linear, no_transfer_lstm
import global_linear_linear, global_linear_lstm, global_lstm_linear, global_lstm_lstm
import torch
import utils
import pickle
import numpy as np
import random
# data params
manualSeed = 999999999
np.random.seed(manualSeed)
random.seed(manualSeed)
torch.manual_seed(manualSeed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
data_config = {"data_path": ".\\Tasks\\",
"region": ["Americas", "Europe", "Asia and Pacific", "MEA"],
"Europe": ["Europe_AEX", "Europe_ASE", "Europe_ATX", "Europe_BEL20", "Europe_BUX",
"Europe_BVLX", "Europe_CAC", "Europe_CYSMMAPA", "Europe_DAX", "Europe_HEX",
"Europe_IBEX", "Europe_ISEQ", "Europe_KFX", "Europe_OBX", "Europe_OMX",
"Europe_SMI", "Europe_UKX", "Europe_VILSE", "Europe_WIG20", "Europe_XU100",
"Europe_SOFIX", "Europe_SBITOP", "Europe_PX", "Europe_CRO"],
"Asia and Pacific": ["Asia and Pacific_AS51", "Asia and Pacific_FBMKLCI", "Asia and Pacific_HSI",
"Asia and Pacific_JCI", "Asia and Pacific_KOSPI", "Asia and Pacific_KSE100",
"Asia and Pacific_NIFTY", "Asia and Pacific_NKY", "Asia and Pacific_NZSE50FG",
"Asia and Pacific_PCOMP", "Asia and Pacific_STI", "Asia and Pacific_SHSZ300",
"Asia and Pacific_TWSE"],
"Americas": ["Americas_IBOV", "Americas_MEXBOL", "Americas_MERVAL", "Americas_SPTSX", "Americas_SPX",
"Americas_RTY"],
"MEA": ["MEA_DFMGI", "MEA_DSM", "MEA_EGX30", "MEA_FTN098", "MEA_JOSMGNFF",
"MEA_KNSMIDX", "MEA_KWSEPM", "MEA_MOSENEW", "MEA_MSM30", "MEA_NGSE30", "MEA_PASISI",
"MEA_SASEIDX", "MEA_SEMDEX", "MEA_TA-35", "MEA_TOP40"],
"additional_data_path": "_all_assets_data.pkl.gz"
}
# problem params
problem_config = {"export_path": ".\\",
"val_period": 0, # if val is 0, then its results are the same as training
"holdout_period": 252 * 3,
}
# model params
model_config = {"tsteps": 10,
"tasks_tsteps": 0, # [len(data_config[x]) for x in data_config["region"]] - right
"batch_size": 32,
"seq_len": 252,
"transfer_strat": "global_lstm_lstm",
"device": torch.device("cuda"),
"export_losses": False,
"no_transfer_linear": {"opt_lr": 0.001,
"amsgrad": True,
"export_weights": False
},
"no_transfer_lstm": {"opt_lr": 0.001,
"amsgrad": True,
"export_model": False,
"out_nhi": 50,
"nlayers": 2,
"drop_rate": 0.1
},
"global_linear_linear": {"opt_lr": 0.01,
"amsgrad": True,
"export_weights": False,
"in_transfer_dim": 200,
"out_transfer_dim": 200
},
"global_lstm_linear": {"opt_lr": 0.01,
"amsgrad": True,
"export_model": False,
"in_transfer_dim": 5,
"out_transfer_dim": 5,
"nlayers": 2,
"drop_rate": 0.1
},
"global_linear_lstm": {"opt_lr": 0.01,
"amsgrad": True,
"export_model": False,
"in_transfer_dim": 5,
"out_transfer_dim": 5,
"in_nlayers": 2,
"out_nlayers": 2,
"out_nhi": 10,
"drop_rate": 0.1
},
"global_lstm_lstm": {"opt_lr": 0.01,
"amsgrad": True,
"export_model": False,
"in_transfer_dim": 5,
"out_transfer_dim": 5,
"in_nlayers": 2,
"out_nlayers": 2,
"nlayers": 2,
"out_nhi": 10,
"drop_rate": 0.1,
"drop_rate_transfer": 0.1
}
}
# main routine
# pre-allocation
export_label = "valperiod_" + str(problem_config["val_period"]) + "_testperiod_" + str(problem_config["holdout_period"]) + \
"_tsteps_" + str(model_config["tsteps"]) + "_tksteps_" + str(model_config["tasks_tsteps"]) + "_batchsize_" + \
str(model_config["batch_size"]) + "_seqlen_" + str(model_config["seq_len"]) + "_transferstrat_" + \
model_config["transfer_strat"] + "_lr_" + str(model_config[model_config["transfer_strat"]]["opt_lr"])
data_config["export_label"] = export_label
problem_config["export_label"] = export_label
model_config["export_label"] = export_label
model_config["export_path"] = problem_config["export_path"]
# get data
Xtrain_tasks, Xval_tasks, Xtest_tasks = utils.get_data(data_config, problem_config, model_config)
# set model
if model_config["transfer_strat"] == "no_transfer_linear":
transfer_trad_strat = no_transfer_linear.NoTransferLinear(Xtrain_tasks, model_config)
add_label = [""] * len(data_config["region"])
elif model_config["transfer_strat"] == "no_transfer_lstm":
transfer_trad_strat = no_transfer_lstm.NoTransferLSTM(Xtrain_tasks, model_config)
add_label = ["_nhi_" + str(model_config["no_transfer_lstm"]["out_nhi"]) +
"_nlayers_" + str(model_config["no_transfer_lstm"]["nlayers"]) +
"dpr" + str(model_config["no_transfer_lstm"]["drop_rate"]) for x in data_config["region"]]
elif model_config["transfer_strat"] == "global_linear_linear":
transfer_trad_strat = global_linear_linear.GlobalLinearLinear(Xtrain_tasks, model_config)
add_label = ["_indim_" + str(model_config["global_linear_linear"]["in_transfer_dim"]) +
"_outdim_" + str(model_config["global_linear_linear"]["out_transfer_dim"]) for x in data_config["region"]]
elif model_config["transfer_strat"] == "global_lstm_linear":
transfer_trad_strat = global_lstm_linear.GlobalLSTMLinear(Xtrain_tasks, model_config)
add_label = ["_indim_" + str(model_config["global_lstm_linear"]["in_transfer_dim"]) +
"_outdim_" + str(model_config["global_lstm_linear"]["out_transfer_dim"]) +
"_inlay_" + str(model_config["global_lstm_linear"]["nlayers"]) +
"dpr" + str(model_config["global_lstm_linear"]["drop_rate"]) for x in data_config["region"]]
elif model_config["transfer_strat"] == "global_linear_lstm":
transfer_trad_strat = global_linear_lstm.GlobalLinearLSTM(Xtrain_tasks, model_config)
add_label = ["_indim_" + str(model_config["global_linear_lstm"]["in_transfer_dim"]) +
"_outdim_" + str(model_config["global_linear_lstm"]["out_transfer_dim"]) +
"_inlay_" + str(model_config["global_linear_lstm"]["in_nlayers"]) +
"_outlay_" + str(model_config["global_linear_lstm"]["out_nlayers"]) +
"_lindim_" + str(model_config["global_linear_lstm"]["out_nhi"]) +
"dpr" + str(model_config["global_linear_lstm"]["drop_rate"]) for x in data_config["region"]]
elif model_config["transfer_strat"] == "global_lstm_lstm":
transfer_trad_strat = global_lstm_lstm.GlobalLSTMLSTM(Xtrain_tasks, model_config)
add_label = ["_indim_" + str(model_config["global_lstm_lstm"]["in_transfer_dim"]) +
"_outdim_" + str(model_config["global_lstm_lstm"]["out_transfer_dim"]) +
"_inlay_" + str(model_config["global_lstm_lstm"]["in_nlayers"]) +
"_outlay_" + str(model_config["global_lstm_lstm"]["out_nlayers"]) +
"_odim_" + str(model_config["global_lstm_lstm"]["out_nhi"]) +
"_ltr_" + str(model_config["global_lstm_lstm"]["nlayers"]) +
"_dpr_" + str(model_config["global_lstm_lstm"]["drop_rate"]) +
"_dtr_" + str(model_config["global_lstm_lstm"]["drop_rate_transfer"]) for x in data_config["region"]]
# additional labelling
to_add_label = {}
for (lab, region) in zip(add_label, data_config["region"]):
to_add_label[region] = lab
# train model
import time
start=time.time()
transfer_trad_strat.train()
print(time.time()-start)
# get signals
Xtrain_signal = transfer_trad_strat.predict(Xtrain_tasks)
Xval_signal = transfer_trad_strat.predict(Xval_tasks)
Xtest_signal = transfer_trad_strat.predict(Xtest_tasks)
# compute results
k = True
for region in data_config["region"]:
region_task_paths = [t + "_all_assets_data.pkl.gz" for t in data_config[region]]
z = True
for (tk, tk_path) in zip(data_config[region], region_task_paths):
# get signal
pred_train = Xtrain_signal[region][tk].cpu()
pred_val = Xval_signal[region][tk].cpu()
pred_test = Xtest_signal[region][tk].cpu()
# get target
Ytrain = Xtrain_tasks[region][tk].view(1, -1, Xtrain_tasks[region][tk].size(1))[:, 1:].cpu()
Yval = Xval_tasks[region][tk].view(1, -1, Xval_tasks[region][tk].size(1))[:, 1:].cpu()
Ytest = Xtest_tasks[region][tk].view(1, -1, Xtest_tasks[region][tk].size(1))[:, 1:].cpu()
# compute returns
df_train_ret = pred_train.mul(Ytrain)[0].cpu().numpy() - utils.calc_tcosts(pred_train)[0].cpu().numpy()
df_val_ret = pred_val.mul(Yval)[0].cpu().numpy() - utils.calc_tcosts(pred_val)[0].cpu().numpy()
df_test_ret = pred_test.mul(Ytest)[0].cpu().numpy() - utils.calc_tcosts(pred_test)[0].cpu().numpy()
# get performance metrics
df = pd.read_pickle(data_config["data_path"] + tk_path)
df_train_ret = pd.DataFrame(df_train_ret, columns=df.columns)
import pandas as pd
import numpy as np
from sklearn import linear_model
from itertools import combinations
from .stats import *
from functools import partial
import multiprocessing as mp
from tqdm import tqdm
def csRenameOrth(adQuery,adTrain,orthTable,speciesQuery='human',speciesTrain='mouse'):
_,_,cgenes=np.intersect1d(adQuery.var_names.values, orthTable[speciesQuery], return_indices=True)
_,_,ccgenes=np.intersect1d(adTrain.var_names.values, orthTable[speciesTrain], return_indices=True)
temp1=np.zeros(len(orthTable.index.values), dtype=bool)
temp2=np.zeros(len(orthTable.index.values), dtype=bool)
temp1[cgenes]=True
temp2[ccgenes]=True
common=np.logical_and(temp1, temp2)
oTab=orthTable.loc[common.T,:]
adT=adTrain[:, oTab[speciesTrain]]
adQ=adQuery[:, oTab[speciesQuery]]
adQ.var_names = adT.var_names
return [adQ, adT]
def csRenameOrth2(expQuery,expTrain,orthTable,speciesQuery='human',speciesTrain='mouse'):
_,_,cgenes=np.intersect1d(expQuery.columns.values, orthTable[speciesQuery], return_indices=True)
_,_,ccgenes=np.intersect1d(expTrain.columns.values, orthTable[speciesTrain], return_indices=True)
temp1=np.zeros(len(orthTable.index.values), dtype=bool)
temp2=np.zeros(len(orthTable.index.values), dtype=bool)
temp1[cgenes]=True
temp2[ccgenes]=True
common=np.logical_and(temp1, temp2)
oTab=orthTable.loc[common.T,:]
expT=expTrain.loc[:, oTab[speciesTrain]]
expQ=expQuery.loc[:, oTab[speciesQuery]]
expQ.columns= expT.columns
return [expQ, expT]
def makePairTab(genes):
pairs = list(combinations(genes, 2))
labels = ['genes1', 'genes2']
pTab = pd.DataFrame(data=pairs, columns=labels)
pTab['gene_pairs'] = pTab['genes1']+'_'+pTab['genes2']
return(pTab)
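# Illustrative example (hypothetical gene names):
#   makePairTab(['gA', 'gB', 'gC']) returns a DataFrame with columns
#   ['genes1', 'genes2', 'gene_pairs'] and rows
#   ('gA', 'gB', 'gA_gB'), ('gA', 'gC', 'gA_gC'), ('gB', 'gC', 'gB_gC').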
def gnrAll(expDat, cellLabels):
myPatternG = sc_sampR_to_pattern(cellLabels)
res = dict()
groups = np.unique(cellLabels)
for i in range(0, len(groups)):
res[groups[i]] = sc_testPattern(myPatternG[groups[i]], expDat)
return res
def getClassGenes(diffRes, topX=25, bottom=True):
xi = ~pd.isna(diffRes["cval"])
diffRes = diffRes.loc[xi,:]
sortRes= diffRes.sort_values(by="cval", ascending=False)
ans=sortRes.index.values[0:topX]
if bottom:
l= len(sortRes)-topX
ans= np.append(ans, sortRes.index.values[l:] ).flatten()
return ans
def addRandToSampTab(classRes, sampTab, desc, id="cell_name"):
cNames= classRes.index.values
snames= sampTab.index.values
rnames= np.setdiff1d(cNames, snames)
stNew= pd.DataFrame()
stNew["rid"]=rnames
stNew["rdesc"]="rand"
stTop=sampTab[[id, desc]]
stNew.columns= [id, desc]
ans = stTop.append(stNew)
return ans
def ptSmall(expMat, pTab):
npairs = len(pTab.index)
genes1 = pTab['genes1'].values
genes2 = pTab['genes2'].values
expTemp = expMat.loc[:, np.unique(np.concatenate([genes1, genes2]))]
ans = pd.DataFrame(0, index=expTemp.index, columns=np.arange(npairs))
ans = ans.astype(pd.SparseDtype("int", 0))
import sys
import os
import numpy as np
from collections import Counter
import pandas as pd
from sklearn.metrics import auc
import chromHMM_utilities_common_functions_helper as helper
TRAIN_INDEX = 0
TEST_INDEX = 1
PRECISION_INDEX = 0
RECALL_INDEX = 1
PRECAL_HEADER_LIST = ["_precision", "_recall"]
PRECAL_AXIS_NAME_LIST = ["Precision", "Recall"]
TRUE_POS_INDEX = 0
FALSE_POS_INDEX = 1
TRUE_FALSE_HEADER_LIST = ["_true_pos", "_false_pos"]
TRUE_FALSE_AXIS_NAME_LIST = ["True Positive rates", "False positive rates"]
X_AXIS_INDEX = 1
Y_AXIS_INDEX = 0
ROC_INDEX = 0
PRECISION_RECALL_INDEX = 1
DATA_FILE_SUFFIX_LIST = ["_roc.csv", "_prec_recall.csv"]
PLOT_FILE_SUFFIX_LIST = ["_roc.png", "_prec_recall.png"]
HEADER_LIST_LIST = [TRUE_FALSE_HEADER_LIST, PRECAL_HEADER_LIST]
AXIS_NAME_LIST_LIST = [TRUE_FALSE_AXIS_NAME_LIST, PRECAL_AXIS_NAME_LIST]
def get_enrichment_analysis_name_from_file_name(fn):
"""
Output from ChromHMM OverlapEnrichment names each enrichment analysis after the corresponding file in the coordinate directory, for example mutation_occ1.bed.gz.
We want the enrichment_analysis_name to be mutation_occ1.
"""
return (fn.split("."))[0]
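# Example (taken from the docstring above):
#   get_enrichment_analysis_name_from_file_name("mutation_occ1.bed.gz") -> "mutation_occ1"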
def open_one_enrichment_df(fn):
df = pd.read_csv(fn, sep = '\t', header = 0)
try:
df = df.rename(columns={"state (Emission order)": "state", "Genome %": "percent_in_genome", "state (User order)" : "state"})
except:
print ("Could not convert data columns headers for data frame: " + fn)
exit(1)
return df
def quality_control_all_enrichment_df (data_frame_list):
headers_list = list(map(lambda x: x.columns, data_frame_list))
print (list(map(lambda x: len(x) , headers_list)))
# make sure that all lenth of each of the header_list is the same
if len(set(list(map(lambda x: len(x) , headers_list)))) != 1: # check that all enrichment files have the same headers
print ("The length of the headers_list in all enrichment files is not the same. Exiting...")
exit(1)
for i in range(len(headers_list[0])): # check that all headers of all the enrichment files are the same
if len(set(list(map(lambda x: x[i], headers_list)))) != 1:
print ("Header indexed: " + str(i) + " of the headers_list is not consistent")
print (list(map(lambda x: x[i], headers_list)))
print ("exiting ....")
exit(1)
if headers_list[0][0] != "state": # check that the first header is state, in all enrichment files
print ("The first headers is not state: " + headers_list[0][0])
print ("exiting ...")
exit(1)
if headers_list[0][1] != "percent_in_genome": # check that the second header is percent_in_genome, in all enrichment files
print ("The second headers is not percent_in_genome: " + headers_list[0][0])
print ("exiting ...")
exit(1)
return
def get_enrichment_data(train_test_enrich_folder_list):
train_fn_list = list(map(lambda x: os.path.join(x, "train_overlap.txt"), train_test_enrich_folder_list))
test_fn_list = list(map(lambda x: os.path.join(x, 'test_overlap.txt'), train_test_enrich_folder_list))
train_df_list = list(map(open_one_enrichment_df, train_fn_list))
test_df_list = list(map(open_one_enrichment_df, test_fn_list))
quality_control_all_enrichment_df(train_df_list)
quality_control_all_enrichment_df(test_df_list)
return list(zip(train_df_list, test_df_list)) # return a list of tuples. Each item in the list: a pair of df for a model, train (0) and test(1)
def change_fract_state_be_cont_to_one (value):
# this only happen once during calculation of CDS enrichment for 100-state E129. It's because of decimal point error
if value > 1.0:
return 1.0
return value
def do_roc_analysis(this_cont_df, train_cont_percent, test_cont_percent, enr_cont_name, enrichment_model_name):
"""
consider 'gContext' as the analysis that we are talking about
train_test_df_tuple: has 4 columns train_percent_in_genome, test_percent_in_genome, train_<cont_name>, test_<cont_name>
"""
# filter out state that is non existent on the genome
this_cont_df = this_cont_df.loc[this_cont_df['test_percent_in_genome'] > 0].copy()
# sort states based on decreasing enrichment of the enrichment_analysis_name, looking at the enrichment values in train data
this_cont_df.sort_values(by='train_' + enr_cont_name, ascending = False, inplace = True)
num_rows, num_cols = this_cont_df.shape # ncols should be 4: percent_in_genome, genomic context of interest for train and test data
# calculate true and false positive rates. From now on all the analysis focuses on the test data, the training fold enrichment statistics were only used for ordering the states
test_fract_genome_in_cont = test_cont_percent / 100.0 # fraction of the genome that the cont occupies
test_fract_genome_not_cont = 1 - test_fract_genome_in_cont # fraction of the genome that the cont does not occupy
this_cont_df['fract_in_genome'] = this_cont_df['test_percent_in_genome'] / 100.0
this_cont_df['culm_frac_gene_in_state'] = (this_cont_df.fract_in_genome).cumsum()
this_cont_df['fract_cont_in_state'] = this_cont_df['fract_in_genome'] * this_cont_df['test_' + enr_cont_name]
this_cont_df['fract_state_be_cont'] = this_cont_df['test_' + enr_cont_name] * test_fract_genome_in_cont
this_cont_df['fract_state_be_cont'] = this_cont_df['fract_state_be_cont'].apply(change_fract_state_be_cont_to_one)# this only happen once during calculation of CDS enrichment for 100-state E129. It's because of decimal point error
this_cont_df['fract_genome_in_state_and_cont'] = this_cont_df['fract_state_be_cont'] * this_cont_df['fract_in_genome']
this_cont_df['culm_fract_gene_in_state_and_cont'] = (this_cont_df.fract_genome_in_state_and_cont).cumsum()
this_cont_df['fract_genome_in_state_not_cont'] = this_cont_df['fract_in_genome'] * (1 - this_cont_df['fract_state_be_cont'])
this_cont_df['fract_not_cont_in_state'] = this_cont_df['fract_genome_in_state_not_cont'] / test_fract_genome_not_cont
this_cont_df['true_pos'] = this_cont_df['culm_fract_gene_in_state_and_cont'] / test_fract_genome_in_cont
this_cont_df['false_pos'] = (this_cont_df.fract_not_cont_in_state).cumsum()
this_cont_df['precision'] = this_cont_df['culm_fract_gene_in_state_and_cont'] / this_cont_df['culm_frac_gene_in_state']
this_cont_df['recall'] = this_cont_df['true_pos']
result_df = this_cont_df[['true_pos', 'false_pos', 'precision', 'recall']]
first_row = pd.DataFrame({'true_pos' : [0], 'false_pos' : [0], 'precision' : [0], 'recall' : [0]})
result_df = pd.concat([first_row, result_df])
#
# Gathers and processes relevant data from multiple apis and files
# and builds hydrodynamic lake model code as configured by the user.
#
# Basically just needs one coordinate within the target lake,
# model grid resolution and desired time interval.
#
# Required data sources:
# - NLS terrain database: (local) files divided into map sheets
# - Map sheets: (local) sheet shapefile (utm25LR.shp)
# - Lake shore polygons: (local) SYKE Ranta10 shapefile (jarvi10.shp)
# - Channel data: (local) SYKE channel shapefile (Uoma10.shp)
# - Traficom depth data: (remote) Open WFS service
# - Weather data: (remote) FMI's open WFS service
# - Hydrological database: (remote) SYKE's OData API
# <NAME> / SYKE / oGIIR project / 2019
# Requirements and working environment:
# 1) CSC Taito cluster with geo-env module loaded
# 2) NLS terrain geodatabase
# 3) SYKE hydrological API
# 4) SYKE channel network geodatabase
# 5) Map sheet geodatabase for figuring out how NLS data is divided between files
# Last changed: 2021-09-08 / JR
from __future__ import print_function
from scipy import interpolate
from shapely.geometry import Point
from shapely.ops import unary_union
import geopandas
#import matplotlib.pyplot as plt
import numpy
import pandas
import sys
import shapely
import config # Parameters etc
import TRAFICOMdata
import SYKEdata
import FMIwfs
import ranta10
import build_model_code
###################################################################
# getBoundaryFlows
###################################################################
def getBoundaryFlows(gdf_channels, poly_lake):
"""Determines lake inflow and outflow coordinates based on where
where channel data intersects the lake polygon borders.
Assumes that the channel coordinates are always in outflowing direction,
i.e. first point is an upstream point and last point is a downstream point.
Parameters:
channel data
lake polygon
Returns:
points_outflow (list of shapely.Point)
Coordinates of outflows
points_inflow (list of shapely.Point)
Coordinates of inflows
"""
points_inflow = []
points_outflow = []
# Build lists of inflow and outflow points
for i, c in gdf_channels.iterrows():
p0 = Point(c.geometry.xy[0][0], c.geometry.xy[1][0]) # Start coordinate of channel
p1 = Point(c.geometry.xy[0][-1], c.geometry.xy[1][-1]) # End coordinate of channel
#
if (not p0.intersects(poly_lake)) and p1.intersects(poly_lake): # Inflow channel
points_inflow.append(c.geometry.intersection(poly_lake.exterior))
elif p0.intersects(poly_lake) and (not p1.intersects(poly_lake)): # Outflow channel
points_outflow.append(c.geometry.intersection(poly_lake.exterior))
elif p0.intersects(poly_lake.exterior) and p1.intersects(poly_lake.exterior): # Inflow and outflow
points_inflow.append(p0)
points_outflow.append(p1)
return points_inflow, points_outflow
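# Illustrative usage sketch (file names taken from the header comments above; the real
# inputs are prepared elsewhere in this script):
#   gdf_channels = geopandas.read_file("Uoma10.shp")
#   poly_lake = geopandas.read_file("jarvi10.shp").geometry.iloc[0]
#   points_in, points_out = getBoundaryFlows(gdf_channels, poly_lake)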
#-------------
# PolygonToXYZ
#-------------
def PolygonToXYZ(poly, elev=0.0):
"""Converts Polygons to list of coordinate tuples (x,y,z).
Parameters:
poly: shapely.Polygon
elev: float
Optional z level.
Returns:
List of polygon border coordinate tuples.
"""
print("Converting polygon to points:")
points = []
# Exterior points
x, y = poly.exterior.xy
for p in range(1,len(x)): # Ignore first (or last) coordinate pair as it is repeated
points.append((x[p],y[p],elev))
print("...found",len(x),"exterior points.")
# Interior points
nbi = len(poly.interiors)
print("...found", nbi, "islands.")
for i in range(0,nbi):
x, y = poly.interiors[i].xy
for p in range(1,len(x)): # Ignore first (or last) coordinate pair as it is repeated
points.append((x[p],y[p],elev))
print("...found a total of",len(points),"shoreline points.")
return points
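# Illustrative example (a unit square with no islands; elev is the z value attached to
# every shoreline point):
#   square = shapely.geometry.Polygon([(0, 0), (1, 0), (1, 1), (0, 1)])
#   PolygonToXYZ(square, elev=77.0)
#   -> [(1.0, 0.0, 77.0), (1.0, 1.0, 77.0), (0.0, 1.0, 77.0), (0.0, 0.0, 77.0)]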
###################################################################
# FeaturesToXYZ
###################################################################
def FeaturesToXYZ(geodataframe, attr, adjustment=0.0, mult=1.0):
"""Converts Linestring collections to points
Parameters:
GeoDataFrame geodataframe
Object to convert
string attr
Attribute to convert
float adjustment
Optional bias value
float mult
Optional multiplier for unit conversions
Returns:
List of (x,y,z) tuples representing the Linestring coordinates.
"""
print("Converting features to points:")
points = []
print("...found",len(geodataframe),"features.")
for row in range(0,len(geodataframe)):
x, y = geodataframe.iloc[row].geometry.xy
z = float(geodataframe[attr].iloc[row])
for p in range(0,max(len(x)-1,1)): # Ignore last (or first) coordinate pair as it is repeated
points.append((x[p],y[p],mult*z+adjustment))
print("...found a total of",len(points),"points.")
return points
#------------------------------------------------------------
# Generates a 3-D array of depths from a collection of points
#------------------------------------------------------------
def generate_grid_griddata(points, gridresolution, interpolationmethod):
# Generate x and y coordinates based on what area the input points cover and wanted grid resolution
coordsx = numpy.arange(min(points[:,0])+gridresolution/2.0, max(points[:,0]), gridresolution)
coordsy = numpy.arange(min(points[:,1])+gridresolution/2.0, max(points[:,1]), gridresolution)
nx = len(coordsx)
ny = len(coordsy)
#
# Generate a 1-D list of x, y coordinates where to interpolate point data
coordgrid = numpy.meshgrid(coordsx,coordsy)
coordlist = numpy.stack( (coordgrid[0].flatten(), coordgrid[1].flatten()) ).transpose()
#
# Simple data interpolation to grid
interpolated = interpolate.griddata( points[:,0:2], points[:,2], coordlist, method=interpolationmethod )
nearest = interpolate.griddata( points[:,0:2], points[:,2], coordlist, method='nearest' )
interpolated = numpy.where(numpy.isnan(interpolated), nearest, interpolated) # Fill nans with data from nearest neighbours
#
# Reshape coordinate and interpolated z data into a 3-D array
grid = numpy.stack( (coordlist[:,0],coordlist[:,1],interpolated), axis=1)
grid3d = grid.reshape(nx, ny, 3)
#
return nx, ny, grid3d
#------------------------------------------------------------
# Generates a 3-D array of depths from a collection of points
#------------------------------------------------------------
def generate_grid_rbf(points, gridresolution, interpolationmethod):
# Generate x and y coordinates based on what area the input points cover and wanted grid resolution
coordsx = numpy.arange(min(points[:,0])+gridresolution/2.0, max(points[:,0]), gridresolution)
coordsy = numpy.arange(min(points[:,1])+gridresolution/2.0, max(points[:,1]), gridresolution)
nx = len(coordsx)
ny = len(coordsy)
xi, yi = numpy.meshgrid(coordsx,coordsy)
#
# Simple data interpolation to grid
rbf = interpolate.Rbf(points[:,0],points[:,1],points[:,2])#, function=interpolationmethod, smooth=0 )
zi = rbf(xi, yi)
#
# Reshape coordinate and interpolated z data into a 3-D array
grid = numpy.stack( (xi, yi, zi), axis=1)
grid3d = grid.reshape(nx, ny, 3)
#
return nx, ny, grid3d
#------------------------------------------------------------
# Generates a 3-D array of depths from a collection of points
#------------------------------------------------------------
def generate_grid_smart(points, resolution, MWL, interpolationmethod='linear'):
"""
Smart grid interpolator that first combines all available height data points
to grid cells while determining the most sensible way to use the data for the
grid cell (interpolating, averaging or selecting the most sensible value).
Then the grid is filled by interpolating.
The purpose is to produce a sensible calculation grid for 3-D lake modelling
purposes.
"""
hix = numpy.arange(min(points[:,0]), max(points[:,0]), resolution)
hiy = numpy.arange(min(points[:,1]), max(points[:,1]), resolution)
nx = len(hix)
ny = len(hiy)
# Sort all data by y-coordinate
sortedpoints = numpy.sort(points.view('f8,f8,f8'), order=['f1'], axis=0).view(float)
nearest = numpy.full((nx-1,ny-1),numpy.nan)
dbgsum = 0
# Decimate data to regular grid cell by cell. Average original data to cells.
# Relies heavily on the fact that the arrays being operated on are sorted correctly.
# Assumes sortedpoints doesn't contain any points outside hix, hiy limits (especially lower)
for j in range(0,ny-1): # row loop from bottom of the grid to the top
#print("Row",j+1,"of",ny-1,"(",100*j/(ny-1),"% )")
rowpoints = sortedpoints[sortedpoints[:,1]<hiy[j+1]] # Get points in this row
rowpoints = numpy.sort(rowpoints.view('f8,f8,f8'), order=['f0'], axis=0).view(float) # Sort by columns
sortedpoints = sortedpoints[sortedpoints[:,1]>=hiy[j+1]]
idx = 0
plen = len(rowpoints)
for i in range(0,nx-1): # column loop, left to right
localpoints = numpy.empty((0,3))
for s in range(idx,plen):
if rowpoints[s,0]>=hix[i+1]: # Skip to next cell if point is outside current cell
idx = s # Start with this point at next cell
break
else: # Append current point to this cell
localpoints=numpy.append(localpoints, [rowpoints[s,:]], axis=0)
idx = s + 1 # Start with next point next time
# Selecting cell depth value (interpolation, averaging or minimum)
if len(localpoints)>1: # Interpolate cell value if cell contains more than 1 point
dbgsum += len(localpoints)
if numpy.any(localpoints[:,2]<MWL) and numpy.any(localpoints[:,2]>=MWL):
# Border case near shorelines: select the deepest point to represent whole cell
nearest[i,j] = min(localpoints[:,2])
else:
# Interpolate at center and each corner
interpolationpoints = numpy.array( [[hix[i]+resolution/2.0, hiy[j]+resolution/2.0],
[hix[i], hiy[j]],
[hix[i], hiy[j+1]],
[hix[i+1], hiy[j]],
[hix[i+1], hiy[j+1]]
] )
result = interpolate.griddata(localpoints[:,0:2],localpoints[:,2],
interpolationpoints,method='nearest')
# Average points with most weight at center point
nearest[i,j] = 0.5*result[0]+0.125*(result[1]+result[2]+result[3]+result[4])
elif len(localpoints)==1:
dbgsum += 1
nearest[i,j] = localpoints[0,2]
print("Debug: Used", dbgsum, "of", len(points), "points.")
#
# INTERPOLATE NaNs from regular grid
# https://stackoverflow.com/questions/6518811/interpolate-nan-values-in-a-numpy-array
print("Interpolating grid...")
ix, iy = numpy.indices(nearest.shape)
interp = numpy.array(nearest)
interp[numpy.isnan(interp)] = interpolate.griddata(
(ix[~numpy.isnan(nearest)], iy[~numpy.isnan(nearest)]), # points we know
nearest[~numpy.isnan(nearest)], # values we know
(ix[numpy.isnan(nearest)], iy[numpy.isnan(nearest)]),method=interpolationmethod) # points to interpolate
# Convert from index grids to Cartesian 3-d grid with coordinates
hix2 = numpy.arange(min(points[:,0])+resolution/2.0, max(points[:,0])-resolution/2.0, resolution)
hiy2 = numpy.arange(min(points[:,1])+resolution/2.0, max(points[:,1])-resolution/2.0, resolution)
nx2 = len(hix2)
ny2 = len(hiy2)
xi,yi = numpy.meshgrid(hix2,hiy2,indexing='ij')
grid3d = numpy.stack( (xi,yi,interp), axis=2 )
#
return nx2, ny2, grid3d
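# Illustrative usage sketch (hypothetical names and values; `points` is an (N, 3) numpy
# array of x, y, z tuples such as those produced by PolygonToXYZ / FeaturesToXYZ above):
#   points = numpy.array(PolygonToXYZ(poly_lake, elev=mwl) + depth_points)
#   nx, ny, grid3d = generate_grid_smart(points, resolution=50.0, MWL=mwl)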
################################
# findLakeName
################################
def findLakeName(gdf, lakepoly):
"""
Tries to determine the name of the lake polygon. Not perfect.
Current implementation: use the name that is closest to the
lake polygon.
To do: make this smarter or get the name some other way.
"""
if (len(gdf)==0):
return ""
distancelist = gdf.geometry.distance(lakepoly)
mindistidx = distancelist.values.argmin()
name = gdf.iloc[mindistidx]['TEKSTI']
return name
###############################
# findLakeMWL
###############################
def findLakeMWL(gdf, lakepoly):
"""
Finds lake mean water level attribute in the GeoDataFrame closest to lakepoly.
Not a foolproof implementation.
"""
if (len(gdf)==0):
return None
mwl = -1.0
mindist = -1.0
for i, text in enumerate(gdf['TEKSTI'].values):
try:
mwltemp = float(text)
dist = gdf.iloc[i].geometry.distance(lakepoly)
if (mindist<0.0) or (dist<mindist):
mindist = dist
mwl = mwltemp
except ValueError: # Skip erroneous mean water levels, e.g. controlled levels (127.1-128.5)
pass
if mwl<0.0:
print("Fatal error, mean water level not found.")
sys.exit(1)
return mwl
#################################################
# cleanGrid
#################################################
def cleanGrid(grid3d, poly_lake, mwl, minheight):
"""Make the calculated grid usable in modelling:
- remove points under mwl from outside the lake area
- connect all wet areas and/or remove unconnected parts
- low-pass filtering
- minimum depth
INCOMPLETE! TO DO: Make this better.
"""
nx = grid3d.shape[0]
ny = grid3d.shape[1]
gridres = abs(grid3d[nx-1,0,0]-grid3d[0,0,0])/(nx-1) # get grid resolution from first dimension
for i in range(0,nx):
for j in range(0,ny):
p = grid3d[i,j,:]
P = Point(p[0],p[1])
z = p[2]
if z > minheight+50.0:
grid3d[i,j,2] = mwl+50.0 # limit terrain height to +50 m over mwl
elif z >= mwl and P.within(poly_lake):
print("Warning: point", p, "within lake higher than mean water level.")
grid3d[i,j,2] = mwl+10.0
elif z < minheight and P.distance(poly_lake) > gridres:
grid3d[i,j,2] = minheight # Make all land points at least minheight high
return grid3d
###################################################
# attachFlowLocations
###################################################
def attachFlowLocations(locs, inpoints, outpoints):
"""
Add flow location/coordinate metadata to locs dictionary.
"""
allpoints = inpoints + outpoints
if len(allpoints)<=0:
print("Warning: Zero open boundaries to process.")
return # nothing to do!
# Assign flow point with minimum distance to each data site
for site in locs:
mindist = (sys.float_info.max,-1) # (distance, index)
for index, point in enumerate(allpoints,0):
dist = point.distance(Point(locs[site]['x'],locs[site]['y']))
if dist<mindist[0]:
mindist = (dist,index)
locs[site]['moved_x'] = allpoints[mindist[1]].x
locs[site]['moved_y'] = allpoints[mindist[1]].y
locs[site]['moved_distance'] = mindist[0]
if mindist[1]<len(inpoints):
locs[site]['type'] = 'in'
else:
locs[site]['type'] = 'out'
locs[site]['enabled'] = True # Enable location by default
return locs
##############################################
# getGridShape
##############################################
def getGridShape(grid3d, GRIDRESOLUTION, mwl):
"""Builds a list of rectangular polygons representing each
grid cell and the exterior of the grid shape. Used in further
processing.
"""
nx = grid3d.shape[0]
ny = grid3d.shape[1]
hr = GRIDRESOLUTION/2.0
# Build small polygon for each grid point
polylist = []
for i in range(0,nx):
for j in range(0,ny):
if ((not numpy.isnan(grid3d[i,j,2])) and grid3d[i,j,2]<mwl): # Use only wet points
p1 = (grid3d[i,j,0]-hr,grid3d[i,j,1]-hr) # lower left
p2 = (grid3d[i,j,0]+hr,grid3d[i,j,1]-hr) # lower right
p3 = (grid3d[i,j,0]+hr,grid3d[i,j,1]+hr) # top right
p4 = (grid3d[i,j,0]-hr,grid3d[i,j,1]+hr) # top left
rect = shapely.geometry.Polygon([p1,p2,p3,p4])
rect.i = i # add grid indices
rect.j = j
rect.z = grid3d[i,j,2]
rect.c = Point((grid3d[i,j,0],grid3d[i,j,1])) # center point
polylist.append(rect)
#plt.plot(*polylist[-1].exterior.xy) # Debug
#plt.show() # Debug
# Combine polygons in polylist
mergedpoly = shapely.ops.unary_union(polylist)
# Try to make a single combined polygon instead of multi-part
# TO DO: This still needs work to be usable in all cases.
if mergedpoly.type == 'MultiPolygon':
print("WARNING: Merging MultiPolygon forcibly by discarding orphaned areas.")
polyareas = []
for i, poly in enumerate(mergedpoly):
polyareas.append( [i, poly.area] )
polyareas.sort(key=lambda x: x[1])
mergedpoly = mergedpoly[polyareas[-1][0]] # Keep only polygon with largest area
#plt.plot(*mergedpoly.exterior.xy) # Debug
#plt.show() # Debug
# Removed old merging method / JR 2019-12-12 -- might be incompatible with interface finding algorithm
# print("MultiPolygon detected, attempting to merge by expanding.")
# epsilon = 0.0000000001
# while (epsilon<hr/10.0 and mergedpoly.type=='MultiPolygon'):
# mergedpoly = mergedpoly.buffer(epsilon)
# epsilon *= 10.0
# if mergedpoly.type == 'MultiPolygon':
# print("WARNING: Failed to combine MultiPolygon!")
# else:
# print("Success! Expanded polygons by", epsilon, " meters.")
return polylist, mergedpoly.exterior
##########################################################
# addIntersections
##########################################################
def addIntersections(locs, polylist, gridexterior, model):
# Finds the open boundary cell indices.
#
# First find the intersection coordinates of shortest distance line
# to grid polygon border for each flow point.
# TODO: 1) solve ambiguities in e.g. cases where the point is equidistant to multiple
# points on the grid border, 2) if point is inside an island (interior ring) then
# this will still find the coordinates at the outer exterior.
for site in locs:
# Find coordinates of closest point to flow point at the grid exterior. For algorithm,
# see https://stackoverflow.com/questions/33311616/find-coordinate-of-closest-point-on-polygon-shapely
projdist = gridexterior.project(Point(locs[site]['x'],locs[site]['y']))
extpoint = gridexterior.interpolate(projdist)
extcoords = extpoint.coords[0]
px = extcoords[0]
py = extcoords[1]
locs[site]['grid_x'] = px # x
locs[site]['grid_y'] = py # y
EPSILON = 1.0E-6 # Numerical accuracy of intersection location vs polyline
iflist = []
for pol in polylist:
# Check that pol shares a border with grid exterior. Excludes inner
# polygons and polygons that touch the grid exterior only with corners.
isect = pol.buffer(EPSILON).intersection(gridexterior)
if ( (type(isect) is shapely.geometry.MultiLineString or
type(isect) is shapely.geometry.LineString) and
isect.length>EPSILON*10.0 ):
minx = min(pol.exterior.xy[0])
maxx = max(pol.exterior.xy[0])
miny = min(pol.exterior.xy[1])
maxy = max(pol.exterior.xy[1])
hx = 0.5*(maxx-minx)+EPSILON
hy = 0.5*(maxy-miny)+EPSILON
if (abs(px-minx)<EPSILON and (abs(py-maxy)<hy or abs(py-miny)<hy)):
iflist.append(['U',pol.i,pol.j,pol.z]) # U-interface at left face
elif (abs(px-maxx)<EPSILON and (abs(py-maxy)<hy or abs(py-miny)<hy)):
iflist.append(['U',pol.i+1,pol.j,pol.z]) # U-interface at right face
if (abs(py-miny)<EPSILON and (abs(px-maxx)<hx or abs(px-minx)<hx)):
iflist.append(['V',pol.i,pol.j,pol.z]) # V-interface at bottom face
elif (abs(py-maxy)<EPSILON and (abs(px-maxx)<hx or abs(px-minx)<hx)):
iflist.append(['V',pol.i,pol.j+1,pol.z]) # V-interface at top face
if len(iflist)==0:
print("Interface list is empty! This should not happen.")
sys.exit(1)
elif len(iflist)>1:
# select which interface to use in case found interface matches e.g. corners
iflist.sort(key=lambda x: x[3]) # Sort by depth (deepest first)
locs[site]['if_indices'] = (iflist[0][1],iflist[0][2])
locs[site]['if_orientation'] = iflist[0][0]
if iflist[0][0] == 'U':
model['nrvbu'] += 1
elif iflist[0][0] == 'V':
model['nrvbv'] += 1
return locs, model
#################################
# filterLocations
#################################
def filterLocations(locs, model):
"""
Leaves only one measurement station enabled per grid location in the model
configuration. Chooses the one with original coordinates closest
to the grid location.
"""
# Possible to do list:
# - Is there a need to check if water level data matches MWL?
# - Should we combine multiple stations at same grid location?
for site in locs:
for site2 in locs:
if (locs[site]['enabled'] and locs[site2]['enabled'] and
locs[site]['if_indices'][0]==locs[site2]['if_indices'][0] and
locs[site]['if_indices'][1]==locs[site2]['if_indices'][1]):
if locs[site]['moved_distance']<locs[site2]['moved_distance']:
locs[site2]['enabled'] = False
if locs[site2]['if_orientation']=='U':
model['nrvbu'] -= 1
elif locs[site2]['if_orientation']=='V':
model['nrvbv'] -= 1
return locs, model
#####################################
# combineEnabledLocations
#####################################
def combineEnabledLocations(W_locs, Q_locs):
"""Makes a combined dictionary of enabled W and Q locations
in u- and v-orientations.
"""
uvlocations = {}
uvlocations['u'] = []
uvlocations['v'] = []
# Levels
for site in W_locs:
if W_locs[site]['enabled']:
if W_locs[site]['if_orientation'] == 'U': # west-east if
uvlocations['u'].append(W_locs[site])
uvlocations['u'][-1]['id'] = site
else: #north-south if
uvlocations['v'].append(W_locs[site])
uvlocations['v'][-1]['id'] = site
# Discharges
for site in Q_locs:
if Q_locs[site]['enabled']:
if Q_locs[site]['if_orientation'] == 'U': # west-east if
uvlocations['u'].append(Q_locs[site])
uvlocations['u'][-1]['id'] = site
else: #north-south if
uvlocations['v'].append(Q_locs[site])
uvlocations['v'][-1]['id'] = site
return uvlocations
#############################################
# getTopoDBData
#############################################
def getTopoDBData(sheets, path, fileprefix, filesuffix, classnumber, attribute):
"""Load data from all from map sheets for wanted classnumber and column.
Returns GeoDataFrame with the requested data.
"""
gdf = geopandas.GeoDataFrame()
for sheet in sheets:
print("Gathering class", classnumber, "data from map sheet", sheet)
zipfileuri = "zip://"+path+"/"+sheet[0:2]+"/"+sheet[0:3]+"/"+sheet+".shp.zip"
shapefilename = fileprefix+"_"+sheet+"_"+filesuffix
gdf_all = geopandas.read_file(zipfileuri,layer=shapefilename)
        gdf = pandas.concat([gdf, gdf_all[gdf_all['LUOKKA']==classnumber][[attribute,'geometry']]], axis=0)
    return gdf
import datetime as dt
import gc
import json
import logging
import os
import pickle
from glob import glob
from typing import Dict, List, Optional, Tuple, Union
import h5py
import matplotlib as mpl
import matplotlib.dates as mdates
import matplotlib.gridspec as gs
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pyproj
import rasterio as rio
import simplekml
from cataloging.vi import gliImage, ngrdiImage, osaviImage
from fluidml.common import Task
#from PIL import Image
from pycpd import RigidRegistration
from pykml import parser
from rasterio.enums import Resampling
from rasterio.transform import rowcol, xy
from rasterio.windows import Window
from scipy.ndimage import distance_transform_edt
from scipy.optimize import curve_fit
from scipy.signal import find_peaks
#from skimage.exposure import equalize_adapthist
from skimage.feature import peak_local_max
from skimage.filters import gaussian, threshold_otsu
from skimage.measure import label, regionprops
from skimage.segmentation import watershed
from skimage.transform import hough_line, hough_line_peaks, resize
from sklearn.neighbors import NearestNeighbors
logger = logging.getLogger(__name__)
# suppress pickle 'error' from rasterio
logging.Logger.manager.loggerDict['rasterio'].setLevel(logging.CRITICAL)
logging.Logger.manager.loggerDict['matplotlib'].setLevel(logging.CRITICAL)
import warnings
warnings.filterwarnings("ignore")
mpl.use('Agg')
def read_raster(
image_path: str,
all_channels: np.array,
channels: List[str]
):
ch = [np.argmax(all_channels == c)+1 for c in channels]
raster = rio.open(image_path)
if raster.dtypes[0] == "float32":
data = raster.read(ch, fill_value=np.nan)
data /= np.nanmax(data)
elif raster.dtypes[0] == "uint8":
if "alpha" in all_channels:
data = raster.read(ch).astype(np.float32)
alpha_ch = raster.read(int(np.argmax(all_channels == "alpha")+1))
for d in data[:,:]:
d[alpha_ch == 0] = np.nan
else:
data = raster.read(ch, fill_value=0).astype(np.float32)
else:
raise NotImplementedError()
return np.transpose(data, axes=(1,2,0))
def write_onechannel_raster(
image_path: str,
image: np.array,
meta: Dict, dtype: str
):
if dtype == 'float32':
meta.update({
'dtype': 'float32',
'height': image.shape[0],'count': 1,'nodata': -32767,
'width': image.shape[1]})
elif dtype == 'uint8':
meta.update({
'dtype': 'uint8',
'height': image.shape[0],'count': 1,'nodata': 0,
'width': image.shape[1]})
else:
raise NotImplementedError()
with rio.open(image_path, "w", **meta) as dest:
dest.write(image,1)
def calc_m_per_px(
raster_meta: Dict
) -> float:
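    """Approximate the raster resolution in metres per pixel by projecting
    neighbouring pixel centres to UTM and averaging the x and y spacing
    (pixels are assumed to be approximately square)."""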
# read CRS of rasterio data
proj_crs = pyproj.crs.CRS.from_user_input(raster_meta["crs"])
# GPS coordinates of anchor point
lon0, lat0 = xy(raster_meta["transform"],0,0)
# calculate UTM zone
utm_zone = int(np.floor((lon0/360)*60+31))
utm = pyproj.Proj(proj='utm', zone=utm_zone, ellps='WGS84')
UTM0_x, UTM0_y = utm(*xy(raster_meta["transform"],0,0))
UTM1_x, UTM1_y = utm(*xy(raster_meta["transform"],0,1))
UTM2_x, UTM2_y = utm(*xy(raster_meta["transform"],1,0))
# calculate unit pixel distances
pxx = abs(UTM1_x - UTM0_x)
pxy = abs(UTM2_y - UTM0_y)
# take mean (assume quadratic pixels)
m_per_px = np.mean([pxx, pxy])
return m_per_px
def px_to_utm(
point_cloud: np.ndarray,
raster_meta: Dict
) -> Tuple[np.ndarray, pyproj.proj.Proj]:
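    """Convert pixel (row, col) indices of the raster into UTM coordinates (metres).
    Returns the converted points and the pyproj UTM projection that was used."""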
# read CRS of rasterio data
proj_crs = pyproj.crs.CRS.from_user_input(raster_meta["crs"])
# GPS coordinates of point cloud
lon, lat = np.asarray(xy(raster_meta["transform"],*point_cloud.T))
# calculate UTM zone
utm_zone = int(np.floor((lon.mean()/360)*60+31))
utm_transform = pyproj.Proj(proj='utm', zone=utm_zone, ellps='WGS84')
utm = np.asarray(utm_transform(lon, lat)).T
return utm, utm_transform
def readCoordsFromKml(
filename: str
) -> np.ndarray:
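    """Read the (lon, lat) coordinates of all Point placemarks directly below the
    KML Document element and return them as an (N, 2) array."""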
with open(filename, "r") as kmlfile:
root = parser.parse(kmlfile).getroot()
lonlat = []
for c in root.Document.iterchildren():
lonlat.append([float(x) for x in c.Point.coordinates.text.split(",")[:2]])
lonlat = np.asarray(lonlat)
return lonlat
def growFunction(
x: float,
g: float,
lg: float,
xg: float,
d: float,
ld: float,
xd: float
) -> float:
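    """Logistic grow function with an optional logistic decline term:
    f(x) = g / (1 + exp(-lg*(x - xg))) - d / (1 + exp(-ld*(x - xd))).
    The decline term is only applied if d > 0."""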
if d > 0:
return (g/(1+np.exp(-lg*(x-xg)))) - d/(1+np.exp(-ld*(x-xd)))
else:
return (g/(1+np.exp(-lg*(x-xg))))
def cumDays(
observation_dates: Union[List[float],np.array]
) -> np.array:
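    """Cumulative number of days elapsed since the earliest observation date;
    the first entry is 0."""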
cum_days = np.cumsum([d.days for d in np.diff(np.sort(observation_dates))]).astype(float)
cum_days = np.hstack((0, cum_days))
return cum_days
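# Example (hypothetical observation dates):
#   cumDays([dt.date(2021, 5, 1), dt.date(2021, 5, 11), dt.date(2021, 6, 1)])
#   -> array([ 0., 10., 31.])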
def growScaling(
cum_days: np.array,
bounds: Tuple,
grow_func_params: np.array
) -> np.array:
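    """Evaluate the fitted grow function at cum_days and rescale it linearly so that
    its first value maps to `earliest` and its maximum maps to `latest`,
    where bounds = (earliest, latest)."""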
earliest, latest = bounds
grow_func = growFunction(cum_days, *grow_func_params)
maxgrow_val = np.max(grow_func)
grow_func = (grow_func - grow_func[0]) / (maxgrow_val - grow_func[0])
scaled = grow_func * (latest - earliest) + earliest
return scaled
def makeDirectory(
directory: str
) -> None:
if not os.path.exists(directory):
os.makedirs(directory)
def group_points(
points: np.array,
layers: np.array,
max_dist: float
) -> Tuple[np.array, np.array]:
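    """Group points from several layers by proximity.
    Points of the first layer seed the groups. Each following layer is matched to the
    current group centroids with a 1-nearest-neighbour search: points within max_dist
    of a centroid join that group (if several compete for the same group, only the
    closest is kept and the others are discarded with label -1), while points farther
    than max_dist start new groups. Returns the per-point group labels and the final
    group centroids."""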
nn = NearestNeighbors(n_neighbors=1, n_jobs=-1)
# initialization
# -> all labels to -1
labels = -np.ones_like(layers)
# all given layers
uni_layers = np.unique(layers)
# -> give points of first layer individual group labels
labels[layers == uni_layers[0]] = np.arange(np.sum(layers == uni_layers[0]))
# -> first evaluation point cloud: first layer
centroids = points[layers == uni_layers[0]]
ind = np.arange(len(points))
for i in range(1, len(uni_layers)):
# fit nearest neighbor model
nn.fit(centroids)
# evaluate on next layer
dist, ass_group = nn.kneighbors(points[layers == uni_layers[i]])
dist = dist.flatten()
ass_group = ass_group.flatten()
# exclude points that have more than max_dist distance to a neighbor
# new_member array:
# 1 = valid member candidate for existing group
# 0 = valid member candidate for new group
# -1 = excluded due to multiple candidates for a single group
new_member = (dist <= max_dist).astype(int)
# if multiple (valid!) points are assigned to the same group, take the nearest
valid = np.copy(new_member).astype(bool)
valid_ind = np.arange(len(valid))[valid]
for j, counts in enumerate(np.bincount(ass_group[valid])):
if counts > 1:
ass_group_ind = valid_ind[ass_group[valid] == j]
best_ind = ass_group_ind[np.argsort(dist[ass_group_ind])]
new_member[best_ind[1:]] = -1
# assign the group labels to the new members
layer_ind = ind[layers == uni_layers[i]]
old_layer_ind = layer_ind[new_member == 1]
labels[old_layer_ind] = ass_group[new_member == 1]
# give new group labels to points not registered so far
new_layer_ind = layer_ind[new_member == 0]
labels[new_layer_ind] = np.arange(labels.max()+1, labels.max()+1+len(new_layer_ind))
# new reference cloud are the centroids of the so far accumulated clusters
centroids = np.stack([np.mean(points[labels == label], axis=0) for label in range(labels.max()+1)])
return labels, centroids
def inverse_transform(
xy_centered_aligned,
xy_center,
transform_coeffs
):
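    """Invert the rigid alignment and the centering.
    transform_coeffs = (scale, rotation angle in degrees, translation vector);
    returns R(-angle) @ (xy_centered_aligned - t).T / scale + xy_center."""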
s = transform_coeffs[0]
rot = np.deg2rad(transform_coeffs[1])
t = transform_coeffs[2:]
rot_inv = np.array([[np.cos(rot), np.sin(rot)], [-np.sin(rot), np.cos(rot)]])
return rot_inv@(xy_centered_aligned-t).T/s + xy_center
def add_non_detected(
df_less: pd.DataFrame,
df_meta: pd.DataFrame
) -> pd.DataFrame:
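    """For every plant group in df_less, append rows for the observation dates on which
    the plant was not detected, back-transforming the aligned group centroid into UTM,
    pixel and GPS coordinates of the respective date. Returns only the appended rows."""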
dates = np.unique(df_meta["date"])
xy_center = df_meta["xy_center"].iloc[0]
df_add = pd.DataFrame()
for g_id in np.unique(df_less["group_id"]):
df_group = df_less[df_less["group_id"] == g_id]
missing_dates = dates[np.isin(dates, df_group["date"], invert=True)]
for d in missing_dates:
xy_centered_aligned = df_group["xy_centered_aligned_cm"].mean(axis=0) # group centroid [cm (UTM)]
cropline_y = df_group["y_cropline_rotated_cm"].iloc[0]
align_transform = df_meta[df_meta["date"] == d]["align_transform"].iloc[0]
gps_transform = df_meta[df_meta["date"] == d]["gps_transform"].iloc[0]
utm_transform = df_meta[df_meta["date"] == d]["utm_transform"].iloc[0]
#cr = df_meta[df_meta["date"] == d]["cover_ratio"].values
#mc = df_meta[df_meta["date"] == d]["align_median_confidence"].values
xy_backtrans = inverse_transform(xy_centered_aligned, xy_center, align_transform)
lonlat_backtrans = utm_transform(*xy_backtrans/100., inverse=True)
df_add = df_add.append(
dict([("field_id" , df_group["field_id"].iloc[0]),
("date" , d),
("group_id" , g_id),
("group_size" , df_group["group_size"].iloc[0]),
("group_cropline_id" , df_group["group_cropline_id"].iloc[0]),
("xy_cm" , xy_backtrans),
("xy_px" , list(rowcol(gps_transform, *lonlat_backtrans))),
("lonlat" , lonlat_backtrans),
("xy_centered_aligned_cm" , xy_centered_aligned),
("xy_centroid_centered_aligned_cm" , xy_centered_aligned),
("y_cropline_rotated_cm" , cropline_y),
("centroid_dist_cm" , 0.),
("detected" , False)]), ignore_index=True)
return df_add
def filterGoodPlantsByPercDet(
plants_df: pd.DataFrame,
meta_df: pd.DataFrame,
filter_coverratio: float,
perc_min_det: float
) -> pd.DataFrame:
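    """Keep only 'good' plant groups: groups with a direct-detection ratio of at least
    perc_min_det over the dates with cover ratio <= filter_coverratio and with one row
    for every observation date. Returns the merged plants/meta DataFrame restricted to
    these groups."""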
plants_meta_df = plants_df.merge(meta_df, on=["date", "field_id"], how="left")
n_dates = len(np.unique(meta_df["date"]))
# good plant group := at least perc_min_det direct detection ratio up to certain given cover ratio
good_idx = []
for f_id in np.unique(meta_df["field_id"]):
n_counts_below_cr_thres = np.sum(np.unique(plants_meta_df[plants_meta_df["field_id"]==f_id]["cover_ratio"]) <= filter_coverratio)
groups, counts = np.unique(plants_meta_df[(plants_meta_df["field_id"]==f_id) & (plants_meta_df["cover_ratio"] <= filter_coverratio) & (plants_meta_df["detected"] == True)]["group_id"], return_counts=True)
interest_groups = groups[counts/float(n_counts_below_cr_thres) >= perc_min_det]
candidates = plants_meta_df[(plants_meta_df["field_id"]==f_id) & (np.isin(plants_meta_df["group_id"], interest_groups))]
for g_id in interest_groups:
cand_group = candidates[candidates["group_id"]==g_id]
if len(cand_group)==n_dates:
good_idx.extend(cand_group.index)
good_df = plants_meta_df.loc[good_idx].sort_values(["field_id", "group_id", "date"])
return good_df
class SegmentSoilPlants(Task):
def __init__(
self,
image_path: str,
image_channels: List[str],
veg_index: str,
use_watershed: bool,
max_coverratio: float,
make_orthoimage: bool,
orthoimage_dir: str,
plot_result: bool,
plot_dir: str,
plot_format: str,
plot_dpi: int,
plot_cmap: str
):
super().__init__()
self.image_path = image_path
self.image_channels = np.asarray(image_channels)
self.veg_index = veg_index
self.use_watershed = use_watershed
self.max_coverratio = max_coverratio
self.make_orthoimage = make_orthoimage
self.orthoimage_dir = orthoimage_dir
self.plot_result = plot_result
self.plot_dir = plot_dir
self.plot_format = plot_format
self.plot_dpi = plot_dpi
self.plot_cmap = plot_cmap
def plot_raw(
self
):
logger.info(f"{self.name}-{self.date.date()} -> Plot raw image.")
if len(self.image_channels) < 4:
n_rows, n_cols = 1, len(self.image_channels)
else:
n_rows, n_cols = 2, len(self.image_channels)//2
fig, ax = plt.subplots(n_rows, n_cols, sharex=True, sharey=True, figsize=(self.width/500*n_cols, self.height/800*n_rows))
data = read_raster(self.image_path, self.image_channels, self.image_channels)
for (i, (a, c)) in enumerate(zip(ax.ravel(), self.image_channels)):
im = a.imshow(data[:,:,i], cmap=self.plot_cmap)
try:
fig.colorbar(im, ax=a)
except:
pass
a.set(xlabel='x', ylabel='y', title = c, aspect='equal')
fig.suptitle("raw image data")
fig.savefig(os.path.join(self.plot_dir, f"{self.field_id}_{self.date.date()}_01_channels"+self.plot_format), dpi=self.plot_dpi, bbox_inches='tight')
fig.clf()
del data, fig, ax, im
plt.close("all")
gc.collect()
def plot_segmentation(
self
):
logger.info(f"{self.name}-{self.date.date()} -> Plot segmentation image.")
fig = plt.figure(figsize=(3*self.width/500, self.height/500), tight_layout=True)
gridspec = gs.GridSpec(1,3,width_ratios=[2,1,2], figure=fig)
ax1 = fig.add_subplot(gridspec[0])
ax2 = fig.add_subplot(gridspec[1])
ax3 = fig.add_subplot(gridspec[2])
m = ax1.imshow(self.vi_image.astype(float), cmap=self.plot_cmap, vmin=-1, vmax=1)
cb = fig.colorbar(m, ax=ax1)
cb.set_label("VI")
ax1.set(title=f"{self.veg_index} image", xlabel="px", ylabel="px")
ax2.hist(self.vi_image[np.isfinite(self.vi_image)], bins=256, orientation="horizontal", color="C0")
ax2.set(title=f"{self.veg_index} value distribution", ylim=(-1,1), xlabel="counts", xscale="log")
if self.cover_ratio_est < 0.01:
ax2.axhline(self.thres, c='r', label=f"Threshold (99-percentile): {self.thres:.2f}")
else:
ax2.axhline(self.thres, c='r', label=f"Threshold (Otsu): {self.thres:.2f}")
ax2.legend()
ax3.imshow(self.seg_mask, cmap=self.plot_cmap)
ax3.set(title=f"Segmented plant area (cover ratio: {100.*self.cover_ratio:.2f} %)", xlabel="px", ylabel="px")
fig.savefig(os.path.join(self.plot_dir, f"{self.field_id}_{self.date.date()}_02_segmentation"+self.plot_format), dpi=self.plot_dpi, bbox_inches='tight')
fig.clf()
plt.close("all")
del fig, ax1, ax2, ax3
gc.collect()
def run(
self
):
try:
self.field_id, d = os.path.basename(self.image_path).replace(".tif", "").split("_")[:2]
year = int(d[:4])
month = int(d[4:6])
day = int(d[6:8])
self.date = dt.datetime(year, month, day)
        except Exception:
            logger.error(f"Wrong image path or no files found: {self.image_path}")
            raise
logger.info(f"{self.name}-{self.date.date()} -> Load image.")
raster = rio.open(self.image_path)
raster_meta = raster.meta
self.height, self.width = raster.shape
px_res = calc_m_per_px(raster_meta)*100. # cm/px
logger.info(f"{self.name}-{self.date.date()} -> Calculated resolution: {px_res:.4f} cm/px.")
del raster
gc.collect()
# calculate Vegetation Index which has values in [-1,1]
if self.veg_index == "NGRDI":
channels = read_raster(self.image_path, self.image_channels, ["R", "G"])
self.vi_image = ngrdiImage(R = channels[:,:,0], G = channels[:,:,1])
est_thres = 0
elif self.veg_index == "GLI":
channels = read_raster(self.image_path, self.image_channels, ["R", "G", "B"])
self.vi_image = gliImage(R = channels[:,:,0], G = channels[:,:,1], B = channels[:,:,2])
est_thres = 0.2
elif self.veg_index == "OSAVI":
channels = read_raster(self.image_path, self.image_channels, ["R", "NIR"])
self.vi_image = osaviImage(R = channels[:,:,0], NIR = channels[:,:,1], y_osavi = 0.6)
            est_thres = 0.25
        else:
            raise NotImplementedError(f"Unknown vegetation index: {self.veg_index}")
del channels
gc.collect()
# cover ratio estimation
self.cover_ratio_est = np.nansum(self.vi_image >= est_thres)/np.sum(np.isfinite(self.vi_image))
logger.info(f"{self.name}-{self.date.date()} -> Use {self.veg_index} Vegetation Index. Cover ratio estimation: {self.cover_ratio_est*100.:.2f} %")
if self.cover_ratio_est <= self.max_coverratio:
# calculate threshold with Otsu's method
if self.cover_ratio_est < 0.01:
self.thres = np.percentile(self.vi_image[np.isfinite(self.vi_image)], 99)
logger.warn(f"{self.name}-{self.date.date()} -> Estimated cover ratio below 1 % -> Take 99-percentile as threshold: {self.thres:.2f}")
else:
self.thres = threshold_otsu(self.vi_image[np.isfinite(self.vi_image)])
logger.info(f"{self.name}-{self.date.date()} -> Otsu threshold: {self.thres:.2f}")
# segmentation
if self.use_watershed:
logger.info(f"{self.name}-{self.date.date()} -> Segment soil and plants with watershed method.")
markers = np.zeros_like(self.vi_image, dtype=np.uint8)
markers[self.vi_image <= self.thres] = 1 # soil
markers[self.vi_image > self.thres] = 2 # plant
self.seg_mask = (watershed(self.vi_image, markers) - 1).astype(bool) # True -> plant, False -> soil
del markers
else:
logger.info(f"{self.name}-{self.date.date()} -> Segment soil and plants without watershed method.")
self.seg_mask = np.zeros_like(self.vi_image, dtype=bool) # True -> plant, False -> soil
self.seg_mask[self.vi_image > self.thres] = True # plant
self.cover_ratio = np.sum(self.seg_mask)/np.sum(np.isfinite(self.vi_image))
logger.info(f"{self.name}-{self.date.date()} -> Cover ratio recalculated: {self.cover_ratio*100.:.2f} %")
if self.plot_result:
makeDirectory(self.plot_dir)
self.plot_segmentation()
gc.collect()
else:
logger.warn(f"{self.name}-{self.date.date()} -> Estimated cover ratio ({self.cover_ratio_est*100.:.2f} %) is too high to extract plants -> Skip plot.")
self.seg_mask = []
self.cover_ratio = self.cover_ratio_est
self.save(obj=self.seg_mask, name="segmentation_mask", type_='pickle')
self.save(obj=self.cover_ratio, name="cover_ratio", type_='json')
self.save(obj=self.field_id, name="field_id", type_='json')
self.save(obj=self.date, name="date", type_='pickle')
self.save(obj=raster_meta, name="raster_meta", type_='pickle')
self.save(obj=px_res, name="px_resolution", type_='json')
        if self.make_orthoimage and len(self.seg_mask) > 0:
makeDirectory(self.orthoimage_dir)
logger.info(f"{self.name}-{self.date.date()} -> Save segmentation mask as orthoimage.")
write_onechannel_raster(os.path.join(self.orthoimage_dir, f"{self.field_id}_{self.date.date()}_segmentation.tif"),
np.uint8(self.seg_mask*255),
raster_meta,
"uint8")
# plot raw channel information
if self.plot_result:
makeDirectory(self.plot_dir)
self.plot_raw()
gc.collect()
class FitGrowFunction(Task):
def __init__(
self,
plot_result: bool,
plot_dir: str,
plot_format: str,
plot_dpi: int
):
super().__init__()
self.plot_result = plot_result
self.plot_dir = plot_dir
self.plot_format = plot_format
self.plot_dpi = plot_dpi
def plot(
self
):
logger.info(f"{self.name} -> Plot Grow function.")
g, lg, xg, d, ld, xd = self.fit
cd = np.linspace(0, self.cum_days[-1], 1000)
cal_days = [self.observation_dates[0] + dt.timedelta(days=x) for x in self.cum_days]
fig, ax = plt.subplots()
ax.scatter(self.cum_days, self.cover_ratios, label="observations")
if d > 0:
label = r"grow function fit: $f(x)=\frac{g}{1+e^{-\lambda_g(x-x_g)}}-\frac{d}{1+e^{-\lambda_d(x-x_d)}}$"+f"\n$g$={g:.4g}, $\\lambda_g$={lg:.4g}, $x_g$={xg:.4g}\n$d$={d:.4g}, $\\lambda_d$={ld:.4g}, $x_d$={xd:.4g}"
else:
label = r"grow function fit: $f(x)=\frac{g}{1+e^{-\lambda_g(x-x_g)}}$"+f"\n$g$={g:.4g}, $\\lambda_g$={lg:.4g}, $x_g$={xg:.4g}"
ax.plot(cd, growFunction(cd, *self.fit), c="r", label=label)
ax.format_xdata = mdates.DateFormatter('%Y-%m-%d')
ax.set(xlabel="days", ylabel="cover ratio")
ax.legend()
ax.grid()
ax_dt = ax.twiny()
ax_dt.set_xlim(map(lambda cd: self.observation_dates[0] + dt.timedelta(days=cd), ax.get_xlim()))
ax_dt.set_xlabel("calendar date")
ax_dt.set_xticks(cal_days)
ax_dt.tick_params(axis='x', labelrotation=90)
ax.set(title=f"{self.field_id}: grow function fit")
savename = os.path.join(self.plot_dir, f"{self.field_id}_grow_function"+self.plot_format)
fig.savefig(savename, dpi=self.plot_dpi, bbox_inches='tight')
plt.close("all")
del fig, ax, ax_dt
def run(
self,
reduced_results: List[Dict[str, Dict]]
):
cover_ratios = []
observation_dates = []
for r in reduced_results:
cover_ratios.append(r["result"]["cover_ratio"])
observation_dates.append(r["result"]["date"])
observation_dates = np.asarray(observation_dates)
cover_ratios = np.asarray(cover_ratios)
sort = np.argsort(observation_dates)
self.observation_dates = observation_dates[sort]
self.cover_ratios = cover_ratios[sort]
self.cum_days = cumDays(self.observation_dates)
self.field_id = reduced_results[0]["result"]["field_id"]
try:
self.fit, self.cov = curve_fit(growFunction, self.cum_days, self.cover_ratios,
p0=[0.8, 0.1, self.cum_days[-1]/3, 0.3, 0.1, 2*self.cum_days[-1]/3],
maxfev=1000000)
# calculate corrected cover ratios with grow function
#gf_cover_ratio = growFunction(self.cum_days, *self.fit)
#self.save(obj=gf_cover_ratio, name="grow_function_cover_ratios", type_='pickle')
#self.save(obj=self.observation_dates, name="dates", type_='pickle')
logger.info(f"{self.name} -> Grow function fitted")
if self.plot_result:
makeDirectory(self.plot_dir)
self.plot()
gc.collect()
except Exception as e:
self.fit = np.nan
self.cov = np.nan
logger.warning(f"{self.name} -> Grow function could not be fitted. Error: {e}")
self.save(obj=self.fit, name="grow_function_fit_params", type_='pickle')
self.save(obj=self.cov, name="grow_function_cov_matrix", type_='pickle')
class ExtractPlantPositions(Task):
def __init__(
self,
min_peak_distance: float,
peak_threshold: float,
gauss_sigma_bounds: Tuple[float, float],
use_growfunction: bool,
make_orthoimage: bool,
orthoimage_dir: str,
plot_result: bool,
plot_dir: str,
plot_format: str,
plot_dpi: int,
plot_cmap: str
):
super().__init__()
self.min_peak_distance = min_peak_distance
self.peak_threshold = peak_threshold
self.gauss_sigma_bounds = gauss_sigma_bounds
self.use_growfunction = use_growfunction
self.make_orthoimage = make_orthoimage
self.orthoimage_dir = orthoimage_dir
self.plot_result = plot_result
self.plot_dir = plot_dir
self.plot_format = plot_format
self.plot_dpi = plot_dpi
self.plot_cmap = plot_cmap
def plot_gauss_blur(
self
):
logger.info(f"{self.name}-{self.date.date()} -> Plot Gaussian blur image.")
fig, ax = plt.subplots(figsize=(self.width/500, self.height/500))
im = ax.imshow(self.blurred, cmap='gray')
ax.set(title=f"Gaussian blur ($\sigma$ = {self.sigma:.2f} px)", aspect='equal', xlabel='x [cm]', ylabel='y [cm]')
fig.savefig(os.path.join(self.plot_dir, f"{self.field_id}_{self.date.date()}_03_gauss_blur"+self.plot_format), dpi=self.plot_dpi, bbox_inches='tight')
fig.clf()
plt.close("all")
del fig, ax
def plot_peaks(
self
):
logger.info(f"{self.name}-{self.date.date()} -> Plot peak position image.")
fig, ax = plt.subplots(figsize=(self.width/500, self.height/500))
ax.scatter(*self.peaks.T[::-1], color='red', s=2, label=f"{len(self.peaks)} peaks")
ax.imshow(self.blurred, cmap=self.plot_cmap)
ax.set(title=f"Peaks (min. distance = {self.min_peak_distance} cm = {self.min_peak_distance/self.px_res:.2f} px)", aspect='equal', xlabel='x [px]', ylabel='y [px]')
ax.legend()
fig.savefig(os.path.join(self.plot_dir, f"{self.field_id}_{self.date.date()}_04_peaks"+self.plot_format), dpi=self.plot_dpi, bbox_inches='tight')
fig.clf()
plt.close("all")
del fig, ax
def run(
self,
segmentation_mask: np.ndarray,
#grow_function_cover_ratios: np.array,
#dates: np.array,
px_resolution: float,
cover_ratio: float,
date: dt.datetime,
field_id: str,
raster_meta: Dict
):
self.date = date
self.field_id = field_id
self.px_res = px_resolution
if len(segmentation_mask) > 0:
# apply gaussian filter with scaled sigma
if self.use_growfunction:
raise NotImplementedError()
#cover_ratio = grow_function_cover_ratios[dates == date]
#logger.info(f"{self.name}-{self.date.date()} -> Use cover ratio from grow function fit. ({100.*cover_ratio:.2f} %)")
else:
logger.info(f"{self.name}-{self.date.date()} -> Use standard cover ratio. ({100.*cover_ratio:.2f} %)")
self.sigma = (self.gauss_sigma_bounds[0] + cover_ratio*np.diff(self.gauss_sigma_bounds)[0]) / self.px_res
logger.info(f"{self.name}-{self.date.date()} -> Blurring with sigma = {self.sigma*px_resolution:.2f} cm = {self.sigma:.2f} px.")
self.blurred = gaussian(segmentation_mask.astype(np.float32), sigma=self.sigma)
# detect peaks
logger.info(f"{self.name}-{self.date.date()} -> Detect peaks with threshold {self.peak_threshold} and min. distance = {self.min_peak_distance} cm = {self.min_peak_distance/self.px_res:.2f} px.")
self.peaks = peak_local_max(self.blurred, min_distance=int(np.round(self.min_peak_distance/self.px_res)), threshold_abs=self.peak_threshold, exclude_border=False)
# convert peak position from pixel to cm coordinates with UTM coordinate transformation
utm_peaks, utm_transform = px_to_utm(point_cloud=self.peaks, raster_meta=raster_meta)
utm_peaks *= 100 # m * 100 = cm
n_peaks = len(self.peaks)
self.height, self.width = self.blurred.shape
logger.info(f"{self.name}-{self.date.date()} -> {n_peaks} peaks detected.")
if (self.make_orthoimage):
makeDirectory(self.orthoimage_dir)
logger.info(f"{self.name}-{self.date.date()} -> Save Gauss blurred orthoimage.")
write_onechannel_raster(os.path.join(self.orthoimage_dir, f"{self.field_id}_{self.date.date()}_blurred.tif"),
self.blurred,
raster_meta,
"float32")
logger.info(f"{self.name}-{self.date.date()} -> Export found peak positions as KML file.")
kml = simplekml.Kml()
for (lon, lat) in np.asarray(xy(raster_meta["transform"], *self.peaks.T)).T:
kml.newpoint(coords=[(lon, lat)])
kml.save(os.path.join(self.orthoimage_dir, f"{self.field_id}_{self.date.date()}_peaks.kml"))
else:
logger.warn(f"{self.name}-{self.date.date()} -> No segmentation mask due to large cover ratio -> Skip plot.")
utm_peaks = np.array([])
# calculate UTM zone
lon, lat = np.asarray(xy(raster_meta["transform"], raster_meta["height"]//2, raster_meta["width"]//2))
utm_zone = int(np.floor((lon/360)*60+31))
utm_transform = pyproj.Proj(proj='utm', zone=utm_zone, ellps='WGS84')
self.save(obj=utm_peaks, name="plant_positions", type_="pickle")
self.save(obj=utm_transform, name="utm_transform", type_="pickle")
# plot blurred image and contrast image with peak positions
if (len(segmentation_mask) > 0) and self.plot_result:
makeDirectory(self.plot_dir)
self.plot_gauss_blur()
self.plot_peaks()
gc.collect()
class LoadPeaks(Task):
def __init__(
self,
field_id: str,
plot_result: bool,
plot_dir: str,
plot_format: str,
plot_dpi: int,
plot_cmap: str
):
super().__init__()
self.field_id = field_id
self.plot_result = plot_result
self.plot_dir = plot_dir
self.plot_format = plot_format
self.plot_dpi = plot_dpi
self.plot_cmap = plot_cmap
def plot(
self
):
logger.info(f"{self.name} -> Plot raw peaks image.")
fig, ax = plt.subplots()
ax.scatter(*self.C.T, s=2, alpha=0.8, c=self.layers, cmap=self.plot_cmap)
ax.set(title=f"{self.field_id}\nraw points", xlabel='x [cm]', ylabel='y [cm]', aspect='equal')
fig.savefig(os.path.join(self.plot_dir, f"{self.field_id}_01_raw"+self.plot_format), dpi=self.plot_dpi, bbox_inches='tight')
fig.clf()
plt.close("all")
del fig, ax
def run(
self,
reduced_results: List[Dict[str, Dict]]
):
cover_ratios, dates, gps_transforms, px_resolutions, field_ids, peaks, utm_transforms, segmentation_masks = [], [], [], [], [], [], [], []
for r in reduced_results:
try:
if len(r["config"].keys()) == 1:
cover_ratios.append(r["result"]["cover_ratio"])
dates.append(r["result"]["date"])
gps_transforms.append(r["result"]["raster_meta"]["transform"])
px_resolutions.append(r["result"]["px_resolution"])
field_ids.append(r["result"]["field_id"])
segmentation_masks.append(r["result"]["segmentation_mask"])
else:
peaks.append(r["result"]["plant_positions"])
utm_transforms.append(r["result"]["utm_transform"])
except:
logger.error(r)
assert len(np.unique(field_ids)) == 1, logger.error(f"{self.name} -> Multiple field IDs!")
assert np.unique(field_ids)[0] == self.field_id, logger.error(f"{self.name} -> Wrong field ID!")
cover_ratios = np.asarray(cover_ratios)
px_resolutions = np.asarray(px_resolutions)
dates = pd.DatetimeIndex(dates)
        P = np.asarray(peaks, dtype=object)  # ragged: one array of peak positions per date
logger.info(f"{self.name} -> Load data for {len(dates)} dates.")
# sort dates and layers by cover ratio
cr_sort = np.argsort(cover_ratios)
P = P[cr_sort]
dates = dates[cr_sort]
segmentation_masks = [segmentation_masks[c] for c in cr_sort]
gps_transforms = [gps_transforms[c] for c in cr_sort]
px_resolutions = px_resolutions[cr_sort]
cover_ratios = np.sort(cover_ratios)
n_layers = len(dates)
logger.info(f"{self.name} -> Sorted dates and layers by cover ratio. Layers: {cr_sort}, dates: {dates}, cover ratios: {cover_ratios}")
# dates for printing (e.g. in plots)
printdates = dates.format(formatter=lambda x: x.strftime('%m-%d'))
emptymask = [len(p)>0 for p in P]
logger.info(f"{self.name} -> Peaks for {np.sum(emptymask)} dates available.")
# stack point clouds and save layers
self.C = np.vstack(P[emptymask])
self.layers = np.repeat(np.arange(len(P)), np.array([len(p) for p in P]))
self.save(obj=self.C, name="point_cloud", type_="pickle")
self.save(obj=self.layers, name="layers", type_="pickle")
self.save(obj=cover_ratios, name="cover_ratios", type_="pickle")
self.save(obj=self.field_id, name="field_id", type_="json")
self.save(obj=printdates, name="printdates", type_="pickle")
self.save(obj=dates, name="dates", type_="pickle")
self.save(obj=gps_transforms, name="gps_transforms", type_="pickle")
self.save(obj=px_resolutions, name="px_resolutions", type_="pickle")
self.save(obj=utm_transforms, name="utm_transforms", type_="pickle")
self.save(obj=segmentation_masks, name="segmentation_masks", type_="pickle")
# plot raw point information
if self.plot_result:
makeDirectory(self.plot_dir)
self.plot()
gc.collect()
class AlignPoints(Task):
def __init__(
self,
max_centroid_distance_cpd: float,
max_centroid_distance_group: float,
make_orthoimage: bool,
orthoimage_dir: str,
plot_result: bool,
plot_dir: str,
plot_format: str,
plot_dpi: int,
plot_cmap: str
):
super().__init__()
self.max_centroid_distance_cpd = max_centroid_distance_cpd
self.max_centroid_distance_group = max_centroid_distance_group
self.make_orthoimage = make_orthoimage
self.orthoimage_dir = orthoimage_dir
self.plot_result = plot_result
self.plot_dir = plot_dir
self.plot_format = plot_format
self.plot_dpi = plot_dpi
self.plot_cmap = plot_cmap
@staticmethod
def transform(
coords: np.array,
T: np.array
) -> np.array:
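        """Apply the rigid transform T = (s, R, t) to the coordinates: s * X @ R + t."""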
return T[0]*coords@T[1] + T[2]
def plot_aligned(
self
):
logger.info(f"{self.name} -> Plot aligned peak position image.")
fig, ax = plt.subplots()
ax.scatter(*self.P_aligned.T, s=2, alpha=0.8, c=self.layers, cmap=self.plot_cmap)
ax.set(title=f"{self.field_id}\naligned points\naligned dates: {self.aligned_dates}", xlabel='x - mean [cm]', ylabel='y - mean [cm]', aspect='equal')
fig.savefig(os.path.join(self.plot_dir, f"{self.field_id}_02_aligned"+self.plot_format), dpi=self.plot_dpi, bbox_inches='tight')
fig.clf()
plt.close("all")
del fig, ax
def plot_confidence(
self
):
logger.info(f"{self.name} -> Plot alignment mean confidence.")
fig, ax = plt.subplots()
ax.scatter(100*self.cover_ratios, 100*self.median_conf)
ax.set(xlim=(0,100), ylim=(0,100), title=f"{self.field_id}\n", xlabel='cover ratio [%]', ylabel='median alignment confidence [%]', aspect='equal')
ax.grid()
fig.savefig(os.path.join(self.plot_dir, f"{self.field_id}_03_cr_vs_conf"+self.plot_format), dpi=self.plot_dpi, bbox_inches='tight')
fig.clf()
plt.close("all")
del fig, ax
def run(
self,
point_cloud: np.ndarray,
layers: np.array,
cover_ratios: np.array,
printdates: np.array,
field_id: str,
utm_transforms: List
):
self.field_id = field_id
self.layers = layers
self.printdates = printdates
self.cover_ratios = cover_ratios
uni_layers = np.sort(np.unique(layers))
n_layers = len(self.cover_ratios)
# centralize point clouds
# calculate centroid of all points in UTM coordinates
P_mean = point_cloud.mean(axis=0)
# apply on point cloud
P_c = point_cloud - P_mean
scaF = np.ones(n_layers)
rotA = np.zeros(n_layers)
traV = np.zeros((n_layers, 2))
self.median_conf = np.nan*np.ones(n_layers)
self.P_aligned = P_c.copy()
P_centroid = P_c[layers == uni_layers[0]]
self.P_aligned[layers == uni_layers[0]] = P_centroid
aligned_layers = []
for l in uni_layers:
if l != 0:
X = P_centroid
Y = P_c[layers == l]
# filter points with no neighbours inside max_dist radius
nnX = NearestNeighbors(n_neighbors=1, n_jobs=-1)
nnY = NearestNeighbors(n_neighbors=1, n_jobs=-1)
nnX.fit(X)
nnY.fit(Y)
distXY, _ = nnY.kneighbors(X)
distYX, _ = nnX.kneighbors(Y)
X_filt = X[(distXY <= self.max_centroid_distance_cpd).flatten()]
Y_filt = Y[(distYX <= self.max_centroid_distance_cpd).flatten()]
# Rigid Transformation: T(X) = s*R@X + t
# s: scaling factor
# R: rotation matrix
# t: translation vector
# <NAME>, <NAME>: "Point Set Registration: Coherent Point Drift"
# https://arxiv.org/pdf/0905.2635.pdf
# registration with filtered points
logger.info(f"{self.name} -> Layer {l} of {len(uni_layers)} -> Try to align {len(Y_filt)} of {len(Y)} points to {len(X_filt)} of {len(X)} centroids. Maximum centroid distance: {self.max_centroid_distance_cpd} cm.")
reg = RigidRegistration(X=X_filt, Y=Y_filt) # X = target, Y = source
_, T = reg.register()
self.median_conf[l] = np.median(np.max(reg.P, axis=1))
# if registration was confident (median confidence above 68%) accept, else discard
#if self.median_conf[l] > 0.68:
scaF[l] = T[0]
rotA[l] = np.rad2deg(np.arccos(T[1][0,0]))
traV[l] = T[2]
self.P_aligned[layers == l] = self.transform(Y, T)
aligned_layers.append(l)
logger.info(f"{self.name} -> Layer {l} of {len(uni_layers)} alignable layers aligned. Scaling factor: {scaF[l]}. Rotation angle: {rotA[l]} °. Translation vector: {traV[l]} cm. Median confidence: {100.*self.median_conf[l]:.2f} %")
#else:
# logger.warn(f"{self.name} -> Layer {l} of {len(uni_layers)} has too low median confidence ({100.*self.median_conf[l]:.2f} %). Layer will not be aligned.")
#if l <= self.max_reference_layer:
logger.info(f"{self.name} -> Layer {l} of {len(uni_layers)} -> Group with maximum centroid distance: {self.max_centroid_distance_group} cm.")
_, P_centroid = group_points(self.P_aligned[self.layers <= l],
self.layers[self.layers <= l],
max_dist=self.max_centroid_distance_group)
logger.info(f"{self.name} -> All points aligned.")
self.save(obj=self.P_aligned, name="point_cloud_aligned", type_="pickle")
self.save(obj=P_mean, name="point_cloud_mean", type_="pickle")
self.save(obj=(scaF, rotA, traV, self.median_conf), name="align_transform", type_="pickle")
self.aligned_dates = np.asarray(self.printdates)[aligned_layers].tolist()
if self.plot_result:
makeDirectory(self.plot_dir)
self.plot_aligned()
self.plot_confidence()
gc.collect()
if (self.make_orthoimage):
makeDirectory(self.orthoimage_dir)
logger.info(f"{self.name} -> Export aligned point cloud as KML file.")
kml = simplekml.Kml()
for l in uni_layers:
folder = kml.newfolder(name=self.printdates[l])
for (lon, lat) in np.asarray(utm_transforms[l](*((self.P_aligned[self.layers == l]+P_mean)/100.).T, inverse=True)).T:
folder.newpoint(coords=[(lon, lat)])
kml.save(os.path.join(self.orthoimage_dir, f"{self.field_id}_peaks_aligned.kml"))
class AlignCroplines(Task):
def __init__(
self,
plot_result: bool,
plot_dir: str,
plot_format: str,
plot_dpi: int,
plot_cmap: str
):
super().__init__()
self.plot_result = plot_result
self.plot_dir = plot_dir
self.plot_format = plot_format
self.plot_dpi = plot_dpi
self.plot_cmap = plot_cmap
@staticmethod
def rotation2d(
deg: float
) -> np.array:
a = np.deg2rad(deg)
return np.array([[np.cos(a), -np.sin(a)],
[np.sin(a), np.cos(a)]])
def findHoughAnglesNested(
self,
image: np.ndarray,
i_max: int,
steps: int,
bin_tolerance: int
) -> Tuple[np.array, np.array, np.array, np.array, np.array]:
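        """Estimate the dominant cropline angle via a nested-intervals Hough search:
        repeatedly run hough_line on a shrinking angle window centred on the histogram
        peak of the detected line angles until all peak angles coincide or i_max
        iterations are reached. Returns the final peak angles and distances together
        with the last Hough accumulator (h, theta, d)."""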
test_angles = np.linspace(-np.pi/2, np.pi/2, steps, endpoint=False)
mean, std = 0, np.pi/2
for i in range(i_max):
logger.info(f"{self.name} -> Iteration {i}/{i_max} -> Perform Hough transform for {steps} angles in [{np.rad2deg(test_angles.min())}, {np.rad2deg(test_angles.max())}]°.")
h, theta, d = hough_line(image, theta=test_angles)
_, angles, dists = hough_line_peaks(h, theta, d)
hist, bins = np.histogram(angles, bins=steps, range=(test_angles.min(), test_angles.max()))
mean = np.mean(angles)
std = np.std(angles, ddof=1)
a_min = bins[np.max((0, np.argmax(hist)-bin_tolerance))]
a_max = bins[np.min((steps, np.argmax(hist)+1+bin_tolerance))]
test_angles = np.linspace(a_min, a_max, steps)
if np.all(np.mean(angles) == angles):
logger.info(f"{self.name} -> Iteration {i}/{i_max} -> Terminate! Best alpha = {np.rad2deg(mean):.4f} °.")
return (angles, dists, h, theta, d)
else:
logger.info(f"{self.name} -> Iteration {i}/{i_max} -> alpha = ({np.rad2deg(mean):.4f} +/- {np.rad2deg(std):.4f}) °.")
logger.info(f"{self.name} -> Best alpha after {i_max} iterations = ({np.rad2deg(mean):.4f} +/- {np.rad2deg(std):.4f}) °.")
return (angles, dists, h, theta, d)
def plot(
self
):
logger.info(f"{self.name} -> Plot cropline rotation.")
fig, axes = plt.subplots(1, 2, figsize=(12, 6))
ax = axes.ravel()
ax[0].imshow(self.hough_img, cmap=self.plot_cmap)
ax[0].set_title('image')
ax[1].imshow(self.hough_img, cmap=self.plot_cmap)
ax[1].set_ylim((self.hough_img.shape[0], 0))
ax[1].set_title('detected lines')
for angle, dist in zip(self.angles, self.dists):
(x0, y0) = dist * np.array([np.cos(angle), np.sin(angle)])
ax[1].axline((x0, y0), slope=np.tan(angle + np.pi/2))
fig.savefig(os.path.join(self.plot_dir, f"{self.field_id}_04_rot_angle"+self.plot_format), dpi=self.plot_dpi, bbox_inches='tight')
fig.clf()
plt.close("all")
del fig, ax
def run(
self,
point_cloud_aligned: np.ndarray,
printdates: np.array,
field_id: str
):
self.field_id = field_id
point_cloud = point_cloud_aligned
self.printdates = printdates
# Hough transform with fixed resolution (cm/px)
res = 1 # cm/px
logger.info(f"{self.name} -> Bin point cloud into image with resolution {res} cm/px.")
self.hough_img, _, _ = np.histogram2d(*point_cloud.T,
bins=[
np.arange(point_cloud[:,0].min(), point_cloud[:,0].max(), res),
np.arange(point_cloud[:,1].min(), point_cloud[:,1].max(), res)
])
# perform iterative Hough line detection with nested intervals method
i_max = 50
steps = 180
bin_tolerance = 2
self.angles, self.dists, self.h, self.theta, self.d = self.findHoughAnglesNested(self.hough_img, i_max, steps, bin_tolerance)
self.alpha_best = np.rad2deg(np.mean(self.angles))
self.alpha_best_std = np.rad2deg(np.std(self.angles, ddof=1))
# median cropline distance
d_cl_median = np.median(np.diff(np.sort(self.dists))) * res # px * (cm/px) = cm
coords_rot = (self.rotation2d(self.alpha_best)@point_cloud.T).T
logger.info(f"{self.name} -> Croplines rotated with best angle: ({self.alpha_best:.4f} +/- {self.alpha_best_std:.4f}) °. Median cropline distance: {d_cl_median:.4f} cm.")
self.save(obj=coords_rot, name="point_cloud_rotated", type_="pickle")
self.save(obj=self.alpha_best, name="rotation_angle", type_="json")
self.save(obj=d_cl_median, name="median_cropline_distance", type_="json")
if self.plot_result:
makeDirectory(self.plot_dir)
self.plot()
gc.collect()
class FindCroplines(Task):
def __init__(
self,
plot_result: bool,
plot_dir: str,
plot_format: str,
plot_dpi: int
):
super().__init__()
self.plot_result = plot_result
self.plot_dir = plot_dir
self.plot_format = plot_format
self.plot_dpi = plot_dpi
def plot_peaks(
self
):
logger.info(f"{self.name} -> Plot cropline peak positions.")
fig, ax = plt.subplots()
ax.plot(self.y_test, self.incl_points_sum)
ax.scatter(self.y_test[self.peak_pos], self.incl_points_sum[self.peak_pos], s=20, c='r', label=f"{len(self.peak_pos)} peaks")
ax.set(xlabel='position of window center (y-coords of rotated points)', ylabel='points inside window',
xlim=(self.Y.min()-self.scan_window, self.Y.max()+self.scan_window), ylim=(0,None))
ax.legend()
ax.set(title=f"{self.field_id}\ncropline peaks")
fig.savefig(os.path.join(self.plot_dir, f"{self.field_id}_05_cropline_peaks"+self.plot_format), dpi=self.plot_dpi, bbox_inches='tight')
fig.clf()
plt.close("all")
del fig, ax
def plot_croplines(self):
logger.info(f"{self.name} -> Plot rotated points with marked croplines.")
fig, ax = plt.subplots()
ax.scatter(*self.point_cloud.T, s=2, alpha=1, c="C0")
ax.hlines(self.croplines_ypos, xmin = self.point_cloud[:,0].min(), xmax = self.point_cloud[:,0].max(), color='r')
ax.set(title=f"{self.field_id}\nrotated points with croplines", xlabel='x - mean (rotated)', ylabel='y - mean (rotated)', aspect='equal')
fig.savefig(os.path.join(self.plot_dir, f"{self.field_id}_06_croplines"+self.plot_format), dpi=self.plot_dpi, bbox_inches='tight')
fig.clf()
plt.close("all")
del fig, ax
def run(
self,
point_cloud_rotated: np.ndarray,
field_id: str,
median_cropline_distance: float,
px_resolutions: np.ndarray
):
self.field_id = field_id
self.point_cloud = point_cloud_rotated
self.Y = self.point_cloud[:,1]
scan_resolution = 10000 # steps per cropline
self.scan_window = median_cropline_distance / 10
self.scan_precision = median_cropline_distance / scan_resolution
logger.info(f"{self.name} -> Given cropline distance estimate of {median_cropline_distance} cm results in a scan window of {self.scan_window} cm and precision of {self.scan_precision} cm.")
self.y_test = np.arange(self.Y.min()-self.scan_window, self.Y.max()+self.scan_window, self.scan_precision)
incl_points_sum = []
for y_center in self.y_test:
incl_points_sum.append(np.sum((self.Y >= y_center-(self.scan_window/2)) & (self.Y <= y_center+(self.scan_window/2))))
self.incl_points_sum = np.asarray(incl_points_sum)
self.peak_pos = find_peaks(self.incl_points_sum, distance=int(0.75*scan_resolution))[0]
self.croplines_ypos = self.y_test[self.peak_pos]
logger.info(f"{self.name} -> {len(self.croplines_ypos)} croplines found: {self.croplines_ypos}")
self.save(obj=self.croplines_ypos, name="croplines_ypos", type_="pickle")
if self.plot_result:
makeDirectory(self.plot_dir)
self.plot_peaks()
self.plot_croplines()
gc.collect()
class FilterWeed(Task):
def __init__(
self,
threshold_factor: float,
plot_result: bool,
plot_dir: str,
plot_format: str,
plot_dpi: int
):
super().__init__()
self.threshold_factor = threshold_factor
self.plot_result = plot_result
self.plot_dir = plot_dir
self.plot_format = plot_format
self.plot_dpi = plot_dpi
@staticmethod
def find_nearest(
array: np.array,
values: np.array
) -> np.array:
indices = np.abs(np.subtract.outer(array, values)).argmin(axis=0)
return array[indices]
def plot(
self
):
logger.info(f"{self.name} -> Plot point cloud with masked weed.")
fig, ax = plt.subplots()
ax.scatter(*self.point_cloud_aligned_filtered.T, s=5, alpha=1, label="valid")
ax.scatter(*self.point_cloud_aligned[~self.weedmask].T, s=5, alpha=1, color='r', label=f"Weed ({self.weed_percentage:.2f} %)")
ax.set(title=f"{self.field_id}\nmasked weed", xlabel='x - mean [cm]', ylabel='y - mean [cm]', aspect='equal')
ax.legend()
fig.savefig(os.path.join(self.plot_dir, f"{self.field_id}_07_weed_mask"+self.plot_format), dpi=self.plot_dpi, bbox_inches='tight')
fig.clf()
plt.close("all")
del fig, ax
def run(
self,
point_cloud_rotated: np.ndarray,
point_cloud_aligned: np.ndarray,
point_cloud: np.ndarray,
layers: np.array,
croplines_ypos: np.array,
field_id: str
):
self.field_id = field_id
self.point_cloud_aligned = point_cloud_aligned
median_line_distance = np.median(np.diff(croplines_ypos))
next_line_distance = np.abs(point_cloud_rotated[:,1] - self.find_nearest(croplines_ypos, point_cloud_rotated[:,1]))
logger.info(f"{self.name} -> Calculated median seeding line distance: {median_line_distance:.2f} cm. Masking weed with threshold factor {self.threshold_factor}.")
self.weedmask = next_line_distance <= self.threshold_factor*median_line_distance
self.weed_percentage = 100*np.sum(~self.weedmask)/len(point_cloud_aligned)
if self.weed_percentage < 30:
logger.info(f"{self.name} -> {np.sum(~self.weedmask)} points masked as weed ({self.weed_percentage:.2f} %).")
else:
logger.warn(f"{self.name} -> High percentage of points masked as weed ({self.weed_percentage:.2f} %). There might be an error in the analysis.")
self.point_cloud_aligned_filtered, point_cloud_rotated_filtered, point_cloud_filtered, layers_filtered = point_cloud_aligned[self.weedmask], point_cloud_rotated[self.weedmask], point_cloud[self.weedmask], layers[self.weedmask]
self.save(obj=self.weedmask, name="weedmask", type_="pickle")
self.save(obj=self.point_cloud_aligned_filtered, name="point_cloud_aligned_weedfiltered", type_="pickle")
self.save(obj=point_cloud_rotated_filtered, name="point_cloud_rotated_weedfiltered", type_="pickle")
self.save(obj=point_cloud_filtered, name="point_cloud_weedfiltered", type_="pickle")
self.save(obj=layers_filtered, name="layers_weedfiltered", type_="pickle")
if self.plot_result:
makeDirectory(self.plot_dir)
self.plot()
gc.collect()
class GroupPoints(Task):
def __init__(
self,
max_centroid_distance: float
):
super().__init__()
self.max_centroid_distance = max_centroid_distance
def run(
self,
point_cloud_weedfiltered: np.array,
point_cloud_aligned_weedfiltered: np.array,
point_cloud_rotated_weedfiltered: np.array,
layers_weedfiltered: np.array
):
labels, centroids = group_points(point_cloud_aligned_weedfiltered, layers_weedfiltered, max_dist=self.max_centroid_distance)
labels_dist = np.bincount(np.bincount(labels[labels>=0]))[1:]
logger.info(f"{self.name} -> {labels.max()+1} groups found with distribution {labels_dist}, {np.sum(labels==-1)}/{len(labels)} points discarded.")
# filter discarded points out
point_cloud_aligned_weedfiltered = point_cloud_aligned_weedfiltered[labels>=0]
point_cloud_rotated_weedfiltered = point_cloud_rotated_weedfiltered[labels>=0]
point_cloud_weedfiltered = point_cloud_weedfiltered[labels>=0]
layers_weedfiltered = layers_weedfiltered[labels>=0]
labels = labels[labels>=0]
self.save(obj=point_cloud_weedfiltered, name="point_cloud_weedfiltered_grouped", type_="pickle")
self.save(obj=point_cloud_aligned_weedfiltered, name="point_cloud_aligned_weedfiltered_grouped", type_="pickle")
self.save(obj=point_cloud_rotated_weedfiltered, name="point_cloud_rotated_weedfiltered_grouped", type_="pickle")
self.save(obj=labels, name="group_labels", type_="pickle")
self.save(obj=layers_weedfiltered, name="layers_weedfiltered_grouped", type_="pickle")
class SortGroupLabels(Task):
def __init__(
self,
plot_result: bool,
plot_dir: str,
plot_format: str,
plot_dpi: int,
plot_cmap: str
):
super().__init__()
self.plot_result = plot_result
self.plot_dir = plot_dir
self.plot_format = plot_format
self.plot_dpi = plot_dpi
self.plot_cmap = plot_cmap
@staticmethod
def centroid(
points
):
return points.mean(axis=0)
@staticmethod
def find_nearest_index(
array,
values
):
indices = np.abs(np.subtract.outer(array, values)).argmin(axis=0)
return indices
def plot(
self
):
logger.info(f"{self.name} -> Plot sorted and unsorted group labels.")
fig, ax = plt.subplots(1, 2, sharey=True)
ax[0].scatter(self.point_cloud[:,0], self.point_cloud[:,1], s=1, c=self.group_labels, alpha=0.6, cmap=self.plot_cmap)
sc = ax[1].scatter(self.point_cloud[:,0], self.point_cloud[:,1], s=1, c=self.labels_sorted, alpha=0.6, cmap=self.plot_cmap)
cbar = fig.colorbar(sc, ax=ax)
cbar.set_label("group ID")
for a in ax:
a.set(xlabel='x - mean [cm]', aspect='equal')
ax[0].set(ylabel='y - mean [cm]')
fig.suptitle(f"{self.field_id}\nsort group IDs")
fig.savefig(os.path.join(self.plot_dir, f"{self.field_id}_08_sorted_labels"+self.plot_format), dpi=self.plot_dpi, bbox_inches='tight')
fig.clf()
plt.close("all")
del fig, ax
def run(
self,
field_id: str,
point_cloud_rotated_weedfiltered_grouped: np.ndarray,
group_labels: np.array,
croplines_ypos: np.array
):
self.point_cloud = point_cloud_rotated_weedfiltered_grouped
self.group_labels = group_labels
self.field_id = field_id
self.labels_sorted = -1*np.ones_like(group_labels)
group_centroids = np.array([self.centroid(self.point_cloud[group_labels == l]) for l in range(group_labels.max()+1)])
group_cropline_ids = self.find_nearest_index(croplines_ypos, group_centroids[:,1])
group_order = np.lexsort((group_centroids[:,0], group_cropline_ids))
for l_old, l_new in enumerate(group_order):
self.labels_sorted[group_labels == l_new] = l_old
group_cropline_ids_sorted = group_cropline_ids[group_labels]
_, group_sizes = np.unique(self.labels_sorted, return_counts=True)
self.save(obj=self.labels_sorted, name="group_labels_sorted", type_="pickle")
self.save(obj=group_cropline_ids_sorted, name="group_cropline_ids_sorted", type_="pickle")
self.save(obj=group_sizes, name="group_sizes_sorted", type_="pickle")
if self.plot_result:
makeDirectory(self.plot_dir)
self.plot()
gc.collect()
class SavePlantsDataFrame(Task):
def __init__(
self,
save_dir: str
):
super().__init__()
self.save_dir = save_dir
def run(
self,
field_id: str,
dates: pd.DatetimeIndex,
cover_ratios: np.array,
gps_transforms: List,
px_resolutions: np.array,
utm_transforms: List,
point_cloud_mean: np.ndarray,
align_transform: Tuple[Union[np.array,np.ndarray]],
rotation_angle: float,
layers_weedfiltered_grouped: np.array,
group_sizes_sorted: np.array,
group_cropline_ids_sorted: np.array,
point_cloud_weedfiltered_grouped: np.ndarray,
point_cloud_aligned_weedfiltered_grouped: np.ndarray,
group_labels_sorted: np.array,
croplines_ypos: np.array
):
# back-transform peak position data from cm (UTM) into GPS coordinates
point_cloud_weedfiltered_grouped_gps = np.hstack([utm_transforms[l](*point_cloud_weedfiltered_grouped[layers_weedfiltered_grouped == l].T/100., inverse=True) for l in np.unique(layers_weedfiltered_grouped)]).T
(scaF, rotA, traV, median_conf) = align_transform
align_transform_ = np.vstack((scaF, rotA, traV[:,0], traV[:,1])).T # cm
group_centroids = np.array([point_cloud_aligned_weedfiltered_grouped[group_labels_sorted == l].mean(axis=0) for l in range(group_labels_sorted.max()+1)]) # cm
n_layers = len(dates)
df_meta = pd.DataFrame()
for i in range(len(dates)):
df_meta = df_meta.append(
dict([("field_id" , field_id),
("date" , dates.values[i]),
("cover_ratio" , cover_ratios[i]), # %
("xy_center" , point_cloud_mean), # cm (UTM)
("align_median_confidence" , median_conf[i]), # %
("align_transform" , align_transform_[i]), # cm (UTM)
("gps_transform" , gps_transforms[i]), # px <-> lonlat
("px_resolution" , px_resolutions[i]), # cm/px
("utm_transform" , utm_transforms[i]), # m (UTM) <-> lonlat
("rotation_angle" , rotation_angle)]), ignore_index=True) # degree
df_plants = pd.DataFrame()
for i in range(len(group_labels_sorted)):
df_plants = df_plants.append(
dict([("field_id" , field_id),
("date" , dates.values[layers_weedfiltered_grouped[i]]),
("group_id" , group_labels_sorted[i]),
("group_size" , group_sizes_sorted[group_labels_sorted[i]]),
("group_cropline_id" , group_cropline_ids_sorted[i]),
("xy_cm" , point_cloud_weedfiltered_grouped[i]), # cm (UTM)
("xy_px" , list(rowcol(gps_transforms[np.argmax(dates.values==dates.values[layers_weedfiltered_grouped[i]])], *point_cloud_weedfiltered_grouped_gps[i]))), # px
("lonlat" , point_cloud_weedfiltered_grouped_gps[i]), # lonlat
("xy_centered_aligned_cm" , point_cloud_aligned_weedfiltered_grouped[i]), # cm (UTM)
("xy_centroid_centered_aligned_cm" , group_centroids[group_labels_sorted[i]]), # cm (UTM)
("y_cropline_rotated_cm" , croplines_ypos[group_cropline_ids_sorted[i]]), # cm (UTM)
("centroid_dist_cm" , np.sqrt(np.sum((point_cloud_aligned_weedfiltered_grouped[i]-group_centroids[group_labels_sorted[i]])**2))), # cm (UTM)
("detected" , True)]), ignore_index=True)
logger.info(f"{self.name} -> Detected plants added to DataFrame.")
df_plants = df_plants.append(add_non_detected(df_plants[df_plants["group_size"] < n_layers], df_meta))
df_plants["field_id"] = df_plants["field_id"].astype(str)
df_plants["group_id"] = df_plants["group_id"].astype(int)
df_plants["group_size"] = df_plants["group_size"].astype(int)
df_plants["group_cropline_id"] = df_plants["group_cropline_id"].astype(int)
df_plants["detected"] = df_plants["detected"].astype(bool)
df_plants = df_plants.sort_values(by=["group_id", "date"], ignore_index=True)
ndates = len(df_plants["date"].value_counts())
logger.info(f"{self.name} -> Complemented DataFrame with non-detected plant positions. {ndates}/{len(dates.values)} dates available.")
makeDirectory(self.save_dir)
plants_save_path = os.path.join(self.save_dir, f"{field_id}_plants.pkl")
meta_save_path = os.path.join(self.save_dir, f"{field_id}_meta.pkl")
try:
df_plants.to_pickle(plants_save_path)
logger.info(f"{self.name} -> DataFrame with plants saved at {plants_save_path}.")
df_meta.to_pickle(meta_save_path)
logger.info(f"{self.name} -> DataFrame with metadata saved at {meta_save_path}.")
except:
logger.error(f"{self.name} -> Could not save DataFrames.")
self.save(obj="", name="_dummy", type_="json")
class EvaluateDetectionQuality(Task):
def __init__(
self,
df_dir: str,
image_dir: str,
ground_truth_dir: str,
image_channels: List[str],
max_distance: float,
save_dir: str,
plot_result: bool,
plot_dir: str,
plot_format: str,
plot_dpi: float
):
super().__init__()
self.df_dir = df_dir
self.image_dir = image_dir
self.ground_truth_dir = ground_truth_dir
self.image_channels = np.asarray(image_channels)
self.max_distance = max_distance
self.save_dir = save_dir
self.plot_result = plot_result
self.plot_dir = plot_dir
self.plot_format = plot_format
self.plot_dpi = plot_dpi
def plot(
self
):
logger.info(f"{self.name}-{self.date} -> Plot detections on image.")
fig, ax = plt.subplots(figsize=(self.width/1000, self.height/1000))
ax.imshow(self.img)
if self.kml_filepath != "":
ax.scatter(*self.gtxy.T[::-1], label=f"ground truth ({len(self.gtxy)})", s = 10, color="C0", alpha=0.5, marker="o")
        if len(self.pxy_direct) > 0:
            ax.scatter(*self.pxy_direct.T[::-1], label=f"direct detection ({len(self.pxy_direct)})", color="C2", s=1)
        if len(self.pxy_indirect) > 0:
            ax.scatter(*self.pxy_indirect.T[::-1], label=f"indirect detection ({len(self.pxy_indirect)})", color="C3", s=1)
ax.legend()
if self.kml_filepath != "":
ax.set(title = f"{self.field_id}@{self.date}\nRecall = {100 * self.TP/(self.TP+self.FN):.2f} %\nPrecision = {100 * self.TP/(self.TP+self.FP):.2f} %")
fig.savefig(os.path.join(self.plot_dir, f"{self.field_id}_{self.date}_detections_gt"+self.plot_format), dpi=self.plot_dpi, bbox_inches='tight')
else:
ax.set(title = f"{self.field_id}@{self.date}")
fig.savefig(os.path.join(self.plot_dir, f"{self.field_id}_{self.date}_detections"+self.plot_format), dpi=self.plot_dpi, bbox_inches='tight')
fig.clf()
plt.close("all")
del fig, ax
def run(
self,
field_id: str,
dates: pd.DatetimeIndex,
gps_transforms: List,
utm_transforms: List,
px_resolutions: np.array
):
self.field_id = field_id
px_res = np.mean(px_resolutions)
plants_all = pd.read_pickle(os.path.join(self.df_dir, f"{self.field_id}_plants.pkl"))
#meta = pd.read_pickle(os.path.join(self.df_dir, f"{self.field_id}_meta.pkl"))
        # filter out all indirect detections before the first detection happened
drop_ind = []
sorted_dates = sorted(dates)
for g_id in np.unique(plants_all.group_id):
group = plants_all[plants_all.group_id == g_id]
first_detection_date = sorted_dates[group.detected.argmax()]
drop_ind.extend(group.index[group.date < first_detection_date])
plants = plants_all.drop(drop_ind)
logger.info(f"{self.name} -> Filtered out leading indirect detections: {len(plants)}/{len(plants_all)} ({100.*len(plants)/len(plants_all):.2f} %) remaining.")
del plants_all
results = dict()
# iterate over all dates
for date, utm_transform, gps_transform in zip(dates, utm_transforms, gps_transforms):
self.date = date._date_repr
filedate = self.date.replace("-","")
logger.info(f"{self.name}-{self.date} -> Calculate detection quality.")
# retrieve image and shape file, if available
kml_filepath = list(glob(f"{self.ground_truth_dir}/{field_id}_{filedate}*.kml"))
tif_filepath = list(glob(f"{self.image_dir}/{field_id}_{filedate}*.tif"))[0]
if len(kml_filepath) > 1:
logger.warn(f"{self.name}-{self.date} -> Multiple ground truth shape files found for image {os.path.basename(tif_filepath)}. "
f"Taking the first one in the list: {os.path.basename(kml_filepath[0])}.")
self.kml_filepath = kml_filepath[0]
elif len(kml_filepath) == 1:
logger.info(f"{self.name}-{self.date} -> Ground truth shape file found.")
self.kml_filepath = kml_filepath[0]
else:
logger.warn(f"{self.name}-{self.date} -> No ground truth shape file found.")
self.kml_filepath = ""
# if ground truth data available, load positions
if self.kml_filepath != "":
try:
gtlatlon = readCoordsFromKml(self.kml_filepath)
gtutm = np.asarray(utm_transform(*gtlatlon.T)).T
self.gtxy = np.asarray(rowcol(gps_transform, xs=gtlatlon[:,0], ys=gtlatlon[:,1], op=lambda x: x)).T
except Exception as e:
logger.warn(f"{self.name}-{self.date} -> Could not load shape file. Error: {e}. Continue without ground truth data.")
gtutm = []
self.gtxy = []
self.kml_filepath = ""
else:
gtutm = []
self.gtxy = []
# load indirect and (if available direct) detections
try:
self.pxy_indirect = np.vstack(plants[(plants["field_id"] == field_id) & (plants["date"] == date) & (plants["detected"]==False)]["xy_px"].values)
plonlat_indirect = np.vstack(plants[(plants["field_id"] == field_id) & (plants["date"] == date) & (plants["detected"]==False)]["lonlat"].values)
except Exception:
self.pxy_indirect = []
plonlat_indirect = []
try:
self.pxy_direct = np.vstack(plants[(plants["field_id"] == field_id) & (plants["date"] == date) & (plants["detected"]==True)]["xy_px"].values)
plonlat_direct = np.vstack(plants[(plants["field_id"] == field_id) & (plants["date"] == date) & (plants["detected"]==True)]["lonlat"].values)
except Exception:
self.pxy_direct = []
plonlat_direct = []
if len(plonlat_indirect) > 0 and len(plonlat_direct) > 0:
plonlat = np.vstack((plonlat_indirect, plonlat_direct))
elif len(plonlat_indirect) > 0:
plonlat = plonlat_indirect
else:
plonlat = plonlat_direct
pxy_utm = np.asarray(utm_transform(*plonlat.T)).T
# initialize results dictionary
results[self.date] = {
"true_positive": np.nan,
"false_positive": np.nan,
"false_negative": np.nan
}
# connect detection with ground truth and extract true/false positives and false negatives
if self.kml_filepath != "":
logger.info(f"{self.name}-{self.date} -> Compare detections with ground truth plant positions (max. tolerance radius: {self.max_distance} cm).")
nn = NearestNeighbors(n_neighbors=1).fit(gtutm)
dist, ind = map(lambda x: x.flatten(), nn.kneighbors(pxy_utm))
self.TP, self.FP, self.FN = 0, 0, 0
for i in range(len(gtutm)):
i_dist = dist[ind == i]
in_radius = i_dist <= self.max_distance/100.
if np.sum(in_radius) > 0:
self.TP += 1
self.FP += len(i_dist) - 1
else:
self.FN += 1
self.FP += len(i_dist)
results[self.date]["true_positive"] = self.TP
results[self.date]["false_positive"] = self.FP
results[self.date]["false_negative"] = self.FN
if self.plot_result:
self.img = read_raster(tif_filepath, self.image_channels, ["R", "G", "B"])
self.img /= np.nanmax(self.img)
self.height, self.width, n_channels = self.img.shape
makeDirectory(self.plot_dir)
self.plot()
del self.img
gc.collect()
# write results to a DataFrame
logger.info(f"{self.name} -> Write results to DataFrame.")
quality_df = pd.DataFrame()
for date, values in results.items():
quality_df = quality_df.append(
dict([("field_id" , self.field_id),
("date" , date),
("true_positive" , values["true_positive"]),
("false_positive" , values["false_positive"]),
("false_negative" , values["false_negative"])]), ignore_index=True)
quality_df["precision"] = 100 * quality_df["true_positive"]/(quality_df["true_positive"]+quality_df["false_positive"])
quality_df["recall"] = 100 * quality_df["true_positive"]/(quality_df["true_positive"]+quality_df["false_negative"])
quality_df["true_positive"] = quality_df["true_positive"].apply(lambda x: int(x) if pd.notna(x) else x)
quality_df["false_positive"] = quality_df["false_positive"].apply(lambda x: int(x) if pd.notna(x) else x)
quality_df["false_negative"] = quality_df["false_negative"].apply(lambda x: int(x) if pd.notna(x) else x)
quality_df["date"] = pd.DatetimeIndex(quality_df["date"])
quality_df = quality_df.sort_values(["date"])
quality_save_path = os.path.join(self.save_dir, f"{self.field_id}_det_quality.pkl")
try:
quality_df.to_pickle(quality_save_path)
logger.info(f"{self.name} -> DataFrame with detection results saved at {quality_save_path}.")
except Exception as e:
logger.error(f"{self.name} -> Could not save DataFrame. Error: {e}.")
self.save(obj=results, name="detection_quality", type_="json")
self.save(obj=self.pxy_direct, name="direct_detections_xy", type_="pickle")
self.save(obj=self.pxy_indirect, name="indirect_detections_xy", type_="pickle")
self.save(obj=self.gtxy, name="ground_truth_xy", type_="pickle")
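# --- Illustrative sketch (not part of the original pipeline) ---
# Minimal, self-contained version of the matching scheme used in
# EvaluateDetectionQuality.run above: every detection is assigned to its
# nearest ground-truth point; a ground-truth point with at least one detection
# inside the tolerance radius counts as a true positive, surplus detections as
# false positives, and unmatched ground-truth points as false negatives.
# The coordinates and the 0.10 m tolerance are invented for illustration.
import numpy as np
from sklearn.neighbors import NearestNeighbors

ground_truth = np.array([[0.0, 0.0], [1.0, 0.0], [2.0, 0.0]])
detections = np.array([[0.02, 0.01], [0.05, -0.03], [1.9, 0.0], [5.0, 5.0]])
max_distance = 0.10  # metres

nn_demo = NearestNeighbors(n_neighbors=1).fit(ground_truth)
dist, ind = map(lambda x: x.flatten(), nn_demo.kneighbors(detections))
TP = FP = FN = 0
for i in range(len(ground_truth)):
    i_dist = dist[ind == i]  # detections whose nearest ground-truth point is i
    in_radius = i_dist <= max_distance
    if np.sum(in_radius) > 0:
        TP += 1
        FP += len(i_dist) - 1  # extra detections on the same plant
    else:
        FN += 1
        FP += len(i_dist)      # detections that are too far away
print(TP, FP, FN)  # expected: 2 true positives, 2 false positives, 1 false negative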
class MergePlantsDataFrame(Task):
def __init__(
self,
save_dir: str
):
super().__init__()
self.save_dir = save_dir
def run(
self,
reduced_results: List[Dict[str, Dict]]
):
plants_df_paths = sorted(glob(os.path.join(self.save_dir, f"*_plants.pkl")))
meta_df_paths = sorted(glob(os.path.join(self.save_dir, f"*_meta.pkl")))
quality_df_paths = sorted(glob(os.path.join(self.save_dir, f"*_det_quality.pkl")))
logger.info(f"{self.name} -> Merge DataFrames of plants, metadata, and detection quality.")
plants_df = pd.DataFrame()
for p in plants_df_paths:
plants_df = plants_df.append(pd.read_pickle(p), ignore_index=True)
df_save_path = os.path.join(self.save_dir, "plants.pkl")
plants_df.to_pickle(df_save_path)
logger.info(f"{self.name} -> Plant DataFrames merged successfully at {df_save_path}.")
meta_df = pd.DataFrame()
for p in meta_df_paths:
meta_df = meta_df.append(pd.read_pickle(p), ignore_index=True)
df_save_path = os.path.join(self.save_dir, "meta.pkl")
meta_df.to_pickle(df_save_path)
logger.info(f"{self.name} -> Metadata DataFrames merged successfully at {df_save_path}.")
quality_df = pd.DataFrame()
for p in quality_df_paths:
quality_df = quality_df.append(pd.read_pickle(p), ignore_index=True)
df_save_path = os.path.join(self.save_dir, "det_quality.pkl")
quality_df.to_pickle(df_save_path)
logger.info(f"{self.name} -> Detection Quality DataFrames merged successfully at {df_save_path}.")
self.save(obj="", name="_dummy", type_="json")
class MakeImageDataset(Task):
def __init__(
self,
df_dir: str,
source_tiff_dir: str,
source_channels: List[str],
export_channels: List[str],
export_shape: List[int],
export_resolution: float,
nan_value: Union[str, float, int],
ann_df_path: str,
ann_gps_name: str,
ann_values_name: str,
tol_distance: float,
save_dir: str
):
super().__init__()
self.df_dir = df_dir
self.source_tiff_dir = source_tiff_dir
self.source_channels = np.asarray(source_channels)
self.export_channels = np.asarray(export_channels)
self.export_shape = export_shape
self.export_resolution = export_resolution
if isinstance(nan_value, str):
if nan_value == "nan":
self.nan_value = np.nan
else:
raise AttributeError(f"Unsupported nan_value {nan_value!r}; only 'nan' is allowed for strings.")
else:
self.nan_value = nan_value
self.ann_df_path = ann_df_path
self.ann_gps_name = ann_gps_name
self.ann_values_name = ann_values_name
self.tol_distance = tol_distance
self.save_dir = save_dir
@staticmethod
def point_inside_region(
point: Tuple,
region
) -> bool:
r, c = point
minr, minc, maxr, maxc = region.bbox
if (minr <= r <= maxr) and (minc <= c <= maxc):
return True
else:
return False
def find_bbox(
self,
img: np.ndarray,
segmask: Optional[np.ndarray]
) -> Optional[Tuple[Tuple, np.ndarray]]:
if segmask is not None:
distance = distance_transform_edt(segmask)
coords = peak_local_max(distance, min_distance=int(self.mean_shape/10), exclude_border=False)
mask = np.zeros(distance.shape, dtype=bool)
mask[tuple(coords.T)] = True
markers = label(mask)
plant_labels = watershed(-self.kernel, markers, mask=segmask)
plant_regions = regionprops(plant_labels)
else:
plant_labels = np.ones(img.shape[:2], dtype=int)
plant_regions = np.asarray(regionprops(plant_labels, intensity_image=self.kernel))
valid = np.array([self.point_inside_region((self.export_shape[0]//2, self.export_shape[1]//2), r) for r in plant_regions], dtype=bool)
if np.sum(valid) > 0:
if np.sum(valid) > 1:
v = np.argmax([plant_regions[i].area if valid[i] else 0 for i in range(len(plant_regions))])
else:
v = np.argmax(valid)
region = plant_regions[v]
minr, minc, maxr, maxc = region.bbox
if segmask is not None:
retmask = segmask.copy()
retmask[:minr,:] = 0
retmask[maxr:,:] = 0
retmask[:,:minc] = 0
retmask[:,maxc:] = 0
else:
retmask = plant_labels
return (minc, minr, maxc, maxr), retmask
else:
return None
def retrieve_annotations(
self,
plants: pd.DataFrame,
meta: pd.DataFrame
) -> pd.DataFrame:
nn = NearestNeighbors(n_neighbors=1)
ann = | pd.read_pickle(self.ann_df_path) | pandas.read_pickle |
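# --- Illustrative sketch (not part of the original module) ---
# MakeImageDataset.find_bbox above separates touching plants with a
# distance-transform + peak_local_max + watershed recipe. The original seeds
# the watershed with -self.kernel (defined elsewhere in the class); this
# sketch uses the negative distance transform instead, the textbook variant
# of the same idea, on a synthetic mask of two overlapping disks.
import numpy as np
from scipy.ndimage import distance_transform_edt
from skimage.feature import peak_local_max
from skimage.measure import label, regionprops
from skimage.segmentation import watershed

yy, xx = np.mgrid[0:100, 0:100]
segmask = ((xx - 35) ** 2 + (yy - 50) ** 2 < 20 ** 2) | ((xx - 65) ** 2 + (yy - 50) ** 2 < 20 ** 2)

distance = distance_transform_edt(segmask)
coords = peak_local_max(distance, min_distance=10, exclude_border=False)
mask = np.zeros(distance.shape, dtype=bool)
mask[tuple(coords.T)] = True
markers = label(mask)
plant_labels = watershed(-distance, markers, mask=segmask)

for region in regionprops(plant_labels):
    print(region.label, region.area, region.bbox)  # two separate plant regions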
#!/usr/bin/env python
# coding: utf-8
# In[ ]:
# In[18]:
import requests
import json
from lxml import etree
from bs4 import BeautifulSoup
headers={
'Cookie':'SINAGLOBAL=5067277944781.756.1539012379187; SUBP=0033WrSXqPxfM725Ws9jqgMF55529P9D9WFXAgGhhYE-bL1UlBNsI6xh5JpX5KMhUgL.Foqce0eN1h2cehB2dJLoIEXLxK-L1h5LB-eLxK-L1h5LB-eLxK-L1K5L1heLxKBLBonL1h.LxKMLBKzL1KMt; UOR=jx3.xoyo.com,widget.weibo.com,login.sina.com.cn; ALF=1665190926; SSOLoginState=1633654926; SCF=AjnY75MXDIg2Sev-TVKQBdyuwLa-mrIYwFgLkjivnwGqe4HMR8MVkSqyfw315Fic7gc1c38G1W-RUtxrwPqe0qY.; SUB=_2A25MW-jeDeRhGeBI6FEW-C_KyziIHXVvEV0WrDV8PUNbmtAKLUzhkW9NRppHJg76K77LtSOxPlpC13YygxcK3EKM; _s_tentry=login.sina.com.cn; Apache=441836365226.03375.1633654927612; ULV=1633654927618:48:1:1:441836365226.03375.1633654927612:1632876696485',
'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/94.0.4606.71 Safari/537.36'
}
def get_top():
url = "https://s.weibo.com/top/summary"
r = requests.get(url,headers=headers)
# print(r.text)
# print(r.status_code)
html_xpath = etree.HTML(r.text)
data = html_xpath.xpath('//*[@id="pl_top_realtimehot"]/table/tbody/tr[1]/td[2]')
num = 1
for tr in (data):
print('-------------')
title = tr.xpath('./a/text()')
hot_score = tr.xpath('./span/text()')
href = tr.xpath('./a/@href')
if hot_score:
print('{} {} hot: {}'.format(num,title[0],hot_score[0]))
result = get_weibo_list('https://s.weibo.com/weibo?q=%23'+title[0]+'%23&Refer=top')
print(result)
num += 1
def get_weibo_list(url):
r = requests.get(url,headers=headers)
bs = BeautifulSoup(r.text, 'html.parser')
body = bs.body
div_m_main = body.find('div',attrs={'class':'m-main'})
div_m_wrap = div_m_main.find('div',attrs={'class':'m-wrap'})
div_m_con_l = div_m_wrap.find('div',attrs={'class':'m-con-l'})
data_div = div_m_con_l.findAll('div',attrs={'class':'card-wrap','action-type':'feed_list_item'})
weibo_list = []
for each_div in data_div:
div_card = each_div.find('div',attrs={'class':'card'})
div_card_feed = div_card.find('div',attrs={'class':'card-feed'})
div_content = div_card_feed.find('div',attrs={'class':'content'})
p_feed_list_content = div_content.find('p',attrs={'class':'txt','node-type':'feed_list_content'})
content_text = p_feed_list_content.get_text()
p_feed_list_content_full = div_content.find('p',attrs={'class':'txt','node-type':'feed_list_content_full'})
if p_feed_list_content_full:
content_text = p_feed_list_content_full.get_text()
weibo_list.append(content_text.strip())
return weibo_list
# In[19]:
get_top()
# In[20]:
# Crawl the main posts under the trending topic "武磊绝杀" (Wu Lei's match-winning goal)
cont = get_weibo_list('https://s.weibo.com/weibo?q=%23%E6%AD%A6%E7%A3%8A%E7%BB%9D%E6%9D%80%23&Refer=top')
cont
# In[29]:
import re
import jieba
# Noise-removal function: strips special symbols, modal particles and other noise from the Weibo posts
def process(text):
# remove URLs
text = re.sub("(https?|ftp|file)://[-A-Za-z0-9+&@#/%=~_|]"," ",text)
# remove @username mentions
text = re.sub("@.+?( |$)", " ", text)
# remove {%...%} segments (geotags, Weibo topics, etc.)
text = re.sub("\{%.+?%\}", " ",text)
# remove #...# hashtags (topic references)
text = re.sub("#.+?#", " ", text)
# remove 【...】 brackets (the content inside is usually not written by the user)
text = re.sub("【.+?】", " ", text)
# noise specific to this dataset
text = re.sub('\u200b'," ",text)
# word segmentation
words = [w for w in jieba.lcut(text) if w.isalpha()]
result = " ".join(words)
return result
# In[30]:
# apply the noise-removal function to the crawled posts
pro_cont = []
for each in cont:
pro_cont.append(process(each))
pro_cont
# In[31]:
# to prepare for building text vectors, first convert the cleaned posts into a pandas DataFrame
import pandas as pd
df_title = | pd.DataFrame(pro_cont,columns=['words']) | pandas.DataFrame |
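# --- Illustrative sketch (not part of the original notebook) ---
# One way the 'words' column built above could be turned into text vectors,
# e.g. for clustering or topic analysis. TfidfVectorizer and the toy,
# pre-segmented sentences are assumptions for illustration only.
import pandas as pd
from sklearn.feature_extraction.text import TfidfVectorizer

df_demo = pd.DataFrame({'words': ['武磊 绝杀 进球', '球队 赢得 比赛', '武磊 比赛 进球']})
vectorizer = TfidfVectorizer()
X = vectorizer.fit_transform(df_demo['words'])
print(X.shape)                            # (3 documents, vocabulary size)
print(vectorizer.get_feature_names_out())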
import pandas as pd
from sklearn.cluster import AgglomerativeClustering
from sklearn.metrics import pairwise_distances
from Levenshtein._levenshtein import distance as lev_distance
from sadie.airr import AirrTable, LinkedAirrTable
from typing import Union
import numpy as np
class Cluster:
"""Main clustering class.
This class is used to cluster a given set of data points.
"""
def __init__(
self,
airrtable: Union[AirrTable, LinkedAirrTable],
linkage="complete",
groupby=None,
lookup=["cdr1_aa", "cdr2_aa", "cdr3_aa"],
):
"""Initialize the clustering class.
Arguments
---------
airrtable (AirrTable, LinkedAirrTable): The airrtable to cluster.
linkage (str): The linkage method to use. Default is "complete".
groupby (str or list, optional): Column(s) to group the airrtable by before clustering. Default is None.
lookup (list): Columns whose pairwise Levenshtein distances are summed to form the distance metric. Default is ["cdr1_aa", "cdr2_aa", "cdr3_aa"].
Raises
------
TypeError
No airrtable was provided.
ValueError
groupby columns must be in the airrtable.
ValueError
lookup columns must be in the airrtable
"""
if not isinstance(airrtable, (AirrTable, LinkedAirrTable)):
raise TypeError("airrtable table must be a AirrTable or LinkedAirrTable")
if groupby is not None:
diff = set(groupby).difference(set(airrtable.columns))
if diff:
raise ValueError(f"groupby column(s) {diff} not found in airrtable")
diff = set(lookup).difference(set(airrtable.columns))
if diff:
raise ValueError(f"lookup column(s) {diff} not found in airrtable")
self.airrtable = airrtable
self.linkage = linkage
self.groupby = groupby
self.lookup = lookup
self.key_column = airrtable.key_column
if isinstance(self.airrtable, LinkedAirrTable):
self._type = "linked"
else:
self._type = "unlinked"
def _get_distance_df(self, df):
"""Given a dataframe, get the N x N pairwise distances using Levenshtein distance of the lookup"""
df_lookup = df[self.lookup].to_dict(orient="index")
def calc_lev(x, y):
dist = 0
for metric in self.lookup:
dist += lev_distance(str(df_lookup[x[0]][metric]), str(df_lookup[y[0]][metric]))
return dist
X = np.array(df.index).reshape(-1, 1)
return pairwise_distances(X, metric=calc_lev, n_jobs=-1)
def cluster(self, distance_threshold=3):
"""Cluster the data.
This method clusters the data using the specified linkage and affinity
methods.
Arguments
---------
distance_threshold (int): The maximum linkage distance between two points for them to be placed in the same cluster. Default is 3.
"""
if self.groupby is None:
distance_df = self._get_distance_df(self.airrtable)
model = AgglomerativeClustering(
linkage=self.linkage, affinity="precomputed", distance_threshold=distance_threshold, n_clusters=None
)
model.fit(distance_df)
# Create the data frame
self.airrtable["cluster"] = model.labels_
else:
cluster_catcher = []
for g, g_df in self.airrtable.groupby(self.groupby):
distance_df = self._get_distance_df(g_df)
# Calculate the linkage matrix
model = AgglomerativeClustering(
linkage=self.linkage,
affinity="precomputed",
distance_threshold=distance_threshold,
n_clusters=None,
)
if len(g_df) == 1:
_labels = [0]
else:
model.fit(distance_df)
_labels = model.labels_
# Create the data frame
if isinstance(g, str):
labels = list(map(lambda x: f"{g}_{str(x)}", _labels))
elif isinstance(g, (list, tuple)):
_sub_labels = "_".join([str(i) for i in g])
labels = list(map(lambda x: f"{_sub_labels}_{str(x)}", _labels))
else:
raise ValueError("groupby must be a string or a list/tuple of strings")
g_df["cluster"] = labels
cluster_catcher.append(g_df)
self.airrtable = | pd.concat(cluster_catcher) | pandas.concat |
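# --- Illustrative sketch (not part of the original module) ---
# The core of Cluster.cluster above, reduced to plain strings: build a
# precomputed pairwise edit-distance matrix and hand it to
# AgglomerativeClustering with complete linkage and a distance threshold.
# The CDR3-like strings are invented; note that scikit-learn >= 1.2 renames
# the 'affinity' argument used above to 'metric'.
import numpy as np
from sklearn.cluster import AgglomerativeClustering

def edit_distance(a: str, b: str) -> int:
    # simple dynamic-programming Levenshtein distance
    dp = list(range(len(b) + 1))
    for i, ca in enumerate(a, start=1):
        prev, dp[0] = dp[0], i
        for j, cb in enumerate(b, start=1):
            prev, dp[j] = dp[j], min(dp[j] + 1, dp[j - 1] + 1, prev + (ca != cb))
    return dp[-1]

seqs = ["CARDYW", "CARDYF", "CARDFW", "CTRGGW", "CTRGGF"]
dist = np.array([[edit_distance(a, b) for b in seqs] for a in seqs])
model = AgglomerativeClustering(
    linkage="complete", affinity="precomputed", distance_threshold=3, n_clusters=None
)
print(model.fit_predict(dist))  # two clusters: the CARD... and CTRGG... groups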
#!/usr/bin/env python
# coding: utf-8
#imports
import pandas as pd
from datetime import datetime, timedelta
import numpy as np
# date range
str_st = "2019-12-28"
str_en = "2021-01-30"
start_date = datetime.strptime(str_st,"%Y-%m-%d")
end_date = datetime.strptime(str_en,"%Y-%m-%d")
def date_range_str(start_date, end_date):
return [(start_date + timedelta(n)).strftime('%Y%m%d') for n in range(int ((end_date - start_date).days) + 1)]
date_list = date_range_str(start_date, end_date)
column_names = ['timestamp', 'allocation_id', 'count_hostname', 'count_node_name','job_sch_node_num',
'mean_cpu_power', 'std_cpu_power', 'min_cpu_power', 'max_cpu_power', 'q25_cpu_power',
'q50_cpu_power', 'q75_cpu_power', 'count_cpu_power', 'size_cpu_power', 'cpu_nans',
'mean_gpu_power', 'std_gpu_power', 'min_gpu_power', 'max_gpu_power', 'q25_gpu_power',
'q50_gpu_power', 'q75_gpu_power', 'count_gpu_power', 'size_gpu_power', 'gpu_nans']
SOURCE_DIR = '/gpfs/alpine/stf218/proj-shared/data/lake/summit_power_temp_openbmc/power_ts_job_aware_10s_components'
DEST_DIR = '/gpfs/alpine/stf218/proj-shared/data/lake/summit_power_temp_openbmc/power_jobwise_10s_components'
for i,date_ins in enumerate(date_list):
if date_ins == "20210130":
#for last day
df_today = pd.read_csv(f'{SOURCE_DIR}/{date_list[i]}.csv')
df_prev = pd.read_csv(f'{SOURCE_DIR}/{date_list[i-1]}.csv')
df_next = | pd.DataFrame(columns=column_names) | pandas.DataFrame |
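# --- Illustrative sketch (not part of the original script) ---
# The loop above loads the current, previous and next day's files, with an
# empty frame standing in for the day after the last date. The same sliding
# three-day window on in-memory frames; the purpose of the window (e.g.
# aggregating jobs that span midnight) is an assumption here.
import pandas as pd

daily = {
    "20210128": pd.DataFrame({"timestamp": ["2021-01-28 23:59:50"], "mean_cpu_power": [210.0]}),
    "20210129": pd.DataFrame({"timestamp": ["2021-01-29 00:00:00"], "mean_cpu_power": [215.0]}),
    "20210130": pd.DataFrame({"timestamp": ["2021-01-30 00:00:10"], "mean_cpu_power": [220.0]}),
}
days = sorted(daily)
for i, d in enumerate(days):
    df_prev = daily[days[i - 1]] if i > 0 else pd.DataFrame(columns=daily[d].columns)
    df_next = daily[days[i + 1]] if i < len(days) - 1 else pd.DataFrame(columns=daily[d].columns)
    window = pd.concat([df_prev, daily[d], df_next], ignore_index=True)
    print(d, len(window))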
"""
.. module:: trend
:synopsis: Trend Indicators.
.. moduleauthor:: <NAME> (Bukosabino)
"""
import numpy as np
import pandas as pd
from ta.utils import IndicatorMixin, ema, get_min_max
class AroonIndicator(IndicatorMixin):
"""Aroon Indicator
Identify when trends are likely to change direction.
Aroon Up = ((N - Days Since N-day High) / N) x 100
Aroon Down = ((N - Days Since N-day Low) / N) x 100
Aroon Indicator = Aroon Up - Aroon Down
https://www.investopedia.com/terms/a/aroon.asp
Args:
close(pandas.Series): dataset 'Close' column.
n(int): n period.
fillna(bool): if True, fill nan values.
"""
def __init__(self, close: pd.Series, n: int = 25, fillna: bool = False):
self._close = close
self._n = n
self._fillna = fillna
self._run()
def _run(self):
rolling_close = self._close.rolling(self._n, min_periods=0)
self._aroon_up = rolling_close.apply(
lambda x: float(np.argmax(x) + 1) / self._n * 100, raw=True)
self._aroon_down = rolling_close.apply(
lambda x: float(np.argmin(x) + 1) / self._n * 100, raw=True)
def aroon_up(self) -> pd.Series:
"""Aroon Up Channel
Returns:
pandas.Series: New feature generated.
"""
aroon_up = self._check_fillna(self._aroon_up, value=0)
return pd.Series(aroon_up, name=f'aroon_up_{self._n}')
def aroon_down(self) -> pd.Series:
"""Aroon Down Channel
Returns:
pandas.Series: New feature generated.
"""
aroon_down = self._check_fillna(self._aroon_down, value=0)
return pd.Series(aroon_down, name=f'aroon_down_{self._n}')
def aroon_indicator(self) -> pd.Series:
"""Aroon Indicator
Returns:
pandas.Series: New feature generated.
"""
aroon_diff = self._aroon_up - self._aroon_down
aroon_diff = self._check_fillna(aroon_diff, value=0)
return pd.Series(aroon_diff, name=f'aroon_ind_{self._n}')
class MACD(IndicatorMixin):
"""Moving Average Convergence Divergence (MACD)
Is a trend-following momentum indicator that shows the relationship between
two moving averages of prices.
https://school.stockcharts.com/doku.php?id=technical_indicators:moving_average_convergence_divergence_macd
Args:
close(pandas.Series): dataset 'Close' column.
n_fast(int): n period short-term.
n_slow(int): n period long-term.
n_sign(int): n period to signal.
fillna(bool): if True, fill nan values.
"""
def __init__(self,
close: pd.Series,
n_slow: int = 26,
n_fast: int = 12,
n_sign: int = 9,
fillna: bool = False):
self._close = close
self._n_slow = n_slow
self._n_fast = n_fast
self._n_sign = n_sign
self._fillna = fillna
self._run()
def _run(self):
self._emafast = ema(self._close, self._n_fast, self._fillna)
self._emaslow = ema(self._close, self._n_slow, self._fillna)
self._macd = self._emafast - self._emaslow
self._macd_signal = ema(self._macd, self._n_sign, self._fillna)
self._macd_diff = self._macd - self._macd_signal
def macd(self) -> pd.Series:
"""MACD Line
Returns:
pandas.Series: New feature generated.
"""
macd = self._check_fillna(self._macd, value=0)
return pd.Series(macd, name=f'MACD_{self._n_fast}_{self._n_slow}')
def macd_signal(self) -> pd.Series:
"""Signal Line
Returns:
pandas.Series: New feature generated.
"""
macd_signal = self._check_fillna(self._macd_signal, value=0)
return pd.Series(macd_signal, name=f'MACD_sign_{self._n_fast}_{self._n_slow}')
def macd_diff(self) -> pd.Series:
"""MACD Histogram
Returns:
pandas.Series: New feature generated.
"""
macd_diff = self._check_fillna(self._macd_diff, value=0)
return pd.Series(macd_diff, name=f'MACD_diff_{self._n_fast}_{self._n_slow}')
class EMAIndicator(IndicatorMixin):
"""EMA - Exponential Moving Average
Args:
close(pandas.Series): dataset 'Close' column.
n(int): n period.
fillna(bool): if True, fill nan values.
"""
def __init__(self, close: pd.Series, n: int = 14, fillna: bool = False):
self._close = close
self._n = n
self._fillna = fillna
def ema_indicator(self) -> pd.Series:
"""Exponential Moving Average (EMA)
Returns:
pandas.Series: New feature generated.
"""
ema_ = ema(self._close, self._n, self._fillna)
return pd.Series(ema_, name=f'ema_{self._n}')
class TRIXIndicator(IndicatorMixin):
"""Trix (TRIX)
Shows the percent rate of change of a triple exponentially smoothed moving
average.
http://stockcharts.com/school/doku.php?id=chart_school:technical_indicators:trix
Args:
close(pandas.Series): dataset 'Close' column.
n(int): n period.
fillna(bool): if True, fill nan values.
"""
def __init__(self, close: pd.Series, n: int = 15, fillna: bool = False):
self._close = close
self._n = n
self._fillna = fillna
self._run()
def _run(self):
ema1 = ema(self._close, self._n, self._fillna)
ema2 = ema(ema1, self._n, self._fillna)
ema3 = ema(ema2, self._n, self._fillna)
self._trix = (ema3 - ema3.shift(1, fill_value=ema3.mean())) / ema3.shift(1, fill_value=ema3.mean())
self._trix *= 100
def trix(self) -> pd.Series:
"""Trix (TRIX)
Returns:
pandas.Series: New feature generated.
"""
trix = self._check_fillna(self._trix, value=0)
return pd.Series(trix, name=f'trix_{self._n}')
class MassIndex(IndicatorMixin):
"""Mass Index (MI)
It uses the high-low range to identify trend reversals based on range
expansions. It identifies range bulges that can foreshadow a reversal of
the current trend.
http://stockcharts.com/school/doku.php?id=chart_school:technical_indicators:mass_index
Args:
high(pandas.Series): dataset 'High' column.
low(pandas.Series): dataset 'Low' column.
n(int): n low period.
n2(int): n high period.
fillna(bool): if True, fill nan values.
"""
def __init__(self, high: pd.Series, low: pd.Series, n: int = 9, n2: int = 25, fillna: bool = False):
self._high = high
self._low = low
self._n = n
self._n2 = n2
self._fillna = fillna
self._run()
def _run(self):
amplitude = self._high - self._low
ema1 = ema(amplitude, self._n, self._fillna)
ema2 = ema(ema1, self._n, self._fillna)
mass = ema1 / ema2
self._mass = mass.rolling(self._n2, min_periods=0).sum()
def mass_index(self) -> pd.Series:
"""Mass Index (MI)
Returns:
pandas.Series: New feature generated.
"""
mass = self._check_fillna(self._mass, value=0)
return pd.Series(mass, name=f'mass_index_{self._n}_{self._n2}')
class IchimokuIndicator(IndicatorMixin):
"""Ichimoku Kinkō Hyō (Ichimoku)
http://stockcharts.com/school/doku.php?id=chart_school:technical_indicators:ichimoku_cloud
Args:
high(pandas.Series): dataset 'High' column.
low(pandas.Series): dataset 'Low' column.
n1(int): n1 low period.
n2(int): n2 medium period.
n3(int): n3 high period.
visual(bool): if True, shift n2 values.
fillna(bool): if True, fill nan values.
"""
def __init__(self, high: pd.Series, low: pd.Series, n1: int = 9, n2: int = 26, n3: int = 52,
visual: bool = False, fillna: bool = False):
self._high = high
self._low = low
self._n1 = n1
self._n2 = n2
self._n3 = n3
self._visual = visual
self._fillna = fillna
def ichimoku_a(self) -> pd.Series:
"""Senkou Span A (Leading Span A)
Returns:
pandas.Series: New feature generated.
"""
conv = 0.5 * (self._high.rolling(self._n1, min_periods=0).max()
+ self._low.rolling(self._n1, min_periods=0).min())
base = 0.5 * (self._high.rolling(self._n2, min_periods=0).max()
+ self._low.rolling(self._n2, min_periods=0).min())
spana = 0.5 * (conv + base)
spana = spana.shift(self._n2, fill_value=spana.mean()) if self._visual else spana
spana = self._check_fillna(spana, value=-1)
return pd.Series(spana, name=f'ichimoku_a_{self._n1}_{self._n2}')
def ichimoku_b(self) -> pd.Series:
"""Senkou Span B (Leading Span B)
Returns:
pandas.Series: New feature generated.
"""
spanb = 0.5 * (self._high.rolling(self._n3, min_periods=0).max()
+ self._low.rolling(self._n3, min_periods=0).min())
spanb = spanb.shift(self._n2, fill_value=spanb.mean()) if self._visual else spanb
spanb = self._check_fillna(spanb, value=-1)
return pd.Series(spanb, name=f'ichimoku_b_{self._n1}_{self._n2}')
class KSTIndicator(IndicatorMixin):
"""KST Oscillator (KST Signal)
It is useful to identify major stock market cycle junctures because its
formula is weighed to be more greatly influenced by the longer and more
dominant time spans, in order to better reflect the primary swings of stock
market cycle.
http://stockcharts.com/school/doku.php?id=chart_school:technical_indicators:know_sure_thing_kst
Args:
close(pandas.Series): dataset 'Close' column.
r1(int): r1 period.
r2(int): r2 period.
r3(int): r3 period.
r4(int): r4 period.
n1(int): n1 smoothed period.
n2(int): n2 smoothed period.
n3(int): n3 smoothed period.
n4(int): n4 smoothed period.
nsig(int): n period to signal.
fillna(bool): if True, fill nan values.
"""
def __init__(self, close: pd.Series, r1: int = 10, r2: int = 15, r3: int = 20, r4: int = 30,
n1: int = 10, n2: int = 10, n3: int = 10, n4: int = 15, nsig: int = 9,
fillna: bool = False):
self._close = close
self._r1 = r1
self._r2 = r2
self._r3 = r3
self._r4 = r4
self._n1 = n1
self._n2 = n2
self._n3 = n3
self._n4 = n4
self._nsig = nsig
self._fillna = fillna
self._run()
def _run(self):
rocma1 = ((self._close - self._close.shift(self._r1, fill_value=self._close.mean()))
/ self._close.shift(self._r1, fill_value=self._close.mean())).rolling(self._n1, min_periods=0).mean()
rocma2 = ((self._close - self._close.shift(self._r2, fill_value=self._close.mean()))
/ self._close.shift(self._r2, fill_value=self._close.mean())).rolling(self._n2, min_periods=0).mean()
rocma3 = ((self._close - self._close.shift(self._r3, fill_value=self._close.mean()))
/ self._close.shift(self._r3, fill_value=self._close.mean())).rolling(self._n3, min_periods=0).mean()
rocma4 = ((self._close - self._close.shift(self._r4, fill_value=self._close.mean()))
/ self._close.shift(self._r4, fill_value=self._close.mean())).rolling(self._n4, min_periods=0).mean()
self._kst = 100 * (rocma1 + 2 * rocma2 + 3 * rocma3 + 4 * rocma4)
self._kst_sig = self._kst.rolling(self._nsig, min_periods=0).mean()
def kst(self) -> pd.Series:
"""Know Sure Thing (KST)
Returns:
pandas.Series: New feature generated.
"""
kst = self._check_fillna(self._kst, value=0)
return pd.Series(kst, name='kst')
def kst_sig(self) -> pd.Series:
"""Signal Line Know Sure Thing (KST)
nsig-period SMA of KST
Returns:
pandas.Series: New feature generated.
"""
kst_sig = self._check_fillna(self._kst_sig, value=0)
return pd.Series(kst_sig, name='kst_sig')
def kst_diff(self) -> pd.Series:
"""Diff Know Sure Thing (KST)
KST - Signal_KST
Returns:
pandas.Series: New feature generated.
"""
kst_diff = self._kst - self._kst_sig
kst_diff = self._check_fillna(kst_diff, value=0)
return pd.Series(kst_diff, name='kst_diff')
class DPOIndicator(IndicatorMixin):
"""Detrended Price Oscillator (DPO)
Is an indicator designed to remove trend from price and make it easier to
identify cycles.
http://stockcharts.com/school/doku.php?id=chart_school:technical_indicators:detrended_price_osci
Args:
close(pandas.Series): dataset 'Close' column.
n(int): n period.
fillna(bool): if True, fill nan values.
"""
def __init__(self, close: pd.Series, n: int = 20, fillna: bool = False):
self._close = close
self._n = n
self._fillna = fillna
self._run()
def _run(self):
self._dpo = (self._close.shift(int((0.5 * self._n) + 1), fill_value=self._close.mean())
- self._close.rolling(self._n, min_periods=0).mean())
def dpo(self) -> pd.Series:
"""Detrended Price Oscillator (DPO)
Returns:
pandas.Series: New feature generated.
"""
dpo = self._check_fillna(self._dpo, value=0)
return pd.Series(dpo, name='dpo_'+str(self._n))
class CCIIndicator(IndicatorMixin):
"""Commodity Channel Index (CCI)
CCI measures the difference between a security's price change and its
average price change. High positive readings indicate that prices are well
above their average, which is a show of strength. Low negative readings
indicate that prices are well below their average, which is a show of
weakness.
http://stockcharts.com/school/doku.php?id=chart_school:technical_indicators:commodity_channel_index_cci
Args:
high(pandas.Series): dataset 'High' column.
low(pandas.Series): dataset 'Low' column.
close(pandas.Series): dataset 'Close' column.
n(int): n period.
c(int): constant.
fillna(bool): if True, fill nan values.
"""
def __init__(self,
high: pd.Series,
low: pd.Series,
close: pd.Series,
n: int = 20,
c: float = 0.015,
fillna: bool = False):
self._high = high
self._low = low
self._close = close
self._n = n
self._c = c
self._fillna = fillna
self._run()
def _run(self):
def _mad(x):
return np.mean(np.abs(x-np.mean(x)))
pp = (self._high + self._low + self._close) / 3.0
self._cci = ((pp - pp.rolling(self._n, min_periods=0).mean())
/ (self._c * pp.rolling(self._n, min_periods=0).apply(_mad, True)))
def cci(self) -> pd.Series:
"""Commodity Channel Index (CCI)
Returns:
pandas.Series: New feature generated.
"""
cci = self._check_fillna(self._cci, value=0)
return pd.Series(cci, name='cci')
class ADXIndicator(IndicatorMixin):
"""Average Directional Movement Index (ADX)
The Plus Directional Indicator (+DI) and Minus Directional Indicator (-DI)
are derived from smoothed averages of these differences, and measure trend
direction over time. These two indicators are often referred to
collectively as the Directional Movement Indicator (DMI).
The Average Directional Index (ADX) is in turn derived from the smoothed
averages of the difference between +DI and -DI, and measures the strength
of the trend (regardless of direction) over time.
Using these three indicators together, chartists can determine both the
direction and strength of the trend.
http://stockcharts.com/school/doku.php?id=chart_school:technical_indicators:average_directional_index_adx
Args:
high(pandas.Series): dataset 'High' column.
low(pandas.Series): dataset 'Low' column.
close(pandas.Series): dataset 'Close' column.
n(int): n period.
fillna(bool): if True, fill nan values.
"""
def __init__(self, high: pd.Series, low: pd.Series, close: pd.Series, n: int = 14, fillna: bool = False):
self._high = high
self._low = low
self._close = close
self._n = n
self._fillna = fillna
self._run()
def _run(self):
assert self._n != 0, "N may not be 0 and is %r" % self._n
cs = self._close.shift(1)
pdm = get_min_max(self._high, cs, 'max')
pdn = get_min_max(self._low, cs, 'min')
tr = pdm - pdn
self._trs_initial = np.zeros(self._n-1)
self._trs = np.zeros(len(self._close) - (self._n - 1))
self._trs[0] = tr.dropna()[0:self._n].sum()
tr = tr.reset_index(drop=True)
for i in range(1, len(self._trs)-1):
self._trs[i] = self._trs[i-1] - (self._trs[i-1]/float(self._n)) + tr[self._n+i]
up = self._high - self._high.shift(1)
dn = self._low.shift(1) - self._low
pos = abs(((up > dn) & (up > 0)) * up)
neg = abs(((dn > up) & (dn > 0)) * dn)
self._dip = np.zeros(len(self._close) - (self._n - 1))
self._dip[0] = pos.dropna()[0:self._n].sum()
pos = pos.reset_index(drop=True)
for i in range(1, len(self._dip)-1):
self._dip[i] = self._dip[i-1] - (self._dip[i-1]/float(self._n)) + pos[self._n+i]
self._din = np.zeros(len(self._close) - (self._n - 1))
self._din[0] = neg.dropna()[0:self._n].sum()
neg = neg.reset_index(drop=True)
for i in range(1, len(self._din)-1):
self._din[i] = self._din[i-1] - (self._din[i-1]/float(self._n)) + neg[self._n+i]
def adx(self) -> pd.Series:
"""Average Directional Index (ADX)
Returns:
pandas.Series: New feature generated.
"""
dip = np.zeros(len(self._trs))
for i in range(len(self._trs)):
dip[i] = 100 * (self._dip[i]/self._trs[i])
din = np.zeros(len(self._trs))
for i in range(len(self._trs)):
din[i] = 100 * (self._din[i]/self._trs[i])
dx = 100 * np.abs((dip - din) / (dip + din))
adx = np.zeros(len(self._trs))
adx[self._n] = dx[0:self._n].mean()
for i in range(self._n+1, len(adx)):
adx[i] = ((adx[i-1] * (self._n - 1)) + dx[i-1]) / float(self._n)
adx = np.concatenate((self._trs_initial, adx), axis=0)
self._adx = pd.Series(data=adx, index=self._close.index)
adx = self._check_fillna(self._adx, value=20)
return pd.Series(adx, name='adx')
def adx_pos(self) -> pd.Series:
"""Plus Directional Indicator (+DI)
Returns:
pandas.Series: New feature generated.
"""
dip = np.zeros(len(self._close))
for i in range(1, len(self._trs)-1):
dip[i+self._n] = 100 * (self._dip[i]/self._trs[i])
adx_pos = self._check_fillna(pd.Series(dip, index=self._close.index), value=20)
return pd.Series(adx_pos, name='adx_pos')
def adx_neg(self) -> pd.Series:
"""Minus Directional Indicator (-DI)
Returns:
pandas.Series: New feature generated.
"""
din = np.zeros(len(self._close))
for i in range(1, len(self._trs)-1):
din[i+self._n] = 100 * (self._din[i]/self._trs[i])
adx_neg = self._check_fillna(pd.Series(din, index=self._close.index), value=20)
return pd.Series(adx_neg, name='adx_neg')
class VortexIndicator(IndicatorMixin):
"""Vortex Indicator (VI)
It consists of two oscillators that capture positive and negative trend
movement. A bullish signal triggers when the positive trend indicator
crosses above the negative trend indicator or a key level.
http://stockcharts.com/school/doku.php?id=chart_school:technical_indicators:vortex_indicator
Args:
high(pandas.Series): dataset 'High' column.
low(pandas.Series): dataset 'Low' column.
close(pandas.Series): dataset 'Close' column.
n(int): n period.
fillna(bool): if True, fill nan values.
"""
def __init__(self, high: pd.Series, low: pd.Series, close: pd.Series, n: int = 14, fillna: bool = False):
self._high = high
self._low = low
self._close = close
self._n = n
self._fillna = fillna
self._run()
def _run(self):
tr = (self._high.combine(self._close.shift(1, fill_value=self._close.mean()), max)
- self._low.combine(self._close.shift(1, fill_value=self._close.mean()), min))
trn = tr.rolling(self._n).sum()
vmp = np.abs(self._high - self._low.shift(1))
vmm = np.abs(self._low - self._high.shift(1))
self._vip = vmp.rolling(self._n, min_periods=0).sum() / trn
self._vin = vmm.rolling(self._n, min_periods=0).sum() / trn
def vortex_indicator_pos(self):
"""+VI
Returns:
pandas.Series: New feature generated.
"""
vip = self._check_fillna(self._vip, value=1)
return pd.Series(vip, name='vip')
def vortex_indicator_neg(self):
"""-VI
Returns:
pandas.Series: New feature generated.
"""
vin = self._check_fillna(self._vin, value=1)
return | pd.Series(vin, name='vin') | pandas.Series |
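# --- Illustrative usage sketch (not part of the module above) ---
# How the indicator classes defined above are typically driven from price
# series; the random-walk OHLC data is synthetic and for illustration only.
import numpy as np
import pandas as pd

rng = np.random.default_rng(0)
close = pd.Series(100 + np.cumsum(rng.normal(0, 1, 300)), name="close")
high = close + rng.uniform(0.1, 1.0, 300)
low = close - rng.uniform(0.1, 1.0, 300)

macd = MACD(close=close, n_slow=26, n_fast=12, n_sign=9)
adx = ADXIndicator(high=high, low=low, close=close, n=14, fillna=True)
summary = pd.DataFrame({
    "close": close,
    "macd": macd.macd(),
    "macd_signal": macd.macd_signal(),
    "adx": adx.adx(),
})
print(summary.tail())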
from abc import ABC, abstractmethod
from typing import List, Any, Dict
import pandas as pd
import numpy as np
from hdbscan import HDBSCAN, all_points_membership_vectors
from pydantic.main import BaseModel
from sklearn.neighbors import LocalOutlierFactor
from pyod.models.iforest import IForest
from pyod.models.hbos import HBOS
from pyod.models.lof import LOF
from pyod.models.ocsvm import OCSVM
from pyod.models.pca import PCA
from pyod.models.cblof import CBLOF
from pyod.models.auto_encoder import AutoEncoder
from pyod.models.vae import VAE
from ivis import Ivis
from sklearn.decomposition import PCA as PCAR
from sklearn.manifold import TSNE
from umap import UMAP
from src.utils import next_path, product_dict, get_scores, sample_data
from pydantic import BaseModel, Field
from src.embedders import EmbeddingModel
class TestData(BaseModel):
"""Loads from path, samples and holds outlier dataset."""
path: str
name: str
fraction: list #List[float]
contamination: list #List[float]
seed: list #List[int]
min_len: int = 5
df: Any
def load_data(self):
print(f"Loading data from {self.path} to DataFrame...")
self.df = | pd.read_pickle(self.path) | pandas.read_pickle |
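# --- Illustrative sketch (not part of the original class) ---
# TestData delegates the actual subsampling to src.utils.sample_data, whose
# signature is not shown here. A generic stand-in for the idea: draw a
# fraction of the inliers and mix in outliers at a target contamination rate.
# The column names ('text', 'outlier_label') and the formula are assumptions.
import pandas as pd

def sample_with_contamination(df: pd.DataFrame, fraction: float, contamination: float, seed: int) -> pd.DataFrame:
    inliers = df[df["outlier_label"] == 0].sample(frac=fraction, random_state=seed)
    n_out = int(round(len(inliers) * contamination / (1 - contamination)))
    outliers = df[df["outlier_label"] == 1].sample(n=n_out, random_state=seed)
    return pd.concat([inliers, outliers]).sample(frac=1, random_state=seed).reset_index(drop=True)

demo = pd.DataFrame({"text": [f"doc {i}" for i in range(100)],
                     "outlier_label": [1 if i % 10 == 0 else 0 for i in range(100)]})
print(sample_with_contamination(demo, fraction=0.5, contamination=0.1, seed=0)["outlier_label"].mean())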
'''
Script with Functions to
- clean Timeseries Data from ECA&D and save the cleaned Data as csv-file
- analyze Trend, Seasonality and Remainder
- calculate the # of lags for an AutoReg Model
'''
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import PolynomialFeatures
from sklearn.compose import ColumnTransformer
import seaborn as sns
from statsmodels.tsa.ar_model import AutoReg, ar_select_order
from statsmodels.graphics.tsaplots import plot_pacf, plot_acf
from sklearn.linear_model import LinearRegression
def data_cleaning(datapath):
'''
Function to clean Timeseries Data from ECA&D and save the cleaned Data as csv-file.
Parameters
--------
datapath: str
File path to the data.
Return
df: pd.DataFrame
Cleaned DataFrame
'''
df = pd.read_table(datapath, skiprows = 18, delimiter= ',', header=0, index_col=1, parse_dates = True)
df.rename(columns = {' TG':'TG', ' SOUID': 'SOUID', ' Q_TG': 'Q_TG'}, inplace = True) #remove whitespaces in header, rename columns
df.index.rename('date', inplace = True) #rename index column
df.dropna(axis=0,inplace = True) #drop rows with missing values
df.index = pd.to_datetime(df.index) #convert the index to a DatetimeIndex
df = df.replace('\n','', regex=True) #replace newlines
df = df.replace(' ','', regex=True) #replace whitespaces
df['t_mean'] = df['TG']*0.1 #TG is given in 0.1°C create column with T in °C
df['t_mean'].round(decimals = 2)
df.drop(df[df['t_mean'] < -30].index, inplace = True) #drop values on index where T is smaller than -30 °C (false data)
df.drop(df[['SOUID', 'Q_TG', 'TG']], axis =1, inplace = True) #drop SOUID, Q_TG and TG column
df.to_csv('./data/DATA_CLEAN.csv') #save cleaned DataFrame
return df
def explore_dataset(csv_file, location):
'''
Vizualisation of the Temperature dataset(csv-file)
Parameters
----------
csv_file: str
Name of the csv-file
location: str
The location of the weather station
Returns
---------
'''
df = pd.read_csv(csv_file, index_col=0, parse_dates=True)
#check for null values
plt.bar(x='Nan', height=df.isna().sum())
plt.title('Check for NaN values')
plt.show()
plt.plot(df.index,df['t_mean'])
plt.xlabel('Year')
plt.ylabel('Avg. Temperature [°C]')
plt.title('Temperature profile: ' + location)
plt.show()
plt.clf()
plt.plot(df.index[:3650],df['t_mean'][:3650])
plt.xlabel('dates')
plt.ylabel('Avg. Temperature [°C]')
plt.title('Temperature profile at '+location+' over the last 10 years.')
plt.show()
plt.clf()
plt.plot(range(1,13), df.groupby(df.index.month)['t_mean'].mean())
plt.xticks(rotation=30)
plt.xlabel('Months')
plt.ylabel('Avg. Temperature for each Month in the year range [°C]')
plt.title('Mean Temperature profile per Month: ' + location)
plt.show()
plt.clf()
def load_split_data_month(csv_file):
'''
Load of the Temperature dataset(csv-file) and split into train and test data.
Creating the index column with monthly frequency and the monthly mean of the data.
Parameters
----------
csv_file: str
Name of the csv-file
Returns
---------
df_m: pd.dataframe()
The dataframe with the train-data with an monthly freq.
df_test: pd.dataframe()
The dataframe with the test-data with an monthly freq.
Xtrain: matrix
The X values to train the model
ytrain: vector
The y values to train the model
Xtest: matrix
The X values to test the model
ytest: vector
The y values to test the model
'''
#load data
df_data = pd.read_csv(csv_file, index_col=0, parse_dates=True)
#Train_test-split
df_test = df_data[-365::] #create a test DataFrame with the values of the last year
df = df_data[:(len(df_data)-365)] #create a train DataFrame with all values except the last year
df_m = df.resample('MS').mean() #instead of daily mean use monthly mean
df_m.dropna(axis=0,inplace = True)
df_test = df_test.resample('MS').mean()
Xtrain = df_m.index
ytrain = df_m['t_mean']
Xtest = df_test.index
ytest = df_test['t_mean']
return df_m, df_test, Xtrain, ytrain, Xtest, ytest
def analyzing_trend_month(df,y):
'''
Analyzing the Trend of the Temperature dataset by creating a timestep column
and modeling a Linear Regression with and without Polynomial Features.
Parameters
----------
df: pd.dataframe()
The dataframe with the train-data
ytrain: vector
The y values to train the model
Returns
---------
df: pd.dataframe()
The new dataframe with the timestep, trend and trend_poly column.
'''
# Analyzing Time Series
df['timestep'] = range(len(df)) #creating timesteps to get the trend out of the data with a LinearRegression
Xtrend = df[['timestep']] #assign X values for LinReg
#Modelling linear
m = LinearRegression()
m.fit(Xtrend, y)
trend = m.coef_*12*df.index.year.nunique()
print('Trend: '+str(trend)+' °C') #getting a trend
print('intercept: ' + str(m.intercept_)) #getting an intercept
df['trend'] = m.predict(Xtrend)
df[['t_mean', 'trend']].plot()
plt.xlabel('Year')
plt.ylabel('Avg. Temperature [°C]')
plt.title('Trend Linear')
plt.show()
#Modelling polynomial
column_poly = ColumnTransformer([('poly', PolynomialFeatures(degree=2, include_bias=False), ['timestep'])]) #using polyFeat to analyze polynomial behaviour of Temp over time
column_poly.fit(Xtrend)
Xtrans=column_poly.transform(Xtrend)
m_poly = LinearRegression()
m_poly.fit(Xtrans, y)
trend = m_poly.coef_*12*df.index.year.nunique()
print('trend: ' + str(trend))
df['trend_poly'] = m_poly.predict(Xtrans)
df[['t_mean','trend_poly']].plot()
plt.xlabel('Year')
plt.ylabel('Avg. Temperature [°C]')
plt.title('Trend Polynomial Degree = 2')
plt.show()
return df
def analyzing_seasonality_month(df,X,y):
'''
Analyzing the Seasonality of the Temperature dataset using the month column.
Parameters
----------
df: pd.dataframe()
The whole DataFrame
Xtrain: matrix
The X values to train the model
ytrain: vector
The y values to train the model
Returns
---------
df: pd.dataframe()
The new dataframe with the trendANDseasonality column and the dummies(month).
'''
df['month'] = df.index.month #create column with # of month
seasonal_dummies = pd.get_dummies(df['month'],prefix='month', drop_first=True) #create dummies for each month
df = df.merge(seasonal_dummies,left_index = True, right_index=True)
Xseason = df.drop(['t_mean','trend','trend_poly','month'], axis=1)
m = LinearRegression()
m.fit(Xseason, y)
df['trendANDseasonality'] = m.predict(Xseason)
df[['t_mean','trendANDseasonality']].plot()
plt.xlabel('Year')
plt.ylabel('Avg. Temperature [°C]')
plt.title('Trend and Seasonality')
plt.show()
return df
def analyzing_remainder(df, csv_output_name):
'''
Analyzing the Remainder and save th df in a csv file.
Parameters
----------
df: pd.dataframe()
The dataframe with the data
csv_output_name: str
The filename for the output data.
Returns
---------
df: pd.dataframe()
The dataframe with all columns.
csv-file in the folder
'''
# Remainder
df['remainder'] = df['t_mean'] - df['trendANDseasonality'] #calculate the remainder by subtracting trend and seasonality
df['remainder'].plot()
plt.xlabel('Year')
plt.ylabel('Avg. Temperature [°C]')
plt.title('Remainder')
plt.show()
df_re = df['remainder']
df_re.to_csv(csv_output_name)
return df
def number_lags(csv_file):
'''
Using statsmodel to find out how many lags should be used for the AutoReg Model.
Parameters
----------
csv_file: str
The Name of the csv-file with the data
Returns
---------
df_re: pd.Dataframe
The dataframe of the csv_file.
lags: lst
The amount of lags that was calculated with ar_select_order (statsmodel)
summary: stasmodel output after fitting the model
'''
#load remainder csv file
df_re = | pd.read_csv(csv_file, index_col=0, parse_dates=True) | pandas.read_csv |
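# --- Illustrative sketch (not part of the original script) ---
# The truncated number_lags function above relies on statsmodels'
# ar_select_order; this is the usual pattern for picking the lag order and
# fitting the selected AutoReg model. The AR(2) series is synthetic and only
# stands in for the temperature remainder.
import numpy as np
import pandas as pd
from statsmodels.tsa.ar_model import AutoReg, ar_select_order

rng = np.random.default_rng(42)
noise = rng.normal(size=500)
y = np.zeros(500)
for t in range(2, 500):
    y[t] = 0.6 * y[t - 1] - 0.3 * y[t - 2] + noise[t]
remainder = pd.Series(y)

selection = ar_select_order(remainder, maxlag=20)
print('selected lags:', selection.ar_lags)
model = AutoReg(remainder, lags=selection.ar_lags).fit()
print(model.summary())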
import numpy as np
import pandas as pd
from rank_preferences import *
from correlations import *
from weighting_methods import *
from spotis import SPOTIS
from de import DE_algorithm
from visualization import *
def main():
# load dataset
filename = 'input/mobile_phones2000.csv'
data = | pd.read_csv(filename) | pandas.read_csv |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jan 18 18:29:04 2021
@author: danielgui
"""
import pandas as pd
import numpy as np
import transsim.xml2csv as xml2csv
import os
import dask.dataframe as dd
class Output_Processor:
def __init__(self):
pass
def generate(self, result_path):
## convert xml to csv
xml2csv.main([result_path + 'EdgeMean.xml'])
xml2csv.main([result_path + 'busstop_output.xml'])
xml2csv.main([result_path + 'trajectories_output.xml', '-p'])
os.makedirs(result_path + 'output/')
# -p is used to split the output files based on the first level
## bus stop output containing delay and person load information
try:
stopO = pd.read_csv(result_path + "busstop_output.csv",sep=';')
if not stopO.empty:
stopO=stopO[["stopinfo_id","stopinfo_busStop","stopinfo_started","stopinfo_arrivalDelay",
"stopinfo_ended","stopinfo_delay","stopinfo_initialPersons",
"stopinfo_loadedPersons","stopinfo_unloadedPersons",
"stopinfo_lane","stopinfo_pos","stopinfo_parking"]]
stopO=stopO.sort_values(["stopinfo_id","stopinfo_started"])
# write final stop output
stopO.to_csv(result_path + "output/busstop_info.csv",index=False)
else:
print('busstop output is empty')
except pd.errors.EmptyDataError:
print("busstop output is empty")
## edge based output with mean speed for each hour(3600s)
edgeO = pd.read_csv(result_path + "EdgeMean.csv",sep=';')
if not edgeO.empty:
edgeO=edgeO[edgeO.columns.intersection(["interval_begin","interval_end","edge_id","edge_speed",
"edge_density","edge_laneDensity","edge_left",
"edge_occupancy","edge_traveltime",
"edge_waitingTime","edge_entered"])]
# UNIT: "edge_speed":m/s, "edge_density":#veh/km, "edge_occupancy":%
edgeO.to_csv(result_path + "output/edge_info.csv",index=False)
else:
print('EdgeDump output is empty')
# ## trajectory for all vehicles during the simulation time interval
# motion = pd.read_csv(result_path + "trajectories_outputmotionState.csv",sep=';',low_memory=False)
# vehtype = pd.read_csv(result_path + "trajectories_outputactorConfig.csv",sep=';')
# vehref = pd.read_csv(result_path + "trajectories_outputvehicle.csv",sep=';')
# # extract the output values for buses
# vehref['vehicle_ref'] = vehref['vehicle_ref'].astype('str')
# bus=vehref[vehref['vehicle_ref'].apply(lambda x: len(x)>20)]
# busref=bus[['vehicle_ref','vehicle_id','vehicle_actorConfig']]
# busref.rename(columns={'vehicle_actorConfig' : 'actorConfig_id'},inplace = True)
# # join busref and vehtype by the same column 'actorConfig_id'
# businfo=pd.merge(busref, vehtype, on='actorConfig_id')
# traj=motion.loc[motion.motionState_vehicle.isin(businfo.vehicle_id), ]
# traj=traj[['motionState_vehicle','motionState_time','motionState_speed','motionState_acceleration']]
# traj=traj.sort_values(['motionState_vehicle','motionState_time'])
# traj.rename(columns={'motionState_vehicle' : 'vehicle_id','motionState_time':'time','motionState_speed':'speed',
# 'motionState_acceleration':'acceleration'},inplace = True)
# # UNIT: time:milliseconds, speed:0.01m/s, acceleration:0.0001m/s^2
# trajectory=pd.merge(traj, businfo, on='vehicle_id')
# trajectory=trajectory.drop(['vehicle_id'],axis=1)
# #group dataframe into multiple dataframe as a dict by bus name
# trajectory=dict(tuple(trajectory.groupby('vehicle_ref')))
# #write in csv files, bus trip name as the file name
# for key, df in trajectory.items():
# bus=key.replace(':','')
# with open(result_path + '' + 'output/Trajectory_' + bus + '.csv', 'w', newline='') as oFile:
# df.to_csv(oFile, index = False)
# print("Finished writing: " + 'Trajectory_' + bus)
## trajectory for all vehicles during the simulation time interval
motion = dd.read_csv(result_path + "trajectories_outputmotionState.csv",sep=';',low_memory=False)
print("motion file imported. length",motion.shape[0])
vehtype = pd.read_csv(result_path + "trajectories_outputactorConfig.csv",sep=';')
print('actor config imported. length', vehtype.shape[0])
vehref = pd.read_csv(result_path + "trajectories_outputvehicle.csv",sep=';')
print('vehref imported. length', vehref.shape[0])
# extract the output values for buses
vehref['vehicle_ref'] = vehref['vehicle_ref'].astype('str')
bus=vehref[vehref['vehicle_ref'].apply(lambda x: len(x)>20)]
busref=bus[['vehicle_ref','vehicle_id','vehicle_actorConfig']]
busref= busref.rename(columns={'vehicle_actorConfig' : 'actorConfig_id'})
print('busref',busref.shape[0])
# join busref and vehtype by the same column 'actorConfig_id'
businfo= | pd.merge(busref, vehtype, on='actorConfig_id') | pandas.merge |
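# --- Illustrative sketch (not part of the original class) ---
# The commented-out trajectory export above splits one DataFrame into one CSV
# per bus via dict(tuple(df.groupby(...))). The same pattern on toy data,
# written into a temporary directory (file names here are assumptions).
import os
import tempfile
import pandas as pd

traj = pd.DataFrame({
    "vehicle_ref": ["bus:A", "bus:A", "bus:B"],
    "time": [0, 1000, 0],      # milliseconds, as in the original comments
    "speed": [0, 250, 120],    # 0.01 m/s, as in the original comments
})
out_dir = tempfile.mkdtemp()
for key, df in dict(tuple(traj.groupby("vehicle_ref"))).items():
    bus = key.replace(":", "")
    df.to_csv(os.path.join(out_dir, f"Trajectory_{bus}.csv"), index=False)
print(sorted(os.listdir(out_dir)))  # ['Trajectory_busA.csv', 'Trajectory_busB.csv']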
from copy import copy
from pandas import DataFrame, concat, notnull, Series
from typing import List, Optional
from survey.attributes import RespondentAttribute
class AttributeContainerMixin(object):
_attributes: List[RespondentAttribute]
@property
def data(self) -> DataFrame:
"""
Return a DataFrame combining data from all the questions in the group.
"""
return concat([a.data for a in self._attributes], axis=1)
def attribute(self, name: str) -> Optional[RespondentAttribute]:
"""
Return the Attribute with the given name.
:param name: Name of the attribute to return.
"""
try:
return [a for a in self._attributes if a.name == name][0]
except IndexError:
return None
def to_list(self) -> List[RespondentAttribute]:
"""
Return all the Attributes asked in the Survey.
"""
return self._attributes
def merge(self, name: Optional[str] = '', **kwargs) -> RespondentAttribute:
"""
Return a new Question combining all the responses of the different
questions in the group.
N.B. assumes that there is a maximum of one response across all
questions for each respondent.
:param name: The name for the new merged Question.
:param kwargs: Attribute values to override in the new merged Question.
"""
if len(set([type(q) for q in self._attributes])) != 1:
raise TypeError(
'Questions must all be of the same type to merge answers.'
)
if self.data.notnull().sum(axis=1).max() > 1:
raise ValueError(
'Can only merge when there is a max of one response '
'across all questions per respondent.'
)
data = self.data.loc[self.data.notnull().sum(axis=1) == 1]
new_data = [row.loc[notnull(row)].iloc[0] for _, row in data.iterrows()]
new_attribute = copy(self._attributes[0])
new_attribute.name = name
new_attribute._data = | Series(data=new_data, name=name) | pandas.Series |
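# --- Illustrative sketch (not part of the original module) ---
# merge() above assumes each respondent answered at most one of the grouped
# questions and keeps that single non-null value per row. The same row-wise
# selection on a toy frame, with the loop used above and a vectorised
# bfill-based alternative.
import pandas as pd
from pandas import notnull

data = pd.DataFrame({
    "q1": ["yes", None, None],
    "q2": [None, "no", None],
    "q3": [None, None, "maybe"],
})
looped = [row.loc[notnull(row)].iloc[0] for _, row in data.iterrows()]
vectorised = data.bfill(axis=1).iloc[:, 0]
print(looped)               # ['yes', 'no', 'maybe']
print(vectorised.tolist())  # same result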
"""
Utilities, accessible via subcommands.
"""
import datetime
import itertools
import json
import os
import re
import shutil
import sys
import click
import numpy as np
import pandas as pd
import vampire.common as common
from vampire import preprocess_adaptive
from vampire.gene_name_conversion import convert_gene_names
from vampire.gene_name_conversion import olga_to_adaptive_dict
from sklearn.model_selection import train_test_split
@click.group()
def cli():
pass
@cli.command()
@click.option('--idx', required=True, help='The row index for the summary output.')
@click.option('--idx-name', required=True, help='The row index name.')
@click.argument('seq_path', type=click.Path(exists=True))
@click.argument('pvae_path', type=click.Path(exists=True))
@click.argument('ppost_path', type=click.Path(exists=True))
@click.argument('out_path', type=click.Path(writable=True))
def merge_ps(idx, idx_name, seq_path, pvae_path, ppost_path, out_path):
"""
Merge probability estimates from Pvae and Ppost into a single data frame and write to an output CSV.
SEQ_PATH should be a path to sequences in canonical CSV format, with
sequences in the same order as PVAE_PATH.
"""
def prep_index(df):
df.set_index(['amino_acid', 'v_gene', 'j_gene'], inplace=True)
df.sort_index(inplace=True)
pvae_df = pd.read_csv(seq_path)
pvae_df['log_Pvae'] = pd.read_csv(pvae_path)['log_p_x']
prep_index(pvae_df)
ppost_df = convert_gene_names(pd.read_csv(ppost_path), olga_to_adaptive_dict())
prep_index(ppost_df)
# If we don't drop duplicates then merge will expand the number of rows.
# See https://stackoverflow.com/questions/39019591/duplicated-rows-when-merging-dataframes-in-python
# We deduplicate Ppost, which is guaranteed to be identical among repeated elements.
merged = pd.merge(pvae_df, ppost_df.drop_duplicates(), how='left', left_index=True, right_index=True)
merged['log_Ppost'] = np.log(merged['Ppost'])
merged.reset_index(inplace=True)
merged[idx_name] = idx
merged.set_index(idx_name, inplace=True)
merged.to_csv(out_path)
@cli.command()
@click.option('--train-size', default=1000, help="Data count to use for train.")
@click.argument('in_csv', type=click.File('r'))
@click.argument('out1_csv', type=click.File('w'))
@click.argument('out2_csv', type=click.File('w'))
def split(train_size, in_csv, out1_csv, out2_csv):
"""
Do a train/test split.
"""
df = pd.read_csv(in_csv)
(df1, df2) = train_test_split(df, train_size=train_size)
df1.to_csv(out1_csv, index=False)
df2.to_csv(out2_csv, index=False)
@cli.command()
@click.option('--out', type=click.File('w'), help='Output file path.', required=True)
@click.option('--idx', required=True, help='The row index for the summary output.')
@click.option('--idx-name', required=True, help='The row index name.')
@click.option(
'--colnames', default='', help='Comma-separated column identifier names corresponding to the files that follow.')
@click.argument('in_paths', nargs=-1)
def summarize(out, idx, idx_name, colnames, in_paths):
"""
Summarize results of a run as a single-row CSV. The input is of flexible
length: each input file is associated with an identifier specified using
the --colnames flag.
"""
colnames = colnames.split(',')
if len(colnames) != len(in_paths):
raise Exception("The number of colnames is not equal to the number of input files.")
input_d = {k: v for k, v in zip(colnames, in_paths)}
index = pd.Index([idx], name=idx_name)
if 'loss' in input_d:
loss_df = pd.read_csv(input_d['loss'], index_col=0).reset_index()
# The following 3 lines combine the data source and the metric into a
# single id like `train_j_gene_output_loss`.
loss_df = pd.melt(loss_df, id_vars='index')
loss_df['id'] = loss_df['variable'] + '_' + loss_df['index']
loss_df.set_index('id', inplace=True)
df = pd.DataFrame(dict(zip(loss_df.index, loss_df['value'].transpose())), index=index)
else:
df = pd.DataFrame(index=index)
def slurp_cols(path, prefix='', suffix=''):
"""
Given a one-row CSV with summaries, add them to df with an optional
prefix and suffix.
"""
to_slurp = pd.read_csv(path)
assert len(to_slurp) == 1
for col in to_slurp:
df[prefix + col + suffix] = to_slurp.loc[0, col]
def add_p_summary(path, name):
"""
Add a summary of something like `validation_pvae` where `validation` is
the prefix and `pvae` is the statistic.
"""
prefix, statistic = name.split('_')
if statistic == 'pvae':
log_statistic = pd.read_csv(path)['log_p_x']
elif statistic == 'ppost':
log_statistic = np.log( | pd.read_csv(path) | pandas.read_csv |
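# --- Illustrative sketch (not part of the original CLI) ---
# Why merge_ps above calls drop_duplicates() on the Ppost frame before
# merging: repeated index keys on the right-hand side would otherwise
# multiply the rows of the left-hand side. The toy frames are invented.
import pandas as pd

pvae = pd.DataFrame({"amino_acid": ["CASSF", "CASSY"],
                     "v_gene": ["V1", "V2"],
                     "log_Pvae": [-20.1, -18.7]}).set_index(["amino_acid", "v_gene"])
ppost = pd.DataFrame({"amino_acid": ["CASSF", "CASSF", "CASSY"],
                      "v_gene": ["V1", "V1", "V2"],
                      "Ppost": [1e-9, 1e-9, 2e-8]}).set_index(["amino_acid", "v_gene"])

naive = pd.merge(pvae, ppost, how="left", left_index=True, right_index=True)
dedup = pd.merge(pvae, ppost.drop_duplicates(), how="left", left_index=True, right_index=True)
print(len(naive), len(dedup))  # 3 vs 2 rows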
import pandas as pd
import matplotlib.pyplot as plt
from utils.constants import Teams
# Pandas options for better printing
| pd.set_option('display.max_columns', 500) | pandas.set_option |
import pytest
import pandas as pd
import numpy as np
from sugarplot import interpolate, normalize_pandas, normalize_reflectance, ureg
from pandas.testing import assert_frame_equal
def test_normalize_pandas_simple_multiply():
data1 = pd.DataFrame({
'Time (ms)': [0, 1, 2, 3, 4, 5],
'Current (nA)': [0, 0.1, 0.2, 0.3, 0.4, 0.5]})
data2= pd.DataFrame({
'Time (ms)': [0, 1, 2, 3, 4, 5],
'Current (nA)': [
0,
0.16666666666666669,
0.33333333333333337,
0.5,
0.6666666666666666,
0.8333333333333335]})
multiplied_data_desired = pd.DataFrame({
'Time (ms)': [0, 1, 2, 3, 4, 5],
'power (nA ** 2)': [
0,
0.016666666666666669,
0.06666666666666668,
0.15,
0.26666666666666666,
0.41666666666666674]})
multiplied_data_actual = normalize_pandas(data1, data2)
assert_frame_equal(multiplied_data_actual, multiplied_data_desired)
def test_normalize_mul_integration():
data1 = pd.DataFrame({
'Time (ms)': [0, 1, 2, 3, 4, 5],
'Current (nA)': [0, 0.1, 0.2, 0.3, 0.4, 0.5]})
data2 = pd.DataFrame({
'Time (ms)': [0, 0.6, 1.2, 1.8, 2.4, 3.0, 3.6, 4.2, 4.8, 5.4],
'Current (nA)': [0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]})
multiplied_data_desired = pd.DataFrame({
'Time (ms)': [0, 1, 2, 3, 4, 5],
'power (nA ** 2)': [
0,
0.016666666666666669,
0.06666666666666668,
0.15,
0.26666666666666666,
0.41666666666666674]})
multiplied_data_actual = normalize_pandas(data1, data2)
assert_frame_equal(multiplied_data_actual, multiplied_data_desired)
def test_normalize_div_integration():
data1 = pd.DataFrame({
'Time (ms)': [0, 1, 2, 3, 4, 5],
'Current (nA)': [0, 0.1, 0.2, 0.3, 0.4, 0.5]})
data2 = pd.DataFrame({
'Time (ms)': [0, 0.6, 1.2, 1.8, 2.4, 3.0, 3.6, 4.2, 4.8, 5.4],
'Current (nA)': [0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]})
multiplied_data_desired = pd.DataFrame({
'Time (ms)': [0, 1, 2, 3, 4, 5],
'rel': [
np.NaN,
0.6,
0.6,
0.6,
0.6,
0.6]})
multiplied_data_actual = normalize_pandas(data1, data2, operation=np.divide, new_name='rel')
| assert_frame_equal(multiplied_data_actual, multiplied_data_desired) | pandas.testing.assert_frame_equal |
import tweepy
import pandas as pd
consumer_key= "XXX"
consumer_secret = "XXX"
access_token ="XXX"
access_token_secret= "XXX"
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
# authentication of access token and secret
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth,wait_on_rate_limit = True)
date1 = "2020-07-20"
date =[]
user_id = []
verified = []
text = []
user = []
location = []
source = []
likes = []
followers = []
following = []
retweets = []
def get_tweets(date1,word):
count = 0
    # NOTE: since_id expects a numeric tweet ID; passing a date string such as date1 here
    # will not restrict the results by date.
    for tweet in tweepy.Cursor(api.search, q=word, count=1000, lang="en", since_id=date1).items():
print(tweet.created_at)
date.append(tweet.created_at)
print(tweet.id)
user_id.append(tweet.id)
print(tweet.user.verified)
verified.append(tweet.user.verified)
print(tweet.text)
text.append(tweet.text)
print(tweet.user.screen_name)
user.append(tweet.user.screen_name)
print(tweet.user.location)
location.append(tweet.user.location)
print(tweet.source)
source.append(tweet.source)
print(tweet.favorite_count)
likes.append(tweet.favorite_count)
print(tweet.user.followers_count)
followers.append(tweet.user.followers_count)
print(tweet.user.friends_count)
following.append(tweet.user.friends_count)
print(tweet.retweet_count)
retweets.append(tweet.retweet_count)
print('<--------------------------------------------------->')
count+=1
print(count)
get_tweets(date1,"#KanyeWest")
data = list(zip(date,user_id,verified,text,user,location,source,likes,followers,following,retweets))
df = pd.DataFrame(data =data, columns =["Date","Tweet_id","Verified","Tweet",
"User","Location","Source","Likes","Followers","Following","Retweets"])
df.to_csv('tweets_kanye_2.csv')
df1 = pd.read_csv(r'C:\Users\ROSHAN\Documents\GitHub\extracting-tweets-forecasting-the-upcoming-elections-in-the-us\Extracting Tweets\Data\tweets.csv')
frames = [df,df1]
result = | pd.concat(frames) | pandas.concat |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2019/5/18 9:53 AM
# @Author : R
# @File : TMDB_predict_2.py
# @Software: PyCharm
# coding: utf-8
# # Kaggle for TMDB
# In[1]:
import numpy as np
import pandas as pd
import warnings
from tqdm import tqdm
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics import mean_squared_error  # used by rmse() below
import xgboost as xgb
import lightgbm as lgb
import catboost as cat
from collections import Counter
warnings.filterwarnings('ignore')
# get_ipython().run_line_magic('matplotlib', 'inline')
# Data description
# id: unique identifier for each movie
# belongs_to_collection: JSON with the collection's TMDB id, name, poster URL and backdrop URL
# budget: movie budget; a value of 0 means unknown
# genres: list of genres as JSON, containing id and name
# homepage: URL of the movie's official homepage
# imdb_id: unique id of the movie in the IMDb database
# original_language: original production language, a 2-character string
# original_title: original title of the movie; may differ from the name in belongs_to_collection
# overview: plot summary
# popularity: popularity of the movie, as a float
# poster_path: URL of the movie poster
# production_companies: JSON with the id and name of the production companies
# production_countries: JSON with the 2-character code and full name of the production countries
# release_date: release date of the movie
# runtime: movie runtime
# spoken_languages: language versions of the movie, JSON
# status: whether the movie has been released
# tagline: movie tagline
# title: English title of the movie
# keywords: movie keywords, JSON
# cast: JSON list of cast members, including id, name, gender, etc.
# crew: information on the production crew, including director, writers, etc.
# revenue: total revenue, the value to be predicted
# # EDA
# EDA has already been done
# Feature engineering and prediction
# Two additional datasets are used:
# 1. TMDB Competition Additional Features: adds three new features popularity2, rating and totalVotes
# 2. TMDB Competition Additional Training Data: 2000 extra training rows, which do not include all attributes of the original training set
# In[52]:
# Feature Engineering & Prediction
def rmse(y, y_pred):
return np.sqrt(mean_squared_error(y, y_pred))
# Data preprocessing function; among other things it converts non-numeric attributes to numeric ones
def prepare(df):
global json_cols
global train_dict
df[['release_month', 'release_day', 'release_year']] = df['release_date'].str.split('/', expand=True).replace(
np.nan, 0).astype(int)
df['release_year'] = df['release_year']
df.loc[(df['release_year'] <= 19) & (df['release_year'] < 100), "release_year"] += 2000
df.loc[(df['release_year'] > 19) & (df['release_year'] < 100), "release_year"] += 1900
    # Get day-of-week and quarter information from the release date
releaseDate = pd.to_datetime(df['release_date'])
df['release_dayofweek'] = releaseDate.dt.dayofweek
df['release_quarter'] = releaseDate.dt.quarter
    # Fill missing rating and totalVotes values with year/language group means
    # (note: the chained `df[df.rating.isna()]['rating'] = ...` pattern below assigns to a
    # temporary copy and does not actually fill anything in df)
rating_na = df.groupby(["release_year", "original_language"])['rating'].mean().reset_index()
df[df.rating.isna()]['rating'] = df.merge(rating_na, how='left', on=["release_year", "original_language"])
vote_count_na = df.groupby(["release_year", "original_language"])['totalVotes'].mean().reset_index()
df[df.totalVotes.isna()]['totalVotes'] = df.merge(vote_count_na, how='left',
on=["release_year", "original_language"])
# df['rating'] = df['rating'].fillna(1.5)
# df['totalVotes'] = df['totalVotes'].fillna(6)
    # Build a new feature, weightedRating
df['weightedRating'] = (df['rating'] * df['totalVotes'] + 6.367 * 1000) / (df['totalVotes'] + 1000)
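    # (Editorial note) This is the familiar Bayesian / IMDb-style weighted rating with an
    # assumed prior mean of 6.367 and a prior weight of 1000 votes: e.g. rating=8.0 with
    # totalVotes=9000 gives (8.0*9000 + 6367) / (9000 + 1000) ~= 7.84, so ratings backed
    # by few votes are pulled towards 6.367.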
    # Amounts from different periods are not directly comparable, so apply "inflation" at 1.8% per year
df['originalBudget'] = df['budget']
df['inflationBudget'] = df['budget'] + df['budget'] * 1.8 / 100 * (
2018 - df['release_year']) # Inflation simple formula
df['budget'] = np.log1p(df['budget'])
    # Count the gender composition of the crew and cast
df['genders_0_crew'] = df['crew'].apply(lambda x: sum([1 for i in x if i['gender'] == 0]))
df['genders_1_crew'] = df['crew'].apply(lambda x: sum([1 for i in x if i['gender'] == 1]))
df['genders_2_crew'] = df['crew'].apply(lambda x: sum([1 for i in x if i['gender'] == 2]))
df['genders_0_cast'] = df['cast'].apply(lambda x: sum([1 for i in x if i['gender'] == 0]))
df['genders_1_cast'] = df['cast'].apply(lambda x: sum([1 for i in x if i['gender'] == 1]))
df['genders_2_cast'] = df['cast'].apply(lambda x: sum([1 for i in x if i['gender'] == 2]))
    # Statistics on belongs_to_collection, Keywords and cast
df['_collection_name'] = df['belongs_to_collection'].apply(lambda x: x[0]['name'] if x != {} else 0)
le = LabelEncoder()
le.fit(list(df['_collection_name'].fillna('')))
df['_collection_name'] = le.transform(df['_collection_name'].fillna('').astype(str))
df['_num_Keywords'] = df['Keywords'].apply(lambda x: len(x) if x != {} else 0)
df['_num_cast'] = df['cast'].apply(lambda x: len(x) if x != {} else 0)
df['_num_crew'] = df['crew'].apply(lambda x: len(x) if x != {} else 0)
df['_popularity_mean_year'] = df['popularity'] / df.groupby("release_year")["popularity"].transform('mean')
df['_budget_runtime_ratio'] = df['budget'] / df['runtime']
df['_budget_popularity_ratio'] = df['budget'] / df['popularity']
# df['_budget_year_ratio'] = df['budget'] / (df['release_year'] * df['release_year'])
# df['_releaseYear_popularity_ratio'] = df['release_year'] / df['popularity']
# df['_releaseYear_popularity_ratio2'] = df['popularity'] / df['release_year']
df['_popularity_totalVotes_ratio'] = df['totalVotes'] / df['popularity']
df['_rating_popularity_ratio'] = df['rating'] / df['popularity']
df['_rating_totalVotes_ratio'] = df['totalVotes'] / df['rating']
# df['_totalVotes_releaseYear_ratio'] = df['totalVotes'] / df['release_year']
df['_budget_rating_ratio'] = df['budget'] / df['rating']
df['_runtime_rating_ratio'] = df['runtime'] / df['rating']
df['_budget_totalVotes_ratio'] = df['budget'] / df['totalVotes']
    # Flag whether the movie has a homepage
df['has_homepage'] = 1
df.loc[pd.isnull(df['homepage']), "has_homepage"] = 0
    # Flag whether belongs_to_collection is empty
df['isbelongs_to_collectionNA'] = 0
df.loc[pd.isnull(df['belongs_to_collection']), "isbelongs_to_collectionNA"] = 1
    # Flag whether tagline is empty
df['isTaglineNA'] = 0
df.loc[df['tagline'] == 0, "isTaglineNA"] = 1
    # Flag whether the original language is English
df['isOriginalLanguageEng'] = 0
df.loc[df['original_language'] == "en", "isOriginalLanguageEng"] = 1
    # Flag whether the original title differs from the (English) title
df['isTitleDifferent'] = 1
df.loc[df['original_title'] == df['title'], "isTitleDifferent"] = 0
    # Flag whether the movie has been released
df['isMovieReleased'] = 1
df.loc[df['status'] != "Released", "isMovieReleased"] = 0
    # Flag whether the movie has an overview
df['isOverviewNA'] = 0
df.loc[pd.isnull(df['overview']), 'isOverviewNA'] = 1
    # Get the collection id
df['collection_id'] = df['belongs_to_collection'].apply(lambda x: np.nan if len(x) == 0 else x[0]['id'])
    # Length statistics for original_title and similar attributes
df['original_title_letter_count'] = df['original_title'].str.len()
df['original_title_word_count'] = df['original_title'].str.split().str.len()
    # Length / word-count statistics for title, overview and tagline
df['title_word_count'] = df['title'].str.split().str.len()
df['overview_word_count'] = df['overview'].str.split().str.len()
df['tagline_word_count'] = df['tagline'].str.split().str.len()
df['len_title'] = df['title'].fillna('').apply(lambda x: len(str(x)))
    # Counts for genres, production companies, production countries, cast, crew and spoken languages
df['production_countries_count'] = df['production_countries'].apply(lambda x: len(x))
df['production_companies_count'] = df['production_companies'].apply(lambda x: len(x))
df['cast_count'] = df['cast'].apply(lambda x: len(x))
df['crew_count'] = df['crew'].apply(lambda x: len(x))
df['spoken_languages_count'] = df['spoken_languages'].apply(lambda x: len(x))
df['genres_count'] = df['genres'].apply(lambda x: len(x))
    # Group by year (or rating) and compute mean/median aggregates
    # (note: assigning a groupby aggregate indexed by the group keys to a column aligns on the
    # DataFrame's index, not on the group keys; `transform` would broadcast the values correctly)
df['meanruntimeByYear'] = df.groupby("release_year")["runtime"].aggregate('mean')
df['meanPopularityByYear'] = df.groupby("release_year")["popularity"].aggregate('mean')
df['meanBudgetByYear'] = df.groupby("release_year")["budget"].aggregate('mean')
df['meantotalVotesByYear'] = df.groupby("release_year")["totalVotes"].aggregate('mean')
df['meanTotalVotesByRating'] = df.groupby("rating")["totalVotes"].aggregate('mean')
df['medianBudgetByYear'] = df.groupby("release_year")["budget"].aggregate('median')
df['_popularity_theatrical_ratio'] = df['theatrical'] / df['popularity']
df['_budget_theatrical_ratio'] = df['budget'] / df['theatrical']
# runtime
df['runtime_cat_min_60'] = df['runtime'].apply(lambda x: 1 if (x <= 60) else 0)
df['runtime_cat_61_80'] = df['runtime'].apply(lambda x: 1 if (x > 60) & (x <= 80) else 0)
df['runtime_cat_81_100'] = df['runtime'].apply(lambda x: 1 if (x > 80) & (x <= 100) else 0)
df['runtime_cat_101_120'] = df['runtime'].apply(lambda x: 1 if (x > 100) & (x <= 120) else 0)
df['runtime_cat_121_140'] = df['runtime'].apply(lambda x: 1 if (x > 120) & (x <= 140) else 0)
df['runtime_cat_141_170'] = df['runtime'].apply(lambda x: 1 if (x > 140) & (x <= 170) else 0)
df['runtime_cat_171_max'] = df['runtime'].apply(lambda x: 1 if (x >= 170) else 0)
lang = df['original_language']
df_more_17_samples = [x[0] for x in Counter(pd.DataFrame(lang).stack()).most_common(17)]
for col in df_more_17_samples:
df[col] = df['original_language'].apply(lambda x: 1 if x == col else 0)
for col in range(1, 12):
df['month' + str(col)] = df['release_month'].apply(lambda x: 1 if x == col else 0)
# feature engeneering : Release date per quarter one hot encoding
for col in range(1, 4):
df['quarter' + str(col)] = df['release_quarter'].apply(lambda x: 1 if x == col else 0)
for col in range(1, 7):
df['dayofweek' + str(col)] = df['release_dayofweek'].apply(lambda x: 1 if x == col else 0)
    # Newly added features
df['is_release_day_of_1'] = 0
df.loc[df['release_day'] == 1, 'is_release_day_of_1'] = 1
df['is_release_day_of_15'] = 0
df.loc[df['release_day'] == 15, 'is_release_day_of_15'] = 1
    # More newly added features
# df['popularity2'] = np.log1p(df['popularity2'])
# df['popularity'] = np.log1p(df['popularity'])
# for col in range(1, 32):
# df['release_day' + str(col)] = df['release_day'].apply(lambda x: 1 if x == col else 0)
df['is_release_day_of_31'] = 0
    df.loc[df['release_day'] == 31, 'is_release_day_of_31'] = 1
# popularity
# df['popularity_cat_25'] = df['popularity'].apply(lambda x: 1 if (x <= 25) else 0)
# df['popularity_cat_26_50'] = df['popularity'].apply(lambda x: 1 if (x > 25) & (x <= 50) else 0)
# df['popularity_cat_51_100'] = df['popularity'].apply(lambda x: 1 if (x > 50) & (x <= 100) else 0)
# df['popularity_cat_101_150'] = df['popularity'].apply(lambda x: 1 if (x > 100) & (x <= 150) else 0)
# df['popularity_cat_151_200'] = df['popularity'].apply(lambda x: 1 if (x > 150) & (x <= 200) else 0)
# df['popularity_cat_201_max'] = df['popularity'].apply(lambda x: 1 if (x >= 200) else 0)
#
# df['_runtime_totalVotes_ratio'] = df['runtime'] / df['totalVotes']
# df['_runtime_popularity_ratio'] = df['runtime'] / df['popularity']
#
# df['_rating_theatrical_ratio'] = df['theatrical'] / df['rating']
# df['_totalVotes_theatrical_ratio'] = df['theatrical'] / df['totalVotes']
# df['_budget_mean_year'] = df['budget'] / df.groupby("release_year")["budget"].transform('mean')
# df['_runtime_mean_year'] = df['runtime'] / df.groupby("release_year")["runtime"].transform('mean')
# df['_rating_mean_year'] = df['rating'] / df.groupby("release_year")["rating"].transform('mean')
# df['_totalVotes_mean_year'] = df['totalVotes'] / df.groupby("release_year")["totalVotes"].transform('mean')
    # For JSON attributes that can hold multiple values, do a one-hot-style encoding
for col in ['genres', 'production_countries', 'spoken_languages', 'production_companies', 'Keywords']:
df[col] = df[col].map(lambda x: sorted(
list(set([n if n in train_dict[col] else col + '_etc' for n in [d['name'] for d in x]])))).map(
lambda x: ','.join(map(str, x)))
temp = df[col].str.get_dummies(sep=',')
df = pd.concat([df, temp], axis=1, sort=False)
    # Drop non-numeric attributes and attributes from which no useful information has been extracted yet
df.drop(['genres_etc'], axis=1, inplace=True)
df = df.drop(['belongs_to_collection', 'genres', 'homepage', 'imdb_id', 'overview','runtime'
, 'poster_path', 'production_companies', 'production_countries', 'release_date', 'spoken_languages'
, 'status', 'title', 'Keywords', 'cast', 'crew', 'original_language', 'original_title', 'tagline',
'collection_id'
], axis=1)
    # Fill missing values
df.fillna(value=0.0, inplace=True)
return df
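# NOTE (editorial sketch): `prepare` relies on the module-level globals `json_cols` and
# `train_dict`, which are not defined anywhere in this excerpt, and on columns such as
# rating, totalVotes and theatrical that come from the extra data sources described above
# and merged in below. The helpers here show one plausible way to build the missing
# globals; the column list and the min_count threshold are assumptions, not taken from
# the original script.
import ast

json_cols = ['genres', 'production_countries', 'spoken_languages',
             'production_companies', 'Keywords', 'cast', 'crew',
             'belongs_to_collection']

def parse_json_columns(df):
    # Turn the stringified JSON-like columns into lists of dicts
    # (an empty dict for NaN, matching the `!= {}` checks inside prepare).
    for col in json_cols:
        df[col] = df[col].apply(lambda x: {} if pd.isna(x) else ast.literal_eval(x))
    return df

def build_train_dict(train_df, min_count=10):
    # For each JSON column, keep the names that occur at least `min_count` times in train.
    d = {}
    for col in json_cols:
        counts = Counter(entry['name'] for row in train_df[col] for entry in row)
        d[col] = {name for name, count in counts.items() if count >= min_count}
    return d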
# Manually fix some rows in train
# The fixes cover budget and revenue
# Rows where budget is far smaller than revenue were identified and corrected
# Principle: fill in the real value where it can be looked up, otherwise use the mean of
# similar movies of the same type from the same year
train = pd.read_csv('train.csv')
train.loc[train['id'] == 16, 'revenue'] = 192864 # Skinning
train.loc[train['id'] == 90, 'budget'] = 30000000 # Sommersby
train.loc[train['id'] == 118, 'budget'] = 60000000 # Wild Hogs
train.loc[train['id'] == 149, 'budget'] = 18000000 # Beethoven
train.loc[train['id'] == 313, 'revenue'] = 12000000 # The Cookout
train.loc[train['id'] == 451, 'revenue'] = 12000000 # Chasing Liberty
train.loc[train['id'] == 464, 'budget'] = 20000000 # Parenthood
train.loc[train['id'] == 470, 'budget'] = 13000000 # The Karate Kid, Part II
train.loc[train['id'] == 513, 'budget'] = 930000 # From Prada to Nada
train.loc[train['id'] == 797, 'budget'] = 8000000 # Welcome to Dongmakgol
train.loc[train['id'] == 819, 'budget'] = 90000000 # Alvin and the Chipmunks: The Road Chip
train.loc[train['id'] == 850, 'budget'] = 90000000 # Modern Times
train.loc[train['id'] == 1007, 'budget'] = 2 # Zyzzyx Road
train.loc[train['id'] == 1112, 'budget'] = 7500000 # An Officer and a Gentleman
train.loc[train['id'] == 1131, 'budget'] = 4300000 # Smokey and the Bandit
train.loc[train['id'] == 1359, 'budget'] = 10000000 # Stir Crazy
train.loc[train['id'] == 1542, 'budget'] = 1 # All at Once
train.loc[train['id'] == 1570, 'budget'] = 15800000 # Crocodile Dundee II
train.loc[train['id'] == 1571, 'budget'] = 4000000 # Lady and the Tramp
train.loc[train['id'] == 1714, 'budget'] = 46000000 # The Recruit
train.loc[train['id'] == 1721, 'budget'] = 17500000 # Cocoon
train.loc[train['id'] == 1865, 'revenue'] = 25000000 # Scooby-Doo 2: Monsters Unleashed
train.loc[train['id'] == 1885, 'budget'] = 12 # In the Cut
train.loc[train['id'] == 2091, 'budget'] = 10 # Deadfall
train.loc[train['id'] == 2268, 'budget'] = 17500000 # Madea Goes to Jail budget
train.loc[train['id'] == 2491, 'budget'] = 6 # Never Talk to Strangers
train.loc[train['id'] == 2602, 'budget'] = 31000000 # Mr. Holland's Opus
train.loc[train['id'] == 2612, 'budget'] = 15000000 # Field of Dreams
train.loc[train['id'] == 2696, 'budget'] = 10000000 # Nurse 3-D
train.loc[train['id'] == 2801, 'budget'] = 10000000 # Fracture
train.loc[train['id'] == 335, 'budget'] = 2
train.loc[train['id'] == 348, 'budget'] = 12
train.loc[train['id'] == 470, 'budget'] = 13000000
train.loc[train['id'] == 513, 'budget'] = 1100000
train.loc[train['id'] == 640, 'budget'] = 6
train.loc[train['id'] == 696, 'budget'] = 1
train.loc[train['id'] == 797, 'budget'] = 8000000
train.loc[train['id'] == 850, 'budget'] = 1500000
train.loc[train['id'] == 1199, 'budget'] = 5
train.loc[train['id'] == 1282, 'budget'] = 9 # Death at a Funeral
train.loc[train['id'] == 1347, 'budget'] = 1
train.loc[train['id'] == 1755, 'budget'] = 2
train.loc[train['id'] == 1801, 'budget'] = 5
train.loc[train['id'] == 1918, 'budget'] = 592
train.loc[train['id'] == 2033, 'budget'] = 4
train.loc[train['id'] == 2118, 'budget'] = 344
train.loc[train['id'] == 2252, 'budget'] = 130
train.loc[train['id'] == 2256, 'budget'] = 1
train.loc[train['id'] == 2696, 'budget'] = 10000000
# Fix anomalous values in test
test = pd.read_csv('test.csv')
# Clean Data
test.loc[test['id'] == 6733, 'budget'] = 5000000
test.loc[test['id'] == 3889, 'budget'] = 15000000
test.loc[test['id'] == 6683, 'budget'] = 50000000
test.loc[test['id'] == 5704, 'budget'] = 4300000
test.loc[test['id'] == 6109, 'budget'] = 281756
test.loc[test['id'] == 7242, 'budget'] = 10000000
test.loc[test['id'] == 7021, 'budget'] = 17540562 # Two Is a Family
test.loc[test['id'] == 5591, 'budget'] = 4000000 # The Orphanage
test.loc[test['id'] == 4282, 'budget'] = 20000000 # Big Top Pee-wee
test.loc[test['id'] == 3033, 'budget'] = 250
test.loc[test['id'] == 3051, 'budget'] = 50
test.loc[test['id'] == 3084, 'budget'] = 337
test.loc[test['id'] == 3224, 'budget'] = 4
test.loc[test['id'] == 3594, 'budget'] = 25
test.loc[test['id'] == 3619, 'budget'] = 500
test.loc[test['id'] == 3831, 'budget'] = 3
test.loc[test['id'] == 3935, 'budget'] = 500
test.loc[test['id'] == 4049, 'budget'] = 995946
test.loc[test['id'] == 4424, 'budget'] = 3
test.loc[test['id'] == 4460, 'budget'] = 8
test.loc[test['id'] == 4555, 'budget'] = 1200000
test.loc[test['id'] == 4624, 'budget'] = 30
test.loc[test['id'] == 4645, 'budget'] = 500
test.loc[test['id'] == 4709, 'budget'] = 450
test.loc[test['id'] == 4839, 'budget'] = 7
test.loc[test['id'] == 3125, 'budget'] = 25
test.loc[test['id'] == 3142, 'budget'] = 1
test.loc[test['id'] == 3201, 'budget'] = 450
test.loc[test['id'] == 3222, 'budget'] = 6
test.loc[test['id'] == 3545, 'budget'] = 38
test.loc[test['id'] == 3670, 'budget'] = 18
test.loc[test['id'] == 3792, 'budget'] = 19
test.loc[test['id'] == 3881, 'budget'] = 7
test.loc[test['id'] == 3969, 'budget'] = 400
test.loc[test['id'] == 4196, 'budget'] = 6
test.loc[test['id'] == 4221, 'budget'] = 11
test.loc[test['id'] == 4222, 'budget'] = 500
test.loc[test['id'] == 4285, 'budget'] = 11
test.loc[test['id'] == 4319, 'budget'] = 1
test.loc[test['id'] == 4639, 'budget'] = 10
test.loc[test['id'] == 4719, 'budget'] = 45
test.loc[test['id'] == 4822, 'budget'] = 22
test.loc[test['id'] == 4829, 'budget'] = 20
test.loc[test['id'] == 4969, 'budget'] = 20
test.loc[test['id'] == 5021, 'budget'] = 40
test.loc[test['id'] == 5035, 'budget'] = 1
test.loc[test['id'] == 5063, 'budget'] = 14
test.loc[test['id'] == 5119, 'budget'] = 2
test.loc[test['id'] == 5214, 'budget'] = 30
test.loc[test['id'] == 5221, 'budget'] = 50
test.loc[test['id'] == 4903, 'budget'] = 15
test.loc[test['id'] == 4983, 'budget'] = 3
test.loc[test['id'] == 5102, 'budget'] = 28
test.loc[test['id'] == 5217, 'budget'] = 75
test.loc[test['id'] == 5224, 'budget'] = 3
test.loc[test['id'] == 5469, 'budget'] = 20
test.loc[test['id'] == 5840, 'budget'] = 1
test.loc[test['id'] == 5960, 'budget'] = 30
test.loc[test['id'] == 6506, 'budget'] = 11
test.loc[test['id'] == 6553, 'budget'] = 280
test.loc[test['id'] == 6561, 'budget'] = 7
test.loc[test['id'] == 6582, 'budget'] = 218
test.loc[test['id'] == 6638, 'budget'] = 5
test.loc[test['id'] == 6749, 'budget'] = 8
test.loc[test['id'] == 6759, 'budget'] = 50
test.loc[test['id'] == 6856, 'budget'] = 10
test.loc[test['id'] == 6858, 'budget'] = 100
test.loc[test['id'] == 6876, 'budget'] = 250
test.loc[test['id'] == 6972, 'budget'] = 1
test.loc[test['id'] == 7079, 'budget'] = 8000000
test.loc[test['id'] == 7150, 'budget'] = 118
test.loc[test['id'] == 6506, 'budget'] = 118
test.loc[test['id'] == 7225, 'budget'] = 6
test.loc[test['id'] == 7231, 'budget'] = 85
test.loc[test['id'] == 5222, 'budget'] = 5
test.loc[test['id'] == 5322, 'budget'] = 90
test.loc[test['id'] == 5350, 'budget'] = 70
test.loc[test['id'] == 5378, 'budget'] = 10
test.loc[test['id'] == 5545, 'budget'] = 80
test.loc[test['id'] == 5810, 'budget'] = 8
test.loc[test['id'] == 5926, 'budget'] = 300
test.loc[test['id'] == 5927, 'budget'] = 4
test.loc[test['id'] == 5986, 'budget'] = 1
test.loc[test['id'] == 6053, 'budget'] = 20
test.loc[test['id'] == 6104, 'budget'] = 1
test.loc[test['id'] == 6130, 'budget'] = 30
test.loc[test['id'] == 6301, 'budget'] = 150
test.loc[test['id'] == 6276, 'budget'] = 100
test.loc[test['id'] == 6473, 'budget'] = 100
test.loc[test['id'] == 6842, 'budget'] = 30
release_dates = pd.read_csv('release_dates_per_country.csv')
release_dates['id'] = range(1,7399)
release_dates.drop(['original_title','title'],axis = 1,inplace = True)
release_dates.index = release_dates['id']
train = pd.merge(train, release_dates, how='left', on=['id'])
test = | pd.merge(test, release_dates, how='left', on=['id']) | pandas.merge |
# Copyright (c) 2020 ING Bank N.V.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import numpy as np
import pandas as pd
import numbers
from probatus.utils import DimensionalityError
import warnings
def check_1d(x):
"""
Checks whether or not a list, numpy array, pandas dataframe, pandas series are one-dimensional.
Returns True when check is ok, otherwise throws a `DimensionalityError`
Args:
x: list, numpy array, pandas dataframe, pandas series
Returns: True or throws `DimensionalityError`
"""
if isinstance(x, list):
if any([isinstance(el, list) for el in x]):
raise DimensionalityError("The input is not 1D")
else:
return True
if isinstance(x, np.ndarray):
if x.ndim == 1 and all([isinstance(el, numbers.Number) for el in x]):
return True
else:
raise DimensionalityError("The input is not 1D")
if isinstance(x, pd.core.frame.DataFrame):
if len(x.columns) == 1 and | pd.api.types.is_numeric_dtype(x[x.columns[0]]) | pandas.api.types.is_numeric_dtype |
"""
Plot as-run river time series.
"""
from lo_tools import Lfun
from lo_tools import plotting_functions as pfun
import xarray as xr
import pandas as pd
import matplotlib.pyplot as plt
Ldir = Lfun.Lstart(gridname='cas6', tag='v3')
# load extraction (an xarray Dataset)
#fn = Ldir['LOo'] / 'pre' / 'river' / Ldir['gtag'] / 'Data_roms' / 'extraction_2018.01.01_2018.12.31.nc'
fn = Ldir['LOo'] / 'pre' / 'river' / Ldir['gtag'] / 'Data_roms' / 'extraction_2017.01.01_2020.12.31.nc'
x = xr.load_dataset(fn)
# get climatology
clm_fn = Ldir['LOo'] / 'pre' / 'river' / Ldir['gtag'] / 'Data_historical' / 'CLIM_flow_1980_2020.p'
dfc = pd.read_pickle(clm_fn)
# add the climatology, for practice
x['transport_clim'] = 0*x.transport
x['yearday'] = (('time'), x.time.to_index().dayofyear.to_numpy())
ydvec = x.yearday.values
# add the climatology to the xarray dataset (maybe use groupby instead?)
for rn in list(x.riv.values):
if rn in dfc.columns:
this_riv = dfc[rn] # a Series
this_riv_clim = 0 * ydvec
for ii in range(1,367):
this_riv_clim[ydvec==ii] = this_riv[ii]
x.transport_clim.loc[:,rn] = this_riv_clim
else:
print('Missing ' + rn)
# plotting
plt.close('all')
pfun.start_plot()
fig = plt.figure()
# for plotting time series we are better off using pandas
df = | pd.DataFrame(index=x.time.values) | pandas.DataFrame |
import json
import logging
import os
from pathlib import Path
import re
import yaml
import numpy as np
import pandas as pd
from biclust_comp.analysis import benchmarking
import biclust_comp.analysis.accuracy as acc
from biclust_comp import logging_utils, utils
DATASET_ORDER = [
"base",
"N50-T2",
"N10-T20",
"N100-T10",
"N500-T10",
"G100",
"G5000",
"large-K20",
"Negbin-medium",
"Negbin-high",
"Gaussian",
"Gaussian-medium",
"Gaussian-high",
"noiseless",
"sparse",
"dense",
"sparse-square",
"dense-square",
"K5",
"K10",
"K50",
"K70",
"large-K100",
"large-K400"
]
DATASET_NAMES = {'simulated/constant_negbin/size_mixed/K20_N10_G1000_T10':'base',
'simulated/constant_negbin/size_mixed/K20_N50_G1000_T2':'N50-T2',
'simulated/constant_negbin/size_mixed/K20_N10_G1000_T20':'N10-T20',
'simulated/constant_negbin/size_mixed/K20_N100_G1000_T10':'N100-T10',
'simulated/constant_negbin/size_mixed/K20_N500_G1000_T10':'N500-T10',
'simulated/constant_negbin/size_mixed/K20_N10_G100_T10':'G100',
'simulated/constant_negbin/size_mixed/K20_N10_G5000_T10':'G5000',
'simulated/constant_negbin/size_mixed/K20_N300_G10000_T20':'large-K20',
'simulated/constant_gaussian/size_mixed/K20_N10_G1000_T10':'Gaussian',
'simulated/constant/size_mixed/K20_N10_G1000_T10':'noiseless',
'simulated/constant_negbin_1e-1/size_mixed/K20_N10_G1000_T10':'Negbin\nmedium',
'simulated/constant_negbin_1e-2/size_mixed/K20_N10_G1000_T10':'Negbin\nhigh',
'simulated/constant_gaussian_100/size_mixed/K20_N10_G1000_T10':'Gaussian\nmedium',
'simulated/constant_gaussian_300/size_mixed/K20_N10_G1000_T10':'Gaussian\nhigh',
'simulated/constant_negbin/size_mixed_small/K20_N10_G1000_T10':'sparse',
'simulated/constant_negbin/size_mixed_large/K20_N10_G1000_T10':'dense',
'simulated/constant_negbin/square_size_mixed_small/K20_N10_G1000_T10':'sparse-square',
'simulated/constant_negbin/square_size_mixed_large/K20_N10_G1000_T10':'dense-square',
'simulated/constant/size_mixed_large/K5_N10_G1000_T10': 'large-K5',
'simulated/constant/size_mixed_large/K1_N10_G1000_T10': 'large-K1',
'simulated/constant/size_mixed_large/K2_N10_G1000_T10': 'large-K2',
'simulated/moran_gaussian/moran_spare_dense/K5_N10_G1000_T10': 'moran-K5',
'simulated/moran_gaussian/moran_spare_dense/K10_N30_G1000_T10': 'moran-K10',
'simulated/moran_gaussian/moran_spare_dense/K15_N30_G1000_T10': 'moran-K15',
'simulated/moran_gaussian/moran_spare_dense/K15_N300_G1000_T1': 'moran-K15-T1',
'simulated/shift_scale_0/size_mixed/K20_N10_G1000_T10': 'constant\nsamples',
'simulated/shift_scale_1/size_mixed/K20_N10_G1000_T10': 'shift',
'simulated/shift_scale_0_1/size_mixed/K20_N10_G1000_T10': 'scale-1',
'simulated/shift_scale_1_1/size_mixed/K20_N10_G1000_T10': 'shift-scale-1',
'simulated/shift_scale_0_5e-1/size_mixed/K20_N10_G1000_T10': 'scale',
'simulated/shift_scale_1_5e-1/size_mixed/K20_N10_G1000_T10': 'shift-scale',
'simulated/constant_negbin/size_mixed/K5_N10_G1000_T10':'K5',
'simulated/constant_negbin/size_mixed/K10_N10_G1000_T10':'K10',
'simulated/constant_negbin/size_mixed/K50_N10_G1000_T10':'K50',
'simulated/constant_negbin/size_mixed/K70_N10_G1000_T10':'K70',
'simulated/constant_negbin/size_mixed/K100_N300_G10000_T20':'large-K100',
'simulated/constant_negbin/size_mixed/K400_N300_G10000_T20':'large-K400',
'simulated/constant/size_mixed_large/K10_N10_G1000_T10':'sweep-Gaussian',
'simulated/constant_gaussian/size_mixed_small/K50_N10_G1000_T10':'sweep-noiseless'}
IMPC_DATASET_ORDER = [
'Size factor',
'Log',
'Gaussian',
'Size factor (Tensor)',
'Log (Tensor)',
'Gaussian (Tensor)'
]
IMPC_DATASET_NAMES = {
'real/IMPC/deseq_sf/raw/small_pathways': 'Size factor',
'real/IMPC/log/small_pathways': 'Log',
'real/IMPC/quantnorm/small_pathways': 'Gaussian',
'real/IMPC/tensor/deseq_sf/raw/small_pathways': 'Size factor (Tensor)',
'real/IMPC/tensor/log/small_pathways': 'Log (Tensor)',
'real/IMPC/tensor/quantnorm/small_pathways': 'Gaussian (Tensor)',
}
# Based on threshold_clust_err.png with K datasets - trying to keep it simple as possible
# Threshold 1e-2, with the exception of Plaid which gets threshold 0 (no other option)
BEST_THRESHOLD = {'Plaid': '_thresh_0e+0',
'SDA': '_thresh_1e-2',
'nsNMF': '_thresh_1e-2',
'BicMix': '_thresh_1e-2',
'BicMix-Q': '_thresh_1e-2',
'BicMix_Q': '_thresh_1e-2',
'SSLB': '_thresh_1e-2',
'FABIA': '_thresh_1e-2',
'SNMF': '_thresh_1e-2',
'MultiCluster': '_thresh_1e-2'}
FAILURE_VALUES = {'clust_err': 0,
'tensor': 'non-tensor',
'traits_tissue_mean_f1_score': 0,
'traits_genotype_mean_f1_score': 0,
'traits_mean_f1_score': 0,
'traits_factors_mean_max_f1_score': 0,
'factors_pathways_nz_alpha 1': 0,
'factors_pathways_nz_alpha 0.1': 0,
'factors_pathways_nz_alpha 0.05': 0,
'factors_pathways_nz_alpha 0.01': 0,
'factors_pathways_nz_alpha 0.001': 0,
'factors_pathways_nz_alpha 0.0001': 0,
'factors_pathways_nz_alpha 1e-05': 0,
'ko_traits_nz_alpha 1': 0,
'ko_traits_nz_alpha 0.1': 0,
'ko_traits_nz_alpha 0.05': 0,
'ko_traits_nz_alpha 0.01': 0,
'ko_traits_nz_alpha 0.001': 0,
'ko_traits_nz_alpha 0.0001': 0,
'ko_traits_nz_alpha 1e-05': 0,
'recon_error_normalised': 1,
'recovered_K': 0,
'redundancy_average_max': 0,
'redundancy_max': 0,
'redundancy_mean': 0,
'adjusted_redundancy_mean': 0}
EXPECTED_SIMULATED_RUNIDS="analysis/accuracy/expected_method_dataset_run_ids.txt"
EXPECTED_IMPC_RUNIDS="analysis/IMPC/expected_method_dataset_run_ids.txt"
EXPECTED_IMPC_RUNIDS_ALL="analysis/IMPC/expected_method_dataset_run_ids_all.txt"
def read_result_binary_best_threshold(folder):
method = folder.split('/')[1]
threshold_str = BEST_THRESHOLD[method]
threshold = utils.threshold_str_to_float(threshold_str)
return utils.read_result_threshold_binary(folder, threshold)
def calculate_adjusted_mean_redundancy_IMPC(df):
if 'recovered_K' not in df:
assert df['recovered_K_y'].astype(int).equals(df['recovered_K_x'].astype(int)), \
"Expected column 'recovered_K' in the dataframe, but if not then expected " \
"columns recovered_K_x and recovered_K_y, which should be equal.\n" \
f"{df['recovered_K_x'][:10]}\n{df['recovered_K_y'][:10]}"
df['recovered_K'] = df['recovered_K_x']
return calculate_adjusted_mean_redundancy(df)
def calculate_adjusted_mean_redundancy(df):
# In construction of accuracy dataframes we calculated mean_redundancy as
# the mean of the Jaccard matrix, with diagonal entries set to 0.
# We actually want the mean of *off-diagonal* entries.
# Example showing the difference: 2 factors returned, identical.
# Jaccard matrix with diagonal 0 is [[0,1], [1,0]], which has mean 1/2
# off-diagonal mean is 1
    # Let S be the sum of the off-diagonal entries. We have mean_redundancy = S/K**2
    # and want adjusted_mean_redundancy = S / (K**2 - K)
    # So we should multiply the scores by K**2/(K**2 - K), or equivalently K/(K-1)
    # (in the example above, this turns the 1/2 into the required off-diagonal mean of 1)
    scale_factors = df['recovered_K'] / (df['recovered_K'] - 1)
    adjusted = df['redundancy_mean'] * scale_factors
return adjusted
def extract_run_info_IMPC(run_id):
match = re.match(r'^run_seed_(\d+)_K_(\d+)(_qnorm_0)?$',
run_id)
return match.groups()
def extract_dataset_info_IMPC(dataset_name):
tensor = "(tensor|liver)?/?"
preprocess = "(deseq/raw|deseq/log|deseq_sf/raw|quantnorm|scaled|raw|log)"
gene_selection = "(pooled_cv|pooled_log|small_pathways|pooled)"
num_genes = "/?(5000|10000)?"
pattern = re.compile(f"real/IMPC/{tensor}{preprocess}/{gene_selection}{num_genes}$")
match = re.match(pattern,
dataset_name)
if match is None:
logging.error(f"{dataset_name} doesn't match expected form for IMPC dataset")
return match.groups()
def add_info_columns_IMPC(df):
extracted_info = df['dataset'].apply(extract_dataset_info_IMPC)
df['tensor'], df['preprocess'], df['gene_selection'], df['num_genes'] = zip(*extracted_info)
print(df['tensor'].value_counts())
df = df.fillna({'postprocessing': '_',
'tensor': 'non-tensor'})
if 'run_id' in df.columns:
df['_seed'], df['_K_init'], df['qnorm'] = zip(*df['run_id'].apply(extract_run_info_IMPC))
df['_K_init'] = df['_K_init'].astype(int)
df['_method'] = df['method'].copy()
df.loc[(df['method'] == 'BicMix') & (df['qnorm'].isna()), 'method'] = 'BicMix-Q'
df['method_dataset_run_id'] = df['_method'] + '/' + \
df['dataset'] + '/' + \
df['run_id']
return df
def restrict_to_expected_runs_list(df, expected_runs_list):
return df[df.method_dataset_run_id.isin(expected_runs_list)]
def restrict_to_expected_runs(df, expected_runs_file):
if expected_runs_file is None:
restricted = df
else:
        with open(expected_runs_file, 'r') as f:
method_dataset_run_ids = [line.strip() for line in f.readlines()]
restricted = restrict_to_expected_runs_list(df, method_dataset_run_ids)
return restricted
def add_baseline_rows(df, baseline_df):
"""Add any rows from baseline_df whose dataset matches one of the datasets in the df,
and whose method is 'baseline_XB_true'."""
datasets = df.dataset.unique()
restricted_baseline_df = baseline_df[baseline_df['dataset'].isin(datasets)]
df_w_baseline = pd.concat([df,
restricted_baseline_df[restricted_baseline_df['method'] == 'baseline_XB_true']])
df_w_baseline.method.replace({'baseline_XB_true': 'BASELINE'}, inplace=True)
return df_w_baseline
def impc_pick_theoretical_best_K_init(row):
theoretical_best_K_init = 50
return theoretical_best_K_init == row['K_init']
def impc_restrict_to_best_theoretical_K_init(df):
df_theoretical_best_K_init = df[df.apply(impc_pick_theoretical_best_K_init, axis=1)]
return df_theoretical_best_K_init
def pick_theoretical_best_K_init(row):
theoretical_best_K_init = row['K']
if row['method'] in ('BicMix', 'BicMix-Q', 'SSLB'):
if row['K'] == 20:
theoretical_best_K_init = 25
else:
theoretical_best_K_init = row['K'] + 10
return theoretical_best_K_init == row['K_init']
def pick_mean_best_K_init(row, best_K_init_dict):
best_mean_K_init = best_K_init_dict[(row['method'], row['seedless_dataset'])]
return best_mean_K_init == row['K_init']
def restrict_to_best_mean_K_init(df, param_to_optimise='clust_err'):
means = df.groupby(['method', 'seedless_dataset', 'K_init'])[param_to_optimise].mean()
best_K_init = pd.DataFrame(means.unstack().idxmax(axis=1)).reset_index()
best_K_init.columns = ['method', 'seedless_dataset', 'K_init']
print(best_K_init)
best_K_init_dict = {(row['method'], row['seedless_dataset']) : row['K_init']
for row in best_K_init.to_dict('records')}
df_mean_best_K_init = df[df.apply(lambda row: pick_mean_best_K_init(row, best_K_init_dict),
axis=1)]
return df_mean_best_K_init
def restrict_to_best_theoretical_K_init(df):
df_theoretical_best_K_init = df[df.apply(pick_theoretical_best_K_init, axis=1)]
return df_theoretical_best_K_init
def restrict_to_best_threshold(df):
return df[df['processing'] == df['method'].map(BEST_THRESHOLD)]
def add_na_rows_expected_runs(df, expected_runs_file, processing=None, plaid_processing='_thresh_0e+0'):
logging.info(f"Full list of columns in df is {list(df.columns)}")
if expected_runs_file is None:
combined = df
else:
with open(expected_runs_file, 'r') as f:
method_dataset_run_ids = [line.strip() for line in f.readlines()]
expected_runs = set(method_dataset_run_ids)
logging.info(f"Expected {len(expected_runs)} runs, first: {logging_utils.get_example_element(expected_runs)}")
actual_runs = set(df.method_dataset_run_id.unique())
logging.info(f"Actually found {len(actual_runs)} runs, first: {logging_utils.get_example_element(actual_runs)}")
failed_runs = expected_runs.difference(actual_runs)
logging.info(f"Missing {len(failed_runs)} runs, first: {logging_utils.get_example_element(failed_runs)}")
all_runs = '\n'.join(sorted(failed_runs))
logging.debug(f"Missing runs:\n{all_runs}")
if len(failed_runs) > 0:
failed_runs_dicts = [read_information_from_mdr_id(failed_run) for failed_run in failed_runs]
failed_runs_df = pd.DataFrame(failed_runs_dicts)
logging.info(failed_runs_df)
logging.info(f"Full list of columns in failed_runs_df is {list(failed_runs_df.columns)}")
if processing is None:
processing = list(df.processing.unique())
logging.info(f"Using list of processing values: {processing}")
assert plaid_processing in processing
copies = []
for processing_str in processing:
copy = failed_runs_df.copy()
copy['processing'] = processing_str
copies.append(copy)
failed_runs_processed = pd.concat(copies)
logging.info(failed_runs_processed)
logging.info(f"Full list of columns in failed_runs_processed is {list(failed_runs_processed.columns)}")
failed_runs_processed = failed_runs_processed[(failed_runs_processed.method != 'Plaid') |
(failed_runs_processed.processing == plaid_processing)]
combined = pd.concat([df, failed_runs_processed])
else:
combined = df
return combined
def read_information_from_mdr_id(method_dataset_run_id):
run_info = {}
# If we have 'results/' at start, remove it
if method_dataset_run_id.startswith('results/'):
method_dataset_run_id = method_dataset_run_id[len('results/'):]
split_mdr_id = method_dataset_run_id.split("/")
run_info['method_dataset_run_id'] = method_dataset_run_id
run_info['method'] = split_mdr_id[0]
run_info['dataset'] = "/".join(split_mdr_id[1:-1])
if 'seed' in run_info['dataset']:
run_info['seedless_dataset'] = "/".join(split_mdr_id[1:-2])
else:
run_info['seedless_dataset'] = run_info['dataset']
if 'simulated' in run_info['dataset']:
match = re.match(r'simulated/(\w*)/(\w*)/K(\d+)_.*/seed_(\d+)',
run_info['dataset'])
if match is not None:
run_info['noise'] = match[1]
run_info['K'] = int(match[3])
run_info['sim_seed'] = match[4]
elif 'IMPC' in run_info['dataset']:
matches = extract_dataset_info_IMPC(run_info['dataset'])
run_info['tensor'] = matches[0]
run_info['preprocess'] = matches[1]
run_info['gene_selection'] = matches[2]
run_info['num_genes'] = matches[3]
run_info['run_id'] = split_mdr_id[-1]
match = re.match(r'run_seed_\d+_K_(\d+)', run_info['run_id'])
run_info['K_init'] = int(match[1])
run_info['_method'] = run_info['method']
if run_info['method'] == 'BicMix':
if 'qnorm_0' in run_info['run_id']:
run_info['method'] = 'BicMix'
else:
run_info['method'] = 'BicMix-Q'
return run_info
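# Worked example (editorial note, derived from the regexes above): the id
#   'BicMix/simulated/constant_negbin/size_mixed/K20_N10_G1000_T10/seed_1234/run_seed_1_K_25'
# yields _method='BicMix', method='BicMix-Q' (no '_qnorm_0' suffix in the run id),
# noise='constant_negbin', K=20, sim_seed='1234', K_init=25, and a seedless_dataset that
# drops the trailing 'seed_1234' component; a run id ending in '_qnorm_0' keeps
# method='BicMix' instead.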
def construct_params_df(params_files):
"""Given a list of params.json files, read in each and label with the method, dataset
and run_id indicated by the filename. Return as a dataframe, with each row corresponding
to one params.json file."""
params_dicts = []
methods = []
datasets = []
run_ids = []
for params_file in params_files:
# Read in parameters from JSON file
with open(params_file, 'r') as f:
params = json.load(f)
params_dicts.append(params)
# Deduce the method, dataset and run_id from the filename
match = re.match(r'[/\w]*results/(\w+)/([/\-\w]+)/(run_.+)/params.json$', params_file)
methods.append(match.groups()[0])
datasets.append(match.groups()[1])
run_ids.append(match.groups()[2])
# Concatenate all parameters into a dataframe
# and add method, dataset and run_id columns
params_df = | pd.DataFrame(params_dicts) | pandas.DataFrame |
import pandas as pd
import numpy as np
import git
import os
import sys
import bokeh.io
import bokeh.application
import bokeh.application.handlers
import bokeh.models
import bokeh.plotting as bkp
from bokeh.models import Span
import holoviews as hv
from pathlib import Path
# from bokeh.io import export_png
#-- Setup paths
# Get parent directory using git
repo = git.Repo("./", search_parent_directories=True)
homedir = repo.working_dir
# Change working directory to parent directory
os.chdir(homedir)
# Add 'Dan' directory to the search path for imports
sys.path.append('Dan')
# Import our custom cube managing functions
import cube_formatter as cf
#-- Setup bokeh
bokeh.io.output_notebook()
hv.extension('bokeh')
#-- Control parameters
# Top N counties to plot with the most deaths
# Set to -1 to plot all
plotN = 20
shift = 20
# Data Manipulation flags (should match those used in creating submission file)
isAllocCounties = True # Flag to distribute state deaths amongst counties
isComputeDaily = False # Flag to translate cumulative data to daily counts
#- Plot-type control flags
isStateWide = False # Flag to plot state-wise data (will use nyt_states file for true_df)
# The raw cube won't be affected so make sure it is also state-wise data
# AND cumulative since there is only cumulative nyt_us_states data
isCumul = True # Flag to denote that the plot should be cumulative, not daily deaths
# ** Only affects county-level data since state-wide is implicitly cumulative
# This sets which county-wide nyt file is used and sets the plot y-axis label
# Key days (should match those used in creating the cube)
global_dayzero = pd.to_datetime('2020 Jan 21')
# Day until which model was trained (train_til in epid model)
# Leave as None to not display a boundary
boundary = '2020 May 10'
# Day to use for allocating to counties
# Leave as None to use most recent date
# OR use '2020-04-23' format to allocate based on proportions from that day
alloc_day = '2020-05-10'
# Flag to choose whether to save .svg of figures
is_saveSVG = False
# Filename (including path) for saving .svg files when is_saveSVG=True
# county, state, and fips will be appended to the name to differentiate plots
svg_flm = 'Dan/MidtermFigs/CountyWideDaily2/'
#-- Files to utilize
# Filename for cube of model data
# should be (row=sample, col=day, pane=state) with state FIPS as beef in row1
mat_model = 'Alex\\PracticeOutputs\\fresh.mat'#'Dan\\train_til_today.csv'
# Reference file to treat as "true" death counts
csv_true = 'data\\us\\covid\\nyt_us_counties_daily.csv' # daily county counts (also used for allocating deaths when req.)
csv_ST_true = 'data\\us\\covid\\nyt_us_states.csv' # this is cumulative ONLY; no _daily version exists
csv_CT_cumul_true = 'data\\us\\covid\\nyt_us_counties.csv' # county cumulative counts
# reference file for clustering df
# This assignment as done below assumes that the right file just has _clusters.csv appended.
# You can enter the actual path manually if you'd like
cluster_ref_fln=os.path.splitext(mat_model)[0] + '_clusters.csv'
#-- Read and format true data to have correct columns
# Read correct file for requested setup
if isStateWide:
# Plotting state-wide so use nyt state file (implicitly cumulative)
true_df = pd.read_csv(csv_ST_true)
else:
if isCumul:
# plotting cumulative county-wide so pull this file
true_df = pd.read_csv(csv_CT_cumul_true)
else:
# plotting daily county-wide so pull this file
true_df = pd.read_csv(csv_true)
# The nyt_us_counties.csv file is SUPER FLAWED so we need to fix this:
# - has some empty values in the fips column, causing problems with .astype(int)
# - Straight up doesn't have fips entry for NYC so need to hardcode its fips
if (not isStateWide) and isCumul:
# Reading in problematic file.
# Replace empty value on NYC with 36061
true_df.loc[true_df.county=='New York City', 'fips'] = 36061
# Remove rows with nans from the df (these are the counties we don't care about)
true_df = true_df[true_df['fips'].notna()]
# Reformat some columns
true_df['fips'] = true_df['fips'].astype(int)
true_df['id'] = true_df['date'] + '-' + true_df['fips'].astype(str)
#-- Read and format model data to county-based
# read raw cube from epid. code
model_cube = cf.read_cube(mat_model)
# format to county-based in same way as format_sub
if isComputeDaily:
model_cube = cf.calc_daily(model_cube)
if isAllocCounties:
model_cube = cf.alloc_fromCluster(model_cube, cluster_ref_fln, alloc_day=alloc_day)
#-- Calculate quantiles for all modeled counties
# Quantiles to consider
perc_list = [10, 20, 30, 40, 50, 60, 70, 80, 90]
# Calculate along each column ignoring the first row of beef
model_quants = np.percentile(model_cube[1:,:,:],perc_list,0)
# model_quants now has 9 rows, one for each of the quantiles requested
# The cols and panes are the same format as model_cube
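# (Editorial sanity check, safe to drop) one row per requested percentile, with the
# day/county layout unchanged from the raw cube:
assert model_quants.shape[0] == len(perc_list)
assert model_quants.shape[1:] == model_cube.shape[1:]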
#-- Order model counties by peak deaths/day predicted AND extract counties for plotting from the cube
# Get maximum deaths/day ever hit by each county
# Use 4th row of model_quants to use the 50th percentile (ie. the central prediction)
peak_daily_deaths = np.max(model_quants[4,:,:],0)
# Get indices of sorted (descending) vector
# NOTE: argsort only works in ascdending order so use [::-1] to reverse
peak_inds = np.argsort(peak_daily_deaths)[::-1]
# Take the largest plotN counties (since these are the only ones requested by the user)
peak_inds = peak_inds[shift:plotN+shift]
# Extract the resulting counties
# results will be implicitly sorted due to use of argsort
model_quants = model_quants[:,:,peak_inds] # Get quantiles
model_fips = model_cube[0,0,peak_inds] # Get fips ID's
#-- Extract the same counties from the true data and add column with datetime date
# Pull desired counties from true_df
true_df = true_df[true_df.fips.isin(model_fips)]
# Add column of dates in datetime format
true_df['dateDT'] = pd.to_datetime(true_df['date'].values)
if isAllocCounties:
#-- Read in cluster-to-fips translation (used for showing which counties were clustered)
# Load cluster data
fips_to_clst = pd.read_csv(cluster_ref_fln)
# Extract useful columns
fips_to_clst = fips_to_clst[['fips', 'cluster']]
# Cast fips and cluster values to int
fips_to_clst['fips'] = fips_to_clst['fips'].astype('int')
fips_to_clst['cluster'] = fips_to_clst['cluster'].astype('int')
# Cast to pandas series
fips_to_clst = pd.Series(fips_to_clst.set_index('fips')['cluster'])
else:
# Define empty list so that "in" check later doesn't cause errors
fips_to_clst = []
#-- Create directory for output .svg files if necessary
if is_saveSVG:
# Append sample filename just to get proper path
tmp_flm = '%sstate_county_fips.svg'%svg_flm
# Create directory if necessary
Path(tmp_flm).parent.mkdir(parents=True, exist_ok=True)
for ind, cnty in enumerate(model_fips):
# Pull just the relevant county
cnty_true_df = true_df[true_df['fips'] == cnty]
cnty_model = model_quants[:,:,ind]
# Ensure true_df is chronolically sorted
cnty_true_df.sort_values(by=['dateDT'],inplace=True)
# Create column with days since global_dayzero (to have same reference point for both datasets)
cnty_true_df['rel_date'] = (cnty_true_df['dateDT'] - global_dayzero)/np.timedelta64(1,'D')
# Create time axes
t_true = cnty_true_df['rel_date'].values
t_model = np.arange(cnty_model.shape[1])
# Format title for state vs. county plots
if isStateWide:
# Don't add county item since it's not pertinent
ptit = 'SEIIRD+Q Model: %s (%d)'%(cnty_true_df['state'].iloc[0], cnty)
else:
# Include county in title
ptit = 'SEIIRD+Q Model: %s, %s (%d)'%(cnty_true_df['county'].iloc[0],cnty_true_df['state'].iloc[0], cnty)
if cnty in fips_to_clst:
# Add cluster ID when the county was clustered
ptit += ' [Cluster %d]'%fips_to_clst[cnty]
# Format y-axis label for cumulative vs. daily plots
if isCumul or isStateWide:
# NOTE: statewide is implicitly cumulative
# Set y-axis label to show cumulative counts
ylab = '# deaths total'
else:
# Set y-axis label to show deaths/day
ylab = '# deaths/day'
# Create figure for the plot
p = bkp.figure( plot_width=600,
plot_height=400,
title = ptit,
x_axis_label = 't (days since %s)'%global_dayzero.date(),
y_axis_label = ylab)
# CONSIDER FLIPPING THE ORDER OF QUANTILES TO SEE IF IT FIXES THE PLOTTING
# Plot uncertainty regions
for i in range(4):
p.varea(x=t_model, y1=cnty_model[i,:], y2=cnty_model[-i-1,:], color='black', fill_alpha=perc_list[i]/100)
# Plot 50th percentile line
p.line(t_model, cnty_model[4,:], color = 'black', line_width = 1)
# Plot true deaths
p.circle(t_true, cnty_true_df['deaths'], color ='black')
# Apply training boundary if desired
if boundary is not None:
bd_day = ( | pd.to_datetime(boundary) | pandas.to_datetime |
"""
Module containing classes, methods and functions related to queries to the ZAMG datahub.
"""
import pandas as pd
class ZAMGdatahubQuery:
"""
Attributes:
"""
def __init__(self,dataset,params,gridboxlabel,lat_min,lat_max,lon_min,lon_max,output="netcdf"):
dataset = dataset.upper()
if dataset=="INCA":
query = makeQuery(params,gridboxlabel,lat_min,lat_max,lon_min,lon_max,output=output)
self.output_filename_head = "incal-hourly"
elif dataset=="SPARTACUS":
query = makeQuery(params,gridboxlabel,lat_min,lat_max,lon_min,lon_max,output=output)
self.output_filename_head = "spartacus-daily"
else:
print("Specified dataset was not 'SPARTACUS' or 'INCA'. Setting the output filename to 'data'...")
query = makeQuery(params,gridboxlabel,lat_min,lat_max,lon_min,lon_max,output=output)
self.output_filename_head = "data"
# add attributes
self.query = query
self.params = params
self.lat_min = lat_min
self.lat_max = lat_max
self.lon_min = lon_min
self.lon_max = lon_max
self.dataset = dataset
self.name = gridboxlabel
def saveQuery(self,filename=None,DIR=None):
if DIR is None:
DIR="."
if filename is None:
filename = f"{self.dataset}_query_{self.name}"
saveQuery(self.query,filename,DIR=DIR)
def makeQuery(params,gridboxlabel,lat_min,lat_max,lon_min,lon_max,output="netcdf"):
query = {"params":params,
"gridboxlabel":gridboxlabel,
"lat_min":lat_min,
"lat_max":lat_max,
"lon_min":lon_min,
"lon_max":lon_max}
if output == "netcdf":
query["output_format"]=output
query["file_extention"]="nc"
elif output == "csv":
query["output_format"]=output
query["file_extention"]=output
else:
raise ValueError("The output can only be 'netcdf' or 'csv'.")
return query
def saveQuery(query,filename,DIR="."):
queryTable = pd.DataFrame.from_dict(query,orient="index",columns=["query"])
queryTable.to_csv(f"{DIR}/{filename}.txt",sep="\t")
print(f'Query saved to "{DIR}/{filename}.txt"')
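# Example usage (editorial sketch; the coordinates and parameter string are placeholders,
# not real ZAMG identifiers):
#   box = ZAMGdatahubQuery("INCA", "T2M,RR", "myGridbox", 48.1, 48.3, 16.2, 16.5)
#   box.saveQuery()                                # writes ./INCA_query_myGridbox.txt
#   query = loadQuery("INCA_query_myGridbox.txt")  # round-trips the saved query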
def loadQuery(file):
query = | pd.read_table(file,index_col=0) | pandas.read_table |
import pandas as pd
import numpy as np
import pickle
import lap_v2_py3 as lap_v2
reprocess_new_basis = True
#Folder with data:
source_folder = '../Source_Data/'
dest_folder = '../Processed_Data/'
if reprocess_new_basis:
#Load in the conversion table
conv = pd.read_csv(source_folder+'ann.csv',usecols=[1,2],index_col=0,squeeze=True)
#Throw out ambiguous reads
conv = conv[~conv.index.duplicated(keep=False)]
#Load in the new basis data
LU = pd.read_excel(source_folder+'InVivo.xlsx',sheet_name='LU_POS vs FG',index_col=0,usecols=[0,5,6,7]).reindex(index=conv.keys()).mean(axis=1)
FG = pd.read_excel(source_folder+'InVivo.xlsx',sheet_name='LU_POS vs FG',index_col=0,usecols=[0,2,3,4]).reindex(index=conv.keys()).mean(axis=1)
TH = pd.read_excel(source_folder+'InVivo.xlsx',sheet_name='TH_POS vs FG',index_col=0,usecols=[0,5,6,7]).reindex(index=conv.keys()).mean(axis=1)
BR = pd.read_excel(source_folder+'InVivo.xlsx',sheet_name='BR_POS vs ECT',index_col=0,usecols=[0,2,3,4]).reindex(index=conv.keys()).mean(axis=1)
ECT = pd.read_excel(source_folder+'InVivo.xlsx',sheet_name='BR_POS vs ECT',index_col=0,usecols=[0,5,6,7]).reindex(index=conv.keys()).mean(axis=1)
newdict = {'E.9 Lung Prim Nkx+':LU,'E.13 Thyroid':TH,'E.8.25 Foregut Endoderm':FG,'E.9 Forebrain':BR,'E.8.25 Ectoderm':ECT}
#Reindex using Entrez ID's, add name, and average duplicates
for name in newdict.keys():
newdict[name].index=conv
newdict[name].dropna(inplace=True)
newdict[name].name = name
temp = newdict[name].copy()
newdict[name] = newdict[name][~newdict[name].index.duplicated()]
for item in newdict[name].index:
newdict[name].loc[item] = temp.loc[item].mean()
del temp
f = open(dest_folder+'NewBasis.dat','wb')
pickle.dump(newdict,f)
f.close()
else:
f = open(dest_folder+'NewBasis.dat','rb')
newdict = pickle.load(f)
f.close()
#%% Load in the basis data
basis = pd.read_csv(source_folder+'Mouse_Basis_Data.txt',sep='\t',index_col=0,usecols=[0]+list(range(3,64)))
#####################
# Append new basis data
#thresh = 1
basis_new = basis.copy()
newdict_log = {}
#for name in newdict.keys():
# newdict_log[name] = np.log2(newdict[name]+1)
# basis_new = basis_new.join(newdict_log[name][newdict_log[name]>thresh],how='inner')
for name in newdict.keys():
basis_new = basis_new.join(newdict[name],how='inner')
basis_new.dropna(inplace=True)
####################
#Load Keri's data
keri_index = pd.read_csv(source_folder+'entrez_id.txt',index_col=None,header=None).squeeze().values
keri_label = pd.read_csv(source_folder+'keri_cell_lbl.txt',index_col=None,header=None).squeeze()
keri = pd.read_csv(source_folder+'keri_ranknorm_data_corr.txt',index_col=None,header=None,sep='\t')
keri.index=keri_index
keri = keri.rename(columns=keri_label)
####################
# Load original project data
data = pd.read_excel(source_folder+'InVitroNew.xlsx',index_col=0,usecols=[0]+list(range(38,50)))
####################
#Load new data, reindex using Entrez ID's, and average duplicates
#Load in the conversion table
conv = pd.read_csv(source_folder+'AnnotationTable_mogene10sttranscriptcluster.txt',index_col=1,usecols=[2,3],squeeze=True,sep='\t')
#Throw out ambiguous reads
conv = conv[~conv.index.duplicated(keep=False)]
data_new = pd.read_excel(source_folder+'fpkm_7-2018.xlsx',index_col=0).reindex(index=conv.keys())
data_new.index=conv
data_new.dropna(inplace=True)
#data_new = data_new[(data_new.T != 0).any()]
temp = data_new.copy()
data_new = data_new[~data_new.index.duplicated()]
#Right now, not averaging duplicates, because it is slow and doesn't matter
#for label in data_new.keys():
# for item in data_new.index:
# data_new.loc[item,label] = temp[label].loc[item].mean()
del temp
####################
#Load 13.5 hepatoblast
hepatoblast = pd.read_csv(source_folder+'E13.5.hepatoblast.csv',index_col=0).mean(axis=1).squeeze().reindex(conv.index)
hepatoblast.index = conv.values
hepatoblast.dropna(inplace=True)
hepatoblast = hepatoblast[~hepatoblast.index.duplicated()]
hepatoblast.name = 'E.13.5 Hepatoblast'
####################
#Load 16.5 lung
lung = pd.read_excel(source_folder+'GSE57391_count.xlsx',index_col=0).mean(axis=1).squeeze()
lung.name = 'E.16.5 Lung'
####################
#Load old test data, reindex using Entrez ID's, and average duplicates
conv_probe = pd.read_csv(source_folder+'AnnotationTable_mogene10sttranscriptcluster.txt',index_col=0,usecols=[0,2],squeeze=True,sep='\t')
data_old = pd.read_csv(source_folder+'InVitroOld.txt',index_col=0,skipfooter=1,engine='python',sep='\t').reindex(index=conv_probe.keys())
data_old.index = conv_probe
data_old_dedup = data_old[~data_old.index.duplicated()].copy()
data_old_dedup.dropna(inplace=True)
#for item in data_old_dedup.index:
# data_old_dedup.loc[item] = data_old.loc[item].mean()
###################
#Load single-cell data and reindex using Entrez ID's, dropping duplicates
TimePoints = ['E9','E13','E15.5','E17.5']
df0=pd.read_csv(source_folder+TimePoints[0]+'/res.0.5.clusterAverages.tsv',sep='\t')
SingleCell = pd.DataFrame(index=df0.index)
conv = pd.read_csv(source_folder+'AnnotationTable_mogene10sttranscriptcluster.txt',index_col=0,usecols=[1,2],squeeze=True,sep='\t')
conv = conv[~conv.duplicated()]
conv = conv[~conv.index.duplicated()]
for TimePoint in TimePoints:
df= | pd.read_csv(source_folder+TimePoint+'/res.0.5.clusterAverages.tsv',sep='\t') | pandas.read_csv |
"""
Written by <NAME> for COMP9418 assignment 2.
"""
import copy
import re
import math
from collections import OrderedDict as odict
from itertools import product
from functools import reduce
import numpy as np
import pandas as pd
from graphviz import Digraph, Graph
from tabulate import tabulate
class GraphicalModel:
def __init__(self):
self.net = dict()
self.factors = dict()
self.outcomeSpace = dict()
self.node_value = dict()
####################################
######################################
# Representation
########################################
##########################################
def load(self, FileName):
"""
Load and initiate model from file
input:
FileName: String
"""
self.__init__()
with open(FileName, 'r') as f:
content = f.read()
node_pattern = re.compile(
r'node (.+) \n\{\n states = \( \"(.+)\" \);\n\}')
potential_pattern = re.compile(
r'potential \( (.+) \) \n\{\n data = \((.+)\)[ ]?;\n\}')
nodes_records = node_pattern.findall(content)
data_records = potential_pattern.findall(content)
for record in nodes_records:
outcome = tuple(re.split(r'\" \"', record[1]))
self.insert(record[0], outcome)
for record in data_records:
splits = record[0].split(' | ')
node = splits[0]
parents = []
if len(splits) > 1:
parents = list(reversed(splits[1].split()))
data = [float(i) for i in re.findall(
r'[0-1][.][0-9]+', record[1])]
self.factorize(node, data, parents)
def connect(self, father, child):
"""
Connect Two nodes.
Inputs:
father: String, name of the father node
child: String, name of the child node
"""
if father in self.net and child in self.net and child not in self.net[father]:
self.net[father].append(child)
def disconnect(self, father, child):
"""
Disconnect two nodes.
Inputs:
father: String, name of the father node
child: String, name of the child node
"""
if father in self.net and child in self.net and child in self.net[father]:
self.net[father].remove(child)
def factorize(self, node, data, parents=[]):
"""
Specify probabilities for a node.
data is a 1-d array or a simple list.
Inputs:
node: String, the node you want to specify
data: 1-D array like, the CPT of the node
parents: list of strings, parents of the node
"""
dom = parents + [node]
dom = tuple(dom)
for parent in parents:
self.connect(parent, node)
self.factors[node] = {'dom': dom, 'table': odict()}
outcome_product = product(*[self.outcomeSpace[node] for node in dom])
assert np.prod([len(self.outcomeSpace[node])
for node in dom]) == len(data), 'CPT length illegal'
for i, combination in enumerate(outcome_product):
self.factors[node]['table'][combination] = data[i]
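# Illustrative use (hypothetical node names). Outcomes must be inserted before
# factorizing, and the CPT is laid out with the node itself varying fastest:
#   g = GraphicalModel()
#   g.insert('Rain', ('yes', 'no'))
#   g.insert('Sprinkler', ('on', 'off'))
#   g.factorize('Rain', [0.2, 0.8])
#   g.factorize('Sprinkler', [0.01, 0.99, 0.4, 0.6], parents=['Rain'])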
def insert(self, Name, Outcome):
"""
Insert a node into the graph.
Inputs:
Outcome: a 1-D array-like, outcome space of this node
Name: String, the name of the node
"""
if Name not in self.net:
self.net[Name] = []
self.outcomeSpace[Name] = Outcome
else:
print(f'Already have node {Name}')
def remove(self, node):
if node in self.net:
if node in self.factors:
for child in self.net[node]:
if node in self.factors[child]['dom']:
self.sum_out(child, node)
self.factors.pop(node)
self.net.pop(node)
self.outcomeSpace.pop(node)
for other_node in self.net:
if node in self.net[other_node]:
self.net[other_node].remove(node)
def sum_out(self, node, victim):
"""
sum out the victim in the factor of node
Inputs:
node: String, name of the node
victim: String, name of the node to be sum out
"""
assert victim in self.factors[node]['dom'], 'the node to sum out is not one of the parents'
f = self.factors[node]
new_dom = list(f['dom'])
new_dom.remove(victim)
table = list()
for entries in product(*[self.outcomeSpace[node] for node in new_dom]):
s = 0
for val in self.outcomeSpace[victim]:
entriesList = list(entries)
entriesList.insert(f['dom'].index(victim), val)
p = f['table'][tuple(entriesList)]
s = s + p
table.append((entries, s))
self.factors[node] = {'dom': tuple(new_dom), 'table': odict(table)}
def save(self, fileName):
"""
save the graph to a file
Inputs:
fileName: String, the path of the file you want to save to.
"""
f = open(fileName, 'w')
f.write('net\n{\n}\n')
# first node domain part
for node, values in self.outcomeSpace.items():
outcome = " ".join(
['"' + value + '"' for value in values])
text = 'node %s \n{\n states = ( %s );\n}\n' % (node, outcome)
f.write(text)
# add data
for node, factor in self.factors.items():
potential = factor['dom'][-1]
data = " ".join([str(_) for _ in factor['table'].values()])
if len(factor['dom']) > 1:
parents = list(factor['dom'][:-1])
parents.reverse()
potential += ' | ' + " ".join(parents)
text = 'potential ( %s ) \n{\n data = ( %s );\n}\n' % (
potential, data)
f.write(text)
f.close()
def printFactor(self, node):
"""
print the factor table of the node
"""
f = self.factors[node]
table = list()
for key, item in f['table'].items():
k = list(key)
k.append(item)
table.append(k)
dom = list(f['dom'])
dom.append('Pr')
print(tabulate(table, headers=dom, tablefmt='orgtbl'))
def showGraph(self):
"""
Visualize the net graph.
"""
dot = Digraph()
dot.attr(overlap="False", splines="True")
for node, children in self.net.items():
dot.node(node)
for child in children:
dot.edge(node, child)
return dot
####################################
######################################
# Pruning and pre-processing techniques for inference
########################################
##########################################
def prune(self, query, **evidences):
"""
Prune the graph based on the query variables and evidences
Inputs:
query: list of strings, the query variables
evidences: dictionary, key: node, value: outcome of the node
Outputs:
a new graph
"""
evi_vars = list(evidences.keys())
qe = set(query + evi_vars)
assert all([_ in self.net for _ in qe])
newG = copy.deepcopy(self)
all_deleted = 0
# prune nodes
while not all_deleted:
all_deleted = 1
W = set()
for node, children in newG.net.items():
if node not in qe and not children:
W.add(node)
all_deleted = 0
for leaf in W:
newG.remove(leaf)
# clear the children that have been deleted
for node, children in newG.net.items():
newG.net[node] = [_ for _ in children if _ not in W]
# prune edge
for node, value in evidences.items():
for child in newG.net[node]:
newG.factors[child] = self.update(
newG.factors[child], node, value, newG.outcomeSpace)
newG.net[node] = []
newG.node_value[node] = value
netcopy = copy.deepcopy(newG.net)
reachable_from_q = self.spread(self.make_undirected(netcopy), query)
nodes = list(newG.net.keys())
for node in nodes:
if node not in reachable_from_q:
newG.remove(node)
return newG
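# Illustrative call (hypothetical names): keep only the part of the network
# relevant for P(A | B='true'):
#   sub = g.prune(['A'], B='true')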
@staticmethod
def update(factor, node, value, outcomeSpace):
"""
Specify a value to a node.
Inputs:
factor: the factor of the node
node: the node to update
value: the value that will be assigned to the node
outcomeSpace: Dictionary, the outcome space of all nodes
Return:
a new factor without node
"""
assert node in factor['dom'][:-1], 'such node is not in this CPT'
assert value in outcomeSpace[node], 'no such value for this node'
new_dom = copy.copy(factor['dom'])
factor_outcomeSpace = {node: outcomeSpace[node] for node in new_dom}
factor_outcomeSpace[node] = (value,)
node_index = new_dom.index(node)
new_dom_list = list(new_dom)
new_dom_list.remove(node)
new_dom = tuple(new_dom_list)
new_table = odict()
valid_records = product(*[_ for _ in factor_outcomeSpace.values()])
for record in valid_records:
record_list = list(record)
record_list.pop(node_index)
new_record = tuple(record_list)
new_table[new_record] = factor['table'][record]
return {'dom': new_dom, 'table': new_table}
def spread(self, graph, source):
"""
find all nodes reachable from source
Inputs:
graph: Dictionary, the graph
source: list of strings, the nodes from which the spread starts
Return:
visited: a set of strings, the nodes reachable from source.
"""
visited = set()
for node in source:
self.spread_help(graph, node, visited)
return visited
def spread_help(self, graph, node, visited):
visited.add(node)
for child in graph[node]:
if child not in visited:
self.spread_help(graph, child, visited)
def make_undirected(self, graph):
"""
Input:
graph: a directed graph
Return:
an undirected (bidirected) graph
"""
undirectG = graph.copy()
GT = self.transposeGraph(graph)
for node in graph:
undirectG[node] += GT[node]
return undirectG
@staticmethod
def transposeGraph(G):
"""
Input:
graph: a directed graph
Return:
a transposed graph
"""
GT = dict((v, []) for v in G)
for v in G:
for w in G[v]:
if w in GT:
GT[w].append(v)
else:
GT[w] = [v]
return GT
def min_degree_order(self):
"""
get the variable elimination order from the graph based on the min-degree heuristic
Return:
prefix: a list of strings, list of variables in the elimination order
width: the width of the order
"""
prefix = []
moral_graph = self.moralize()
moral_graph.factors = dict()
width = 0
while len(moral_graph.net) > 0:
low = math.inf
min_degree = math.inf
for node, neighbors in moral_graph.net.items():
fill_num = moral_graph.count_fill(node)
degree = len(moral_graph.net[node])
if degree < min_degree:
min_degree_node = node
low = fill_num
min_degree = degree
width = max(width, degree)
elif degree == min_degree:
if fill_num < low:
min_degree_node = node
low = fill_num
width = max(width, degree)
moral_graph.remove(min_degree_node)
prefix.append(min_degree_node)
return prefix, width
def min_fill_order(self):
"""
get the variable elimination order from the graph based on the min-fill heuristic
Return:
prefix: a list of strings, list of variables in the elimination order
width: the width of the order
"""
prefix = []
moral_graph = self.moralize()
moral_graph.factors = dict()
width = 0
while len(moral_graph.net) > 0:
low = math.inf
min_degree = math.inf
for node, neighbors in moral_graph.net.items():
fill_num = moral_graph.count_fill(node)
degree = len(moral_graph.net[node])
if fill_num < low:
min_fill_node = node
low = fill_num
min_degree = degree
width = max(width, degree)
elif fill_num == low:
if degree < min_degree:
min_fill_node = node
min_degree = degree
width = max(width, degree)
moral_graph.remove(min_fill_node)
prefix.append(min_fill_node)
return prefix, width
def count_fill(self, node):
"""
count the fill-in edges introduced when node is eliminated
Input:
node: string, the name of the node to be eliminated
Return:
int: fill-in edge count
"""
neighbors = self.net[node]
neighbor_num = len(neighbors)
before = 0
for neighbor in neighbors:
for neighbor_s_neighbor in self.net[neighbor]:
if neighbor_s_neighbor in neighbors:
before += 1
before //= 2
after = neighbor_num*(neighbor_num-1)//2
return after - before
def moralize(self):
"""
moralize the graph
return:
a new moral graph
"""
new_graph = copy.deepcopy(self)
graphT = self.transposeGraph(new_graph.net)
new_graph.net = self.make_undirected(new_graph.net)
for parents in graphT.values():
new_graph.connect_all(parents)
return new_graph
def connect_all(self, nodes):
"""
connect every node in nodes to every other node
"""
for father in nodes:
for child in nodes:
if father != child:
self.connect(father, child)
def show_moral_graph(self):
moral_graph = self.moralize()
dot = Graph(strict="True")
for node, children in moral_graph.net.items():
dot.node(node)
for child in children:
dot.edge(node, child)
return dot
####################################
######################################
# Exact inference
########################################
##########################################
def to_jointree(self, order):
"""
self must be a moral graph
Args:
order (list): elimination order
"""
for node, neighbors in self.net.items():
for neighbor in neighbors:
assert node in self.net[neighbor], 'the graph is not moral'
moral_graph = copy.deepcopy(self)
# 1. construct clusters
clusters = []
max_cluster_size = 0
for node in order:
cluster = set([node] + moral_graph.net[node])
moral_graph.connect_all(moral_graph.net[node])
moral_graph.remove(node)
if len(cluster) > max_cluster_size:
max_cluster_size = len(cluster)
clusters.append(cluster)
# 2. maintain RIP (running intersection property)
cluster_seq = [tuple(_) for _ in clusters]
n = len(clusters)
for cluster in reversed(clusters):
if len(cluster) < max_cluster_size:
i = cluster_seq.index(tuple(cluster))
for pre in reversed(cluster_seq[:i]):
if cluster.issubset(pre):
cluster_seq.remove(tuple(cluster))
cluster_seq.insert(i, pre)
cluster_seq.remove(pre)
break
# 3. assembly
cluster_net = dict()
cluster_net[cluster_seq[-1]] = []
n = len(cluster_seq)
for i in range(n-2, -1, -1):
cluster_net[cluster_seq[i]] = []
edge = set(cluster_seq[i+1]).union(
*[set(_) for _ in cluster_seq[i+2:]]) & set(cluster_seq[i])
for other in cluster_seq[i+1:]:
if edge.issubset(other):
cluster_net[cluster_seq[i]].append(other)
break
# assign factors to jointree
factors = dict()
for cluster in cluster_seq:
factors[cluster] = self.join(
*[self.factors[node] for node in cluster])
return JoinTree(cluster_net, factors, self.outcomeSpace)
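# Illustrative pipeline (the JoinTree class is defined elsewhere in the assignment
# code): moralize, pick an elimination order, then assemble the jointree:
#   order, width = g.min_fill_order()
#   jt = g.moralize().to_jointree(order)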
def join(self, *factors):
common_vars = list(reduce(lambda x, y: x | y, [
set(f['dom']) for f in factors]))
table = list()
for entries in product(*[self.outcomeSpace[node] for node in common_vars]):
entryDict = dict(zip(common_vars, entries))
p = 1
for f in factors:
f_entry = tuple(entryDict[var] for var in f['dom'])
pf = f['table'][f_entry]
p *= pf
table.append((entries, p))
return {'dom': tuple(common_vars), 'table': odict(table)}
####################################
######################################
# Approximate inference
########################################
##########################################
def gibbs_sampling(self, sample_num=100, chain_num=2, q_vars='all', **q_evis):
"""
Run Gibbs sampling on the graph for the given query, using the sample_num and chain_num specified by the user
Input:
sample_num: # of samples
chain_num: # of chains
q_vars: list of strings, the query variables, default is 'all', which means all variables in the graph other than the query evidences
q_evis: dictionary, the query evidences
Return:
samples: a list of dictionaries, each one is a sample contains the node and its value in query
"""
if q_vars == 'all':
q_vars = [_ for _ in self.net.keys() if _ not in q_evis]
pruned_graph = self.prune(q_vars, **q_evis)
chains = pruned_graph.burn_in(chain_num, **q_evis)
samples = []
# first sample
sample = dict()
for var in q_vars:
sample[var] = chains[0].node_value[var]
samples.append(sample)
curr = 1
while curr < sample_num:
sample = dict()
for var in q_vars:
chain = chains[np.random.choice(chain_num)]
pre_value = samples[curr - 1][var]
value = chain.sample_once(var)
# A = chain.get_acceptance(var, pre_value, value)
# sample[var] = np.random.choice(
# [value, pre_value], 1, p=[A, 1-A])[0]
sample[var] = value
samples.append(sample)
curr += 1
return samples
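# Illustrative call (hypothetical names): approximate P(A='true' | B='true') from
# 1000 samples drawn over two chains:
#   samples = g.gibbs_sampling(sample_num=1000, chain_num=2, q_vars=['A'], B='true')
#   p = sum(s['A'] == 'true' for s in samples) / len(samples)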
def get_acceptance(self, node, pre, curr):
"""
compute the acceptance probability of this sample
Inputs:
node: string, the node waiting to be assigned
pre: string, the previous value assigned to this node
curr: string, the current value waiting to be assigned to this node
Return:
accept_prob: float, the acceptance probability
"""
dom = self.factors[node]['dom']
parents = dom[: -1]
parents_value = [self.node_value[parent] for parent in parents]
ppre = self.factors[node]['table'][tuple(parents_value + [pre])]
pcurr = self.factors[node]['table'][tuple(parents_value + [curr])]
return min(1, pcurr/ppre)
def burn_in(self, chain_num, window_size=100, **evidences):
"""
generate chains and keep sampling until mixed
Inputs:
chain_num: int, # of chains
window_size: int, # of samples used to test if chains are mixed, default is 100
evidences: dictionary, the evidences of the query
Return:
chains: list of GraphicalModel objects, the list of mixed chains
"""
assert chain_num > 1, 'chain num is at least 2'
chains = []
chains_non_evis = []
for seed in range(chain_num):
np.random.seed(seed)
chain = copy.deepcopy(self)
# 1. fix evidence
chain.node_value = evidences.copy()
# 2: Initialize other variables
non_evis = dict()
for node, domain in self.outcomeSpace.items():
if node not in evidences:
value = np.random.choice(domain, 1)[0]
chain.node_value[node] = value
non_evis[node] = [value]
chains.append(chain)
chains_non_evis.append(non_evis)
sample_count = 1
while True:
if sample_count >= window_size:
if self.mixed(chains_non_evis, self.outcomeSpace):
break
# clear the chains_non_evis
chains_non_evis = [{
node: []
for node in chains_non_evis[i].keys()
} for i in range(chain_num)]
sample_count = 0
# 3: Choose a variable ordering O
O = np.random.permutation(list(chains_non_evis[0].keys()))
# 4: Repeatedly sample the non-evidence variables in the order O
for var in O:
for i, chain in enumerate(chains):
value = chain.sample_once(var)
chain.node_value[var] = value
chains_non_evis[i][var].append(value)
sample_count += 1
return chains
def sample_once(self, node):
"""
sample once for a particular node
Input:
node: string, name of the node to sample
Return:
a string, a value from this node's outcomeSpace
"""
dom = self.factors[node]['dom']
parents = dom[: -1]
parents_value = [self.node_value[parent] for parent in parents]
combinations = [tuple(parents_value + [node_value])
for node_value in self.outcomeSpace[node]]
prob_list = np.array([self.factors[node]['table'][combination]
for combination in combinations])
prob_list /= np.sum(prob_list)
return np.random.choice(self.outcomeSpace[node], 1, p=prob_list)[0]
@staticmethod
def convert(list_of_dict, outcomeSpace):
"""
convert the outcome string values from the outcome space into float values between 0 and 1
Input:
list_of_dict: list of dictionaries; each key is a variable and the corresponding value is the history of its sampled values
outcomeSpace: dictionary, the outcome space of all nodes
Return:
list_of_dict, the converted list_of_dict
"""
mapping = dict()
for node, values in outcomeSpace.items():
mapping[node] = dict()
for value in values:
mapping[node][value] = (values.index(value)+1) / len(values)
for i, record in enumerate(list_of_dict):
list_of_dict[i] = {key: [mapping[key][value]
for value in item] for key, item in record.items()}
return list_of_dict
def mixed(self, chain_vars, outcomeSpace):
"""
judge whether the chains in chain_vars have mixed
Inputs:
chain_vars = [
{a:[...], b:[...] ...},
{a:[...], b:[...] ...}]
the history of samples' value
outcomeSpace: dictionary, the outcome space of all nodes
Return:
bool, whether chain_vars are mixed up
"""
# convert text values into numeric values
chain_vars = self.convert(chain_vars, outcomeSpace)
parameters = list(chain_vars[0].keys())
P_hat = []
df_list = [pd.DataFrame(var_dic) for var_dic in chain_vars]
concat_df = pd.concat(df_list, ignore_index=True)
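# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the original assignment code). It assumes the
# full GraphicalModel/JoinTree sources are available -- the excerpt above is cut
# off inside mixed() -- and that 'example.net' is a hypothetical file in the
# Hugin-style format written by GraphicalModel.save().
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    g = GraphicalModel()
    g.load('example.net')                                     # parse nodes and CPTs
    g.printFactor(list(g.factors)[0])                         # inspect one CPT
    order, width = g.min_degree_order()                       # elimination order and induced width
    print('elimination order:', order, 'width:', width)
    samples = g.gibbs_sampling(sample_num=500, chain_num=2)   # approximate inference, no evidence
    print('first sample:', samples[0])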