prompt (stringlengths 19 to 1.03M) | completion (stringlengths 4 to 2.12k) | api (stringlengths 8 to 90) |
---|---|---|
from pickle import TRUE
from flask import *
import math
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.metrics import accuracy_score
import random
import socket
import os
import time
import rss23
pid = 3
def rssrd(r, xy,client):
f = {}
g = {}
R = {}
esend = {}
epk1 = {}
for j in range(rss23.n):
if j+1==pid:
continue
f[pid,(j+1)] = round(random.uniform(3*(10**7),4*(10**7)),6)
g[pid,(j+1)] = round(random.uniform(3*(10**7),4*(10**7)),6)
R[pid,(j+1)] = random.uniform(11*(10**18),19*(10**18))
for j in range(rss23.n):
if j+1==pid:
continue
prod = f[pid,(j+1)] * r
esend[pid,(j+1)] = ( rss23.public_key.encrypt(prod) , f[pid,(j+1)] )
for j in range(1,4):
if j == pid:
rss23.client_send(esend, client)
else:
print("Ready to receive")
rss23.client_receive(pid, client)
print("Received data")
print(rss23.erecive)
fj = {}
for i in rss23.erecive.keys():
epk1[i[0],i[1]]=( rss23.erecive[i][0] * g[i[1],i[0]] * xy + R[i[1],i[0]] , g[i[1],i[0]] )
fj[i] = rss23.erecive[i][1]
print("fj ",fj,"\n")
print()
for j in range(1,4):
if j == pid:
rss23.epk_send(epk1, client)
else:
rss23.epk_receive(pid, client)
print("Received dat 01a")
print(rss23.epkfinal)
share1 = {}
share2 = {}
for i in rss23.epkfinal.keys():
nr = rss23.private_key.decrypt(rss23.epkfinal[i][0])
dr = rss23.epkfinal[i][1] * f[i]
share1[i] = nr/dr
share2[i] = - R[i] / ( fj[(i[1],i[0])] * g[i] )
print('ok')
t = round(random.uniform((-0.5),(0.5)),6)
si = 0
for i in share1.keys():
si += share1[i] + share2[i] + ( r + t ) * xy
rss23.s = []
for j in range(1,4):
if j == pid:
rss23.si_send(si, client)
else:
rss23.si_receive(client)
rss23.s.append(si)
print(rss23.s)
return sum(rss23.s)
def rss(d2,client):
print("**********************102********************")
print(type(d2))
x, y = d2['x'], d2['y']
alphax = round(random.uniform((-0.5),(0.5)),6)
alphay = round(random.uniform((-0.5),(0.5)),6)
x = x + alphax
y = y + alphay
r = round(random.uniform(3000,4000),6)
sx = rssrd(r, x,client)
sy = rssrd(r, y,client)
return sx/sy
pat=os.getcwd() #to get working directory
print("CurrentPath : "+pat)
upfile=""
cols=[] #to store column names
TstCols=[]
Origional_data=[]
normalized_data=[] #to store normalized data
rotateded_data=[] #to store rotateded data about a given angle
alpha_graph=[] #This will hold file-names of alpha graph images
beta_graph=[] #This will hold file-names of beta graph images
Test_Data=[]
Normallized_Test_Data=[]
Origional_test_data=[]
rotateded_test_data=[]
#DATAFRAMES initialization
ndata= pd.DataFrame()
dat = pd.DataFrame()
dat4 = pd.DataFrame()
#Function to rotate data
def rotate_mult(dat,a,b):
cos_a = round(math.cos(a),4)
sin_a = round(math.sin(a), 4)
cos_b = round(math.cos(b),4)
sin_b = round(math.sin(b),4)
x = [[cos_a,-sin_a,0,0],[ sin_a, cos_a,0,0],[0,0, cos_b, -sin_b],[0,0, sin_b, cos_b]]
prod=np.dot(dat,x) #Rotating data (Dot product of data with x)
return prod
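# Illustrative sanity check (not part of the original app; sample values are
# made up): the 4x4 matrix built in rotate_mult() is two independent 2-D
# rotations, one for the first pair of columns (angle a) and one for the
# second pair (angle b). Rotating by (a, b) and then by (-a, -b) should
# therefore recover the original data, up to the 4-decimal rounding of the
# trig terms.
def rotate_mult_demo():
    sample = np.array([[1.0, 2.0, 3.0, 4.0],
                       [0.5, -0.5, 0.25, -0.25]])
    rotated = rotate_mult(sample, math.pi / 6, math.pi / 4)
    restored = rotate_mult(rotated, -math.pi / 6, -math.pi / 4)
    print(np.allclose(sample, restored, atol=1e-2))  # expected: True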
def clear():
#Clear all lists
angles.clear()
alpha_graph.clear()
beta_graph.clear()
cols.clear()
Origional_data.clear()
normalized_data.clear()
rotateded_data.clear()
Test_Data.clear()
TstCols.clear()
Normallized_Test_Data.clear()
Origional_test_data.clear()
rotateded_test_data.clear()
#delete older graphs
dir = pat+'/static'
for f in os.listdir(dir):
os.remove(os.path.join(dir, f))
app = Flask(__name__)
@app.route('/')
def upload():
clear()
os.chdir(pat) #Switch to working directory
return render_template("file_upload_form.html")
@app.route('/Showdata', methods = ['POST'])
def success():
if request.method == 'POST':
f = request.files['file']
f.save(f.filename)
upfile = f.filename
data = | pd.read_csv(upfile) | pandas.read_csv |
"""
JACCARD Content Based Algorithm
===================================================
This implementation uses JACCARD to measure similarity between items. It uses a vectorized approach to calculating
the JACCARD score in order to improve performance
"""
# Author: <NAME>. <delacruzp>
import logging
import numpy as np
from time import time
from sklearn.feature_extraction.text import CountVectorizer
from scipy.sparse import csr_matrix
from scipy.spatial.distance import cdist, squareform
from progressbar import ProgressBar, Bar, Percentage, Timer
import pandas
from .CBAlgorithm import CBAlgorithm
logger = logging.getLogger('root')
def distance_batch(m1,
m2,
ids_1,
ids_2,
cap,
metric='jaccard'):
# Calculate Distance
result = 1-cdist(m1, m2, metric)
logger.debug('distance')
result = np.array(result)
# Remove super small values
result[result < cap] = 0
# Make the matrix sparse/smaller
result = csr_matrix(result)
# Return only those values
rows, cols = result.nonzero()
rows_movielens = ids_1[rows]
cols_movielens = ids_2[cols]
scores = result.data
# Filter Out similarity between the same movie
# Ex. Toy Story and Toy Story -_-
mask = rows_movielens != cols_movielens
rows_movielens = rows_movielens[mask]
cols_movielens = cols_movielens[mask]
scores = scores[mask]
return rows_movielens, cols_movielens, scores
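# Minimal usage sketch (added for illustration; ids and counts are made up):
# distance_batch() expects dense count matrices plus numpy arrays with the
# item ids labelling their rows, and returns the id pairs whose Jaccard
# similarity is at least `cap`, with self-pairs filtered out.
def distance_batch_example():
    m = np.array([[1, 1, 0, 0],
                  [1, 1, 1, 0],
                  [0, 0, 1, 1]])
    ids = np.array([10, 20, 30])
    rows, cols, scores = distance_batch(m, m, ids, ids, cap=0.1)
    # e.g. items 10 and 20 share two of their three active features, so one
    # returned triple is (10, 20) with a score of 2/3.
    return rows, cols, scores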
def get_top_k(rows_movielens, cols_movielens, scores, k):
# Only save the ids of the movies for the first
# algorithm, because next one will have the same id
pre_frame = np.rec.fromarrays((rows_movielens, cols_movielens, scores), \
names=('id1_id','id2_id','als_cosine'))
p = pandas.DataFrame(pre_frame)
# Get top K elements for each movieid1 set 1
p = p \
.sort_values(by=['id1_id', 'als_cosine'], ascending=False) \
.groupby('id1_id') \
.head(k) \
.reset_index(drop=True)
return p
class CBAlgorithmJACCARD(CBAlgorithm):
def __init__(self):
self.__name__ = 'JACCARD'
def index(self, data, max_features=1000):
'''
Index the dataset using term counts (CountVectorizer) as the score
:param data: Array of strings
:return: Sparse matrix NxM where N is the length of data and M is the number of features
'''
data = super(CBAlgorithmJACCARD, self).index(data)
t0 = time()
self.vectorizer = CountVectorizer(max_df=0.5,
max_features=max_features,
stop_words='english')
self.indexed = self.vectorizer.fit_transform(data)
duration = time() - t0
logger.debug("n_samples: %d, n_features: %d" % self.indexed.shape)
logger.debug("duration: %d" % duration)
return self.indexed
def similarity(self, index=None, cap=0.5, k=100, batch_size=1000):
'''
Given a index (Matrix NxM) With N items and M features, calculates the similarity between each pair of items
reference: https://stackoverflow.com/a/32885931/1354478
:param index: Numpy matrix
:return: Sparse matrix NxN where every cell is the similarity of its indexes
'''
if index is None:
index = self.indexed
super(CBAlgorithmJACCARD, self).similarity(index)
# Get all the ids
t0 = time()
logger.debug(index.shape)
matrix = index.todense()
logger.debug('densed')
# Start
bar = ProgressBar(maxval=matrix.shape[0] // batch_size + 1, \
widgets=['JACCARD', ' ', Bar('=', '[', ']'), ' ', Percentage(), ' - ', Timer()])
bar.start()
# Calculate Similarity
counter = 0
for i in range(0, matrix.shape[0], batch_size):
logger.debug("%d/%d", i, matrix.shape[0])
m1 = matrix[i:i+batch_size,:]
# Calculate Distance
rows_movielens, cols_movielens, scores = distance_batch(m1, matrix, self.ids, self.ids, cap)
# Extract TOP K result
p = get_top_k(rows_movielens, cols_movielens, scores, k)
# Temporarily save to a local file
p.to_pickle('Temp/%s_%i' % (self.__name__, i))
counter += 1
bar.update(counter)
bar.finish()
# Append All Similarities
frames = []
for i in range(0, matrix.shape[0], batch_size):
frames.append(pandas.read_pickle('Temp/%s_%i' % (self.__name__, i)))
result = | pandas.concat(frames, axis=0) | pandas.concat |
from typing import Union, cast
import warnings
import numpy as np
from pandas._libs.lib import no_default
import pandas._libs.testing as _testing
from pandas.core.dtypes.common import (
is_bool,
is_categorical_dtype,
is_extension_array_dtype,
is_interval_dtype,
is_number,
is_numeric_dtype,
needs_i8_conversion,
)
from pandas.core.dtypes.missing import array_equivalent
import pandas as pd
from pandas import (
Categorical,
DataFrame,
DatetimeIndex,
Index,
IntervalIndex,
MultiIndex,
PeriodIndex,
Series,
TimedeltaIndex,
)
from pandas.core.algorithms import take_1d
from pandas.core.arrays import (
DatetimeArray,
ExtensionArray,
IntervalArray,
PeriodArray,
TimedeltaArray,
)
from pandas.core.arrays.datetimelike import DatetimeLikeArrayMixin
from pandas.io.formats.printing import pprint_thing
def assert_almost_equal(
left,
right,
check_dtype: Union[bool, str] = "equiv",
check_less_precise: Union[bool, int] = no_default,
rtol: float = 1.0e-5,
atol: float = 1.0e-8,
**kwargs,
):
"""
Check that the left and right objects are approximately equal.
By approximately equal, we refer to objects that are numbers or that
contain numbers which may be equivalent to specific levels of precision.
Parameters
----------
left : object
right : object
check_dtype : bool or {'equiv'}, default 'equiv'
Check dtype if both a and b are the same type. If 'equiv' is passed in,
then `RangeIndex` and `Int64Index` are also considered equivalent
when doing type checking.
check_less_precise : bool or int, default False
Specify comparison precision. 5 digits (False) or 3 digits (True)
after decimal points are compared. If int, then specify the number
of digits to compare.
When comparing two numbers, if the first number has magnitude less
than 1e-5, we compare the two numbers directly and check whether
they are equivalent within the specified precision. Otherwise, we
compare the **ratio** of the second number to the first number and
check whether it is equivalent to 1 within the specified precision.
.. deprecated:: 1.1.0
Use `rtol` and `atol` instead to define relative/absolute
tolerance, respectively. Similar to :func:`math.isclose`.
rtol : float, default 1e-5
Relative tolerance.
.. versionadded:: 1.1.0
atol : float, default 1e-8
Absolute tolerance.
.. versionadded:: 1.1.0
"""
if check_less_precise is not no_default:
warnings.warn(
"The 'check_less_precise' keyword in testing.assert_*_equal "
"is deprecated and will be removed in a future version. "
"You can stop passing 'check_less_precise' to silence this warning.",
FutureWarning,
stacklevel=2,
)
rtol = atol = _get_tol_from_less_precise(check_less_precise)
if isinstance(left, Index):
assert_index_equal(
left,
right,
check_exact=False,
exact=check_dtype,
rtol=rtol,
atol=atol,
**kwargs,
)
elif isinstance(left, Series):
assert_series_equal(
left,
right,
check_exact=False,
check_dtype=check_dtype,
rtol=rtol,
atol=atol,
**kwargs,
)
elif isinstance(left, DataFrame):
assert_frame_equal(
left,
right,
check_exact=False,
check_dtype=check_dtype,
rtol=rtol,
atol=atol,
**kwargs,
)
else:
# Other sequences.
if check_dtype:
if | is_number(left) | pandas.core.dtypes.common.is_number |
import pandas as pd
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn import metrics
import warnings
import time
warnings.filterwarnings('ignore')
def one_hot(df, col, pre):
df_dummy = pd.get_dummies(df[col],prefix=pre,drop_first=True)
df = pd.concat([df, df_dummy], axis=1)
df = df.drop(col, axis=1)
return df
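# Small illustration (toy frame, not one of the Adult/Compas/German inputs
# used below): get_dummies() with drop_first=True keeps k-1 indicator columns
# and the original categorical column is dropped.
def one_hot_example():
    toy = pd.DataFrame({'color': ['red', 'blue', 'red'], 'y': [1, 0, 1]})
    encoded = one_hot(toy, 'color', 'color')
    # resulting columns: ['y', 'color_red']; 'blue' is the dropped base level
    return encoded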
def metric(index, x_test, y_test, y_test_predicted):
TP = 0
FP = 0
TN = 0
FN = 0
for i, val in enumerate(x_test):
if(val[index] == 1):
if y_test[i]==y_test_predicted[i]==1:
TP += 1
if y_test_predicted[i]==1 and y_test[i]!=y_test_predicted[i]:
FP += 1
if y_test[i]==y_test_predicted[i]== 0:
TN += 1
if y_test_predicted[i]==0 and y_test[i]!=y_test_predicted[i]:
FN += 1
TPR_0 = TP/(TP+FN)
TNR_0 = TN/(FP+TN)
TP = 0
FP = 0
TN = 0
FN = 0
for i, val in enumerate(x_test):
if(val[index] == 0):
if y_test[i]==y_test_predicted[i]==1:
TP += 1
if y_test_predicted[i]==1 and y_test[i]!=y_test_predicted[i]:
FP += 1
if y_test[i]==y_test_predicted[i]==0:
TN += 1
if y_test_predicted[i]==0 and y_test[i]!=y_test_predicted[i]:
FN += 1
TPR = TP/(TP+FN)
TNR = TN/(FP+TN)
print("Accuracy:",metrics.accuracy_score(y_test, y_test_predicted))
print("Precision:",metrics.precision_score(y_test, y_test_predicted))
print("Recall:",metrics.recall_score(y_test, y_test_predicted))
print("F1:",metrics.f1_score(y_test, y_test_predicted))
print("DI: ", di(index, x_test, y_test, y_test_predicted))
print("TPRB:", TPR_0-TPR)
print("TNRB:", TNR_0-TNR)
def di(index, x_test, y_test, y_pred):
a,b,c,d = 0.0, 0, 0, 0
for i, val in enumerate(x_test):
if(val[index] == 0):
if(y_pred[i] == 1):
a += 1
else:
c += 1
elif(val[index] == 1):
if(y_pred[i] == 1):
b += 1
else:
d += 1
score = (a / (a + c)) / (b / (b + d))
return score
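# Worked toy example (values invented) for the disparate-impact ratio above:
# the protected attribute sits in column `index`; group 0 receives the
# positive prediction 2 times out of 4 and group 1 receives it 1 time out of
# 4, so di() returns (2/4) / (1/4) = 2.0. Note that y_test is accepted but
# not used by di().
def di_example():
    x = np.array([[0.0], [0.0], [0.0], [0.0], [1.0], [1.0], [1.0], [1.0]])
    y_true = np.zeros(8)
    y_pred = np.array([1, 1, 0, 0, 1, 0, 0, 0])
    return di(0, x, y_true, y_pred)  # -> 2.0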
import math
def cd(index, x_test, clf):
conf_z = 2.58
x_test_new = np.zeros(shape=(x_test.shape[0]*2,x_test.shape[1]))
for i, val in enumerate(x_test):
x_test_new[i*2] = val
val[index] = (val[index] + 1)%2
x_test_new[i*2 +1] = val
y_pred = clf.predict(x_test_new)
count = 0
for i, val in enumerate(y_pred):
#print(val)
if (i%2) == 1:
continue
if(val != y_pred[i+1]):
count = count + 1
cd = (count/x_test.shape[0])
err = conf_z * math.sqrt((cd * (1 - cd)) / x_test.shape[0])
print("CD:", cd, "margin of error:", err)
return y_pred
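# Illustrative sketch (synthetic data; not one of the experiments below) of
# how cd() behaves: it flips the protected attribute of every test row,
# re-predicts, and prints the fraction of rows whose prediction changes plus
# a ~99% margin of error (conf_z = 2.58). A classifier that depends only on
# that attribute should therefore score close to 1.0. cd() mutates x_test in
# place, hence the explicit copy.
def cd_example():
    rng = np.random.RandomState(0)
    x = rng.randint(0, 2, size=(200, 2)).astype(float)
    y = x[:, 0]  # label equals the protected attribute in column 0
    clf = LogisticRegression().fit(x, y)
    # returns the predictions on the duplicated rows; CD itself is printed
    return cd(0, x[:50].copy(), clf)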
def adult_preprocess(df):
def income(x):
if x in ['<=50K', '0', 0]:
return 0.0
else:
return 1.0
def sex(x):
if x in ['Male', "1", 1]:
return 1.0
else:
return 0.0
def country_bin(x):
if (x == 'United-States'):
return "United-States"
else:
return "Non-US"
df['sex'] = df['sex'].apply(lambda x: sex(x))
df['income'] = df['income'].apply(lambda x: income(x))
df['native_country'] = df['native_country'].apply(lambda x: country_bin(x))
return df
def compas_preprocess(df):
def two_year_recid(x):
if x in ['Did recid.', '0', 0]:
return 0.0
else:
return 1.0
def sex(x):
if x in ['Male', "1", 1]:
return 1.0
else:
return 0.0
def race(x):
if x in ['African-American']:
return 0.0
else:
return 1.0
df['Sex'] = df['Sex'].apply(lambda x: sex(x))
df['Race'] = df['Race'].apply(lambda x: race(x))
df['two_year_recid'] = df['two_year_recid'].apply(lambda x: two_year_recid(x))
return df
def german_preprocess(df):
def credit(x):
if x in ['Bad Credit', '0', 0]:
return 0.0
else:
return 1.0
def sex(x):
if x in ['Male', "1", 1]:
return 1.0
else:
return 0.0
df['Sex'] = df['Sex'].apply(lambda x: sex(x))
df['credit'] = df['credit'].apply(lambda x: credit(x))
return df
def Adult(f):
X_int = ['age', 'edu_level', 'hours_per_week']
X_cat = [ 'marital_status', 'occupation','workclass', 'relationship', 'race', 'native_country']
S = ['sex']
Y = ['income']
keep = X_int + X_cat + S + Y
df = pd.read_csv(f)
df = df[keep]
test = pd.read_csv("data/adult_test.csv")
test = test[keep]
df = pd.concat([df, test])
df = adult_preprocess(df)
#df = df.dropna(how='any', axis=0)
for i in X_cat:
if i in keep:
df = one_hot(df, i, i)
X_train, X_test = train_test_split(df, test_size=0.3, shuffle=False)
train_y = np.array(X_train['income'])
X_train = X_train.drop(['income'], axis=1)
test_y = np.array(X_test['income'])
X_test = X_test.drop(['income'], axis=1)
index = X_train.columns.get_loc('sex')
clf = LogisticRegression(solver="liblinear")
clf.fit(X_train, train_y)
y_pred = clf.predict(X_test)
metric(index, np.array(X_test), test_y, y_pred)
y_cd = cd(index, np.array(X_test), clf)
test = pd.read_csv("data/adult_test.csv")
test['pred'] = y_pred
test.to_csv("results_unconstrained/adult_test_repaired.csv", index=False)
np.savetxt("results_unconstrained/adult_test_repaired_cd.csv", y_cd, delimiter=",")
def Compas(f, t="data/compas_test.csv", f1='', f2=''):
X_int = ['Prior', 'Age']
X_cat = ['Sex']
S = ['Race']
Y = ['two_year_recid']
keep = X_int + X_cat + S + Y
df = pd.read_csv(f)
df = df[keep]
test = pd.read_csv(t)
test = test[keep]
df = pd.concat([df, test])
df = compas_preprocess(df)
for i in X_cat:
if i in keep:
df = one_hot(df, i, i)
X_train, X_test = train_test_split(df, test_size=0.3, shuffle=False, random_state=42)
train_y = np.array(X_train['two_year_recid'])
X_train = X_train.drop(['two_year_recid'], axis=1)
test_y = np.array(X_test['two_year_recid'])
X_test = X_test.drop(['two_year_recid'], axis=1)
index = X_train.columns.get_loc('Race')
clf = LogisticRegression()
clf.fit(X_train, train_y)
y_pred = clf.predict(X_test)
metric(index, np.array(X_test), test_y, y_pred)
y_cd = cd(index, np.array(X_test), clf)
test = pd.read_csv(t)
test['pred'] = y_pred
test.to_csv(f1+"results_unconstrained/compas_test_repaired"+f2+".csv", index=False)
np.savetxt(f1+"results_unconstrained/compas_test_repaired"+f2+"_cd.csv", y_cd, delimiter=",")
def German(f):
X_int = ['Age', 'Month', 'Investment', 'Credit_amount']
X_cat = ['Status', 'Housing', 'Savings', 'Property', 'Credit_history']
S = ['Sex']
Y = ['credit']
keep = X_int + X_cat + S + Y
df = pd.read_csv(f)
df = df[keep]
test = pd.read_csv("data/german_test.csv", header=0, delimiter=',',)
test = test[keep]
df = | pd.concat([df, test]) | pandas.concat |
from collections import defaultdict
import pandas as pd
from sympy import Basic, Symbol
from os.path import dirname, join, abspath
import cobra.test
from cobrame.util import dogma
ecoli_data_files_dir = dirname(abspath(__file__))
del dirname, abspath
def fixpath(filename):
return join(ecoli_data_files_dir, filename)
def get_biomass_composition(model, solution=None):
if solution is None:
solution = model.solution
biomass_composition = defaultdict(float)
# Account for total biomass produced in protein_biomass_dilution reaction
protein_stoich = 1.
for met, stoich in \
model.reactions.protein_biomass_to_biomass.metabolites.items():
if abs(stoich) >= 1:
protein_stoich = stoich
biomass_composition['protein'] = \
solution.x_dict['protein_biomass_to_biomass'] * protein_stoich
biomass_composition['tRNA'] = \
solution.x_dict['tRNA_biomass_to_biomass']
biomass_composition['mRNA'] = \
solution.x_dict['mRNA_biomass_to_biomass']
biomass_composition['ncRNA'] = \
solution.x_dict['ncRNA_biomass_to_biomass']
biomass_composition['rRNA'] = \
solution.x_dict['rRNA_biomass_to_biomass']
biomass_composition['lipid'] = \
solution.x_dict['lipid_biomass_to_biomass']
biomass_composition['DNA'] = \
solution.x_dict['DNA_biomass_to_biomass']
biomass_composition['peptidoglycan'] = \
solution.x_dict['peptidoglycan_biomass_to_biomass']
biomass_composition['Other'] = \
solution.x_dict['constituent_biomass_to_biomass'] + \
solution.x_dict['prosthetic_group_biomass_to_biomass']
return biomass_composition
def rna_to_protein_ratio(model, solution=None):
if solution is None:
solution = model.solution
composition = get_biomass_composition(model, solution=solution)
rna_to_protein = (composition['mRNA'] + composition['tRNA'] +
composition['rRNA'] + composition['ncRNA']) / \
(composition['protein'])
return rna_to_protein
def get_rna_fractions_dict(model, solution=None):
if solution is None:
solution = model.solution
rna_fractions = {}
composition = get_biomass_composition(model, solution=solution)
trna_to_rna = (composition['tRNA']) / (
composition['mRNA'] + composition['tRNA'] + composition['rRNA'] +
composition['ncRNA'])
rna_fractions['tRNA'] = trna_to_rna
rrna_to_rna = (composition['rRNA']) / (
composition['mRNA'] + composition['tRNA'] + composition['rRNA'] +
composition['ncRNA'])
rna_fractions['rRNA'] = rrna_to_rna
mrna_to_rna = (composition['mRNA']) / (
composition['mRNA'] + composition['tRNA'] + composition['rRNA'] +
composition['ncRNA'])
rna_fractions['mRNA'] = mrna_to_rna
ncrna_to_rna = (composition['ncRNA']) / (
composition['mRNA'] + composition['tRNA'] + composition['rRNA'] +
composition['ncRNA'])
rna_fractions['ncRNA'] = ncrna_to_rna
return rna_fractions
def make_composition_piechart(model, kind='Biomass', solution=None):
try:
import pandas
except ImportError:
raise Exception("Pandas must be installed to get biomass piechart")
if solution is None:
solution = model.solution
summary = {}
if kind == 'Biomass':
summary['Biomass composition'] = \
get_biomass_composition(model, solution=solution)
frame = | pandas.DataFrame.from_dict(summary) | pandas.DataFrame.from_dict |
# -*- coding: utf-8 -*-
import string
from collections import OrderedDict
from datetime import date, datetime
import numpy as np
import pandas as pd
import pandas.util.testing as pdt
import pytest
from kartothek.core.common_metadata import make_meta, store_schema_metadata
from kartothek.core.index import ExplicitSecondaryIndex
from kartothek.core.naming import DEFAULT_METADATA_VERSION
from kartothek.io_components.metapartition import (
MetaPartition,
_unique_label,
parse_input_to_metapartition,
partition_labels_from_mps,
)
from kartothek.serialization import DataFrameSerializer, ParquetSerializer
def test_store_single_dataframe_as_partition(
store, metadata_storage_format, metadata_version
):
df = pd.DataFrame(
{"P": np.arange(0, 10), "L": np.arange(0, 10), "TARGET": np.arange(10, 20)}
)
mp = MetaPartition(
label="test_label", data={"core": df}, metadata_version=metadata_version
)
meta_partition = mp.store_dataframes(
store=store,
df_serializer=ParquetSerializer(),
dataset_uuid="dataset_uuid",
store_metadata=True,
metadata_storage_format=metadata_storage_format,
)
assert len(meta_partition.data) == 0
expected_key = "dataset_uuid/core/test_label.parquet"
assert meta_partition.files == {"core": expected_key}
assert meta_partition.label == "test_label"
files_in_store = list(store.keys())
expected_num_files = 1
assert len(files_in_store) == expected_num_files
stored_df = DataFrameSerializer.restore_dataframe(store=store, key=expected_key)
pdt.assert_frame_equal(df, stored_df)
files_in_store.remove(expected_key)
assert len(files_in_store) == expected_num_files - 1
def test_store_single_dataframe_as_partition_no_metadata(store, metadata_version):
df = pd.DataFrame(
{"P": np.arange(0, 10), "L": np.arange(0, 10), "TARGET": np.arange(10, 20)}
)
mp = MetaPartition(
label="test_label", data={"core": df}, metadata_version=metadata_version
)
partition = mp.store_dataframes(
store=store,
df_serializer=ParquetSerializer(),
dataset_uuid="dataset_uuid",
store_metadata=False,
)
assert len(partition.data) == 0
expected_file = "dataset_uuid/core/test_label.parquet"
assert partition.files == {"core": expected_file}
assert partition.label == "test_label"
# One meta one actual file
files_in_store = list(store.keys())
assert len(files_in_store) == 1
stored_df = DataFrameSerializer.restore_dataframe(store=store, key=expected_file)
pdt.assert_frame_equal(df, stored_df)
def test_load_dataframe_logical_conjunction(
store, meta_partitions_files_only, metadata_version, metadata_storage_format
):
df = pd.DataFrame(
{"P": np.arange(0, 10), "L": np.arange(0, 10), "TARGET": np.arange(10, 20)}
)
mp = MetaPartition(
label="cluster_1",
data={"core": df},
metadata_version=metadata_version,
logical_conjunction=[("P", ">", 4)],
)
meta_partition = mp.store_dataframes(
store=store,
df_serializer=None,
dataset_uuid="dataset_uuid",
store_metadata=True,
metadata_storage_format=metadata_storage_format,
)
predicates = None
loaded_mp = meta_partition.load_dataframes(store=store, predicates=predicates)
data = {
"core": pd.DataFrame(
{"P": [5, 6, 7, 8, 9], "L": [5, 6, 7, 8, 9], "TARGET": [15, 16, 17, 18, 19]}
).set_index(np.arange(5, 10))
}
pdt.assert_frame_equal(loaded_mp.data["core"], data["core"])
predicates = [[("L", ">", 6), ("TARGET", "<", 18)]]
loaded_mp = meta_partition.load_dataframes(store=store, predicates=predicates)
data = {
"core": pd.DataFrame({"P": [7], "L": [7], "TARGET": [17]}).set_index(
np.array([7])
)
}
pdt.assert_frame_equal(loaded_mp.data["core"], data["core"])
predicates = [[("L", ">", 2), ("TARGET", "<", 17)], [("TARGET", "==", 19)]]
loaded_mp = meta_partition.load_dataframes(store=store, predicates=predicates)
data = {
"core": pd.DataFrame(
{"P": [5, 6, 9], "L": [5, 6, 9], "TARGET": [15, 16, 19]}
).set_index(np.array([5, 6, 9]))
}
pdt.assert_frame_equal(loaded_mp.data["core"], data["core"])
def test_store_multiple_dataframes_as_partition(
store, metadata_storage_format, metadata_version
):
df = pd.DataFrame(
{"P": np.arange(0, 10), "L": np.arange(0, 10), "TARGET": np.arange(10, 20)}
)
df_2 = pd.DataFrame({"P": np.arange(0, 10), "info": string.ascii_lowercase[:10]})
mp = MetaPartition(
label="cluster_1",
data={"core": df, "helper": df_2},
metadata_version=metadata_version,
)
meta_partition = mp.store_dataframes(
store=store,
df_serializer=None,
dataset_uuid="dataset_uuid",
store_metadata=True,
metadata_storage_format=metadata_storage_format,
)
expected_file = "dataset_uuid/core/cluster_1.parquet"
expected_file_helper = "dataset_uuid/helper/cluster_1.parquet"
assert meta_partition.files == {
"core": expected_file,
"helper": expected_file_helper,
}
assert meta_partition.label == "cluster_1"
files_in_store = list(store.keys())
assert len(files_in_store) == 2
stored_df = DataFrameSerializer.restore_dataframe(store=store, key=expected_file)
pdt.assert_frame_equal(df, stored_df)
files_in_store.remove(expected_file)
stored_df = DataFrameSerializer.restore_dataframe(
store=store, key=expected_file_helper
)
pdt.assert_frame_equal(df_2, stored_df)
files_in_store.remove(expected_file_helper)
@pytest.mark.parametrize("predicate_pushdown_to_io", [True, False])
def test_load_dataframes(
meta_partitions_files_only, store_session, predicate_pushdown_to_io
):
expected_df = pd.DataFrame(
OrderedDict(
[
("P", [1]),
("L", [1]),
("TARGET", [1]),
("DATE", pd.to_datetime([date(2010, 1, 1)])),
]
)
)
expected_df_2 = pd.DataFrame(OrderedDict([("P", [1]), ("info", ["a"])]))
mp = meta_partitions_files_only[0]
assert len(mp.files) > 0
assert len(mp.data) == 0
mp = meta_partitions_files_only[0].load_dataframes(
store=store_session, predicate_pushdown_to_io=predicate_pushdown_to_io
)
assert len(mp.data) == 2
data = mp.data
pdt.assert_frame_equal(data["core"], expected_df, check_dtype=False)
pdt.assert_frame_equal(data["helper"], expected_df_2, check_dtype=False)
empty_mp = MetaPartition("empty_mp", metadata_version=mp.metadata_version)
empty_mp.load_dataframes(
store_session, predicate_pushdown_to_io=predicate_pushdown_to_io
)
assert empty_mp.data == {}
def test_remove_dataframes(meta_partitions_files_only, store_session):
mp = meta_partitions_files_only[0].load_dataframes(store=store_session)
assert len(mp.data) == 2
mp = mp.remove_dataframes()
assert mp.data == {}
def test_load_dataframes_selective(meta_partitions_files_only, store_session):
expected_df = pd.DataFrame(
OrderedDict(
[
("P", [1]),
("L", [1]),
("TARGET", [1]),
("DATE", pd.to_datetime([date(2010, 1, 1)])),
]
)
)
mp = meta_partitions_files_only[0]
assert len(mp.files) > 0
assert len(mp.data) == 0
mp = meta_partitions_files_only[0].load_dataframes(
store=store_session, tables=["core"]
)
assert len(mp.data) == 1
data = mp.data
pdt.assert_frame_equal(data["core"], expected_df, check_dtype=False)
def test_load_dataframes_columns_projection(
meta_partitions_evaluation_files_only, store_session
):
expected_df = pd.DataFrame(OrderedDict([("P", [1]), ("L", [1]), ("HORIZON", [1])]))
mp = meta_partitions_evaluation_files_only[0]
assert len(mp.files) > 0
assert len(mp.data) == 0
mp = meta_partitions_evaluation_files_only[0].load_dataframes(
store=store_session, tables=["PRED"], columns={"PRED": ["P", "L", "HORIZON"]}
)
assert len(mp.data) == 1
data = mp.data
pdt.assert_frame_equal(data["PRED"], expected_df, check_dtype=False)
def test_load_dataframes_columns_raises_missing(
meta_partitions_evaluation_files_only, store_session
):
mp = meta_partitions_evaluation_files_only[0]
assert len(mp.files) > 0
assert len(mp.data) == 0
with pytest.raises(ValueError) as e:
meta_partitions_evaluation_files_only[0].load_dataframes(
store=store_session,
tables=["PRED"],
columns={"PRED": ["P", "L", "HORIZON", "foo", "bar"]},
)
assert str(e.value) == "Columns cannot be found in stored dataframe: bar, foo"
def test_load_dataframes_columns_table_missing(
meta_partitions_evaluation_files_only, store_session
):
# test behavior of load_dataframes for columns argument given
# specifying table that doesn't exist
mp = meta_partitions_evaluation_files_only[0]
assert len(mp.files) > 0
assert len(mp.data) == 0
with pytest.raises(
ValueError,
match=r"You are trying to read columns from invalid table\(s\). .*PRED_typo.*",
):
mp.load_dataframes(
store=store_session,
columns={"PRED_typo": ["P", "L", "HORIZON", "foo", "bar"]},
)
# ensure typo in tables argument doesn't raise, as specified in docstring
dfs = mp.load_dataframes(store=store_session, tables=["PRED_typo"])
assert len(dfs) > 0
def test_from_dict():
df = pd.DataFrame({"a": [1]})
dct = {"data": {"core": df}, "label": "test_label"}
meta_partition = MetaPartition.from_dict(dct)
pdt.assert_frame_equal(meta_partition.data["core"], df)
assert meta_partition.metadata_version == DEFAULT_METADATA_VERSION
def test_eq():
df = pd.DataFrame({"a": [1]})
df_same = pd.DataFrame({"a": [1]})
df_other = pd.DataFrame({"a": [2]})
df_diff_col = pd.DataFrame({"b": [1]})
df_diff_type = pd.DataFrame({"b": [1.0]})
meta_partition = MetaPartition.from_dict(
{"label": "test_label", "data": {"core": df}}
)
assert meta_partition == meta_partition
meta_partition_same = MetaPartition.from_dict(
{"label": "test_label", "data": {"core": df_same}}
)
assert meta_partition == meta_partition_same
meta_partition_diff_label = MetaPartition.from_dict(
{"label": "another_label", "data": {"core": df}}
)
assert meta_partition != meta_partition_diff_label
assert meta_partition_diff_label != meta_partition
meta_partition_diff_files = MetaPartition.from_dict(
{"label": "another_label", "data": {"core": df}, "files": {"core": "something"}}
)
assert meta_partition != meta_partition_diff_files
assert meta_partition_diff_files != meta_partition
meta_partition_diff_col = MetaPartition.from_dict(
{"label": "test_label", "data": {"core": df_diff_col}}
)
assert meta_partition != meta_partition_diff_col
assert meta_partition_diff_col != meta_partition
meta_partition_diff_type = MetaPartition.from_dict(
{"label": "test_label", "data": {"core": df_diff_type}}
)
assert meta_partition != meta_partition_diff_type
assert meta_partition_diff_type != meta_partition
meta_partition_diff_metadata = MetaPartition.from_dict(
{
"label": "test_label",
"data": {"core": df_diff_type},
"dataset_metadata": {"some": "metadata"},
}
)
assert meta_partition != meta_partition_diff_metadata
assert meta_partition_diff_metadata != meta_partition
meta_partition_different_df = MetaPartition.from_dict(
{"label": "test_label", "data": {"core": df_other}}
)
assert not meta_partition == meta_partition_different_df
meta_partition_different_label = MetaPartition.from_dict(
{"label": "test_label", "data": {"not_core": df_same}}
)
assert not meta_partition == meta_partition_different_label
meta_partition_empty_data = MetaPartition.from_dict(
{"label": "test_label", "data": {}}
)
assert meta_partition_empty_data == meta_partition_empty_data
meta_partition_more_data = MetaPartition.from_dict(
{"label": "test_label", "data": {"core": df, "not_core": df}}
)
assert not (meta_partition == meta_partition_more_data)
assert not meta_partition == "abc"
def test_add_nested_to_plain():
mp = MetaPartition(
label="label_1",
files={"core": "file"},
data={"core": pd.DataFrame({"test": [1, 2, 3]})},
indices={"test": [1, 2, 3]},
dataset_metadata={"dataset": "metadata"},
)
to_nest = [
MetaPartition(
label="label_2",
data={"core": pd.DataFrame({"test": [4, 5, 6]})},
indices={"test": [4, 5, 6]},
),
MetaPartition(
label="label_22",
data={"core": pd.DataFrame({"test": [4, 5, 6]})},
indices={"test": [4, 5, 6]},
),
]
mp_nested = to_nest[0].add_metapartition(to_nest[1])
mp_add_nested = mp.add_metapartition(mp_nested)
mp_iter = mp.add_metapartition(to_nest[0]).add_metapartition(to_nest[1])
assert mp_add_nested == mp_iter
def test_add_nested_to_nested():
mps1 = [
MetaPartition(
label="label_1",
files={"core": "file"},
data={"core": pd.DataFrame({"test": [1, 2, 3]})},
indices={"test": [1, 2, 3]},
dataset_metadata={"dataset": "metadata"},
),
MetaPartition(
label="label_33",
files={"core": "file"},
data={"core": pd.DataFrame({"test": [1, 2, 3]})},
indices={"test": [1, 2, 3]},
dataset_metadata={"dataset": "metadata"},
),
]
mpn_1 = mps1[0].add_metapartition(mps1[1])
mps2 = [
MetaPartition(
label="label_2",
data={"core": pd.DataFrame({"test": [4, 5, 6]})},
indices={"test": [4, 5, 6]},
),
MetaPartition(
label="label_22",
data={"core": pd.DataFrame({"test": [4, 5, 6]})},
indices={"test": [4, 5, 6]},
),
]
mpn_2 = mps2[0].add_metapartition(mps2[1])
mp_nested_merge = mpn_1.add_metapartition(mpn_2)
mp_iter = mps1.pop()
for mp_ in [*mps1, *mps2]:
mp_iter = mp_iter.add_metapartition(mp_)
assert mp_nested_merge == mp_iter
def test_eq_nested():
mp_1 = MetaPartition(
label="label_1",
files={"core": "file"},
data={"core": pd.DataFrame({"test": [1, 2, 3]})},
indices={"test": [1, 2, 3]},
dataset_metadata={"dataset": "metadata"},
)
mp_2 = MetaPartition(
label="label_2",
data={"core": pd.DataFrame({"test": [4, 5, 6]})},
indices={"test": [4, 5, 6]},
)
mp = mp_1.add_metapartition(mp_2)
assert mp == mp
assert mp != mp_2
assert mp_2 != mp
mp_other = MetaPartition(
label="label_3", data={"core": pd.DataFrame({"test": [4, 5, 6]})}
)
mp_other = mp_1.add_metapartition(mp_other)
assert mp != mp_other
assert mp_other != mp
def test_nested_incompatible_meta():
mp = MetaPartition(
label="label_1",
data={"core": pd.DataFrame({"test": np.array([1, 2, 3], dtype=np.int8)})},
metadata_version=4,
)
mp_2 = MetaPartition(
label="label_2",
data={"core": pd.DataFrame({"test": np.array([4, 5, 6], dtype=np.float64)})},
metadata_version=4,
)
with pytest.raises(ValueError):
mp.add_metapartition(mp_2)
def test_concatenate_no_change():
input_dct = {
"first_0": pd.DataFrame({"A": [1], "B": [1]}),
"second": pd.DataFrame({"A": [3], "B": [3], "C": [3]}),
}
dct = {"label": "test_label", "data": input_dct}
meta_partition = MetaPartition.from_dict(dct)
result = meta_partition.concat_dataframes()
assert result == meta_partition
def test_concatenate_identical_col_df():
input_dct = {
"first_0": | pd.DataFrame({"A": [1], "B": [1]}) | pandas.DataFrame |
import pandas as pd
import numpy as np
import datetime
import sys
# Print Python and Pandas version
print('Python version ' + sys.version)
print('Pandas version ' + pd.__version__)
#%% Get input for manual runs
results_file = 'OSeMBE_V2.1_sol_C0T0E10_sorted.txt'
#%% Get input on run specifics of the data from command prompt
#Input = sys.argv[1:]
#print(Input)
#results_file = Input[0]
#%%Generate Metadata
name_details_results_file = results_file.split('_')
scenario = name_details_results_file[3]
date = datetime.date.today().strftime("%Y-%m-%d")
pathway = name_details_results_file[3]
model = 'OSeMBE'
framework = 'FrameworkNA'
version = 'DataV1R1'
inputoutput = 'Output'
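# For the example filename above, name_details_results_file is
# ['OSeMBE', 'V2.1', 'sol', 'C0T0E10', 'sorted.txt'], so element 3
# ('C0T0E10') is what ends up in both 'scenario' and 'pathway'.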
#%% Define needed results variables
variables = ['AnnualEmissions', 'AnnualTechnologyEmission', 'ProductionByTechnologyAnnual', 'TotalCapacityAnnual', 'UseByTechnologyAnnual','NewCapacity']
#%% Read the data from txt results file
data = | pd.read_csv(results_file, names=['Year']) | pandas.read_csv |
from operator import index
from pyexpat import model
import sys
import os
from scipy.stats import spearmanr
sys.path.append(os.path.abspath("../"))
import pandas as pd
import numpy as np
import pytest
from synthdid.model import SynthDID
from synthdid.sample_data import fetch_CaliforniaSmoking
class TestModelSynth(object):
def test_params_with_originalpaper(self):
"""
Original Paper (see: Arkhangelsky, Dmitry, et al. Synthetic difference in differences. No. w25532. National Bureau of Economic Research, 2019. https://arxiv.org/abs/1812.09970)
"""
test_df = fetch_CaliforniaSmoking()
test_omega = pd.read_csv("test_data/omega_CalifolinaSmoking.csv")
test_lambda = | pd.read_csv("test_data/lambda_CalifolinaSmoking.csv") | pandas.read_csv |
#!/usr/bin/env python
# coding: utf-8
# # K Nearest Neighbors with Python
#
# You've been given a classified data set from a company! They've hidden the feature column names but have given you the data and the target classes.
#
# We'll try to use KNN to create a model that directly predicts a class for a new data point based off of the features.
#
# Let's grab it and use it!
# ## Import Libraries
#
#
# In[43]:
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
get_ipython().run_line_magic('matplotlib', 'inline')
# ## Get the Data
#
# Set index_col=0 to use the first column as the index.
# In[74]:
df = | pd.read_csv("Classified Data",index_col=0) | pandas.read_csv |
"""Exports burst data to other data structures."""
import pandas as pd
import numpy as np
import os
import itertools
import pickle
from itertools import groupby
def df_export(bursts, offsets, from_svo=False):
"""Exports the burst data to a dataframe.
TODO: remove offsets parameter, as it is not used to generate the dataframe
(as far as I can tell).
TODO: does the 'bursts' column need to be kept for every edge entry?
"""
key_list = []
burst_list = []
offset_list = []
for k, v in bursts.items():
key_list.append(k)
burst_list.append(v)
offset_list.append(offsets[k])
if from_svo == True:
df = pd.DataFrame()
df['svo'] = key_list
intensities = max_intensities(burst_list)
else:
df = pd.DataFrame.from_records(key_list, columns=['word1', 'word2'])
intensities = max_intensities(burst_list)
df['bursts'] = intensities
full_df = flatten(df, intensities)
return full_df
def max_intensities(burst_list):
"""Removes all but the max intensity for each burst interval."""
max_bursts = [{(j, k): i for i, j, k in x} for x in burst_list]
return max_bursts
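# Tiny worked example (burst values invented): each burst tuple is
# (intensity, interval_start, interval_end), and max_intensities() re-keys
# the bursts of each term by their interval.
def max_intensities_example():
    burst_list = [[(1, 0, 5), (2, 2, 4)]]  # one term with two bursts
    return max_intensities(burst_list)     # -> [{(0, 5): 1, (2, 4): 2}]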
def flatten(df, intensities):
"""Flattens burst data into dataframe columns.
Depends on the df being in the same order as the list of intensities.
"""
term_id_list = []
interval_start_list = []
interval_end_list = []
intensity_list = []
for i, term in enumerate(intensities):
for interval, intensity in term.items():
term_id_list.append(i)
interval_start_list.append(interval[0])
interval_end_list.append(interval[1])
intensity_list.append(intensity)
temp_df = pd.DataFrame()
temp_df['term_id'], temp_df['interval_start'], temp_df['interval_end'], temp_df['intensity'] =\
term_id_list, interval_start_list, interval_end_list, intensity_list
return_df = pd.merge(df, temp_df, left_index=True, right_on='term_id')
return_df = return_df.sort_values(by=['intensity'], ascending=False)
return return_df
def max_bursts_export(bursts, from_svo=False):
"""Returns a dict with term as key and maximum intensity burst as value.
TODO: make this function export what it is meant to. As of now, it returns
a dict with all bursts as values.
"""
key_list = []
burst_list = []
for k, v in bursts.items():
key_list.append(k)
burst_list.append(v)
if from_svo:
df = pd.DataFrame()
df['svo'] = key_list
intensities = max_intensities(burst_list)
max_bursts = {df['svo'][x]: intensities[x] for x in df.index}
else:
df = pd.DataFrame.from_records(key_list, columns=['word1', 'word2'])
intensities = max_intensities(burst_list)
max_bursts = {
(df['word1'][x], df['word2'][x]): intensities[x] for x in df.index
}
return max_bursts
def all_bursts_export(bursts, lookup, from_svo=False):
"""Converts the keys of the `bursts` dictionary from ints to strings."""
key_list = []
burst_list = []
for k, v in bursts.items():
key_list.append(k)
burst_list.append(v)
if from_svo:
df = | pd.DataFrame() | pandas.DataFrame |
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import pandas as pd
from matplotlib import pyplot as plt
# In[2]:
df = | pd.read_csv('vgsales.csv') | pandas.read_csv |
import os
import glob
import collections
import cv2
import numpy as np
import pandas as pd
import pickle
import time
import settings
IMG_DIR = settings.IMG_DIR
VAL_FILE = settings.VAL_FILE
CLASS_FILE = settings.CLASS_FILE
BBOX_FILE = settings.BBOX_FILE
BBOX_BIN_FILE = os.path.join(settings.DATA_DIR, 'bbox.pk')
BBOX_BIN_FILE_SMALL = os.path.join(settings.DATA_DIR, 'bbox_small.pk')
BAD_IMG_IDS = set([])
MC_CSV = 'mc.csv'
MBB_CSV = 'mbb.csv'
def get_classes():
classes = []
with open(CLASS_FILE, 'r') as f:
for line in f:
classes.append(line.strip().split(',')[0])
return classes
def get_class_dict():
class_dict = {}
with open(CLASS_FILE, 'r') as f:
for line in f:
k, v = line.strip().split(',')
class_dict[k] = v
return class_dict
def get_class_id_converters():
itos = get_classes()
stoi = {itos[i]: i for i in range(len(itos))}
return itos, stoi
def get_class_names(ids):
c_dict = get_class_dict()
itos, stoi = get_class_id_converters()
return [c_dict[itos[i]] for i in ids]
def get_val_ids():
val_ids = []
with open(VAL_FILE, 'r') as f:
for i, line in enumerate(f):
if i == 0:
continue
val_ids.append(line.strip())
return val_ids
def get_train_ids(img_dir = IMG_DIR):
filenames = glob.glob(os.path.join(img_dir, '*.jpg'))
#print(len(filenames))
img_ids = [os.path.basename(fn).split('.')[0] for fn in filenames]
valset = set(get_val_ids())
img_ids = [img_id for img_id in img_ids if not (img_id in valset or img_id in BAD_IMG_IDS)]
#print(len(img_ids))
return img_ids
def get_test_ids():
df = pd.read_csv(settings.SAMPLE_SUB_FILE)
return df.values[:, 0].tolist()
def get_boxed_train_ids(bbox_dict, img_dir=IMG_DIR, max_num = None):
img_ids = get_train_ids(img_dir)
img_ids = [img_id for img_id in img_ids if img_id in bbox_dict]
if not (max_num is None):
return img_ids[:max_num]
return img_ids
def build_bbox_dict(cls_stoi):
bbox_dict = {} #collections.defaultdict(lambda: [])
with open(BBOX_FILE, 'r') as f:
for i, line in enumerate(f):
if i == 0:
continue
row = line.strip().split(',')
value = (cls_stoi[row[2]], [float(row[4]), float(row[6]), float(row[5]), float(row[7])])
if row[0] in bbox_dict:
# return (class, [x1, y1, x2, y2])
bbox_dict[row[0]].append(value)
else:
bbox_dict[row[0]] = [value]
with open(BBOX_BIN_FILE, 'wb') as f:
pickle.dump(bbox_dict, f)
return bbox_dict
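# Shape of the dict returned (and pickled) above (the image id and class
# index below are illustrative, not real entries):
#   {'000002b66c9c498e': [(12, [0.01, 0.10, 0.90, 0.80]), ...], ...}
# i.e. image id -> list of (class index, [x1, y1, x2, y2]) with coordinates
# normalized to the image size, matching how draw_screen_rect() scales them.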
def build_small_bbox_dict(img_dir=IMG_DIR, num=1000):
bbox_dict = load_bbox_dict()
img_ids = get_boxed_train_ids(bbox_dict)[:num]
val_img_ids = get_val_ids()[:num]
img_ids.extend(val_img_ids)
small_dict = {k: bbox_dict[k] for k in img_ids if k in bbox_dict}
with open(BBOX_BIN_FILE_SMALL, 'wb') as f:
pickle.dump(small_dict, f)
print(len(small_dict))
def load_small_train_ids():
with open(BBOX_BIN_FILE_SMALL, 'rb') as f:
small_dict = pickle.load(f)
img_ids = list(small_dict.keys())
return small_dict, img_ids
def load_bbox_dict():
with open(BBOX_BIN_FILE, 'rb') as f:
return pickle.load(f)
def draw_img(image, name = '', resize=1):
H,W = image.shape[0:2]
cv2.namedWindow(name, cv2.WINDOW_NORMAL)
cv2.imshow(name, image.astype(np.uint8))
cv2.resizeWindow(name, round(resize*W), round(resize*H))
def draw_screen_rect(image, bbox, color=[0,0,255], alpha=0.5):
H, W = image.shape[:2]
x1, y1 = round(bbox[0]*W), round(bbox[1]*H)
x2, y2 = round(bbox[2]*W), round(bbox[3]*H)
#image[y1:y2,x1:x2,:] = (1-alpha)*image[y1:y2,x1:x2,:] + (alpha)*np.array(color, np.uint8)
cv2.rectangle(image, (x1, y1), (x2, y2), color, 4)
def draw_shadow_text(img, text, pt, color=(255, 0, 0), fontScale=0.5, thickness=1):
#if color1 is None: color1=(0,0,0)
#if thickness1 is None: thickness1 = thickness+2
font = cv2.FONT_HERSHEY_SIMPLEX
cv2.putText(img, text, pt, font, fontScale, color, thickness, cv2.LINE_AA)
#cv2.putText(img, text, pt, font, fontScale, color, thickness, cv2.LINE_AA)
def build_csvs_from_subset_dir(subset_path):
bbox_dict = build_bbox_dict()
filenames = glob.glob(os.path.join(IMG_DIR, '*.jpg'))
print(len(filenames))
fns = [os.path.basename(o) for o in filenames]
mcs = [' '.join([str(o[0]) for o in bbox_dict[fn.split('.')[0]]]) for fn in fns]
df1 = pd.DataFrame({'fn': fns, 'clas': mcs}, columns=['fn', 'clas'])
df1.to_csv(MC_CSV, index=False)
mbb = [' '.join([' '.join([str(i) for i in o[1]]) for o in bbox_dict[fn.split('.')[0]]]) for fn in fns]
df2 = pd.DataFrame({'fn': fns, 'bbox': mbb}, columns=['fn','bbox'])
df2.to_csv(MBB_CSV, index=False)
def get_fn(img_id):
return os.path.join(IMG_DIR, '{}.jpg'.format(img_id))
def show_img_with_label(img, bb):
itos, stoi = get_class_id_converters()
class_dict = get_class_dict()
#bb = bbox_dict[img_id]
#print(img_id)
print(bb)
#img = cv2.imread(get_fn(img_id))
for b in bb:
draw_screen_rect(img, b[1])
text_x = round(b[1][0] * img.shape[0])
text_y = round(b[1][1] * img.shape[1]) + 10
cls_name = class_dict[itos[b[0]]]
draw_shadow_text(img, cls_name, (text_x, text_y))
draw_img(img)
cv2.waitKey(0)
def show_img_with_label2(img_id):
print(img_id)
df = | pd.read_csv(MBB_CSV) | pandas.read_csv |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ----------------------------------------------------------------------
# Name: csv_uty.py
# Description:
#
# Author: m.akei
# Copyright: (c) 2020 by m.na.akei
# Time-stamp: <2020-09-17 16:19:37>
# Licence:
# Copyright (c) 2021 <NAME>
#
# This software is released under the MIT License.
# http://opensource.org/licenses/mit-license.php
# ----------------------------------------------------------------------
import argparse
import textwrap
import sys
import re
from pathlib import Path
from distutils.version import LooseVersion
import json
import numpy as np
import pandas as pd
VERSION = 1.0
PANDAS_MIN_VERSION = "1.1.3"
if LooseVersion(PANDAS_MIN_VERSION) > LooseVersion(pd.__version__):
print("??Error:csv_uty:padnas version must be newer than {}.".format(PANDAS_MIN_VERSION), file=sys.stderr)
sys.exit(1)
OUTPUT_FORMAT_DESC = {
"csv": "comma-separated values (csv) file",
"hdf": "HDF5 file using HDFStore",
"parquet": "the binary parquet format, that reuires pyarrow module",
"pickel": "Pickle (serialize) object to file",
"json": "JSON string",
"feather": "binary Feather format, that requires pyarrow module",
"stata": "Stata dta format"
}
OUTPUT_FORMAT = list(OUTPUT_FORMAT_DESC.keys())
OUTPUT_REQUIRED_FILE = ["hdf", "parquet", "pickel", "feather", "stata"]
def init():
output_format_help = ""
for k, v in OUTPUT_FORMAT_DESC.items():
output_format_help += " {:6s}:{}\n".format(k, v)
arg_parser = argparse.ArgumentParser(description="utility for CSV file",
formatter_class=argparse.RawDescriptionHelpFormatter,
epilog=textwrap.dedent('''
remark:
Process of '--serial_column' is done first, so the result of '--serial_column' may be used as a column for '--add_columns' and others.
Note: the result of '--serial_column' will be affected by subsequent processing for '--drop_na_columns' and others.
All columns are read as "str", so you may convert type of those if need, using '--add_columns', '--trim_columns', '--type_columns'.
For '--add_column', an available column name must match '[\w:;@_;:@_]+'.
At '--add_columns', there are '$'-prefixed column names in the right-side of each definition, see examples.
At '--trim_columns', in right-side of each definition, there is a lambda function,
the function will be applied by Series.map, see examples.
If you want to use commas in expression of '--add_columns' and '--trim_columns', the comma must be escaped by back-slash. see examples.
For '--add_columns', values of each column that start with '0b' or '0o' or '0x' are converted into integers internally,
but at output the values of those columns are formatted back into the original format.
[DEPRECATE] '--change_timefreq' was deprecated. use 'csv_trimtime'.
For '--change_timefreq', available methods are floor, ceil,round. About format string, you may find answer in
'datetime Basic date and time types https://docs.python.org/3/library/datetime.html#strftime-and-strptime-behavior'.
About 'frequency', you may check the document in
'Time series / date functionality https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#timeseries-offset-aliases'.
If you want to use commas and colon in expression of '--change_timefreq', those must be escaped by back-slash. see examples.
After other processings, '--trim_columns' are applied immediately before output.
When '--drop_duplicated' was given, the first row that has the same values of columns will be retained.
For '--decompose_bit_string', new columns are added, whose names have '_Bnnn' as a suffix.
NBITS means the minimum number of columns for the decomposed results.
If a value has the prefix '0b', '0o', '0x' or is a decimal string, then the value is decoded into a binary string and decomposed.
The value of each element is treated as a bit-pattern string, so the results have reversed order:
ex: COL_A:10011 => [COL_A_B000:"1",COL_A_B001:"1",COL_A_B002:"0",COL_A_B003:"0",COL_A_B004:"1"]
Note: if there are "100" and "200" in the same column, "100" is treated as binary string "0b100",
but "200" is treated as decimal value and results is ["1","1","0","0","1","0","0","0"].
For '--stack', there are [<column name>,'category','stacked_result'] as columns in header of results.
'<column name>' is name given by '--stack'.
For '--fillna', if '@interpolate' was used as the value, NaNs are filled by linear interpolation, both inside and outside.
If '@forward' was used as value, propagate last valid observation forward to next valid
If '@backward' was used as value, use next valid observation to fill gap.
For '--replace', definition has json format as dict: '{"old_value":"new_value",...}'
NOTE: In definition, old value must be string. ex: '{"1":"A"}'
If you want to use commas in definition of '--replace', those must be escaped by back-slash. see examples.
If you want to use commas and colon in expression of '--split_into_rows' and '--split_into_columns',
those must be escaped by back-slash. see examples.
For '--split_into_columns', the separator is a single character or regexp.
If named parameter is used, the names are names of columns in the result. see example.
output format:
csv : comma-separated values (csv) file
hdf : HDF5 file using HDFStore
parquet : the binary parquet format, that requires pyarrow module
pickel : Pickle (serialize) object to file
json : JSON string
feather : binary Feather format, that requires pyarrow module
stata : Stata dta format
processing order:
add serial column, dropping columns(regx), dropping rows, dropping na, dropping duplicated, adding columns that was changed with time frequency,
adding columns, triming columns, type columns,filling value for na, replace values, split into rows, split into columns,
decompose bits string, sort/sort by datetime, rename columns, stacking
SECURITY WARNING:
this use 'exec' for '--add_columns' and '--prologe', '--trim_columns' without any sanity.
example:
csv_uty.py --serial_column=serial:100 test1.csv
csv_uty.py --drop_columns=A,B --drop_rows=1 test1.csv
csv_uty.py --drop_na_columns=all test1.csv | csv_uty.py --serial_column=serial:100 -
csv_uty.py --drop_rows=0-1 test1.csv
csv_uty.py --drop_na_columns=P1C1,P1C2,P2C1 test3.csv
csv_uty.py --drop_duplicated=P1C1 test3.csv
csv_uty.py --add_columns='NCOL1="PAT001",NCOL2=12345,NCOL3=${A}+${B}' test1.csv
csv_uty.py --add_column='E=(${D}.fillna("0").apply(lambda x: int(x\,2)) & 0b10)' test1.csv
acol='NCOL1="PAT001",NCOL2=12345,NCOL3=(${D}.fillna("0").apply(lambda x: int(x\,0)) & 0b10)!=0'
csv_uty.py --add_columns="${acol}" test1.csv
acol='NCOL1="PAT001",NCOL2=12345,NCOL3=np.sin(${A}.fillna("0").astype(float))'
csv_uty.py --prologe='import numpy as np;' --add_columns="${acol}" --columns=NCOL1,NCOL2,NCOL3,A,B,C,D test1.csv
acol='NCOL1=${A}.map(lambda x: format(int(x)\, "#010x")\, na_action="ignore")'
csv_uty.py --prologe='import numpy as np;' --add_columns="${acol}" --columns=NCOL1,A,B,C,D test1.csv
# Series pandas 1.1.3 documentation https://pandas.pydata.org/pandas-docs/stable/reference/series.html#accessors
csv_uty.py --add_columns='D=${ABC002}.str.replace(r":\d+$"\,":00"\,regex=True)' big_sample_headers.csv |\\
csv_plot_histogram.py --output=test.html --animation_column=D - ABC005
csv_uty.py --add_columns='D=pd.to_datetime(${ABC002}\,format="%Y-%m-%d %H:%M:%S")' big_sample_headers.csv
csv_uty.py --add_columns='D=pd.to_datetime(${ABC002}\,format="%Y-%m-%d %H:%M:%S"),E=${D}.dt.floor("30s")' big_sample_headers.csv |\\
csv_plot_histogram.py --animation_column=E --output=test.html - ABC005
# the same as above
#[DEPRECATE] '--change_timefreq' was deprecated. use 'csv_trimtime'.
csv_uty.py --change_timefreq='D=ABC002:%Y-%m-%d %H\:%M\:%S:floor:30s' big_sample_headers.csv |\\
csv_plot_histogram.py --animation_column=D --output=test.html - ABC005
csv_uty.py --trim_columns=D="lambda x: int(x\,0)" test1.csv # convert binary string into decimal value.
csv_uty.py --type_columns=A=float,B=bin test2.csv
csv_uty.py --decompose_bit_string=D:16 test1.csv |csvlook -I
csv_uty.py --decompose_bit_string=A,B,C,D --rename_columns=A_B000:BIT_A,A_B001:BIT_B test1.csv
csv_uty.py --rename_columns=A:abc,B:def test1.csv
csv_uty.py --stack=ABC002 bit-pattern-headers.csv
csv_uty.py --fillna=A=1,B=2,C="A B" test1.csv
csv_uty.py --fillna=B=@interpolate test1.csv
csv_uty.py --fillna=A=@forward t1.csv
csv_uty.py --fillna=A=@backward t1.csv
csv_uty.py --replace='A={"1":"A"\,"2":"B"},D={"a":1\,"b":0}' t1.csv
csv_uty.py --split_into_rows="COL003" test_explode.csv
csv_uty.py --split_into_rows="COL002:\:,COL003" test_explode.csv
csv_uty.py --split_into_rows="COL002:\:" test_explode.csv |csvlook -I
| COL001 | COL002 | COL003 | COL004 |
| ------ | ------ | ------ | -------- |
| A | 1 | 1,2,3 | F1|F2|F3 |
| A | 2 | 1,2,3 | F1|F2|F3 |
| A | 3 | 1,2,3 | F1|F2|F3 |
| B | 2 | 4,5,6 | F2 |
| C | 3 | 7,8,9 | F1|F3 |
csv_uty.py --split_into_columns="COL002:\:,COL004" test_explode.csv|csvlook -I
| COL001 | COL002 | COL003 | COL004 | 1 | 2 | 3 | F1 | F2 | F3 |
| ------ | ------ | ------ | -------- | - | - | - | -- | -- | -- |
| A | 1:2:3 | 1,2,3 | F1|F2|F3 | 1 | 1 | 1 | 1 | 1 | 1 |
| B | 2 | 4,5,6 | F2 | 0 | 1 | 0 | 0 | 1 | 0 |
| C | 3 | 7,8,9 | F1|F3 | 0 | 0 | 1 | 1 | 0 | 1 |
csv_uty.py --split_into_columns="COL002:(?P<alpha>\w+)(?P<D>\d+),COL004" test_explode.csv|csvlook -I
| COL001 | COL002 | COL003 | COL004 | alpha | D | F1 | F2 | F3 |
| ------ | ------ | ------ | -------- | ----- | - | -- | -- | -- |
| A | 1:2:3 | 1,2,3 | F1|F2|F3 | | | 1 | 1 | 1 |
| B | AB2 | 4,5,6 | F2 | AB | 2 | 0 | 1 | 0 |
| C | D3 | 7,8,9 | F1|F3 | D | 3 | 1 | 0 | 1 |
# in following example, column 'D' will be created as column of timestamp, and by those dataframe will be made into group and stacked.
# at plot, the timestamp column 'D' will be used as animation key frames.
# [DEPRECATE] '--change_timefreq' was deprecated. use 'csv_trimtime'.
csv_uty.py --change_timefreq='D=ABC002:%Y-%m-%d %H\:%M\:%S:floor:10s' bit-pattern-headers.csv|\\
csv_status.py --mode sum --group D -|csv_uty.py --drop_columns=ABC000,ABC001 - |\\
csv_uty.py --stack=D - |csv_plot_bar.py --output=bit-pattern-headers_10sec_sum.html --animation_column=D --yrange=0,1 - category stacked_result
csv_uty.py --sort=ABC004 test_sort.csv
csv_uty.py --sort="desc|ABC004,ABC005" test_sort.csv
csv_uty.py --sort_datetime="ABC002" test_sort.csv
csv_uty.py --sort_datetime="desc|ABC002" test_sort.csv
csv_uty.py --sort_datetime="ABC002:%Y-%m-%d %H\:%M\:%S" test_sort.csv
csv_uty.py --output_format=hdf --output=test.dat bit-pattern-headers.csv
input: test1.csv
A,B,C,D
1,2,3,0b01010
4,5,6,0b01
input: test3.csv
P1C1,P1C2,P2C1,P2C2,N
1,0,1,0,A
1,0,0,1,B
1,0,1,0,C
1,0,1,0,D
,1,1,1,E
,,1,1,F
1,1,,1,G
'''))
arg_parser.add_argument('-v', '--version', action='version', version='%(prog)s {}'.format(VERSION))
arg_parser.add_argument("--serial_column",
dest="SERICOLUMN",
help="add new column that has continus numbers, 0-base. If STEP was given, steped number is used.",
type=str,
metavar='COLUMN[:STEP]',
default=None)
arg_parser.add_argument("--drop_columns_regex",
dest="DCOLS_REGEX",
help="pattern of column names to drop",
type=str,
metavar='REGEX',
default=None)
arg_parser.add_argument("--drop_columns",
dest="DCOLS",
help="names of columns to drop",
type=str,
metavar='COLUMN[,COLUMN[,COLUMN...]',
default=None)
arg_parser.add_argument("--drop_rows",
dest="DROWS",
help="index of rows to drop, 0-base",
type=str,
metavar='INT[,INT]|INT-INT',
default=None)
arg_parser.add_argument("--drop_na_columns",
dest="DNACOLS",
help="names of columns to check NA and to drop. if 'all', rows are dropped with how='any'",
type=str,
metavar='COLUMN[,COLUMN[,COLUMN...]',
default=None)
arg_parser.add_argument(
"--drop_duplicated",
dest="DDUPCOLS",
help="names of columns to check duplicated rows and to drop others than first. if 'all', all columns are used to check",
type=str,
metavar='COLUMN[,COLUMN[,COLUMN...]',
default=None)
arg_parser.add_argument("--prologe",
dest="PROLOGE",
help="pieces of python code to pre-load, for use in expression of '--add_columns'.",
type=str,
metavar='CODE;[CODE;CODE;...]',
default=None)
arg_parser.add_argument(
"--change_timefreq",
dest="CHTFREQ",
help="[DEPRECATED]change datetime frequeny unit: format of definitoin is 'new_column_name=old_col_name:format:method:frequency'."
+ " if you use comma or colon in expression, those must be escaped with back-slash",
type=str,
metavar='COLUMN=definition[,COLUMN=definition...]',
default=None)
arg_parser.add_argument("--add_columns",
dest="ACOLS",
help="names and expressions of columns to add or replace, with csv format." +
" if you use comma in expression, the comma must be escaped with back-slash",
type=str,
metavar='COLUMN=expr[,COLUMN=expr...]',
default=None)
arg_parser.add_argument("--trim_columns",
dest="TRMS",
help="piece of python code for each column to replace and output",
type=str,
metavar='COLUMN=CODE[,COLUMN=CODE[,COLUMN=CODE...]',
default=None)
arg_parser.add_argument("--type_columns",
dest="DTYPE",
help="data type for each column:type=str, int, float, bin, oct, hex",
type=str,
metavar='COLUMN=type[,COLUMN=type..]',
default=None)
arg_parser.add_argument("--fillna",
dest="FILLNA",
help="fill na for each column. if starts with '@', internal function will be used, see remark.",
type=str,
metavar='COLUMN=value[,COLUMN=value...]',
default=None)
arg_parser.add_argument("--replace",
dest="REPLACE",
help="replace value for each column",
type=str,
metavar='COLUMN=JSON[,COLUMN=JSON...]',
default=None)
arg_parser.add_argument("--split_into_rows",
dest="SPLIT_CSV",
help="split each element value with csv format and store those into rows, default of separator=','",
type=str,
metavar='COLUMN[:SEPARATOR[,COLUMN:SEPARATOR]]',
default=None)
arg_parser.add_argument("--split_into_columns",
dest="SPLIT_FLAG",
help="split each element value with flag format and store those into columns, default of separator='|'",
type=str,
metavar='COLUMN[:SEPARATOR[,COLUMN:SEPARATOR]]',
default=None)
arg_parser.add_argument("--decompose_bit_string",
dest="DBIT",
help="decompose string as bit pattern. ex 01010101",
type=str,
metavar='COLUMN[:NBITS[,COLUMN...]]',
default=None)
arg_parser.add_argument("--rename_columns",
dest="RENAMECOLS",
help="rename columns",
type=str,
metavar='OLD_NAME:NEW_NAME[,OLD_NAME:NEW_NAME...]',
default=None)
arg_parser.add_argument("--sort",
dest="SORT",
help="sorting for columns, sort_order=ascendig or descendig",
type=str,
metavar="[sort_order|]COLUMN[,COLUMN...]",
default=None)
arg_parser.add_argument("--sort_datetime",
dest="DTSORT",
help="sorting for columns as datetime, sort_order=ascendig or descendig",
type=str,
metavar="[sort_order|]COLUMN:FORMAT",
default=None)
arg_parser.add_argument("--stack",
dest="STACKGCOL",
help="name of column to make group with stacking",
type=str,
metavar='COLUMN',
default=None)
arg_parser.add_argument("--transpose", dest="TRANS", help="transpose dataframe", action="store_true", default=False)
arg_parser.add_argument("--output_format", dest="OFORMAT", help="output format", choices=OUTPUT_FORMAT, default="csv")
arg_parser.add_argument("--columns_regex",
dest="OCOLS_REGEX",
help="pattern of column names to output",
type=str,
metavar='COLUMN[,COLUMN[,COLUMN...]',
default=None)
arg_parser.add_argument("--columns",
dest="OCOLS",
help="names of columns to output",
type=str,
metavar='COLUMN[,COLUMN[,COLUMN...]',
default=None)
arg_parser.add_argument("--output",
dest="OUTPUT",
help="path of output csv file, default=stdout",
type=str,
metavar='FILE',
default=sys.stdout)
arg_parser.add_argument('csv_file', metavar='CSV_FILE', help='file to read. if "-", stdin is used')
# arg_parser.add_argument('infile', nargs='?', type=argparse.FileType('r'), default=sys.stdin)
# arg_parser.add_argument('outfile', nargs='?', type=argparse.FileType('w'), default=sys.stdout)
args = arg_parser.parse_args()
return args
def get_output_file(outname_func, input_file, output_file, buffered=False):
"""retriev path of output file from path of 'input file'.
if 'output_file' was defined, then the value will be returned.
if 'output_file' was not defined, then 'output_file' will be derived from 'input_file'.
:param outname_func: function to make output file name, function has path of input file as only one argument.
:param input_file: path of input file
:param output_file: path of output file
:param buffered: if True and input_file=="-", then sys.stdout.buffer as output_file will be returned.
if False, sys.stdout will be returned.
:returns: path of output file or sys.stdout[.buffer]
:rtype: str or file handler
    :example:
output_file = get_output_file(lambda x: Path(x).stem + "_test.csv", input_file, output_file)
"""
# output_file = Path(input_file).stem + "_hist.csv"
if output_file is None or len(output_file) == 0:
if isinstance(input_file, str) and input_file != "-":
output_file = outname_func(input_file)
else:
if buffered:
output_file = sys.stdout.buffer
else:
output_file = sys.stdout
elif isinstance(output_file, str) and output_file == "-":
if buffered:
output_file = sys.stdout.buffer
else:
output_file = sys.stdout
if input_file == "-":
input_file = sys.stdin
return output_file
def trim_output_file(output_file, mkdir=True, overwrite=True):
"""check path of output file
:param output_file: path of output file
:param mkdir: if True, do mkdir
:param overwrite: if False and file existed, then exception will be raised.
"""
if isinstance(output_file, str):
p_dir = Path(output_file).parent
if mkdir and not Path(p_dir).exists():
Path(p_dir).mkdir(exist_ok=True, parents=True)
if not overwrite and Path(output_file).exists():
raise Exception("{} already exists".format(output_file))
def type_columns_in_df(df, typ_columns):
""" set type of columns at output results
:param df: dataframe that will be modified inplace
:param type_columns: list of definitions
"""
print("%inf:csv_uty:type_columns:{}".format(typ_columns), file=sys.stderr)
for fc in typ_columns:
cs = re.split(r"\s*=\s*", fc, maxsplit=1)
if len(cs) >= 2:
typ = cs[1]
if cs[0] in df.columns:
try:
if typ == "bin":
df[cs[0]] = df[cs[0]].astype(str, errors="ignore")
# print(df[cs[0]].dtype)
df[cs[0]] = df[cs[0]].apply(lambda x: ("0b{:016b}".format(int(x, 0)) if x != "nan" else x))
elif typ == "oct":
df[cs[0]] = df[cs[0]].astype(str, errors="ignore")
df[cs[0]] = df[cs[0]].apply(lambda x: ("0b{:016o}".format(int(x, 0)) if x != "nan" else x))
elif typ == "hex":
df[cs[0]] = df[cs[0]].astype(str, errors="ignore")
df[cs[0]] = df[cs[0]].apply(lambda x: ("0b{:016x}".format(int(x, 0)) if x != "nan" else x))
else:
df[cs[0]] = df[cs[0]].astype(cs[1])
except ValueError as e:
print("??Error:csv_uty:type_columns_in_df:{}={}:{}".format(cs[0], cs[1], e), file=sys.stderr)
except TypeError as e:
print("??Error:csv_uty:type_columns_in_df:{}={}:{}".format(cs[0], cs[1], e), file=sys.stderr)
else:
print("#warning:csv_uty:type_columns: invalid definitin for type columns:{}".format(fc), file=sys.stderr)
def trim_columns_in_df(df, fmt_columns):
""" trim output results for each column
:param df: dataframe that will be modified inplace
:param fmt_columns: list of definitions
"""
print("%inf:csv_uty:trim_columns:{}".format(fmt_columns), file=sys.stderr)
for fc in fmt_columns:
cs = re.split(r"\s*=\s*", fc, maxsplit=1)
if len(cs) >= 2:
if cs[0] in df.columns:
cs[1] = re.sub(r'\\,', r',', cs[1])
try:
estr = 'df["{0}"] = df["{0}"].map({1}, na_action="ignore")'.format(cs[0], cs[1])
print("%Inf:csv_uty:trim columns:{}".format(estr), file=sys.stderr)
exec(estr)
except ValueError as e:
print("??Error:csv_uty:trim_columns_in_df:{}={}:{}".format(cs[0], cs[1], e), file=sys.stderr)
else:
print("#warning:csv_uty:trim_columns: invalid definitin for triming columns:{}".format(fc), file=sys.stderr)
def add_columns_to_df(df, add_columns):
"""add or replace columns as result of evaluating expression
:param df: dataframe that will be modified inplace.
    :param add_columns: list of definitions
"""
print("%inf:csv_uty:add_columns:{}".format(add_columns), file=sys.stderr)
for ac in add_columns:
cs = re.split(r"\s*=\s*", ac, maxsplit=1)
if len(cs) >= 2:
# if cs[0] in df.columns:
# print("??Error:csv_uty:add_columns:{} was already exists".format(cs[0]), file=sys.stderr)
# exit(1)
rcs = cs[1]
# rcs = re.sub(r'\$(\w+)', r'df["\1"]', rcs)
# rcs = re.sub(r'\$([\w:;@_;:@_\(\)()]+)', r'df["\1"]', rcs)
rcs = re.sub(r'\${([^}]+)}', r'df["\1"]', rcs)
            rcs = re.sub(r'(0b[01]+|0o[0-7]+|0x[0-9a-f]+)', lambda m: str(int(m.group(1), 0)), rcs, flags=re.IGNORECASE)
rcs = re.sub(r'\\,', r',', rcs)
try:
estr = 'df["{}"]={}'.format(cs[0], rcs)
print("%Inf:csv_uty:add columns:{}".format(estr), file=sys.stderr)
exec(estr)
except TypeError as e:
print("??Error:csv_uty:add_columns_to_df:{}={}:{}".format(cs[0], rcs, e), file=sys.stderr)
else:
print("#warning:csv_uty:add_columns: invalid definitin for add columns:{}".format(ac), file=sys.stderr)
def prefix_number_to_int(df):
"""convert prefixed integer in dataframe into integer
:param df: dataframe that will be modified inplace.
:returns: information of modified columns
:rtype: list of dict
"""
done_columns = []
for col in df.columns:
val = str(df[col][0])
        if re.match(r'^(0x[a-f0-9]+|0o[0-7]+|0b[01]+)$', val, re.IGNORECASE) is not None:
df[col] = df[col].map(lambda x: int(x, 0), na_action='ignore')
done_columns.append({"column": col, "mode": val[:2], "length": len(val) - 2})
return done_columns
def int_to_prefix_numer(df, done_columns):
"""convert integer in dataframe into formatted string
:param df: dataframe that will be modified inplace
:param done_columns: result from prefix_number_to_int
"""
for col_d in done_columns:
col = col_d["column"]
mode = col_d["mode"]
slen = col_d["length"]
fmt = "#0{}{}".format(slen + 2, mode[1])
df[col] = df[col].map(lambda x: format(int(x), fmt), na_action='ignore')
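# Added round-trip sketch (not in the original): prefix_number_to_int() strips the
# 0b/0o/0x prefix so numeric operations can run, and int_to_prefix_numer() restores
# the original width and base afterwards. The column name 'flags' is made up.
def _example_prefix_number_roundtrip():
    df = pd.DataFrame({"flags": ["0b0101", "0b0011"]})
    done = prefix_number_to_int(df)    # flags becomes [5, 3]; remembers mode "0b", length 4
    int_to_prefix_numer(df, done)      # flags becomes ["0b0101", "0b0011"] again
    assert list(df["flags"]) == ["0b0101", "0b0011"]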
def parse_drop_rows(d_rows):
"""parse defitions about dorpping rows
:param d_rows: list of definitions
:returns: list of rows
:rtype: list[int]
"""
result = []
for dr in d_rows:
if re.search(r"-", dr) is not None:
csv = dr.split("-")
result.extend(range(int(csv[0]), int(csv[1]) + 1))
else:
result.append(dr)
result = list(map(int, result))
return result
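# Added illustration: how '--drop_rows' definitions expand; "1-3" becomes the
# inclusive range 1,2,3 and plain indices pass through unchanged.
def _example_parse_drop_rows():
    assert parse_drop_rows(["1-3", "7"]) == [1, 2, 3, 7]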
def decomp_bits_pattern(df, column_name, nbits=0):
"""decompose string into character as bits pattern.
:param df: DataFrame
:param column_name: name of column
:returns: DataFrame
:rtype:
:remark:
If possible, value will be translated into integer and converted into binary pattern.
In case of failure, columns are filled by '0'.
    If there is a quotation, it is removed.
    If there are "100" and "200" in the same column, "100" is treated as the binary string "0b100",
    but "200" is treated as a decimal value and the result is ["1","1","0","0","1","0","0","0"].
"""
print("%inf:csv_uty:decomp_bits:{}".format(column_name), file=sys.stderr)
df.reset_index(inplace=True)
ds = df[column_name]
if format(ds.dtype) != "string" and format(ds.dtype) != "object":
print("??Error:csv_uty:{} has no string.".format(column_name), file=sys.stderr)
return
if nbits > 0:
cns = ["{}_B{:03d}".format(column_name, i) for i in range(nbits)]
df[cns] = 0
cnames = set(cns)
else:
cnames = set()
ds.fillna("0", inplace=True)
for ir in range(len(df)):
val = ds.loc[ir]
if val is np.nan or val is None:
continue
val = val.strip('"\'') # removing quotation
val = re.sub(r"\.0$", "", val) # removing the trailing ".0"
if not val.isdecimal() and not (val.startswith("0b") or val.startswith("0o") or val.startswith("0x") and val[2:].isdecimal()):
print("??Error:csv_uty:{} was not decimal value:{}".format(val, column_name), file=sys.stderr)
continue
val = val.lower()
if val.isdecimal() and len(re.sub(r"[01]", "", val)) > 0:
val = "{:0b}".format(int(val))
elif val.startswith("0b"):
val = "{:0b}".format(int(val[2:], 2))
elif val.startswith("0o"):
val = "{:0b}".format(int(val[2:], 8))
elif val.startswith("0x"):
val = "{:0b}".format(int(val[2:], 16))
vals = list(reversed(list(val)))
vname = ["{}_B{:03d}".format(column_name, i) for i in range(len(vals))]
cnames.update(set(vname))
df.loc[ir, vname] = vals
print("%Inf:csv_uty:new columns was added: {}".format(sorted(list(cnames))), file=sys.stderr)
# fillna by '0'
for cn in cnames:
df[cn].fillna(0, inplace=True)
df[cn] = df[cn].astype('int64')
return df
def change_time_frequency(df, ch_definitions):
"""FIXME! briefly describe function
:param df:
:param ch_definitions: [new_column_name=old_column:format:method:frequency,...]
:returns:
:rtype:
"""
print("%inf:csv_uty:change_timefreq:{}".format(ch_definitions), file=sys.stderr)
print("#warn:csv_uty:change_timefreq:THIS IS DEPRECATED. USE csv_trimtime.py", file=sys.stderr)
try:
for cdf in ch_definitions:
cvs = re.split(r"\s*(?<!\\)=\s*", cdf)
cname = cvs[0]
if len(cvs) < 2:
print("??error:csv_uty:change_timefreq:invalid format of definition:{}".format(cdf), file=sys.stderr)
sys.exit(1)
cvs = re.split(r"\s*(?<!\\):\s*", cvs[1])
if len(cvs) < 3:
print("??error:csv_uty:change_timefreq:invalid format of definition:{}".format(cdf), file=sys.stderr)
sys.exit(1)
t_col = cvs[0]
t_format = cvs[1]
t_method = cvs[2]
t_freq = cvs[3]
t_format = re.sub(r"\\:", ":", t_format)
t_format = re.sub(r"\\=", "=", t_format)
df[cname] = pd.to_datetime(df[t_col], format=t_format)
if t_method == "floor":
df[cname] = df[cname].dt.floor(t_freq).dt.strftime(t_format)
elif t_method == "ceil":
df[cname] = df[cname].dt.ceil(t_freq).dt.strftime(t_format)
elif t_method == "round":
df[cname] = df[cname].dt.round(t_freq).dt.strftime(t_format)
else:
print("#warn:csv_uty:invalid method for '--change_timefreq':{} in {}".format(t_method, cdf), file=sys.stderr)
continue
vcs = df[cname].value_counts()
print("%inf:csv_uty:change_timefreq:column={}:number of uniq periods={}:max count in each period={}".format(
cname, len(vcs), max(vcs)),
file=sys.stderr)
except ValueError as e:
print("??error:csv_uty:change time frequency:{}:{}".format(t_col, e), file=sys.stderr)
sys.exit(1)
return df
def do_fillna(df, fillna_defs):
"""FIXME! briefly describe function
:param df:
:param fillna_defs: [COLUMN=[@]value,...]
:returns:
:rtype:
"""
print("%inf:csv_uty:fillna:{}".format(fillna_defs), file=sys.stderr)
for fd in fillna_defs:
cvs = re.split(r"\s*(?<!\\)=\s*", fd)
cname = cvs[0]
f_value = cvs[1]
if f_value == "@interpolate":
df[cname] = df[cname].astype('float64', errors='ignore')
df[cname].interpolate(limit_area='inside', inplace=True)
df[cname].interpolate(limit_area='outside', inplace=True)
elif f_value == "@forward":
df[cname].ffill(inplace=True)
elif f_value == "@backward":
df[cname].bfill(inplace=True)
else:
df[cname].fillna(f_value, axis=0, inplace=True)
return df
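# Added sketch of the '@forward' keyword (forward-fill via pandas' ffill); the column
# name and values are made up. The fill is applied in place on the column, so the
# outcome depends on pandas' in-place semantics for the installed version.
def _example_do_fillna():
    df = pd.DataFrame({"A": [1.0, None, None, 4.0]})
    do_fillna(df, ["A=@forward"])
    # expected: A == [1.0, 1.0, 1.0, 4.0]
    return df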
def do_replace(df, replace_defs):
"""FIXME! briefly describe function
:param df:
:param replace_defs: [COLUMN=JSON,...]
:returns:
:rtype:
"""
print("%inf:csv_uty:replace:{}".format(replace_defs), file=sys.stderr)
for fd in replace_defs:
cvs = re.split(r"\s*(?<!\\)=\s*", fd)
cname = cvs[0]
r_value = cvs[1]
r_value = re.sub(r"\\,", ",", r_value)
rep_def = json.loads(r_value)
df[cname].replace(rep_def, inplace=True)
return df
def do_split_into_rows(df, split_csvs):
"""FIXME! briefly describe function
:param df:
:param split_csvs: [COLUMN[:SEPARATOR],...]
:returns:
:rtype:
"""
print("%inf:csv_uty:split_into_rows:{}".format(split_csvs), file=sys.stderr)
for sc in split_csvs:
cvs = re.split(r"\s*(?<!\\):\s*", sc)
cname = cvs[0]
if len(cvs) > 1:
sep = cvs[1].lstrip("\\")
else:
sep = ","
if len(sep) == 0:
print("??error:csv_uty:invalid separator:'{}'".format(sep), file=sys.stderr)
sys.exit(1)
df[cname] = df[cname].str.split(sep)
df = df.explode(cname)
return df
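# Added illustration mirroring the 'COL003' column from the csvlook example near the
# top of the help text: each comma-separated element becomes its own row via explode().
def _example_do_split_into_rows():
    df = pd.DataFrame({"COL001": ["A"], "COL003": ["1,2,3"]}, dtype="string")
    out = do_split_into_rows(df, ["COL003"])
    assert list(out["COL003"]) == ["1", "2", "3"]
    assert list(out["COL001"]) == ["A", "A", "A"]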
def do_split_into_columns(df, split_flags):
"""FIXME! briefly describe function
:param df:
:param split_flags: [COLUMN[:SEPARATOR],...]]
if length of SEPARATOR, it is treadted as regexp.
:returns:
:rtype:
"""
print("%inf:csv_uty:split_into_columns:{}".format(split_flags), file=sys.stderr)
for sf in split_flags:
cvs = re.split(r"\s*(?<!\\):\s*", sf)
cname = cvs[0]
if len(cvs) > 1:
sep = cvs[1].lstrip("\\")
else:
sep = "|"
if len(sep) == 0:
print("??error:csv_uty:invalid separator:'{}'".format(sep), file=sys.stderr)
sys.exit(1)
if len(sep) > 1:
res_df = df[cname].str.extract(sep, expand=True)
else:
res_df = df[cname].str.get_dummies(sep=sep)
df = pd.concat([df, res_df], axis=1)
return df
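# Added illustration matching the 'COL004' flag column from the csvlook example above:
# a one-character separator goes through str.get_dummies and produces indicator columns.
def _example_do_split_into_columns():
    df = pd.DataFrame({"COL004": ["F1|F2|F3", "F2", "F1|F3"]}, dtype="string")
    out = do_split_into_columns(df, ["COL004"])
    assert list(out["F1"]) == [1, 0, 1]
    assert list(out["F3"]) == [1, 0, 1]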
def do_sort(df, column_defs, datetime_fmt=None):
if datetime_fmt is None:
print("%inf:csv_uty:sort:{}".format(column_defs), file=sys.stderr)
else:
print("%inf:csv_uty:sort as datetime:{},fmt={}".format(column_defs, datetime_fmt), file=sys.stderr)
# column_defs= [asc_or_desc|]column[,column...]
ascending = True
if column_defs.find("|") != -1:
cvs = re.split(r"\|", column_defs)
if cvs[0].lower().startswith("asc"):
ascending = True
elif cvs[0].lower().startswith("desc"):
ascending = False
else:
print("#warn:csv_uty:sort:invalid sort order:{}, ascending is assumed".format(cvs[0]), file=sys.stderr)
column_defs = cvs[1]
columns = re.split(r"\s*,\s*", column_defs)
if datetime_fmt is not None:
for cn in columns:
df[cn] = pd.to_datetime(df[cn], format=datetime_fmt)
df.sort_values(columns, ascending=ascending, inplace=True, axis=0, na_position="last")
return df
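# Added illustration of the order prefix: 'desc|' before the column list flips the
# sort, mirroring the '--sort="desc|ABC004,ABC005"' example in the help text.
def _example_do_sort():
    df = pd.DataFrame({"ABC004": ["1", "3", "2"]})
    out = do_sort(df, "desc|ABC004")
    assert list(out["ABC004"]) == ["3", "2", "1"]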
def output_dataframe(df, output_file, output_format, index=False, columns=[]):
"""FIXME! briefly describe function
:param df:
:param output_file:
:param output_format:
:param index:
:param columns:
:returns:
:rtype:
"""
print("%inf:csv_uty:output into:{} with '{}' format".format(output_file, output_format), file=sys.stderr)
if len(columns) > 0:
d_cols = list(set(df.columns) - set(columns))
df.drop(columns=d_cols, inplace=True)
if output_format == "csv":
df.to_csv(output_file, index=index)
elif output_format == "hdf":
df.to_hdf(output_file, key="csv_uty", mode="w", complevel=6)
elif output_format == "parquet":
df.to_parquet(output_file, index=index)
elif output_format == "pickel":
df.to_pickle(output_file)
elif output_format == "json":
df.to_json(output_file)
elif output_format == "feather":
df.to_feather(output_file)
elif output_format == "stata":
df.to_stata(output_file, write_index=index)
# def do_join_columns(df):
# df[cname] = df[columns].agg(sep.join, axis=1)
# pass
if __name__ == "__main__":
args = init()
csv_file = args.csv_file
output_file = args.OUTPUT
trans_mode = args.TRANS
prologe = args.PROLOGE
output_format = args.OFORMAT
if output_format in OUTPUT_REQUIRED_FILE and output_file == sys.stdout:
print("??error:csv_uty:'--output' was required fot format:{}".format(output_format), file=sys.stderr)
sys.exit(1)
if prologe is not None:
pps = re.split(r'\s*;\s*', prologe)
for pp in pps:
if len(pp) == 0:
continue
print("%Inf:csv_uty:exec python code:{}".format(pp), file=sys.stderr)
exec(pp)
output_columns_s = args.OCOLS
output_columns = []
if output_columns_s is not None:
output_columns = re.split(r"\s*,\s*", output_columns_s)
output_columns_regex = args.OCOLS_REGEX
# sorting
sort_defs = args.SORT
dt_sort_defs = args.DTSORT
dt_sort_fmt = None
if sort_defs is not None and dt_sort_defs is not None:
print("??error:csv_uty:invalid combination between '--sort' and '--sort_datetime'", file=sys.stderr)
sys.exit(1)
if dt_sort_defs is not None:
cvs = re.split(r"(?<!\\):", dt_sort_defs)
if len(cvs) > 1:
dt_sort_fmt = cvs[1]
dt_sort_fmt = re.sub(r"\\", "", dt_sort_fmt)
else:
dt_sort_fmt = "%Y-%m-%d %H:%M:%S"
sort_defs = cvs[0]
# time frequency
ch_timefreqs_s = args.CHTFREQ
ch_timefreqs = []
if ch_timefreqs_s is not None:
ch_timefreqs = re.split(r"\s*(?<!\\),\s*", ch_timefreqs_s)
# fillna
fillna_defs_s = args.FILLNA
fillna_defs = []
if fillna_defs_s is not None:
fillna_defs = re.split(r"\s*(?<!\\),\s*", fillna_defs_s)
# replace
replace_defs_s = args.REPLACE
replace_defs = []
if replace_defs_s is not None:
replace_defs = re.split(r"\s*(?<!\\),\s*", replace_defs_s)
# split csv
split_csvs_s = args.SPLIT_CSV
split_csvs = []
if split_csvs_s is not None:
split_csvs = re.split(r"\s*(?<!\\),\s*", split_csvs_s)
# split flag
split_flags_s = args.SPLIT_FLAG
split_flags = []
if split_flags_s is not None:
split_flags = re.split(r"\s*(?<!\\),\s*", split_flags_s)
# decompose string
decomp_bits_column_s = args.DBIT
decomp_bits_columns = []
if decomp_bits_column_s is not None:
decomp_bits_columns = re.split(r"\s*,\s*", decomp_bits_column_s)
# drop columns
drop_columns_s = args.DCOLS
drop_columns = []
if drop_columns_s is not None:
drop_columns = re.split(r"\s*,\s*", drop_columns_s)
# print("%Inf:csv_uty:removed columns:{}".format(drop_columns), file=sys.stderr)
for dc in drop_columns:
if dc in output_columns:
output_columns.remove(dc)
print("#warning:csv_uty:{} was rmoved from output columns".format(dc), file=sys.stderr)
# drop columns
drop_columns_regex = args.DCOLS_REGEX
# drop rows
drop_rows_s = args.DROWS
drop_rows = []
if drop_rows_s is not None:
drop_rows = re.split(r"\s*,\s*", drop_rows_s)
print("%Inf:csv_uty:removed rows:{}".format(drop_rows), file=sys.stderr)
# drop na
drop_na_columns_s = args.DNACOLS
drop_na_columns = []
if drop_na_columns_s is not None:
drop_na_columns = re.split(r"\s*,\s*", drop_na_columns_s)
print("%Inf:csv_uty:check na and drop:{}".format(drop_na_columns), file=sys.stderr)
# drop duplicated rows
drop_dup_columns_s = args.DDUPCOLS
drop_dup_columns = []
if drop_dup_columns_s is not None:
drop_dup_columns = re.split(r"\s*,\s*", drop_dup_columns_s)
print("%Inf:csv_uty:check duplicated and drop:{}".format(drop_dup_columns), file=sys.stderr)
# add columns
add_columns_s = args.ACOLS
add_columns = []
if add_columns_s is not None:
add_columns = re.split(r"\s*(?<!\\),\s*", add_columns_s)
# trim columns
trm_columns_s = args.TRMS
trm_columns = []
if trm_columns_s is not None:
trm_columns = re.split(r"\s*(?<!\\),\s*", trm_columns_s)
# type columns
typ_columns_s = args.DTYPE
typ_columns = []
if typ_columns_s is not None:
typ_columns = re.split(r"\s*(?<!\\),\s*", typ_columns_s)
if csv_file == "-":
in_file = sys.stdin
else:
in_file = csv_file
serial_column_s = args.SERICOLUMN
serial_column = ""
if serial_column_s is not None:
cvs = re.split(r"\s*:\s*", serial_column_s)
serial_column = cvs[0]
if len(cvs) > 1:
serial_step = int(cvs[1])
else:
serial_step = 1
rename_columns_s = args.RENAMECOLS
rename_columns = {}
if rename_columns_s is not None:
cvs = re.split(r"\s*,\s*", rename_columns_s)
for cs in cvs:
cns = re.split(r"\s*:\s*", cs)
if len(cns) < 2:
print("#warn:csv_uty:invalid argument for rename columns:{}".format(cs), file=sys.stderr)
else:
rename_columns[cns[0]] = cns[1]
stack_group_column = args.STACKGCOL
if stack_group_column is not None and trans_mode:
print("??error:csv_uty:invalid combination: '--stack' and '--transpose'", file=sys.stderr)
sys.exit(1)
    #--- processing
print("%Inf:csv_uty:read data from {}".format(in_file), file=sys.stderr)
csv_df = | pd.read_csv(in_file, dtype="string") | pandas.read_csv |
import pytest
import numpy as np
import pandas as pd
import os
import ray
from ray.data.context import DatasetContext
from ray.data.datasource.csv_datasource import CSVDatasource
from ray.internal.internal_api import memory_summary
from ray.tests.conftest import * # noqa
def expect_stages(pipe, num_stages_expected, stage_names):
stats = pipe.stats()
for name in stage_names:
name = " " + name + ":"
assert name in stats, (name, stats)
assert len(pipe._optimized_stages) == num_stages_expected, pipe._optimized_stages
def test_memory_sanity(shutdown_only):
info = ray.init(num_cpus=1, object_store_memory=500e6)
ds = ray.data.range(10)
ds = ds.map(lambda x: np.ones(100 * 1024 * 1024, dtype=np.uint8))
meminfo = memory_summary(info.address_info["address"], stats_only=True)
# Sanity check spilling is happening as expected.
assert "Spilled" in meminfo, meminfo
def test_memory_release_eager(shutdown_only):
info = ray.init(num_cpus=1, object_store_memory=1500e6)
ds = ray.data.range(10)
# Round 1.
ds = ds.map(lambda x: np.ones(100 * 1024 * 1024, dtype=np.uint8))
meminfo = memory_summary(info.address_info["address"], stats_only=True)
assert "Spilled" not in meminfo, meminfo
# Round 2.
ds = ds.map(lambda x: np.ones(100 * 1024 * 1024, dtype=np.uint8))
meminfo = memory_summary(info["address"], stats_only=True)
# TODO(ekl) we can add this back once we clear inputs on eager exec as well.
# assert "Spilled" not in meminfo, meminfo
def test_memory_release_lazy(shutdown_only):
info = ray.init(num_cpus=1, object_store_memory=1500e6)
ds = ray.data.range(10)
# Should get fused into single stage.
ds = ds._experimental_lazy()
ds = ds.map(lambda x: np.ones(100 * 1024 * 1024, dtype=np.uint8))
ds = ds.map(lambda x: np.ones(100 * 1024 * 1024, dtype=np.uint8))
ds = ds.map(lambda x: np.ones(100 * 1024 * 1024, dtype=np.uint8))
ds.fully_executed()
meminfo = memory_summary(info.address_info["address"], stats_only=True)
assert "Spilled" not in meminfo, meminfo
def test_memory_release_lazy_shuffle(shutdown_only):
# TODO(ekl) why is this flaky? Due to eviction delay?
error = None
for trial in range(3):
print("Try", trial)
try:
info = ray.init(num_cpus=1, object_store_memory=1800e6)
ds = ray.data.range(10)
# Should get fused into single stage.
ds = ds._experimental_lazy()
ds = ds.map(lambda x: np.ones(100 * 1024 * 1024, dtype=np.uint8))
ds.random_shuffle().fully_executed()
meminfo = memory_summary(info.address_info["address"], stats_only=True)
assert "Spilled" not in meminfo, meminfo
return
except Exception as e:
error = e
print("Failed", e)
finally:
ray.shutdown()
raise error
def test_spread_hint_inherit(ray_start_regular_shared):
ds = ray.data.range(10)._experimental_lazy()
ds = ds.map(lambda x: x + 1)
ds = ds.random_shuffle()
for s in ds._plan._stages:
assert s.ray_remote_args == {}, s.ray_remote_args
ds._plan._optimize()
assert len(ds._plan._stages) == 1, ds._plan._stages
assert ds._plan._stages[0].ray_remote_args == {"scheduling_strategy": "SPREAD"}
def test_optimize_fuse(ray_start_regular_shared):
context = DatasetContext.get_current()
def build_pipe():
pipe = ray.data.range(3).window(blocks_per_window=1).repeat(2)
pipe = pipe.map_batches(lambda x: x)
pipe = pipe.map_batches(lambda x: x)
pipe = pipe.random_shuffle_each_window()
results = [sorted(p.take()) for p in pipe.iter_epochs()]
assert results == [[0, 1, 2], [0, 1, 2]], results
return pipe
context.optimize_fuse_stages = True
context.optimize_fuse_read_stages = True
context.optimize_fuse_shuffle_stages = True
expect_stages(
build_pipe(),
1,
["read->map_batches->map_batches->random_shuffle_map", "random_shuffle_reduce"],
)
context.optimize_fuse_stages = True
context.optimize_fuse_read_stages = False
context.optimize_fuse_shuffle_stages = True
expect_stages(
build_pipe(),
1,
[
"read",
"map_batches->map_batches->random_shuffle_map",
"random_shuffle_reduce",
],
)
context.optimize_fuse_stages = True
context.optimize_fuse_read_stages = False
context.optimize_fuse_shuffle_stages = False
expect_stages(
build_pipe(),
2,
[
"read",
"map_batches->map_batches",
"random_shuffle_map",
"random_shuffle_reduce",
],
)
context.optimize_fuse_stages = False
context.optimize_fuse_read_stages = False
context.optimize_fuse_shuffle_stages = False
expect_stages(
build_pipe(),
3,
[
"read",
"map_batches",
"map_batches",
"random_shuffle_map",
"random_shuffle_reduce",
],
)
def test_optimize_incompatible_stages(ray_start_regular_shared):
context = DatasetContext.get_current()
context.optimize_fuse_stages = True
context.optimize_fuse_read_stages = True
context.optimize_fuse_shuffle_stages = True
pipe = ray.data.range(3).repeat(2)
pipe = pipe.map_batches(lambda x: x, compute="actors")
pipe = pipe.map_batches(lambda x: x, compute="tasks")
pipe = pipe.random_shuffle_each_window()
pipe.take()
expect_stages(
pipe,
3,
[
"read",
"map_batches",
"map_batches->random_shuffle_map",
"random_shuffle_reduce",
],
)
pipe = ray.data.range(3).repeat(2)
pipe = pipe.map_batches(lambda x: x, compute="tasks")
pipe = pipe.map_batches(lambda x: x, num_cpus=0.75)
pipe = pipe.random_shuffle_each_window()
pipe.take()
expect_stages(
pipe,
3,
[
"read->map_batches",
"map_batches",
"random_shuffle_map",
"random_shuffle_reduce",
],
)
@ray.remote
class Counter:
def __init__(self):
self.value = 0
def increment(self):
self.value += 1
return self.value
def get(self):
return self.value
def reset(self):
self.value = 0
class MySource(CSVDatasource):
def __init__(self, counter):
self.counter = counter
def _read_stream(self, f, path: str, **reader_args):
count = self.counter.increment.remote()
ray.get(count)
for block in CSVDatasource._read_stream(self, f, path, **reader_args):
yield block
def test_optimize_reread_base_data(ray_start_regular_shared, local_path):
context = DatasetContext.get_current()
context.optimize_fuse_stages = True
context.optimize_fuse_read_stages = True
context.optimize_fuse_shuffle_stages = True
# Re-read on.
N = 4
df1 = | pd.DataFrame({"one": [1, 2, 3], "two": ["a", "b", "c"]}) | pandas.DataFrame |
import pandas as pd
import matplotlib.pyplot as plt
names = ["Round","nodes alive","sent","avg energy","max energy","min energy"]
minimum = [999999999999999, 0]
columnas_nodes_alive = | pd.DataFrame() | pandas.DataFrame |
# Catboost for Avito Demand Prediction Challenge
# https://www.kaggle.com/c/avito-demand-prediction
# By <NAME>, April 2018
#https://www.kaggle.com/nicapotato/simple-catboost/code
import time
notebookstart = time.time()
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import os
import gc
from sklearn.model_selection import KFold
# print("Data:\n", os.listdir("../input"))
# Models Packages
from sklearn import metrics
from sklearn.metrics import mean_squared_error
from sklearn import feature_selection
from catboost import CatBoostRegressor
from sklearn.model_selection import train_test_split
from sklearn import preprocessing
from sklearn import *
# Viz
# import seaborn as sns
# import matplotlib.pyplot as plt
print("\nData Load Stage")
training = pd.read_csv('../input/train.csv', index_col="item_id", parse_dates=["activation_date"])
traindex = training.index
len_train = len(training)
testing = pd.read_csv('../input/test.csv', index_col="item_id", parse_dates=["activation_date"])
testdex = testing.index
y = training.deal_probability.copy()
training.drop("deal_probability", axis=1, inplace=True)
print('Train shape: {} Rows, {} Columns'.format(*training.shape))
print('Test shape: {} Rows, {} Columns'.format(*testing.shape))
# Combine Train and Test
df = | pd.concat([training, testing], axis=0) | pandas.concat |
# TITLE: SIMULATION ENGINE APP
# DESCRIPTION: This code outputs the Emergent Alliance's Simulation Engine to an app that can be interacted with using a web browser.
# AUTHOR: <NAME> - Data Scientist at IBM
# v1 - <NAME>
# v2 - <NAME>, <NAME>
# IMPORTS
import streamlit as st
import pandas as pd
import numpy as np
import plotly.express as px
from scipy.integrate import odeint, simps
from sqlalchemy import create_engine
import json
# CLASS AND FUNCTION DEFINITIONS
# Read data in
@st.cache
def read_data(path = None, engine = None, table = None, region=None):
if (path == None) & (engine != None):
_df = pd.read_sql(table, engine)
_df.index = _df['Sectors']
_df.drop(columns = 'Sectors', inplace = True)
_df.columns = _df.index
elif (path != None) & (engine == None) & ((region == 'UK') | (region is None)):
_df = pd.read_csv(path + '/A_UK.csv', header = [0, 1], index_col= [0, 1])
_df.columns = _df.index.get_level_values(1).values
_df.index = _df.columns
elif (path != None) & (engine == None) & (region == 'US'):
_df = | pd.read_csv(path + '/A_US.csv', index_col=0) | pandas.read_csv |
import sys
from pathlib import Path
from typing import Dict, Optional, Sequence
from warnings import simplefilter
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from statsmodels.regression.linear_model import OLS
from statsmodels.tools import add_constant
from tqdm import tqdm
from epimargin.estimators import analytical_MPVS
from epimargin.etl.commons import download_data
from epimargin.etl.covid19india import (get_time_series, load_statewise_data,
state_name_lookup)
from epimargin.smoothing import notched_smoothing
from epimargin.utils import cwd, days
simplefilter("ignore")
def project(dates, R_values, smoothing, period = 7*days):
julian_dates = [_.to_julian_date() for _ in dates[-smoothing//2:None]]
return OLS(
        R_values[-smoothing//2:None],
add_constant(julian_dates)
)\
.fit()\
.predict([1, julian_dates[-1] + period])[0]
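# Added note: project() fits a straight line (OLS with an intercept) through the most
# recent smoothing//2 estimates against their Julian dates and reads the line off
# 'period' days past the last date. A standalone sketch with made-up numbers (not
# real Rt estimates):
def _example_linear_projection():
    y = np.array([1.2, 1.1, 1.0, 0.9, 0.8])            # recent estimates
    x = add_constant(np.arange(len(y), dtype=float))    # stand-in for Julian dates
    fit = OLS(y, x).fit()
    return fit.predict([1, len(y) - 1 + 7])[0]          # value 7 steps ahead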
# set to cloud temp directory if not explicitly told to run locally
root = cwd() if len(sys.argv) > 1 and sys.argv[1] == "--local" else Path("/tmp")
data = root/"data"
# model details
gamma = 0.2
smoothing = 10
CI = 0.95
download_data(data, 'state_wise_daily.csv')
state_df = load_statewise_data(data/"state_wise_daily.csv")
country_time_series = get_time_series(state_df)
estimates = []
timeseries = []
# country level
(
dates,
RR_pred, RR_CI_upper, RR_CI_lower,
T_pred, T_CI_upper, T_CI_lower,
total_cases, new_cases_ts,
anomalies, anomaly_dates
) = analytical_MPVS(country_time_series["Hospitalized"].iloc[:-1], CI = CI, smoothing = notched_smoothing(window = smoothing))
country_code = state_name_lookup["India"]
for row in zip(dates, RR_pred, RR_CI_upper, RR_CI_lower):
timeseries.append((country_code, *row))
# state level rt estimates
state_time_series = get_time_series(state_df, 'state')
state_names = list(state_time_series.index.get_level_values(level=0).unique())
max_len = 1 + max(map(len, state_names))
with tqdm(state_names) as states:
for state in states:
state_code = state_name_lookup[state]
states.set_description(f"{state :<{max_len}}")
try:
(dates, RR_pred, RR_CI_upper, RR_CI_lower, T_pred, T_CI_upper, T_CI_lower, total_cases, new_cases_ts, anomalies, anomaly_dates) = analytical_MPVS(state_time_series.loc[state]['Hospitalized'], CI = CI, smoothing = notched_smoothing(window = smoothing), totals = False)
r = pd.Series(RR_pred, index = dates)
print("|::|", state, ",", round(r[(r.index > "2020-04-01") & (r.index < "2020-05-01")].max(), 3), ",", state_time_series.loc[state].Hospitalized.sum())
for row in zip(dates, RR_pred, RR_CI_upper, RR_CI_lower):
timeseries.append((state_code, *row))
estimates.append((state_code, RR_pred[-1], RR_CI_lower[-1], RR_CI_upper[-1], project(dates, RR_pred, smoothing)))
except (IndexError, ValueError):
estimates.append((state, np.nan, np.nan, np.nan, np.nan))
estimates = | pd.DataFrame(estimates) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Created on Wed May 27 08:26:01 2020
@author: matth
"""
import numpy as np
from sklearn.model_selection import KFold
import pandas as pd
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
import random
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.tree import ExtraTreeClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.neighbors import RadiusNeighborsClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import SGDClassifier
from sklearn.ensemble import BaggingClassifier
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.svm import NuSVC
from sklearn.svm import SVC
#from sklearn.mixture import DPGMM
# -*- coding: utf-8 -*-
def get_ev(input_df, input_model, input_features, input_labels, odds_input, min_ev = 0, verbose=False, get_total=True):
df_sel = input_df[input_features]
df_sel = df_sel.dropna()
df_sel = pd.get_dummies(df_sel)
labels_sel = input_labels[input_labels.index.isin(df_sel.index)]
odds_sel = odds_input[odds_input.index.isin(df_sel.index)]
if len(odds_sel.columns) == 6:
best_score = custom_cv_eval_mov(df_sel, input_model, labels_sel, odds_sel, min_ev = min_ev, verbose=verbose,
get_total=get_total)
else:
best_score = custom_cv_eval(df_sel, input_model, labels_sel, odds_sel, min_ev = min_ev, verbose=verbose,
get_total=get_total)
return best_score
#Input: American Odds, and Probability of a Winning Bet
#Output: Bet EV based on a $100 bet
def get_bet_ev(odds, prob):
if odds>0:
return ((odds * prob) - (100 * (1-prob)) )
else:
return ((100 / abs(odds))*100*prob - (100 * (1-prob)))
#Input: American Odds
#Output: Profit on a successful bet
def get_bet_return(odds):
if odds>0:
return odds
else:
return (100 / abs(odds))*100
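#Added illustration (not part of the original): EV and payout for a $100 stake.
#The odds/probabilities below are made up.
def _example_bet_math():
    # +150 underdog with an estimated 45% win probability:
    # EV = 150*0.45 - 100*0.55 = 12.5 dollars per $100 bet
    assert abs(get_bet_ev(150, 0.45) - 12.5) < 1e-9
    # A -200 favourite returns 100/200*100 = 50 dollars of profit on a win
    assert abs(get_bet_return(-200) - 50.0) < 1e-9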
# get_ev_from_df_mov(odds_test, probs, labels_test, label_list, print_stats = True, min_ev = input_ev, get_total=True)
def get_ev_from_df_mov(df_odds, probs, labels, label_list, probs_label_list, print_stats = False, min_ev = 0, get_total=True):
probs_label_list = [int(a) for a in probs_label_list]
#labels = [int(a) for a in labels]
df_odds.reset_index(drop=True, inplace=True)
labels.reset_index(drop=True, inplace=True)
score = 0
#print(df_odds)
for i in range(len(df_odds)):
#print(i)
# df_temp_odds = df_odds.iloc[[i, :]]
#print(df_odds.iloc[[i]])
for l in range(len(probs[i])):
#print(f"{label_list[probs_label_list[l]]}: {probs[i][l]}")
temp_odds = (df_odds.loc[[i]])[label_list[probs_label_list[l]]][i]
#print((temp_odds))
bet_ev = get_bet_ev(temp_odds, probs[i][l])
#print(bet_ev)
if bet_ev > min_ev:
#print(l)
if labels[i] == probs_label_list[l]:
#print(f"{int(labels[i])} {probs_label_list[l]}")
score = score + get_bet_return(temp_odds)
#print(f"Winning Bet. New Score: {score}")
else:
score = score - 100
#print(f"Losing Bet. New Score: {score}")
#print()
#print(f"Result: {label_list[int(labels[i])]} ({int(labels[i])})")
print("Real Score: " + str(score))
return(score)
def get_ev_from_df(ev_df, print_stats = False, min_ev = 0, get_total=True):
num_matches = 0
num_bets = 0
num_wins = 0
num_losses= 0
num_under= 0
num_under_losses = 0
num_under_wins = 0
num_even = 0
num_even_losses = 0
num_even_wins = 0
num_fav = 0
num_fav_wins = 0
num_fav_losses = 0
profit = 0
profit_per_bet = 0
profit_per_match = 0
for index, row in ev_df.iterrows():
num_matches = num_matches+1
t1_bet_ev = get_bet_ev(row['t1_odds'], row['t1_prob'])
#print(f"ODDS:{row['t1_odds']} PROB: {row['t1_prob']} EV: {t1_bet_ev}")
t2_bet_ev = get_bet_ev(row['t2_odds'], row['t2_prob'])
#print(f"ODDS:{row['t2_odds']} PROB: {row['t2_prob']} EV: {t2_bet_ev}")
#print()
t1_bet_return = get_bet_return(row['t1_odds'])
t2_bet_return = get_bet_return(row['t2_odds'])
if (t1_bet_ev > min_ev or t2_bet_ev > min_ev):
num_bets = num_bets+1
if t1_bet_ev > min_ev:
if row['winner'] == 0:
num_wins += 1
profit = profit + t1_bet_return
#print(t1_bet_return)
elif row['winner'] == 1:
num_losses += 1
profit = profit - 100
if (t1_bet_return > t2_bet_return):
num_under += 1
if row['winner'] == 0:
num_under_wins += 1
elif row['winner'] == 1:
num_under_losses += 1
elif (t1_bet_return < t2_bet_return):
num_fav += 1
if row['winner'] == 0:
num_fav_wins += 1
elif row['winner'] == 1:
num_fav_losses += 1
else:
num_even += 1
if row['winner'] == 0:
num_even_wins += 1
elif row['winner'] == 1:
num_even_losses += 1
if t2_bet_ev > min_ev:
if row['winner'] == 1:
num_wins += 1
profit = profit + t2_bet_return
elif row['winner'] == 0:
num_losses += 1
profit = profit - 100
if (t2_bet_return > t1_bet_return):
num_under += 1
if row['winner'] == 1:
num_under_wins += 1
elif row['winner'] == 0:
num_under_losses += 1
elif (t2_bet_return < t1_bet_return):
num_fav += 1
if row['winner'] == 1:
num_fav_wins += 1
elif row['winner'] == 0:
num_fav_losses += 1
else:
num_even += 1
if row['winner'] == 1:
num_even_wins += 1
elif row['winner'] == 0:
num_even_losses += 1
if num_bets > 0:
profit_per_bet = profit / num_bets
else:
profit_per_bet = 0
if num_matches > 0:
profit_per_match = profit / num_matches
else:
profit_per_match = 0
if print_stats:
print(f"""
Number of matches: {num_matches}
Number of bets: {num_bets}
Number of winning bets: {num_wins}
Number of losing bets: {num_losses}
Number of underdog bets: {num_under}
Number of underdog wins: {num_under_wins}
Number of underdog losses: {num_under_losses}
Number of Favorite bets: {num_fav}
Number of favorite wins: {num_fav_wins}
Number of favorite losses: {num_fav_losses}
Number of even bets: {num_even}
Number of even wins: {num_even_wins}
Number of even losses: {num_even_losses}
Profit: {profit}
Profit per bet: {profit_per_bet}
Profit per match: {profit_per_match}
""")
if (get_total):
#print(f"# Matches: {num_matches}, # Bets: {num_bets} # Wins: {num_wins}")
return(profit)
else:
return (profit_per_bet)
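#Added illustration (not part of the original): the dataframe layout expected by
#get_ev_from_df -- one row per match with both moneylines, the model's probabilities
#and the actual winner (0 = team 1, 1 = team 2). Values are made up.
def _example_get_ev_from_df():
    ev_df = pd.DataFrame({
        "t1_odds": [150, -200],
        "t2_odds": [-180, 170],
        "t1_prob": [0.55, 0.60],
        "t2_prob": [0.45, 0.40],
        "winner":  [0, 1],
    })
    return get_ev_from_df(ev_df, print_stats=False, min_ev=0)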
#INPUT:
#df: The df to be evaluated
#m: The model to use
# labels: The labels
#odds: The odds
#min_ev: The minimum EV to place a bet
def custom_cv_eval(df, m, labels, odds, min_ev=0, verbose=False, get_total=True):
#If we have less than 5 samples we are going to break the split.
if len(df) < 5:
return 0
X = np.array(df)
y = np.array(labels)
odds = np.array(odds)
running_total = 0
count=1
kf = KFold(n_splits=5, shuffle=True, random_state=75)
for train_index, test_index in kf.split(X):
X_train, X_test = X[train_index], X[test_index]
y_train, y_test = y[train_index], y[test_index]
odds_train, odds_test = odds[train_index], odds[test_index]
#display(y_train)
scaler = StandardScaler()
scaled_train = scaler.fit_transform(X_train)
scaled_test = scaler.transform(X_test)
m.fit(scaled_train, y_train)
probs=m.predict_proba(scaled_test)
#print(probs)
#We need to prep the dataframe to evaluate....
#X_odds = X_test[['t1_odds', 't2_odds']]
#print(X_test)
#print(X_test[:, -1])
#print(X_test[:, -2])
X_odds = list(zip(odds_test[:, -2], odds_test[:, -1], probs[:, 0], probs[:, 1], y_test))
ev_prepped_df = pd.DataFrame(X_odds, columns=['t1_odds', 't2_odds', 't1_prob', 't2_prob', 'winner'])
#display(ev_prepped_df)
#display(temp_df)
#print(f"{count}: {get_ev_from_df(ev_prepped_df, print_stats = False)}")
count=count+1
running_total = running_total + get_ev_from_df(ev_prepped_df, print_stats = verbose, min_ev = min_ev, get_total=get_total)
#display(ev_prepped_df)
return running_total
def get_ev_for_optimize_mov(df_odds, probs, labels, print_stats = False, min_ev = 0, get_total=True):
score = 0
#print(df_odds)
for i in range(len(df_odds)):
#print(i)
# df_temp_odds = df_odds.iloc[[i, :]]
#print()
#print()
#print(df_odds[i])
for l in range(len(probs[i])):
temp_odds = (df_odds[i][l])
#print((temp_odds))
bet_ev = get_bet_ev(temp_odds, probs[i][l])
#print(bet_ev)
if bet_ev > min_ev:
#print(l)
if labels[i] == l:
#print(f"{int(labels[i])} {l}")
score = score + get_bet_return(temp_odds)
#print(f"Winning Bet. New Score: {score}")
else:
score = score - 100
#print(f"Losing Bet. New Score: {score}")
#print()
#print(f"Result: {labels[i]}")
return(score)
def custom_cv_eval_mov(df, m, labels, odds, min_ev=0, verbose=False, get_total=True):
#If we have less than 5 samples we are going to break the split.
#print("HI")
if len(df) < 5:
return 0
X = np.array(df)
y = np.array(labels)
odds = np.array(odds)
running_total = 0
count=1
kf = KFold(n_splits=5, shuffle=True, random_state=75)
for train_index, test_index in kf.split(X):
X_train, X_test = X[train_index], X[test_index]
y_train, y_test = y[train_index], y[test_index]
odds_train, odds_test = odds[train_index], odds[test_index]
#display(y_train)
scaler = StandardScaler()
scaled_train = scaler.fit_transform(X_train)
scaled_test = scaler.transform(X_test)
m.fit(scaled_train, y_train)
probs=m.predict_proba(scaled_test)
#print(probs)
#We need to prep the dataframe to evaluate....
#X_odds = X_test[['t1_odds', 't2_odds']]
#print(X_test)
#print(X_test[:, -1])
#print(X_test[:, -2])
#X_odds = list(zip(odds_test[:, -2], odds_test[:, -1], probs[:, 0], probs[:, 1], y_test))
#ev_prepped_df = pd.DataFrame(X_odds, columns=['t1_odds', 't2_odds', 't1_prob', 't2_prob', 'winner'])
#display(ev_prepped_df)
#display(temp_df)
#print(f"{count}: {get_ev_from_df(ev_prepped_df, print_stats = False)}")
count=count+1
running_total = running_total + get_ev_for_optimize_mov(odds_test, probs, y_test, min_ev= min_ev, get_total=get_total )
#display(ev_prepped_df)
return running_total
#We split off the labels and the odds. Now we can rewrite the function
#INPUT
#pos_features: The list of possible features
#m: The model
#cur_features: The list of current features
#scale: Does the data need to be scaled?
def get_best_features(pos_features, m, df, cur_features, labels, odds, scale=False, min_ev=0):
best_feature = ''
#If there are no current features...
if len(cur_features) == 0:
best_score = -10000
else:
df_sel = df[cur_features]
df_sel = df_sel.dropna()
df_sel = pd.get_dummies(df_sel)
#OK we need to filter the labels and odds based off of the indices
labels_sel = labels[labels.index.isin(df_sel.index)]
odds_sel = odds[odds.index.isin(df_sel.index)]
best_score = custom_cv_eval(df_sel, m, labels_sel, odds_sel, min_ev=min_ev)
best_feature = ""
print(f"Current best score is: {best_score}")
#Go thru every feature and test it...
for f in pos_features:
#If f is not a current feature
if f not in cur_features:
new_features = [f] + cur_features
df_sel = df[new_features]
df_sel = df_sel.dropna()
df_sel = pd.get_dummies(df_sel)
#display(df_sel)
#OK we need to filter the labels and odds based off of the indices
labels_sel = labels[labels.index.isin(df_sel.index)]
odds_sel = odds[odds.index.isin(df_sel.index)]
new_score = custom_cv_eval(df_sel, m, labels_sel, odds_sel, min_ev=min_ev)
#print(f"{len(df_sel)} {len(labels_sel)} {len(odds_sel)}")
if new_score > best_score:
print(f"Feature: {f} Score: {new_score}")
best_score = new_score
best_feature = f
if best_feature != "":
print(f"The best feature was {best_feature}. It scored {best_score}")
cur_features = [best_feature] + cur_features
#Keep running until we don't improve
return(get_best_features(pos_features, m, df, cur_features, labels, odds, scale, min_ev=min_ev))
else:
print("NO IMPROVEMENT")
print(f"FINAL BEST SCORE: {best_score}")
return cur_features
return []
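#Added usage sketch (hypothetical feature/dataframe names): greedy forward selection
#driven by cross-validated betting EV -- keep adding the single feature that raises
#the EV the most and stop when nothing improves it. Example call (commented out
#because it needs real data):
#selected = get_best_features(pos_features=["elo_diff", "form_diff"],
#                             m=LogisticRegression(max_iter=50000),
#                             df=matches_df, cur_features=[],
#                             labels=labels, odds=odds, min_ev=0)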
def get_best_features_mov(pos_features, m, df, cur_features, labels, odds, label_list, scale=False, min_ev=0):
best_feature = ''
#If there are no current features...
if len(cur_features) == 0:
best_score = -1000000
else:
df_sel = df[cur_features]
df_sel = df_sel.dropna()
df_sel = pd.get_dummies(df_sel)
#OK we need to filter the labels and odds based off of the indices
labels_sel = labels[labels.index.isin(df_sel.index)]
odds_sel = odds[odds.index.isin(df_sel.index)]
labels_sel = labels_sel.dropna()
odds_sel = odds_sel[odds_sel.index.isin(labels_sel.index)]
df_sel = df_sel[df_sel.index.isin(labels_sel.index)]
best_score = custom_cv_eval_mov(df_sel, m, labels_sel, odds_sel, min_ev=min_ev)
best_feature = ""
print(f"Current best score is: {best_score}")
#Go thru every feature and test it...
for f in pos_features:
#If f is not a current feature
if f not in cur_features:
new_features = [f] + cur_features
df_sel = df[new_features]
df_sel = df_sel.dropna()
df_sel = pd.get_dummies(df_sel)
#display(df_sel)
#OK we need to filter the labels and odds based off of the indices
labels_sel = labels[labels.index.isin(df_sel.index)]
odds_sel = odds[odds.index.isin(df_sel.index)]
labels_sel = labels_sel.dropna()
odds_sel = odds_sel[odds_sel.index.isin(labels_sel.index)]
df_sel = df_sel[df_sel.index.isin(labels_sel.index)]
new_score = custom_cv_eval_mov(df_sel, m, labels_sel, odds_sel, min_ev=min_ev)
#print(f"{len(df_sel)} {len(labels_sel)} {len(odds_sel)}")
if new_score > best_score:
print(f"Feature: {f} Score: {new_score}")
best_score = new_score
best_feature = f
if best_feature != "":
print(f"The best feature was {best_feature}. It scored {best_score}")
cur_features = [best_feature] + cur_features
#Keep running until we don't improve
return(get_best_features_mov(pos_features, m, df, cur_features, labels, odds, label_list, scale, min_ev=min_ev))
else:
print("NO IMPROVEMENT")
print(f"FINAL BEST SCORE: {best_score}")
return cur_features
return []
def tune_LogisticRegression(input_model, input_features, input_df, input_labels, odds_input, min_ev=0):
###############################################################################################################
#Parameters we are going to fine-tune:
#1. penalty ('l1' or 'l2')
#2. tol (original_value, original_value * 1.2, original_value * 0.8, rand(0, 10)
#3. random_state = 75
#4. solver = 'newton-cg', 'lbfgs', 'sag', 'saga'
###############################################################################################################
print()
print()
print("Starting New Run for LogisticRegression")
print()
print()
output_model = input_model
best_score = get_ev(input_df, input_model, input_features, input_labels, odds_input, min_ev=min_ev)
print("Previous Best Score:", best_score)
penalty = ['l1', 'l2', 'none']
solver = ['newton-cg', 'lbfgs', 'sag']
tol = [input_model.tol, input_model.tol * 1.2, input_model.tol * .8, random.random() * 10 ]
for s in solver:
score = -10000
for p in penalty:
for t in tol:
if ((s == 'newton-cg') & (p == 'l1')) |\
((s == 'lbfgs') & (p == 'l1')) |\
((s == 'sag') & (p == 'l1')):
pass
else:
test_model = LogisticRegression(solver = s, penalty = p, tol=t, random_state=75, max_iter=50000)
score = get_ev(input_df, test_model, input_features, input_labels, odds_input, min_ev=min_ev)
if score > best_score:
best_score = score
output_model = test_model
print()
print("NEW BEST SCORE")
print("solver:", s,
"penalty:", p,
"tol:", t,
"Best Score:", best_score)
print()
print()
else:
pass
print("solver:", s,
"penalty:", p,
"tol:", t,
"Score:", score)
return(output_model)
def tune_DecisionTreeClassifier(input_model, input_features, input_df, input_labels, odds_input, min_ev=0):
###############################################################################################################
#Parameters we are going to fine-tune:
#1. criterion ('gini', 'entropy')
#2. splitter ('random', 'best')
#3. max_depth ('none', IF A NUMBER EXISTS +1, -1, random, else 2 RANDOM INTS 1->100)
#4. min_samples_leaf(n-1, 0, n+1)
#5. max_leaf_nodes:('none', n+1, n-1, OR 4 random numbers)
###############################################################################################################
print()
print()
print("Starting New Run for DecisionTree")
print()
print()
output_model = input_model
best_score = get_ev(input_df, input_model, input_features, input_labels, odds_input, min_ev=min_ev)
print("Previous Best Score:", best_score)
criterion = ['gini', 'entropy']
splitter = ['random', 'best']
if input_model.max_depth == None:
max_depth = [None, random.randrange(100)+1, random.randrange(100)+1]
else:
max_depth = [input_model.max_depth, input_model.max_depth - 1, input_model.max_depth + 1, random.randrange(100)+1]
max_depth = [i for i in max_depth if i > 0]
min_samples_leaf = [input_model.min_samples_leaf, input_model.min_samples_leaf *1.01,
input_model.min_samples_leaf*0.99]
min_samples_leaf = [i for i in min_samples_leaf if i > 0]
if ((input_model.max_leaf_nodes == None) or (input_model.max_leaf_nodes == 1)):
max_leaf_nodes = [None, random.randrange(1000)+1, random.randrange(1000)+1]
else:
max_leaf_nodes = [input_model.max_leaf_nodes, input_model.max_leaf_nodes - 1,
input_model.max_leaf_nodes + 1, random.randrange(1000)+1]
max_leaf_nodes = [i for i in max_leaf_nodes if i > 0]
for l in max_leaf_nodes:
for sam in min_samples_leaf:
for m in max_depth:
for c in criterion:
for s in splitter:
test_model = DecisionTreeClassifier(criterion = c, splitter = s, max_depth = m,
min_samples_leaf=sam, max_leaf_nodes = l, random_state=75)
score = get_ev(input_df, test_model, input_features, input_labels, odds_input, min_ev=min_ev)
if score > best_score:
best_score = score
output_model = test_model
print()
print("NEW BEST SCORE")
print("Criterion:", c, "splitter:", s, "max_depth:", m,
"min_samples_leaf:", sam, "max_leaf_nodes:", l, best_score)
print()
else:
pass
print("Criterion:", c, "splitter:", s, "max_depth:", m,
"min_samples_leaf:", sam, "max_leaf_nodes:", l, score)
return output_model
def tune_RandomForestClassifier(input_model, input_features, input_df, input_labels, odds_input, min_ev=0, tested_hps = []):
###############################################################################################################
#Parameters we are going to fine-tune:
#1. criterion ('gini', 'entropy')
#2. max_features ('auto', 'sqrt', 'log2')
#3. max_depth ('none', IF A NUMBER EXISTS +2, -2, ELSE 2 RANDOM INTS 1->100)
#4. min_samples_leaf(n-2, 0, n+2)
#5. max_leaf_nodes:('none', n+2, n-2, OR 2 random numbers)
#6. n_estimators: (n, n+2, n-2)
###############################################################################################################
print()
print()
print("Starting New Run for RandomForestClassifier")
print()
print()
output_model = input_model
best_score = get_ev(input_df, input_model, input_features, input_labels, odds_input, min_ev=min_ev)
print("Previous Best Score:", best_score)
#1. criterion ('gini', 'entropy')
criterion = ['gini', 'entropy']
#2. max_features ('auto', 'log2')
max_features = ['auto', 'log2', None]
#3. max_depth ('none', IF A NUMBER EXISTS +2, +4, -2, -4 ELSE 4 RANDOM INTS 1->100)
if input_model.max_depth == None:
max_depth = [None, random.randrange(100)+1]
else:
max_depth = [input_model.max_depth, random.randrange(100)+1]
max_depth = [i for i in max_depth if i > 0]
#4. min_samples_leaf(n-1, n-2, 0, n+1, n+2)
min_samples_leaf = [input_model.min_samples_leaf, input_model.min_samples_leaf*1.01, input_model.min_samples_leaf*0.99]
min_samples_leaf = [i for i in min_samples_leaf if i > 0]
#5. max_leaf_nodes:('none', n+1, n+2, n-1, n-2, OR 4 random numbers)
if ((input_model.max_leaf_nodes == None) or (input_model.max_leaf_nodes == 1)):
max_leaf_nodes = [None, random.randrange(1000)+1]
else:
max_leaf_nodes = [input_model.max_leaf_nodes, random.randrange(1000)+1]
max_leaf_nodes = [i for i in max_leaf_nodes if i > 1 ]
n_estimators = [input_model.n_estimators, random.randrange(200)+1]
n_estimators = [i for i in n_estimators if i > 0]
for n in n_estimators:
for ml in max_leaf_nodes:
for ms in min_samples_leaf:
for md in max_depth:
for mf in max_features:
for c in criterion:
if (len(tested_hps) == 6) and (n in tested_hps[0]) and (ml in tested_hps[1]) and (ms in tested_hps[2]) and \
(md in tested_hps[3]) and (mf in tested_hps[4]) and (c in tested_hps[5]):
print("PASS. We have already tested this.")
else:
test_model = RandomForestClassifier(n_estimators = n, max_leaf_nodes = ml,
min_samples_leaf = ms,
max_depth = md, criterion = c,
max_features = mf,
n_jobs = -1,
random_state=75)
#score = random.random()
score = get_ev(input_df, test_model, input_features, input_labels, odds_input, min_ev=min_ev)
if score > best_score:
best_score = score
output_model = test_model
print()
print("NEW BEST SCORE")
print("Criterion:", c, "max_features:", mf, "max_depth:", md, "min_samples_leaf:", ms,
"max_leaf_nodes:", ml, "n_estimators", n, best_score)
print()
print()
else:
pass
print("Criterion:", c, "max_features:", mf, "max_depth:", md, "min_samples_leaf:", ms,
"max_leaf_nodes:", ml, "n_estimators", n, score)
new_hps = [n_estimators, max_leaf_nodes, min_samples_leaf, max_depth, max_features, criterion]
return output_model, new_hps
def tune_ExtraTreeClassifier(input_model, input_features, input_df, input_labels, odds_input, min_ev=0, tested_hps = []):
###############################################################################################################
#Parameters we are going to fine-tune:
#1. criterion ('gini', 'entropy')
#2. max_features ('auto', 'sqrt', 'log2')
#3. max_depth ('none', IF A NUMBER EXISTS +2, -2, ELSE 2 RANDOM INTS 1->100)
#4. min_samples_leaf(n-2, 0, n+2)
#5. max_leaf_nodes:('none', n+2, n-2, OR 2 random numbers)
#6. n_estimators: (n, n+2, n-2)
###############################################################################################################
print()
print()
print("Starting New Run for RandomForestClassifier")
print()
print()
output_model = input_model
best_score = get_ev(input_df, input_model, input_features, input_labels, odds_input, min_ev=min_ev)
print("Previous Best Score:", best_score)
#1. criterion ('gini', 'entropy')
criterion = ['gini', 'entropy']
#2. max_features ('auto', 'log2')
max_features = ['auto', 'log2', None]
#3. max_depth ('none', IF A NUMBER EXISTS +2, +4, -2, -4 ELSE 4 RANDOM INTS 1->100)
if input_model.max_depth == None:
max_depth = [None, random.randrange(100)+1]
else:
max_depth = [input_model.max_depth, random.randrange(100)+1]
max_depth = [i for i in max_depth if i > 0]
#4. min_samples_leaf(n-1, n-2, 0, n+1, n+2)
min_samples_leaf = [input_model.min_samples_leaf, input_model.min_samples_leaf*1.01, input_model.min_samples_leaf*0.99]
min_samples_leaf = [i for i in min_samples_leaf if i > 0]
#5. max_leaf_nodes:('none', n+1, n+2, n-1, n-2, OR 4 random numbers)
if ((input_model.max_leaf_nodes == None) or (input_model.max_leaf_nodes == 1)):
max_leaf_nodes = [None, random.randrange(1000)+1]
else:
max_leaf_nodes = [input_model.max_leaf_nodes, random.randrange(1000)+1]
max_leaf_nodes = [i for i in max_leaf_nodes if i > 1 ]
for ml in max_leaf_nodes:
for ms in min_samples_leaf:
for md in max_depth:
for mf in max_features:
for c in criterion:
if (len(tested_hps) == 6) and (ml in tested_hps[1]) and (ms in tested_hps[2]) and \
(md in tested_hps[3]) and (mf in tested_hps[4]) and (c in tested_hps[5]):
print("PASS. We have already tested this.")
else:
test_model = RandomForestClassifier(
min_samples_leaf = ms,
max_depth = md, criterion = c,
max_features = mf,
random_state=75)
#score = random.random()
score = get_ev(input_df, test_model, input_features, input_labels, odds_input, min_ev=min_ev)
if score > best_score:
best_score = score
output_model = test_model
print()
print("NEW BEST SCORE")
print("Criterion:", c, "max_features:", mf, "max_depth:", md, "min_samples_leaf:", ms,
"max_leaf_nodes:", ml, best_score)
print()
print()
else:
pass
print("Criterion:", c, "max_features:", mf, "max_depth:", md, "min_samples_leaf:", ms,
"max_leaf_nodes:", ml, score)
new_hps = [max_leaf_nodes, min_samples_leaf, max_depth, max_features, criterion]
return output_model, new_hps
def tune_GradientBoostingClassifier(input_model, input_features, input_df, input_labels, odds_input, min_ev=0, tested_hps = []):
###############################################################################################################
#Parameters we are going to fine-tune:
#1. criterion ('friedman_mse', 'mse', 'mae')
#2. loss ('deviance', 'exponential')
#3. n_estimators (n, n+1, n-1)
#4. learning_rate (learning_rate, learning_rate *1.1, learning_rate*.9)
#5. min_samples_leaf: (n, n-1, n+1)
#6. max_depth: (n, n+1, n-1)
#7. max_features: (None, 'auto', 'sqrt', 'log2')
#8. max_leaf_nodes: (None, n+1, n-1, OR 2 random numbers)
#9. tol (n, n*1.1, n*.9)
###############################################################################################################
print()
print()
print("Starting New Run")
print()
print()
output_model = input_model
best_score = get_ev(input_df, input_model, input_features, input_labels, odds_input, min_ev=min_ev)
print("Previous Best Score:", best_score)
#1. criterion ('friedman_mse', 'mse', 'mae')
criterion = ['friedman_mse']
#2. loss ('deviance', 'exponential')
loss = ['deviance']
#3. n_estimators (n, n+1, n-1)
n_estimators = [input_model.n_estimators, random.randrange(200)+1]
n_estimators = [i for i in n_estimators if i > 0]
#4. learning_rate (learning_rate, learning_rate *1.1, learning_rate*.9)
learning_rate = [input_model.learning_rate]
#5. min_samples_leaf: (n, n-1, n+1)
min_samples_leaf = [input_model.min_samples_leaf, input_model.min_samples_leaf*0.99, input_model.min_samples_leaf*1.01]
min_samples_leaf = [i for i in min_samples_leaf if i > 0]
#6. max_depth: (n, n+1, n-1)
if input_model.max_depth == None:
max_depth = [None, random.randrange(100)+1]
else:
max_depth = [input_model.max_depth, random.randrange(100)+1]
max_depth = [i for i in max_depth if i > 0]
#7. max_features: (None, 'auto', 'sqrt', 'log2')
max_features = ['sqrt', 'log2', None]
#8. max_leaf_nodes: (None, n+1, n-1, OR 2 random numbers)
if input_model.max_leaf_nodes == None:
max_leaf_nodes = [None, random.randrange(1000)+1]
else:
max_leaf_nodes = [input_model.max_leaf_nodes, random.randrange(1000)+1]
max_leaf_nodes = [i for i in max_leaf_nodes if i > 0]
#9. tol (n, n*1.1, n*.9)
tol = [input_model.tol, random.random()]
print(len(tol) * len(max_leaf_nodes) * len(max_features) * len(max_depth) * len(min_samples_leaf) * len(learning_rate) * len(n_estimators) * len(loss) * len(criterion))
for t in tol:
for ml in max_leaf_nodes:
for mf in max_features:
for md in max_depth:
for ms in min_samples_leaf:
for lr in learning_rate:
for n in n_estimators:
for l in loss:
for c in criterion:
test_model = GradientBoostingClassifier(n_estimators = n,
learning_rate = lr,
criterion = c,
min_samples_leaf = ms,
max_depth = md,
loss = l,
max_features = mf,
max_leaf_nodes = ml,
tol = t,
random_state=75)
score = get_ev(input_df, test_model, input_features, input_labels, odds_input, min_ev=min_ev)
if score > best_score:
best_score = score
output_model = test_model
print()
print("NEW BEST SCORE")
print("Criterion:", c,
"n_estimators:", n,
"Loss:", l,
"Learning Rate:", lr,
"Min Samples/Leaf:", ms,
"Max Depth:", md,
"Max Features:", mf,
"Max Leaf Nodes:", ml,
"tol:", t,
"Best Score:", best_score)
print()
print()
else:
pass
print("Criterion:", c,
"n_estimators:", n,
"Loss:", l,
"Learning Rate:", lr,
"Min Samples/Leaf:", ms,
"Max Depth:", md,
"Max Features:", mf,
"Max Leaf Nodes:", ml,
"tol:", t,
"Score:", score)
return(output_model)
def tune_GaussianNB(input_model, input_features, input_df, input_labels, odds_input, min_ev=0):
###############################################################################################################
#Parameters we are going to fine-tune:
#1. var_smoothing (1e-12, 1e-11, 1e-10, 1e-9, 1e-8, 1e-7, 1e-6)
###############################################################################################################
print()
print()
print("Starting New Run for GaussianNB")
print()
print()
output_model = input_model
best_score = get_ev(input_df, input_model, input_features, input_labels, odds_input, min_ev=min_ev)
print("Previous Best Score:", best_score)
var_smoothing = [1e-12, 1e-11, 1e-10, 1e-9, 1e-8, 1e-7, 1e-6]
for v in var_smoothing:
test_model = GaussianNB(var_smoothing = v)
score = get_ev(input_df, test_model, input_features, input_labels, odds_input, min_ev=min_ev)
if score > best_score:
best_score = score
output_model = test_model
print()
print("NEW BEST SCORE")
print("var_smoothing:", v,
"Best Score:", best_score)
print()
print()
else:
pass
print("var_smoothing:", v,
"Score:", score)
return output_model
def tune_MLPClassifier(input_model, input_features, input_df, input_labels, odds_input, min_ev=0):
###############################################################################################################
#Parameters we are going to fine-tune:
#1. hidden_layer_sizes (the current value plus several alternative layer configurations)
###############################################################################################################
print()
print()
print("Starting New Run for MLPClassifier")
print()
print()
output_model = input_model
best_score = get_ev(input_df, input_model, input_features, input_labels, odds_input, min_ev=min_ev)
print("Previous Best Score:", best_score)
hidden_layer_sizes = [input_model.hidden_layer_sizes, (100,), (10,5), (6,), (1,),(5,), (15,),(25,)]
for h in hidden_layer_sizes:
test_model = MLPClassifier(hidden_layer_sizes=h)
score = get_ev(input_df, test_model, input_features, input_labels, odds_input, min_ev=min_ev)
if score > best_score:
best_score = score
output_model = test_model
print()
print("NEW BEST SCORE")
print("hidden_layer_sizes:", h,
"Best Score:", best_score)
print()
print()
else:
pass
print("hidden_layer_sizes:", h,
"Score:", score)
return output_model
def tune_RadiusNeighborsClassifier(input_model, input_features, input_df, input_labels, odds_input, min_ev=0):
###############################################################################################################
#Parameters we are going to fine-tune:
#1. weights ('uniform' or 'distance')
#2. p (1 or 2)
###############################################################################################################
print()
print()
print("Starting New Run for RadiusNeighborsClassifier")
print()
print()
output_model = input_model
best_score = get_ev(input_df, input_model, input_features, input_labels, odds_input, min_ev=min_ev)
print("Previous Best Score:", best_score)
weights = ['uniform', 'distance']
p = [1,2]
for w in weights:
score = -10000
for p1 in p:
test_model = RadiusNeighborsClassifier(weights = w, p=p1, outlier_label='most_frequent')
score = get_ev(input_df, test_model, input_features, input_labels, odds_input, min_ev=min_ev)
if score > best_score:
best_score = score
output_model = test_model
print()
print("NEW BEST SCORE")
print("weights:", w,
"p:", p1,
"Best Score:", best_score)
print()
print()
else:
pass
print("weights:", w,
"p:", p1,
"Score:", score)
return(output_model)
def tune_KNeighborsClassifier(input_model, input_features, input_df, input_labels, odds_input, min_ev=0):
###############################################################################################################
#Parameters we are going to fine-tune:
#1. n_neighbors (1,2,3,4,5,6,7,8,9,10)
#2. weights ('uniform', 'distance')
###############################################################################################################
print()
print()
print("Starting New Run for KNeighborsClassifier")
print()
print()
output_model = input_model
best_score = get_ev(input_df, input_model, input_features, input_labels, odds_input, min_ev=min_ev)
print("Previous Best Score:", best_score)
weights = ['uniform', 'distance']
n_neighbors = [1,2,3,4,5,6,7,8,9,10]
for w in weights:
score = -10000
for n in n_neighbors:
test_model = KNeighborsClassifier(weights = w, n_neighbors=n)
score = get_ev(input_df, test_model, input_features, input_labels, odds_input, min_ev=min_ev)
if score > best_score:
best_score = score
output_model = test_model
print()
print("NEW BEST SCORE")
print("weights:", w,
"n_neighbors:", n,
"Best Score:", best_score)
print()
print()
else:
pass
print("weights:", w,
"n_neighbors:", n,
"Score:", score)
return(output_model)
def tune_SGDClassifier(input_model, input_features, input_df, input_labels, odds_input, min_ev=0):
###############################################################################################################
#Parameters we are going to fine-tune:
#1. penalty ('l1', 'l2')
#2. alpha (input_model.alpha, input_model.alpha*.9, input_model.alpha*1.1)
#3. loss ('modified_huber', 'log')
###############################################################################################################
print()
print()
print("Starting New Run for SGDClassifier")
print()
print()
output_model = input_model
best_score = get_ev(input_df, input_model, input_features, input_labels, odds_input, min_ev=min_ev)
print("Previous Best Score:", best_score)
penalty = ['l1', 'l2']
alpha = [input_model.alpha, input_model.alpha*.9, input_model.alpha*1.1]
loss = ['modified_huber', 'log']
for l in loss:
for p in penalty:
score = -10000
for a in alpha:
test_model = SGDClassifier(loss = l, penalty = p, alpha = a, random_state=75)
score = get_ev(input_df, test_model, input_features, input_labels, odds_input, min_ev=min_ev)
if score > best_score:
best_score = score
output_model = test_model
print()
print("NEW BEST SCORE")
print("loss: ", l,
"penalty: ", p,
"alpha: ", a,
"Best Score: ", best_score)
print()
print()
else:
pass
print("loss: ", l,
"penalty: ", p,
"alpha: ", a,
"Score: ", score)
return(output_model)
def tune_BaggingClassifier(input_model, input_features, input_df, input_labels, odds_input, min_ev=0):
###############################################################################################################
#Parameters we are going to fine-tune:
#1. base_estimator ('GaussianNB()', 'DecisionTreeClassifier()', LogisticRegression(), RadiusNeighborsClassifier())
#2. bootstrap(True, False)
#3. n_estimators(input_model.n_estimators, input_model.n_estimators+3, input_model.n_estimators-3)
###############################################################################################################
print()
print()
print("Starting New Run for BaggingClassifier")
print()
print()
output_model = input_model
best_score = get_ev(input_df, input_model, input_features, input_labels, odds_input, min_ev=min_ev)
print("Previous Best Score:", best_score)
base_estimator = [GaussianNB(), DecisionTreeClassifier(random_state=75), LogisticRegression(random_state=75)]
bootstrap = [True, False]
n_estimators = [input_model.n_estimators, input_model.n_estimators+3, input_model.n_estimators-3]
for be in base_estimator:
for bs in bootstrap:
for e in n_estimators:
if e > 0:
test_model = BaggingClassifier(base_estimator = be, bootstrap = bs, n_estimators = e, random_state=75)
score = get_ev(input_df, test_model, input_features, input_labels, odds_input, min_ev=min_ev)
if score > best_score:
best_score = score
output_model = test_model
print()
print("NEW BEST SCORE")
print("base_estimator: ", be,
"bootstrap: ", bs,
"n_estimators: ", e,
"Best Score: ", best_score)
print()
print()
else:
pass
print("base_estimator: ", be,
"bootstrap: ", bs,
"n_estimators: ", e,
"Score: ", score)
return(output_model)
def tune_LinearDiscriminantAnalysis(input_model, input_features, input_df, input_labels, odds_input, min_ev=0):
###############################################################################################################
#Parameters we are going to fine-tune:
#1. solver ['svd', 'lsqr', 'eigen']
#2. tol [n, n*1.1, n*.9]
###############################################################################################################
print()
print()
print("Starting New Run for LinearDiscriminantAnalysis")
print(input_model)
print()
print()
output_model = input_model
best_score = get_ev(input_df, input_model, input_features, input_labels, odds_input, min_ev=min_ev)
print("Previous Best Score:", best_score)
solver = ['svd', 'lsqr']
tol = [input_model.tol, input_model.tol*1.1, input_model.tol*0.9]
for s in solver:
for t in tol:
test_model = LinearDiscriminantAnalysis(solver = s, tol = t)
score = get_ev(input_df, test_model, input_features, input_labels, odds_input, min_ev=min_ev)
if score > best_score:
best_score = score
output_model = test_model
print()
print("NEW BEST SCORE")
print("solver: ", s,
"tol: ", t,
"Best Score: ", best_score)
print()
print()
else:
pass
print("solver: ", s,
"tol: ", t,
"Score: ", score)
print("output model: " + str(output_model))
return(output_model)
def tune_NuSVC(input_model, input_features, input_df, input_labels, odds_input, min_ev=0):
###############################################################################################################
#Parameters we are going to fine-tune:
#1. nu = [input_model.nu, input_model.nu*1.1, input_model.nu*0.9, random.random()]
#2. tol = [input_model.tol, input_model.tol*1.1, input_model.tol*0.9, random.random()]
###############################################################################################################
print()
print()
print("Starting New Run for NuSVC")
print()
print()
output_model = input_model
best_score = get_ev(input_df, input_model, input_features, input_labels, odds_input, min_ev=min_ev)
print("Previous Best Score:", best_score)
nu = [input_model.nu, input_model.nu*1.1, input_model.nu*0.9, random.random()]
tol = [input_model.tol, input_model.tol*1.1, input_model.tol*0.9, random.random()]
for n in nu:
for t in tol:
test_model = NuSVC(nu=n, tol=t, probability=True, random_state=75)
score = get_ev(input_df, test_model, input_features, input_labels, odds_input, min_ev=min_ev)
if score > best_score:
best_score = score
output_model = test_model
print()
print("NEW BEST SCORE")
print("nu: ", n,
"tol: ", t,
"Best Score: ", best_score)
print()
print()
else:
pass
print("nu: ", n,
"tol: ", t,
"Score: ", score)
return(output_model)
def tune_SVC(input_model, input_features, input_df, input_labels, odds_input, min_ev=0):
###############################################################################################################
#Parameters we are going to fine-tune:
#1. C = [input_model.C, input_model.C*1.1, input_model.C*0.9, random.random()*1000]
#2. tol = [input_model.tol, input_model.tol*1.1, input_model.tol*0.9, random.random()]
###############################################################################################################
print()
print()
print("Starting New Run for SVC")
print()
print()
output_model = input_model
best_score = get_ev(input_df, input_model, input_features, input_labels, odds_input, min_ev=min_ev)
print("Previous Best Score:", best_score)
C = [input_model.C, input_model.C*1.1, input_model.C*0.9, random.random()*1000]
tol = [input_model.tol, input_model.tol*1.1, input_model.tol*0.9, random.random()]
for n in C:
for t in tol:
test_model = SVC(C=n, tol=t, probability=True, random_state=75)
score = get_ev(input_df, test_model, input_features, input_labels, odds_input, min_ev=min_ev)
if score > best_score:
best_score = score
output_model = test_model
print()
print("NEW BEST SCORE")
print("C: ", n,
"tol: ", t,
"Best Score: ", best_score)
print()
print()
else:
pass
print("C: ", n,
"tol: ", t,
"Score: ", score)
return(output_model)
"""
def tune_DPGMM(input_model, input_features, input_df, input_labels, odds_input, min_ev=0):
###############################################################################################################
#Parameters we are going to fine-tune:
#1. covariance_type = ['spherical', 'tied', 'diag', 'full']
#2. tol = [input_model.tol, input_model.tol*1.1, input_model.tol*0.9, random.random()]
###############################################################################################################
print()
print()
print("Starting New Run for DPGMM")
print()
print()
output_model = input_model
best_score = get_ev(input_df, input_model, input_features, input_labels, odds_input, min_ev=min_ev)
print("Previous Best Score:", best_score)
covariance_type = ['spherical', 'tied', 'diag', 'full']
tol = [input_model.tol, input_model.tol*1.1, input_model.tol*0.9, random.random()]
for n in covariance_type:
for t in tol:
test_model = DPGMM(covariance_type = n, tol = t)
score = get_ev(input_df, test_model, input_features, input_labels, odds_input, min_ev=min_ev)
if score > best_score:
best_score = score
output_model = test_model
print()
print("NEW BEST SCORE")
print("Covariance Type: ", n,
"tol: ", t,
"Best Score: ", best_score)
print()
print()
else:
pass
print("Covariance Type: ", n,
"tol: ", t,
"Score: ", score)
return(output_model)
"""
#Let's just hold for now
def tune_hyperparameters_mov(input_model, input_features, input_df, input_labels, odds_input, min_ev=0):
return(input_model)
def tune_hyperparameters(input_model, input_features, input_df, input_labels, odds_input, min_ev=0):
best_model = input_model
keep_going = True
"""
if isinstance(input_model, DPGMM):
while(keep_going):
pos_model = (tune_DPGMM(best_model, input_features, input_df, input_labels, odds_input, min_ev=min_ev))
if str(pos_model) == str(best_model): #Direct comparisons don't seem to work....
keep_going = False
output_model = best_model
else:
best_model = pos_model
"""
if isinstance(input_model, SVC):
while(keep_going):
pos_model = (tune_SVC(best_model, input_features, input_df, input_labels, odds_input, min_ev=min_ev))
if str(pos_model) == str(best_model): #Direct comparisons don't seem to work....
keep_going = False
output_model = best_model
else:
best_model = pos_model
elif isinstance(input_model, NuSVC):
while(keep_going):
pos_model = (tune_NuSVC(best_model, input_features, input_df, input_labels, odds_input, min_ev=min_ev))
if str(pos_model) == str(best_model): #Direct comparisons don't seem to work....
keep_going = False
output_model = best_model
else:
best_model = pos_model
elif isinstance(input_model, LinearDiscriminantAnalysis):
while(keep_going):
pos_model = (tune_LinearDiscriminantAnalysis(best_model, input_features, input_df, input_labels, odds_input, min_ev=min_ev))
if str(pos_model) == str(best_model): #Direct comparisons don't seem to work....
keep_going = False
output_model = best_model
print("pos model: " + str(pos_model))
print(" output_model: " + str(output_model))
else:
best_model = pos_model
elif isinstance(input_model, BaggingClassifier):
while(keep_going):
pos_model = (tune_BaggingClassifier(best_model, input_features, input_df, input_labels, odds_input, min_ev=min_ev))
if str(pos_model) == str(best_model): #Direct comparisons don't seem to work....
keep_going = False
output_model = best_model
else:
best_model = pos_model
elif isinstance(input_model, SGDClassifier):
while(keep_going):
pos_model = (tune_SGDClassifier(best_model, input_features, input_df, input_labels, odds_input, min_ev=min_ev))
if str(pos_model) == str(best_model): #Direct comparisons don't seem to work....
keep_going = False
output_model = best_model
else:
best_model = pos_model
elif isinstance(input_model, LogisticRegression):
while(keep_going):
pos_model = (tune_LogisticRegression(best_model, input_features, input_df, input_labels, odds_input, min_ev=min_ev))
if str(pos_model) == str(best_model): #Direct comparisons don't seem to work....
keep_going = False
output_model = best_model
else:
best_model = pos_model
elif isinstance(input_model, DecisionTreeClassifier):
while(keep_going):
pos_model = (tune_DecisionTreeClassifier(best_model, input_features, input_df, input_labels, odds_input, min_ev=min_ev))
if str(pos_model) == str(best_model): #Direct comparisons don't seem to work....
keep_going = False
output_model = best_model
else:
best_model = pos_model
elif isinstance(input_model, RandomForestClassifier):
tested_hps = []
while(keep_going):
pos_model, tested_hps = (tune_RandomForestClassifier(best_model, input_features, input_df, input_labels, odds_input, min_ev=min_ev, tested_hps=tested_hps))
print(tested_hps)
print(len(tested_hps))
if str(pos_model) == str(best_model): #Direct comparisons don't seem to work....
keep_going = False
output_model = best_model
else:
best_model = pos_model
elif isinstance(input_model, ExtraTreeClassifier):
tested_hps = []
while(keep_going):
pos_model, tested_hps = (tune_ExtraTreeClassifier(best_model, input_features, input_df, input_labels, odds_input, min_ev=min_ev, tested_hps=tested_hps))
print(tested_hps)
print(len(tested_hps))
if str(pos_model) == str(best_model): #Direct comparisons don't seem to work....
keep_going = False
output_model = best_model
else:
best_model = pos_model
elif isinstance(input_model, GradientBoostingClassifier):
print("HI")
while(keep_going):
pos_model = (tune_GradientBoostingClassifier(best_model, input_features, input_df, input_labels, odds_input, min_ev=min_ev))
if str(pos_model) == str(best_model): #Direct comparisons don't seem to work....
keep_going = False
output_model = best_model
else:
best_model = pos_model
elif isinstance(input_model, MLPClassifier):
print("MLPClassifier")
while(keep_going):
pos_model = (tune_MLPClassifier(best_model, input_features, input_df, input_labels, odds_input, min_ev=min_ev))
if str(pos_model) == str(best_model): #Direct comparisons don't seem to work....
keep_going = False
output_model = best_model
else:
best_model = pos_model
elif isinstance(input_model, GaussianNB):
while(keep_going):
pos_model = (tune_GaussianNB(best_model, input_features, input_df, input_labels, odds_input, min_ev=min_ev))
if str(pos_model) == str(best_model): #Direct comparisons don't seem to work....
keep_going = False
output_model = best_model
else:
best_model = pos_model
elif isinstance(input_model, RadiusNeighborsClassifier):
while(keep_going):
pos_model = (tune_RadiusNeighborsClassifier(best_model, input_features, input_df, input_labels, odds_input, min_ev=min_ev))
if str(pos_model) == str(best_model): #Direct comparisons don't seem to work....
keep_going = False
output_model = best_model
else:
best_model = pos_model
elif isinstance(input_model, KNeighborsClassifier):
while(keep_going):
pos_model = (tune_KNeighborsClassifier(best_model, input_features, input_df, input_labels, odds_input, min_ev=min_ev))
if str(pos_model) == str(best_model): #Direct comparisons don't seem to work....
keep_going = False
output_model = best_model
else:
best_model = pos_model
else:
output_model = input_model
print("Real output model: " + str(output_model))
return(output_model)
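# Hedged usage sketch (not part of the original codebase): tune_hyperparameters is a
# hill-climber, so it is normally seeded with a default estimator and the loops above
# keep re-tuning until the returned model stops changing. The names `features`, `df`,
# `labels`, and `odds` below are placeholders for the caller's own data.
#
#   from sklearn.linear_model import LogisticRegression
#   seed_model = LogisticRegression(random_state=75)
#   tuned_model = tune_hyperparameters(seed_model, features, df, labels, odds, min_ev=0)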
#Let's just hold....
def tune_ev_mov(input_model, input_features, input_df, input_labels, odds_input, verbose=False):
return(0)
def tune_ev(input_model, input_features, input_df, input_labels, odds_input, verbose=False):
best_ev = -1000000
best_pos = -1
for temp_ev in range(250):
pos_ev = get_ev(input_df, input_model, input_features, input_labels, odds_input, min_ev=temp_ev, verbose=verbose,
get_total=True)
print(temp_ev, pos_ev)
if pos_ev > best_ev:
best_ev = pos_ev
best_pos = temp_ev
return best_pos
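# Hedged usage sketch (illustrative only): tune_ev sweeps integer EV cutoffs 0..249 and
# returns the cutoff that maximised the total EV reported by get_ev. The result can be
# fed back into get_ev or the tuning routines above through their min_ev argument;
# `tuned_model`, `features`, `df`, `labels`, and `odds` are placeholders.
#
#   best_cutoff = tune_ev(tuned_model, features, df, labels, odds)
#   final_score = get_ev(df, tuned_model, features, labels, odds, min_ev=best_cutoff)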
def remove_to_improve_mov(cur_features, m, df, labels, odds, scale=False, min_ev = 0):
number_of_features = len(cur_features)
df_sel = df[cur_features]
df_sel = df_sel.dropna()
df_sel = pd.get_dummies(df_sel)
labels_sel = labels[labels.index.isin(df_sel.index)]
odds_sel = odds[odds.index.isin(df_sel.index)]
orig_score = custom_cv_eval_mov(df_sel, m, labels_sel, odds_sel, get_total=True, min_ev = min_ev)
best_features = cur_features
best_score = orig_score
print(f"The original score is {orig_score}")
if number_of_features == 0:
return []
if number_of_features == 1:
return cur_features
for z in range(number_of_features):
temp_feature = cur_features[z]  #index the candidate list, not the full frame, so the printed feature name is correct
temp_features = cur_features.copy()
#Remove a feature
del temp_features[z]
df_sel = df[temp_features]
df_sel = df_sel.dropna()
df_sel = pd.get_dummies(df_sel)
labels_sel = labels[labels.index.isin(df_sel.index)]
odds_sel = odds[odds.index.isin(df_sel.index)]
temp_score = custom_cv_eval_mov(df_sel, m, labels_sel, odds_sel, get_total=True, min_ev = min_ev)
if temp_score > best_score:
best_features = temp_features
best_score = temp_score
print(f"NEW BEST FEATURE SET WITH: " + temp_feature + " REMOVED " + str(best_score))
#Get a score
if best_features != cur_features:
return remove_to_improve_mov(best_features, m, df, labels, odds, scale, min_ev)
else:
return best_features
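# Hedged usage sketch (illustrative only): remove_to_improve_mov is greedy backward
# feature elimination; it drops one feature at a time and recurses while the
# cross-validated EV improves. `candidate_features` and `tuned_model` are placeholders.
#
#   kept_features = remove_to_improve_mov(candidate_features, tuned_model, df, labels, odds, min_ev=0)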
def remove_to_improve(cur_features, m, df, labels, odds, scale=False, min_ev = 0):
#If the list is empty we can just return it without doing anything
number_of_features = len(cur_features)
df_sel = df[cur_features]
df_sel = df_sel.dropna()
df_sel = pd.get_dummies(df_sel)
import os
import argparse
from pathlib import Path
import numpy as np
import pandas as pd
from tools.utils import threshold_raw_values
from sklearn.metrics import mean_absolute_error, r2_score, mean_squared_error
from tools.utils import rmse_parameters
def main(
model_outputs: pd.DataFrame,
gt_column: str,
save_dir: str,
save_name: str,
) -> None:
metric_fns = {
"MAE": mean_absolute_error,
"MSE": mean_squared_error,
"RMSE": rmse_parameters(squared=False),
"R2": r2_score,
}
metrics = {
"Threshold": [],
"MAE": [],
"MSE": [],
"RMSE": [],
"R2": [],
}
gt_values = model_outputs[gt_column]
thresholds = [t * 0.01 for t in range(0, 101)]
for threshold in thresholds:
metrics["Threshold"].append(threshold)
threshold_values = model_outputs.apply(
threshold_raw_values,
threshold=threshold,
inference_columns=["lung_segment_" + str(idx+1) for idx in range(6)],
axis=1,
)
threshold_values = np.array(threshold_values)
for metric_name, metrics_fn in metric_fns.items():
metrics[metric_name].append(metrics_fn(gt_values, threshold_values))
save_path = os.path.join(save_dir, save_name)
df_metrics = pd.DataFrame(metrics)
df_metrics.to_csv(save_path, index=False)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--model_outputs_all", type=str)
parser.add_argument("--gt_column", default="GT", type=str)
parser.add_argument("--dataset", default="all", type=str, help="all or a specific dataset name")
parser.add_argument("--label", default="all", type=str, help="all, Normal, or COVID-19")
parser.add_argument("--subset", default="all", type=str, help="all, train, val, or test")
parser.add_argument("--save_dir", default="resources", type=str)
parser.add_argument("--save_name", default="threshold_selection.csv", type=str)
args = parser.parse_args()
df = pd.read_csv(args.model_outputs_all)
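# Hedged usage note (the file name below is a hypothetical placeholder): a typical
# invocation of this threshold-sweep script would be:
#
#   python select_threshold.py --model_outputs_all model_outputs.csv --gt_column GT \
#       --save_dir resources --save_name threshold_selection.csv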
# -*- coding: utf-8 -*-
import os
import subprocess
import time
import click
import pandas as pd
N_SAMPLES = [10, 20, 40, 60, 100, 200]
EPSILON = [0, 0.01, 0.05, 0.1]
BETA_SCALE = [1, 1 / 3, 1 / 9, 1 / 16, 1 / 20]
DELTA = [0.01, 0.05]
SLURM_TEMPLATE = '''#!/bin/bash -l
#SBATCH --chdir ./
#SBATCH --mem 32GB
#SBATCH --ntasks 1
#SBATCH --cpus-per-task 1
#SBATCH --job-name {name}
#SBATCH --time 48:00:00
#SBATCH --partition serial
source /home/kjablonk/anaconda3/bin/activate
conda activate dispersant_screener
python run_pal_on_dispersant_repeats_cli.py {epsilon} {delta} {beta_scale} 1 . {n_samples}
'''
THIS_DIR = os.path.dirname(__file__)
def write_submission_script(counter, epsilon, delta, beta_scale, n_samples):
name = 'ePAL_{}'.format(counter)
script = SLURM_TEMPLATE.format(**{
'name': name,
'epsilon': epsilon,
'delta': delta,
'beta_scale': beta_scale,
'n_samples': n_samples
})
filename = name + '.slurm'
with open(filename, 'w') as fh:
fh.write(script)
return filename
@click.command('cli')
@click.option('--submit', is_flag=True)
def main(submit):
experiments = []
counter = 0
for n_samples in N_SAMPLES:
for epsilon in EPSILON:
for beta_scale in BETA_SCALE:
for delta in DELTA:
experiment = {
'counter': counter,
'n_samples': n_samples,
'epsilon': epsilon,
'beta_scale': beta_scale,
'delta': delta
}
experiments.append(experiment)
SUBMISSIONSCRIPTNAME = write_submission_script(counter, epsilon, delta, beta_scale, n_samples)
if submit:
subprocess.call('sbatch {}'.format(SUBMISSIONSCRIPTNAME), shell=True, cwd='.')
time.sleep(1)
counter += 1
df = pd.DataFrame(experiments)
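# Hedged usage note: the grid above enumerates len(N_SAMPLES) * len(EPSILON) * len(BETA_SCALE)
# * len(DELTA) = 6 * 4 * 5 * 2 = 240 parameter combinations, so 240 .slurm files are written.
# Assuming this file is saved as generate_epal_jobs.py (a hypothetical name):
#
#   python generate_epal_jobs.py            # only write the submission scripts
#   python generate_epal_jobs.py --submit   # also sbatch each script, one second apart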
from __future__ import division #brings in Python 3.0 mixed type calculation rules
import datetime
import inspect
import numpy as np
import numpy.testing as npt
import os.path
import pandas as pd
import sys
from tabulate import tabulate
import unittest
print("Python version: " + sys.version)
print("Numpy version: " + np.__version__)
# #find parent directory and import model
# parent_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir))
# sys.path.append(parent_dir)
from ..trex_exe import Trex
test = {}
class TestTrex(unittest.TestCase):
"""
Unit tests for T-Rex model.
"""
print("trex unittests conducted at " + str(datetime.datetime.today()))
def setUp(self):
"""
Setup routine for trex unit tests.
:return:
"""
pass
# setup the test as needed
# e.g. pandas to open trex qaqc csv
# Read qaqc csv and create pandas DataFrames for inputs and expected outputs
def tearDown(self):
"""
Teardown routine for trex unit tests.
:return:
"""
pass
# teardown called after each test
# e.g. maybe write test results to some text file
def create_trex_object(self):
# create empty pandas dataframes to create empty object for testing
df_empty = pd.DataFrame()
# create an empty trex object
trex_empty = Trex(df_empty, df_empty)
return trex_empty
def test_app_rate_parsing(self):
"""
unittest for function app_rate_testing:
method extracts 1st and maximum from each list in a series of lists of app rates
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
expected_results = pd.Series([], dtype="object")
result = pd.Series([], dtype="object")
expected_results = [[0.34, 0.78, 2.34], [0.34, 3.54, 2.34]]
try:
trex_empty.app_rates = pd.Series([[0.34], [0.78, 3.54], [2.34, 1.384, 2.22]], dtype='object')
# trex_empty.app_rates = ([[0.34], [0.78, 3.54], [2.34, 1.384, 2.22]])
# parse app_rates Series of lists
trex_empty.app_rate_parsing()
result = [trex_empty.first_app_rate, trex_empty.max_app_rate]
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_conc_initial(self):
"""
unittest for function conc_initial:
conc_0 = (app_rate * self.frac_act_ing * food_multiplier)
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
result = pd.Series([], dtype = 'float')
expected_results = [12.7160, 9.8280, 11.2320]
try:
# specify an app_rates Series (that is a series of lists, each list representing
# a set of application rates for 'a' model simulation)
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4]], dtype='float')
trex_empty.food_multiplier_init_sg = pd.Series([110., 15., 240.], dtype='float')
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype='float')
for i in range(len(trex_empty.frac_act_ing)):
result[i] = trex_empty.conc_initial(i, trex_empty.app_rates[i][0], trex_empty.food_multiplier_init_sg[i])
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
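# Worked check of the first expected value (hedged; derived only from the formula quoted
# in the docstring, not from trex_exe itself):
#   conc_0 = app_rate * frac_act_ing * food_multiplier = 0.34 * 0.34 * 110 = 12.716,
# which matches expected_results[0] above.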
def test_conc_timestep(self):
"""
unittest for function conc_timestep:
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
result = pd.Series([], dtype = 'float')
expected_results = [6.25e-5, 0.039685, 7.8886e-30]
try:
trex_empty.foliar_diss_hlife = pd.Series([.25, 0.75, 0.01], dtype='float')
conc_0 = pd.Series([0.001, 0.1, 10.0])
for i in range(len(conc_0)):
result[i] = trex_empty.conc_timestep(i, conc_0[i])
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
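# Hedged observation (an inference, not taken from trex_exe): the expected values are
# consistent with one timestep (day) of first-order foliar decay,
#   conc_t = conc_0 * 0.5 ** (1.0 / foliar_diss_hlife),
# e.g. 0.001 * 0.5 ** (1 / 0.25) = 0.001 * 0.0625 = 6.25e-5 for the first case.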
def test_percent_to_frac(self):
"""
unittest for function percent_to_frac:
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
expected_results = pd.Series([.04556, .1034, .9389], dtype='float')
try:
trex_empty.percent_incorp = pd.Series([4.556, 10.34, 93.89], dtype='float')
result = trex_empty.percent_to_frac(trex_empty.percent_incorp)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_inches_to_feet(self):
"""
unittest for function inches_to_feet:
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
expected_results = pd.Series([0.37966, 0.86166, 7.82416], dtype='float')
try:
trex_empty.bandwidth = pd.Series([4.556, 10.34, 93.89], dtype='float')
result = trex_empty.inches_to_feet(trex_empty.bandwidth)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_at_bird(self):
"""
unittest for function at_bird:
adjusted_toxicity = self.ld50_bird * (aw_bird / self.tw_bird_ld50) ** (self.mineau_sca_fact - 1)
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
result = pd.Series([], dtype = 'float')
expected_results = pd.Series([69.17640, 146.8274, 56.00997], dtype='float')
try:
trex_empty.ld50_bird = pd.Series([100., 125., 90.], dtype='float')
trex_empty.tw_bird_ld50 = pd.Series([175., 100., 200.], dtype='float')
trex_empty.mineau_sca_fact = pd.Series([1.15, 0.9, 1.25], dtype='float')
# following variable is unique to at_bird and is thus sent via arg list
trex_empty.aw_bird_sm = pd.Series([15., 20., 30.], dtype='float')
for i in range(len(trex_empty.aw_bird_sm)):
result[i] = trex_empty.at_bird(i, trex_empty.aw_bird_sm[i])
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
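# Worked check of the first expected value (hedged; using only the formula quoted in the
# docstring): 100. * (15. / 175.) ** (1.15 - 1) ≈ 100. * 0.69176 ≈ 69.176,
# which matches expected_results[0].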
def test_at_bird1(self):
"""
unittest for function at_bird1; alternative approach using more vectorization:
adjusted_toxicity = self.ld50_bird * (aw_bird / self.tw_bird_ld50) ** (self.mineau_sca_fact - 1)
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
result = pd.Series([], dtype = 'float')
expected_results = pd.Series([69.17640, 146.8274, 56.00997], dtype='float')
try:
trex_empty.ld50_bird = pd.Series([100., 125., 90.], dtype='float')
trex_empty.tw_bird_ld50 = pd.Series([175., 100., 200.], dtype='float')
trex_empty.mineau_sca_fact = pd.Series([1.15, 0.9, 1.25], dtype='float')
trex_empty.aw_bird_sm = pd.Series([15., 20., 30.], dtype='float')
# for i in range(len(trex_empty.aw_bird_sm)):
# result[i] = trex_empty.at_bird(i, trex_empty.aw_bird_sm[i])
result = trex_empty.at_bird1(trex_empty.aw_bird_sm)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_fi_bird(self):
"""
unittest for function fi_bird:
food_intake = (0.648 * (aw_bird ** 0.651)) / (1 - mf_w_bird)
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
expected_results = pd.Series([4.19728, 22.7780, 59.31724], dtype='float')
try:
#?? 'mf_w_bird_1' is a constant (i.e., not an input whose value changes per model simulation run); thus it should
#?? be specified here as a constant and not a pd.series -- if this is correct then go ahead and change next line
trex_empty.mf_w_bird_1 = pd.Series([0.1, 0.8, 0.9], dtype='float')
trex_empty.aw_bird_sm = pd.Series([15., 20., 30.], dtype='float')
result = trex_empty.fi_bird(trex_empty.aw_bird_sm, trex_empty.mf_w_bird_1)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
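# Worked check of the first expected value (hedged; from the docstring formula only):
#   0.648 * 15. ** 0.651 / (1 - 0.1) ≈ 3.778 / 0.9 ≈ 4.197,
# which matches expected_results[0].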
def test_sc_bird(self):
"""
unittest for function sc_bird:
m_s_a_r = ((self.app_rate * self.frac_act_ing) / 128) * self.density * 10000 # maximum seed application rate=application rate*10000
risk_quotient = m_s_a_r / self.noaec_bird
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
expected_results = pd.Series([6.637969, 77.805, 34.96289, np.nan], dtype='float')
try:
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4], [3.]], dtype='object')
trex_empty.app_rate_parsing() #get 'first_app_rate' per model simulation run
trex_empty.frac_act_ing = pd.Series([0.15, 0.20, 0.34, np.nan], dtype='float')
trex_empty.density = pd.Series([8.33, 7.98, 6.75, np.nan], dtype='float')
trex_empty.noaec_bird = pd.Series([5., 1.25, 12., np.nan], dtype='float')
result = trex_empty.sc_bird()
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
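# Worked check of the first simulation (hedged; from the docstring formula only):
#   m_s_a_r = ((0.34 * 0.15) / 128) * 8.33 * 10000 ≈ 33.19
#   risk_quotient = 33.19 / 5.0 ≈ 6.638,
# which matches expected_results[0].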
def test_sa_bird_1(self):
"""
# unit test for function sa_bird_1
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
result_sm = pd.Series([], dtype = 'float')
result_md = pd.Series([], dtype = 'float')
result_lg = pd.Series([], dtype = 'float')
expected_results_sm = pd.Series([0.228229, 0.704098, 0.145205], dtype = 'float')
expected_results_md = pd.Series([0.126646, 0.540822, 0.052285], dtype = 'float')
expected_results_lg = pd.Series([0.037707, 0.269804, 0.01199], dtype = 'float')
try:
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype='float')
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4]], dtype='float')
trex_empty.app_rate_parsing() #get 'first_app_rate' per model simulation run
trex_empty.density = pd.Series([8.33, 7.98, 6.75], dtype='float')
# following parameter values are needed for internal call to "test_at_bird"
# results from "test_at_bird" test using these values are [69.17640, 146.8274, 56.00997]
trex_empty.ld50_bird = pd.Series([100., 125., 90.], dtype='float')
trex_empty.tw_bird_ld50 = pd.Series([175., 100., 200.], dtype='float')
trex_empty.mineau_sca_fact = pd.Series([1.15, 0.9, 1.25], dtype='float')
trex_empty.aw_bird_sm = pd.Series([15., 20., 30.], dtype='float')
trex_empty.aw_bird_md = pd.Series([115., 120., 130.], dtype='float')
trex_empty.aw_bird_lg = pd.Series([1015., 1020., 1030.], dtype='float')
#reitierate constants here (they have been set in 'trex_inputs'; repeated here for clarity)
trex_empty.mf_w_bird_1 = 0.1
trex_empty.nagy_bird_coef_sm = 0.02
trex_empty.nagy_bird_coef_md = 0.1
trex_empty.nagy_bird_coef_lg = 1.0
result_sm = trex_empty.sa_bird_1("small")
npt.assert_allclose(result_sm,expected_results_sm,rtol=1e-4, atol=0, err_msg='', verbose=True)
result_md = trex_empty.sa_bird_1("medium")
npt.assert_allclose(result_md,expected_results_md,rtol=1e-4, atol=0, err_msg='', verbose=True)
result_lg = trex_empty.sa_bird_1("large")
npt.assert_allclose(result_lg,expected_results_lg,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab_sm = [result_sm, expected_results_sm]
tab_md = [result_md, expected_results_md]
tab_lg = [result_lg, expected_results_lg]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab_sm, headers='keys', tablefmt='rst'))
print(tabulate(tab_md, headers='keys', tablefmt='rst'))
print(tabulate(tab_lg, headers='keys', tablefmt='rst'))
return
def test_sa_bird_2(self):
"""
# unit test for function sa_bird_2
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
result_sm = pd.Series([], dtype = 'float')
result_md = pd.Series([], dtype = 'float')
result_lg = pd.Series([], dtype = 'float')
expected_results_sm =pd.Series([0.018832, 0.029030, 0.010483], dtype = 'float')
expected_results_md = pd.Series([2.774856e-3, 6.945353e-3, 1.453192e-3], dtype = 'float')
expected_results_lg =pd.Series([2.001591e-4, 8.602729e-4, 8.66163e-5], dtype = 'float')
try:
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype='float')
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4]], dtype='object')
trex_empty.app_rate_parsing() #get 'first_app_rate' per model simulation run
trex_empty.density = pd.Series([8.33, 7.98, 6.75], dtype='float')
trex_empty.max_seed_rate = pd.Series([33.19, 20.0, 45.6])
# following parameter values are needed for internal call to "test_at_bird"
# results from "test_at_bird" test using these values are [69.17640, 146.8274, 56.00997]
trex_empty.ld50_bird = pd.Series([100., 125., 90.], dtype='float')
trex_empty.tw_bird_ld50 = pd.Series([175., 100., 200.], dtype='float')
trex_empty.mineau_sca_fact = pd.Series([1.15, 0.9, 1.25], dtype='float')
trex_empty.aw_bird_sm = pd.Series([15., 20., 30.], dtype='float')
trex_empty.aw_bird_md = pd.Series([115., 120., 130.], dtype='float')
trex_empty.aw_bird_lg = pd.Series([1015., 1020., 1030.], dtype='float')
#reitierate constants here (they have been set in 'trex_inputs'; repeated here for clarity)
trex_empty.nagy_bird_coef_sm = 0.02
trex_empty.nagy_bird_coef_md = 0.1
trex_empty.nagy_bird_coef_lg = 1.0
result_sm = trex_empty.sa_bird_2("small")
npt.assert_allclose(result_sm,expected_results_sm,rtol=1e-4, atol=0, err_msg='', verbose=True)
result_md = trex_empty.sa_bird_2("medium")
npt.assert_allclose(result_md,expected_results_md,rtol=1e-4, atol=0, err_msg='', verbose=True)
result_lg = trex_empty.sa_bird_2("large")
npt.assert_allclose(result_lg,expected_results_lg,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab_sm = [result_sm, expected_results_sm]
tab_md = [result_md, expected_results_md]
tab_lg = [result_lg, expected_results_lg]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab_sm, headers='keys', tablefmt='rst'))
print(tabulate(tab_md, headers='keys', tablefmt='rst'))
print(tabulate(tab_lg, headers='keys', tablefmt='rst'))
return
def test_sa_mamm_1(self):
"""
# unit test for function sa_mamm_1
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
result_sm = pd.Series([], dtype = 'float')
result_md = pd.Series([], dtype = 'float')
result_lg = pd.Series([], dtype = 'float')
expected_results_sm =pd.Series([0.022593, 0.555799, 0.010178], dtype = 'float')
expected_results_md = pd.Series([0.019298, 0.460911, 0.00376], dtype = 'float')
expected_results_lg =pd.Series([0.010471, 0.204631, 0.002715], dtype = 'float')
try:
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype='float')
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4]], dtype='object')
trex_empty.app_rate_parsing() #get 'first_app_rate' per model simulation run
trex_empty.density = pd.Series([8.33, 7.98, 6.75], dtype='float')
# following parameter values are needed for internal call to "test_at_bird"
# results from "test_at_bird" test using these values are [69.17640, 146.8274, 56.00997]
trex_empty.tw_mamm = pd.Series([350., 225., 390.], dtype='float')
trex_empty.ld50_mamm = pd.Series([321., 100., 400.], dtype='float')
trex_empty.aw_mamm_sm = pd.Series([15., 20., 30.], dtype='float')
trex_empty.aw_mamm_md = pd.Series([35., 45., 25.], dtype='float')
trex_empty.aw_mamm_lg = pd.Series([1015., 1020., 1030.], dtype='float')
#reitierate constants here (they have been set in 'trex_inputs'; repeated here for clarity)
trex_empty.mf_w_bird_1 = 0.1
trex_empty.nagy_mamm_coef_sm = 0.015
trex_empty.nagy_mamm_coef_md = 0.035
trex_empty.nagy_mamm_coef_lg = 1.0
result_sm = trex_empty.sa_mamm_1("small")
npt.assert_allclose(result_sm,expected_results_sm,rtol=1e-4, atol=0, err_msg='', verbose=True)
result_md = trex_empty.sa_mamm_1("medium")
npt.assert_allclose(result_md,expected_results_md,rtol=1e-4, atol=0, err_msg='', verbose=True)
result_lg = trex_empty.sa_mamm_1("large")
npt.assert_allclose(result_lg,expected_results_lg,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab_sm = [result_sm, expected_results_sm]
tab_md = [result_md, expected_results_md]
tab_lg = [result_lg, expected_results_lg]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab_sm, headers='keys', tablefmt='rst'))
print(tabulate(tab_md, headers='keys', tablefmt='rst'))
print(tabulate(tab_lg, headers='keys', tablefmt='rst'))
return
def test_sa_mamm_2(self):
"""
# unit test for function sa_mamm_2
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
result_sm = pd.Series([], dtype = 'float')
result_md = pd.Series([], dtype = 'float')
result_lg = pd.Series([], dtype = 'float')
expected_results_sm =pd.Series([2.46206e-3, 3.103179e-2, 1.03076e-3], dtype = 'float')
expected_results_md = pd.Series([1.304116e-3, 1.628829e-2, 4.220702e-4], dtype = 'float')
expected_results_lg =pd.Series([1.0592147e-4, 1.24391489e-3, 3.74263186e-5], dtype = 'float')
try:
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype='float')
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4]], dtype='object')
trex_empty.app_rate_parsing() #get 'first_app_rate' per model simulation run
trex_empty.density = pd.Series([8.33, 7.98, 6.75], dtype='float')
trex_empty.max_seed_rate = pd.Series([33.19, 20.0, 45.6])
# following parameter values are needed for internal call to "test_at_bird"
# results from "test_at_bird" test using these values are [69.17640, 146.8274, 56.00997]
trex_empty.tw_mamm = pd.Series([350., 225., 390.], dtype='float')
trex_empty.ld50_mamm = pd.Series([321., 100., 400.], dtype='float')
trex_empty.aw_mamm_sm = pd.Series([15., 20., 30.], dtype='float')
trex_empty.aw_mamm_md = pd.Series([35., 45., 25.], dtype='float')
trex_empty.aw_mamm_lg = pd.Series([1015., 1020., 1030.], dtype='float')
#reitierate constants here (they have been set in 'trex_inputs'; repeated here for clarity)
trex_empty.mf_w_mamm_1 = 0.1
trex_empty.nagy_mamm_coef_sm = 0.015
trex_empty.nagy_mamm_coef_md = 0.035
trex_empty.nagy_mamm_coef_lg = 1.0
result_sm = trex_empty.sa_mamm_2("small")
npt.assert_allclose(result_sm,expected_results_sm,rtol=1e-4, atol=0, err_msg='', verbose=True)
result_md = trex_empty.sa_mamm_2("medium")
npt.assert_allclose(result_md,expected_results_md,rtol=1e-4, atol=0, err_msg='', verbose=True)
result_lg = trex_empty.sa_mamm_2("large")
npt.assert_allclose(result_lg,expected_results_lg,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab_sm = [result_sm, expected_results_sm]
tab_md = [result_md, expected_results_md]
tab_lg = [result_lg, expected_results_lg]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab_sm, headers='keys', tablefmt='rst'))
print(tabulate(tab_md, headers='keys', tablefmt='rst'))
print(tabulate(tab_lg, headers='keys', tablefmt='rst'))
return
def test_sc_mamm(self):
"""
# unit test for function sc_mamm
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
result_sm = pd.Series([], dtype = 'float')
result_md = pd.Series([], dtype = 'float')
result_lg = pd.Series([], dtype = 'float')
expected_results_sm =pd.Series([2.90089, 15.87995, 8.142130], dtype = 'float')
expected_results_md = pd.Series([2.477926, 13.16889, 3.008207], dtype = 'float')
expected_results_lg =pd.Series([1.344461, 5.846592, 2.172211], dtype = 'float')
try:
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype='float')
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4]], dtype='object')
trex_empty.app_rate_parsing() #get 'first_app_rate' per model simulation run
trex_empty.density = pd.Series([8.33, 7.98, 6.75], dtype='float')
# following parameter values are needed for internal call to "test_at_bird"
# results from "test_at_bird" test using these values are [69.17640, 146.8274, 56.00997]
trex_empty.tw_mamm = pd.Series([350., 225., 390.], dtype='float')
trex_empty.noael_mamm = pd.Series([2.5, 3.5, 0.5], dtype='float')
trex_empty.aw_mamm_sm = pd.Series([15., 20., 30.], dtype='float')
trex_empty.aw_mamm_md = pd.Series([35., 45., 25.], dtype='float')
trex_empty.aw_mamm_lg = pd.Series([1015., 1020., 1030.], dtype='float')
#reitierate constants here (they have been set in 'trex_inputs'; repeated here for clarity)
trex_empty.mf_w_mamm_1 = 0.1
trex_empty.nagy_mamm_coef_sm = 0.015
trex_empty.nagy_mamm_coef_md = 0.035
trex_empty.nagy_mamm_coef_lg = 1.0
result_sm = trex_empty.sc_mamm("small")
npt.assert_allclose(result_sm,expected_results_sm,rtol=1e-4, atol=0, err_msg='', verbose=True)
result_md = trex_empty.sc_mamm("medium")
npt.assert_allclose(result_md,expected_results_md,rtol=1e-4, atol=0, err_msg='', verbose=True)
result_lg = trex_empty.sc_mamm("large")
npt.assert_allclose(result_lg,expected_results_lg,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab_sm = [result_sm, expected_results_sm]
tab_md = [result_md, expected_results_md]
tab_lg = [result_lg, expected_results_lg]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab_sm, headers='keys', tablefmt='rst'))
print(tabulate(tab_md, headers='keys', tablefmt='rst'))
print(tabulate(tab_lg, headers='keys', tablefmt='rst'))
return
def test_ld50_rg_bird(self):
"""
# unit test for function ld50_rg_bird (LD50ft-2 for Row/Band/In-furrow granular birds)
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
result = pd.Series([], dtype = 'float')
expected_results = pd.Series([346.4856, 25.94132, np.nan], dtype='float')
try:
# following parameter values are unique for ld50_bg_bird
trex_empty.application_type = pd.Series(['Row/Band/In-furrow-Granular',
'Row/Band/In-furrow-Granular',
'Row/Band/In-furrow-Liquid'], dtype='object')
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype='float')
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4]], dtype='object')
trex_empty.app_rate_parsing() #get 'max app rate' per model simulation run
trex_empty.frac_incorp = pd.Series([0.25, 0.76, 0.05], dtype= 'float')
trex_empty.bandwidth = pd.Series([2., 10., 30.], dtype = 'float')
trex_empty.row_spacing = pd.Series([20., 32., 50.], dtype = 'float')
# following parameter values are needed for internal call to "test_at_bird"
# results from "test_at_bird" test using these values are [69.17640, 146.8274, 56.00997]
trex_empty.ld50_bird = pd.Series([100., 125., 90.], dtype='float')
trex_empty.tw_bird_ld50 = pd.Series([175., 100., 200.], dtype='float')
trex_empty.mineau_sca_fact = pd.Series([1.15, 0.9, 1.25], dtype='float')
trex_empty.aw_bird_sm = pd.Series([15., 20., 30.], dtype='float')
result = trex_empty.ld50_rg_bird(trex_empty.aw_bird_sm)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0,
equal_nan=True, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_ld50_rg_bird1(self):
"""
# unit test for function ld50_rg_bird1 (LD50ft-2 for Row/Band/In-furrow granular birds)
this is a duplicate of the 'test_ld50_rg_bird' method using a more vectorized approach to the
calculations; if desired other routines could be modified similarly
--comparing this method with 'test_ld50_rg_bird' it appears (for this test) that both run in the same time
--but I don't think this would be the case when 100's of model simulation runs are executed (and only a small
--number of the application_types apply to this method; thus I conclude we continue to use the non-vectorized
--approach -- should be revisited when we have a large run to execute
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
result = pd.Series([], dtype = 'float')
expected_results = pd.Series([346.4856, 25.94132, np.nan], dtype='float')
try:
# following parameter values are unique for ld50_bg_bird
trex_empty.application_type = pd.Series(['Row/Band/In-furrow-Granular',
'Row/Band/In-furrow-Granular',
'Row/Band/In-furrow-Liquid'], dtype='object')
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype='float')
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4]], dtype='object')
trex_empty.app_rate_parsing() #get 'max app rate' per model simulation run
trex_empty.frac_incorp = pd.Series([0.25, 0.76, 0.05], dtype= 'float')
trex_empty.bandwidth = pd.Series([2., 10., 30.], dtype = 'float')
trex_empty.row_spacing = pd.Series([20., 32., 50.], dtype = 'float')
# following parameter values are needed for internal call to "test_at_bird"
# results from "test_at_bird" test using these values are [69.17640, 146.8274, 56.00997]
trex_empty.ld50_bird = pd.Series([100., 125., 90.], dtype='float')
trex_empty.tw_bird_ld50 = pd.Series([175., 100., 200.], dtype='float')
trex_empty.mineau_sca_fact = pd.Series([1.15, 0.9, 1.25], dtype='float')
trex_empty.aw_bird_sm = pd.Series([15., 20., 30.], dtype='float')
result = trex_empty.ld50_rg_bird1(trex_empty.aw_bird_sm)
npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, equal_nan=True, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_ld50_bl_bird(self):
"""
# unit test for function ld50_bl_bird (LD50ft-2 for broadcast liquid birds)
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
expected_results = pd.Series([46.19808, 33.77777, np.nan], dtype='float')
try:
# following parameter values are unique for ld50_bl_bird
trex_empty.application_type = pd.Series(['Broadcast-Liquid', 'Broadcast-Liquid',
'Non-Broadcast'], dtype='object')
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype='float')
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4]], dtype='object')
# following parameter values are needed for internal call to "test_at_bird"
# results from "test_at_bird" test using these values are [69.17640, 146.8274, 56.00997]
trex_empty.ld50_bird = pd.Series([100., 125., 90.], dtype='float')
trex_empty.tw_bird_ld50 = pd.Series([175., 100., 200.], dtype='float')
trex_empty.mineau_sca_fact = pd.Series([1.15, 0.9, 1.25], dtype='float')
trex_empty.aw_bird_sm = pd.Series([15., 20., 30.], dtype='float')
result = trex_empty.ld50_bl_bird(trex_empty.aw_bird_sm)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0,
err_msg='', verbose=True, equal_nan=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_ld50_bg_bird(self):
"""
# unit test for function ld50_bg_bird (LD50ft-2 for broadcast granular)
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
expected_results = pd.Series([46.19808, np.nan, 0.4214033], dtype='float')
try:
# following parameter values are unique for ld50_bg_bird
trex_empty.application_type = pd.Series(['Broadcast-Granular', 'Broadcast-Liquid',
'Broadcast-Granular'], dtype='object')
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype='float')
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4]], dtype='object')
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype='float')
# following parameter values are needed for internal call to "test_at_bird"
# results from "test_at_bird" test using these values are [69.17640, 146.8274, 56.00997]
trex_empty.ld50_bird = pd.Series([100., 125., 90.], dtype='float')
trex_empty.tw_bird_ld50 = pd.Series([175., 100., 200.], dtype='float')
trex_empty.mineau_sca_fact = pd.Series([1.15, 0.9, 1.25], dtype='float')
trex_empty.aw_bird_sm = pd.Series([15., 20., 30.], dtype='float')
result = trex_empty.ld50_bg_bird(trex_empty.aw_bird_sm)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0,
err_msg='', verbose=True, equal_nan=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_ld50_rl_bird(self):
"""
# unit test for function ld50_rl_bird (LD50ft-2 for Row/Band/In-furrow liquid birds)
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
expected_results = pd.Series([np.nan, 2.20701, 0.0363297], dtype='float')
try:
# following parameter values are unique for ld50_rl_bird
trex_empty.application_type = pd.Series(['Broadcast-Granular', 'Row/Band/In-furrow-Liquid',
'Row/Band/In-furrow-Liquid'], dtype='object')
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype='float')
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4]], dtype='object')
trex_empty.frac_incorp = pd.Series([0.25, 0.76, 0.05], dtype= 'float')
trex_empty.bandwidth = pd.Series([2., 10., 30.], dtype = 'float')
# following parameter values are needed for internal call to "test_at_bird"
# results from "test_at_bird" test using these values are [69.17640, 146.8274, 56.00997]
trex_empty.ld50_bird = pd.Series([100., 125., 90.], dtype='float')
trex_empty.tw_bird_ld50 = pd.Series([175., 100., 200.], dtype='float')
trex_empty.mineau_sca_fact = pd.Series([1.15, 0.9, 1.25], dtype='float')
trex_empty.aw_bird_sm = pd.Series([15., 20., 30.], dtype='float')
result = trex_empty.ld50_rl_bird(trex_empty.aw_bird_sm)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0,
err_msg='', verbose=True, equal_nan=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_at_mamm(self):
"""
unittest for function at_mamm:
adjusted_toxicity = self.ld50_mamm * ((self.tw_mamm / aw_mamm) ** 0.25)
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
result = pd.Series([], dtype = 'float')
expected_results = pd.Series([705.5036, 529.5517, 830.6143], dtype='float')
try:
trex_empty.ld50_mamm = pd.Series([321., 275., 432.], dtype='float')
trex_empty.tw_mamm = pd.Series([350., 275., 410.], dtype='float')
trex_empty.aw_mamm_sm = pd.Series([15., 20., 30.], dtype='float')
for i in range(len(trex_empty.ld50_mamm)):
result[i] = trex_empty.at_mamm(i, trex_empty.aw_mamm_sm[i])
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_anoael_mamm(self):
"""
unittest for function anoael_mamm:
adjusted_toxicity = self.noael_mamm * ((self.tw_mamm / aw_mamm) ** 0.25)
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
expected_results = pd.Series([5.49457, 9.62821, 2.403398], dtype='float')
try:
trex_empty.noael_mamm = pd.Series([2.5, 5.0, 1.25], dtype='float')
trex_empty.tw_mamm = pd.Series([350., 275., 410.], dtype='float')
trex_empty.aw_mamm_sm = pd.Series([15., 20., 30.], dtype='float')
result = trex_empty.anoael_mamm(trex_empty.aw_mamm_sm)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_fi_mamm(self):
"""
unittest for function fi_mamm:
food_intake = (0.621 * (aw_mamm ** 0.564)) / (1 - mf_w_mamm)
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
expected_results = pd.Series([3.17807, 16.8206, 42.28516], dtype='float')
try:
trex_empty.mf_w_mamm_1 = pd.Series([0.1, 0.8, 0.9], dtype='float')
trex_empty.aw_mamm_sm = pd.Series([15., 20., 30.], dtype='float')
result = trex_empty.fi_mamm(trex_empty.aw_mamm_sm, trex_empty.mf_w_mamm_1)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_ld50_bl_mamm(self):
"""
# unit test for function ld50_bl_mamm (LD50ft-2 for broadcast liquid)
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
expected_results = pd.Series([4.52983, 9.36547, np.nan], dtype='float')
try:
# following parameter values are unique for ld50_bl_mamm
trex_empty.application_type = pd.Series(['Broadcast-Liquid', 'Broadcast-Liquid',
'Non-Broadcast'], dtype='object')
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype='float')
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4]], dtype='object')
# following parameter values are needed for internal call to "test_at_mamm"
# results from "test_at_mamm" test using these values are [705.5036, 529.5517, 830.6143]
trex_empty.ld50_mamm = pd.Series([321., 275., 432.], dtype='float')
trex_empty.tw_mamm = pd.Series([350., 275., 410.], dtype='float')
trex_empty.aw_mamm_sm = pd.Series([15., 20., 30.], dtype='float')
result = trex_empty.ld50_bl_mamm(trex_empty.aw_mamm_sm)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='',
verbose=True, equal_nan=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_ld50_bg_mamm(self):
"""
# unit test for function ld50_bg_mamm (LD50ft-2 for broadcast granular)
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
expected_results = pd.Series([4.52983, 9.36547, np.nan], dtype='float')
try:
# following parameter values are unique for ld50_bg_mamm
trex_empty.application_type = pd.Series(['Broadcast-Granular', 'Broadcast-Granular',
'Broadcast-Liquid'], dtype='object')
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype='float')
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4]], dtype='object')
# following parameter values are needed for internal call to "at_mamm"
# results from "test_at_mamm" test using these values are [705.5036, 529.5517, 830.6143]
trex_empty.ld50_mamm = pd.Series([321., 275., 432.], dtype='float')
trex_empty.tw_mamm = pd.Series([350., 275., 410.], dtype='float')
trex_empty.aw_mamm_sm = pd.Series([15., 20., 30.], dtype='float')
result = trex_empty.ld50_bg_mamm(trex_empty.aw_mamm_sm)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0,
err_msg='', verbose=True, equal_nan=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_ld50_rl_mamm(self):
"""
# unit test for function ld50_rl_mamm (LD50ft-2 for Row/Band/In-furrow liquid mammals)
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
expected_results = pd.Series([np.nan, 0.6119317, 0.0024497], dtype='float')
try:
# following parameter values are unique for ld50_rl_mamm
trex_empty.application_type = pd.Series(['Broadcast-Granular',
'Row/Band/In-furrow-Liquid',
'Row/Band/In-furrow-Liquid',], dtype='object')
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4]], dtype='object')
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype='float')
trex_empty.frac_incorp = pd.Series([0.25, 0.76, 0.05], dtype= 'float')
# following parameter values are needed for internal call to "at_mamm"
# results from "test_at_mamm" test using these values are [705.5036, 529.5517, 830.6143]
trex_empty.ld50_mamm = pd.Series([321., 275., 432.], dtype='float')
trex_empty.tw_mamm = pd.Series([350., 275., 410.], dtype='float')
trex_empty.aw_mamm_sm = pd.Series([15., 20., 30.], dtype='float')
trex_empty.bandwidth = pd.Series([2., 10., 30.], dtype = 'float')
result = trex_empty.ld50_rl_mamm(trex_empty.aw_mamm_sm)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0,
err_msg='', verbose=True, equal_nan=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_ld50_rg_mamm(self):
"""
# unit test for function ld50_rg_mamm
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
expected_results = pd.Series([33.9737, 7.192681, np.nan], dtype='float')
try:
# following parameter values are unique for ld50_rg_mamm
trex_empty.application_type = pd.Series(['Row/Band/In-furrow-Granular',
'Row/Band/In-furrow-Granular',
'Row/Band/In-furrow-Liquid',], dtype='object')
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4]], dtype='object')
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype='float')
trex_empty.frac_incorp = pd.Series([0.25, 0.76, 0.05], dtype= 'float')
trex_empty.bandwidth = pd.Series([2., 10., 30.], dtype = 'float')
trex_empty.row_spacing = pd.Series([20., 32., 50.], dtype = 'float')
# following parameter values are needed for internal call to "at_mamm"
# results from "test_at_mamm" test using these values are [705.5036, 529.5517, 830.6143]
trex_empty.ld50_mamm = pd.Series([321., 275., 432.], dtype='float')
trex_empty.tw_mamm = pd.Series([350., 275., 410.], dtype='float')
trex_empty.aw_mamm_sm = pd.Series([15., 20., 30.], dtype='float')
result = trex_empty.ld50_rg_mamm(trex_empty.aw_mamm_sm)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0,
err_msg='', verbose=True, equal_nan=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_eec_diet_max(self):
"""
combined unit test for methods eec_diet_max & eec_diet_timeseries;
* this test calls eec_diet_max, which in turn calls eec_diet_timeseries (which produces
concentration timeseries), which in turn calls conc_initial and conc_timestep
* eec_diet_max processes the timeseries and extracts the maximum values
* this test tests both eec_diet_max & eec_diet_timeseries together (ok, so this violates the exact definition of 'unittest', get over it)
* the assertion check is that the maximum values from the timeseries match expectations
* this assumes that for the maximums to be 'as expected' then the timeseries are as well
* note: the 1st application day ('day_out') for the 2nd model simulation run is set to 0 here
* to make sure the timeseries processing works when an application occurs on 1st day of year
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
expected_results = pd.Series([1.734, 145.3409, 0.702], dtype='float')
num_app_days = pd.Series([], dtype='int')
try:
#specifying 3 different application scenarios of 1, 4, and 2 applications
trex_empty.app_rates = pd.Series([[0.34], [0.78, 11.34, 3.54, 1.54], [2.34, 1.384]], dtype='object')
trex_empty.day_out = pd.Series([[5], [0, 10, 20, 50], [150, 250]], dtype='object')
for i in range(len(trex_empty.app_rates)):
trex_empty.num_apps[i] = len(trex_empty.app_rates[i])
num_app_days[i] = len(trex_empty.day_out[i])
assert (trex_empty.num_apps[i] == num_app_days[i]), 'series of app-rates and app_days do not match'
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02])
trex_empty.food_multiplier_init_sg = 15.
trex_empty.foliar_diss_hlife = pd.Series([25., 5., 45.])
result = trex_empty.eec_diet_max(trex_empty.food_multiplier_init_sg)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_eec_dose_bird(self):
"""
unit test for function eec_dose_bird;
internal call to 'eec_diet_max' --> 'eec_diet_timeseries' --> 'conc_initial' and 'conc_timestep' are included;
internal call to 'fi_bird' included
unit tests of this routine include the following approach:
* this test verifies that the logic & calculations performed within the 'eec_dose_bird' are correctly implemented
* methods called inside of 'eec_dose_bird' are not retested/recalculated
* only the correct passing of variables/values is verified (calculations having been verified in previous unittests)
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
expected_results = pd.Series([7.763288, 2693.2339, 22.20837], dtype='float')
num_app_days = pd.Series([], dtype='int')
try:
trex_empty.app_rates = pd.Series([[0.34], [0.78, 11.34, 3.54, 1.54], [2.34, 1.384]], dtype='object')
trex_empty.day_out = pd.Series([[5], [5, 10, 20, 50], [150, 250]], dtype='object')
for i in range(len(trex_empty.app_rates)):
trex_empty.num_apps[i] = len(trex_empty.app_rates[i])
num_app_days[i] = len(trex_empty.day_out[i])
assert (trex_empty.num_apps[i] == num_app_days[i]), 'list of app-rates and app_days do not match'
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02])
trex_empty.food_multiplier_init_sg = 240.
trex_empty.foliar_diss_hlife = pd.Series([25., 5., 45.])
# variables for 'fi_bird' (values reflect unittest for 'at_bird')
trex_empty.ld50_bird = pd.Series([100., 125., 90.], dtype='float')
trex_empty.tw_bird_ld50 = pd.Series([175., 100., 200.], dtype='float')
trex_empty.mineau_sca_fact = pd.Series([1.15, 0.9, 1.25], dtype='float')
trex_empty.aw_bird_sm = pd.Series([15., 20., 30.], dtype='float')
trex_empty.mf_w_bird_1 = pd.Series([0.1, 0.8, 0.9], dtype='float')
result = trex_empty.eec_dose_bird(trex_empty.aw_bird_sm, trex_empty.mf_w_bird_1,
trex_empty.food_multiplier_init_sg)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_arq_dose_bird(self):
"""
unit test for function arq_dose_bird;
internal calls to 'eec_diet_max' --> 'eec_diet_timeseries' --> 'conc_initial' and 'conc_timestep' are included
unit tests of this routine include the following approach:
* this test verifies that the logic & calculations performed within the 'arq_dose_bird' are correctly implemented
* methods called inside of 'arq_dose_bird' are not retested/recalculated
* only the correct passing of variables/values is verified (calculations having been verified in previous unittests)
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
expected_results = pd.Series([0.007014, 1.146429, 0.02478172], dtype='float')
num_app_days = pd.Series([], dtype='int')
try:
#specifying 3 different application scenarios of 1, 4, and 2 applications
trex_empty.app_rates = pd.Series([[0.34], [0.78, 11.34, 3.54, 1.54], [2.34, 1.384]], dtype='object')
trex_empty.day_out = | pd.Series([[5], [5, 10, 20, 50], [150, 250]], dtype='object') | pandas.Series |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2019 <NAME> <<EMAIL>>
#
# Distributed under terms of the MIT license.
"""Clean the raw quantities from the dynamics analysis.
This is a collection of utilities for cleaning up the raw data from the calculation of
the dynamics.
"""
import logging
import sys
from pathlib import Path
from typing import Any, Dict, List, Tuple
import click
import numpy as np
import pandas as pd
import scipy.stats
import sdanalysis
from sdanalysis.relaxation import series_relaxation_value
from .util import normalised_temperature
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
def _read_temperatures(filename: Path) -> Dict[float, float]:
"""Read temperatures from a CSV file and format for simple translation.
Args:
filename: An input file which contains the melting points for each pressure.
"""
df = | pd.read_csv(filename) | pandas.read_csv |
# -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import absolute_import
# python 2/3 compatibility
try:
basestring
except NameError:
basestring = str
import string
import os
import copy
import sys
import pandas as pds
import numpy as np
import xarray as xr
from . import _custom
from . import _files
from . import _orbits
from . import _meta
from . import utils
from pysat import data_dir
from pysat import DataFrame, Series
# main class for users
class Instrument(object):
"""Download, load, manage, modify and analyze science data.
Parameters
----------
platform : string
name of platform/satellite.
name : string
name of instrument.
tag : string, optional
identifies particular subset of instrument data.
sat_id : string, optional
identity within constellation
clean_level : {'clean','dusty','dirty','none'}, optional
level of data quality
pad : pandas.DateOffset, or dictionary, optional
Length of time to pad the begining and end of loaded data for
time-series processing. Extra data is removed after applying all
custom functions. Dictionary, if supplied, is simply passed to
pandas DateOffset.
orbit_info : dict
Orbit information, {'index':index, 'kind':kind, 'period':period}.
See pysat.Orbits for more information.
inst_module : module, optional
Provide instrument module directly.
Takes precedence over platform/name.
update_files : boolean, optional
If True, immediately query filesystem for instrument files and store.
temporary_file_list : boolean, optional
If true, the list of Instrument files will not be written to disk.
Prevents a race condition when running multiple pysat processes.
multi_file_day : boolean, optional
Set to True if Instrument data files for a day are spread across
multiple files and data for day n could be found in a file
with a timestamp of day n-1 or n+1.
manual_org : bool
if True, then pysat will look directly in pysat data directory
for data files and will not use default /platform/name/tag
directory_format : str
directory naming structure in string format. Variables such as
platform, name, and tag will be filled in as needed using python
string formatting. The default directory structure would be
expressed as '{platform}/{name}/{tag}'
file_format : str or NoneType
File naming structure in string format. Variables such as year,
month, and sat_id will be filled in as needed using python string
formatting. The default file format structure is supplied in the
instrument list_files routine.
units_label : str
String used to label units in storage. Defaults to 'units'.
name_label : str
String used to label long_name in storage. Defaults to 'name'.
notes_label : str
label to use for notes in storage. Defaults to 'notes'
desc_label : str
label to use for variable descriptions in storage. Defaults to 'desc'
plot_label : str
label to use to label variables in plots. Defaults to 'label'
axis_label : str
label to use for axis on a plot. Defaults to 'axis'
scale_label : str
label to use for plot scaling type in storage. Defaults to 'scale'
min_label : str
label to use for typical variable value min limit in storage.
Defaults to 'value_min'
max_label : str
label to use for typical variable value max limit in storage.
Defaults to 'value_max'
fill_label : str
label to use for fill values. Defaults to 'fill' but some implementations
will use 'FillVal'
Attributes
----------
data : pandas.DataFrame
loaded science data
date : pandas.datetime
date for loaded data
yr : int
year for loaded data
bounds : (datetime/filename/None, datetime/filename/None)
bounds for loading data, supply array_like for a season with gaps
doy : int
day of year for loaded data
files : pysat.Files
interface to instrument files
meta : pysat.Meta
interface to instrument metadata, similar to netCDF 1.6
orbits : pysat.Orbits
interface to extracting data orbit-by-orbit
custom : pysat.Custom
interface to instrument nano-kernel
kwargs : dictionary
keyword arguments passed to instrument loading routine
Note
----
Pysat attempts to load the module platform_name.py located in
the pysat/instruments directory. This module provides the underlying
functionality to download, load, and clean instrument data.
Alternatively, the module may be supplied directly
using keyword inst_module.
Examples
--------
::
# 1-second mag field data
vefi = pysat.Instrument(platform='cnofs',
name='vefi',
tag='dc_b',
clean_level='clean')
start = pysat.datetime(2009,1,1)
stop = pysat.datetime(2009,1,2)
vefi.download(start, stop)
vefi.load(date=start)
print(vefi['dB_mer'])
print(vefi.meta['db_mer'])
# 1-second thermal plasma parameters
ivm = pysat.Instrument(platform='cnofs',
name='ivm',
tag='',
clean_level='clean')
ivm.download(start,stop)
ivm.load(2009,1)
print(ivm['ionVelmeridional'])
# Ionosphere profiles from GPS occultation
cosmic = pysat.Instrument('cosmic2013',
'gps',
'ionprf',
altitude_bin=3)
# bins profile using 3 km step
cosmic.download(start, stop, user=user, password=password)
cosmic.load(date=start)
"""
def __init__(self, platform=None, name=None, tag=None, sat_id=None,
clean_level='clean', update_files=None, pad=None,
orbit_info=None, inst_module=None, multi_file_day=None,
manual_org=None, directory_format=None, file_format=None,
temporary_file_list=False, units_label='units',
name_label='long_name', notes_label='notes', desc_label='desc',
plot_label='label', axis_label='axis', scale_label='scale',
min_label='value_min', max_label='value_max',
fill_label = 'fill', *arg, **kwargs):
if inst_module is None:
# use strings to look up module name
if isinstance(platform, str) and isinstance(name, str):
self.platform = platform.lower()
self.name = name.lower()
# look to module for instrument functions and defaults
self._assign_funcs(by_name=True)
elif (platform is None) and (name is None):
# creating "empty" Instrument object with this path
self.name = ''
self.platform = ''
self._assign_funcs()
else:
raise ValueError('Inputs platform and name must both be ' +
'strings, or both None.')
else:
# user has provided a module
try:
# platform and name are expected to be part of module
self.name = inst_module.name.lower()
self.platform = inst_module.platform.lower()
except AttributeError:
raise AttributeError(' '.join(('A name and platform',
'attribute for the',
'instrument is required if',
'supplying routine module',
'directly.')))
# look to module for instrument functions and defaults
self._assign_funcs(inst_module=inst_module)
# more reasonable defaults for optional parameters
self.tag = tag.lower() if tag is not None else ''
self.sat_id = sat_id.lower() if sat_id is not None else ''
self.clean_level = (clean_level.lower() if clean_level is not None
else 'none')
# assign_func sets some instrument defaults, direct info rules all
if directory_format is not None:
self.directory_format = directory_format.lower()
# value not provided by user, check if there is a value provided by
# instrument module
elif self.directory_format is not None:
try:
# check if it is a function
self.directory_format = self.directory_format(tag, sat_id)
except TypeError:
pass
if file_format is not None:
self.file_format = file_format
# check to make sure value is reasonable
if self.file_format is not None:
# check if it is an iterable string. If it isn't formatted
# properly, raise Error
if (not isinstance(self.file_format, str) or
(self.file_format.find("{") < 0) or
(self.file_format.find("}") < 0)):
estr = 'file format set to default, supplied string must be '
estr = '{:s}iterable [{:}]'.format(estr, self.file_format)
raise ValueError(estr)
# set up empty data and metadata
# check if pandas or xarray format
if self.pandas_format:
self._null_data = DataFrame(None)
self._data_library = DataFrame
else:
self._null_data = xr.Dataset(None)
self._data_library = xr.Dataset
self.data = self._null_data.copy()
# create Meta instance with appropriate labels
self.units_label = units_label
self.name_label = name_label
self.notes_label = notes_label
self.desc_label = desc_label
self.plot_label = plot_label
self.axis_label = axis_label
self.scale_label = scale_label
self.min_label = min_label
self.max_label = max_label
self.fill_label = fill_label
self.meta = _meta.Meta(units_label=self.units_label,
name_label=self.name_label,
notes_label=self.notes_label,
desc_label=self.desc_label,
plot_label=self.plot_label,
axis_label=self.axis_label,
scale_label=self.scale_label,
min_label=self.min_label,
max_label=self.max_label,
fill_label=self.fill_label)
# function processing class, processes data on load
self.custom = _custom.Custom()
# create arrays to store data around loaded day
# enables padding across day breaks with minimal loads
self._next_data = self._null_data.copy()
self._next_data_track = []
self._prev_data = self._null_data.copy()
self._prev_data_track = []
self._curr_data = self._null_data.copy()
# multi file day, default set by assign_funcs
if multi_file_day is not None:
self.multi_file_day = multi_file_day
# arguments for padding
if isinstance(pad, pds.DateOffset):
self.pad = pad
elif isinstance(pad, dict):
self.pad = pds.DateOffset(**pad)
elif pad is None:
self.pad = None
else:
estr = 'pad must be a dictionary or a pandas.DateOffset instance.'
raise ValueError(estr)
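# usage note: a dict such as pad={'minutes': 5} above is equivalent to pds.DateOffset(minutes=5)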
# instantiate Files class
manual_org = False if manual_org is None else manual_org
temporary_file_list = not temporary_file_list
self.files = _files.Files(self, manual_org=manual_org,
directory_format=self.directory_format,
update_files=update_files,
file_format=self.file_format,
write_to_disk=temporary_file_list)
# set bounds for iteration
# self.bounds requires the Files class
# setting (None,None) loads default bounds
self.bounds = (None, None)
self.date = None
self._fid = None
self.yr = None
self.doy = None
self._load_by_date = False
# initialize orbit support
if orbit_info is None:
if self.orbit_info is None:
# if default info not provided, set None as default
orbit_info = {'index': None, 'kind': None, 'period': None}
else:
# default provided by instrument module
orbit_info = self.orbit_info
self.orbits = _orbits.Orbits(self, **orbit_info)
# Create empty placeholder for meta translation table
# gives information about how to label metadata for netcdf export
# if None, pysat metadata labels will be used
self._meta_translation_table = None
# Create a placeholder for a post-processing function to be applied
# to the metadata dictionary before export. If None, no post-processing
# will occur
self._export_meta_post_processing = None
# store kwargs, passed to load routine
self.kwargs = kwargs
# run instrument init function, a basic pass function is used
# if user doesn't supply the init function
self._init_rtn(self)
# store base attributes, used in particular by Meta class
self._base_attr = dir(self)
def __getitem__(self, key):
"""
Convenience notation for accessing data; inst['name'] is inst.data.name
Examples
--------
::
# By name
inst['name']
# By position
inst[row_index, 'name']
# Slicing by row
inst[row1:row2, 'name']
# By Date
inst[datetime, 'name']
# Slicing by date, inclusive
inst[datetime1:datetime2, 'name']
# Slicing by name and row/date
inst[datetime1:datetime1, 'name1':'name2']
"""
if self.pandas_format:
if isinstance(key, tuple):
# support slicing
return self.data.ix[key[0], key[1]]
else:
try:
# integer based indexing
return self.data.iloc[key]
except:
try:
# let pandas sort it out, presumption is key is
# a variable name, or iterable of variables
return self.data[key]
except:
estring = '\n'.join(("Unable to sort out data access.",
"Instrument has data : " +
str(not self.empty),
"Requested key : ", key))
raise ValueError(estring)
else:
return self.__getitem_xarray__(key)
def __getitem_xarray__(self, key):
"""
Convenience notation for accessing data; inst['name'] is inst.data.name
Examples
--------
::
# By name
inst['name']
# By position
inst[row_index, 'name']
# Slicing by row
inst[row1:row2, 'name']
# By Date
inst[datetime, 'name']
# Slicing by date, inclusive
inst[datetime1:datetime2, 'name']
# Slicing by name and row/date
inst[datetime1:datetime1, 'name1':'name2']
"""
if 'time' not in self.data:
return xr.Dataset(None)
if isinstance(key, tuple):
if len(key) == 2:
# support slicing time, variable name
try:
return self.data.isel(time=key[0])[key[1]]
except:
return self.data.sel(time=key[0])[key[1]]
else:
# multidimensional indexing
indict = {}
for i, dim in enumerate(self[key[-1]].dims):
indict[dim] = key[i]
return self.data[key[-1]][indict]
else:
try:
# grab a particular variable by name
return self.data[key]
except:
# that didn't work
try:
# get all data variables but for a subset of time
# using integer indexing
return self.data.isel(time=key)
except:
# subset of time, using label based indexing
return self.data.sel(time=key)
def __setitem__(self, key, new):
"""Convenience method for adding data to instrument.
Examples
--------
::
# Simple Assignment, default metadata assigned
# 'long_name' = 'name'
# 'units' = ''
inst['name'] = newData
# Assignment with Metadata
inst['name'] = {'data':new_data,
'long_name':long_name,
'units':units}
Note
----
If no metadata provided and if metadata for 'name' not already stored
then default meta information is also added,
long_name = 'name', and units = ''.
"""
# add data to main pandas.DataFrame, depending upon the input
# aka slice, and a name
if self.pandas_format:
if isinstance(key, tuple):
self.data.ix[key[0], key[1]] = new
self.meta[key[1]] = {}
return
elif not isinstance(new, dict):
# make it a dict to simplify downstream processing
new = {'data': new}
# input dict must have data in 'data',
# the rest of the keys are presumed to be metadata
in_data = new.pop('data')
if hasattr(in_data, '__iter__'):
if isinstance(in_data, pds.DataFrame):
pass
# filter for elif
elif isinstance(next(iter(in_data), None), pds.DataFrame):
# input is a list_like of frames
# this is higher order data
# this process ensures
if ('meta' not in new) and (key not in self.meta.keys_nD()):
# create an empty Meta instance but with variable names
# this will ensure the correct defaults for all
# subvariables. Meta can filter out empty metadata as
# needed, the check above reduces the need to create
# Meta instances
ho_meta = _meta.Meta(units_label=self.units_label,
name_label=self.name_label,
notes_label=self.notes_label,
desc_label=self.desc_label,
plot_label=self.plot_label,
axis_label=self.axis_label,
scale_label=self.scale_label,
fill_label=self.fill_label,
min_label=self.min_label,
max_label=self.max_label)
ho_meta[in_data[0].columns] = {}
self.meta[key] = ho_meta
# assign data and any extra metadata
self.data[key] = in_data
self.meta[key] = new
else:
# xarray format chosen for Instrument object
if not isinstance(new, dict):
new = {'data': new}
in_data = new.pop('data')
if isinstance(key, tuple):
# user provided more than one thing in assignment location
# something like, index integers and a variable name
# self[idx, 'variable'] = stuff
# or, self[idx1, idx2, idx3, 'variable'] = stuff
# construct dictionary of dimensions and locations for
# xarray standards
indict = {}
for i, dim in enumerate(self[key[-1]].dims):
indict[dim] = key[i]
# if dim == 'time':
# indict[dim] = self.index[key[i]]
try:
self.data[key[-1]].loc[indict] = in_data
except:
indict['time'] = self.index[indict['time']]
self.data[key[-1]].loc[indict] = in_data
self.meta[key[-1]] = new
return
elif isinstance(key, basestring):
# assigning basic variable
# if xarray input, take as is
if isinstance(in_data, xr.DataArray):
self.data[key] = in_data
# ok, not an xarray input
# but if we have an iterable input, then we
# go through here
elif len(np.shape(in_data)) == 1:
# looking at a 1D input here
if len(in_data) == len(self.index):
# 1D input has the correct length for storage along
# 'time'
self.data[key] = ('time', in_data)
elif len(in_data) == 1:
# only provided a single number in iterable, make that
# the input for all times
self.data[key] = ('time', [in_data[0]]*len(self.index))
elif len(in_data) == 0:
# provided an empty iterable
# make everything NaN
self.data[key] = ('time', [np.nan]*len(self.index))
# not an iterable input
elif len(np.shape(in_data)) == 0:
# not given an iterable at all, single number
# make that number the input for all times
self.data[key] = ('time', [in_data]*len(self.index))
else:
# multidimensional input that is not an xarray
# user needs to provide what is required
if isinstance(in_data, tuple):
self.data[key] = in_data
else:
raise ValueError('Must provide dimensions for xarray ' +
'multidimensional data using input ' +
'tuple.')
elif hasattr(key, '__iter__'):
# multiple input strings (keys) are provided, but not in tuple
# form recurse back into this function, setting each
# input individually
for keyname in key:
self.data[keyname] = in_data[keyname]
# attach metadata
self.meta[key] = new
@property
def empty(self):
"""Boolean flag reflecting lack of data.
True if there is no Instrument data."""
if self.pandas_format:
return self.data.empty
else:
if 'time' in self.data.indexes:
return len(self.data.indexes['time']) == 0
else:
return True
def _empty(self, data=None):
"""Boolean flag reflecting lack of data.
True if there is no Instrument data."""
if data is None:
data = self.data
if self.pandas_format:
return data.empty
else:
if 'time' in data.indexes:
return len(data.indexes['time']) == 0
else:
return True
@property
def index(self):
"""Returns time index of loaded data."""
if self.pandas_format:
return self.data.index
else:
if 'time' in self.data.indexes:
return self.data.indexes['time']
else:
return pds.Index([])
def _index(self, data=None):
"""Returns time index of loaded data."""
if data is None:
data = self.data
if self.pandas_format:
return data.index
else:
if 'time' in data.indexes:
return data.indexes['time']
else:
return pds.Index([])
@property
def variables(self):
"""Returns list of variables within loaded data."""
if self.pandas_format:
return self.data.columns
else:
return list(self.data.variables.keys())
def copy(self):
"""Deep copy of the entire Instrument object."""
return copy.deepcopy(self)
def concat_data(self, data, *args, **kwargs):
"""Concats data1 and data2 for xarray or pandas as needed"""
if self.pandas_format:
return pds.concat(data, *args, **kwargs)
else:
return xr.concat(data, dim='time')
def _pass_func(*args, **kwargs):
pass
def _assign_funcs(self, by_name=False, inst_module=None):
"""Assign all external science instrument methods to Instrument object.
"""
import importlib
# set defaults
self._list_rtn = self._pass_func
self._load_rtn = self._pass_func
self._default_rtn = self._pass_func
self._clean_rtn = self._pass_func
self._init_rtn = self._pass_func
self._download_rtn = self._pass_func
# default params
self.directory_format = None
self.file_format = None
self.multi_file_day = False
self.orbit_info = None
self.pandas_format = True
if by_name:
# look for code with filename name, any errors passed up
inst = importlib.import_module(''.join(('.', self.platform, '_',
self.name)),
package='pysat.instruments')
elif inst_module is not None:
# user supplied an object with relevant instrument routines
inst = inst_module
else:
# no module or name info, default pass functions assigned
return
try:
self._load_rtn = inst.load
self._list_rtn = inst.list_files
self._download_rtn = inst.download
except AttributeError:
estr = 'A load, file_list, and download routine are required for '
raise AttributeError('{:s}every instrument.'.format(estr))
try:
self._default_rtn = inst.default
except AttributeError:
pass
try:
self._init_rtn = inst.init
except AttributeError:
pass
try:
self._clean_rtn = inst.clean
except AttributeError:
pass
# look for instrument default parameters
try:
self.directory_format = inst.directory_format
except AttributeError:
pass
try:
self.multi_file_day = inst.multi_file_day
except AttributeError:
pass
try:
self.orbit_info = inst.orbit_info
except AttributeError:
pass
try:
self.pandas_format = inst.pandas_format
except AttributeError:
pass
return
def __str__(self):
output_str = '\npysat Instrument object\n'
output_str += '-----------------------\n'
output_str += 'Platform: '+self.platform+'\n'
output_str += 'Name: '+self.name+'\n'
output_str += 'Tag: '+self.tag+'\n'
output_str += 'Satellite id: '+self.sat_id+'\n'
output_str += '\nData Processing\n'
output_str += '---------------\n'
output_str += 'Cleaning Level: ' + self.clean_level + '\n'
output_str += 'Data Padding: ' + self.pad.__repr__() + '\n'
output_str += 'Keyword Arguments Passed to load(): '
output_str += self.kwargs.__repr__() +'\nCustom Functions : \n'
if len(self.custom._functions) > 0:
for func in self.custom._functions:
output_str += ' ' + func.__repr__() + '\n'
else:
output_str += ' ' + 'No functions applied.\n'
output_str += '\nOrbit Settings' + '\n'
output_str += '--------------' + '\n'
if self.orbit_info is None:
output_str += 'Orbit properties not set.\n'
else:
output_str += 'Orbit Kind: ' + self.orbit_info['kind'] + '\n'
output_str += 'Orbit Index: ' + self.orbit_info['index'] + '\n'
output_str += 'Orbit Period: '
output_str += self.orbit_info['period'].__str__() + '\n'
output_str += 'Number of Orbits: {:d}\n'.format(self.orbits.num)
output_str += 'Loaded Orbit Number: '
if self.orbits.current is not None:
output_str += '{:d}\n'.format(self.orbits.current)
else:
output_str += 'None\n'
output_str += '\nLocal File Statistics' + '\n'
output_str += '---------------------' + '\n'
output_str += 'Number of files: ' + str(len(self.files.files)) + '\n'
if len(self.files.files) > 0:
output_str += 'Date Range: '
output_str += self.files.files.index[0].strftime('%m/%d/%Y')
output_str += ' --- '
output_str += self.files.files.index[-1].strftime('%m/%d/%Y')
output_str += '\n\nLoaded Data Statistics'+'\n'
output_str += '----------------------'+'\n'
if not self.empty:
# if self._fid is not None:
# output_str += 'Filename: ' +
output_str += 'Date: ' + self.date.strftime('%m/%d/%Y') + '\n'
output_str += 'DOY: {:03d}'.format(self.doy) + '\n'
output_str += 'Time range: '
output_str += self.index[0].strftime('%m/%d/%Y %H:%M:%S')
output_str += ' --- '
output_str += self.index[-1].strftime('%m/%d/%Y %H:%M:%S')+'\n'
output_str += 'Number of Times: ' + str(len(self.index)) + '\n'
output_str += 'Number of variables: ' + str(len(self.variables))
output_str += '\n\nVariable Names:'+'\n'
num = len(self.variables)//3
for i in np.arange(num):
output_str += self.variables[3 * i].ljust(30)
output_str += self.variables[3 * i + 1].ljust(30)
output_str += self.variables[3 * i + 2].ljust(30)+'\n'
for i in np.arange(len(self.variables) - 3 * num):
output_str += self.variables[i+3*num].ljust(30)
output_str += '\n'
else:
output_str += 'No loaded data.'+'\n'
output_str += '\n'
return output_str
def _filter_datetime_input(self, date):
"""
Returns datetime that only includes year, month, and day.
Parameters
----------
date : datetime
Returns
-------
datetime
Only includes year, month, and day from original input
"""
return pds.datetime(date.year, date.month, date.day)
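# e.g. pds.datetime(2009, 1, 1, 12, 34, 56) -> pds.datetime(2009, 1, 1)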
def today(self):
"""Returns today's date, with no hour, minute, second, etc.
Parameters
----------
None
Returns
-------
datetime
Today's date
"""
return self._filter_datetime_input(pds.datetime.today())
def tomorrow(self):
"""Returns tomorrow's date, with no hour, minute, second, etc.
Parameters
----------
None
Returns
-------
datetime
Tomorrow's date
"""
return self.today()+pds.DateOffset(days=1)
def yesterday(self):
"""Returns yesterday's date, with no hour, minute, second, etc.
Parameters
----------
None
Returns
-------
datetime
Yesterday's date
"""
return self.today()-pds.DateOffset(days=1)
def _load_data(self, date=None, fid=None):
"""
Load data for an instrument on given date or fid, depending upon input.
Parameters
----------
date : (dt.datetime.date object or NoneType)
file date
fid : (int or NoneType)
filename index value
Returns
--------
data : (pds.DataFrame)
pysat data
meta : (pysat.Meta)
pysat meta data
"""
if fid is not None:
# get filename based off of index value
fname = self.files[fid:fid+1]
elif date is not None:
fname = self.files[date: date+pds.DateOffset(days=1)]
else:
raise ValueError('Must supply either a date or file id number.')
if len(fname) > 0:
load_fname = [os.path.join(self.files.data_path, f) for f in fname]
data, mdata = self._load_rtn(load_fname, tag=self.tag,
sat_id=self.sat_id, **self.kwargs)
# ensure units and name are named consistently in new Meta
# object as specified by user upon Instrument instantiation
mdata.accept_default_labels(self)
else:
data = self._null_data.copy()
mdata = _meta.Meta(units_label=self.units_label,
name_label=self.name_label,
notes_label = self.notes_label,
desc_label = self.desc_label,
plot_label = self.plot_label,
axis_label = self.axis_label,
scale_label = self.scale_label,
min_label = self.min_label,
max_label = self.max_label,
fill_label=self.fill_label)
output_str = '{platform} {name} {tag} {sat_id}'
output_str = output_str.format(platform=self.platform,
name=self.name, tag=self.tag,
sat_id=self.sat_id)
# check that data and metadata are the data types we expect
if not isinstance(data, self._data_library):
raise TypeError(' '.join(('Data returned by instrument load',
'routine must be a', self._data_library)))
if not isinstance(mdata, _meta.Meta):
raise TypeError('Metadata returned must be a pysat.Meta object')
# let user know if data was returned or not
if len(data) > 0:
if date is not None:
output_str = ' '.join(('Returning', output_str, 'data for',
date.strftime('%x')))
else:
if len(fname) == 1:
# this check was zero
output_str = ' '.join(('Returning', output_str, 'data from',
fname[0]))
else:
output_str = ' '.join(('Returning', output_str, 'data from',
fname[0], '::', fname[-1]))
else:
# no data signal
output_str = ' '.join(('No', output_str, 'data for',
date.strftime('%m/%d/%y')))
# remove extra spaces, if any
output_str = " ".join(output_str.split())
print (output_str)
return data, mdata
def _load_next(self):
"""Load the next days data (or file) without incrementing the date.
Repeated calls will not advance date/file and will produce the same data
Uses info stored in object to either increment the date,
or the file. Looks for self._load_by_date flag.
"""
if self._load_by_date:
next_date = self.date + pds.DateOffset(days=1)
return self._load_data(date=next_date)
else:
return self._load_data(fid=self._fid+1)
def _load_prev(self):
"""Load the next days data (or file) without decrementing the date.
Repeated calls will not decrement date/file and will produce the same
data
Uses info stored in object to either decrement the date,
or the file. Looks for self._load_by_date flag.
"""
if self._load_by_date:
prev_date = self.date - pds.DateOffset(days=1)
return self._load_data(date=prev_date)
else:
return self._load_data(fid=self._fid-1)
def _set_load_parameters(self, date=None, fid=None):
self.date = date
self._fid = fid
if date is not None:
year, doy = utils.getyrdoy(date)
self.yr = year
self.doy = doy
self._load_by_date = True
else:
self.yr = None
self.doy = None
self._load_by_date = False
def load(self, yr=None, doy=None, date=None, fname=None, fid=None,
verifyPad=False):
"""Load instrument data into Instrument object .data.
Parameters
----------
yr : integer
year for desired data
doy : integer
day of year
date : datetime object
date to load
fname : 'string'
filename to be loaded
verifyPad : boolean
if True, padding data not removed (debug purposes)
Returns
--------
Void. Data is added to self.data
Note
----
Loads data for a chosen instrument into .data. Any functions chosen
by the user and added to the custom processing queue (.custom.add)
are automatically applied to the data before it is available to
user in .data.
"""
# set options used by loading routine based upon user input
if date is not None:
# ensure date portion from user is only year, month, day
self._set_load_parameters(date=self._filter_datetime_input(date),
fid=None)
# increment
inc = pds.DateOffset(days=1)
curr = date
elif (yr is not None) & (doy is not None):
date = pds.datetime(yr, 1, 1) + pds.DateOffset(days=(doy-1))
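# e.g. yr=2009, doy=32 resolves to 2009-02-01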
self._set_load_parameters(date=date, fid=None)
# increment
inc = pds.DateOffset(days=1)
curr = self.date
elif fname is not None:
# date will have to be set later by looking at the data
self._set_load_parameters(date=None,
fid=self.files.get_index(fname))
# increment one file at a time
inc = 1
curr = self._fid.copy()
elif fid is not None:
self._set_load_parameters(date=None, fid=fid)
# increment one file at a time
inc = 1
curr = fid
else:
estr = 'Must supply a yr,doy pair, or datetime object, or filename'
estr = '{:s} to load data from.'.format(estr)
raise TypeError(estr)
self.orbits._reset()
# if pad or multi_file_day is true, need to have a three day/file load
loop_pad = self.pad if self.pad is not None else pds.DateOffset(seconds=0)
if (self.pad is not None) | self.multi_file_day:
if self._empty(self._next_data) & self._empty(self._prev_data):
# data has not already been loaded for previous and next days
# load data for all three
print('Initializing three day/file window')
# using current date or fid
self._prev_data, self._prev_meta = self._load_prev()
self._curr_data, self._curr_meta = \
self._load_data(date=self.date, fid=self._fid)
self._next_data, self._next_meta = self._load_next()
else:
# moving forward in time
if self._next_data_track == curr:
del self._prev_data
self._prev_data = self._curr_data
self._prev_meta = self._curr_meta
self._curr_data = self._next_data
self._curr_meta = self._next_meta
self._next_data, self._next_meta = self._load_next()
# moving backward in time
elif self._prev_data_track == curr:
del self._next_data
self._next_data = self._curr_data
self._next_meta = self._curr_meta
self._curr_data = self._prev_data
self._curr_meta = self._prev_meta
self._prev_data, self._prev_meta = self._load_prev()
# jumped in time/or switched from filebased to date based access
else:
del self._prev_data
del self._curr_data
del self._next_data
self._prev_data, self._prev_meta = self._load_prev()
self._curr_data, self._curr_meta = \
self._load_data(date=self.date, fid=self._fid)
self._next_data, self._next_meta = self._load_next()
# make sure datetime indices for all data is monotonic
if not self._index(self._prev_data).is_monotonic_increasing:
self._prev_data.sort_index(inplace=True)
if not self._index(self._curr_data).is_monotonic_increasing:
self._curr_data.sort_index(inplace=True)
if not self._index(self._next_data).is_monotonic_increasing:
self._next_data.sort_index(inplace=True)
# make tracking indexes consistent with new loads
self._next_data_track = curr + inc
self._prev_data_track = curr - inc
# attach data to object
if not self._empty(self._curr_data):
self.data = self._curr_data.copy()
self.meta = self._curr_meta.copy()
else:
self.data = self._null_data.copy()
# line below removed as it would delete previous meta, if any
# if you end a seasonal analysis with a day with no data, then
# no meta: self.meta = _meta.Meta()
# multi file days can extend past a single day, only want data from
# specific date if loading by day
# set up times for the possible data padding coming up
if self._load_by_date:
#print ('double trouble')
first_time = self.date
first_pad = self.date - loop_pad
last_time = self.date + pds.DateOffset(days=1)
last_pad = self.date + pds.DateOffset(days=1) + loop_pad
want_last_pad = False
# loading by file, can't be a multi_file-day flag situation
elif (not self._load_by_date) and (not self.multi_file_day):
#print ('single trouble')
first_time = self._index(self._curr_data)[0]
first_pad = first_time - loop_pad
last_time = self._index(self._curr_data)[-1]
last_pad = last_time + loop_pad
want_last_pad = True
else:
raise ValueError("multi_file_day and loading by date are " +
"effectively equivalent. Can't have " +
"multi_file_day and load by file.")
#print (first_pad, first_time, last_time, last_pad)
# pad data based upon passed parameter
if (not self._empty(self._prev_data)) & (not self.empty):
stored_data = self.data #.copy()
temp_time = copy.deepcopy(self.index[0])
# pad data using access mechanisms that works
# for both pandas and xarray
self.data = self._prev_data.copy()
# __getitem__ used below to get data
# from instrument object. Details
# for handling pandas and xarray are different
# and handled by __getitem__
self.data = self[first_pad : temp_time]
if not self.empty:
if (self.index[-1] == temp_time) :
self.data = self[:-1]
self.data = self.concat_data([self.data, stored_data])
else:
self.data = stored_data
if (not self._empty(self._next_data)) & (not self.empty):
stored_data = self.data #.copy()
temp_time = copy.deepcopy(self.index[-1])
# pad data using access mechanisms that work
# for both pandas and xarray
self.data = self._next_data.copy()
self.data = self[temp_time : last_pad]
if not self.empty:
if (self.index[0] == temp_time) :
self.data = self[1:]
self.data = self.concat_data([stored_data, self.data])
else:
self.data = stored_data
self.data = self[first_pad : last_pad]
# want exclusive end slicing behavior from above
if not self.empty:
if (self.index[-1] == last_pad) & (not want_last_pad):
self.data = self[:-1]
# if self.pad is False, load single day
else:
self.data, meta = self._load_data(date=self.date, fid=self._fid)
if not self.empty:
self.meta = meta
# check if load routine actually returns meta
if self.meta.data.empty:
self.meta[self.variables] = {self.name_label: self.variables,
self.units_label: [''] *
len(self.variables)}
# if loading by file set the yr, doy, and date
if not self._load_by_date:
if self.pad is not None:
temp = first_time
else:
temp = self.index[0]
self.date = | pds.datetime(temp.year, temp.month, temp.day) | pandas.datetime |
# import libraries
import os.path
import os
import datetime
import lightgbm as lgb
from sklearn.model_selection import train_test_split
from gensim.models import Word2Vec
from pandas import read_csv
import re
import pandas as pd
import numpy as np
import itertools
import sys
def get_dict(file): # this function builds the dict mapping protein IDs to their sequences, or molecule IDs to their fingerprints
if file=='df_molecule.csv':
fig_dict=pd.read_csv(file)[['Molecule_ID','Fingerprint']].T.to_dict('series')
elif file=='df_protein_train.csv' or file=='df_protein_test.csv' :
pro=open(file,'r').read().upper() # convert lowercase letters in the protein sequence file to uppercase
pro_out=open(file,'w')
pro_out.write(pro)
pro_out.close()
fig_dict=pd.read_csv(file)[['PROTEIN_ID','SEQUENCE']].T.to_dict('series')
else:
print('file format error')
sys.exit()
return fig_dict
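# minimal usage sketch (assumed calling pattern, not shown in this excerpt):
# pro_dict = get_dict('df_protein_train.csv') # row -> Series(PROTEIN_ID, SEQUENCE)
# fig_dict = get_dict('df_molecule.csv') # row -> Series(Molecule_ID, Fingerprint)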
def get_new_pro(id_pro, pramate_file): # this function builds the numeric feature matrix derived from each protein sequence
pro_result={}
for key,valuex in id_pro.items():
value=list(valuex)[-1]
length=len(value)
pro_mol={'G':75.07,'A':89.09,'V':117.15,'L':131.17,'I':131.17,'F':165.19,'W':204.23,'Y':181.19,'D':133.10,'N':132.12,'E':147.13,'K':146.19,'Q':146.15,'M':149.21,'S':105.09,'T':119.12,'C':121.16,'P':115.13,'H':155.16,'R':174.20}
pramate_file_dict = pd.read_csv(pramate_file, index_col='aa').T.to_dict('series')
pro_n_8_maxitic=np.array([pramate_file_dict[value[0]],pramate_file_dict[value[1]]])
pro_line=np.array([pro_mol[value[0]],pro_mol[value[1]]])
for i in value[2:]:
pro_n_8_maxitic=np.row_stack((pro_n_8_maxitic,pramate_file_dict[i])) # build the n x n_properties computation matrix
pro_line= np.append(pro_line,pro_mol[i])
Lag=list(np.dot(pro_line,pro_n_8_maxitic)/float(length))
Lag=[ str(i) for i in Lag ]
pro_result[str(key)] =str(key)+','+','.join(Lag)
return pro_result
def get_AC_figuer(file_fig_dict): # this function builds the numeric matrix derived from the molecular fingerprints
fig = []
for i in itertools.product('01', repeat=8):
fig.append(''.join(list(i)))
out={}
for k, vx in file_fig_dict.items():
fig_nu_dict = {}
v=''.join([ str(i) for i in list(vx)[1:] ]).replace(', ','')
s = 0
e = 8
for ii in range(len(v) - 7):
read = v[s:e]
if read in fig_nu_dict:
fig_nu_dict[read] = fig_nu_dict[read] + 1
else:
fig_nu_dict[read] = 1
s = s + 1
e = e + 1
fig_list=[]
for i in fig:
if i in fig_nu_dict:
fig_list.append(str(fig_nu_dict[i]))
else:
fig_list.append('0')
out[str(k)]=str(k)+','+','.join(fig_list)
return out
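# e.g. for a fingerprint string v = '0101010101' the sliding 8-character windows are
# '01010101', '10101010', '01010101', so its row counts {'01010101': 2, '10101010': 1}
# over the 256 possible 8-bit patterns enumerated above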
def merge_file(new_fig,new_pro,pro_mol_id_file,out_file): # this function merges the protein-sequence matrix, the fingerprint matrix, and the 18 small-molecule properties
df=pd.read_csv(pro_mol_id_file)
new_pro=pd.read_csv('new_pro.list',sep='\t')
new_fig=pd.read_csv('new_fig.list',sep='\t')
nu_18=pd.read_csv('df_molecule.csv')[['Molecule_ID','cyp_3a4','cyp_2c9','cyp_2d6','ames_toxicity','fathead_minnow_toxicity','tetrahymena_pyriformis_toxicity','honey_bee','cell_permeability','logP','renal_organic_cation_transporter','CLtotal','hia','biodegradation','Vdd','p_glycoprotein_inhibition','NOAEL','solubility','bbb']]
df['Protein_ID']=df['Protein_ID'].astype(int)
result=pd.merge(new_pro,df,on='Protein_ID')
result=pd.merge(new_fig, result, on='Molecule_ID')
result= | pd.merge(nu_18, result, on='Molecule_ID') | pandas.merge |
# Copyright 2020 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import matplotlib.pyplot as plt
import os
import pickle
import pandas as pd
import seaborn as sns
import argparse
def run():
parser = argparse.ArgumentParser()
parser.add_argument('--result_dir', type=str,
default='logs/figure_toy_nullspace_frozen',
help='Directory with the result data from the nullspace'
'experiment.')
args = parser.parse_args()
result_dir = args.result_dir
filename = os.path.join(result_dir, 'result_dict.pickle')
with open(filename, 'rb') as f:
result_dict = pickle.load(f)
legend = ['DTP', 'DDTP-linear \n (ours)']
# legend = ['DTP', 'Ours']
layer_idx = 1
result_dict_null = result_dict['nullspace_relative_norm_angles']
result_dict_null_DTP = result_dict_null['DTP_pretrained']
result_dict_null_DDTPlin = result_dict_null['DMLPDTP2_linear']
# append the dataframes with labels
result_dict_null_DTP.insert(result_dict_null_DTP.shape[1], 'type',
[legend[0] for i in
range(result_dict_null_DTP.shape[0])])
result_dict_null_DDTPlin.insert(result_dict_null_DDTPlin.shape[1], 'type',
[legend[1] for i in
range(result_dict_null_DDTPlin.shape[0])])
result_dict_joined = | pd.concat([result_dict_null_DTP, result_dict_null_DDTPlin]) | pandas.concat |
from datetime import timedelta
from functools import partial
from operator import attrgetter
import dateutil
import numpy as np
import pytest
import pytz
from pandas._libs.tslibs import OutOfBoundsDatetime, conversion
import pandas as pd
from pandas import (
DatetimeIndex, Index, Timestamp, date_range, datetime, offsets,
to_datetime)
from pandas.core.arrays import DatetimeArray, period_array
import pandas.util.testing as tm
class TestDatetimeIndex(object):
@pytest.mark.parametrize('dt_cls', [DatetimeIndex,
DatetimeArray._from_sequence])
def test_freq_validation_with_nat(self, dt_cls):
# GH#11587 make sure we get a useful error message when generate_range
# raises
msg = ("Inferred frequency None from passed values does not conform "
"to passed frequency D")
with pytest.raises(ValueError, match=msg):
dt_cls([pd.NaT, pd.Timestamp('2011-01-01')], freq='D')
with pytest.raises(ValueError, match=msg):
dt_cls([pd.NaT, pd.Timestamp('2011-01-01').value],
freq='D')
def test_categorical_preserves_tz(self):
# GH#18664 retain tz when going DTI-->Categorical-->DTI
# TODO: parametrize over DatetimeIndex/DatetimeArray
# once CategoricalIndex(DTA) works
dti = pd.DatetimeIndex(
[pd.NaT, '2015-01-01', '1999-04-06 15:14:13', '2015-01-01'],
tz='US/Eastern')
ci = pd.CategoricalIndex(dti)
carr = pd.Categorical(dti)
cser = pd.Series(ci)
for obj in [ci, carr, cser]:
result = pd.DatetimeIndex(obj)
tm.assert_index_equal(result, dti)
def test_dti_with_period_data_raises(self):
# GH#23675
data = pd.PeriodIndex(['2016Q1', '2016Q2'], freq='Q')
with pytest.raises(TypeError, match="PeriodDtype data is invalid"):
DatetimeIndex(data)
with pytest.raises(TypeError, match="PeriodDtype data is invalid"):
to_datetime(data)
with pytest.raises(TypeError, match="PeriodDtype data is invalid"):
DatetimeIndex(period_array(data))
with pytest.raises(TypeError, match="PeriodDtype data is invalid"):
to_datetime(period_array(data))
def test_dti_with_timedelta64_data_deprecation(self):
# GH#23675
data = np.array([0], dtype='m8[ns]')
with tm.assert_produces_warning(FutureWarning):
result = DatetimeIndex(data)
assert result[0] == Timestamp('1970-01-01')
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
result = to_datetime(data)
assert result[0] == Timestamp('1970-01-01')
with tm.assert_produces_warning(FutureWarning):
result = DatetimeIndex(pd.TimedeltaIndex(data))
assert result[0] == Timestamp('1970-01-01')
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
result = to_datetime(pd.TimedeltaIndex(data))
assert result[0] == Timestamp('1970-01-01')
def test_construction_caching(self):
df = pd.DataFrame({'dt': pd.date_range('20130101', periods=3),
'dttz': pd.date_range('20130101', periods=3,
tz='US/Eastern'),
'dt_with_null': [pd.Timestamp('20130101'), pd.NaT,
pd.Timestamp('20130103')],
'dtns': pd.date_range('20130101', periods=3,
freq='ns')})
assert df.dttz.dtype.tz.zone == 'US/Eastern'
@pytest.mark.parametrize('kwargs', [
{'tz': 'dtype.tz'},
{'dtype': 'dtype'},
{'dtype': 'dtype', 'tz': 'dtype.tz'}])
def test_construction_with_alt(self, kwargs, tz_aware_fixture):
tz = tz_aware_fixture
i = pd.date_range('20130101', periods=5, freq='H', tz=tz)
kwargs = {key: attrgetter(val)(i) for key, val in kwargs.items()}
result = DatetimeIndex(i, **kwargs)
tm.assert_index_equal(i, result)
@pytest.mark.parametrize('kwargs', [
{'tz': 'dtype.tz'},
{'dtype': 'dtype'},
{'dtype': 'dtype', 'tz': 'dtype.tz'}])
def test_construction_with_alt_tz_localize(self, kwargs, tz_aware_fixture):
tz = tz_aware_fixture
i = pd.date_range('20130101', periods=5, freq='H', tz=tz)
kwargs = {key: attrgetter(val)(i) for key, val in kwargs.items()}
if str(tz) in ('UTC', 'tzutc()'):
warn = None
else:
warn = FutureWarning
with tm.assert_produces_warning(warn, check_stacklevel=False):
result = DatetimeIndex(i.tz_localize(None).asi8, **kwargs)
expected = DatetimeIndex(i, **kwargs)
tm.assert_index_equal(result, expected)
# localize into the provided tz
i2 = DatetimeIndex(i.tz_localize(None).asi8, tz='UTC')
expected = i.tz_localize(None).tz_localize('UTC')
tm.assert_index_equal(i2, expected)
# incompat tz/dtype
pytest.raises(ValueError, lambda: DatetimeIndex(
i.tz_localize(None).asi8, dtype=i.dtype, tz='US/Pacific'))
def test_construction_index_with_mixed_timezones(self):
# gh-11488: no tz results in DatetimeIndex
result = Index([Timestamp('2011-01-01'),
Timestamp('2011-01-02')], name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01'),
Timestamp('2011-01-02')], name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is None
# same tz results in DatetimeIndex
result = Index([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
Timestamp('2011-01-02 10:00', tz='Asia/Tokyo')],
name='idx')
exp = DatetimeIndex(
[Timestamp('2011-01-01 10:00'), Timestamp('2011-01-02 10:00')
], tz='Asia/Tokyo', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is not None
assert result.tz == exp.tz
# same tz results in DatetimeIndex (DST)
result = Index([Timestamp('2011-01-01 10:00', tz='US/Eastern'),
Timestamp('2011-08-01 10:00', tz='US/Eastern')],
name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01 10:00'),
Timestamp('2011-08-01 10:00')],
tz='US/Eastern', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is not None
assert result.tz == exp.tz
# Different tz results in Index(dtype=object)
result = Index([Timestamp('2011-01-01 10:00'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
name='idx')
exp = Index([Timestamp('2011-01-01 10:00'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
dtype='object', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert not isinstance(result, DatetimeIndex)
result = Index([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
name='idx')
exp = Index([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
dtype='object', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert not isinstance(result, DatetimeIndex)
# length = 1
result = Index([Timestamp('2011-01-01')], name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01')], name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is None
# length = 1 with tz
result = Index(
[Timestamp('2011-01-01 10:00', tz='Asia/Tokyo')], name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01 10:00')], tz='Asia/Tokyo',
name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is not None
assert result.tz == exp.tz
def test_construction_index_with_mixed_timezones_with_NaT(self):
# see gh-11488
result = Index([pd.NaT, Timestamp('2011-01-01'),
pd.NaT, Timestamp('2011-01-02')], name='idx')
exp = DatetimeIndex([pd.NaT, Timestamp('2011-01-01'),
pd.NaT, Timestamp('2011-01-02')], name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is None
# Same tz results in DatetimeIndex
result = Index([pd.NaT, Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
pd.NaT, Timestamp('2011-01-02 10:00',
tz='Asia/Tokyo')],
name='idx')
exp = DatetimeIndex([pd.NaT, Timestamp('2011-01-01 10:00'),
pd.NaT, Timestamp('2011-01-02 10:00')],
tz='Asia/Tokyo', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is not None
assert result.tz == exp.tz
# same tz results in DatetimeIndex (DST)
result = Index([Timestamp('2011-01-01 10:00', tz='US/Eastern'),
pd.NaT,
Timestamp('2011-08-01 10:00', tz='US/Eastern')],
name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01 10:00'), pd.NaT,
Timestamp('2011-08-01 10:00')],
tz='US/Eastern', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is not None
assert result.tz == exp.tz
# different tz results in Index(dtype=object)
result = Index([pd.NaT, Timestamp('2011-01-01 10:00'),
pd.NaT, Timestamp('2011-01-02 10:00',
tz='US/Eastern')],
name='idx')
exp = Index([pd.NaT, Timestamp('2011-01-01 10:00'),
pd.NaT, Timestamp('2011-01-02 10:00', tz='US/Eastern')],
dtype='object', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert not isinstance(result, DatetimeIndex)
result = Index([pd.NaT, Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
pd.NaT, Timestamp('2011-01-02 10:00',
tz='US/Eastern')], name='idx')
exp = Index([pd.NaT, Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
pd.NaT, Timestamp('2011-01-02 10:00', tz='US/Eastern')],
dtype='object', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert not isinstance(result, DatetimeIndex)
# all NaT
result = Index([pd.NaT, pd.NaT], name='idx')
exp = DatetimeIndex([pd.NaT, pd.NaT], name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is None
# all NaT with tz
result = Index([pd.NaT, pd.NaT], tz='Asia/Tokyo', name='idx')
exp = DatetimeIndex([pd.NaT, pd.NaT], tz='Asia/Tokyo', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is not None
assert result.tz == exp.tz
def test_construction_dti_with_mixed_timezones(self):
# GH 11488 (not changed, added explicit tests)
# no tz results in DatetimeIndex
result = DatetimeIndex(
[Timestamp('2011-01-01'), Timestamp('2011-01-02')], name='idx')
exp = DatetimeIndex(
[Timestamp('2011-01-01'), Timestamp('2011-01-02')], name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
# same tz results in DatetimeIndex
result = DatetimeIndex([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
Timestamp('2011-01-02 10:00',
tz='Asia/Tokyo')],
name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01 10:00'),
Timestamp('2011-01-02 10:00')],
tz='Asia/Tokyo', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
# same tz results in DatetimeIndex (DST)
result = DatetimeIndex([Timestamp('2011-01-01 10:00', tz='US/Eastern'),
Timestamp('2011-08-01 10:00',
tz='US/Eastern')],
name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01 10:00'),
Timestamp('2011-08-01 10:00')],
tz='US/Eastern', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
# tz mismatch affecting to tz-aware raises TypeError/ValueError
with pytest.raises(ValueError):
DatetimeIndex([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
name='idx')
msg = 'cannot be converted to datetime64'
with pytest.raises(ValueError, match=msg):
DatetimeIndex([Timestamp('2011-01-01 10:00'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
tz='Asia/Tokyo', name='idx')
with pytest.raises(ValueError):
DatetimeIndex([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
tz='US/Eastern', name='idx')
with pytest.raises(ValueError, match=msg):
# passing tz should results in DatetimeIndex, then mismatch raises
# TypeError
Index([pd.NaT, Timestamp('2011-01-01 10:00'),
pd.NaT, Timestamp('2011-01-02 10:00', tz='US/Eastern')],
tz='Asia/Tokyo', name='idx')
def test_construction_base_constructor(self):
arr = [pd.Timestamp('2011-01-01'), pd.NaT, pd.Timestamp('2011-01-03')]
tm.assert_index_equal(pd.Index(arr), pd.DatetimeIndex(arr))
tm.assert_index_equal(pd.Index(np.array(arr)),
pd.DatetimeIndex(np.array(arr)))
arr = [np.nan, pd.NaT, pd.Timestamp('2011-01-03')]
tm.assert_index_equal(pd.Index(arr), pd.DatetimeIndex(arr))
tm.assert_index_equal(pd.Index(np.array(arr)),
pd.DatetimeIndex(np.array(arr)))
def test_construction_outofbounds(self):
# GH 13663
dates = [datetime(3000, 1, 1), datetime(4000, 1, 1),
datetime(5000, 1, 1), datetime(6000, 1, 1)]
exp = Index(dates, dtype=object)
# coerces to object
tm.assert_index_equal(Index(dates), exp)
with pytest.raises(OutOfBoundsDatetime):
# can't create DatetimeIndex
DatetimeIndex(dates)
def test_construction_with_ndarray(self):
# GH 5152
dates = [datetime(2013, 10, 7),
datetime(2013, 10, 8),
datetime(2013, 10, 9)]
data = DatetimeIndex(dates, freq=pd.offsets.BDay()).values
result = DatetimeIndex(data, freq=pd.offsets.BDay())
expected = DatetimeIndex(['2013-10-07',
'2013-10-08',
'2013-10-09'],
freq='B')
tm.assert_index_equal(result, expected)
def test_verify_integrity_deprecated(self):
# GH#23919
with tm.assert_produces_warning(FutureWarning):
DatetimeIndex(['1/1/2000'], verify_integrity=False)
def test_range_kwargs_deprecated(self):
# GH#23919
with tm.assert_produces_warning(FutureWarning):
DatetimeIndex(start='1/1/2000', end='1/10/2000', freq='D')
def test_integer_values_and_tz_deprecated(self):
# GH-24559
values = np.array([946684800000000000])
with tm.assert_produces_warning(FutureWarning):
result = DatetimeIndex(values, tz='US/Central')
expected = pd.DatetimeIndex(['2000-01-01T00:00:00'], tz="US/Central")
tm.assert_index_equal(result, expected)
# but UTC is *not* deprecated.
with tm.assert_produces_warning(None):
result = DatetimeIndex(values, tz='UTC')
expected = pd.DatetimeIndex(['2000-01-01T00:00:00'], tz="US/Central")
def test_constructor_coverage(self):
rng = date_range('1/1/2000', periods=10.5)
exp = date_range('1/1/2000', periods=10)
| tm.assert_index_equal(rng, exp) | pandas.util.testing.assert_index_equal |
import numpy as np
import pandas as pd
from pandas import (
Categorical,
DataFrame,
MultiIndex,
Series,
Timestamp,
date_range,
)
from .pandas_vb_common import tm
try:
from pandas.tseries.offsets import (
Hour,
Nano,
)
except ImportError:
# For compatibility with older versions
from pandas.core.datetools import (
Hour,
Nano,
)
class FromDicts:
def setup(self):
N, K = 5000, 50
self.index = tm.makeStringIndex(N)
self.columns = tm.makeStringIndex(K)
frame = DataFrame(np.random.randn(N, K), index=self.index, columns=self.columns)
self.data = frame.to_dict()
self.dict_list = frame.to_dict(orient="records")
self.data2 = {i: {j: float(j) for j in range(100)} for i in range(2000)}
# arrays which we wont consolidate
self.dict_of_categoricals = {i: Categorical(np.arange(N)) for i in range(K)}
def time_list_of_dict(self):
DataFrame(self.dict_list)
def time_nested_dict(self):
DataFrame(self.data)
def time_nested_dict_index(self):
DataFrame(self.data, index=self.index)
def time_nested_dict_columns(self):
DataFrame(self.data, columns=self.columns)
def time_nested_dict_index_columns(self):
DataFrame(self.data, index=self.index, columns=self.columns)
def time_nested_dict_int64(self):
# nested dict, integer indexes, regression described in #621
DataFrame(self.data2)
def time_dict_of_categoricals(self):
# dict of arrays that we wont consolidate
DataFrame(self.dict_of_categoricals)
class FromSeries:
def setup(self):
mi = MultiIndex.from_product([range(100), range(100)])
self.s = Series(np.random.randn(10000), index=mi)
def time_mi_series(self):
DataFrame(self.s)
class FromDictwithTimestamp:
    params = [Nano(1), Hour(1)]
import pandas as pd
import warnings
from thermostat_nw.columns import EXPORT_COLUMNS, CERTIFICATION_HEADERS
warnings.simplefilter("module", Warning)
COLUMN_LOOKUP = {
"percent_savings_baseline_percentile_lower_bound_95_perc_conf_national_weighted_mean": {
"metric": "percent_savings_baseline_percentile",
"statistic": "lower_bound_95",
},
"percent_savings_baseline_percentile_q20_national_weighted_mean": {
"metric": "percent_savings_baseline_percentile",
"statistic": "q20",
},
"rhu2IQFLT_30F_to_45F_upper_bound_95_perc_conf": {
"metric": "rhu_30F_to_45F",
"statistic": "upper_bound_95",
},
}
FILTER_LOOKUP = {
"national_weighted_mean_heating_tau_cvrmse_savings_p01_filter": {
"season": "heating",
"region": "national_weighted_mean",
"filter": "tau_cvrmse_savings_p01",
},
"national_weighted_mean_cooling_tau_cvrmse_savings_p01_filter": {
"season": "cooling",
"region": "national_weighted_mean",
"filter": "tau_cvrmse_savings_p01",
},
"all_tau_cvrmse_savings_p01_filter_heating": {
"season": "heating",
"region": "all",
"filter": "tau_cvrmse_savings_p01",
},
}
DATA_COLUMNS = [
[
"national_weighted_mean_heating_tau_cvrmse_savings_p01_filter",
"percent_savings_baseline_percentile_lower_bound_95_perc_conf_national_weighted_mean",
],
[
"national_weighted_mean_cooling_tau_cvrmse_savings_p01_filter",
"percent_savings_baseline_percentile_lower_bound_95_perc_conf_national_weighted_mean",
],
[
"national_weighted_mean_heating_tau_cvrmse_savings_p01_filter",
"percent_savings_baseline_percentile_q20_national_weighted_mean",
],
[
"national_weighted_mean_cooling_tau_cvrmse_savings_p01_filter",
"percent_savings_baseline_percentile_q20_national_weighted_mean",
],
[
"all_tau_cvrmse_savings_p01_filter_heating",
"rhu2IQFLT_30F_to_45F_upper_bound_95_perc_conf",
],
]
def metrics_to_csv(metrics, filepath):
"""Writes metrics outputs to the file specified.
Parameters
----------
metrics : list of dict
list of outputs from the function
`thermostat.calculate_epa_draft_rccs_field_savings_metrics()`
filepath : str
filepath specification for location of output CSV file.
Returns
-------
df : pd.DataFrame
DataFrame containing data output to CSV.
"""
output_dataframe = pd.DataFrame(metrics, columns=EXPORT_COLUMNS)
output_dataframe.to_csv(filepath, index=False, columns=EXPORT_COLUMNS)
return output_dataframe
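def _example_metrics_to_csv(metrics, filepath="metrics_example.csv"):
    """Illustrative usage sketch, not part of thermostat_nw: `metrics` is the
    list of dicts produced by the thermostat metrics computation; keys not in
    EXPORT_COLUMNS are dropped and missing keys become empty cells. The
    argument names and default path here are assumptions for the example."""
    df = metrics_to_csv(metrics, filepath)
    return df.columns.tolist()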
def certification_to_csv(stats, filepath, product_id):
"""Writes certification outputs to the file specified.
Parameters
----------
stats : list of dict
list of statistical outputs from the function
`thermostat.compute_summary_statistics()`
filepath : str
filepath specification for location of output CSV file.
Returns
-------
df : pd.DataFrame
DataFrame containing data output to CSV.
"""
if stats is None:
warnings.warn("No certification data to export.")
return None
labels = [i.get("label") for i in stats]
sw_version = stats[labels.index("all_tau_cvrmse_savings_p01_filter_heating")][
"sw_version"
]
certification_data = []
for column_filter, column_data in DATA_COLUMNS:
stats_column_number = labels.index(column_filter)
value = stats[stats_column_number].get(column_data, None)
row = [
product_id,
sw_version,
COLUMN_LOOKUP[column_data]["metric"],
FILTER_LOOKUP[column_filter]["filter"],
FILTER_LOOKUP[column_filter]["region"],
COLUMN_LOOKUP[column_data]["statistic"],
FILTER_LOOKUP[column_filter]["season"],
value,
]
certification_data.append(row)
output_dataframe = | pd.DataFrame(certification_data, columns=CERTIFICATION_HEADERS) | pandas.DataFrame |
import numpy as np
import pytest
from pandas import (
DatetimeIndex,
IntervalIndex,
NaT,
Period,
Series,
Timestamp,
)
import pandas._testing as tm
class TestDropna:
def test_dropna_empty(self):
ser = Series([], dtype=object)
assert len(ser.dropna()) == 0
return_value = ser.dropna(inplace=True)
assert return_value is None
assert len(ser) == 0
# invalid axis
msg = "No axis named 1 for object type Series"
with pytest.raises(ValueError, match=msg):
ser.dropna(axis=1)
def test_dropna_preserve_name(self, datetime_series):
datetime_series[:5] = np.nan
result = datetime_series.dropna()
assert result.name == datetime_series.name
name = datetime_series.name
ts = datetime_series.copy()
return_value = ts.dropna(inplace=True)
assert return_value is None
assert ts.name == name
def test_dropna_no_nan(self):
for ser in [
Series([1, 2, 3], name="x"),
Series([False, True, False], name="x"),
]:
result = ser.dropna()
tm.assert_series_equal(result, ser)
assert result is not ser
s2 = ser.copy()
return_value = s2.dropna(inplace=True)
assert return_value is None
tm.assert_series_equal(s2, ser)
def test_dropna_intervals(self):
ser = Series(
[np.nan, 1, 2, 3],
            IntervalIndex.from_arrays([np.nan, 0, 1, 2], [np.nan, 1, 2, 3]))
import numpy as np
import pandas as pd
import keycode
FALLBACK_WEIGHT = 0.25 # weight of fallback observations
M_MIN_FREQUENCY = 3 # min frequency per sample for feature fallback
OUTLIER_DISTANCE = 2 # outliers outside +/- std devs
OUTLIER_ITERATIONS = 2 # no. iterations to do recursive outlier removal
def transition_digrams(df, distance=1):
a = df.groupby(['user', 'session']).apply(lambda x: x[:-distance].reset_index())
b = df.groupby(['user', 'session']).apply(lambda x: x[distance:].reset_index())
a = a[['user', 'session', 'keyname', 'timepress', 'timerelease']]
b = b[['keyname', 'timepress', 'timerelease']]
a.columns = ['user', 'session', 'keyname_1', 'timepress_1', 'timerelease_1']
b.columns = ['keyname_2', 'timepress_2', 'timerelease_2']
joined = pd.concat([a, b], join='inner', axis=1)
cols = ['user', 'session', 'keynames', 'transition']
# Create columns for each transition type
t1 = pd.DataFrame({'user': joined['user'],
'session': joined['session'],
'keynames': joined['keyname_1'] + '__' + joined['keyname_2'],
'transition': joined['timepress_2'] - joined['timerelease_1']},
columns=cols, index=joined.index)
t2 = pd.DataFrame({'user': joined['user'],
'session': joined['session'],
'keynames': joined['keyname_1'] + '__' + joined['keyname_2'],
'transition': joined['timepress_2'] - joined['timepress_1']},
columns=cols, index=joined.index)
return t1, t2
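def _example_transition_digrams():
    """Illustrative usage sketch, not part of the original module: build the
    two transition tables from a tiny made-up keystroke log whose columns
    follow the layout assumed by transition_digrams above."""
    raw = pd.DataFrame({
        'user': ['u1'] * 4,
        'session': [1] * 4,
        'keyname': ['h', 'e', 'l', 'o'],
        'timepress': [0, 120, 260, 400],
        'timerelease': [80, 200, 340, 470],
    })
    # t1: release -> next press latencies; t2: press -> next press latencies
    t1, t2 = transition_digrams(raw, distance=1)
    return t1, t2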
def outlier_removal_recursive(df, col, std_distance=OUTLIER_DISTANCE, max_iterations=OUTLIER_ITERATIONS):
'''
Remove duration outliers on a per-user basis
10 iterations will remove most outliers.
Does the following:
group df by user and keyname
get mean and std for each group (user/keyname combination)
filter df durations with the corresponding user/key mean and stds
This could be more efficient by testing the number of outliers removed for
each group and only recomputing the groups with more than 0 removed
'''
prev_len = np.inf
i = 0
while prev_len > len(df):
prev_len = len(df)
df = outlier_removal(df, col, std_distance=std_distance)
print('Removed %d observations' % (prev_len - len(df)))
i += 1
if max_iterations > 0 and i == max_iterations:
break
return df
def outlier_removal(df, col, std_distance=4):
'''
    Remove outliers from `col` in a single pass: keep only observations that
    lie within `std_distance` standard deviations of the column mean.
    The recursive variant above repeats this filtering until no more rows
    are removed (or max_iterations is reached).
'''
m, s = df[col].mean(), df[col].std()
lower = m - std_distance * s
upper = m + std_distance * s
df = df[(df[col].values > lower) & (df[col].values < upper)]
return df
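def _example_outlier_removal():
    """Illustrative usage sketch, not part of the original module: drop
    outliers from a made-up duration column, once and then recursively."""
    durations = np.concatenate([np.random.normal(100, 10, 500), [500.0, 900.0]])
    df = pd.DataFrame({'duration': durations})
    once = outlier_removal(df, 'duration', std_distance=OUTLIER_DISTANCE)
    recursive = outlier_removal_recursive(df, 'duration')
    return once, recursive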
def reverse_tree(features, hierarchy):
parents = {}
for parent, children in hierarchy.items():
for child in children:
parents[child] = parent
return parents
def extract_gaussian_features(df, group_col_name, feature_col_name, features, decisions, feature_name_prefix):
feature_vector = {}
for feature_name, feature_set in features.items():
full_feature_name = '%s%s' % (feature_name_prefix, feature_name)
obs = df.loc[df[group_col_name].isin(feature_set), feature_col_name]
if len(obs) < M_MIN_FREQUENCY and feature_name in decisions.keys():
fallback_name = decisions[feature_name]
fallback_obs = | pd.DataFrame() | pandas.DataFrame |
import requests
import pandas as pd
YOUR_API_KEY = '<KEY>'
url = 'https://maps.googleapis.com/maps/api/directions/json?origin=%f,%f&destination=%f,%f&key=' + YOUR_API_KEY
df = pd.read_pickle('vis_graph.pkl')
plac_df = pd.read_pickle('place_details.pkl')
places_list = list(df.index)
DIST_MATRIX_FILE = 'dist_graph.pkl'
def generate_distance_matrix():
graph = {}
for i in places_list:
graph[i] = {}
graph[i][i] = 0
for j in places_list:
if j in graph:
num = graph[j][i]
else:
num = get_dist(i, j)
graph[i][j] = num
df = | pd.DataFrame() | pandas.DataFrame |
from pandas import DataFrame
import numpy as np
from pandas.core.reshape import melt, convert_dummies
import pandas.util.testing as tm
def test_melt():
df = tm.makeTimeDataFrame()[:10]
df['id1'] = (df['A'] > 0).astype(int)
df['id2'] = (df['B'] > 0).astype(int)
molten1 = melt(df)
molten2 = melt(df, id_vars=['id1'])
molten3 = | melt(df, id_vars=['id1', 'id2']) | pandas.core.reshape.melt |
# Note:
## What does this function do?:
### Changes the region in facebook adset data from state to city.
# For example: the region in the Facebook adset comes in as "Karnataka". This code maps it to "Bangalore" and returns the resulting dataframe
import pandas as pd
FB_raw_data_raw = pd.read_csv(r"D:\Python\Test\FB_campaign_data\FB_data.csv", engine = "python")
State_city_mapping = | pd.read_csv(r"D:\Python\Test\Mapping\Cities mapping\State_City_mapping.csv", engine = "python") | pandas.read_csv |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
Date: 2021/12/31 13:19
Desc: Constituent stocks of stock indexes. Sina exposes two interfaces; the old one is used here:
New interface: http://vip.stock.finance.sina.com.cn/mkt/#zhishu_000001
Old interface: http://vip.stock.finance.sina.com.cn/corp/view/vII_NewestComponent.php?page=1&indexid=399639
"""
import math
from io import BytesIO
import pandas as pd
import requests
from bs4 import BeautifulSoup
from tqdm import tqdm
from akshare.utils import demjson
def index_stock_cons_sina(symbol: str = "000300") -> pd.DataFrame:
"""
    Sina's new stock-index constituents page; this interface currently covers only a limited number of indexes
    http://vip.stock.finance.sina.com.cn/mkt/#zhishu_000040
    :param symbol: index code
    :type symbol: str
    :return: constituent stocks of the index
:rtype: pandas.DataFrame
"""
if symbol == "000300":
symbol = "hs300"
url = "http://vip.stock.finance.sina.com.cn/quotes_service/api/json_v2.php/Market_Center.getHQNodeStockCountSimple"
params = {"node": f"{symbol}"}
r = requests.get(url, params=params)
page_num = math.ceil(int(r.json()) / 80) + 1
temp_df = pd.DataFrame()
for page in range(1, page_num):
url = "http://vip.stock.finance.sina.com.cn/quotes_service/api/json_v2.php/Market_Center.getHQNodeData"
params = {
"page": str(page),
"num": "80",
"sort": "symbol",
"asc": "1",
"node": "hs300",
"symbol": "",
"_s_r_a": "init",
}
r = requests.get(url, params=params)
temp_df = temp_df.append(
pd.DataFrame(demjson.decode(r.text)), ignore_index=True
)
return temp_df
url = "http://vip.stock.finance.sina.com.cn/quotes_service/api/json_v2.php/Market_Center.getHQNodeDataSimple"
params = {
"page": 1,
"num": "3000",
"sort": "symbol",
"asc": "1",
"node": f"zhishu_{symbol}",
"_s_r_a": "setlen",
}
r = requests.get(url, params=params)
return pd.DataFrame(demjson.decode(r.text))
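def _example_index_stock_cons_sina():
    """Illustrative usage sketch, not part of akshare: fetch the CSI 300
    constituents (requires network access to the Sina endpoints)."""
    hs300_cons = index_stock_cons_sina(symbol="000300")
    return hs300_cons.head()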
def index_stock_info() -> pd.DataFrame:
"""
    JoinQuant - index data - list of indexes
    https://www.joinquant.com/data/dict/indexData
    :return: dataframe of index information
:rtype: pandas.DataFrame
"""
index_df = | pd.read_html("https://www.joinquant.com/data/dict/indexData") | pandas.read_html |
__author__ = "<NAME>"
__license__ = "GPL"
__credits__ = ["<NAME>", "<NAME>", "<NAME>",
"<NAME>"]
__maintainer__ = "Md. <NAME>"
__email__ = "<EMAIL>"
__status__ = "Prototype"
# Importing the libraries
import pandas as pd
import glob
import os
import re
# Utility function to find the Process IDs and Process Names that are flagged
def find_malicious_logs(dataset):
# Initialize return lists
process_ids = []
process_names = []
dataset = dataset.drop(dataset[(dataset['doc_files_flag'] != 1)].index)
process_ids = dataset['process_id'].tolist()
process_names = dataset['process_name'].tolist()
return process_ids, process_names
# Utility function to label log entries as malicious/benign using the flagged Process IDs and Process Names
def set_malicious_logs_labels(dataset, process_ids, process_names, ransomware_hash):
dataset['family_id'] = ransomware_hash
malicious_copy_dataset = dataset[(dataset['process_id'].isin(process_ids) & dataset['process_name'].isin(process_names))]
malicious_copy_dataset['class'] = 1
benign_copy_dataset = dataset[~(dataset['process_id'].isin(process_ids) & dataset['process_name'].isin(process_names))]
benign_copy_dataset['class'] = 0
frames = [malicious_copy_dataset, benign_copy_dataset]
return pd.concat(frames)
def generate_time_chunks(dataset, start_time, end_time, interval, ransomware_hash):
chunk_size = round((end_time - start_time) / interval)
#Create Directory
file_path = "/Time_Interval_Dataset/" + str(ransomware_hash) #+ "/" + str(round(interval / 60)) + "_mins"
try:
if(os.path.isdir(os.getcwd() + file_path) != True):
os.mkdir(os.getcwd() + file_path)
print(str(os.getcwd() + file_path) + " is created.")
file_path = file_path + "/" + str(round(interval / 60)) + "_mins"
if(os.path.isdir(os.getcwd() + file_path)) != True:
os.mkdir(os.getcwd() + file_path)
print(str(os.getcwd() + file_path) + " is created.")
except OSError:
print("Creation of the directory %s failed" % file_path)
return
file_name = str(ransomware_hash) + "_" + str(round(interval / 60)) + "_mins"
str_log = ""
start_time_index = start_time
for i in range(chunk_size):
if(i != chunk_size - 1):
end_time_index = start_time_index + interval
else:
end_time_index = end_time + 1
temp_dataset_copy = labeled_processed_data[((labeled_processed_data.pre_operation_time >= start_time_index) & (labeled_processed_data.pre_operation_time < end_time_index))]
# Dump the file
temp_dataset_copy.to_pickle(str(os.getcwd()) + str(file_path) + "/" + str(file_name) + "_" + str(i+1) + ".pkl.gz", compression='gzip')
str_log = str_log + str(i+1) + "\t" + str(start_time_index) + "\t" + str(end_time_index) + "\t" + str(temp_dataset_copy.shape) + "\n"
start_time_index = start_time + ((i + 1) * interval)
print(str_log)
with open(str(os.getcwd()) + str(file_path) + "/" + str(file_name) + ".txt", "w") as text_file:
print(str_log, file=text_file)
if __name__ == '__main__':
pwd = os.getcwd()
os.chdir('./Dataset/ransomware-irp-logs/')
# Storing the file names for all the aggregated datasets
all_filenames_aggregated = [i for i in glob.glob('*_aggregated*')]
all_filenames_aggregated = sorted(all_filenames_aggregated)
all_filenames_processed = [i for i in glob.glob('*_processed.*')]
all_filenames_processed = sorted(all_filenames_processed)
file_name_aggregated = all_filenames_aggregated[0]
file_name_processed = all_filenames_processed[0]
ransomware_hash = file_name_aggregated.split('_')[0]
"""all_filenames_labeled = [i for i in glob.glob('*labeled*')]
all_filenames_labeled = [filename.split('_')[0] for filename in all_filenames_labeled]
if ransomware_hash in all_filenames_labeled:
exit(1)"""
try:
aggegated_dataset = pd.read_csv(file_name_aggregated, compression='zip', header=0, sep=',', quotechar='"')
try:
processed_dataset = pd.read_csv(file_name_processed, compression='zip', header=0, sep=',', quotechar='"')
except:
processed_dataset = pd.read_pickle(file_name_processed, compression='zip')
except:
aggegated_dataset = pd.read_csv(file_name_aggregated, compression='gzip', header=0, sep=',', quotechar='"')
try:
processed_dataset = | pd.read_csv(file_name_processed, compression='gzip', header=0, sep=',', quotechar='"') | pandas.read_csv |
# Importing required libraries
import pandas as pd
from sklearn.utils import shuffle
def get_features(true_dataset_path, fake_dataset_path):
# Reading dataset
df_true = pd.read_csv(true_dataset_path)
df_fake = | pd.read_csv(fake_dataset_path) | pandas.read_csv |
import os
import random
import time
import pandas as pd
from pinyinlib import Pinyin
# Reset the random seed
random.seed(time.time())
# Read the source table
df = pd.read_table(os.path.dirname(__file__)+'/input.txt',sep='!',encoding='gbk',low_memory=False)
# Generate the serial-number list
serial = []
for i in range(0, df.shape[0]):
serial.append(str(i+1).zfill(6))
dfo1 = pd.DataFrame({'序号': serial})
# Generate the account-holder name list
name = df.loc[:, '客户信息平台姓名'].values.tolist()
dfo2 = pd.DataFrame({'户名': name})
# Generate the ID-document type list
dfo3 = pd.DataFrame({'证件类型': ([10]*df.shape[0])})
# Generate the ID-document number list
id_number = list(map(str, df.loc[:, '客户信息平台证件号码'].values.tolist()))
dfo4 = pd.DataFrame({'证件号码': id_number})
# Generate the romanized (pinyin) English names
english_name = []
for i in name:
english_name.append(Pinyin().get_pinyin_name(i, '', '', 'upper'))
dfo5 = pd.DataFrame({'英文名': english_name})
# Generate the gender list
gender = []
for i in id_number:
    # If this is a national ID card number, derive the gender from it
if(len(i)==18):
        # male: 1, female: 2, unknown: 0
if(int(i[-2]) % 2 == 0):
gender.append(2)
else:
gender.append(1)
else:
gender.append(0)
dfo6 = pd.DataFrame({'性别': gender})
# Generate the nationality/region list
dfo7 = pd.DataFrame({'国籍/地区': (['CN']*df.shape[0])})
# Generate the landline list; note that generated landline numbers do not contain the "-" area-code separator
landline = []
for i in list(map(str, df.loc[:, '固话'].values.tolist())):
    # Case: the record already has a landline number
if(i != 'nan'):
        # Strip any area-code separator and stray decimal part
i = i.replace('-', '').replace('.0', '')
        # If the number has 7 digits, prepend the area code 0728; if it is one digit short (with or without area code) append a random digit, and if one digit long drop the last digit - numbers completed at random get a trailing "~"
if(len(i) == 7):
landline.append('0728'+i)
elif(len(i) == 6 or len(i) == 10):
landline.append(i+str(random.randint(0, 9))+'~')
elif(len(i) == 8 or len(i) == 12):
landline.append(i[0:-1])
else:
landline.append(i)
    # Case: the record has no landline number
else:
        # Generate one at random: currently only urban numbers starting with 5 and rural numbers starting with 4 exist, so the first digit is 4 or 5, the second digit is non-zero, and the remaining five digits are random - randomly generated numbers get a trailing "~"
landline.append('0728'+str(random.randint(4, 5)) +
str(random.randint(1, 9))+str(random.randint(0, 99999)).zfill(5)+'~')
dfo8 = pd.DataFrame({'固定电话': landline})
# Generate the mobile-phone number list
phone_number=[]
for i in list(map(str, df.loc[:, '手机号'].values.tolist())):
    # Case: the record already has a mobile number
if(i!='nan'):
phone_number.append(i)
    # Case: the record has no mobile number
else:
phone_number.append('')
dfo9 = | pd.DataFrame({'手机号': phone_number}) | pandas.DataFrame |
from json import load as json_load
from pprint import pprint
from matplotlib.pyplot import subplots, tight_layout, show
from matplotlib.colors import ListedColormap
from numpy import mod
from pandas import DataFrame, set_option
set_option('display.max_columns', None)
set_option('display.precision', 3)
# Load and print scores
with open('../Data/NER/models_scores.json', 'r') as f:
scores = json_load(f)
pprint(scores)
# Build dataframes to show the scores better and print them
n_batches = 5
tr_te = ['test', 'train']
model_names = ['en_' + str(b) for b in range(n_batches)] + ['es_' + str(b) for b in range(n_batches)]
gen_metric_names = ['acc', 'ents_p', 'ents_r', 'ents_f']
entities = ['N', 'S', 'M', 'PC', 'SP', 'C']
spe_metric_names = ['p', 'r', 'f']
gen_metrics = {(met, tt): [scores[tt][mn][met] for mn in model_names]
for met in gen_metric_names for tt in tr_te}
spe_metrics = {(ent, met, tt): [scores[tt][mn]['ents_per_type'][ent][met] for mn in model_names]
for ent in entities for met in spe_metric_names for tt in tr_te}
gen = | DataFrame(gen_metrics, index=model_names) | pandas.DataFrame |
import numpy as np
import pandas as pd
import torch
import torch.nn.functional as F
import torch.nn as nn
import torch.autograd as autograd
import torch.optim as optim
import sys
import argparse
import json
import multiprocessing
import os
import time
import logging
from pandas.api.types import is_object_dtype
LOG_FORMAT = "%(asctime)s - %(levelname)s - %(message)s"
DATE_FORMAT = "%m/%d/%Y %H:%M:%S %p"
def feature_encoder(label_data):
# transform categorical columns into numerical columns
from sklearn.preprocessing import LabelEncoder
from pandas.api.types import is_object_dtype
label_con_data = label_data.copy()
gens = {}
for column in label_data.columns:
if is_object_dtype(label_data[column]):
gen_le = LabelEncoder()
gen_labels = gen_le.fit_transform(list(label_data[column]))
label_con_data.loc[:, column] = gen_labels # to label from 0
gens[column] = gen_le # save the transformer to inverse
# return a DataFrame
return label_con_data, gens
def consistency_loss(y_adapt, y_train, y):
loss1 = F.binary_cross_entropy(y_train, y)
loss2 = F.binary_cross_entropy(y_adapt, y)
    loss3 = F.mse_loss(y_adapt, y_train)  # consistency term between the two heads (author note: still to be confirmed)
return loss1, loss2, loss3
def no_consistency_loss(y_adapt, y_train, y):
loss1 = F.binary_cross_entropy(y_train, y)
loss2 = F.binary_cross_entropy(y_adapt, y)
return loss1, loss2
class MLP(nn.Module):
def __init__(self, shape, multiclass=False):
super(MLP, self).__init__()
self.net = nn.Sequential(
nn.Linear(shape, 100),
nn.BatchNorm1d(100),
nn.ReLU(),
nn.Linear(100, 50),
nn.BatchNorm1d(50),
nn.ReLU(),
nn.Linear(50, 1),
nn.Sigmoid()
)
def forward(self, x):
return self.net(x)
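def _example_mlp_forward():
    """Illustrative usage sketch, not part of the original script: push one
    random batch through the MLP on CPU to show the expected shapes."""
    model = MLP(shape=20)
    model.eval()  # use the BatchNorm running statistics
    x = torch.randn(8, 20)
    with torch.no_grad():
        probs = model(x)  # shape (8, 1), values in [0, 1]
    return probs.shape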
def train_simple(model, train_x, train_y, test_x, test_y, valid_x, valid_y, l2, epochs, lr):
model.train()
optimizer = optim.Adam(model.parameters(), lr=lr, weight_decay=l2)
mlp_vals = []
num = epochs/10
for epoch in range(epochs):
y_ = model(train_x)
loss = F.binary_cross_entropy(y_, train_y)
optimizer.zero_grad()
loss.backward()
optimizer.step()
if epoch % 100 == 0 and epoch != 0:
model.eval()
print("iterator {}, Loss:{}".format(epoch, loss.data))
logging.info("iterator {}, Loss:{}".format(epoch, loss.data))
mlp_prob_valid_y = model(valid_x)
mlp_pred_valid_y = (mlp_prob_valid_y > 0.5) + 0
mlp_pred_valid_y = mlp_pred_valid_y.cpu()
mlp_val_valid = evaluation(valid_y.cpu(), mlp_pred_valid_y.cpu(), mlp_prob_valid_y.cpu())
mlp_prob_test_y = model(test_x)
mlp_pred_test_y = (mlp_prob_test_y > 0.5) + 0
mlp_pred_test_y = mlp_pred_test_y.cpu()
mlp_val_test = evaluation(test_y.cpu(), mlp_pred_test_y.cpu(), mlp_prob_test_y.cpu()) #mlp_thres, mlp_para)
mlp_vals.append(["l2={},epoch={}".format(l2, epoch)]+mlp_val_valid+mlp_val_test)
model.train()
model.eval()
return model, mlp_vals
def evaluation(y_true, y_pred, y_prob, threshold=None, parameters=None):
from sklearn import metrics
f1_score = metrics.f1_score(y_true, y_pred)
precision = metrics.precision_score(y_true, y_pred)
recall = metrics.recall_score(y_true, y_pred)
balanced_f1_score = metrics.f1_score(y_true, y_pred, average='weighted')
balanced_precision = metrics.precision_score(y_true, y_pred, average='weighted')
balanced_recall = metrics.recall_score(y_true, y_pred, average='weighted')
return [f1_score]
def train_classifiers_simple(config, train_data, test_data):
data = pd.concat([train_data, test_data], keys=['train', 'test'])
featured_con_data, gens = feature_encoder(data)
label_column = config["label_column"]
train_data = featured_con_data.loc['train']
test_data = featured_con_data.loc['test']
X_train = train_data.drop(axis=1, columns=[label_column])
train_Y = train_data[label_column]
X_test = test_data.drop(axis=1, columns=[label_column])
test_Y = test_data[label_column]
from sklearn import preprocessing
length = int(len(X_train)/4)
scaled_test_x = preprocessing.scale(X_test)
scaled_train_x = preprocessing.scale(X_train.iloc[:length*3,:])
scaled_valid_x = preprocessing.scale(X_train.iloc[length*3:,:])
train_y = train_Y.iloc[:length*3]
valid_y = train_Y.iloc[length*3:]
print(scaled_valid_x.shape, scaled_train_x.shape, scaled_test_x.shape)
scaled_valid_x = torch.from_numpy(scaled_valid_x).float().cuda()
scaled_train_x = torch.from_numpy(scaled_train_x).float().cuda()
train_y = torch.from_numpy(train_y.values).float().cuda()
valid_y = torch.from_numpy(valid_y.values).float().cuda()
scaled_test_x = torch.from_numpy(scaled_test_x).float().cuda()
test_y = torch.from_numpy(test_Y.values).float().cuda()
mlp_vals = []
for l2 in config["l2"]:
model = MLP(config["input_shape"])
model.cuda()
model, mlp_val = train_simple(model, scaled_train_x, train_y, scaled_test_x, test_y, scaled_valid_x, valid_y, l2, config["epoch"], 0.01)
if len(mlp_vals) == 0:
mlp_vals = mlp_val
else:
mlp_vals = mlp_vals + mlp_val
return mlp_vals
def thread_run(config):
logging.basicConfig(filename=config["output"]+'_log.log', level=logging.DEBUG, format=LOG_FORMAT, datefmt=DATE_FORMAT)
train_data = | pd.read_csv(config["train_data"]) | pandas.read_csv |
"""
1. Technical indicator walkthrough.
WeChat: bitquant51
Huobi exchange referral code: asd43
Binance referral code: 22795115
Binance referral link: https://www.binance.co/?ref=22795115
Gate.io exchange referral code: 1100714
BitMEX exchange referral code: SzZBil, or https://www.bitmex.com/register/SzZBil
Code repository: https://github.com/ramoslin02/51bitqunt
Video updates: released on YouTube first - search for 51bitquant and follow me
"""
import ccxt
import numpy as np
import pandas as pd
import talib as ta
import time
| pd.set_option('expand_frame_repr', False) | pandas.set_option |
"""
Predict Active Wave Breaking
All you need is a pre-trained model and a series of images
For example:
folder
├───images
├───img_0001.png
├───img_0002.png
├───...
├───img_000X.png
You will need to download a pre-trained model if you don't have one.
Trained on 10k samples:
https://drive.google.com/file/d/1FOXj-uJdXtyzxOVRHHuj1X8Xx0B8jaiT/view?usp=sharing
PROGRAM : predict.py
PURPOSE  : classify wave breaking using a convnet
AUTHOR : <NAME>
EMAIL : <EMAIL>
V1.0 : 16/07/2020 [<NAME>]
"""
import argparse
import numpy as np
import tensorflow as tf
import pandas as pd
import pathlib
from os.path import basename
from tensorflow.keras.preprocessing.image import ImageDataGenerator
if __name__ == '__main__':
print("\nClassifiying wave breaking data, please wait...\n")
# Argument parser
parser = argparse.ArgumentParser()
# input configuration file
parser.add_argument("--model", "-M",
action="store",
dest="model",
required=True,
help="Input model in .h5 format.",)
# input model
parser.add_argument("--data", "-data",
action="store",
dest="data",
required=True,
help="Input path with image data.",)
parser.add_argument("--threshold", "-trx",
action="store",
dest="TRX",
default=0.5,
required=False,
help="Probability threshold for classification.")
# output model
parser.add_argument("--output", "-o",
action="store",
dest="output",
required=True,
help="Output file (csv).",)
args = parser.parse_args()
# --- data input ---
data_dir = args.data
data_dir = pathlib.Path(data_dir)
# Fix batch_size at 1. Waste of resources but makes my life easier
BATCH_SIZE = 1
# --- model ---
model = tf.keras.models.load_model(args.model)
inp_shape = model.input_shape
img_height = inp_shape[1] # image height for all images
img_width = inp_shape[2] # image width for all images
datagen = ImageDataGenerator(rescale=1. / 255.)
print("\n Fitting the teset data generator:\n")
data_gen = datagen.flow_from_directory(
directory=str(data_dir), batch_size=BATCH_SIZE, shuffle=False,
target_size=(img_height, img_width), class_mode='binary')
# predict on the test data
print("\n Prediction loop:\n")
probs = []
files = []
k = 0
for step in range(data_gen.n // BATCH_SIZE):
print(" - step {} of {}".format(
step + 1, data_gen.n // BATCH_SIZE), end="\r")
# classify
X, y = data_gen.next()
yh = model.predict(X)
probs.append(yh)
# file name
fname = basename(data_gen.filenames[k])
files.append(fname)
k += 1
# predicted labels
TRX = float(args.TRX)
yhat = np.squeeze(probs)
ypred = np.zeros(yhat.shape)
ypred[yhat > TRX] = 1
# build a dataframe
df = | pd.DataFrame(ypred, columns=["label"]) | pandas.DataFrame |
from peakaboo.peak_classify import data_grouping
from peakaboo.peak_classify import cluster_classifier
import numpy as np
import pandas as pd
def test_data_grouping():
index_df = np.zeros((2, 2))
height_df = pd.DataFrame([1, 2, 3])
fwhm_df = pd.DataFrame([4, 5, 6])
threshold = 1
try:
data_grouping(index_df, height_df, fwhm_df, threshold)
except AttributeError:
pass
else:
print('Incorrect data type passed', 'Check peak_finding_master output')
index_df = pd.DataFrame()
height_df = pd.DataFrame([1, 2, 3])
fwhm_df = pd.DataFrame([4, 5, 6])
threshold = 1
t = data_grouping(index_df, height_df, fwhm_df, threshold)
assert len(t) == 0, "Index data frame is empty"
index_df = pd.DataFrame([1, 2, 3])
height_df = pd.DataFrame()
fwhm_df = | pd.DataFrame([4, 5, 6]) | pandas.DataFrame |
import json
from elasticsearch import Elasticsearch
from elasticsearch import logger as es_logger
from collections import defaultdict, Counter
import re
import os
from pathlib import Path
from datetime import datetime, date
# Preprocess terms for TF-IDF
import numpy as np
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
from num2words import num2words
# end of preprocess
# LDA
from gensim import corpora, models
import pyLDAvis.gensim
# print in color
from termcolor import colored
# end LDA
import pandas as pd
import geopandas
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from nltk.corpus import wordnet
# SPARQL
import sparql
# progress bar
from tqdm import tqdm
# ploting
import matplotlib.pyplot as plt
from matplotlib_venn_wordcloud import venn3_wordcloud
# multiprocessing
# BERT
from transformers import pipeline
# LOG
import logging
from logging.handlers import RotatingFileHandler
def biotexInputBuilder(tweetsofcity):
"""
    Build and save a file formatted for Biotex analysis
:param tweetsofcity: dictionary of { tweets, created_at }
:return: none
"""
biotexcorpus = []
for city in tweetsofcity:
# Get all tweets for a city :
listOfTweetsByCity = [tweets['tweet'] for tweets in tweetsofcity[city]]
# convert this list in a big string of tweets by city
document = '\n'.join(listOfTweetsByCity)
biotexcorpus.append(document)
biotexcorpus.append('\n')
biotexcorpus.append("##########END##########")
biotexcorpus.append('\n')
textToSave = "".join(biotexcorpus)
corpusfilename = "elastic-UK"
biotexcopruspath = Path('elasticsearch/analyse')
biotexCorpusPath = str(biotexcopruspath) + '/' + corpusfilename
print("\t saving file : " + str(biotexCorpusPath))
f = open(biotexCorpusPath, 'w')
f.write(textToSave)
f.close()
def preprocessTerms(document):
"""
    Pre-process terms according to
https://towardsdatascience.com/tf-idf-for-document-ranking-from-scratch-in-python-on-real-world-dataset-796d339a4089
    /!\ Be careful: it has a long execution time
:param:
:return:
"""
def lowercase(t):
return np.char.lower(t)
def removesinglechar(t):
words = word_tokenize(str(t))
new_text = ""
for w in words:
if len(w) > 1:
new_text = new_text + " " + w
return new_text
def removestopwords(t):
stop_words = stopwords.words('english')
words = word_tokenize(str(t))
new_text = ""
for w in words:
if w not in stop_words:
new_text = new_text + " " + w
return new_text
def removeapostrophe(t):
return np.char.replace(t, "'", "")
def removepunctuation(t):
symbols = "!\"#$%&()*+-./:;<=>?@[\]^_`{|}~\n"
for i in range(len(symbols)):
data = np.char.replace(t, symbols[i], ' ')
data = np.char.replace(t, " ", " ")
data = np.char.replace(t, ',', '')
return data
def convertnumbers(t):
tokens = word_tokenize(str(t))
new_text = ""
for w in tokens:
try:
w = num2words(int(w))
except:
a = 0
new_text = new_text + " " + w
new_text = np.char.replace(new_text, "-", " ")
return new_text
doc = lowercase(document)
doc = removesinglechar(doc)
doc = removestopwords(doc)
doc = removeapostrophe(doc)
doc = removepunctuation(doc)
doc = removesinglechar(doc) # apostrophe create new single char
return doc
def biotexAdaptativeBuilderAdaptative(listOfcities='all', spatialLevel='city', period='all', temporalLevel='day'):
"""
    Build an input Biotex file, formatted at the requested (spatial/temporal) level, by concatenating cities' tweets
:param listOfcities:
:param spatialLevel:
:param period:
:param temporalLevel:
:return:
"""
matrixAggDay = pd.read_csv("elasticsearch/analyse/matrixAggDay.csv")
# concat date with city
matrixAggDay['city'] = matrixAggDay[['city', 'day']].agg('_'.join, axis=1)
del matrixAggDay['day']
## change index
matrixAggDay.set_index('city', inplace=True)
matrixFiltred = spatiotemporelFilter(matrix=matrixAggDay, listOfcities=listOfcities,
spatialLevel='state', period=period)
## Pre-process :Create 4 new columns : city, State, Country and date
def splitindex(row):
return row.split("_")
matrixFiltred["city"], matrixFiltred["state"], matrixFiltred["country"], matrixFiltred["date"] = \
zip(*matrixFiltred.index.map(splitindex))
# Agregate by level
if spatialLevel == 'city':
# do nothing
pass
elif spatialLevel == 'state':
matrixFiltred = matrixFiltred.groupby('state')['tweetsList'].apply('.\n'.join).reset_index()
elif spatialLevel == 'country':
matrixFiltred = matrixFiltred.groupby('country')['tweetsList'].apply('.\n'.join).reset_index()
# Format biotex input file
biotexcorpus = []
for index, row in matrixFiltred.iterrows():
document = row['tweetsList']
biotexcorpus.append(document)
biotexcorpus.append('\n')
biotexcorpus.append("##########END##########")
biotexcorpus.append('\n')
textToSave = "".join(biotexcorpus)
corpusfilename = "elastic-UK-adaptativebiotex"
biotexcopruspath = Path('elasticsearch/analyse')
biotexCorpusPath = str(biotexcopruspath) + '/' + corpusfilename
print("\t saving file : " + str(biotexCorpusPath))
f = open(biotexCorpusPath, 'w')
f.write(textToSave)
f.close()
def ldHHTFIDF(listOfcities):
""" /!\ for testing only !!!!
    Only works if the number of states equals the number of cities,
    i.e. for the UK, working on 4 states with their capitals...
"""
print(colored("------------------------------------------------------------------------------------------", 'red'))
print(colored(" - UNDER DEV !!! - ", 'red'))
print(colored("------------------------------------------------------------------------------------------", 'red'))
tfidfwords = pd.read_csv("elasticsearch/analyse/TFIDFadaptativeBiggestScore.csv", index_col=0)
texts = pd.read_csv("elasticsearch/analyse/matrixAggDay.csv", index_col=1)
listOfStatesTopics = []
for i, citystate in enumerate(listOfcities):
city = str(listOfcities[i].split("_")[0])
state = str(listOfcities[i].split("_")[1])
# print(str(i) + ": " + str(state) + " - " + city)
# tfidfwords = [tfidfwords.iloc[0]]
dictionary = corpora.Dictionary([tfidfwords.loc[state]])
textfilter = texts.loc[texts.index.str.startswith(city + "_")]
corpus = [dictionary.doc2bow(text.split()) for text in textfilter.tweetsList]
# Find the better nb of topics :
        ## Coherence measure C_v : Normalised Pointwise Mutual Information (NPMI : co-occurrence probability)
        ## i.e. degree of semantic similarity between high-scoring words in the topic
## and cosine similarity
nbtopics = range(2, 35)
coherenceScore = pd.Series(index=nbtopics, dtype=float)
for n in nbtopics:
lda = models.ldamodel.LdaModel(corpus=corpus, id2word=dictionary, num_topics=n)
# Compute coherence score
## Split each row values
textssplit = textfilter.tweetsList.apply(lambda x: x.split()).values
coherence = models.CoherenceModel(model=lda, texts=textssplit, dictionary=dictionary, coherence='c_v')
coherence_result = coherence.get_coherence()
coherenceScore[n] = coherence_result
# print("level: " + str(state) + " - NB: " + str(n) + " - coherence LDA: " + str(coherenceScore[n]))
# Relaunch LDA with the best nbtopic
nbTopicOptimal = coherenceScore.idxmax()
lda = models.ldamodel.LdaModel(corpus=corpus, id2word=dictionary, num_topics=nbTopicOptimal)
# save and visualisation
## save
for topic, listwords in enumerate(lda.show_topics()):
stateTopic = {'state': state}
ldaOuput = str(listwords).split(" + ")[1:]
for i, word in enumerate(ldaOuput):
# reformat lda output for each word of topics
stateTopic[i] = ''.join(x for x in word if x.isalpha())
listOfStatesTopics.append(stateTopic)
## Visualisation
try:
vis = pyLDAvis.gensim.prepare(lda, corpus, dictionary)
pyLDAvis.save_html(vis, "elasticsearch/analyse/lda/lda-tfidf_" + str(state) + ".html")
except:
print("saving pyLDAvis failed. Nb of topics for " + state + ": " + nbTopicOptimal)
# Save file
listOfStatesTopicsCSV = pd.DataFrame(listOfStatesTopics)
listOfStatesTopicsCSV.to_csv("elasticsearch/analyse/lda/topicBySate.csv")
def wordnetCoverage(pdterms):
"""
    Add an additional boolean column indicating whether each term is in WordNet
    :param pdterms: pd.DataFrame of terms. Must have a column named "terms"
    :return: pdterms with an additional boolean column flagging terms found in WordNet
"""
# Add a wordnet column boolean type : True if word is in wordnet, False otherwise
pdterms['wordnet'] = False
# Loop on terms and check if there are in wordnet
for index, row in pdterms.iterrows():
if len(wordnet.synsets(row['terms'])) != 0:
pdterms.at[index, 'wordnet'] = True
return pdterms
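def _example_wordnetCoverage():
    """Illustrative usage sketch, not part of the original module: flag which
    of a few made-up terms exist in WordNet (needs the NLTK wordnet corpus)."""
    terms = pd.DataFrame({'terms': ['fever', 'coronavirus', 'qwzrtk']})
    return wordnetCoverage(terms)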
def sparqlquery(thesaurus, term):
"""
    SPARQL query. This method has been factored out so it can be reused with multiprocessing
    :param thesaurus: which thesaurus to query? agrovoc or mesh
    :param term: term to align with the thesaurus
    :return: SPARQL query result
"""
# Define MeSH sparql endpoint and query
endpointmesh = 'http://id.nlm.nih.gov/mesh/sparql'
qmesh = (
'PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>'
'PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>'
'PREFIX xsd: <http://www.w3.org/2001/XMLSchema#>'
'PREFIX owl: <http://www.w3.org/2002/07/owl#>'
'PREFIX meshv: <http://id.nlm.nih.gov/mesh/vocab#>'
'PREFIX mesh: <http://id.nlm.nih.gov/mesh/>'
'PREFIX mesh2020: <http://id.nlm.nih.gov/mesh/2020/>'
'PREFIX mesh2019: <http://id.nlm.nih.gov/mesh/2019/>'
'PREFIX mesh2018: <http://id.nlm.nih.gov/mesh/2018/>'
''
'ask '
'FROM <http://id.nlm.nih.gov/mesh> '
'WHERE { '
' ?meshTerms a meshv:Term .'
' ?meshTerms meshv:prefLabel ?label .'
' FILTER(lang(?label) = "en").'
' filter(REGEX(?label, "^' + str(term) + '$", "i"))'
''
'}'
)
# Define agrovoc sparql endpoint and query
endpointagrovoc = 'http://agrovoc.uniroma2.it/sparql'
qagrovoc = ('PREFIX skos: <http://www.w3.org/2004/02/skos/core#> '
'PREFIX skosxl: <http://www.w3.org/2008/05/skos-xl#> '
'ask WHERE {'
'?myterm skosxl:literalForm ?labelAgro.'
'FILTER(lang(?labelAgro) = "en").'
'filter(REGEX(?labelAgro, "^' + str(term) + '(s)*$", "i"))'
'}')
# query mesh
if thesaurus == "agrovoc":
q = qagrovoc
endpoint = endpointagrovoc
elif thesaurus == "mesh":
q = qmesh
endpoint = endpointmesh
else:
raise Exception('Wrong thesaurus given')
try:
result = sparql.query(endpoint, q, timeout=30)
    # The endpoint can occasionally fail on a request:
    # SparqlException is raised by sparql-client if the timeout is reached;
    # other exceptions (not yet identified) occur when the endpoint returns a malformed answer
except:
result = "endpoint error"
return result
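def _example_sparqlquery():
    """Illustrative usage sketch, not part of the original module: check
    whether a term is known to MeSH and AGROVOC (requires network access)."""
    for thesaurus in ('mesh', 'agrovoc'):
        result = sparqlquery(thesaurus, 'fever')
        if result == "endpoint error":
            print(thesaurus, "-> endpoint error")
        else:
            print(thesaurus, "->", result.hasresult())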
def agrovocCoverage(pdterms):
"""
    Add an additional boolean column indicating whether the term is in AGROVOC
    :param pdterms: same as wordnetCoverage
    :return: same as wordnetCoverage
"""
# Log number of error raised by sparql endpoint
endpointerror = 0
# Add a agrovoc column boolean type : True if terms is in Agrovoc
pdterms['agrovoc'] = False
# Loop on term
for index, row in tqdm(pdterms.iterrows(), total=pdterms.shape[0], desc="agrovoc"):
# Build SPARQL query
term = row['terms']
result = sparqlquery('agrovoc', term)
if result == "endpoint error":
endpointerror += 1
pdterms.at[index, 'agrovoc'] = "Error"
elif result.hasresult():
pdterms.at[index, 'agrovoc'] = True
print("Agrovoc number of error: " + str(endpointerror))
return pdterms
def meshCoverage(pdterms):
"""
    Add an additional boolean column indicating whether the term is in MeSH
    :param pdterms: same as wordnetCoverage
    :return: same as wordnetCoverage
"""
# Log number of error raised by sparql endpoint
endpointerror = 0
# Add a MeSH column boolean type : True if terms is in Mesh
pdterms['mesh'] = False
# Loop on term with multiprocessing
for index, row in tqdm(pdterms.iterrows(), total=pdterms.shape[0], desc="mesh"):
# Build SPARQL query
term = row['terms']
result = sparqlquery('mesh', term)
if result == "endpoint error":
endpointerror += 1
pdterms.at[index, 'mesh'] = "Error"
elif result.hasresult():
pdterms.at[index, 'mesh'] = True
print("Mesh number of error: " + str(endpointerror))
return pdterms
def compareWithHTFIDF(number_of_term, dfToCompare, repToSave):
"""
Only used for ECIR2020 not for NLDB2021
:param number_of_term:
:param dfToCompare:
:param repToSave:
:return:
"""
# Stack / concatenate all terms from all states in one column
HTFIDFUniquedf = concatenateHTFIDFBiggestscore()[:number_of_term]
# select N first terms
dfToCompare = dfToCompare[:number_of_term]
common = pd.merge(dfToCompare, HTFIDFUniquedf, left_on='terms', right_on='terms', how='inner')
# del common['score']
common = common.terms.drop_duplicates()
common = common.reset_index()
del common['index']
common.to_csv("elasticsearch/analyse/" + repToSave + "/common.csv")
# Get what terms are specific to Adapt-TF-IDF
print(dfToCompare)
HTFIDFUniquedf['terms'][~HTFIDFUniquedf['terms'].isin(dfToCompare['terms'])].dropna()
condition = HTFIDFUniquedf['terms'].isin(dfToCompare['terms'])
specificHTFIDF = HTFIDFUniquedf.drop(HTFIDFUniquedf[condition].index)
specificHTFIDF = specificHTFIDF.reset_index()
del specificHTFIDF['index']
specificHTFIDF.to_csv("elasticsearch/analyse/" + repToSave + "/specific-H-TFIDF.csv")
# Get what terms are specific to dfToCompare
dfToCompare['terms'][~dfToCompare['terms'].isin(HTFIDFUniquedf['terms'])].dropna()
condition = dfToCompare['terms'].isin(HTFIDFUniquedf['terms'])
specificdfToCompare = dfToCompare.drop(dfToCompare[condition].index)
specificdfToCompare = specificdfToCompare.reset_index()
del specificdfToCompare['index']
specificdfToCompare.to_csv("elasticsearch/analyse/" + repToSave + "/specific-reference.csv")
# Print stats
percentIncommon = len(common) / len(HTFIDFUniquedf) * 100
percentOfSpecificHTFIDF = len(specificHTFIDF) / len(HTFIDFUniquedf) * 100
print("Percent in common " + str(percentIncommon))
print("Percent of specific at H-TFIDF : " + str(percentOfSpecificHTFIDF))
def HTFIDF_comparewith_TFIDF_TF():
"""
Only used for ECIR2020 not for NLDB2021
.. warnings:: /!\ under dev !!!. See TODO below
.. todo::
- Remove filter and pass it as args :
- period
- list of Cities
- Pass files path in args
- Pass number of term to extract for TF-IDF and TF
Gives commons and specifics terms between H-TFIDF and TF & TF-IDF classics
Creates 6 csv files : 3 for each classical measures :
- Common.csv : list of common terms
- specific-htfidf : terms only in H-TF-IDF
- specific-reference : terms only in one classical measurs
"""
tfidfStartDate = date(2020, 1, 23)
tfidfEndDate = date(2020, 1, 30)
tfidfPeriod = pd.date_range(tfidfStartDate, tfidfEndDate)
listOfCity = ['London', 'Glasgow', 'Belfast', 'Cardiff']
# Query Elasticsearch to get all tweets from UK
tweets = elasticsearchQuery()
    # reorganize tweets (dict: tweets by city) into a dataframe (city and date)
col = ['tweets', 'created_at']
matrixAllTweets = pd.DataFrame(columns=col)
for tweetByCity in tweets.keys():
# pprint(tweets[tweetByCity])
# Filter cities :
if str(tweetByCity).split("_")[0] in listOfCity:
matrix = pd.DataFrame(tweets[tweetByCity])
matrixAllTweets = matrixAllTweets.append(matrix, ignore_index=True)
# NB : 28354 results instead of 44841 (from ES) because we work only on tweets with a city found
# Split datetime into date and time
matrixAllTweets["date"] = [d.date() for d in matrixAllTweets['created_at']]
matrixAllTweets["time"] = [d.time() for d in matrixAllTweets['created_at']]
# Filter by a period
mask = ((matrixAllTweets["date"] >= tfidfPeriod.min()) & (matrixAllTweets["date"] <= tfidfPeriod.max()))
matrixAllTweets = matrixAllTweets.loc[mask]
# Compute TF-IDF
vectorizer = TfidfVectorizer()
vectors = vectorizer.fit_transform(matrixAllTweets['tweet'])
feature_names = vectorizer.get_feature_names()
dense = vectors.todense()
denselist = dense.tolist()
## matrixTFIDF
TFIDFClassical = pd.DataFrame(denselist, columns=feature_names)
### Remove stopword
for term in TFIDFClassical.keys():
if term in stopwords.words('english'):
del TFIDFClassical[term]
# TFIDFClassical.to_csv("elasticsearch/analyse/TFIDFClassical/tfidfclassical.csv")
## Extract N TOP ranking score
top_n = 500
extractBiggest = TFIDFClassical.stack().nlargest(top_n)
    ### Reset index because stack creates a multi-index (2 levels: old index + terms)
extractBiggest = extractBiggest.reset_index(level=[0, 1])
extractBiggest.columns = ['old-index', 'terms', 'score']
del extractBiggest['old-index']
extractBiggest = extractBiggest.drop_duplicates(subset='terms', keep="first")
extractBiggest.to_csv("elasticsearch/analyse/TFIDFClassical/TFIDFclassicalBiggestScore.csv")
# Compare with H-TFIDF
repToSave = "TFIDFClassical"
compareWithHTFIDF(200, extractBiggest, repToSave)
# Compute TF
tf = CountVectorizer()
tf.fit(matrixAllTweets['tweet'])
tf_res = tf.transform(matrixAllTweets['tweet'])
listOfTermsTF = tf.get_feature_names()
countTerms = tf_res.todense()
## matrixTF
TFClassical = pd.DataFrame(countTerms.tolist(), columns=listOfTermsTF)
### Remove stopword
for term in TFClassical.keys():
if term in stopwords.words('english'):
del TFClassical[term]
### save in file
# TFClassical.to_csv("elasticsearch/analyse/TFClassical/tfclassical.csv")
## Extract N TOP ranking score
top_n = 500
extractBiggestTF = TFClassical.stack().nlargest(top_n)
    ### Reset index because stack creates a multi-index (2 levels : old index + terms)
extractBiggestTF = extractBiggestTF.reset_index(level=[0, 1])
extractBiggestTF.columns = ['old-index', 'terms', 'score']
del extractBiggestTF['old-index']
extractBiggestTF = extractBiggestTF.drop_duplicates(subset='terms', keep="first")
extractBiggestTF.to_csv("elasticsearch/analyse/TFClassical/TFclassicalBiggestScore.csv")
# Compare with H-TFIDF
repToSave = "TFClassical"
compareWithHTFIDF(200, extractBiggestTF, repToSave)
def concatenateHTFIDFBiggestscore():
"""
    This function returns a dataframe of one column containing all terms, i.e. it regroups all terms
    :param:
    :return: dataframe of 1 column with all terms from states stacked
"""
HTFIDF = pd.read_csv('elasticsearch/analyse/TFIDFadaptativeBiggestScore.csv', index_col=0)
    # Transpose A-TF-IDF (swap rows and columns)
HTFIDF = HTFIDF.transpose()
# group together all states' terms
HTFIDFUnique = pd.Series(dtype='string')
    ## loop over rows to append states' terms in order to take their rank into account
    ## If there are 4 states, it will add the 4 first terms of each state through iterrows
for index, row in HTFIDF.iterrows():
HTFIDFUnique = HTFIDFUnique.append(row.transpose(), ignore_index=True)
## drop duplicate
HTFIDFUnique = HTFIDFUnique.drop_duplicates()
# merge to see what terms have in common
## convert series into dataframe before merge
HTFIDFUniquedf = HTFIDFUnique.to_frame().rename(columns={0: 'terms'})
HTFIDFUniquedf['terms'] = HTFIDFUnique
return HTFIDFUniquedf
def spatiotemporelFilter(matrix, listOfcities='all', spatialLevel='city', period='all', temporalLevel='day'):
"""
Filter matrix with list of cities and a period
:param matrix:
:param listOfcities:
:param spatialLevel:
:param period:
:param temporalLevel:
    :return: filtered matrix
"""
if spatialLevel not in spatialLevels or temporalLevel not in temporalLevels:
print("wrong level, please double check")
return 1
# Extract cities and period
## cities
if listOfcities != 'all': ### we need to filter
### Initiate a numpy array of False
filter = np.zeros((1, len(matrix.index)), dtype=bool)[0]
for city in listOfcities:
### edit filter if index contains the city (for each city of the list)
filter += matrix.index.str.startswith(str(city) + "_")
matrix = matrix.loc[filter]
## period
if str(period) != 'all': ### we need a filter on date
datefilter = np.zeros((1, len(matrix.index)), dtype=bool)[0]
for date in period:
datefilter += matrix.index.str.contains(date.strftime('%Y-%m-%d'))
matrix = matrix.loc[datefilter]
return matrix
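# Hedged usage sketch for spatiotemporelFilter, not part of the original pipeline: it assumes
# a matrix indexed like "city_state_country_date" (as in matrixOccurence.csv) and the
# module-level spatialLevels / temporalLevels lists defined elsewhere in this script.
# example_matrix = pd.read_csv('elasticsearch/analyse/matrixOccurence.csv', index_col=0)
# example_period = pd.date_range(date(2020, 1, 23), date(2020, 1, 30))
# example_filtered = spatiotemporelFilter(example_matrix, listOfcities=['London', 'Glasgow'],
#                                         spatialLevel='city', period=example_period)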
def compute_occurence_word_by_state():
"""
    Count words for tweets aggregated by state.
    For each state, we concatenate all related tweets.
    Then we build a table :
        - columns : all words (our vocabulary)
        - rows : the 4 states of the UK
        - cells : occurrence of the word in the state
    :return: pd.DataFrame of word occurrences by state
"""
listOfCity = ['London', 'Glasgow', 'Belfast', 'Cardiff']
tfidfStartDate = date(2020, 1, 23)
tfidfEndDate = date(2020, 1, 30)
tfidfPeriod = pd.date_range(tfidfStartDate, tfidfEndDate)
## Compute a table : (row : state; column: occurence of each terms present in state's tweets)
es_tweets_results = pd.read_csv('elasticsearch/analyse/matrixOccurence.csv', index_col=0)
es_tweets_results_filtred = spatiotemporelFilter(es_tweets_results, listOfcities=listOfCity, spatialLevel='state',
period=tfidfPeriod)
## Aggregate by state
### Create 4 new columns : city, State, Country and date
def splitindex(row):
return row.split("_")
es_tweets_results_filtred["city"], es_tweets_results_filtred["state"], es_tweets_results_filtred["country"], \
es_tweets_results_filtred["date"] = zip(*es_tweets_results_filtred.index.map(splitindex))
es_tweets_results_filtred_aggstate = es_tweets_results_filtred.groupby("state").sum()
return es_tweets_results_filtred_aggstate
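# Hedged usage sketch (assumes elasticsearch/analyse/matrixOccurence.csv was already exported):
# occurrences_by_state = compute_occurence_word_by_state()
# print(occurrences_by_state.shape)  # one row per UK state, one column per term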
def get_tweets_by_terms(term):
"""
    Return tweets content containing the term for Eval 11
    Warning: Only works on
        - the spatial window : capital cities of the UK
        - the temporal window : 2020-01-22 to 2020-01-30
    Todo:
        - if you want to generalize this method to other spatial & temporal windows, you have to customize the
        Elasticsearch query.
    :param term: term for retrieving tweets
    :return: List of tweets (dicts with full_text and state) for the term
"""
list_of_tweets = []
client = Elasticsearch("http://localhost:9200")
index = "twitter"
# Define a Query : Here get only city from UK
query = {"query": {
"bool": {
"must": [],
"filter": [
{
"bool": {
"filter": [
{
"bool": {
"should": [
{
"bool": {
"should": [
{
"match_phrase": {
"rest.features.properties.city.keyword": "London"
}
}
],
"minimum_should_match": 1
}
},
{
"bool": {
"should": [
{
"bool": {
"should": [
{
"match_phrase": {
"rest.features.properties.city.keyword": "Glasgow"
}
}
],
"minimum_should_match": 1
}
},
{
"bool": {
"should": [
{
"bool": {
"should": [
{
"match_phrase": {
"rest.features.properties.city.keyword": "Belfast"
}
}
],
"minimum_should_match": 1
}
},
{
"bool": {
"should": [
{
"match": {
"rest.features.properties.city.keyword": "Cardiff"
}
}
],
"minimum_should_match": 1
}
}
],
"minimum_should_match": 1
}
}
],
"minimum_should_match": 1
}
}
],
"minimum_should_match": 1
}
},
{
"bool": {
"should": [
{
"match": {
"full_text": term
}
}
],
"minimum_should_match": 1
}
}
]
}
},
{
"range": {
"created_at": {
"gte": "2020-01-22T23:00:00.000Z",
"lte": "2020-01-30T23:00:00.000Z",
"format": "strict_date_optional_time"
}
}
}
],
}
}
}
    try:
        result = client.search(index=index, body=query, size=10000)
    except Exception as e:
        print("Elasticsearch daemon may not be launched for term: " + term)
        print(e)
        # return an empty result structure so the loop below does not fail on a string
        result = {"hits": {"hits": []}}
for hit in result['hits']['hits']:
content = hit["_source"]["full_text"]
state = hit["_source"]["rest"]["features"][0]["properties"]["state"]
tweet = {
"full_text": content,
"state": state
}
list_of_tweets.append(tweet)
return list_of_tweets
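# Hedged usage sketch (assumes a local Elasticsearch with the "twitter" index populated):
# tweets_for_term = get_tweets_by_terms("coronavirus")
# print(len(tweets_for_term), "tweets retrieved for the term")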
def get_nb_of_tweets_with_spatio_temporal_filter():
"""
    Return the number of tweets by state for Eval 11
    Warning: Only works on
        - the spatial window : capital cities of the UK
        - the temporal window : 2020-01-22 to 2020-01-30
    Todo:
        - if you want to generalize this method to other spatial & temporal windows, you have to customize the
        Elasticsearch query.
    :return: pd.DataFrame with the nb of tweets by state
"""
list_of_tweets = []
client = Elasticsearch("http://localhost:9200")
index = "twitter"
# Define a Query : Here get only city from UK
query = {"query": {
"bool": {
"must": [],
"filter": [
{
"bool": {
"filter": [
{
"bool": {
"should": [
{
"bool": {
"should": [
{
"match_phrase": {
"rest.features.properties.city.keyword": "London"
}
}
],
"minimum_should_match": 1
}
},
{
"bool": {
"should": [
{
"bool": {
"should": [
{
"match_phrase": {
"rest.features.properties.city.keyword": "Glasgow"
}
}
],
"minimum_should_match": 1
}
},
{
"bool": {
"should": [
{
"bool": {
"should": [
{
"match_phrase": {
"rest.features.properties.city.keyword": "Belfast"
}
}
],
"minimum_should_match": 1
}
},
{
"bool": {
"should": [
{
"match": {
"rest.features.properties.city.keyword": "Cardiff"
}
}
],
"minimum_should_match": 1
}
}
],
"minimum_should_match": 1
}
}
],
"minimum_should_match": 1
}
}
],
"minimum_should_match": 1
}
},
]
}
},
{
"range": {
"created_at": {
"gte": "2020-01-22T23:00:00.000Z",
"lte": "2020-01-30T23:00:00.000Z",
"format": "strict_date_optional_time"
}
}
}
],
}
}
}
    try:
        result = client.search(index=index, body=query, size=10000)
    except Exception as e:
        print("Elasticsearch daemon may not be launched")
        print(e)
        # return an empty result structure so the loop below does not fail on a string
        result = {"hits": {"hits": []}}
nb_tweets_by_state = pd.DataFrame(index=["nb_tweets"], columns=('England', 'Northern Ireland', 'Scotland', 'Wales'))
nb_tweets_by_state.iloc[0] = (0, 0, 0, 0)
list_of_unboundaries_state = []
for hit in result['hits']['hits']:
try:
state = hit["_source"]["rest"]["features"][0]["properties"]["state"]
nb_tweets_by_state[state].iloc[0] += 1
        except (KeyError, IndexError):
            # location is outside of the four UK states handled above
            state_no_uk = str(hit["_source"]["rest"]["features"][0]["properties"]["city"] + " " + state)
            list_of_unboundaries_state.append(state_no_uk)
print("get_nb_of_tweets_with_spatio_temporal_filter(): List of unique location outside of UK: " + str(
set(list_of_unboundaries_state)))
return nb_tweets_by_state
def ECIR20():
# matrixOccurence = pd.read_csv('elasticsearch/analyse/matrixOccurence.csv', index_col=0)
"""
### Filter city and period
"""
listOfCity = ['London', 'Glasgow', 'Belfast', 'Cardiff']
tfidfStartDate = date(2020, 1, 23)
tfidfEndDate = date(2020, 1, 30)
tfidfPeriod = pd.date_range(tfidfStartDate, tfidfEndDate)
# LDA clustering on TF-IDF adaptative vocabulary
listOfCityState = ['London_England', 'Glasgow_Scotland', 'Belfast_Northern Ireland', 'Cardiff_Wales']
ldHHTFIDF(listOfCityState)
"""
"""
## Build biotex input for adaptative level state
biotexAdaptativeBuilderAdaptative(listOfcities=listOfCity, spatialLevel='state',
period=tfidfPeriod, temporalLevel='day')
"""
# Compare Biotex with H-TFIDF
"""
biotex = pd.read_csv('elasticsearch/analyse/biotexonhiccs/biotexUKbyStates.csv',
names=['terms', 'UMLS', 'score'], sep=';')
repToSave = "biotexonhiccs"
compareWithHTFIDF(200, biotex, repToSave)
"""
# declare path for comparison H-TFIDF with TF-IDF and TF (scikit measures)
"""
tfidfpath = "elasticsearch/analyse/TFIDFClassical/TFIDFclassicalBiggestScore.csv"
tfpath = "elasticsearch/analyse/TFClassical/TFclassicalBiggestScore.csv"
"""
"""
# Compare classical TF-IDF with H-TFIDF
    ## HTFIDF_comparewith_TFIDF_TF() gives common and specific terms between H-TFIDF and classical TF-IDF & TF
HTFIDF_comparewith_TFIDF_TF()
"""
# Thesaurus coverage : Are the terms in Wordnet / Agrovoc / MeSH
## open measures results and add a column for each thesaurus
### TF-IDF
"""
tfidf = pd.read_csv(tfidfpath)
tfidf = wordnetCoverage(tfidf)
tfidf = agrovocCoverage(tfidf)
tfidf = meshCoverage(tfidf)
tfidf.to_csv(tfidfpath)
print("TF-IDF thesaurus comparison: done")
### TF
tf = pd.read_csv(tfpath)
tf = wordnetCoverage(tf)
tf = agrovocCoverage(tf)
tf = meshCoverage(tf)
tf.to_csv(tfpath)
print("TF thesaurus comparison: done")
### H-TFIDF
htfidfStackedPAth = "elasticsearch/analyse/h-tfidf-stacked-wordnet.csv"
#### Stacked H-TFIDF
htfidf = concatenateHTFIDFBiggestscore()
htfidf = wordnetCoverage(htfidf)
htfidf = agrovocCoverage(htfidf)
htfidf = meshCoverage(htfidf)
htfidf.to_csv(htfidfStackedPAth)
print("H-TFIDF thesaurus comparison: done")
"""
## Percent of Coverage : print
"""
tfidf = | pd.read_csv(tfidfpath) | pandas.read_csv |
import argparse
import glob
import math
import os
import time
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from numba import jit, prange
from sklearn import metrics
from utils import *
@jit(nopython=True, nogil=True, cache=True, parallel=True, fastmath=True)
def compute_tp_tn_fp_fn(y_true, y_pred):
tp = 0
tn = 0
fp = 0
fn = 0
for i in prange(y_pred.size):
tp += y_true[i] * y_pred[i]
tn += (1-y_true[i]) * (1-y_pred[i])
fp += (1-y_true[i]) * y_pred[i]
fn += y_true[i] * (1-y_pred[i])
return tp, tn, fp, fn
def compute_precision(tp, fp):
return tp / (tp + fp)
def compute_recall(tp, fn):
return tp / (tp + fn)
def compute_f1_score(precision, recall):
    try:
        return (2*precision*recall) / (precision + recall)
    except ZeroDivisionError:
        return 0
def compute_fbeta_score(precision, recall, beta):
    try:
        return ((1 + beta**2) * precision * recall) / (beta**2 * precision + recall)
    except ZeroDivisionError:
        return 0
def compute_accuracy(tp,tn,fp,fn):
return (tp + tn)/(tp + tn + fp + fn)
def compute_auc(GT, pred):
return metrics.roc_auc_score(GT, pred)
def compute_auprc(GT, pred):
prec, rec, thresholds = metrics.precision_recall_curve(GT, pred)
# print(prec, rec, thresholds)
plt.plot(prec, rec)
plt.show()
# return metrics.auc(prec, rec)
def compute_average_precision(GT, pred):
ratio = sum(GT)/np.size(GT)
return metrics.average_precision_score(GT, pred), ratio
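# Minimal sketch (not part of the original evaluation) showing how the helpers above chain
# together; the ground truth / prediction arrays are made up for illustration.
def _demo_metrics():
    gt = np.array([1, 0, 1, 1, 0, 1], dtype=np.uint8)
    pred = np.array([1, 0, 0, 1, 1, 1], dtype=np.uint8)
    tp, tn, fp, fn = compute_tp_tn_fp_fn(gt, pred)
    precision = compute_precision(tp, fp)
    recall = compute_recall(tp, fn)
    print("precision:", precision, "recall:", recall,
          "f1:", compute_f1_score(precision, recall),
          "accuracy:", compute_accuracy(tp, tn, fp, fn))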
def main(args):
#====== Numba compilation ======
# The 2 lines are important
compute_tp_tn_fp_fn(np.array([0,0,0], dtype=np.uint8), np.array([0,1,0], dtype=np.uint8))
compute_tp_tn_fp_fn(np.array([0,0,0], dtype=np.float32), np.array([0,1,0], dtype=np.float32))
#===============================
out = args.out
if not os.path.exists(os.path.dirname(out)):
os.makedirs(os.path.dirname(out))
model_name = args.model_name
number_epochs = args.epochs
batch_size = args.batch_size
NumberFilters = args.number_filters
lr = args.learning_rate
cv_fold = args.cv_fold
model_params = ['Number Epochs', 'Batch Size', 'Number Filters', 'Learning Rate', 'Empty col', 'Empty col2', 'Empty col3', 'CV']
param_values = [number_epochs, batch_size, NumberFilters, lr, '', '', '', '']
Params = pd.Series(param_values, index=model_params, name='Params values')
metrics_names = ['AUPRC','AUPRC - Baseline','F1_Score','Fbeta_Score','Accuracy','Recall','Precision','CV fold']
Metrics = pd.Series(metrics_names, index=model_params, name='Model\Metrics')
if not os.path.exists(out):
Folder_Metrics = pd.DataFrame(columns = model_params)
Image_Metrics = pd.DataFrame(columns = model_params)
else:
Metrics_file = pd.ExcelFile(out)
Folder_Metrics = pd.read_excel(Metrics_file, 'Sheet1', index_col=0, header=None)
Folder_Metrics = Folder_Metrics[Folder_Metrics.columns[:8]]
Folder_Metrics.columns = model_params
Image_Metrics = pd.read_excel(Metrics_file, 'Sheet2', index_col=0, header=None)
Image_Metrics.columns = model_params
matching_values = (Folder_Metrics.values[:,:4] == Params.values[:4]).all(1)
if not matching_values.any():
Folder_Metrics = Folder_Metrics.append(pd.Series(['Number Epochs', 'Batch Size', 'Number Filters', 'Learning Rate', '', '', '', 'CV'], name='Params', index=model_params), ignore_index=False)
Folder_Metrics = Folder_Metrics.append(Params, ignore_index=False)
Folder_Metrics = Folder_Metrics.append(Metrics, ignore_index=False)
Folder_Metrics = Folder_Metrics.append(pd.Series(name='', dtype='object'), ignore_index=False)
matching_values = (Image_Metrics.values[:,:4] == Params.values[:4]).all(1)
if not matching_values.any():
Image_Metrics = Image_Metrics.append( | pd.Series(['Number Epochs', 'Batch Size', 'Number Filters', 'Learning Rate', '', '', '', 'File Name'], name='Params', index=model_params) | pandas.Series |
from datetime import datetime
from elasticsearch import Elasticsearch
from pandasticsearch import Select
import pandas as pd
from espandas import Espandas
es = Elasticsearch()
es.indices.create(
index="test7",
body={
"mappings": {
"properties": {
"location": {
"type": "geo_point"
}
}
}
}
)
dataset= | pd.read_csv('/home/doctor/PycharmProjects/tubitak/battery/spyder_codes/dataset_ready.csv', index_col=0,dtype={'active_power': float,'humidity': float,'temp': float}) | pandas.read_csv |
import os
import numpy as np
import pandas as pd
import datetime as dt
from multiprocessing import Pool
my_dir = os.getcwd()
raw_tick_dir = os.path.join(my_dir, "data/1_RawTicks")
target_folder = os.path.join(my_dir, "data/4_Ticks")
if os.path.basename(target_folder) not in os.listdir(os.path.dirname(target_folder)):
os.mkdir(target_folder)
def check_breaks(file_names):
end = dt.datetime.strptime(file_names[0][-10:-4], "%y%m%d")
breaches = []
for i in file_names[1:]:
expected_previous_end = dt.datetime.strptime(i[5:11], "%y%m%d") - dt.timedelta(days=1)
if not end == expected_previous_end:
breaches.append([end, expected_previous_end])
end = dt.datetime.strptime(i[-10:-4], "%y%m%d")
if breaches:
print("Breaches: ", breaches)
return False
return True
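# Hedged example of check_breaks; the 5-character prefix and the
# "<prefix><start yymmdd>_<end yymmdd>.csv" layout are assumptions inferred from the slicing
# above, not a documented convention.
# example_names = ["DAX01200101_200107.csv", "DAX01200108_200114.csv"]
# print(check_breaks(example_names))  # True when each file starts the day after the previous one ends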
def get_csv(file):
global source_folder
file_path = os.path.join(source_folder, file)
df_temp = | pd.read_csv(file_path, sep=';') | pandas.read_csv |
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
sns.set(style="white", context="talk")
whole_data = | pd.DataFrame(columns=['repo_name', 'method_invocations', 'filter']) | pandas.DataFrame |
import pandas
import re
class Classifier():
"""
Represents an object which takes in a training set of messages
and uses the Naive Bayes to classify new messages.
"""
def __init__(self, path, label_column = "LABEL", body_column = "BODY", start_row = 0, seperator = "\t"):
self.body_column = body_column
self.label_column = label_column
self.training_set = pandas.read_csv(path, sep=seperator, header=start_row, names=[self.label_column, self.body_column])
self.vocabulary = []
self.training_set_clean = None
self._train()
def _clean_data(self):
"""
Takes in a training set and cleans it up by removing punctuation, removing multiple spaces,
and converting to lowercase.
"""
# Removes punctuation
self.training_set[self.body_column] = self.training_set[self.body_column].str.replace('\W', ' ', regex=True)
        # Replace multiple spaces with a single space.
self.training_set[self.body_column] = self.training_set[self.body_column].str.replace('\s+', ' ', regex=True)
# Lowercase everything
self.training_set[self.body_column] = self.training_set[self.body_column].str.lower()
def _build_vocabulary(self):
"""Builds a unique set of words from the training set.
"""
self.training_set[self.body_column] = self.training_set[self.body_column].str.split()
self.vocabulary = []
for message in self.training_set[self.body_column]:
for word in message:
self.vocabulary.append(word)
self.vocabulary = list(set(self.vocabulary))
def _count_tokens(self):
"""Computes the count of all tokens in each message.
"""
word_counts_per_message = {unique_word: [0] * len(self.training_set[self.body_column]) for unique_word in self.vocabulary}
for index, message in enumerate(self.training_set[self.body_column]):
for word in message:
word_counts_per_message[word][index] += 1
word_counts = | pandas.DataFrame(word_counts_per_message) | pandas.DataFrame |
'''Some helper functions for PyTorch, including:
- get_mean_and_std: calculate the mean and std value of dataset.
- msr_init: net parameter initialization.
- progress_bar: progress bar mimic xlua.progress.
'''
from __future__ import print_function, absolute_import
import errno
import time
import numpy as np
import matplotlib
import torch.nn as nn
import torch.nn.init as init
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import os
import datetime
import pandas as pd
import torch.nn.parallel
from sklearn.utils import shuffle
from sklearn.model_selection import StratifiedKFold
from torch.autograd import Variable
from torch.utils.data import TensorDataset
from torchvision.transforms import *
import nnmodels as nnmodels
from os import listdir
import sys
# __all__ = ['Logger', 'LoggerMonitor', 'savefig']
# __all__ = ['get_mean_and_std', 'init_params', 'mkdir_p', 'AverageMeter', 'accuracy']
def print_log(print_string, log):
print("{}".format(print_string))
log.write('{}\n'.format(print_string))
log.flush()
def savefig(fname, dpi=None):
dpi = 500 if dpi == None else dpi
plt.savefig(fname, dpi=dpi)
def plot_overlap(logger, names=None):
names = logger.names if names == None else names
numbers = logger.numbers
for _, name in enumerate(names):
x = np.arange(len(numbers[name]))
if name in ['Train Acc.', 'Valid Acc.']:
plt.plot(x, 100 - np.asarray(numbers[name], dtype='float'))
else:
plt.plot(x, np.asarray(numbers[name]))
return [logger.title + '(' + name + ')' for name in names]
class Logger(object):
'''Save training process to log file with simple plot function.'''
def __init__(self, fpath, title=None, resume=False):
self.file = None
self.resume = resume
self.title = '' if title == None else title
if fpath is not None:
if resume:
self.file = open(fpath, 'r')
name = self.file.readline()
self.names = name.rstrip().split('\t')
self.numbers = {}
for _, name in enumerate(self.names):
self.numbers[name] = []
for numbers in self.file:
numbers = numbers.rstrip().split('\t')
for i in range(0, len(numbers)):
self.numbers[self.names[i]].append(numbers[i])
self.file.close()
self.file = open(fpath, 'a')
else:
self.file = open(fpath, 'w')
def set_names(self, names):
if self.resume:
pass
# initialize numbers as empty list
self.numbers = {}
self.names = names
for _, name in enumerate(self.names):
self.file.write(name)
self.file.write('\t')
self.numbers[name] = []
self.file.write('\n')
self.file.flush()
def append(self, numbers):
assert len(self.names) == len(numbers), 'Numbers do not match names'
for index, num in enumerate(numbers):
self.file.write("{0:.6f}".format(num))
self.file.write('\t')
self.numbers[self.names[index]].append(num)
self.file.write('\n')
self.file.flush()
def plot(self, names=None):
names = self.names if names == None else names
numbers = self.numbers
for _, name in enumerate(names):
x = np.arange(len(numbers[name]))
plt.plot(x, np.asarray(numbers[name]))
plt.legend([self.title + '(' + name + ')' for name in names])
plt.grid(True)
def close(self):
if self.file is not None:
self.file.close()
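# Hedged usage sketch for Logger (the file name and metric values are illustrative only):
# logger = Logger('demo_log.txt', title='demo-run')
# logger.set_names(['Learning Rate', 'Train Loss', 'Valid Loss', 'Train Acc.', 'Valid Acc.'])
# logger.append([0.01, 1.25, 1.40, 55.0, 52.3])
# logger.close()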
class LoggerMonitor(object):
'''Load and visualize multiple logs.'''
def __init__(self, paths):
'''paths is a distionary with {name:filepath} pair'''
self.loggers = []
for title, path in paths.items():
logger = Logger(path, title=title, resume=True)
self.loggers.append(logger)
def plot(self, names=None):
plt.figure()
plt.plot()
legend_text = []
for logger in self.loggers:
legend_text += plot_overlap(logger, names)
legend_text = ['WRN-28-10+Ours (error 17.65%)', 'WRN-28-10 (error 18.68%)']
plt.legend(legend_text, loc=0)
plt.ylabel('test error (%)')
plt.xlabel('epoch')
plt.grid(True)
def time_string():
ISOTIMEFORMAT = '%Y-%m-%d %X'
string = '[{}]'.format(time.strftime(ISOTIMEFORMAT, time.gmtime(time.time())))
return string
def get_mean_and_std(dataset):
'''Compute the mean and std value of dataset.'''
    dataloader = torch.utils.data.DataLoader(dataset, batch_size=1, shuffle=True, num_workers=2)
mean = torch.zeros(3)
std = torch.zeros(3)
print('==> Computing mean and std..')
for inputs, targets in dataloader:
for i in range(3):
mean[i] += inputs[:, i, :, :].mean()
std[i] += inputs[:, i, :, :].std()
mean.div_(len(dataset))
std.div_(len(dataset))
return mean, std
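# Hedged usage sketch for get_mean_and_std; the CIFAR10 dataset and download path are
# illustrative assumptions, any torchvision dataset yielding (image, target) pairs works.
# import torchvision
# demo_set = torchvision.datasets.CIFAR10(root='./data', train=True, download=True,
#                                         transform=ToTensor())
# mean, std = get_mean_and_std(demo_set)
# print(mean, std)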
def init_params(net):
'''Init layer parameters.'''
for m in net.modules():
if isinstance(m, nn.Conv2d):
init.kaiming_normal(m.weight, mode='fan_out')
if m.bias:
init.constant(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
init.constant(m.weight, 1)
init.constant(m.bias, 0)
elif isinstance(m, nn.Linear):
init.normal(m.weight, std=1e-3)
if m.bias:
init.constant(m.bias, 0)
def mkdir_p(path):
'''make dir if not exist'''
try:
os.makedirs(path)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
class TrainningValidationSplitDataset(torch.utils.data.Dataset):
def __init__(self, full_ds, offset, length):
self.full_ds = full_ds
self.offset = offset
self.length = length
assert len(full_ds) >= offset + length, Exception("Parent Dataset not long enough")
super(TrainningValidationSplitDataset, self).__init__()
def __len__(self):
return self.length
def __getitem__(self, i):
return self.full_ds[i + self.offset]
def trainTestSplit(dataset, val_share):
val_offset = int(len(dataset) * (1 - val_share))
# print("Offest:" + str(val_offset))
return TrainningValidationSplitDataset(dataset, 0, val_offset), TrainningValidationSplitDataset(dataset, val_offset,
len(dataset) - val_offset)
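# Hedged usage sketch for trainTestSplit; some_dataset stands for any torch Dataset built
# elsewhere, and the 0.2 validation share is only an example value.
# train_part, val_part = trainTestSplit(some_dataset, val_share=0.2)
# print(len(train_part), len(val_part))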
def createNewDir(BASE_FOLDER):
parquet_dir = os.path.join(BASE_FOLDER, datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S'))
os.makedirs(parquet_dir)
return parquet_dir
def savePred(df_pred, local_model, val_score, train_score, save_path):
pre = save_path + '/' + '/pth/'
if not os.path.isdir(pre):
os.makedirs(pre)
fName = pre + str(val_score) + '_' + str(train_score)
torch.save(local_model.state_dict(), fName + '_cnn.pth')
csv_path = str(fName + '_submission.csv')
df_pred.to_csv(csv_path, columns=('id', 'is_iceberg'), index=None)
print(csv_path)
def MinMaxBestBaseStacking(input_folder, best_base, output_path):
sub_base = pd.read_csv(best_base)
all_files = os.listdir(input_folder)
# Read and concatenate submissions
outs = [pd.read_csv(os.path.join(input_folder, f), index_col=0) for f in all_files]
concat_sub = pd.concat(outs, axis=1)
cols = list(map(lambda x: "is_iceberg_" + str(x), range(len(concat_sub.columns))))
concat_sub.columns = cols
concat_sub.reset_index(inplace=True)
# get the data fields ready for stacking
concat_sub['is_iceberg_max'] = concat_sub.iloc[:, 1:6].max(axis=1)
concat_sub['is_iceberg_min'] = concat_sub.iloc[:, 1:6].min(axis=1)
concat_sub['is_iceberg_mean'] = concat_sub.iloc[:, 1:6].mean(axis=1)
concat_sub['is_iceberg_median'] = concat_sub.iloc[:, 1:6].median(axis=1)
# set up cutoff threshold for lower and upper bounds, easy to twist
cutoff_lo = 0.67
cutoff_hi = 0.33
concat_sub['is_iceberg_base'] = sub_base['is_iceberg']
concat_sub['is_iceberg'] = np.where(np.all(concat_sub.iloc[:, 1:6] > cutoff_lo, axis=1),
concat_sub['is_iceberg_max'],
np.where(np.all(concat_sub.iloc[:, 1:6] < cutoff_hi, axis=1),
concat_sub['is_iceberg_min'],
concat_sub['is_iceberg_base']))
concat_sub[['id', 'is_iceberg']].to_csv(output_path,
index=False, float_format='%.12f')
def ensembleVer2(input_folder, output_path):
print('Out:' + output_path)
csv_files = [f for f in os.listdir(input_folder) if f.endswith('.csv')]
model_scores = []
for i, csv in enumerate(csv_files):
df = pd.read_csv(os.path.join(input_folder, csv), index_col=0)
if i == 0:
index = df.index
else:
assert index.equals(df.index), "Indices of one or more files do not match!"
model_scores.append(df)
print("Read %d files. Averaging..." % len(model_scores))
# print(model_scores)
concat_scores = pd.concat(model_scores)
print(concat_scores.head())
concat_scores['is_iceberg'] = concat_scores['is_iceberg'].astype(np.float32)
averaged_scores = concat_scores.groupby(level=0).mean()
assert averaged_scores.shape[0] == len(list(index)), "Something went wrong when concatenating/averaging!"
averaged_scores = averaged_scores.reindex(index)
stacked_1 = pd.read_csv('statoil-submission-template.csv') # for the header
print(stacked_1.shape)
sub = pd.DataFrame()
sub['id'] = stacked_1['id']
sub['is_iceberg'] = np.exp(np.mean(
[
averaged_scores['is_iceberg'].apply(lambda x: np.log(x))
], axis=0))
print(sub.shape)
sub.to_csv(output_path, index=False, float_format='%.9f')
print("Averaged scores saved to %s" % output_path)
# Convert the np arrays into the correct dimention and type
# Note that BCEloss requires Float in X as well as in y
def XnumpyToTensor(x_data_np, args):
x_data_np = np.array(x_data_np, dtype=np.float32)
if args.use_cuda:
X_tensor = (torch.from_numpy(x_data_np).cuda()) # Note the conversion for pytorch
else:
X_tensor = (torch.from_numpy(x_data_np)) # Note the conversion for pytorch
return X_tensor
# Convert the np arrays into the correct dimention and type
# Note that BCEloss requires Float in X as well as in y
def YnumpyToTensor(y_data_np, args):
y_data_np = y_data_np.reshape((y_data_np.shape[0], 1)) # Must be reshaped for PyTorch!
if args.use_cuda:
# Y = Variable(torch.from_numpy(y_data_np).type(torch.LongTensor).cuda())
Y_tensor = (torch.from_numpy(y_data_np)).type(torch.FloatTensor).cuda() # BCEloss requires Float
else:
# Y = Variable(torch.squeeze (torch.from_numpy(y_data_np).type(torch.LongTensor))) #
Y_tensor = (torch.from_numpy(y_data_np)).type(torch.FloatTensor) # BCEloss requires Float
return Y_tensor
class RecorderMeter(object):
"""Computes and stores the minimum loss value and its epoch index"""
def __init__(self, total_epoch):
self.reset(total_epoch)
def reset(self, total_epoch):
assert total_epoch > 0
self.total_epoch = total_epoch
self.current_epoch = 0
self.epoch_losses = np.zeros((self.total_epoch, 2), dtype=np.float32) # [epoch, train/val]
self.epoch_losses = self.epoch_losses - 1
self.epoch_accuracy = np.zeros((self.total_epoch, 2), dtype=np.float32) # [epoch, train/val]
self.epoch_accuracy = self.epoch_accuracy
def update(self, idx, train_loss, train_acc, val_loss, val_acc):
assert idx >= 0 and idx < self.total_epoch, 'total_epoch : {} , but update with the {} index'.format(
self.total_epoch, idx)
self.epoch_losses[idx, 0] = train_loss
self.epoch_losses[idx, 1] = val_loss
self.epoch_accuracy[idx, 0] = train_acc
self.epoch_accuracy[idx, 1] = val_acc
self.current_epoch = idx + 1
return self.max_accuracy(False) == val_acc
def max_accuracy(self, istrain):
if self.current_epoch <= 0: return 0
if istrain:
return self.epoch_accuracy[:self.current_epoch, 0].max()
else:
return self.epoch_accuracy[:self.current_epoch, 1].max()
def plot_curve(self, save_path, args, model):
title = 'PyTorch-Ensembler:' + str((type(model).__name__)).upper() + ',LR:' + str(args.lr) + ',DataSet:' + str(args.dataset).upper() + ',' + '\n'\
+ ',Params: %.2fM' % (sum(p.numel() for p in model.parameters()) / 1000000.0) + ',Seed: %.2f' % args.manualSeed + \
",Torch: {}".format(torch.__version__) + ", Batch:{}".format(args.batch_size)
dpi = 80
width, height = 1200, 800
legend_fontsize = 14
scale_distance = 48.8
figsize = width / float(dpi), height / float(dpi)
fig = plt.figure(figsize=figsize)
x_axis = np.array([i for i in range(self.total_epoch)]) # epochs
y_axis = np.zeros(self.total_epoch)
plt.xlim(0, self.total_epoch)
plt.ylim(0, 1.0)
interval_y = 0.05 / 3.0
interval_x = 1
plt.xticks(np.arange(0, self.total_epoch + interval_x, interval_x))
plt.yticks(np.arange(0, 1.0 + interval_y, interval_y))
plt.grid()
plt.title(title, fontsize=18)
plt.xlabel('EPOCH', fontsize=16)
plt.ylabel('LOSS/ACC', fontsize=16)
y_axis[:] = self.epoch_accuracy[:, 0] / 100.0
plt.plot(x_axis, y_axis, color='g', linestyle='-', label='tr-accuracy/100', lw=2)
plt.legend(loc=4, fontsize=legend_fontsize)
y_axis[:] = self.epoch_accuracy[:, 1] / 100.0
plt.plot(x_axis, y_axis, color='y', linestyle='-', label='val-accuracy/100', lw=2)
plt.legend(loc=4, fontsize=legend_fontsize)
y_axis[:] = self.epoch_losses[:, 0]
plt.plot(x_axis, y_axis, color='r', linestyle=':', label='tr-loss', lw=2)
plt.legend(loc=4, fontsize=legend_fontsize)
y_axis[:] = self.epoch_losses[:, 1]
plt.plot(x_axis, y_axis, color='b', linestyle=':', label='val-loss', lw=4)
plt.legend(loc=4, fontsize=legend_fontsize)
if save_path is not None:
fig.savefig(save_path, dpi=dpi, bbox_inches='tight')
# print('---- save figure {} into {}'.format(title, save_path))
plt.close(fig)
def set_optimizer_lr(optimizer, lr):
# callback to set the learning rate in an optimizer, without rebuilding the whole optimizer
for param_group in optimizer.param_groups:
param_group['lr'] = lr
return optimizer
import math
# https://github.com/gngdb/pytorch-cifar-sgdr/blob/master/main.py
def sgdr(period, batch_idx):
# returns normalised anytime sgdr schedule given period and batch_idx
# best performing settings reported in paper are T_0 = 10, T_mult=2
# so always use T_mult=2
batch_idx = float(batch_idx)
restart_period = period
while batch_idx / restart_period > 1.:
batch_idx = batch_idx - restart_period
restart_period = restart_period * 2.
radians = math.pi * (batch_idx / restart_period)
return 0.5 * (1.0 + math.cos(radians))
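# Minimal sketch of the sgdr schedule (illustration only): with period=10 the multiplier
# decays from 1.0 to 0.0 over the first 10 batches, then restarts with a doubled period.
def _demo_sgdr(period=10, n_batches=30):
    return [round(sgdr(period, b), 3) for b in range(n_batches)]
# Typical use (base_lr being whatever base rate the caller chose):
# optimizer = set_optimizer_lr(optimizer, base_lr * sgdr(period, batch_idx))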
# def adjust_learning_rate(optimizer, epoch):
# global lr
# """Sets the learning rate to the initial LR decayed by 10 every 30 epochs"""
# lr = lr * (0.01 ** (epoch // 10))
# for param_group in optimizer.state_dict()['param_groups']:
# param_group['lr'] = lr
def adjust_learning_rate(optimizer, epoch, args):
"""Sets the learning rate to the initial LR decayed by 10 after 20 and 40 and 60 epochs"""
# global lr
lr = args.lr * (0.5 ** (epoch // 33)) * (0.5 ** (epoch // 20)) * (0.5 ** (epoch // 55))
print ('adjust_learning_rate: {} '.format(lr))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
def fixSeed(args):
random.seed(args.manualSeed)
np.random.seed(args.manualSeed)
torch.manual_seed(args.manualSeed)
if args.use_cuda:
torch.cuda.manual_seed(args.manualSeed)
torch.cuda.manual_seed_all(args.manualSeed)
def getStatoilTrainValLoaders(args,n_folds=5,current_fold=0):
fixSeed(args)
local_data = pd.read_json(args.data_path + '/train.json')
skf = StratifiedKFold(n_splits=n_folds,random_state=2018)
x=local_data['id'].values
y=local_data['is_iceberg'].values
for i,(train_ind,val_ind) in enumerate(skf.split(X=x,y=y)):
if i<current_fold:
pass
else:
tr_data = local_data.iloc[train_ind,:]
val_data = local_data.iloc[val_ind,:]
break
# local_data = shuffle(local_data) # otherwise same validation set each time!
# local_data = local_data.reindex(np.random.permutation(local_data.index))
tr_data['band_1'] = tr_data['band_1'].apply(lambda x: np.array(x).reshape(75, 75))
tr_data['band_2'] = tr_data['band_2'].apply(lambda x: np.array(x).reshape(75, 75))
tr_data['inc_angle'] = pd.to_numeric(tr_data['inc_angle'], errors='coerce')
tr_data['inc_angle'].fillna(0, inplace=True)
val_data['band_1'] = val_data['band_1'].apply(lambda x: np.array(x).reshape(75, 75))
val_data['band_2'] = val_data['band_2'].apply(lambda x: np.array(x).reshape(75, 75))
val_data['inc_angle'] = pd.to_numeric(val_data['inc_angle'], errors='coerce')
val_data['inc_angle'].fillna(0, inplace=True)
band_1_tr = np.concatenate([im for im in tr_data['band_1']]).reshape(-1, 75, 75)
band_2_tr = np.concatenate([im for im in tr_data['band_2']]).reshape(-1, 75, 75)
#band_3_tr = (band_1_tr+band_2_tr)/2
local_full_img_tr = np.stack([band_1_tr, band_2_tr], axis=1)#,band_3_tr], axis=1)
band_1_val = np.concatenate([im for im in val_data['band_1']]).reshape(-1, 75, 75)
band_2_val = np.concatenate([im for im in val_data['band_2']]).reshape(-1, 75, 75)
#band_3_val = (band_1_val+band_2_val)/2
local_full_img_val = np.stack([band_1_val, band_2_val], axis=1)#,band_3_val], axis=1)
train_imgs = XnumpyToTensor(local_full_img_tr, args)
train_targets = YnumpyToTensor(tr_data['is_iceberg'].values, args)
dset_train = TensorDataset(train_imgs, train_targets)
val_imgs = XnumpyToTensor(local_full_img_val, args)
val_targets = YnumpyToTensor(val_data['is_iceberg'].values, args)
dset_val = TensorDataset(val_imgs, val_targets)
# local_train_ds, local_val_ds = trainTestSplit(dset_train, args.validationRatio)
local_train_ds, local_val_ds = dset_train, dset_val
local_train_loader = torch.utils.data.DataLoader(local_train_ds, batch_size=args.batch_size, shuffle=False,
num_workers=args.workers)
local_val_loader = torch.utils.data.DataLoader(local_val_ds, batch_size=args.batch_size, shuffle=False,
num_workers=args.workers)
return local_train_loader, local_val_loader, local_train_ds, local_val_ds
def selectModel(args, m):
model = None
print("==> Creating model '{}'".format(m))
if m.startswith('senet'): # block, n_size=1, num_classes=1, num_rgb=2, base=32
model = nnmodels.senetXX_generic(args.num_classes, args.imgDim, args.base_factor)
# model = nnmodels.senet32_RG_1_classes(args.num_classes, args.imgDim)
args.batch_size = 4
args.batch_size = 4
args.epochs = 250
args.lr = 0.0007 # do not change !!! optimal for the Statoil data set
if m.startswith('densenet'):
model = nnmodels.densnetXX_generic(args.num_classes, args.imgDim)
args.batch_size = 32
args.batch_size = 32
args.epochs = 30
args.lr = 0.05
if m.startswith('minidensenet'):
model = nnmodels.minidensnetXX_generic(args.num_classes, args.imgDim)
args.batch_size = 32
args.batch_size = 32
args.epochs = 35
args.lr = 0.005 * 2
if m.startswith('vggnet'):
model = nnmodels.vggnetXX_generic(args.num_classes, args.imgDim)
args.batch_size = 64
args.batch_size = 64
args.epochs = 88
args.lr = 0.0005
if m.startswith('resnext'):
model = nnmodels.resnetxtXX_generic(args.num_classes, args.imgDim)
args.batch_size = 16
args.batch_size = 16
args.epochs = 66
args.lr = 0.0005
if m.startswith('lenet'):
model = nnmodels.lenetXX_generic(args.num_classes, args.imgDim)
args.batch_size = 64
args.batch_size = 64
args.epochs = 88
if m.startswith('wrn'):
model = nnmodels.wrnXX_generic(args.num_classes, args.imgDim)
args.batch_size = 16
args.batch_size = 16
args.epochs = 34
args.lr = 0.0005*2
if m.startswith('simple'):
model = nnmodels.simpleXX_generic(args.num_classes, args.imgDim)
args.batch_size = 256
args.batch_size = 256
args.epochs = 120
# if m.startswith('unet'):
# model = nnmodels.unetXX_generic(args.num_classes, args.imgDim)
# args.batch_size = 64
# args.batch_size = 64
# args.epochs = 50
# if m.startswith('link'):
# model = nnmodels.linknetXX_generic(args.num_classes, args.imgDim)
# args.batch_size = 64
# args.batch_size = 64
# args.epochs = 50
return model
def BinaryInferenceOofAndTest(local_model,args,n_folds = 5,current_fold=0):
if args.use_cuda:
local_model.cuda()
local_model.eval()
df_test_set = pd.read_json(args.data_path + '/test.json')
df_test_set['band_1'] = df_test_set['band_1'].apply(lambda x: np.array(x).reshape(75, 75))
df_test_set['band_2'] = df_test_set['band_2'].apply(lambda x: np.array(x).reshape(75, 75))
df_test_set['inc_angle'] = pd.to_numeric(df_test_set['inc_angle'], errors='coerce')
# df_test_set.head(3)
print(df_test_set.shape)
columns = ['id', 'is_iceberg']
df_pred_test = pd.DataFrame(data=np.zeros((0, len(columns))), columns=columns)
# df_pred.id.astype(int)
for index, row in df_test_set.iterrows():
rwo_no_id = row.drop('id')
band_1_test = (rwo_no_id['band_1']).reshape(-1, 75, 75)
band_2_test = (rwo_no_id['band_2']).reshape(-1, 75, 75)
# band_3_test = (band_1_test + band_2_test) / 2
full_img_test = np.stack([band_1_test, band_2_test], axis=1)
x_data_np = np.array(full_img_test, dtype=np.float32)
if args.use_cuda:
X_tensor_test = Variable(torch.from_numpy(x_data_np).cuda()) # Note the conversion for pytorch
else:
X_tensor_test = Variable(torch.from_numpy(x_data_np)) # Note the conversion for pytorch
# X_tensor_test=X_tensor_test.view(1, trainX.shape[1]) # does not work with 1d tensors
predicted_val = (local_model(X_tensor_test).data).float() # probabilities
p_test = predicted_val.cpu().numpy().item() # otherwise we get an array, we need a single float
df_pred_test = df_pred_test.append({'id': row['id'], 'is_iceberg': p_test}, ignore_index=True)
df_val_set = pd.read_json(args.data_path + '/train.json')
skf = StratifiedKFold(n_splits=n_folds,random_state=2018)
x=df_val_set['id'].values
y=df_val_set['is_iceberg'].values
columns = ['id', 'is_iceberg']
for i,(train_ind,val_ind) in enumerate(skf.split(X=x,y=y)):
if i<current_fold:
pass
else:
ids_and_labels = df_val_set.iloc[val_ind,[2,4]]
df_val_set = df_val_set.iloc[val_ind,:]
break
df_val_set['band_1'] = df_val_set['band_1'].apply(lambda x: np.array(x).reshape(75, 75))
df_val_set['band_2'] = df_val_set['band_2'].apply(lambda x: np.array(x).reshape(75, 75))
df_val_set['inc_angle'] = pd.to_numeric(df_val_set['inc_angle'], errors='coerce')
# df_test_set.head(3)
print(df_val_set.shape)
columns = ['id', 'is_iceberg']
df_pred_val = pd.DataFrame(data=np.zeros((0, len(columns))), columns=columns)
# df_pred.id.astype(int)
for index, row in df_val_set.iterrows():
rwo_no_id = row.drop('id')
band_1_test = (rwo_no_id['band_1']).reshape(-1, 75, 75)
band_2_test = (rwo_no_id['band_2']).reshape(-1, 75, 75)
# band_3_test = (band_1_test + band_2_test) / 2
full_img_test = np.stack([band_1_test, band_2_test], axis=1)
x_data_np = np.array(full_img_test, dtype=np.float32)
if args.use_cuda:
X_tensor_test = Variable(torch.from_numpy(x_data_np).cuda()) # Note the conversion for pytorch
else:
X_tensor_test = Variable(torch.from_numpy(x_data_np)) # Note the conversion for pytorch
# X_tensor_test=X_tensor_test.view(1, trainX.shape[1]) # does not work with 1d tensors
predicted_val = (local_model(X_tensor_test).data).float() # probabilities
p_test = predicted_val.cpu().numpy().item() # otherwise we get an array, we need a single float
df_pred_val = df_pred_val.append({'id': row['id'], 'is_iceberg': p_test}, ignore_index=True)
return df_pred_val, df_pred_test, ids_and_labels
def BinaryInference(local_model, args):
if args.use_cuda:
local_model.cuda()
local_model.eval()
df_test_set = | pd.read_json(args.data_path + '/test.json') | pandas.read_json |
# coding: utf-8
# In[3]:
import pandas as pd
pd.set_option('display.notebook_repr_html', False)
#Reading data with the read_csv method
data = pd.read_csv('capitulo2/titanic3.csv')
# In[4]:
data
# In[6]:
#Reading txt files
data = pd.read_csv('capitulo2/Customer Churn Model.txt')
# In[7]:
data
# In[34]:
#Opening files using the open method
data = open('capitulo2/Customer Churn Model.txt','r')
cols = data.readline().strip().split(',')
no_cols=len(cols)
# In[35]:
cols
# In[20]:
no_cols
# In[36]:
#Find the number of records
counter = 0
main_dict = {}
for col in cols:
main_dict[col]=[]
for line in data:
values = line.strip().split(',')
for i in range(len(cols)):
main_dict[cols[i]].append(values[i])
counter += 1
print("The dataset has %d rows and %d columns" % (counter, no_cols))
#The readline method stays at the last position, so if this is run again the counter is 0
# In[38]:
df = pd.DataFrame(main_dict)
df.head()
# In[41]:
#Generate a tab-delimited ('\t') copy of the file
infile = 'capitulo2/Customer Churn Model.txt'
outfile = 'capitulo2/Tab Customer Churn Model.txt'
with open(infile) as infile1:
    with open(outfile, 'w') as outfile1:
        for line in infile1:
            fields = line.split(',')
            outfile1.write('\t'.join(fields))
# In[42]:
data = pd.read_csv('capitulo2/Tab Customer Churn Model.txt',sep='\t')
# In[43]:
data
# In[60]:
#Reading data from a URL
import csv
import urllib.request
url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/forest-fires/forestfires.csv'
response = urllib.request.urlopen(url)
cr = csv.reader((line.decode('utf-8') for line in response), 'excel')
for rows in cr:
print(rows)
# In[61]:
archivo = pd.read_csv('https://archive.ics.uci.edu/ml/machine-learning-databases/forest-fires/forestfires.csv')
# In[62]:
archivo
# In[64]:
#Reading .xls or .xlsx files
#read_excel works for both; the second argument is the sheet to read
data = pd.read_excel('capitulo2/titanic3.xlsx','titanic3')
data
# In[65]:
#Writing to a CSV or EXCEL file
#We can write a DataFrame to an excel or csv file
#the_dataframe.to_csv()
#the_dataframe.to_excel()
df.to_excel('capitulo2/dataframe.xls')
# In[66]:
data = pd.read_csv('capitulo2/titanic3.csv')
data.head()
# In[67]:
#Rows and columns of the file
data.shape
# In[69]:
#Column names
data.columns.values
# In[72]:
#Description of the data
data.describe()
# In[73]:
#Type of each column
data.dtypes
# In[75]:
#Missing values in the DataFrame
#Returns a series with True for cells that have missing values and False otherwise
#pd.isnull(data['body'])
#the opposite could be done with
pd.notnull(data['body'])
# In[77]:
#Number of entries with missing values
| pd.isnull(data['body']) | pandas.isnull |
import numpy as np
import pandas as pd
import pytest
from ber_public.deap import dim
@pytest.fixture
def building_fabric():
floor_uvalue = pd.Series([0.14])
roof_uvalue = pd.Series([0.11])
wall_uvalue = pd.Series([0.13])
window_uvalue = pd.Series([0.87])
door_uvalue = pd.Series([1.5])
thermal_bridging_factor = pd.Series([0.05])
effective_air_rate_change = pd.Series([0.5])
return (
floor_uvalue,
roof_uvalue,
wall_uvalue,
window_uvalue,
door_uvalue,
thermal_bridging_factor,
effective_air_rate_change,
)
@pytest.fixture
def building_area():
floor_area = pd.Series([63])
roof_area = pd.Series([63])
wall_area = | pd.Series([85.7]) | pandas.Series |
from Bio import PDB
import numpy as np
import pandas as pd
from biodescriptors.calc import constraints
from biodescriptors.calc import utils
def _calc_dssp_hel(dssp, ref):
"""TODO: Documentation"""
# TODO: Split function into smaller functions
chainA = [key for key in dssp.keys() if key[0] == 'A']
helix_map = np.zeros([1, len(chainA)])
res_num = utils.getResidues(dssp)
dssp_start = 0
dssp_end = 0
result = []
#print(res_num)
for i in range(len(ref)):
#print(ref[i][0])
start = utils.getNum(ref[i][0], res_num)
end = utils.getNum(ref[i][1], res_num)
#finding starting point
start_longer_counter = 0
start_shorter_counter = 0
# TODO: wrap in single func
if dssp[list(dssp.keys())[start]][2] == 'H':
# check the first iteration
while dssp[list(dssp.keys())[start-1]][2] == 'H' and utils.getRes(start-1, res_num) != dssp_end:
start_longer_counter+=1
start-=1
missing=False
else:
missing_counter = 0
missing = True
while missing_counter < (end-start):
start+=1
start_shorter_counter+=1
if dssp[list(dssp.keys())[start]][2] == 'H':
missing = False
break
else:
missing_counter +=1
#
#finding endpoint
if missing == False:
end_longer_counter = 0
end_shorter_counter = 0
if dssp[list(dssp.keys())[end]][2] == 'H':
if i != (len(ref)-1):
while dssp[list(dssp.keys())[end+1]][2] == 'H' and end+1 != utils.getNum(ref[i+1][0], res_num):
end_longer_counter+=1
end+=1
else:
while dssp[list(dssp.keys())[end+1]][2] == 'H':
end_longer_counter+=1
end+=1
try:
dssp[list(dssp.keys())[end+1]][2] == 'H'
except IndexError:
break
else:
while dssp[list(dssp.keys())[end]][2] != 'H':
end-=1
end_shorter_counter+=1
if start_shorter_counter > 0:
dssp_start = ref[i][0] + start_shorter_counter
else:
dssp_start = ref[i][0] - start_longer_counter
if end_shorter_counter > 0:
dssp_end = ref[i][1] - end_shorter_counter
else:
dssp_end = ref[i][1] + end_longer_counter
result.append([dssp_start, dssp_end])
for i in range(start, end+1):
helix_map[0][i] = 1
else:
result.append([0, 0])
extras = []
map_elem=0
# TODO: wrap
while map_elem < helix_map.shape[1]:
if helix_map[0][map_elem] == 0:
if dssp[list(dssp.keys())[map_elem]][2] == 'H':
extra_counter = map_elem
while dssp[list(dssp.keys())[extra_counter+1]][2] == 'H':
extra_counter+=1
extras.append([utils.getRes(map_elem, res_num), utils.getRes(extra_counter, res_num)])
if map_elem == extra_counter:
map_elem+=1
else:
map_elem=extra_counter+1
else:
map_elem+=1
else:
map_elem+=1
n_res = 0
for e in extras:
n_res+=e[1]-e[0]+1
return result, n_res
def calc_dssp_hel(pdb_file, ref):
"""
Calculates differences with DSSP output.
Parameters:
----------
pdb_file: str
Filename of .pdb file used for calculation.
ref: list of lists (int, int)
List of amino acid numbers pairs (start, end) for each helix.
Returns:
-------
???.
"""
_, _, model, _, _ = utils.get_model_and_structure(pdb_file)
dssp = PDB.DSSP(model, pdb_file)
if not isinstance(ref, list):
if ref is None:
raise ValueError(f"Ref list is None!")
else:
raise ValueError(f"Unexpected type for ref: {type(ref)}")
return _calc_dssp_hel(dssp, ref)
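# Hedged usage sketch for calc_dssp_hel; the pdb file name and helix boundaries below are
# illustrative placeholders, not values from the original benchmark.
# example_ref = [[5, 20], [25, 40]]  # (start, end) residue numbers of the reference helices
# helix_bounds, n_extra_res = calc_dssp_hel("example_protein.pdb", example_ref)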
def dssp_hel_to_pandas(pdb_file, ref, protein_name=None, **kwargs):
"""TODO: write documentation.
Putting differences with dssp in pandas dataframe.
Parameters:
----------
pdb_file: str
Filename of .pdb file used for calculation.
ref: list of ints
List of amino acid numbers pairs (start, end) for each helix.
protein_name: str, default=None
Protein name to be added to the resulting dataframe.
Returns:
-------
pandas.DataFrame with calculated descriptor.
"""
cols_dssp = (['prot_name']
+ ['DSSP start_H' + str(elem) for elem in range(1, 14)]
+ ['DSSP end_H' + str(elem) for elem in range(1, 14)])
df_dssp = pd.DataFrame(columns=cols_dssp)
dssp_hels = None
try:
dssp_hels = calc_dssp_hel(pdb_file, ref)
except KeyError:
if protein_name:
print(f'{protein_name}: KeyError while calculating dssp')
else:
print('KeyError while calculating dssp')
except ValueError as e:
if protein_name:
print(f'{protein_name}: {e}')
else:
print(e)
data_dssp_hels = [protein_name]
if dssp_hels is not None:
for hel in dssp_hels[0]:
data_dssp_hels.append(hel[0])
data_dssp_hels.append(hel[1])
df_dssp = df_dssp.append(pd.Series(data_dssp_hels, index=cols_dssp[0:len(data_dssp_hels)]), ignore_index=True)
return df_dssp
def dssp_extra_to_pandas(pdb_file, ref, protein_name=None, **kwargs):
"""
Putting differences with DSSP in pandas dataframe (extra).
Parameters:
----------
pdb_file: str
Filename of .pdb file used for calculation.
ref: list of ints
List of amino acid numbers pairs (start, end) for each helix.
protein_name: str, default=None
Protein name to be added to the resulting dataframe.
Returns:
-------
pandas.DataFrame with calculated descriptor.
"""
cols_extra_res = ['prot_name', 'N_res extra helical']
df_extra = | pd.DataFrame(columns=cols_extra_res) | pandas.DataFrame |
###############################################################################
###
### checksteps
### This file is part of CorePressure
### This file was created by Dr <NAME>
### includes a set of fuctions to process walking data from pedar
###
### Copyright (C) 2018 University of Salford - All Rights Reserved
### You may use, distribute and modify this code under the terms of MIT Licence
### See <filename> or go to <url> for full licence details
###
###############################################################################
import os
import pandas as pd
import numpy as np
def bld_flist(fpath,ftype='.asc'):
''' builds list of all files in directory+subs with given file type '''
flist = [os.path.join(r,file) for r,d,f in os.walk(fpath) for file in f
if file.endswith(ftype)]
return(flist)
fpath = 'C:/Temp/SPM_LOW CYCLES/'
filelist = bld_flist(fpath,ftype='.xlsx')
step_count = []
low_steps = []
OutSet = pd.DataFrame(columns = ['EVA_Left','EVA_Right','P_Left','P_Right'])
for fname in filelist:
df = pd.read_excel(fname, sheet_name=None)
dvals = pd.DataFrame(index=[fname])
for cond in df:
step_count.append(len(df[cond]))
cond_strip = cond.split('_')[-2:]
cond_tag = cond_strip[0] + '_' + cond_strip[1]
# print(cond_tag)
dvals[cond_tag] = [len(df[cond])]
if len(df[cond]) < 15:
low_steps.append(cond)
dvals = dvals.reindex(sorted(dvals.columns), axis=1)
print(dvals)
# dvals = pd.DataFrame(data=vals,columns=cols)
OutSet = | pd.concat([OutSet, dvals], axis=0) | pandas.concat |
import os
import pandas as pd
def remove_duplicates(x):
"""
    Remove duplicates from series x
    This function will remove duplicate values from
    the original series of comma-separated strings
Return a set
"""
x = x.apply(lambda x: x.split(", "))
x = x.explode()
x = x.drop_duplicates()
return set(x)
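# Minimal sketch of remove_duplicates (made-up values): each cell may hold a comma-separated
# list, so the series is split, exploded to one value per row, and de-duplicated into a set.
# remove_duplicates(pd.Series(["Bandara A, Stasiun B", "Stasiun B, Terminal C"]))
# -> {"Bandara A", "Stasiun B", "Terminal C"}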
def categorize_transportations(transportations):
"""
Categorize transportations values by their keyword.
Value with string Bandara will be categorized as airport,
Value with string Terminal will be categorized as bus_station,
Value with string Stasiun will be categorized as train_station.
Return dataframes of categorized transportation
"""
airports, bus_stations, train_stations = [], [], []
# loop through set_of_transportation and
# add value to corresponding variable
# according to their keyword
set_of_transportations = remove_duplicates(transportations)
for x in set_of_transportations:
keyword = x.split()[0]
if keyword == "Bandara":
airports.append(x)
elif keyword == "Terminal":
bus_stations.append(x)
elif keyword == "Stasiun":
train_stations.append(x)
return | pd.DataFrame(data=airports, columns=["airport"]) | pandas.DataFrame |
import numpy as np
import multiprocessing as mp
import os
import pandas as pd
import re
import tqdm
import traceback
# condition.xlsx
co = pd.read_excel('data/condition5.xlsx', 'study5')
st = pd.read_excel('data/condition6.xlsx', 'list of stimuli')
# missing.xlsx
mi = pd.read_excel('data/missing.xlsx', 'Study5')
baseline_marker_id = '-1'
moca_re_pattern = re.compile('MOCA_os(\d+)')
def process_path(moca_path):
try:
subject_id = int(re.findall(moca_re_pattern, moca_path)[0].lstrip('0'))
moca_adv_path = f'data/Study 5/MocaNovaAdv{subject_id}.csv'
moca_bas_path = f'data/Study 5/MocaNovaBas{subject_id}.csv'
ADV = os.path.exists(moca_adv_path)
BAS = os.path.exists(moca_bas_path)
if not ADV:
print(f'{moca_adv_path} does not exist.')
if not BAS:
print(f'{moca_bas_path} does not exist.')
# condition.xlsx
subject_row = co.loc[co['Subject'] == subject_id].iloc[0, :] # first row for study5, second for study5a
subject_sex = int(subject_row['płecM0K1'])
subject_age = int(subject_row['wiek'])
output_subject_id = int(subject_row['id_manual'])
try:
film1_marker_id = str(int(subject_row['Film1']))
film2_marker_id = str(int(subject_row['Film2']))
film3_marker_id = str(int(subject_row['Film3']))
except Exception as e:
print(f'{moca_path} - some film marker does not exist for this subject, processing skipped.')
return 0
film1_marker_name = st[st.iloc[:, 2] == int(film1_marker_id)].iloc[:, 4].item()
film2_marker_name = st[st.iloc[:, 2] == int(film2_marker_id)].iloc[:, 4].item()
film3_marker_name = st[st.iloc[:, 2] == int(film3_marker_id)].iloc[:, 4].item()
# Moca
column_names = ['timestamp', 'meter1', 'ecg1', 'sc1', 'marker_ed']
dtype_dict = {k: 'float' for k in column_names}
dtype_dict['marker_ed'] = 'string'
ed = pd.read_csv(moca_path, sep='\t', header=None, skiprows=9, decimal=',', names=column_names, dtype=dtype_dict)
ed['marker_ed'] = ed['marker_ed'].apply(lambda x: x.strip() if type(x) == str else x)
for marker_ed in ['#* m', '#* 1', '#* 4', '#* 12', '#* 20']:
if not ed['marker_ed'].isin([marker_ed]).any():
print(f'{moca_path} does not have {marker_ed} marker, cannot sync ADV and BAS (or cannot generate all periods).')
ADV = False
BAS = False
# MocaNovaAdv
if ADV:
column_names = ['Time','SV','CO','SVI','CI','dp-dt','SPTI','RPP','DPTI','DPTI-SPTI','LVET','ZAo','Cwk','Rp','TPR','BSA','TPRI','maxAortaArea','marker_adv','Region','empty']
dtype_dict = {k: 'float' for k in column_names}
dtype_dict['marker_adv'] = 'string'
adv = pd.read_csv(moca_adv_path, sep=';', header=None, skiprows=8, names=column_names, dtype=dtype_dict)
adv = adv[['Time', 'CO', 'TPR', 'marker_adv']]
# MocaNovaAdv - resample
adv['Timedelta'] = adv.apply(lambda x: | pd.to_timedelta(x['Time'], unit='s') | pandas.to_timedelta |
# -*- coding: utf-8 -*-
import re
import warnings
from datetime import timedelta
from itertools import product
import pytest
import numpy as np
import pandas as pd
from pandas import (CategoricalIndex, DataFrame, Index, MultiIndex,
compat, date_range, period_range)
from pandas.compat import PY3, long, lrange, lzip, range, u, PYPY
from pandas.errors import PerformanceWarning, UnsortedIndexError
from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas.core.indexes.base import InvalidIndexError
from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike
from pandas._libs.tslib import Timestamp
import pandas.util.testing as tm
from pandas.util.testing import assert_almost_equal, assert_copy
from .common import Base
class TestMultiIndex(Base):
_holder = MultiIndex
_compat_props = ['shape', 'ndim', 'size']
def setup_method(self, method):
major_axis = Index(['foo', 'bar', 'baz', 'qux'])
minor_axis = Index(['one', 'two'])
major_labels = np.array([0, 0, 1, 2, 3, 3])
minor_labels = np.array([0, 1, 0, 1, 0, 1])
self.index_names = ['first', 'second']
self.indices = dict(index=MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels
], names=self.index_names,
verify_integrity=False))
self.setup_indices()
def create_index(self):
return self.index
def test_can_hold_identifiers(self):
idx = self.create_index()
key = idx[0]
assert idx._can_hold_identifiers_and_holds_name(key) is True
def test_boolean_context_compat2(self):
# boolean context compat
# GH7897
i1 = MultiIndex.from_tuples([('A', 1), ('A', 2)])
i2 = MultiIndex.from_tuples([('A', 1), ('A', 3)])
common = i1.intersection(i2)
def f():
if common:
pass
tm.assert_raises_regex(ValueError, 'The truth value of a', f)
def test_labels_dtypes(self):
# GH 8456
i = MultiIndex.from_tuples([('A', 1), ('A', 2)])
assert i.labels[0].dtype == 'int8'
assert i.labels[1].dtype == 'int8'
i = MultiIndex.from_product([['a'], range(40)])
assert i.labels[1].dtype == 'int8'
i = MultiIndex.from_product([['a'], range(400)])
assert i.labels[1].dtype == 'int16'
i = MultiIndex.from_product([['a'], range(40000)])
assert i.labels[1].dtype == 'int32'
i = pd.MultiIndex.from_product([['a'], range(1000)])
assert (i.labels[0] >= 0).all()
assert (i.labels[1] >= 0).all()
def test_where(self):
i = MultiIndex.from_tuples([('A', 1), ('A', 2)])
def f():
i.where(True)
pytest.raises(NotImplementedError, f)
def test_where_array_like(self):
i = MultiIndex.from_tuples([('A', 1), ('A', 2)])
klasses = [list, tuple, np.array, pd.Series]
cond = [False, True]
for klass in klasses:
def f():
return i.where(klass(cond))
pytest.raises(NotImplementedError, f)
def test_repeat(self):
reps = 2
numbers = [1, 2, 3]
names = np.array(['foo', 'bar'])
m = MultiIndex.from_product([
numbers, names], names=names)
expected = MultiIndex.from_product([
numbers, names.repeat(reps)], names=names)
tm.assert_index_equal(m.repeat(reps), expected)
with tm.assert_produces_warning(FutureWarning):
result = m.repeat(n=reps)
tm.assert_index_equal(result, expected)
def test_numpy_repeat(self):
reps = 2
numbers = [1, 2, 3]
names = np.array(['foo', 'bar'])
m = MultiIndex.from_product([
numbers, names], names=names)
expected = MultiIndex.from_product([
numbers, names.repeat(reps)], names=names)
tm.assert_index_equal(np.repeat(m, reps), expected)
msg = "the 'axis' parameter is not supported"
tm.assert_raises_regex(
ValueError, msg, np.repeat, m, reps, axis=1)
def test_set_name_methods(self):
# so long as these are synonyms, we don't need to test set_names
assert self.index.rename == self.index.set_names
new_names = [name + "SUFFIX" for name in self.index_names]
ind = self.index.set_names(new_names)
assert self.index.names == self.index_names
assert ind.names == new_names
with tm.assert_raises_regex(ValueError, "^Length"):
ind.set_names(new_names + new_names)
new_names2 = [name + "SUFFIX2" for name in new_names]
res = ind.set_names(new_names2, inplace=True)
assert res is None
assert ind.names == new_names2
# set names for specific level (# GH7792)
ind = self.index.set_names(new_names[0], level=0)
assert self.index.names == self.index_names
assert ind.names == [new_names[0], self.index_names[1]]
res = ind.set_names(new_names2[0], level=0, inplace=True)
assert res is None
assert ind.names == [new_names2[0], self.index_names[1]]
# set names for multiple levels
ind = self.index.set_names(new_names, level=[0, 1])
assert self.index.names == self.index_names
assert ind.names == new_names
res = ind.set_names(new_names2, level=[0, 1], inplace=True)
assert res is None
assert ind.names == new_names2
@pytest.mark.parametrize('inplace', [True, False])
def test_set_names_with_nlevel_1(self, inplace):
# GH 21149
# Ensure that .set_names for MultiIndex with
# nlevels == 1 does not raise any errors
expected = pd.MultiIndex(levels=[[0, 1]],
labels=[[0, 1]],
names=['first'])
m = pd.MultiIndex.from_product([[0, 1]])
result = m.set_names('first', level=0, inplace=inplace)
if inplace:
result = m
tm.assert_index_equal(result, expected)
def test_set_levels_labels_directly(self):
# setting levels/labels directly raises AttributeError
levels = self.index.levels
new_levels = [[lev + 'a' for lev in level] for level in levels]
labels = self.index.labels
major_labels, minor_labels = labels
major_labels = [(x + 1) % 3 for x in major_labels]
minor_labels = [(x + 1) % 1 for x in minor_labels]
new_labels = [major_labels, minor_labels]
with pytest.raises(AttributeError):
self.index.levels = new_levels
with pytest.raises(AttributeError):
self.index.labels = new_labels
def test_set_levels(self):
# side note - you probably wouldn't want to use levels and labels
# directly like this - but it is possible.
levels = self.index.levels
new_levels = [[lev + 'a' for lev in level] for level in levels]
def assert_matching(actual, expected, check_dtype=False):
# avoid specifying internal representation
# as much as possible
assert len(actual) == len(expected)
for act, exp in zip(actual, expected):
act = np.asarray(act)
exp = np.asarray(exp)
tm.assert_numpy_array_equal(act, exp, check_dtype=check_dtype)
# level changing [w/o mutation]
ind2 = self.index.set_levels(new_levels)
assert_matching(ind2.levels, new_levels)
assert_matching(self.index.levels, levels)
# level changing [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels, inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, new_levels)
# level changing specific level [w/o mutation]
ind2 = self.index.set_levels(new_levels[0], level=0)
assert_matching(ind2.levels, [new_levels[0], levels[1]])
assert_matching(self.index.levels, levels)
ind2 = self.index.set_levels(new_levels[1], level=1)
assert_matching(ind2.levels, [levels[0], new_levels[1]])
assert_matching(self.index.levels, levels)
# level changing multiple levels [w/o mutation]
ind2 = self.index.set_levels(new_levels, level=[0, 1])
assert_matching(ind2.levels, new_levels)
assert_matching(self.index.levels, levels)
# level changing specific level [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels[0], level=0, inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, [new_levels[0], levels[1]])
assert_matching(self.index.levels, levels)
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels[1], level=1, inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, [levels[0], new_levels[1]])
assert_matching(self.index.levels, levels)
# level changing multiple levels [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels, level=[0, 1],
inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, new_levels)
assert_matching(self.index.levels, levels)
# illegal level changing should not change levels
# GH 13754
original_index = self.index.copy()
for inplace in [True, False]:
with tm.assert_raises_regex(ValueError, "^On"):
self.index.set_levels(['c'], level=0, inplace=inplace)
assert_matching(self.index.levels, original_index.levels,
check_dtype=True)
with tm.assert_raises_regex(ValueError, "^On"):
self.index.set_labels([0, 1, 2, 3, 4, 5], level=0,
inplace=inplace)
assert_matching(self.index.labels, original_index.labels,
check_dtype=True)
with tm.assert_raises_regex(TypeError, "^Levels"):
self.index.set_levels('c', level=0, inplace=inplace)
assert_matching(self.index.levels, original_index.levels,
check_dtype=True)
with tm.assert_raises_regex(TypeError, "^Labels"):
self.index.set_labels(1, level=0, inplace=inplace)
assert_matching(self.index.labels, original_index.labels,
check_dtype=True)
def test_set_labels(self):
# side note - you probably wouldn't want to use levels and labels
# directly like this - but it is possible.
labels = self.index.labels
major_labels, minor_labels = labels
major_labels = [(x + 1) % 3 for x in major_labels]
minor_labels = [(x + 1) % 1 for x in minor_labels]
new_labels = [major_labels, minor_labels]
def assert_matching(actual, expected):
# avoid specifying internal representation
# as much as possible
assert len(actual) == len(expected)
for act, exp in zip(actual, expected):
act = np.asarray(act)
exp = np.asarray(exp, dtype=np.int8)
tm.assert_numpy_array_equal(act, exp)
# label changing [w/o mutation]
ind2 = self.index.set_labels(new_labels)
assert_matching(ind2.labels, new_labels)
assert_matching(self.index.labels, labels)
# label changing [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels, inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, new_labels)
# label changing specific level [w/o mutation]
ind2 = self.index.set_labels(new_labels[0], level=0)
assert_matching(ind2.labels, [new_labels[0], labels[1]])
assert_matching(self.index.labels, labels)
ind2 = self.index.set_labels(new_labels[1], level=1)
assert_matching(ind2.labels, [labels[0], new_labels[1]])
assert_matching(self.index.labels, labels)
# label changing multiple levels [w/o mutation]
ind2 = self.index.set_labels(new_labels, level=[0, 1])
assert_matching(ind2.labels, new_labels)
assert_matching(self.index.labels, labels)
# label changing specific level [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels[0], level=0, inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, [new_labels[0], labels[1]])
assert_matching(self.index.labels, labels)
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels[1], level=1, inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, [labels[0], new_labels[1]])
assert_matching(self.index.labels, labels)
# label changing multiple levels [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels, level=[0, 1],
inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, new_labels)
assert_matching(self.index.labels, labels)
# label changing for levels of different magnitude of categories
ind = pd.MultiIndex.from_tuples([(0, i) for i in range(130)])
new_labels = range(129, -1, -1)
expected = pd.MultiIndex.from_tuples(
[(0, i) for i in new_labels])
# [w/o mutation]
result = ind.set_labels(labels=new_labels, level=1)
assert result.equals(expected)
# [w/ mutation]
result = ind.copy()
result.set_labels(labels=new_labels, level=1, inplace=True)
assert result.equals(expected)
def test_set_levels_labels_names_bad_input(self):
levels, labels = self.index.levels, self.index.labels
names = self.index.names
with tm.assert_raises_regex(ValueError, 'Length of levels'):
self.index.set_levels([levels[0]])
with tm.assert_raises_regex(ValueError, 'Length of labels'):
self.index.set_labels([labels[0]])
with tm.assert_raises_regex(ValueError, 'Length of names'):
self.index.set_names([names[0]])
# scalar data shouldn't fail with an arbitrary error; instead it should
# raise demanding list-like input
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
self.index.set_levels(levels[0])
# scalar data shouldn't fail with an arbitrary error; instead it should
# raise demanding list-like input
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
self.index.set_labels(labels[0])
# scalar data shouldn't fail with an arbitrary error; instead it should
# raise demanding list-like input
with tm.assert_raises_regex(TypeError, 'list-like'):
self.index.set_names(names[0])
# should have equal lengths
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
self.index.set_levels(levels[0], level=[0, 1])
with tm.assert_raises_regex(TypeError, 'list-like'):
self.index.set_levels(levels, level=0)
# should have equal lengths
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
self.index.set_labels(labels[0], level=[0, 1])
with tm.assert_raises_regex(TypeError, 'list-like'):
self.index.set_labels(labels, level=0)
# should have equal lengths
with tm.assert_raises_regex(ValueError, 'Length of names'):
self.index.set_names(names[0], level=[0, 1])
with tm.assert_raises_regex(TypeError, 'string'):
self.index.set_names(names, level=0)
def test_set_levels_categorical(self):
# GH13854
index = MultiIndex.from_arrays([list("xyzx"), [0, 1, 2, 3]])
for ordered in [False, True]:
cidx = CategoricalIndex(list("bac"), ordered=ordered)
result = index.set_levels(cidx, 0)
expected = MultiIndex(levels=[cidx, [0, 1, 2, 3]],
labels=index.labels)
tm.assert_index_equal(result, expected)
result_lvl = result.get_level_values(0)
expected_lvl = CategoricalIndex(list("bacb"),
categories=cidx.categories,
ordered=cidx.ordered)
tm.assert_index_equal(result_lvl, expected_lvl)
def test_metadata_immutable(self):
levels, labels = self.index.levels, self.index.labels
# shouldn't be able to set at either the top level or base level
mutable_regex = re.compile('does not support mutable operations')
with tm.assert_raises_regex(TypeError, mutable_regex):
levels[0] = levels[0]
with tm.assert_raises_regex(TypeError, mutable_regex):
levels[0][0] = levels[0][0]
# ditto for labels
with tm.assert_raises_regex(TypeError, mutable_regex):
labels[0] = labels[0]
with tm.assert_raises_regex(TypeError, mutable_regex):
labels[0][0] = labels[0][0]
# and for names
names = self.index.names
with tm.assert_raises_regex(TypeError, mutable_regex):
names[0] = names[0]
def test_inplace_mutation_resets_values(self):
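# in-place set_levels/set_labels should drop the cached ._tuples so that
# .values is recomputed, while the non-inplace variants leave the cache
# (and hence .values) of the original index untouched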
levels = [['a', 'b', 'c'], [4]]
levels2 = [[1, 2, 3], ['a']]
labels = [[0, 1, 0, 2, 2, 0], [0, 0, 0, 0, 0, 0]]
mi1 = MultiIndex(levels=levels, labels=labels)
mi2 = MultiIndex(levels=levels2, labels=labels)
vals = mi1.values.copy()
vals2 = mi2.values.copy()
assert mi1._tuples is not None
# Make sure level setting works
new_vals = mi1.set_levels(levels2).values
tm.assert_almost_equal(vals2, new_vals)
# Non-inplace doesn't kill _tuples [implementation detail]
tm.assert_almost_equal(mi1._tuples, vals)
# ...and values is still same too
tm.assert_almost_equal(mi1.values, vals)
# Inplace should kill _tuples
mi1.set_levels(levels2, inplace=True)
tm.assert_almost_equal(mi1.values, vals2)
# Make sure label setting works too
labels2 = [[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0]]
exp_values = np.empty((6,), dtype=object)
exp_values[:] = [(long(1), 'a')] * 6
# Must be 1d array of tuples
assert exp_values.shape == (6,)
new_values = mi2.set_labels(labels2).values
# Not inplace shouldn't change
tm.assert_almost_equal(mi2._tuples, vals2)
# Should have correct values
tm.assert_almost_equal(exp_values, new_values)
# ...and again setting inplace should kill _tuples, etc
mi2.set_labels(labels2, inplace=True)
tm.assert_almost_equal(mi2.values, new_values)
def test_copy_in_constructor(self):
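# with copy=True, mutating the input level/label arrays afterwards must
# not leak into the already-constructed index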
levels = np.array(["a", "b", "c"])
labels = np.array([1, 1, 2, 0, 0, 1, 1])
val = labels[0]
mi = MultiIndex(levels=[levels, levels], labels=[labels, labels],
copy=True)
assert mi.labels[0][0] == val
labels[0] = 15
assert mi.labels[0][0] == val
val = levels[0]
levels[0] = "PANDA"
assert mi.levels[0][0] == val
def test_set_value_keeps_names(self):
# motivating example from #3742
lev1 = ['hans', 'hans', 'hans', 'grethe', 'grethe', 'grethe']
lev2 = ['1', '2', '3'] * 2
idx = pd.MultiIndex.from_arrays([lev1, lev2], names=['Name', 'Number'])
df = pd.DataFrame(
np.random.randn(6, 4),
columns=['one', 'two', 'three', 'four'],
index=idx)
df = df.sort_index()
assert df._is_copy is None
assert df.index.names == ('Name', 'Number')
df.at[('grethe', '4'), 'one'] = 99.34
assert df._is_copy is None
assert df.index.names == ('Name', 'Number')
def test_copy_names(self):
# Check that adding a "names" parameter to the copy is honored
# GH14302
multi_idx = pd.Index([(1, 2), (3, 4)], names=['MyName1', 'MyName2'])
multi_idx1 = multi_idx.copy()
assert multi_idx.equals(multi_idx1)
assert multi_idx.names == ['MyName1', 'MyName2']
assert multi_idx1.names == ['MyName1', 'MyName2']
multi_idx2 = multi_idx.copy(names=['NewName1', 'NewName2'])
assert multi_idx.equals(multi_idx2)
assert multi_idx.names == ['MyName1', 'MyName2']
assert multi_idx2.names == ['NewName1', 'NewName2']
multi_idx3 = multi_idx.copy(name=['NewName1', 'NewName2'])
assert multi_idx.equals(multi_idx3)
assert multi_idx.names == ['MyName1', 'MyName2']
assert multi_idx3.names == ['NewName1', 'NewName2']
def test_names(self):
# names are assigned in setup
names = self.index_names
level_names = [level.name for level in self.index.levels]
assert names == level_names
# setting bad names on existing
index = self.index
tm.assert_raises_regex(ValueError, "^Length of names",
setattr, index, "names",
list(index.names) + ["third"])
tm.assert_raises_regex(ValueError, "^Length of names",
setattr, index, "names", [])
# initializing with bad names (should always be equivalent)
major_axis, minor_axis = self.index.levels
major_labels, minor_labels = self.index.labels
tm.assert_raises_regex(ValueError, "^Length of names", MultiIndex,
levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels],
names=['first'])
tm.assert_raises_regex(ValueError, "^Length of names", MultiIndex,
levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels],
names=['first', 'second', 'third'])
# names are assigned
index.names = ["a", "b"]
ind_names = list(index.names)
level_names = [level.name for level in index.levels]
assert ind_names == level_names
def test_astype(self):
expected = self.index.copy()
actual = self.index.astype('O')
assert_copy(actual.levels, expected.levels)
assert_copy(actual.labels, expected.labels)
self.check_level_names(actual, expected.names)
with tm.assert_raises_regex(TypeError, "^Setting.*dtype.*object"):
self.index.astype(np.dtype(int))
@pytest.mark.parametrize('ordered', [True, False])
def test_astype_category(self, ordered):
# GH 18630
msg = '> 1 ndim Categorical are not supported at this time'
with tm.assert_raises_regex(NotImplementedError, msg):
self.index.astype(CategoricalDtype(ordered=ordered))
if ordered is False:
# dtype='category' defaults to ordered=False, so only test once
with tm.assert_raises_regex(NotImplementedError, msg):
self.index.astype('category')
def test_constructor_single_level(self):
result = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux']],
labels=[[0, 1, 2, 3]], names=['first'])
assert isinstance(result, MultiIndex)
expected = Index(['foo', 'bar', 'baz', 'qux'], name='first')
tm.assert_index_equal(result.levels[0], expected)
assert result.names == ['first']
def test_constructor_no_levels(self):
tm.assert_raises_regex(ValueError, "non-zero number "
"of levels/labels",
MultiIndex, levels=[], labels=[])
both_re = re.compile('Must pass both levels and labels')
with tm.assert_raises_regex(TypeError, both_re):
MultiIndex(levels=[])
with tm.assert_raises_regex(TypeError, both_re):
MultiIndex(labels=[])
def test_constructor_mismatched_label_levels(self):
labels = [np.array([1]), np.array([2]), np.array([3])]
levels = ["a"]
tm.assert_raises_regex(ValueError, "Length of levels and labels "
"must be the same", MultiIndex,
levels=levels, labels=labels)
length_error = re.compile('>= length of level')
label_error = re.compile(r'Unequal label lengths: \[4, 2\]')
# important to check that it's looking at the right thing.
with tm.assert_raises_regex(ValueError, length_error):
MultiIndex(levels=[['a'], ['b']],
labels=[[0, 1, 2, 3], [0, 3, 4, 1]])
with tm.assert_raises_regex(ValueError, label_error):
MultiIndex(levels=[['a'], ['b']], labels=[[0, 0, 0, 0], [0, 0]])
# external API
with tm.assert_raises_regex(ValueError, length_error):
self.index.copy().set_levels([['a'], ['b']])
with tm.assert_raises_regex(ValueError, label_error):
self.index.copy().set_labels([[0, 0, 0, 0], [0, 0]])
def test_constructor_nonhashable_names(self):
# GH 20527
levels = [[1, 2], [u'one', u'two']]
labels = [[0, 0, 1, 1], [0, 1, 0, 1]]
names = ((['foo'], ['bar']))
message = "MultiIndex.name must be a hashable type"
tm.assert_raises_regex(TypeError, message,
MultiIndex, levels=levels,
labels=labels, names=names)
# With .rename()
mi = MultiIndex(levels=[[1, 2], [u'one', u'two']],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]],
names=('foo', 'bar'))
renamed = [['foor'], ['barr']]
tm.assert_raises_regex(TypeError, message, mi.rename, names=renamed)
# With .set_names()
tm.assert_raises_regex(TypeError, message, mi.set_names, names=renamed)
@pytest.mark.parametrize('names', [['a', 'b', 'a'], ['1', '1', '2'],
['1', 'a', '1']])
def test_duplicate_level_names(self, names):
# GH18872
pytest.raises(ValueError, pd.MultiIndex.from_product,
[[0, 1]] * 3, names=names)
# With .rename()
mi = pd.MultiIndex.from_product([[0, 1]] * 3)
tm.assert_raises_regex(ValueError, "Duplicated level name:",
mi.rename, names)
# With .rename(., level=)
mi.rename(names[0], level=1, inplace=True)
tm.assert_raises_regex(ValueError, "Duplicated level name:",
mi.rename, names[:2], level=[0, 2])
def assert_multiindex_copied(self, copy, original):
# Levels should be (at least shallow-)copied
tm.assert_copy(copy.levels, original.levels)
tm.assert_almost_equal(copy.labels, original.labels)
# For labels it doesn't matter which way they were copied
tm.assert_almost_equal(copy.labels, original.labels)
assert copy.labels is not original.labels
# For names it doesn't matter which way they were copied
assert copy.names == original.names
assert copy.names is not original.names
# Sort order should be copied
assert copy.sortorder == original.sortorder
def test_copy(self):
i_copy = self.index.copy()
self.assert_multiindex_copied(i_copy, self.index)
def test_shallow_copy(self):
i_copy = self.index._shallow_copy()
self.assert_multiindex_copied(i_copy, self.index)
def test_view(self):
i_view = self.index.view()
self.assert_multiindex_copied(i_view, self.index)
def check_level_names(self, index, names):
assert [level.name for level in index.levels] == list(names)
def test_changing_names(self):
# names should be applied to levels
level_names = [level.name for level in self.index.levels]
self.check_level_names(self.index, self.index.names)
view = self.index.view()
copy = self.index.copy()
shallow_copy = self.index._shallow_copy()
# changing names should change level names on object
new_names = [name + "a" for name in self.index.names]
self.index.names = new_names
self.check_level_names(self.index, new_names)
# but not on copies
self.check_level_names(view, level_names)
self.check_level_names(copy, level_names)
self.check_level_names(shallow_copy, level_names)
# and copies shouldn't change original
shallow_copy.names = [name + "c" for name in shallow_copy.names]
self.check_level_names(self.index, new_names)
def test_get_level_number_integer(self):
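# integer level names are resolved by name rather than by position:
# here the level named 1 sits at position 0 and vice versa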
self.index.names = [1, 0]
assert self.index._get_level_number(1) == 0
assert self.index._get_level_number(0) == 1
pytest.raises(IndexError, self.index._get_level_number, 2)
tm.assert_raises_regex(KeyError, 'Level fourth not found',
self.index._get_level_number, 'fourth')
def test_from_arrays(self):
arrays = []
for lev, lab in zip(self.index.levels, self.index.labels):
arrays.append(np.asarray(lev).take(lab))
# list of arrays as input
result = MultiIndex.from_arrays(arrays, names=self.index.names)
tm.assert_index_equal(result, self.index)
# infer correctly
result = MultiIndex.from_arrays([[pd.NaT, Timestamp('20130101')],
['a', 'b']])
assert result.levels[0].equals(Index([Timestamp('20130101')]))
assert result.levels[1].equals(Index(['a', 'b']))
def test_from_arrays_iterator(self):
# GH 18434
arrays = []
for lev, lab in zip(self.index.levels, self.index.labels):
arrays.append(np.asarray(lev).take(lab))
# iterator as input
result = MultiIndex.from_arrays(iter(arrays), names=self.index.names)
tm.assert_index_equal(result, self.index)
# invalid iterator input
with tm.assert_raises_regex(
TypeError, "Input must be a list / sequence of array-likes."):
MultiIndex.from_arrays(0)
def test_from_arrays_index_series_datetimetz(self):
idx1 = pd.date_range('2015-01-01 10:00', freq='D', periods=3,
tz='US/Eastern')
idx2 = pd.date_range('2015-01-01 10:00', freq='H', periods=3,
tz='Asia/Tokyo')
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_series_timedelta(self):
idx1 = pd.timedelta_range('1 days', freq='D', periods=3)
idx2 = pd.timedelta_range('2 hours', freq='H', periods=3)
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_series_period(self):
idx1 = pd.period_range('2011-01-01', freq='D', periods=3)
idx2 = pd.period_range('2015-01-01', freq='H', periods=3)
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_datetimelike_mixed(self):
idx1 = pd.date_range('2015-01-01 10:00', freq='D', periods=3,
tz='US/Eastern')
idx2 = pd.date_range('2015-01-01 10:00', freq='H', periods=3)
idx3 = pd.timedelta_range('1 days', freq='D', periods=3)
idx4 = pd.period_range('2011-01-01', freq='D', periods=3)
result = pd.MultiIndex.from_arrays([idx1, idx2, idx3, idx4])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
tm.assert_index_equal(result.get_level_values(2), idx3)
tm.assert_index_equal(result.get_level_values(3), idx4)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1),
pd.Series(idx2),
pd.Series(idx3),
pd.Series(idx4)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result2.get_level_values(2), idx3)
tm.assert_index_equal(result2.get_level_values(3), idx4)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_series_categorical(self):
# GH13743
idx1 = pd.CategoricalIndex(list("abcaab"), categories=list("bac"),
ordered=False)
idx2 = pd.CategoricalIndex(list("abcaab"), categories=list("bac"),
ordered=True)
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
result3 = pd.MultiIndex.from_arrays([idx1.values, idx2.values])
tm.assert_index_equal(result3.get_level_values(0), idx1)
tm.assert_index_equal(result3.get_level_values(1), idx2)
def test_from_arrays_empty(self):
# 0 levels
with tm.assert_raises_regex(
ValueError, "Must pass non-zero number of levels/labels"):
MultiIndex.from_arrays(arrays=[])
# 1 level
result = MultiIndex.from_arrays(arrays=[[]], names=['A'])
assert isinstance(result, MultiIndex)
expected = Index([], name='A')
tm.assert_index_equal(result.levels[0], expected)
# N levels
for N in [2, 3]:
arrays = [[]] * N
names = list('ABC')[:N]
result = MultiIndex.from_arrays(arrays=arrays, names=names)
expected = MultiIndex(levels=[[]] * N, labels=[[]] * N,
names=names)
tm.assert_index_equal(result, expected)
def test_from_arrays_invalid_input(self):
invalid_inputs = [1, [1], [1, 2], [[1], 2],
'a', ['a'], ['a', 'b'], [['a'], 'b']]
for i in invalid_inputs:
pytest.raises(TypeError, MultiIndex.from_arrays, arrays=i)
def test_from_arrays_different_lengths(self):
# see gh-13599
idx1 = [1, 2, 3]
idx2 = ['a', 'b']
tm.assert_raises_regex(ValueError, '^all arrays must '
'be same length$',
MultiIndex.from_arrays, [idx1, idx2])
idx1 = []
idx2 = ['a', 'b']
tm.assert_raises_regex(ValueError, '^all arrays must '
'be same length$',
MultiIndex.from_arrays, [idx1, idx2])
idx1 = [1, 2, 3]
idx2 = []
tm.assert_raises_regex(ValueError, '^all arrays must '
'be same length$',
MultiIndex.from_arrays, [idx1, idx2])
def test_from_product(self):
first = ['foo', 'bar', 'buz']
second = ['a', 'b', 'c']
names = ['first', 'second']
result = MultiIndex.from_product([first, second], names=names)
tuples = [('foo', 'a'), ('foo', 'b'), ('foo', 'c'), ('bar', 'a'),
('bar', 'b'), ('bar', 'c'), ('buz', 'a'), ('buz', 'b'),
('buz', 'c')]
expected = MultiIndex.from_tuples(tuples, names=names)
tm.assert_index_equal(result, expected)
def test_from_product_iterator(self):
# GH 18434
first = ['foo', 'bar', 'buz']
second = ['a', 'b', 'c']
names = ['first', 'second']
tuples = [('foo', 'a'), ('foo', 'b'), ('foo', 'c'), ('bar', 'a'),
('bar', 'b'), ('bar', 'c'), ('buz', 'a'), ('buz', 'b'),
('buz', 'c')]
expected = MultiIndex.from_tuples(tuples, names=names)
# iterator as input
result = MultiIndex.from_product(iter([first, second]), names=names)
tm.assert_index_equal(result, expected)
# Invalid non-iterable input
with tm.assert_raises_regex(
TypeError, "Input must be a list / sequence of iterables."):
MultiIndex.from_product(0)
def test_from_product_empty(self):
# 0 levels
with tm.assert_raises_regex(
ValueError, "Must pass non-zero number of levels/labels"):
MultiIndex.from_product([])
# 1 level
result = MultiIndex.from_product([[]], names=['A'])
expected = pd.Index([], name='A')
tm.assert_index_equal(result.levels[0], expected)
# 2 levels
l1 = [[], ['foo', 'bar', 'baz'], []]
l2 = [[], [], ['a', 'b', 'c']]
names = ['A', 'B']
for first, second in zip(l1, l2):
result = MultiIndex.from_product([first, second], names=names)
expected = MultiIndex(levels=[first, second],
labels=[[], []], names=names)
tm.assert_index_equal(result, expected)
# GH12258
names = ['A', 'B', 'C']
for N in range(4):
lvl2 = lrange(N)
result = MultiIndex.from_product([[], lvl2, []], names=names)
expected = MultiIndex(levels=[[], lvl2, []],
labels=[[], [], []], names=names)
tm.assert_index_equal(result, expected)
def test_from_product_invalid_input(self):
invalid_inputs = [1, [1], [1, 2], [[1], 2],
'a', ['a'], ['a', 'b'], [['a'], 'b']]
for i in invalid_inputs:
pytest.raises(TypeError, MultiIndex.from_product, iterables=i)
def test_from_product_datetimeindex(self):
dt_index = date_range('2000-01-01', periods=2)
mi = pd.MultiIndex.from_product([[1, 2], dt_index])
etalon = construct_1d_object_array_from_listlike([(1, pd.Timestamp(
'2000-01-01')), (1, pd.Timestamp('2000-01-02')), (2, pd.Timestamp(
'2000-01-01')), (2, pd.Timestamp('2000-01-02'))])
tm.assert_numpy_array_equal(mi.values, etalon)
def test_from_product_index_series_categorical(self):
# GH13743
first = ['foo', 'bar']
for ordered in [False, True]:
idx = pd.CategoricalIndex(list("abcaab"), categories=list("bac"),
ordered=ordered)
expected = pd.CategoricalIndex(list("abcaab") + list("abcaab"),
categories=list("bac"),
ordered=ordered)
for arr in [idx, pd.Series(idx), idx.values]:
result = pd.MultiIndex.from_product([first, arr])
tm.assert_index_equal(result.get_level_values(1), expected)
def test_values_boxed(self):
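# .values should be a 1d object array of tuples, keeping Timestamp/NaT
# boxed rather than converting them to raw datetime64 values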
tuples = [(1, pd.Timestamp('2000-01-01')), (2, pd.NaT),
(3, pd.Timestamp('2000-01-03')),
(1, pd.Timestamp('2000-01-04')),
(2, pd.Timestamp('2000-01-02')),
(3, pd.Timestamp('2000-01-03'))]
result = pd.MultiIndex.from_tuples(tuples)
expected = construct_1d_object_array_from_listlike(tuples)
tm.assert_numpy_array_equal(result.values, expected)
# Check that code branches for boxed values produce identical results
tm.assert_numpy_array_equal(result.values[:4], result[:4].values)
def test_values_multiindex_datetimeindex(self):
# Test to ensure we hit the boxing / nobox part of MI.values
ints = np.arange(10 ** 18, 10 ** 18 + 5)
naive = pd.DatetimeIndex(ints)
aware = pd.DatetimeIndex(ints, tz='US/Central')
idx = pd.MultiIndex.from_arrays([naive, aware])
result = idx.values
outer = pd.DatetimeIndex([x[0] for x in result])
tm.assert_index_equal(outer, naive)
inner = pd.DatetimeIndex([x[1] for x in result])
tm.assert_index_equal(inner, aware)
# n_lev > n_lab
result = idx[:2].values
outer = pd.DatetimeIndex([x[0] for x in result])
tm.assert_index_equal(outer, naive[:2])
inner = pd.DatetimeIndex([x[1] for x in result])
tm.assert_index_equal(inner, aware[:2])
def test_values_multiindex_periodindex(self):
# Test to ensure we hit the boxing / nobox part of MI.values
ints = np.arange(2007, 2012)
pidx = pd.PeriodIndex(ints, freq='D')
idx = pd.MultiIndex.from_arrays([ints, pidx])
result = idx.values
outer = pd.Int64Index([x[0] for x in result])
tm.assert_index_equal(outer, pd.Int64Index(ints))
inner = pd.PeriodIndex([x[1] for x in result])
tm.assert_index_equal(inner, pidx)
# n_lev > n_lab
result = idx[:2].values
outer = pd.Int64Index([x[0] for x in result])
tm.assert_index_equal(outer, pd.Int64Index(ints[:2]))
inner = pd.PeriodIndex([x[1] for x in result])
tm.assert_index_equal(inner, pidx[:2])
def test_append(self):
result = self.index[:3].append(self.index[3:])
assert result.equals(self.index)
foos = [self.index[:1], self.index[1:3], self.index[3:]]
result = foos[0].append(foos[1:])
assert result.equals(self.index)
# empty
result = self.index.append([])
assert result.equals(self.index)
def test_append_mixed_dtypes(self):
# GH 13660
dti = date_range('2011-01-01', freq='M', periods=3, )
dti_tz = date_range('2011-01-01', freq='M', periods=3, tz='US/Eastern')
pi = period_range('2011-01', freq='M', periods=3)
mi = MultiIndex.from_arrays([[1, 2, 3],
[1.1, np.nan, 3.3],
['a', 'b', 'c'],
dti, dti_tz, pi])
assert mi.nlevels == 6
res = mi.append(mi)
exp = MultiIndex.from_arrays([[1, 2, 3, 1, 2, 3],
[1.1, np.nan, 3.3, 1.1, np.nan, 3.3],
['a', 'b', 'c', 'a', 'b', 'c'],
dti.append(dti),
dti_tz.append(dti_tz),
pi.append(pi)])
tm.assert_index_equal(res, exp)
other = MultiIndex.from_arrays([['x', 'y', 'z'], ['x', 'y', 'z'],
['x', 'y', 'z'], ['x', 'y', 'z'],
['x', 'y', 'z'], ['x', 'y', 'z']])
res = mi.append(other)
exp = MultiIndex.from_arrays([[1, 2, 3, 'x', 'y', 'z'],
[1.1, np.nan, 3.3, 'x', 'y', 'z'],
['a', 'b', 'c', 'x', 'y', 'z'],
dti.append(pd.Index(['x', 'y', 'z'])),
dti_tz.append(pd.Index(['x', 'y', 'z'])),
pi.append(pd.Index(['x', 'y', 'z']))])
tm.assert_index_equal(res, exp)
def test_get_level_values(self):
result = self.index.get_level_values(0)
expected = Index(['foo', 'foo', 'bar', 'baz', 'qux', 'qux'],
name='first')
tm.assert_index_equal(result, expected)
assert result.name == 'first'
result = self.index.get_level_values('first')
expected = self.index.get_level_values(0)
tm.assert_index_equal(result, expected)
# GH 10460
index = MultiIndex(
levels=[CategoricalIndex(['A', 'B']),
CategoricalIndex([1, 2, 3])],
labels=[np.array([0, 0, 0, 1, 1, 1]),
np.array([0, 1, 2, 0, 1, 2])])
exp = CategoricalIndex(['A', 'A', 'A', 'B', 'B', 'B'])
tm.assert_index_equal(index.get_level_values(0), exp)
exp = CategoricalIndex([1, 2, 3, 1, 2, 3])
tm.assert_index_equal(index.get_level_values(1), exp)
def test_get_level_values_int_with_na(self):
# GH 17924
arrays = [['a', 'b', 'b'], [1, np.nan, 2]]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(1)
expected = Index([1, np.nan, 2])
tm.assert_index_equal(result, expected)
arrays = [['a', 'b', 'b'], [np.nan, np.nan, 2]]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(1)
expected = Index([np.nan, np.nan, 2])
tm.assert_index_equal(result, expected)
def test_get_level_values_na(self):
arrays = [[np.nan, np.nan, np.nan], ['a', np.nan, 1]]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(0)
expected = pd.Index([np.nan, np.nan, np.nan])
tm.assert_index_equal(result, expected)
result = index.get_level_values(1)
expected = pd.Index(['a', np.nan, 1])
tm.assert_index_equal(result, expected)
arrays = [['a', 'b', 'b'], pd.DatetimeIndex([0, 1, pd.NaT])]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(1)
expected = pd.DatetimeIndex([0, 1, pd.NaT])
tm.assert_index_equal(result, expected)
arrays = [[], []]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(0)
expected = pd.Index([], dtype=object)
tm.assert_index_equal(result, expected)
def test_get_level_values_all_na(self):
# GH 17924 when level entirely consists of nan
arrays = [[np.nan, np.nan, np.nan], ['a', np.nan, 1]]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(0)
expected = pd.Index([np.nan, np.nan, np.nan], dtype=np.float64)
tm.assert_index_equal(result, expected)
result = index.get_level_values(1)
expected = pd.Index(['a', np.nan, 1], dtype=object)
tm.assert_index_equal(result, expected)
def test_reorder_levels(self):
# this blows up
tm.assert_raises_regex(IndexError, '^Too many levels',
self.index.reorder_levels, [2, 1, 0])
def test_nlevels(self):
assert self.index.nlevels == 2
def test_iter(self):
result = list(self.index)
expected = [('foo', 'one'), ('foo', 'two'), ('bar', 'one'),
('baz', 'two'), ('qux', 'one'), ('qux', 'two')]
assert result == expected
def test_legacy_pickle(self):
if PY3:
pytest.skip("testing for legacy pickles not "
"support on py3")
path = tm.get_data_path('multiindex_v1.pickle')
obj = pd.read_pickle(path)
obj2 = MultiIndex.from_tuples(obj.values)
assert obj.equals(obj2)
res = obj.get_indexer(obj)
exp = np.arange(len(obj), dtype=np.intp)
assert_almost_equal(res, exp)
res = obj.get_indexer(obj2[::-1])
exp = obj.get_indexer(obj[::-1])
exp2 = obj2.get_indexer(obj2[::-1])
assert_almost_equal(res, exp)
assert_almost_equal(exp, exp2)
def test_legacy_v2_unpickle(self):
# 0.7.3 -> 0.8.0 format manage
path = tm.get_data_path('mindex_073.pickle')
obj = pd.read_pickle(path)
obj2 = MultiIndex.from_tuples(obj.values)
assert obj.equals(obj2)
res = obj.get_indexer(obj)
exp = np.arange(len(obj), dtype=np.intp)
assert_almost_equal(res, exp)
res = obj.get_indexer(obj2[::-1])
exp = obj.get_indexer(obj[::-1])
exp2 = obj2.get_indexer(obj2[::-1])
assert_almost_equal(res, exp)
assert_almost_equal(exp, exp2)
def test_roundtrip_pickle_with_tz(self):
# GH 8367
# round-trip of timezone
index = MultiIndex.from_product(
[[1, 2], ['a', 'b'], date_range('20130101', periods=3,
tz='US/Eastern')
], names=['one', 'two', 'three'])
unpickled = tm.round_trip_pickle(index)
assert index.equal_levels(unpickled)
def test_from_tuples_index_values(self):
result = MultiIndex.from_tuples(self.index)
assert (result.values == self.index.values).all()
def test_contains(self):
assert ('foo', 'two') in self.index
assert ('bar', 'two') not in self.index
assert None not in self.index
def test_contains_top_level(self):
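# a top-level label is a member of the index even though it is not a
# key in the (tuple-based) hash engine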
midx = MultiIndex.from_product([['A', 'B'], [1, 2]])
assert 'A' in midx
assert 'A' not in midx._engine
def test_contains_with_nat(self):
# MI with a NaT
mi = MultiIndex(levels=[['C'],
pd.date_range('2012-01-01', periods=5)],
labels=[[0, 0, 0, 0, 0, 0], [-1, 0, 1, 2, 3, 4]],
names=[None, 'B'])
assert ('C', pd.Timestamp('2012-01-01')) in mi
for val in mi.values:
assert val in mi
def test_is_all_dates(self):
assert not self.index.is_all_dates
def test_is_numeric(self):
# MultiIndex is never numeric
assert not self.index.is_numeric()
def test_getitem(self):
# scalar
assert self.index[2] == ('bar', 'one')
# slice
result = self.index[2:5]
expected = self.index[[2, 3, 4]]
assert result.equals(expected)
# boolean
result = self.index[[True, False, True, False, True, True]]
result2 = self.index[np.array([True, False, True, False, True, True])]
expected = self.index[[0, 2, 4, 5]]
assert result.equals(expected)
assert result2.equals(expected)
def test_getitem_group_select(self):
sorted_idx, _ = self.index.sortlevel(0)
assert sorted_idx.get_loc('baz') == slice(3, 4)
assert sorted_idx.get_loc('foo') == slice(0, 2)
def test_get_loc(self):
assert self.index.get_loc(('foo', 'two')) == 1
assert self.index.get_loc(('baz', 'two')) == 3
pytest.raises(KeyError, self.index.get_loc, ('bar', 'two'))
pytest.raises(KeyError, self.index.get_loc, 'quux')
pytest.raises(NotImplementedError, self.index.get_loc, 'foo',
method='nearest')
# 3 levels
index = MultiIndex(levels=[Index(lrange(4)), Index(lrange(4)), Index(
lrange(4))], labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array(
[0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 0, 0, 1, 0])])
pytest.raises(KeyError, index.get_loc, (1, 1))
assert index.get_loc((2, 0)) == slice(3, 5)
def test_get_loc_duplicates(self):
index = Index([2, 2, 2, 2])
result = index.get_loc(2)
expected = slice(0, 4)
assert result == expected
# pytest.raises(Exception, index.get_loc, 2)
index = Index(['c', 'a', 'a', 'b', 'b'])
rs = index.get_loc('c')
xp = 0
assert rs == xp
def test_get_value_duplicates(self):
index = MultiIndex(levels=[['D', 'B', 'C'],
[0, 26, 27, 37, 57, 67, 75, 82]],
labels=[[0, 0, 0, 1, 2, 2, 2, 2, 2, 2],
[1, 3, 4, 6, 0, 2, 2, 3, 5, 7]],
names=['tag', 'day'])
assert index.get_loc('D') == slice(0, 3)
with pytest.raises(KeyError):
index._engine.get_value(np.array([]), 'D')
def test_get_loc_level(self):
index = MultiIndex(levels=[Index(lrange(4)), Index(lrange(4)), Index(
lrange(4))], labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array(
[0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 0, 0, 1, 0])])
loc, new_index = index.get_loc_level((0, 1))
expected = slice(1, 2)
exp_index = index[expected].droplevel(0).droplevel(0)
assert loc == expected
assert new_index.equals(exp_index)
loc, new_index = index.get_loc_level((0, 1, 0))
expected = 1
assert loc == expected
assert new_index is None
pytest.raises(KeyError, index.get_loc_level, (2, 2))
index = MultiIndex(levels=[[2000], lrange(4)], labels=[np.array(
[0, 0, 0, 0]), np.array([0, 1, 2, 3])])
result, new_index = index.get_loc_level((2000, slice(None, None)))
expected = slice(None, None)
assert result == expected
assert new_index.equals(index.droplevel(0))
@pytest.mark.parametrize('level', [0, 1])
@pytest.mark.parametrize('null_val', [np.nan, pd.NaT, None])
def test_get_loc_nan(self, level, null_val):
# GH 18485 : NaN in MultiIndex
levels = [['a', 'b'], ['c', 'd']]
key = ['b', 'd']
levels[level] = np.array([0, null_val], dtype=type(null_val))
key[level] = null_val
idx = MultiIndex.from_product(levels)
assert idx.get_loc(tuple(key)) == 3
def test_get_loc_missing_nan(self):
# GH 8569
idx = MultiIndex.from_arrays([[1.0, 2.0], [3.0, 4.0]])
assert isinstance(idx.get_loc(1), slice)
pytest.raises(KeyError, idx.get_loc, 3)
pytest.raises(KeyError, idx.get_loc, np.nan)
pytest.raises(KeyError, idx.get_loc, [np.nan])
@pytest.mark.parametrize('dtype1', [int, float, bool, str])
@pytest.mark.parametrize('dtype2', [int, float, bool, str])
def test_get_loc_multiple_dtypes(self, dtype1, dtype2):
# GH 18520
levels = [np.array([0, 1]).astype(dtype1),
np.array([0, 1]).astype(dtype2)]
idx = pd.MultiIndex.from_product(levels)
assert idx.get_loc(idx[2]) == 2
@pytest.mark.parametrize('level', [0, 1])
@pytest.mark.parametrize('dtypes', [[int, float], [float, int]])
def test_get_loc_implicit_cast(self, level, dtypes):
# GH 18818, GH 15994 : as flat index, cast int to float and vice-versa
levels = [['a', 'b'], ['c', 'd']]
key = ['b', 'd']
lev_dtype, key_dtype = dtypes
levels[level] = np.array([0, 1], dtype=lev_dtype)
key[level] = key_dtype(1)
idx = MultiIndex.from_product(levels)
assert idx.get_loc(tuple(key)) == 3
def test_get_loc_cast_bool(self):
# GH 19086 : int is casted to bool, but not vice-versa
levels = [[False, True], np.arange(2, dtype='int64')]
idx = MultiIndex.from_product(levels)
assert idx.get_loc((0, 1)) == 1
assert idx.get_loc((1, 0)) == 2
pytest.raises(KeyError, idx.get_loc, (False, True))
pytest.raises(KeyError, idx.get_loc, (True, False))
def test_slice_locs(self):
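# label-based slice_locs on the stacked (lexsorted) index should line
# up with positional slicing of the original frame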
df = tm.makeTimeDataFrame()
stacked = df.stack()
idx = stacked.index
slob = slice(*idx.slice_locs(df.index[5], df.index[15]))
sliced = stacked[slob]
expected = df[5:16].stack()
tm.assert_almost_equal(sliced.values, expected.values)
slob = slice(*idx.slice_locs(df.index[5] + timedelta(seconds=30),
df.index[15] - timedelta(seconds=30)))
sliced = stacked[slob]
expected = df[6:15].stack()
tm.assert_almost_equal(sliced.values, expected.values)
def test_slice_locs_with_type_mismatch(self):
df = tm.makeTimeDataFrame()
stacked = df.stack()
idx = stacked.index
tm.assert_raises_regex(TypeError, '^Level type mismatch',
idx.slice_locs, (1, 3))
tm.assert_raises_regex(TypeError, '^Level type mismatch',
idx.slice_locs,
df.index[5] + timedelta(
seconds=30), (5, 2))
df = tm.makeCustomDataframe(5, 5)
stacked = df.stack()
idx = stacked.index
with tm.assert_raises_regex(TypeError, '^Level type mismatch'):
idx.slice_locs(timedelta(seconds=30))
# TODO: Try creating a UnicodeDecodeError in exception message
with tm.assert_raises_regex(TypeError, '^Level type mismatch'):
idx.slice_locs(df.index[1], (16, "a"))
def test_slice_locs_not_sorted(self):
index = MultiIndex(levels=[Index(lrange(4)), Index(lrange(4)), Index(
lrange(4))], labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array(
[0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 0, 0, 1, 0])])
tm.assert_raises_regex(KeyError, "[Kk]ey length.*greater than "
"MultiIndex lexsort depth",
index.slice_locs, (1, 0, 1), (2, 1, 0))
# works
sorted_index, _ = index.sortlevel(0)
# should there be a test case here???
sorted_index.slice_locs((1, 0, 1), (2, 1, 0))
def test_slice_locs_partial(self):
sorted_idx, _ = self.index.sortlevel(0)
result = sorted_idx.slice_locs(('foo', 'two'), ('qux', 'one'))
assert result == (1, 5)
result = sorted_idx.slice_locs(None, ('qux', 'one'))
assert result == (0, 5)
result = sorted_idx.slice_locs(('foo', 'two'), None)
assert result == (1, len(sorted_idx))
result = sorted_idx.slice_locs('bar', 'baz')
assert result == (2, 4)
def test_slice_locs_not_contained(self):
# some searchsorted action
index = MultiIndex(levels=[[0, 2, 4, 6], [0, 2, 4]],
labels=[[0, 0, 0, 1, 1, 2, 3, 3, 3],
[0, 1, 2, 1, 2, 2, 0, 1, 2]], sortorder=0)
result = index.slice_locs((1, 0), (5, 2))
assert result == (3, 6)
result = index.slice_locs(1, 5)
assert result == (3, 6)
result = index.slice_locs((2, 2), (5, 2))
assert result == (3, 6)
result = index.slice_locs(2, 5)
assert result == (3, 6)
result = index.slice_locs((1, 0), (6, 3))
assert result == (3, 8)
result = index.slice_locs(-1, 10)
assert result == (0, len(index))
def test_consistency(self):
# need to construct an overflow
major_axis = lrange(70000)
minor_axis = lrange(10)
major_labels = np.arange(70000)
minor_labels = np.repeat(lrange(10), 7000)
# the fact that it works means it's consistent
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
# inconsistent
major_labels = np.array([0, 0, 1, 1, 1, 2, 2, 3, 3])
minor_labels = np.array([0, 1, 0, 1, 1, 0, 1, 0, 1])
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
assert not index.is_unique
def test_truncate(self):
major_axis = Index(lrange(4))
minor_axis = Index(lrange(2))
major_labels = np.array([0, 0, 1, 2, 3, 3])
minor_labels = np.array([0, 1, 0, 1, 0, 1])
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
result = index.truncate(before=1)
assert 'foo' not in result.levels[0]
assert 1 in result.levels[0]
result = index.truncate(after=1)
assert 2 not in result.levels[0]
assert 1 in result.levels[0]
result = index.truncate(before=1, after=2)
assert len(result.levels[0]) == 2
# after < before
pytest.raises(ValueError, index.truncate, 3, 1)
def test_get_indexer(self):
major_axis = Index(lrange(4))
minor_axis = Index(lrange(2))
major_labels = np.array([0, 0, 1, 2, 2, 3, 3], dtype=np.intp)
minor_labels = np.array([0, 1, 0, 0, 1, 0, 1], dtype=np.intp)
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
idx1 = index[:5]
idx2 = index[[1, 3, 5]]
r1 = idx1.get_indexer(idx2)
assert_almost_equal(r1, np.array([1, 3, -1], dtype=np.intp))
r1 = idx2.get_indexer(idx1, method='pad')
e1 = np.array([-1, 0, 0, 1, 1], dtype=np.intp)
assert_almost_equal(r1, e1)
r2 = idx2.get_indexer(idx1[::-1], method='pad')
assert_almost_equal(r2, e1[::-1])
rffill1 = idx2.get_indexer(idx1, method='ffill')
assert_almost_equal(r1, rffill1)
r1 = idx2.get_indexer(idx1, method='backfill')
e1 = np.array([0, 0, 1, 1, 2], dtype=np.intp)
assert_almost_equal(r1, e1)
r2 = idx2.get_indexer(idx1[::-1], method='backfill')
assert_almost_equal(r2, e1[::-1])
rbfill1 = idx2.get_indexer(idx1, method='bfill')
assert_almost_equal(r1, rbfill1)
# pass non-MultiIndex
r1 = idx1.get_indexer(idx2.values)
rexp1 = idx1.get_indexer(idx2)
assert_almost_equal(r1, rexp1)
r1 = idx1.get_indexer([1, 2, 3])
assert (r1 == [-1, -1, -1]).all()
# create index with duplicates
idx1 = Index(lrange(10) + lrange(10))
idx2 = Index(lrange(20))
msg = "Reindexing only valid with uniquely valued Index objects"
with tm.assert_raises_regex(InvalidIndexError, msg):
idx1.get_indexer(idx2)
def test_get_indexer_nearest(self):
midx = MultiIndex.from_tuples([('a', 1), ('b', 2)])
with pytest.raises(NotImplementedError):
midx.get_indexer(['a'], method='nearest')
with pytest.raises(NotImplementedError):
midx.get_indexer(['a'], method='pad', tolerance=2)
def test_hash_collisions(self):
# non-smoke test that we don't get hash collisions
index = MultiIndex.from_product([np.arange(1000), np.arange(1000)],
names=['one', 'two'])
result = index.get_indexer(index.values)
tm.assert_numpy_array_equal(result, np.arange(
len(index), dtype='intp'))
for i in [0, 1, len(index) - 2, len(index) - 1]:
result = index.get_loc(index[i])
assert result == i
def test_format(self):
self.index.format()
self.index[:0].format()
def test_format_integer_names(self):
index = MultiIndex(levels=[[0, 1], [0, 1]],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]], names=[0, 1])
index.format(names=True)
def test_format_sparse_display(self):
index = MultiIndex(levels=[[0, 1], [0, 1], [0, 1], [0]],
labels=[[0, 0, 0, 1, 1, 1], [0, 0, 1, 0, 0, 1],
[0, 1, 0, 0, 1, 0], [0, 0, 0, 0, 0, 0]])
result = index.format()
assert result[3] == '1 0 0 0'
def test_format_sparse_config(self):
warn_filters = warnings.filters
warnings.filterwarnings('ignore', category=FutureWarning,
module=".*format")
# GH1538
pd.set_option('display.multi_sparse', False)
result = self.index.format()
assert result[1] == 'foo two'
tm.reset_display_options()
warnings.filters = warn_filters
def test_to_frame(self):
tuples = [(1, 'one'), (1, 'two'), (2, 'one'), (2, 'two')]
index = MultiIndex.from_tuples(tuples)
result = index.to_frame(index=False)
expected = DataFrame(tuples)
tm.assert_frame_equal(result, expected)
result = index.to_frame()
expected.index = index
tm.assert_frame_equal(result, expected)
tuples = [(1, 'one'), (1, 'two'), (2, 'one'), (2, 'two')]
index = MultiIndex.from_tuples(tuples, names=['first', 'second'])
result = index.to_frame(index=False)
expected = DataFrame(tuples)
expected.columns = ['first', 'second']
tm.assert_frame_equal(result, expected)
result = index.to_frame()
expected.index = index
tm.assert_frame_equal(result, expected)
index = MultiIndex.from_product([range(5),
pd.date_range('20130101', periods=3)])
result = index.to_frame(index=False)
expected = DataFrame(
{0: np.repeat(np.arange(5, dtype='int64'), 3),
1: np.tile(pd.date_range('20130101', periods=3), 5)})
tm.assert_frame_equal(result, expected)
index = MultiIndex.from_product([range(5),
pd.date_range('20130101', periods=3)])
result = index.to_frame()
expected.index = index
tm.assert_frame_equal(result, expected)
def test_to_hierarchical(self):
index = MultiIndex.from_tuples([(1, 'one'), (1, 'two'), (2, 'one'), (
2, 'two')])
result = index.to_hierarchical(3)
expected = MultiIndex(levels=[[1, 2], ['one', 'two']],
labels=[[0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1],
[0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1]])
tm.assert_index_equal(result, expected)
assert result.names == index.names
# K > 1
result = index.to_hierarchical(3, 2)
expected = MultiIndex(levels=[[1, 2], ['one', 'two']],
labels=[[0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1],
[0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]])
tm.assert_index_equal(result, expected)
assert result.names == index.names
# non-sorted
index = MultiIndex.from_tuples([(2, 'c'), (1, 'b'),
(2, 'a'), (2, 'b')],
names=['N1', 'N2'])
result = index.to_hierarchical(2)
expected = MultiIndex.from_tuples([(2, 'c'), (2, 'c'), (1, 'b'),
(1, 'b'),
(2, 'a'), (2, 'a'),
(2, 'b'), (2, 'b')],
names=['N1', 'N2'])
tm.assert_index_equal(result, expected)
assert result.names == index.names
def test_bounds(self):
self.index._bounds
def test_equals_multi(self):
assert self.index.equals(self.index)
assert not self.index.equals(self.index.values)
assert self.index.equals(Index(self.index.values))
assert self.index.equal_levels(self.index)
assert not self.index.equals(self.index[:-1])
assert not self.index.equals(self.index[-1])
# different number of levels
index = MultiIndex(levels=[Index(lrange(4)), Index(lrange(4)), Index(
lrange(4))], labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array(
[0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 0, 0, 1, 0])])
index2 = MultiIndex(levels=index.levels[:-1], labels=index.labels[:-1])
assert not index.equals(index2)
assert not index.equal_levels(index2)
# levels are different
major_axis = Index(lrange(4))
minor_axis = Index(lrange(2))
major_labels = np.array([0, 0, 1, 2, 2, 3])
minor_labels = np.array([0, 1, 0, 0, 1, 0])
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
assert not self.index.equals(index)
assert not self.index.equal_levels(index)
# some of the labels are different
major_axis = Index(['foo', 'bar', 'baz', 'qux'])
minor_axis = Index(['one', 'two'])
major_labels = np.array([0, 0, 2, 2, 3, 3])
minor_labels = np.array([0, 1, 0, 1, 0, 1])
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
assert not self.index.equals(index)
def test_equals_missing_values(self):
# make sure take is not using -1
i = pd.MultiIndex.from_tuples([(0, pd.NaT),
(0, pd.Timestamp('20130101'))])
result = i[0:1].equals(i[0])
assert not result
result = i[1:2].equals(i[1])
assert not result
def test_identical(self):
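# identical() is stricter than equals(): differing names (or
# tupleize_cols) break identical() but not equals()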
mi = self.index.copy()
mi2 = self.index.copy()
assert mi.identical(mi2)
mi = mi.set_names(['new1', 'new2'])
assert mi.equals(mi2)
assert not mi.identical(mi2)
mi2 = mi2.set_names(['new1', 'new2'])
assert mi.identical(mi2)
mi3 = Index(mi.tolist(), names=mi.names)
mi4 = Index(mi.tolist(), names=mi.names, tupleize_cols=False)
assert mi.identical(mi3)
assert not mi.identical(mi4)
assert mi.equals(mi4)
def test_is_(self):
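# is_() tracks object identity through views: renaming only touches
# metadata and keeps identity, while set_levels creates a new identity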
mi = MultiIndex.from_tuples(lzip(range(10), range(10)))
assert mi.is_(mi)
assert mi.is_(mi.view())
assert mi.is_(mi.view().view().view().view())
mi2 = mi.view()
# names are metadata, they don't change id
mi2.names = ["A", "B"]
assert mi2.is_(mi)
assert mi.is_(mi2)
assert mi.is_(mi.set_names(["C", "D"]))
mi2 = mi.view()
mi2.set_names(["E", "F"], inplace=True)
assert mi.is_(mi2)
# levels are inherent properties, they change identity
mi3 = mi2.set_levels([lrange(10), lrange(10)])
assert not mi3.is_(mi2)
# shouldn't change
assert mi2.is_(mi)
mi4 = mi3.view()
# GH 17464 - Remove duplicate MultiIndex levels
mi4.set_levels([lrange(10), lrange(10)], inplace=True)
assert not mi4.is_(mi3)
mi5 = mi.view()
mi5.set_levels(mi5.levels, inplace=True)
assert not mi5.is_(mi)
def test_union(self):
piece1 = self.index[:5][::-1]
piece2 = self.index[3:]
the_union = piece1 | piece2
tups = sorted(self.index.values)
expected = MultiIndex.from_tuples(tups)
assert the_union.equals(expected)
# corner case, pass self or empty thing:
the_union = self.index.union(self.index)
assert the_union is self.index
the_union = self.index.union(self.index[:0])
assert the_union is self.index
# won't work in python 3
# tuples = self.index.values
# result = self.index[:4] | tuples[4:]
# assert result.equals(tuples)
# not valid for python 3
# def test_union_with_regular_index(self):
# other = Index(['A', 'B', 'C'])
# result = other.union(self.index)
# assert ('foo', 'one') in result
# assert 'B' in result
# result2 = self.index.union(other)
# assert result.equals(result2)
def test_intersection(self):
piece1 = self.index[:5][::-1]
piece2 = self.index[3:]
the_int = piece1 & piece2
tups = sorted(self.index[3:5].values)
expected = MultiIndex.from_tuples(tups)
assert the_int.equals(expected)
# corner case, pass self
the_int = self.index.intersection(self.index)
assert the_int is self.index
# empty intersection: disjoint
empty = self.index[:2] & self.index[2:]
expected = self.index[:0]
assert empty.equals(expected)
# can't do in python 3
# tuples = self.index.values
# result = self.index & tuples
# assert result.equals(tuples)
def test_sub(self):
first = self.index
# `-` now raises a TypeError (previously it performed a set difference)
with pytest.raises(TypeError):
first - self.index[-3:]
with pytest.raises(TypeError):
self.index[-3:] - first
with pytest.raises(TypeError):
self.index[-3:] - first.tolist()
with pytest.raises(TypeError):
first.tolist() - self.index[-3:]
def test_difference(self):
first = self.index
result = first.difference(self.index[-3:])
expected = MultiIndex.from_tuples(sorted(self.index[:-3].values),
sortorder=0,
names=self.index.names)
assert isinstance(result, MultiIndex)
assert result.equals(expected)
assert result.names == self.index.names
# empty difference: reflexive
result = self.index.difference(self.index)
expected = self.index[:0]
assert result.equals(expected)
assert result.names == self.index.names
# empty difference: superset
result = self.index[-3:].difference(self.index)
expected = self.index[:0]
assert result.equals(expected)
assert result.names == self.index.names
# empty difference: degenerate
result = self.index[:0].difference(self.index)
expected = self.index[:0]
assert result.equals(expected)
assert result.names == self.index.names
# names not the same
chunklet = self.index[-3:]
chunklet.names = ['foo', 'baz']
result = first.difference(chunklet)
assert result.names == (None, None)
# empty, but non-equal
result = self.index.difference(self.index.sortlevel(1)[0])
assert len(result) == 0
# difference called with a non-MultiIndex (ndarray of tuples) no longer
# raises
result = first.difference(first.values)
assert result.equals(first[:0])
# name from empty array
result = first.difference([])
assert first.equals(result)
assert first.names == result.names
# name from non-empty array
result = first.difference([('foo', 'one')])
expected = pd.MultiIndex.from_tuples([('bar', 'one'), ('baz', 'two'), (
'foo', 'two'), ('qux', 'one'), ('qux', 'two')])
expected.names = first.names
assert first.names == result.names
tm.assert_raises_regex(TypeError, "other must be a MultiIndex "
"or a list of tuples",
first.difference, [1, 2, 3, 4, 5])
def test_from_tuples(self):
tm.assert_raises_regex(TypeError, 'Cannot infer number of levels '
'from empty list',
MultiIndex.from_tuples, [])
expected = MultiIndex(levels=[[1, 3], [2, 4]],
labels=[[0, 1], [0, 1]],
names=['a', 'b'])
# input tuples
result = MultiIndex.from_tuples(((1, 2), (3, 4)), names=['a', 'b'])
tm.assert_index_equal(result, expected)
def test_from_tuples_iterator(self):
# GH 18434
# input iterator for tuples
expected = MultiIndex(levels=[[1, 3], [2, 4]],
labels=[[0, 1], [0, 1]],
names=['a', 'b'])
result = MultiIndex.from_tuples(zip([1, 3], [2, 4]), names=['a', 'b'])
tm.assert_index_equal(result, expected)
# input non-iterables
with tm.assert_raises_regex(
TypeError, 'Input must be a list / sequence of tuple-likes.'):
MultiIndex.from_tuples(0)
def test_from_tuples_empty(self):
# GH 16777
result = MultiIndex.from_tuples([], names=['a', 'b'])
expected = MultiIndex.from_arrays(arrays=[[], []],
names=['a', 'b'])
tm.assert_index_equal(result, expected)
def test_argsort(self):
result = self.index.argsort()
expected = self.index.values.argsort()
tm.assert_numpy_array_equal(result, expected)
def test_sortlevel(self):
import random
tuples = list(self.index)
random.shuffle(tuples)
index = MultiIndex.from_tuples(tuples)
sorted_idx, _ = index.sortlevel(0)
expected = MultiIndex.from_tuples(sorted(tuples))
assert sorted_idx.equals(expected)
sorted_idx, _ = index.sortlevel(0, ascending=False)
assert sorted_idx.equals(expected[::-1])
sorted_idx, _ = index.sortlevel(1)
by1 = sorted(tuples, key=lambda x: (x[1], x[0]))
expected = MultiIndex.from_tuples(by1)
assert sorted_idx.equals(expected)
sorted_idx, _ = index.sortlevel(1, ascending=False)
assert sorted_idx.equals(expected[::-1])
def test_sortlevel_not_sort_remaining(self):
mi = MultiIndex.from_tuples([[1, 1, 3], [1, 1, 1]], names=list('ABC'))
sorted_idx, _ = mi.sortlevel('A', sort_remaining=False)
assert sorted_idx.equals(mi)
def test_sortlevel_deterministic(self):
tuples = [('bar', 'one'), ('foo', 'two'), ('qux', 'two'),
('foo', 'one'), ('baz', 'two'), ('qux', 'one')]
index = MultiIndex.from_tuples(tuples)
sorted_idx, _ = index.sortlevel(0)
expected = MultiIndex.from_tuples(sorted(tuples))
assert sorted_idx.equals(expected)
sorted_idx, _ = index.sortlevel(0, ascending=False)
assert sorted_idx.equals(expected[::-1])
sorted_idx, _ = index.sortlevel(1)
by1 = sorted(tuples, key=lambda x: (x[1], x[0]))
expected = MultiIndex.from_tuples(by1)
assert sorted_idx.equals(expected)
sorted_idx, _ = index.sortlevel(1, ascending=False)
assert sorted_idx.equals(expected[::-1])
def test_dims(self):
pass
def test_drop(self):
dropped = self.index.drop([('foo', 'two'), ('qux', 'one')])
index = MultiIndex.from_tuples([('foo', 'two'), ('qux', 'one')])
dropped2 = self.index.drop(index)
expected = self.index[[0, 2, 3, 5]]
tm.assert_index_equal(dropped, expected)
tm.assert_index_equal(dropped2, expected)
dropped = self.index.drop(['bar'])
expected = self.index[[0, 1, 3, 4, 5]]
tm.assert_index_equal(dropped, expected)
dropped = self.index.drop('foo')
expected = self.index[[2, 3, 4, 5]]
tm.assert_index_equal(dropped, expected)
index = MultiIndex.from_tuples([('bar', 'two')])
pytest.raises(KeyError, self.index.drop, [('bar', 'two')])
pytest.raises(KeyError, self.index.drop, index)
pytest.raises(KeyError, self.index.drop, ['foo', 'two'])
# partially correct argument
mixed_index = MultiIndex.from_tuples([('qux', 'one'), ('bar', 'two')])
pytest.raises(KeyError, self.index.drop, mixed_index)
# error='ignore'
dropped = self.index.drop(index, errors='ignore')
expected = self.index[[0, 1, 2, 3, 4, 5]]
tm.assert_index_equal(dropped, expected)
dropped = self.index.drop(mixed_index, errors='ignore')
expected = self.index[[0, 1, 2, 3, 5]]
tm.assert_index_equal(dropped, expected)
dropped = self.index.drop(['foo', 'two'], errors='ignore')
expected = self.index[[2, 3, 4, 5]]
tm.assert_index_equal(dropped, expected)
# mixed partial / full drop
dropped = self.index.drop(['foo', ('qux', 'one')])
expected = self.index[[2, 3, 5]]
tm.assert_index_equal(dropped, expected)
# mixed partial / full drop / error='ignore'
mixed_index = ['foo', ('qux', 'one'), 'two']
pytest.raises(KeyError, self.index.drop, mixed_index)
dropped = self.index.drop(mixed_index, errors='ignore')
expected = self.index[[2, 3, 5]]
tm.assert_index_equal(dropped, expected)
def test_droplevel_with_names(self):
index = self.index[self.index.get_loc('foo')]
dropped = index.droplevel(0)
assert dropped.name == 'second'
index = MultiIndex(
levels=[Index(lrange(4)), Index(lrange(4)), Index(lrange(4))],
labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array(
[0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 0, 0, 1, 0])],
names=['one', 'two', 'three'])
dropped = index.droplevel(0)
assert dropped.names == ('two', 'three')
dropped = index.droplevel('two')
expected = index.droplevel(1)
assert dropped.equals(expected)
def test_droplevel_list(self):
index = MultiIndex(
levels=[Index(lrange(4)), Index(lrange(4)), Index(lrange(4))],
labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array(
[0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 0, 0, 1, 0])],
names=['one', 'two', 'three'])
dropped = index[:2].droplevel(['three', 'one'])
expected = index[:2].droplevel(2).droplevel(0)
assert dropped.equals(expected)
dropped = index[:2].droplevel([])
expected = index[:2]
assert dropped.equals(expected)
with pytest.raises(ValueError):
index[:2].droplevel(['one', 'two', 'three'])
with pytest.raises(KeyError):
index[:2].droplevel(['one', 'four'])
def test_drop_not_lexsorted(self):
# GH 12078
# define the lexsorted version of the multi-index
tuples = [('a', ''), ('b1', 'c1'), ('b2', 'c2')]
lexsorted_mi = MultiIndex.from_tuples(tuples, names=['b', 'c'])
assert lexsorted_mi.is_lexsorted()
# and the not-lexsorted version
df = pd.DataFrame(columns=['a', 'b', 'c', 'd'],
data=[[1, 'b1', 'c1', 3], [1, 'b2', 'c2', 4]])
df = df.pivot_table(index='a', columns=['b', 'c'], values='d')
df = df.reset_index()
not_lexsorted_mi = df.columns
assert not not_lexsorted_mi.is_lexsorted()
# compare the results
tm.assert_index_equal(lexsorted_mi, not_lexsorted_mi)
with tm.assert_produces_warning(PerformanceWarning):
tm.assert_index_equal(lexsorted_mi.drop('a'),
not_lexsorted_mi.drop('a'))
def test_insert(self):
# key contained in all levels
new_index = self.index.insert(0, ('bar', 'two'))
assert new_index.equal_levels(self.index)
assert new_index[0] == ('bar', 'two')
# key not contained in all levels
new_index = self.index.insert(0, ('abc', 'three'))
exp0 = Index(list(self.index.levels[0]) + ['abc'], name='first')
tm.assert_index_equal(new_index.levels[0], exp0)
exp1 = Index(list(self.index.levels[1]) + ['three'], name='second')
tm.assert_index_equal(new_index.levels[1], exp1)
assert new_index[0] == ('abc', 'three')
# key wrong length
msg = "Item must have length equal to number of levels"
with tm.assert_raises_regex(ValueError, msg):
self.index.insert(0, ('foo2',))
left = pd.DataFrame([['a', 'b', 0], ['b', 'd', 1]],
columns=['1st', '2nd', '3rd'])
left.set_index(['1st', '2nd'], inplace=True)
ts = left['3rd'].copy(deep=True)
left.loc[('b', 'x'), '3rd'] = 2
left.loc[('b', 'a'), '3rd'] = -1
left.loc[('b', 'b'), '3rd'] = 3
left.loc[('a', 'x'), '3rd'] = 4
left.loc[('a', 'w'), '3rd'] = 5
left.loc[('a', 'a'), '3rd'] = 6
ts.loc[('b', 'x')] = 2
ts.loc['b', 'a'] = -1
ts.loc[('b', 'b')] = 3
ts.loc['a', 'x'] = 4
ts.loc[('a', 'w')] = 5
ts.loc['a', 'a'] = 6
right = pd.DataFrame([['a', 'b', 0], ['b', 'd', 1], ['b', 'x', 2],
['b', 'a', -1], ['b', 'b', 3], ['a', 'x', 4],
['a', 'w', 5], ['a', 'a', 6]],
columns=['1st', '2nd', '3rd'])
right.set_index(['1st', '2nd'], inplace=True)
# FIXME data types changes to float because
# of intermediate nan insertion;
tm.assert_frame_equal(left, right, check_dtype=False)
tm.assert_series_equal(ts, right['3rd'])
# GH9250
idx = [('test1', i) for i in range(5)] + \
[('test2', i) for i in range(6)] + \
[('test', 17), ('test', 18)]
left = pd.Series(np.linspace(0, 10, 11),
pd.MultiIndex.from_tuples(idx[:-2]))
left.loc[('test', 17)] = 11
left.loc[('test', 18)] = 12
right = pd.Series(np.linspace(0, 12, 13),
pd.MultiIndex.from_tuples(idx))
tm.assert_series_equal(left, right)
def test_take_preserve_name(self):
taken = self.index.take([3, 0, 1])
assert taken.names == self.index.names
def test_take_fill_value(self):
# GH 12631
vals = [['A', 'B'],
[pd.Timestamp('2011-01-01'), pd.Timestamp('2011-01-02')]]
idx = pd.MultiIndex.from_product(vals, names=['str', 'dt'])
result = idx.take(np.array([1, 0, -1]))
exp_vals = [('A', pd.Timestamp('2011-01-02')),
('A', pd.Timestamp('2011-01-01')),
('B', pd.Timestamp('2011-01-02'))]
expected = pd.MultiIndex.from_tuples(exp_vals, names=['str', 'dt'])
tm.assert_index_equal(result, expected)
# fill_value
result = idx.take(np.array([1, 0, -1]), fill_value=True)
exp_vals = [('A', pd.Timestamp('2011-01-02')),
('A', pd.Timestamp('2011-01-01')),
(np.nan, pd.NaT)]
expected = pd.MultiIndex.from_tuples(exp_vals, names=['str', 'dt'])
tm.assert_index_equal(result, expected)
# allow_fill=False
result = idx.take(np.array([1, 0, -1]), allow_fill=False,
fill_value=True)
exp_vals = [('A', pd.Timestamp('2011-01-02')),
('A', pd.Timestamp('2011-01-01')),
('B', pd.Timestamp('2011-01-02'))]
expected = pd.MultiIndex.from_tuples(exp_vals, names=['str', 'dt'])
tm.assert_index_equal(result, expected)
msg = ('When allow_fill=True and fill_value is not None, '
'all indices must be >= -1')
with tm.assert_raises_regex(ValueError, msg):
idx.take(np.array([1, 0, -2]), fill_value=True)
with tm.assert_raises_regex(ValueError, msg):
idx.take(np.array([1, 0, -5]), fill_value=True)
with pytest.raises(IndexError):
idx.take(np.array([1, -5]))
def test_take_invalid_kwargs(self):
vals = [['A', 'B'],
[pd.Timestamp('2011-01-01'), pd.Timestamp('2011-01-02')]]
idx = pd.MultiIndex.from_product(vals, names=['str', 'dt'])
indices = [1, 2]
msg = r"take\(\) got an unexpected keyword argument 'foo'"
tm.assert_raises_regex(TypeError, msg, idx.take,
indices, foo=2)
msg = "the 'out' parameter is not supported"
tm.assert_raises_regex(ValueError, msg, idx.take,
indices, out=indices)
msg = "the 'mode' parameter is not supported"
tm.assert_raises_regex(ValueError, msg, idx.take,
indices, mode='clip')
@pytest.mark.parametrize('other',
[Index(['three', 'one', 'two']),
Index(['one']),
Index(['one', 'three'])])
def test_join_level(self, other, join_type):
join_index, lidx, ridx = other.join(self.index, how=join_type,
level='second',
return_indexers=True)
exp_level = other.join(self.index.levels[1], how=join_type)
assert join_index.levels[0].equals(self.index.levels[0])
assert join_index.levels[1].equals(exp_level)
# pare down levels
mask = np.array(
[x[1] in exp_level for x in self.index], dtype=bool)
exp_values = self.index.values[mask]
tm.assert_numpy_array_equal(join_index.values, exp_values)
if join_type in ('outer', 'inner'):
join_index2, ridx2, lidx2 = \
self.index.join(other, how=join_type, level='second',
return_indexers=True)
assert join_index.equals(join_index2)
tm.assert_numpy_array_equal(lidx, lidx2)
tm.assert_numpy_array_equal(ridx, ridx2)
tm.assert_numpy_array_equal(join_index2.values, exp_values)
def test_join_level_corner_case(self):
# some corner cases
idx = Index(['three', 'one', 'two'])
result = idx.join(self.index, level='second')
assert isinstance(result, MultiIndex)
tm.assert_raises_regex(TypeError, "Join.*MultiIndex.*ambiguous",
self.index.join, self.index, level=1)
def test_join_self(self, join_type):
res = self.index
joined = res.join(res, how=join_type)
assert res is joined
def test_join_multi(self):
# GH 10665
midx = pd.MultiIndex.from_product(
[np.arange(4), np.arange(4)], names=['a', 'b'])
idx = pd.Index([1, 2, 5], name='b')
# inner
jidx, lidx, ridx = midx.join(idx, how='inner', return_indexers=True)
exp_idx = pd.MultiIndex.from_product(
[np.arange(4), [1, 2]], names=['a', 'b'])
exp_lidx = np.array([1, 2, 5, 6, 9, 10, 13, 14], dtype=np.intp)
exp_ridx = np.array([0, 1, 0, 1, 0, 1, 0, 1], dtype=np.intp)
tm.assert_index_equal(jidx, exp_idx)
tm.assert_numpy_array_equal(lidx, exp_lidx)
tm.assert_numpy_array_equal(ridx, exp_ridx)
# flip
jidx, ridx, lidx = idx.join(midx, how='inner', return_indexers=True)
tm.assert_index_equal(jidx, exp_idx)
tm.assert_numpy_array_equal(lidx, exp_lidx)
tm.assert_numpy_array_equal(ridx, exp_ridx)
# keep MultiIndex
jidx, lidx, ridx = midx.join(idx, how='left', return_indexers=True)
exp_ridx = np.array([-1, 0, 1, -1, -1, 0, 1, -1, -1, 0, 1, -1, -1, 0,
1, -1], dtype=np.intp)
tm.assert_index_equal(jidx, midx)
assert lidx is None
tm.assert_numpy_array_equal(ridx, exp_ridx)
# flip
jidx, ridx, lidx = idx.join(midx, how='right', return_indexers=True)
tm.assert_index_equal(jidx, midx)
assert lidx is None
tm.assert_numpy_array_equal(ridx, exp_ridx)
def test_reindex(self):
result, indexer = self.index.reindex(list(self.index[:4]))
assert isinstance(result, MultiIndex)
self.check_level_names(result, self.index[:4].names)
result, indexer = self.index.reindex(list(self.index))
assert isinstance(result, MultiIndex)
assert indexer is None
self.check_level_names(result, self.index.names)
def test_reindex_level(self):
idx = Index(['one'])
target, indexer = self.index.reindex(idx, level='second')
target2, indexer2 = idx.reindex(self.index, level='second')
exp_index = self.index.join(idx, level='second', how='right')
exp_index2 = self.index.join(idx, level='second', how='left')
assert target.equals(exp_index)
exp_indexer = np.array([0, 2, 4])
tm.assert_numpy_array_equal(indexer, exp_indexer, check_dtype=False)
assert target2.equals(exp_index2)
exp_indexer2 = np.array([0, -1, 0, -1, 0, -1])
tm.assert_numpy_array_equal(indexer2, exp_indexer2, check_dtype=False)
tm.assert_raises_regex(TypeError, "Fill method not supported",
self.index.reindex, self.index,
method='pad', level='second')
tm.assert_raises_regex(TypeError, "Fill method not supported",
idx.reindex, idx, method='bfill',
level='first')
def test_duplicates(self):
assert not self.index.has_duplicates
assert self.index.append(self.index).has_duplicates
index = MultiIndex(levels=[[0, 1], [0, 1, 2]], labels=[
[0, 0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 0, 1, 2]])
assert index.has_duplicates
# GH 9075
t = [(u('x'), u('out'), u('z'), 5, u('y'), u('in'), u('z'), 169),
(u('x'), u('out'), u('z'), 7, u('y'), u('in'), u('z'), 119),
(u('x'), u('out'), u('z'), 9, u('y'), u('in'), u('z'), 135),
(u('x'), u('out'), u('z'), 13, u('y'), u('in'), u('z'), 145),
(u('x'), u('out'), u('z'), 14, u('y'), u('in'), u('z'), 158),
(u('x'), u('out'), u('z'), 16, u('y'), u('in'), u('z'), 122),
(u('x'), u('out'), u('z'), 17, u('y'), u('in'), u('z'), 160),
(u('x'), u('out'), u('z'), 18, u('y'), u('in'), u('z'), 180),
(u('x'), u('out'), u('z'), 20, u('y'), u('in'), u('z'), 143),
(u('x'), u('out'), u('z'), 21, u('y'), u('in'), u('z'), 128),
(u('x'), u('out'), u('z'), 22, u('y'), u('in'), u('z'), 129),
(u('x'), u('out'), u('z'), 25, u('y'), u('in'), u('z'), 111),
(u('x'), u('out'), u('z'), 28, u('y'), u('in'), u('z'), 114),
(u('x'), u('out'), u('z'), 29, u('y'), u('in'), u('z'), 121),
(u('x'), u('out'), u('z'), 31, u('y'), u('in'), u('z'), 126),
(u('x'), u('out'), u('z'), 32, u('y'), u('in'), u('z'), 155),
(u('x'), u('out'), u('z'), 33, u('y'), u('in'), u('z'), 123),
(u('x'), u('out'), u('z'), 12, u('y'), u('in'), u('z'), 144)]
index = pd.MultiIndex.from_tuples(t)
assert not index.has_duplicates
# handle int64 overflow if possible
def check(nlevels, with_nulls):
labels = np.tile(np.arange(500), 2)
level = np.arange(500)
if with_nulls: # inject some null values
labels[500] = -1 # common nan value
labels = [labels.copy() for i in range(nlevels)]
for i in range(nlevels):
labels[i][500 + i - nlevels // 2] = -1
labels += [np.array([-1, 1]).repeat(500)]
else:
labels = [labels] * nlevels + [np.arange(2).repeat(500)]
levels = [level] * nlevels + [[0, 1]]
# no dups
index = MultiIndex(levels=levels, labels=labels)
assert not index.has_duplicates
# with a dup
if with_nulls:
def f(a):
return np.insert(a, 1000, a[0])
labels = list(map(f, labels))
index = MultiIndex(levels=levels, labels=labels)
else:
values = index.values.tolist()
index = MultiIndex.from_tuples(values + [values[0]])
assert index.has_duplicates
# no overflow
check(4, False)
check(4, True)
# overflow possible
check(8, False)
check(8, True)
# GH 9125
n, k = 200, 5000
levels = [np.arange(n), tm.makeStringIndex(n), 1000 + np.arange(n)]
labels = [np.random.choice(n, k * n) for lev in levels]
mi = MultiIndex(levels=levels, labels=labels)
for keep in ['first', 'last', False]:
left = mi.duplicated(keep=keep)
right = pd._libs.hashtable.duplicated_object(mi.values, keep=keep)
tm.assert_numpy_array_equal(left, right)
# GH5873
for a in [101, 102]:
mi = MultiIndex.from_arrays([[101, a], [3.5, np.nan]])
assert not mi.has_duplicates
with warnings.catch_warnings(record=True):
# Deprecated - see GH20239
assert mi.get_duplicates().equals(MultiIndex.from_arrays(
[[], []]))
tm.assert_numpy_array_equal(mi.duplicated(), np.zeros(
2, dtype='bool'))
for n in range(1, 6): # 1st level shape
for m in range(1, 5): # 2nd level shape
# all possible unique combinations, including nan
lab = product(range(-1, n), range(-1, m))
mi = MultiIndex(levels=[list('abcde')[:n], list('WXYZ')[:m]],
labels=np.random.permutation(list(lab)).T)
assert len(mi) == (n + 1) * (m + 1)
assert not mi.has_duplicates
with warnings.catch_warnings(record=True):
# Deprecated - see GH20239
assert mi.get_duplicates().equals(MultiIndex.from_arrays(
[[], []]))
tm.assert_numpy_array_equal(mi.duplicated(), np.zeros(
len(mi), dtype='bool'))
def test_duplicate_meta_data(self):
# GH 10115
index = MultiIndex(
levels=[[0, 1], [0, 1, 2]],
labels=[[0, 0, 0, 0, 1, 1, 1],
[0, 1, 2, 0, 0, 1, 2]])
for idx in [index,
index.set_names([None, None]),
index.set_names([None, 'Num']),
index.set_names(['Upper', 'Num']), ]:
assert idx.has_duplicates
assert idx.drop_duplicates().names == idx.names
def test_get_unique_index(self):
idx = self.index[[0, 1, 0, 1, 1, 0, 0]]
expected = self.index._shallow_copy(idx[[0, 1]])
for dropna in [False, True]:
result = idx._get_unique_index(dropna=dropna)
assert result.is_unique
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize('names', [None, ['first', 'second']])
def test_unique(self, names):
mi = pd.MultiIndex.from_arrays([[1, 2, 1, 2], [1, 1, 1, 2]],
names=names)
res = mi.unique()
exp = pd.MultiIndex.from_arrays([[1, 2, 2], [1, 1, 2]], names=mi.names)
tm.assert_index_equal(res, exp)
mi = pd.MultiIndex.from_arrays([list('aaaa'), list('abab')],
names=names)
res = mi.unique()
exp = pd.MultiIndex.from_arrays([list('aa'), list('ab')],
names=mi.names)
tm.assert_index_equal(res, exp)
mi = pd.MultiIndex.from_arrays([list('aaaa'), list('aaaa')],
names=names)
res = mi.unique()
exp = pd.MultiIndex.from_arrays([['a'], ['a']], names=mi.names)
tm.assert_index_equal(res, exp)
# GH #20568 - empty MI
mi = pd.MultiIndex.from_arrays([[], []], names=names)
res = mi.unique()
tm.assert_index_equal(mi, res)
@pytest.mark.parametrize('level', [0, 'first', 1, 'second'])
def test_unique_level(self, level):
# GH #17896 - with level= argument
result = self.index.unique(level=level)
expected = self.index.get_level_values(level).unique()
tm.assert_index_equal(result, expected)
# With already unique level
mi = pd.MultiIndex.from_arrays([[1, 3, 2, 4], [1, 3, 2, 5]],
names=['first', 'second'])
result = mi.unique(level=level)
expected = mi.get_level_values(level)
tm.assert_index_equal(result, expected)
# With empty MI
mi = pd.MultiIndex.from_arrays([[], []], names=['first', 'second'])
result = mi.unique(level=level)
expected = mi.get_level_values(level)
tm.assert_index_equal(result, expected)
def test_unique_datetimelike(self):
idx1 = pd.DatetimeIndex(['2015-01-01', '2015-01-01', '2015-01-01',
'2015-01-01', 'NaT', 'NaT'])
idx2 = pd.DatetimeIndex(['2015-01-01', '2015-01-01', '2015-01-02',
'2015-01-02', 'NaT', '2015-01-01'],
tz='Asia/Tokyo')
result = pd.MultiIndex.from_arrays([idx1, idx2]).unique()
eidx1 = pd.DatetimeIndex(['2015-01-01', '2015-01-01', 'NaT', 'NaT'])
eidx2 = pd.DatetimeIndex(['2015-01-01', '2015-01-02',
'NaT', '2015-01-01'],
tz='Asia/Tokyo')
exp = pd.MultiIndex.from_arrays([eidx1, eidx2])
tm.assert_index_equal(result, exp)
def test_tolist(self):
result = self.index.tolist()
exp = list(self.index.values)
assert result == exp
def test_repr_with_unicode_data(self):
with pd.core.config.option_context("display.encoding", 'UTF-8'):
d = {"a": [u("\u05d0"), 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]}
index = pd.DataFrame(d).set_index(["a", "b"]).index
assert "\\u" not in repr(index) # we don't want unicode-escaped
def test_repr_roundtrip(self):
mi = MultiIndex.from_product([list('ab'), range(3)],
names=['first', 'second'])
str(mi)
if PY3:
tm.assert_index_equal(eval(repr(mi)), mi, exact=True)
else:
result = eval(repr(mi))
# string coerces to unicode
tm.assert_index_equal(result, mi, exact=False)
assert mi.get_level_values('first').inferred_type == 'string'
assert result.get_level_values('first').inferred_type == 'unicode'
mi_u = MultiIndex.from_product(
[list(u'ab'), range(3)], names=['first', 'second'])
result = eval(repr(mi_u))
tm.assert_index_equal(result, mi_u, exact=True)
# formatting
if PY3:
str(mi)
else:
compat.text_type(mi)
# long format
mi = MultiIndex.from_product([list('abcdefg'), range(10)],
names=['first', 'second'])
if PY3:
tm.assert_index_equal(eval(repr(mi)), mi, exact=True)
else:
result = eval(repr(mi))
# string coerces to unicode
tm.assert_index_equal(result, mi, exact=False)
assert mi.get_level_values('first').inferred_type == 'string'
assert result.get_level_values('first').inferred_type == 'unicode'
result = eval(repr(mi_u))
tm.assert_index_equal(result, mi_u, exact=True)
def test_str(self):
# tested elsewhere
pass
def test_unicode_string_with_unicode(self):
d = {"a": [u("\u05d0"), 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]}
idx = pd.DataFrame(d).set_index(["a", "b"]).index
if PY3:
str(idx)
else:
compat.text_type(idx)
def test_bytestring_with_unicode(self):
d = {"a": [u("\u05d0"), 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]}
idx = pd.DataFrame(d).set_index(["a", "b"]).index
if PY3:
bytes(idx)
else:
str(idx)
def test_slice_keep_name(self):
x = MultiIndex.from_tuples([('a', 'b'), (1, 2), ('c', 'd')],
names=['x', 'y'])
assert x[1:].names == x.names
def test_isna_behavior(self):
# should not segfault GH5123
# NOTE: if MI representation changes, may make sense to allow
# isna(MI)
with pytest.raises(NotImplementedError):
pd.isna(self.index)
def test_level_setting_resets_attributes(self):
ind = pd.MultiIndex.from_arrays([
['A', 'A', 'B', 'B', 'B'], [1, 2, 1, 2, 3]
])
assert ind.is_monotonic
ind.set_levels([['A', 'B'], [1, 3, 2]], inplace=True)
# if this fails, probably didn't reset the cache correctly.
assert not ind.is_monotonic
def test_is_monotonic_increasing(self):
i = MultiIndex.from_product([np.arange(10),
np.arange(10)], names=['one', 'two'])
assert i.is_monotonic
assert i._is_strictly_monotonic_increasing
assert Index(i.values).is_monotonic
assert i._is_strictly_monotonic_increasing
i = MultiIndex.from_product([np.arange(10, 0, -1),
np.arange(10)], names=['one', 'two'])
assert not i.is_monotonic
assert not i._is_strictly_monotonic_increasing
assert not Index(i.values).is_monotonic
assert not Index(i.values)._is_strictly_monotonic_increasing
i = MultiIndex.from_product([np.arange(10),
np.arange(10, 0, -1)],
names=['one', 'two'])
assert not i.is_monotonic
assert not i._is_strictly_monotonic_increasing
assert not Index(i.values).is_monotonic
assert not Index(i.values)._is_strictly_monotonic_increasing
i = MultiIndex.from_product([[1.0, np.nan, 2.0], ['a', 'b', 'c']])
assert not i.is_monotonic
assert not i._is_strictly_monotonic_increasing
assert not Index(i.values).is_monotonic
assert not Index(i.values)._is_strictly_monotonic_increasing
# string ordering
i = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],
['one', 'two', 'three']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['first', 'second'])
assert not i.is_monotonic
assert not | Index(i.values) | pandas.Index |
from constants import RANDOM_SEED
from sklearn.model_selection import train_test_split
from pandas import DataFrame
from spacy.lang.tag_map import TAG_MAP
from utils import init_logger
import spacy
import re
import numpy as np
import pandas as pd
### BERT constants
WORDPIECE_PREFIX = "##"
CLS_TOKEN = "[CLS]"
SEP_TOKEN = "[SEP]"
MASK_TOKEN = "[MASK]"
### POS Tags constants
TOKEN_SEPARATOR = " "
WORD_POS_SEPARATOR = "_"
ADJ_POS_TAGS = ("ADJ", "ADV")
POS_TAGS_TUPLE = tuple(sorted(TAG_MAP.keys()))
POS_TAG_IDX_MAP = {str(tag): int(idx) for idx, tag in enumerate(POS_TAGS_TUPLE)}
ADJ_POS_TAGS_IDX = {"ADJ": 0, "ADV": 2}
NUM_POS_TAGS_LABELS = len(POS_TAGS_TUPLE)
sentiment_output_datasets = {0: 'negative', 1: 'positive'}
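# Clarifying note (added): with the constants above, a POS-tagged review is stored as a
# single string of word_POS tokens joined by spaces, e.g. something like
# "The_DET movie_NOUN was_AUX great_ADJ", and POS_TAG_IDX_MAP maps each spaCy tag name
# to an integer label in [0, NUM_POS_TAGS_LABELS).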
def clean_review(text: str) -> str:
review_text = re.sub("\n", "", text)
review_text = re.sub(" and quot;", '"', review_text)
review_text = re.sub("<br />", "", review_text)
review_text = re.sub(WORD_POS_SEPARATOR, "", review_text)
review_text = re.sub("\s+", TOKEN_SEPARATOR, review_text)
# review_text = re.sub(";", ",", review_text)
return review_text.strip()
class PretrainedPOSTagger:
"""This module requires en_core_web_lg model to be installed"""
tagger = spacy.load("en_core_web_lg")
@staticmethod
def tag_review(review: str) -> str:
review_text = clean_review(review)
tagged_review = [f"{token.text}{WORD_POS_SEPARATOR}{token.pos_}"
for token in PretrainedPOSTagger.tagger(review_text)]
return TOKEN_SEPARATOR.join(tagged_review)
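# Minimal usage sketch (illustrative only; the review text is made up and the
# en_core_web_lg model must already be installed, e.g. `python -m spacy download en_core_web_lg`):
#
# raw = "The movie was great<br />and I loved it.\n"
# tagged = PretrainedPOSTagger.tag_review(raw)
# `tagged` is then a single string of word_POS tokens, roughly
# "The_DET movie_NOUN was_AUX great_ADJ and_CCONJ I_PRON loved_VERB it_PRON ._PUNCT"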
def split_data(df: DataFrame, path: str, prefix: str, label_column: str = "label"):
train, test = train_test_split(df, test_size=0.2, stratify=df[label_column], random_state=RANDOM_SEED)
train, dev = train_test_split(train, test_size=0.2, stratify=train[label_column], random_state=RANDOM_SEED)
df.sort_index().to_csv(f"{path}/{prefix}_all.csv")
train.sort_index().to_csv(f"{path}/{prefix}_train.csv")
dev.sort_index().to_csv(f"{path}/{prefix}_dev.csv")
test.sort_index().to_csv(f"{path}/{prefix}_test.csv")
return train, dev, test
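# Usage sketch (assumes a dataframe `reviews_df` with a "label" column; the path and
# prefix below are placeholders):
#
# train, dev, test = split_data(reviews_df, path="data", prefix="imdb", label_column="label")
#
# Both splits are stratified on the label column, so class proportions are preserved
# across train/dev/test, and all four CSVs are written with a sorted index.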
def print_text_stats(df: DataFrame, text_column: str):
sequence_lengths = df[text_column].apply(lambda text: int(len(str(text).split(TOKEN_SEPARATOR))))
print(f"Number of sequences in dataset: {len(sequence_lengths)}")
print(f"Max sequence length in dataset: {np.max(sequence_lengths)}")
print(f"Min sequence length in dataset: {np.min(sequence_lengths)}")
print(f"Median sequence length in dataset: {np.median(sequence_lengths)}")
print(f"Mean sequence length in dataset: {np.mean(sequence_lengths)}")
def bias_random_sampling(df: DataFrame, bias_column: str, biasing_factor: float, seed: int = RANDOM_SEED):
return df.sample(frac=biasing_factor, random_state=seed)
def bias_ranked_sampling(df: DataFrame, bias_column: str, biasing_factor: float):
return df.sort_values(by=bias_column, ascending=False).head(int(len(df)*biasing_factor))
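# The two samplers above differ only in how rows are picked: bias_random_sampling draws a
# uniform random fraction of the dataframe, while bias_ranked_sampling keeps the top
# `biasing_factor` fraction after sorting by `bias_column`. Illustrative comparison
# (the "score" column name is hypothetical):
#
# sample_rand = bias_random_sampling(df, bias_column="score", biasing_factor=0.3)  # random 30%
# sample_top = bias_ranked_sampling(df, bias_column="score", biasing_factor=0.3)   # top 30% by "score"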
def bias_aggressive(df_a, df_b, label_column, bias_column,
biased_label, biasing_factor, sampling_method=bias_random_sampling):
"""
Biases the selected class by the biasing factor, and uses the same factor to inversely bias all other classes.
:param bias_column: column whose values drive the (ranked) sampling
:param label_column: name of the label column
:param sampling_method: sampling function to apply (e.g. bias_random_sampling or bias_ranked_sampling)
:param df_a: first dataframe to sample from
:param df_b: second dataframe to sample from
:param biased_label: label value whose class is biased by `biasing_factor`
:param biasing_factor: fraction of rows to keep for the biased class
:return:
"""
df_biased = | pd.DataFrame(columns=df_a.columns) | pandas.DataFrame |
from typing import Union
from copy import deepcopy
from functools import reduce
import numpy as np
import pandas as pd
import altair as alt
from sklearn.metrics import pairwise_distances
from sklearn.metrics.pairwise import paired_distances
from whatlies.embedding import Embedding
from whatlies.common import plot_graph_layout
class EmbeddingSet:
"""
This object represents a set of `Embedding`s. You can use the same operations
as an `Embedding`, but here they are applied to the entire set instead of a single
`Embedding`.
**Parameters**
- **embeddings**: list of embeddings or dictionary with name: embedding pairs
- **name**: custom name of embeddingset
Usage:
```
from whatlies.embedding import Embedding
from whatlies.embeddingset import EmbeddingSet
```
"""
def __init__(self, *embeddings, name=None):
if not name:
name = "Emb"
self.name = name
if len(embeddings) == 1:
# we assume it is a dictionary here
self.embeddings = embeddings[0]
else:
# we assume it is a tuple of tokens
self.embeddings = {t.name: t for t in embeddings}
def __contains__(self, item):
"""
Checks if an item is in the embeddingset.
Usage:
```python
from whatlies.embedding import Embedding
from whatlies.embeddingset import EmbeddingSet
foo = Embedding("foo", [0.1, 0.3])
bar = Embedding("bar", [0.7, 0.2])
buz = Embedding("buz", [0.1, 0.9])
emb = EmbeddingSet(foo, bar)
"foo" in emb # True
"dinosaur" in emb # False
```
"""
return item in self.embeddings.keys()
def __iter__(self):
"""
Iterate over all the embeddings in the embeddingset.
Usage:
```python
from whatlies.embedding import Embedding
from whatlies.embeddingset import EmbeddingSet
foo = Embedding("foo", [0.1, 0.3])
bar = Embedding("bar", [0.7, 0.2])
buz = Embedding("buz", [0.1, 0.9])
emb = EmbeddingSet(foo, bar)
[e for e in emb]
```
"""
return self.embeddings.values().__iter__()
def __add__(self, other):
"""
Adds an embedding to each element in the embeddingset.
Usage:
```python
from whatlies.embedding import Embedding
from whatlies.embeddingset import EmbeddingSet
foo = Embedding("foo", [0.1, 0.3])
bar = Embedding("bar", [0.7, 0.2])
buz = Embedding("buz", [0.1, 0.9])
emb = EmbeddingSet(foo, bar)
(emb).plot(kind="arrow")
(emb + buz).plot(kind="arrow")
```
"""
new_embeddings = {k: emb + other for k, emb in self.embeddings.items()}
return EmbeddingSet(new_embeddings, name=f"({self.name} + {other.name})")
def __sub__(self, other):
"""
Subtracts an embedding from each element in the embeddingset.
Usage:
```python
from whatlies.embedding import Embedding
from whatlies.embeddingset import EmbeddingSet
foo = Embedding("foo", [0.1, 0.3])
bar = Embedding("bar", [0.7, 0.2])
buz = Embedding("buz", [0.1, 0.9])
emb = EmbeddingSet(foo, bar)
(emb).plot(kind="arrow")
(emb - buz).plot(kind="arrow")
```
"""
new_embeddings = {k: emb - other for k, emb in self.embeddings.items()}
return EmbeddingSet(new_embeddings, name=f"({self.name} - {other.name})")
def __or__(self, other):
"""
Makes every element in the embeddingset orthogonal to the passed embedding.
Usage:
```python
from whatlies.embedding import Embedding
from whatlies.embeddingset import EmbeddingSet
foo = Embedding("foo", [0.1, 0.3])
bar = Embedding("bar", [0.7, 0.2])
buz = Embedding("buz", [0.1, 0.9])
emb = EmbeddingSet(foo, bar)
(emb).plot(kind="arrow")
(emb | buz).plot(kind="arrow")
```
"""
new_embeddings = {k: emb | other for k, emb in self.embeddings.items()}
return EmbeddingSet(new_embeddings, name=f"({self.name} | {other.name})")
def __rshift__(self, other):
"""
Maps every embedding in the embedding set onto the passed embedding.
Usage:
```python
from whatlies.embedding import Embedding
from whatlies.embeddingset import EmbeddingSet
foo = Embedding("foo", [0.1, 0.3])
bar = Embedding("bar", [0.7, 0.2])
buz = Embedding("buz", [0.1, 0.9])
emb = EmbeddingSet(foo, bar)
(emb).plot(kind="arrow")
(emb >> buz).plot(kind="arrow")
```
"""
new_embeddings = {k: emb >> other for k, emb in self.embeddings.items()}
return EmbeddingSet(new_embeddings, name=f"({self.name} >> {other.name})")
def compare_against(self, other, mapping="direct"):
"""Return, for every embedding in the set, the result of `embedding > other` (used by `to_axis_df` to score embeddings along an axis)."""
if mapping == "direct":
return [v > other for k, v in self.embeddings.items()]
def to_X(self):
"""
Takes every vector in each embedding and turns it into a scikit-learn compatible `X` matrix.
Usage:
```python
from whatlies.embedding import Embedding
from whatlies.embeddingset import EmbeddingSet
foo = Embedding("foo", [0.1, 0.3])
bar = Embedding("bar", [0.7, 0.2])
buz = Embedding("buz", [0.1, 0.9])
emb = EmbeddingSet(foo, bar, buz)
X = emb.to_X()
```
"""
X = np.array([i.vector for i in self.embeddings.values()])
return X
def to_X_y(self, y_label):
"""
Takes every vector in each embedding and turns it into a scikit-learn compatible `X` matrix.
Also retrieves an array with potential labels.
Usage:
```python
from whatlies.embedding import Embedding
from whatlies.embeddingset import EmbeddingSet
foo = Embedding("foo", [0.1, 0.3])
bar = Embedding("bar", [0.7, 0.2])
buz = Embedding("buz", [0.1, 0.9])
bla = Embedding("bla", [0.2, 0.8])
emb1 = EmbeddingSet(foo, bar).add_property("label", lambda d: 'group-one')
emb2 = EmbeddingSet(buz, bla).add_property("label", lambda d: 'group-two')
emb = emb1.merge(emb2)
X, y = emb.to_X_y(y_label='label')
```
"""
X = np.array([e.vector for e in self.embeddings.values()])
y = np.array([getattr(e, y_label) for e in self.embeddings.values()])
return X, y
def transform(self, transformer):
"""
Applies a transformation on the entire set.
Usage:
```python
from whatlies.embeddingset import EmbeddingSet
from whatlies.transformers import Pca
foo = Embedding("foo", [0.1, 0.3, 0.10])
bar = Embedding("bar", [0.7, 0.2, 0.11])
buz = Embedding("buz", [0.1, 0.9, 0.12])
emb = EmbeddingSet(foo, bar, buz).transform(Pca(2))
```
"""
return transformer(self)
def __getitem__(self, thing):
"""
Retrieve a single embedding from the embeddingset.
Usage:
```python
from whatlies.embeddingset import EmbeddingSet
foo = Embedding("foo", [0.1, 0.3, 0.10])
bar = Embedding("bar", [0.7, 0.2, 0.11])
buz = Embedding("buz", [0.1, 0.9, 0.12])
emb = EmbeddingSet(foo, bar, buz)
emb["buz"]
```
"""
if not isinstance(thing, list):
return self.embeddings[thing]
new_embeddings = {k: self.embeddings[k] for k in thing}
names = ",".join(thing)
return EmbeddingSet(new_embeddings, name=f"{self.name}.subset({names})")
def __repr__(self):
return self.name
def __str__(self):
return self.name
def __len__(self):
return len(self.embeddings.keys())
def merge(self, other):
"""
Concatenates two embedding sets together
Arguments:
other: another embeddingset
Usage:
```python
from whatlies.embeddingset import EmbeddingSet
foo = Embedding("foo", [0.1, 0.3, 0.10])
bar = Embedding("bar", [0.7, 0.2, 0.11])
buz = Embedding("buz", [0.1, 0.9, 0.12])
xyz = Embedding("xyz", [0.1, 0.9, 0.12])
emb1 = EmbeddingSet(foo, bar)
emb2 = EmbeddingSet(xyz, buz)
both = emb1.merge(emb2)
```
"""
return EmbeddingSet({**self.embeddings, **other.embeddings})
def add_property(self, name, func):
"""
Adds a property to every embedding in the set. Very useful for plotting because
a property can be used to assign colors.
Arguments:
name: name of the property to add
func: function that receives an embedding and needs to output the property value
```python
from whatlies.embeddingset import EmbeddingSet
foo = Embedding("foo", [0.1, 0.3, 0.10])
bar = Embedding("bar", [0.7, 0.2, 0.11])
emb = EmbeddingSet(foo, bar)
emb_with_property = emb.add_property('example', lambda d: 'group-one')
```
"""
return EmbeddingSet(
{k: e.add_property(name, func) for k, e in self.embeddings.items()}
)
def average(self, name=None):
"""
Takes the average over all the embedding vectors in the embeddingset. Turns it into
a new `Embedding`.
Arguments:
name: manually specify the name of the average embedding
```python
from whatlies.embeddingset import EmbeddingSet
foo = Embedding("foo", [1.0, 0.0])
bar = Embedding("bar", [0.0, 1.0])
emb = EmbeddingSet(foo, bar)
emb.average().vector # [0.5, 0.5]
emb.average(name="the-average").vector # [0.5, 0.5]
```
"""
name = f"{self.name}.average()" if not name else name
x = np.array([v.vector for v in self.embeddings.values()])
return Embedding(name, np.mean(x, axis=0))
def embset_similar(self, emb: Union[str, Embedding], n: int = 10, metric='cosine'):
"""
Retrieve an [EmbeddingSet][whatlies.embeddingset.EmbeddingSet] containing the embeddings most similar to the passed query.
Arguments:
emb: query to use
n: the number of items you'd like to see returned
metric: metric to use to calculate distance, must be scipy or sklearn compatible
Returns:
An [EmbeddingSet][whatlies.embeddingset.EmbeddingSet] containing the similar embeddings.
"""
embs = [w[0] for w in self.score_similar(emb, n, metric)]
return EmbeddingSet({w.name: w for w in embs})
def score_similar(self, emb: Union[str, Embedding], n: int = 10, metric='cosine'):
"""
Retrieve a list of (Embedding, score) tuples that are the most similar to the passed query.
Arguments:
emb: query to use
n: the number of items you'd like to see returned
metric: metric to use to calculate distance, must be scipy or sklearn compatible
Returns:
An list of ([Embedding][whatlies.embedding.Embedding], score) tuples.
"""
if n > len(self):
raise ValueError(f"You cannot retreive (n={n}) more items than exist in the Embeddingset (len={len(self)})")
if str(emb) not in self.embeddings.keys():
raise ValueError(f"Embedding for `{str(emb)}` does not exist in this EmbeddingSet")
if isinstance(emb, str):
emb = self[emb]
vec = emb.vector
queries = [w for w in self.embeddings.keys()]
vector_matrix = np.array([w.vector for w in self.embeddings.values()])
distances = pairwise_distances(vector_matrix, vec.reshape(1, -1), metric=metric)
by_similarity = sorted(zip(queries, distances), key=lambda z: z[1])
return [(self[q], float(d)) for q, d in by_similarity[:n]]
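# Usage sketch for the two similarity helpers above (outputs shown are illustrative,
# not real values):
#
# emb = EmbeddingSet(foo, bar, buz)
# emb.score_similar("foo", n=2)   # -> [(foo, 0.0), (<nearest other embedding>, <distance>)]
# emb.embset_similar("foo", n=2)  # -> EmbeddingSet with "foo" and its nearest neighbour
#
# Distances (not similarities) are returned, so 0.0 means identical to the query.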
def to_matrix(self):
return np.array([w.vector for w in self.embeddings.values()])
def movement_df(self, other, metric="euclidean"):
"""Per-name distance between this set and `other` (for names present in both), sorted by movement."""
overlap = list(set(self.embeddings.keys()).intersection(set(other.embeddings.keys())))
mat1 = np.array([w.vector for w in self[overlap]])
mat2 = np.array([w.vector for w in other[overlap]])
return pd.DataFrame({
'name': overlap,
'movement': paired_distances(mat1, mat2, metric)
}).sort_values(['movement'], ascending=False).reset_index()
def to_axis_df(self, x_axis, y_axis):
"""Build a dataframe scoring every embedding (via `compare_against`) along the two given axes."""
if isinstance(x_axis, str):
x_axis = self[x_axis]
if isinstance(y_axis, str):
y_axis = self[y_axis]
return pd.DataFrame({
"x_axis": self.compare_against(x_axis),
"y_axis": self.compare_against(y_axis),
"name": [v.name for v in self.embeddings.values()],
"original": [v.orig for v in self.embeddings.values()],
})
def plot(
self,
kind: str = "scatter",
x_axis: str = None,
y_axis: str = None,
color: str = None,
show_ops: bool = False,
**kwargs,
):
"""
Makes (perhaps inferior) matplotlib plot. Consider using `plot_interactive` instead.
Arguments:
kind: what kind of plot to make, can be `scatter`, `arrow` or `text`
x_axis: the x-axis to be used, must be given when dim > 2
y_axis: the y-axis to be used, must be given when dim > 2
color: the color of the dots
show_ops: setting to also show the applied operations, only works for `text`
"""
for k, token in self.embeddings.items():
token.plot(
kind=kind,
x_axis=x_axis,
y_axis=y_axis,
color=color,
show_ops=show_ops,
**kwargs,
)
return self
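# Usage sketch for .plot(): x_axis/y_axis are forwarded unchanged to each Embedding.plot
# call, so for 2-d embeddings they can simply be omitted (illustrative):
#
# EmbeddingSet(foo, bar, buz).plot(kind="arrow", color="blue")
#
# Because the method returns `self`, plotting can be chained with other EmbeddingSet calls.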
def plot_graph_layout(self, kind="cosine", **kwargs):
plot_graph_layout(self.embeddings, kind, **kwargs)
return self
def plot_movement(self, other,
x_axis: Union[str, Embedding],
y_axis: Union[str, Embedding],
first_group_name="before",
second_group_name="after",
annot: bool = True,
):
"""
Makes highly interactive plot of the movement of embeddings
between two sets of embeddings.
Arguments:
other: the other embeddingset
x_axis: the x-axis to be used, must be given when dim > 2
y_axis: the y-axis to be used, must be given when dim > 2
first_group_name: the name to give to the first set of embeddings (default: "before")
second_group_name: the name to give to the second set of embeddings (default: "after")
annot: drawn points should be annotated
**Usage**
```python
from whatlies.language import SpacyLanguage
words = ["prince", "princess", "nurse", "doctor", "banker", "man", "woman",
"cousin", "neice", "king", "queen", "dude", "guy", "gal", "fire",
"dog", "cat", "mouse", "red", "bluee", "green", "yellow", "water",
"person", "family", "brother", "sister"]
lang = SpacyLanguage("en_core_web_md")
emb = lang[words]
emb_new = emb - emb['king']
emb.plot_movement(emb_new, 'man', 'woman')
```
"""
if isinstance(x_axis, str):
x_axis = self[x_axis]
if isinstance(y_axis, str):
y_axis = self[y_axis]
df1 = (self.to_axis_df(x_axis, y_axis)
.set_index('original')
.drop(columns=['name']))
df2 = (other.to_axis_df(x_axis, y_axis)
.set_index('original')
.drop(columns=['name'])
.loc[lambda d: d.index.isin(df1.index)])
df_draw = ( | pd.concat([df1, df2]) | pandas.concat |
# Function 0
def cleaning_func_0(loan):
# core cleaning code
import numpy as np
import pandas as pd
# loan = pd.read_csv('../input/loan.csv', low_memory=False)
loan['90day_worse_rating'] = np.where(loan['mths_since_last_major_derog'].isnull(), 0, 1)
return loan
#=============
# Function 1
def cleaning_func_1(loan):
# core cleaning code
import pandas as pd
# loan = pd.read_csv('../input/loan.csv', low_memory=False)
loan['revol_util'] = loan['revol_util'].fillna(loan['revol_util'].median())
return loan
#=============
# Function 2
def cleaning_func_2(loan):
# core cleaning code
import numpy as np
import pandas as pd
# loan = pd.read_csv('../input/loan.csv', low_memory=False)
loan['emp_title'] = np.where(loan['emp_title'].isnull(), 'Job title not given', loan['emp_title'])
return loan
#=============
# Function 3
def cleaning_func_3(loan):
# core cleaning code
import numpy as np
import pandas as pd
# loan = pd.read_csv('../input/loan.csv', low_memory=False)
loan['acc_now_delinq'] = np.where(loan['acc_now_delinq'].isnull(), 0, loan['acc_now_delinq'])
return loan
#=============
# Function 4
def cleaning_func_4(loan):
# core cleaning code
import numpy as np
import pandas as pd
# loan = pd.read_csv('../input/loan.csv', low_memory=False)
loan['delinq_2yrs'] = np.where(loan['delinq_2yrs'].isnull(), 0, loan['delinq_2yrs'])
return loan
#=============
# Function 5
def cleaning_func_5(loan):
# core cleaning code
import pandas as pd
# loan = pd.read_csv('../input/loan.csv', low_memory=False)
loan['tot_coll_amt'] = loan['tot_coll_amt'].fillna(loan['tot_coll_amt'].median())
return loan
#=============
# Function 6
def cleaning_func_6(loan):
# core cleaning code
import numpy as np
import pandas as pd
# loan = pd.read_csv('../input/loan.csv', low_memory=False)
loan['title'] = np.where(loan['title'].isnull(), 0, loan['title'])
return loan
#=============
# Function 7
def cleaning_func_7(loan):
# core cleaning code
import pandas as pd
# loan = pd.read_csv('../input/loan.csv', low_memory=False)
loan['total_rev_hi_lim'] = loan['total_rev_hi_lim'].fillna(loan['total_rev_hi_lim'].median())
return loan
#=============
# Function 8
def cleaning_func_8(loan):
# core cleaning code
import numpy as np
import pandas as pd
# loan = pd.read_csv('../input/loan.csv', low_memory=False)
loan['inq_last_6mths'] = np.where(loan['inq_last_6mths'].isnull(), 0, loan['inq_last_6mths'])
return loan
#=============
# Function 9
def cleaning_func_9(loan):
# core cleaning code
import numpy as np
import pandas as pd
# loan = pd.read_csv('../input/loan.csv', low_memory=False)
loan['total_acc'] = np.where(loan['total_acc'].isnull(), 0, loan['total_acc'])
return loan
#=============
# Function 10
def cleaning_func_10(loan):
# core cleaning code
import pandas as pd
# loan = pd.read_csv('../input/loan.csv', low_memory=False)
loan['annual_inc'] = loan['annual_inc'].fillna(loan['annual_inc'].median())
return loan
#=============
# Function 11
def cleaning_func_11(loan):
# core cleaning code
import numpy as np
import pandas as pd
# loan = pd.read_csv('../input/loan.csv', low_memory=False)
loan['open_acc'] = np.where(loan['open_acc'].isnull(), 0, loan['open_acc'])
return loan
#=============
# Function 12
def cleaning_func_12(loan):
# core cleaning code
import numpy as np
import pandas as pd
# loan = pd.read_csv('../input/loan.csv', low_memory=False)
loan['collections_12_mths_ex_med'] = np.where(loan['collections_12_mths_ex_med'].isnull(), 0, loan['collections_12_mths_ex_med'])
return loan
#=============
# Function 13
def cleaning_func_13(loan):
# core cleaning code
import pandas as pd
# loan = pd.read_csv('../input/loan.csv', low_memory=False)
loan['tot_cur_bal'] = loan['tot_cur_bal'].fillna(loan['tot_cur_bal'].median())
return loan
#=============
# Function 14
def cleaning_func_14(loan):
# core cleaning code
import numpy as np
import pandas as pd
# loan = pd.read_csv('../input/loan.csv', low_memory=False)
loan['pub_rec'] = np.where(loan['pub_rec'].isnull(), 0, loan['pub_rec'])
return loan
#=============
# Function 15
def cleaning_func_15(loan):
# core cleaning code
import numpy as np
import pandas as pd
# loan = pd.read_csv('../input/loan.csv', low_memory=False)
loan['mths_since_last_delinq'] = np.where(loan['mths_since_last_delinq'].isnull(), 188, loan['mths_since_last_delinq'])
return loan
#=============
# Function 16
def cleaning_func_0(ld):
# core cleaning code
import pandas as pd
# ld = pd.read_csv('../input/loan.csv', low_memory=False, parse_dates=True)
pct_full = (ld.count() / len(ld))
names = list(pct_full[(pct_full > 0.75)].index)
loan = ld[names]
loan['pct_paid'] = (loan.out_prncp / loan.loan_amnt)
return loan
#=============
# Function 17
def cleaning_func_1(ld):
# core cleaning code
import pandas as pd
# ld = pd.read_csv('../input/loan.csv', low_memory=False, parse_dates=True)
pct_full = (ld.count() / len(ld))
names = list(pct_full[(pct_full > 0.75)].index)
loan = ld[names]
loan['issue_mo'] = loan.issue_d.str[slice(0, 3, None)]
return loan
#=============
# Function 18
def cleaning_func_2(ld):
# core cleaning code
import pandas as pd
# ld = pd.read_csv('../input/loan.csv', low_memory=False, parse_dates=True)
pct_full = (ld.count() / len(ld))
names = list(pct_full[(pct_full > 0.75)].index)
loan = ld[names]
loan['issue_year'] = loan.issue_d.str[slice(4, None, None)]
return loan
#=============
# Function 19
def cleaning_func_0(data):
# core cleaning code
import pandas as pd
# data = pd.read_csv('../input/loan.csv')
data['bad_loan'] = 0
return data
#=============
# Function 20
def cleaning_func_1(data):
# core cleaning code
import pandas as pd
# data = pd.read_csv('../input/loan.csv')
bad_indicators = ['Charged Off ', 'Default', 'Does not meet the credit policy. Status:Charged Off', 'In Grace Period', 'Default Receiver', 'Late (16-30 days)', 'Late (31-120 days)']
data.loc[(data.loan_status.isin(bad_indicators), 'bad_loan')] = 1
return data
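# Clarifying note (added): the .loc call above flips bad_loan to 1 for every row whose
# loan_status matches one of the bad_indicators strings; together with cleaning_func_0,
# which initialises bad_loan to 0, this yields a binary "bad loan" flag.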
#=============
# Function 21
def cleaning_func_2(data):
# core cleaning code
import pandas as pd
# data = pd.read_csv('../input/loan.csv')
data['issue_dt'] = pd.to_datetime(data.issue_d)
return data
#=============
# Function 22
def cleaning_func_3(data):
# core cleaning code
import pandas as pd
# data = pd.read_csv('../input/loan.csv')
data['issue_dt'] = pd.to_datetime(data.issue_d)
data['month'] = data['issue_dt'].dt.month
return data
#=============
# Function 23
def cleaning_func_4(data):
# core cleaning code
import pandas as pd
# data = pd.read_csv('../input/loan.csv')
data['issue_dt'] = pd.to_datetime(data.issue_d)
data['year'] = data['issue_dt'].dt.year
return data
#=============
# Function 24
def cleaning_func_0(loans):
# core cleaning code
import pandas as pd
date = ['issue_d', 'last_pymnt_d']
cols = ['issue_d', 'term', 'int_rate', 'loan_amnt', 'total_pymnt', 'last_pymnt_d', 'sub_grade', 'grade', 'loan_status']
# loans = pd.read_csv('../input/loan.csv', low_memory=False, parse_dates=date, usecols=cols, infer_datetime_format=True)
latest = loans['issue_d'].max()
finished_bool = (((loans['issue_d'] < (latest - pd.DateOffset(years=3))) & (loans['term'] == ' 36 months')) | ((loans['issue_d'] < (latest - | pd.DateOffset(years=5) | pandas.DateOffset |
import pytest
from pandas import Categorical, DataFrame, Series
import pandas.util.testing as tm
def _assert_series_equal_both(a, b, **kwargs):
"""
Check that two Series are equal.
This check is performed commutatively.
Parameters
----------
a : Series
The first Series to compare.
b : Series
The second Series to compare.
kwargs : dict
The arguments passed to `tm.assert_series_equal`.
"""
tm.assert_series_equal(a, b, **kwargs)
tm.assert_series_equal(b, a, **kwargs)
def _assert_not_series_equal(a, b, **kwargs):
"""
Check that two Series are not equal.
Parameters
----------
a : Series
The first Series to compare.
b : Series
The second Series to compare.
kwargs : dict
The arguments passed to `tm.assert_series_equal`.
"""
try:
tm.assert_series_equal(a, b, **kwargs)
msg = "The two Series were equal when they shouldn't have been"
pytest.fail(msg=msg)
except AssertionError:
pass
def _assert_not_series_equal_both(a, b, **kwargs):
"""
Check that two Series are not equal.
This check is performed commutatively.
Parameters
----------
a : Series
The first Series to compare.
b : Series
The second Series to compare.
kwargs : dict
The arguments passed to `tm.assert_series_equal`.
"""
_assert_not_series_equal(a, b, **kwargs)
_assert_not_series_equal(b, a, **kwargs)
@pytest.mark.parametrize("data", [range(3), list("abc"), list("áàä")])
def test_series_equal(data):
_assert_series_equal_both(Series(data), Series(data))
@pytest.mark.parametrize(
"data1,data2",
[
(range(3), range(1, 4)),
(list("abc"), list("xyz")),
(list("áàä"), list("éèë")),
(list("áàä"), list(b"aaa")),
(range(3), range(4)),
],
)
def test_series_not_equal_value_mismatch(data1, data2):
_assert_not_series_equal_both(Series(data1), Series(data2))
@pytest.mark.parametrize(
"kwargs",
[
dict(dtype="float64"), # dtype mismatch
dict(index=[1, 2, 4]), # index mismatch
dict(name="foo"), # name mismatch
],
)
def test_series_not_equal_metadata_mismatch(kwargs):
data = range(3)
s1 = Series(data)
s2 = Series(data, **kwargs)
_assert_not_series_equal_both(s1, s2)
@pytest.mark.parametrize("data1,data2", [(0.12345, 0.12346), (0.1235, 0.1236)])
@pytest.mark.parametrize("dtype", ["float32", "float64"])
@pytest.mark.parametrize("check_less_precise", [False, True, 0, 1, 2, 3, 10])
def test_less_precise(data1, data2, dtype, check_less_precise):
s1 = Series([data1], dtype=dtype)
s2 = Series([data2], dtype=dtype)
kwargs = dict(check_less_precise=check_less_precise)
if (check_less_precise is False or check_less_precise == 10) or (
(check_less_precise is True or check_less_precise >= 3)
and abs(data1 - data2) >= 0.0001
):
msg = "Series values are different"
with pytest.raises(AssertionError, match=msg):
tm.assert_series_equal(s1, s2, **kwargs)
else:
_assert_series_equal_both(s1, s2, **kwargs)
@pytest.mark.parametrize(
"s1,s2,msg",
[
# Index
(
Series(["l1", "l2"], index=[1, 2]),
Series(["l1", "l2"], index=[1.0, 2.0]),
"Series\\.index are different",
),
# MultiIndex
(
DataFrame.from_records(
{"a": [1, 2], "b": [2.1, 1.5], "c": ["l1", "l2"]}, index=["a", "b"]
).c,
DataFrame.from_records(
{"a": [1.0, 2.0], "b": [2.1, 1.5], "c": ["l1", "l2"]}, index=["a", "b"]
).c,
"MultiIndex level \\[0\\] are different",
),
],
)
def test_series_equal_index_dtype(s1, s2, msg, check_index_type):
kwargs = dict(check_index_type=check_index_type)
if check_index_type:
with pytest.raises(AssertionError, match=msg):
tm.assert_series_equal(s1, s2, **kwargs)
else:
tm.assert_series_equal(s1, s2, **kwargs)
def test_series_equal_length_mismatch(check_less_precise):
msg = """Series are different
Series length are different
\\[left\\]: 3, RangeIndex\\(start=0, stop=3, step=1\\)
\\[right\\]: 4, RangeIndex\\(start=0, stop=4, step=1\\)"""
s1 = Series([1, 2, 3])
s2 = Series([1, 2, 3, 4])
with pytest.raises(AssertionError, match=msg):
tm.assert_series_equal(s1, s2, check_less_precise=check_less_precise)
def test_series_equal_values_mismatch(check_less_precise):
msg = """Series are different
Series values are different \\(33\\.33333 %\\)
\\[left\\]: \\[1, 2, 3\\]
\\[right\\]: \\[1, 2, 4\\]"""
s1 = Series([1, 2, 3])
s2 = Series([1, 2, 4])
with pytest.raises(AssertionError, match=msg):
| tm.assert_series_equal(s1, s2, check_less_precise=check_less_precise) | pandas.util.testing.assert_series_equal |
import pandas as pd
import numpy as np
import pytest
from .conftest import DATA_DIR, assert_series_equal
from numpy.testing import assert_allclose
from pvlib import temperature, tools
from pvlib._deprecation import pvlibDeprecationWarning
@pytest.fixture
def sapm_default():
return temperature.TEMPERATURE_MODEL_PARAMETERS['sapm'][
'open_rack_glass_glass']
def test_sapm_cell(sapm_default):
default = temperature.sapm_cell(900, 20, 5, sapm_default['a'],
sapm_default['b'], sapm_default['deltaT'])
assert_allclose(default, 43.509, 3)
def test_sapm_module(sapm_default):
default = temperature.sapm_module(900, 20, 5, sapm_default['a'],
sapm_default['b'])
assert_allclose(default, 40.809, 3)
def test_sapm_cell_from_module(sapm_default):
default = temperature.sapm_cell_from_module(50, 900,
sapm_default['deltaT'])
assert_allclose(default, 50 + 900 / 1000 * sapm_default['deltaT'])
def test_sapm_ndarray(sapm_default):
temps = np.array([0, 10, 5])
irrads = np.array([0, 500, 0])
winds = np.array([10, 5, 0])
cell_temps = temperature.sapm_cell(irrads, temps, winds, sapm_default['a'],
sapm_default['b'],
sapm_default['deltaT'])
module_temps = temperature.sapm_module(irrads, temps, winds,
sapm_default['a'],
sapm_default['b'])
expected_cell = np.array([0., 23.06066166, 5.])
expected_module = np.array([0., 21.56066166, 5.])
assert_allclose(expected_cell, cell_temps, 3)
assert_allclose(expected_module, module_temps, 3)
def test_sapm_series(sapm_default):
times = pd.date_range(start='2015-01-01', end='2015-01-02', freq='12H')
temps = pd.Series([0, 10, 5], index=times)
irrads = pd.Series([0, 500, 0], index=times)
winds = pd.Series([10, 5, 0], index=times)
cell_temps = temperature.sapm_cell(irrads, temps, winds, sapm_default['a'],
sapm_default['b'],
sapm_default['deltaT'])
module_temps = temperature.sapm_module(irrads, temps, winds,
sapm_default['a'],
sapm_default['b'])
expected_cell = pd.Series([0., 23.06066166, 5.], index=times)
expected_module = pd.Series([0., 21.56066166, 5.], index=times)
assert_series_equal(expected_cell, cell_temps)
assert_series_equal(expected_module, module_temps)
def test_pvsyst_cell_default():
result = temperature.pvsyst_cell(900, 20, 5)
assert_allclose(result, 45.137, 0.001)
def test_pvsyst_cell_kwargs():
result = temperature.pvsyst_cell(900, 20, wind_speed=5.0, u_c=23.5,
u_v=6.25, module_efficiency=0.1)
assert_allclose(result, 33.315, 0.001)
def test_pvsyst_cell_ndarray():
temps = np.array([0, 10, 5])
irrads = np.array([0, 500, 0])
winds = np.array([10, 5, 0])
result = temperature.pvsyst_cell(irrads, temps, wind_speed=winds)
expected = np.array([0.0, 23.96551, 5.0])
assert_allclose(expected, result, 3)
def test_pvsyst_cell_series():
times = pd.date_range(start="2015-01-01", end="2015-01-02", freq="12H")
temps = pd.Series([0, 10, 5], index=times)
irrads = | pd.Series([0, 500, 0], index=times) | pandas.Series |
from flask import Flask, render_template, jsonify, request
from flask_pymongo import PyMongo
from flask_cors import CORS, cross_origin
import json
import collections
import numpy as np
import re
from numpy import array
from statistics import mode
import pandas as pd
import warnings
import copy
from joblib import Memory
from itertools import chain
import ast
import timeit
from sklearn.neighbors import KNeighborsClassifier # 1 neighbors
from sklearn.svm import SVC # 1 svm
from sklearn.naive_bayes import GaussianNB # 1 naive bayes
from sklearn.neural_network import MLPClassifier # 1 neural network
from sklearn.linear_model import LogisticRegression # 1 linear model
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis, QuadraticDiscriminantAnalysis # 2 discriminant analysis
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier, AdaBoostClassifier, GradientBoostingClassifier # 4 ensemble models
from joblib import Parallel, delayed
import multiprocessing
from sklearn.pipeline import make_pipeline
from sklearn import model_selection
from sklearn.manifold import MDS
from sklearn.manifold import TSNE
from sklearn.metrics import matthews_corrcoef
from sklearn.metrics import log_loss
from sklearn.metrics import fbeta_score
from sklearn.metrics import accuracy_score
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import f1_score
from imblearn.metrics import geometric_mean_score
import umap
from sklearn.metrics import classification_report
from sklearn.preprocessing import scale
import eli5
from eli5.sklearn import PermutationImportance
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import chi2
from sklearn.feature_selection import RFE
from sklearn.decomposition import PCA
from mlxtend.classifier import StackingCVClassifier
from mlxtend.feature_selection import ColumnSelector
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import ShuffleSplit
from scipy.spatial import procrustes
# This block of code is for the connection between the server, the database, and the client (plus routing).
# Access MongoDB
app = Flask(__name__)
app.config["MONGO_URI"] = "mongodb://localhost:27017/mydb"
mongo = PyMongo(app)
cors = CORS(app, resources={r"/data/*": {"origins": "*"}})
# Retrieve data from client
@cross_origin(origin='localhost',headers=['Content-Type','Authorization'])
@app.route('/data/Reset', methods=["GET", "POST"])
def Reset():
global DataRawLength
global DataResultsRaw
global previousState
previousState = []
global filterActionFinal
filterActionFinal = ''
global keySpecInternal
keySpecInternal = 1
global dataSpacePointsIDs
dataSpacePointsIDs = []
global previousStateActive
previousStateActive = []
global StanceTest
StanceTest = False
global status
status = True
global factors
factors = [1,0,0,1,0,0,1,0,0,1,0,0,0,0,0,1,0,0,0,1,1,1]
global KNNModelsCount
global SVCModelsCount
global GausNBModelsCount
global MLPModelsCount
global LRModelsCount
global LDAModelsCount
global QDAModelsCount
global RFModelsCount
global ExtraTModelsCount
global AdaBModelsCount
global GradBModelsCount
global keyData
keyData = 0
KNNModelsCount = 0
SVCModelsCount = 576
GausNBModelsCount = 736
MLPModelsCount = 1236
LRModelsCount = 1356
LDAModelsCount = 1996
QDAModelsCount = 2196
RFModelsCount = 2446
ExtraTModelsCount = 2606
AdaBModelsCount = 2766
GradBModelsCount = 2926
global XData
XData = []
global yData
yData = []
global XDataStored
XDataStored = []
global yDataStored
yDataStored = []
global detailsParams
detailsParams = []
global algorithmList
algorithmList = []
global ClassifierIDsList
ClassifierIDsList = ''
# Initializing models
global resultsList
resultsList = []
global RetrieveModelsList
RetrieveModelsList = []
global allParametersPerformancePerModel
allParametersPerformancePerModel = []
global all_classifiers
all_classifiers = []
global crossValidation
crossValidation = 5
# models
global KNNModels
KNNModels = []
global RFModels
RFModels = []
global scoring
scoring = {'accuracy': 'accuracy', 'precision_micro': 'precision_micro', 'precision_macro': 'precision_macro', 'precision_weighted': 'precision_weighted', 'recall_micro': 'recall_micro', 'recall_macro': 'recall_macro', 'recall_weighted': 'recall_weighted', 'roc_auc_ovo_weighted': 'roc_auc_ovo_weighted'}
global loopFeatures
loopFeatures = 2
global results
results = []
global resultsMetrics
resultsMetrics = []
global parametersSelData
parametersSelData = []
global target_names
target_names = []
global target_namesLoc
target_namesLoc = []
return 'The reset was done!'
# Retrieve data from client and select the correct data set
@cross_origin(origin='localhost',headers=['Content-Type','Authorization'])
@app.route('/data/ServerRequest', methods=["GET", "POST"])
def RetrieveFileName():
global DataRawLength
global DataResultsRaw
global DataResultsRawTest
global DataRawLengthTest
fileName = request.get_data().decode('utf8').replace("'", '"')
global keySpecInternal
keySpecInternal = 1
global filterActionFinal
filterActionFinal = ''
global dataSpacePointsIDs
dataSpacePointsIDs = []
global RANDOM_SEED
RANDOM_SEED = 42
global keyData
keyData = 0
global XData
XData = []
global previousState
previousState = []
global previousStateActive
previousStateActive = []
global status
status = True
global yData
yData = []
global XDataStored
XDataStored = []
global yDataStored
yDataStored = []
global filterDataFinal
filterDataFinal = 'mean'
global ClassifierIDsList
ClassifierIDsList = ''
global algorithmList
algorithmList = []
global detailsParams
detailsParams = []
# Initializing models
global RetrieveModelsList
RetrieveModelsList = []
global resultsList
resultsList = []
global allParametersPerformancePerModel
allParametersPerformancePerModel = []
global all_classifiers
all_classifiers = []
global scoring
scoring = {'accuracy': 'accuracy', 'precision_micro': 'precision_micro', 'precision_macro': 'precision_macro', 'precision_weighted': 'precision_weighted', 'recall_micro': 'recall_micro', 'recall_macro': 'recall_macro', 'recall_weighted': 'recall_weighted', 'roc_auc_ovo_weighted': 'roc_auc_ovo_weighted'}
global loopFeatures
loopFeatures = 2
# models
global KNNModels
global SVCModels
global GausNBModels
global MLPModels
global LRModels
global LDAModels
global QDAModels
global RFModels
global ExtraTModels
global AdaBModels
global GradBModels
KNNModels = []
SVCModels = []
GausNBModels = []
MLPModels = []
LRModels = []
LDAModels = []
QDAModels = []
RFModels = []
ExtraTModels = []
AdaBModels = []
GradBModels = []
global results
results = []
global resultsMetrics
resultsMetrics = []
global parametersSelData
parametersSelData = []
global StanceTest
StanceTest = False
global target_names
target_names = []
global target_namesLoc
target_namesLoc = []
DataRawLength = -1
DataRawLengthTest = -1
data = json.loads(fileName)
if data['fileName'] == 'HeartC':
CollectionDB = mongo.db.HeartC.find()
elif data['fileName'] == 'StanceC':
StanceTest = True
CollectionDB = mongo.db.StanceC.find()
CollectionDBTest = mongo.db.StanceCTest.find()
elif data['fileName'] == 'DiabetesC':
CollectionDB = mongo.db.diabetesC.find()
elif data['fileName'] == 'BreastC':
CollectionDB = mongo.db.breastC.find()
elif data['fileName'] == 'WineC':
CollectionDB = mongo.db.WineC.find()
elif data['fileName'] == 'ContraceptiveC':
CollectionDB = mongo.db.ContraceptiveC.find()
elif data['fileName'] == 'VehicleC':
CollectionDB = mongo.db.VehicleC.find()
elif data['fileName'] == 'BiodegC':
StanceTest = True
CollectionDB = mongo.db.biodegC.find()
CollectionDBTest = mongo.db.biodegCTest.find()
else:
CollectionDB = mongo.db.IrisC.find()
DataResultsRaw = []
for index, item in enumerate(CollectionDB):
item['_id'] = str(item['_id'])
item['InstanceID'] = index
DataResultsRaw.append(item)
DataRawLength = len(DataResultsRaw)
DataResultsRawTest = []
if (StanceTest):
for index, item in enumerate(CollectionDBTest):
item['_id'] = str(item['_id'])
item['InstanceID'] = index
DataResultsRawTest.append(item)
DataRawLengthTest = len(DataResultsRawTest)
DataSetSelection()
return 'Everything is okay'
def Convert(lst):
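    # Pair up consecutive list items into a dict, e.g. Convert(['a', 1, 'b', 2]) -> {'a': 1, 'b': 2}.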
it = iter(lst)
res_dct = dict(zip(it, it))
return res_dct
# Retrieve data set from client
@cross_origin(origin='localhost',headers=['Content-Type','Authorization'])
@app.route('/data/SendtoSeverDataSet', methods=["GET", "POST"])
def SendToServerData():
uploadedData = request.get_data().decode('utf8').replace("'", '"')
uploadedDataParsed = json.loads(uploadedData)
DataResultsRaw = uploadedDataParsed['uploadedData']
DataResults = copy.deepcopy(DataResultsRaw)
for dictionary in DataResultsRaw:
for key in dictionary.keys():
if (key.find('*') != -1):
target = key
continue
continue
DataResultsRaw.sort(key=lambda x: x[target], reverse=True)
DataResults.sort(key=lambda x: x[target], reverse=True)
for dictionary in DataResults:
del dictionary[target]
global AllTargets
global target_names
global target_namesLoc
AllTargets = [o[target] for o in DataResultsRaw]
AllTargetsFloatValues = []
previous = None
Class = 0
for i, value in enumerate(AllTargets):
if (i == 0):
previous = value
target_names.append(value)
if (value == previous):
AllTargetsFloatValues.append(Class)
else:
Class = Class + 1
target_names.append(value)
AllTargetsFloatValues.append(Class)
previous = value
ArrayDataResults = pd.DataFrame.from_dict(DataResults)
global XData, yData, RANDOM_SEED
XData, yData = ArrayDataResults, AllTargetsFloatValues
global XDataStored, yDataStored
XDataStored = XData.copy()
yDataStored = yData.copy()
return 'Processed uploaded data set'
# Send data to client
@app.route('/data/ClientRequest', methods=["GET", "POST"])
def CollectionData():
json.dumps(DataResultsRaw)
response = {
'Collection': DataResultsRaw
}
return jsonify(response)
def DataSetSelection():
global XDataTest, yDataTest
XDataTest = pd.DataFrame()
global StanceTest
global AllTargets
global target_names
target_namesLoc = []
if (StanceTest):
DataResultsTest = copy.deepcopy(DataResultsRawTest)
for dictionary in DataResultsRawTest:
for key in dictionary.keys():
if (key.find('*') != -1):
target = key
continue
continue
DataResultsRawTest.sort(key=lambda x: x[target], reverse=True)
DataResultsTest.sort(key=lambda x: x[target], reverse=True)
for dictionary in DataResultsTest:
del dictionary['_id']
del dictionary['InstanceID']
del dictionary[target]
AllTargetsTest = [o[target] for o in DataResultsRawTest]
AllTargetsFloatValuesTest = []
previous = None
Class = 0
for i, value in enumerate(AllTargetsTest):
if (i == 0):
previous = value
target_namesLoc.append(value)
if (value == previous):
AllTargetsFloatValuesTest.append(Class)
else:
Class = Class + 1
target_namesLoc.append(value)
AllTargetsFloatValuesTest.append(Class)
previous = value
ArrayDataResultsTest = pd.DataFrame.from_dict(DataResultsTest)
XDataTest, yDataTest = ArrayDataResultsTest, AllTargetsFloatValuesTest
DataResults = copy.deepcopy(DataResultsRaw)
for dictionary in DataResultsRaw:
for key in dictionary.keys():
if (key.find('*') != -1):
target = key
continue
continue
DataResultsRaw.sort(key=lambda x: x[target], reverse=True)
DataResults.sort(key=lambda x: x[target], reverse=True)
for dictionary in DataResults:
del dictionary['_id']
del dictionary['InstanceID']
del dictionary[target]
AllTargets = [o[target] for o in DataResultsRaw]
AllTargetsFloatValues = []
previous = None
Class = 0
for i, value in enumerate(AllTargets):
if (i == 0):
previous = value
target_names.append(value)
if (value == previous):
AllTargetsFloatValues.append(Class)
else:
Class = Class + 1
target_names.append(value)
AllTargetsFloatValues.append(Class)
previous = value
ArrayDataResults = pd.DataFrame.from_dict(DataResults)
global XData, yData, RANDOM_SEED
XData, yData = ArrayDataResults, AllTargetsFloatValues
global XDataStored, yDataStored
XDataStored = XData.copy()
yDataStored = yData.copy()
warnings.simplefilter('ignore')
return 'Everything is okay'
def callPreResults():
global XData
global yData
global target_names
global impDataInst
DataSpaceResMDS = FunMDS(XData)
DataSpaceResTSNE = FunTsne(XData)
DataSpaceResTSNE = DataSpaceResTSNE.tolist()
DataSpaceUMAP = FunUMAP(XData)
XDataJSONEntireSetRes = XData.to_json(orient='records')
global preResults
preResults = []
preResults.append(json.dumps(target_names)) # Position: 0
preResults.append(json.dumps(DataSpaceResMDS)) # Position: 1
preResults.append(json.dumps(XDataJSONEntireSetRes)) # Position: 2
preResults.append(json.dumps(yData)) # Position: 3
preResults.append(json.dumps(AllTargets)) # Position: 4
preResults.append(json.dumps(DataSpaceResTSNE)) # Position: 5
preResults.append(json.dumps(DataSpaceUMAP)) # Position: 6
preResults.append(json.dumps(impDataInst)) # Position: 7
# Send the pre-computed data space results to the frontend
@app.route('/data/requestDataSpaceResults', methods=["GET", "POST"])
def SendDataSpaceResults():
global preResults
callPreResults()
response = {
'preDataResults': preResults,
}
return jsonify(response)
# Main function
if __name__ == '__main__':
app.run()
# Debugging and mirroring client
@app.route('/', defaults={'path': ''})
@app.route('/<path:path>')
def catch_all(path):
if app.debug:
return requests.get('http://localhost:8080/{}'.format(path)).text
return render_template("index.html")
# This block of code is for server computations
def column_index(df, query_cols):
cols = df.columns.values
sidx = np.argsort(cols)
return sidx[np.searchsorted(cols,query_cols,sorter=sidx)].tolist()
def class_feature_importance(X, Y, feature_importances):
N, M = X.shape
X = scale(X)
out = {}
    for c in set(Y):
        # one key per feature: both the per-class means and feature_importances have length M
        out[c] = dict(
            zip(range(M), np.mean(X[Y==c, :], axis=0)*feature_importances)
        )
return out
@cross_origin(origin='localhost',headers=['Content-Type','Authorization'])
@app.route('/data/EnsembleMode', methods=["GET", "POST"])
def EnsembleMethod():
global crossValidation
global RANDOM_SEED
global XData
RANDOM_SEED = 42
RetrievedStatus = request.get_data().decode('utf8').replace("'", '"')
RetrievedStatus = json.loads(RetrievedStatus)
modeMethod = RetrievedStatus['defaultModeMain']
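    # 'blend' evaluates on a single 80/20 holdout split; any other mode falls back to 5-fold cross-validation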
if (modeMethod == 'blend'):
crossValidation = ShuffleSplit(n_splits=1, test_size=.20, random_state=RANDOM_SEED)
else:
crossValidation = 5
return 'Okay'
# Initialize every model for each algorithm
@cross_origin(origin='localhost',headers=['Content-Type','Authorization'])
@app.route('/data/ServerRequestSelParameters', methods=["GET", "POST"])
def RetrieveModel():
# get the models from the frontend
RetrievedModel = request.get_data().decode('utf8').replace("'", '"')
RetrievedModel = json.loads(RetrievedModel)
global algorithms
algorithms = RetrievedModel['Algorithms']
toggle = RetrievedModel['Toggle']
global crossValidation
global XData
global yData
global SVCModelsCount
global GausNBModelsCount
global MLPModelsCount
global LRModelsCount
global LDAModelsCount
global QDAModelsCount
global RFModelsCount
global ExtraTModelsCount
global AdaBModelsCount
global GradBModelsCount
# loop through the algorithms
global allParametersPerformancePerModel
start = timeit.default_timer()
print('CVorTT', crossValidation)
for eachAlgor in algorithms:
if (eachAlgor) == 'KNN':
clf = KNeighborsClassifier()
params = {'n_neighbors': list(range(1, 25)), 'metric': ['chebyshev', 'manhattan', 'euclidean', 'minkowski'], 'algorithm': ['brute', 'kd_tree', 'ball_tree'], 'weights': ['uniform', 'distance']}
AlgorithmsIDsEnd = 0
elif (eachAlgor) == 'SVC':
clf = SVC(probability=True,random_state=RANDOM_SEED)
params = {'C': list(np.arange(0.1,4.43,0.11)), 'kernel': ['rbf','linear', 'poly', 'sigmoid']}
AlgorithmsIDsEnd = SVCModelsCount
elif (eachAlgor) == 'GauNB':
clf = GaussianNB()
params = {'var_smoothing': list(np.arange(0.00000000001,0.0000001,0.0000000002))}
AlgorithmsIDsEnd = GausNBModelsCount
elif (eachAlgor) == 'MLP':
clf = MLPClassifier(random_state=RANDOM_SEED)
params = {'alpha': list(np.arange(0.00001,0.001,0.0002)), 'tol': list(np.arange(0.00001,0.001,0.0004)), 'max_iter': list(np.arange(100,200,100)), 'activation': ['relu', 'identity', 'logistic', 'tanh'], 'solver' : ['adam', 'sgd']}
AlgorithmsIDsEnd = MLPModelsCount
elif (eachAlgor) == 'LR':
clf = LogisticRegression(random_state=RANDOM_SEED)
params = {'C': list(np.arange(0.5,2,0.075)), 'max_iter': list(np.arange(50,250,50)), 'solver': ['lbfgs', 'newton-cg', 'sag', 'saga'], 'penalty': ['l2', 'none']}
AlgorithmsIDsEnd = LRModelsCount
elif (eachAlgor) == 'LDA':
clf = LinearDiscriminantAnalysis()
params = {'shrinkage': list(np.arange(0,1,0.01)), 'solver': ['lsqr', 'eigen']}
AlgorithmsIDsEnd = LDAModelsCount
elif (eachAlgor) == 'QDA':
clf = QuadraticDiscriminantAnalysis()
params = {'reg_param': list(np.arange(0,1,0.02)), 'tol': list(np.arange(0.00001,0.001,0.0002))}
AlgorithmsIDsEnd = QDAModelsCount
elif (eachAlgor) == 'RF':
clf = RandomForestClassifier(random_state=RANDOM_SEED)
params = {'n_estimators': list(range(60, 140)), 'criterion': ['gini', 'entropy']}
AlgorithmsIDsEnd = RFModelsCount
elif (eachAlgor) == 'ExtraT':
clf = ExtraTreesClassifier(random_state=RANDOM_SEED)
params = {'n_estimators': list(range(60, 140)), 'criterion': ['gini', 'entropy']}
AlgorithmsIDsEnd = ExtraTModelsCount
elif (eachAlgor) == 'AdaB':
clf = AdaBoostClassifier(random_state=RANDOM_SEED)
params = {'n_estimators': list(range(40, 80)), 'learning_rate': list(np.arange(0.1,2.3,1.1)), 'algorithm': ['SAMME.R', 'SAMME']}
AlgorithmsIDsEnd = AdaBModelsCount
else:
clf = GradientBoostingClassifier(random_state=RANDOM_SEED)
params = {'n_estimators': list(range(85, 115)), 'learning_rate': list(np.arange(0.01,0.23,0.11)), 'criterion': ['friedman_mse', 'mse', 'mae']}
AlgorithmsIDsEnd = GradBModelsCount
allParametersPerformancePerModel = GridSearchForModels(XData, yData, clf, params, eachAlgor, AlgorithmsIDsEnd, toggle, crossValidation)
# New visualization - model space
# header = "model_id,algorithm_id,mean_test_accuracy,mean_test_precision_micro,mean_test_precision_macro,mean_test_precision_weighted,mean_test_recall_micro,mean_test_recall_macro,mean_test_recall_weighted,mean_test_roc_auc_ovo_weighted,geometric_mean_score_micro,geometric_mean_score_macro,geometric_mean_score_weighted,matthews_corrcoef,f5_micro,f5_macro,f5_weighted,f1_micro,f1_macro,f1_weighted,f2_micro,f2_macro,f2_weighted,log_loss\n"
# dataReceived = []
# counter = 0
# for indx, el in enumerate(allParametersPerformancePerModel):
# dictFR = json.loads(el)
# frame = pd.DataFrame.from_dict(dictFR)
# for ind, elInside in frame.iterrows():
# counter = counter + 1
# dataReceived.append(str(counter))
# dataReceived.append(',')
# dataReceived.append(str(indx+1))
# dataReceived.append(',')
# dataReceived.append(str(elInside['mean_test_accuracy']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['mean_test_precision_micro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['mean_test_precision_macro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['mean_test_precision_weighted']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['mean_test_recall_micro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['mean_test_recall_macro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['mean_test_recall_weighted']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['mean_test_roc_auc_ovo_weighted']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['geometric_mean_score_micro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['geometric_mean_score_macro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['geometric_mean_score_weighted']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['matthews_corrcoef']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['f5_micro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['f5_macro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['f5_weighted']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['f1_micro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['f1_macro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['f1_weighted']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['f2_micro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['f2_macro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['f2_weighted']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['log_loss']))
# dataReceived.append("\n")
# dataReceivedItems = ''.join(dataReceived)
# csvString = header + dataReceivedItems
# fw = open ("modelSpace.csv","w+",encoding="utf-8")
# fw.write(csvString)
# fw.close()
# call the function that sends the results to the frontend
stop = timeit.default_timer()
print('Time GridSearch: ', stop - start)
SendEachClassifiersPerformanceToVisualize()
return 'Everything Okay'
location = './cachedir'
memory = Memory(location, verbose=0)
# Compute the performance and all other results for every algorithm and its models
@memory.cache
def GridSearchForModels(XData, yData, clf, params, eachAlgor, AlgorithmsIDsEnd, toggle, crossVal):
print('loop')
# this is the grid we use to train the models
grid = GridSearchCV(
estimator=clf, param_grid=params,
cv=crossVal, refit='accuracy', scoring=scoring,
verbose=0, n_jobs=-1)
# fit and extract the probabilities
grid.fit(XData, yData)
# process the results
cv_results = []
cv_results.append(grid.cv_results_)
df_cv_results = pd.DataFrame.from_dict(cv_results)
# number of models stored
number_of_models = len(df_cv_results.iloc[0][0])
# initialize results per row
df_cv_results_per_row = []
# loop through number of models
modelsIDs = []
for i in range(number_of_models):
modelsIDs.append(AlgorithmsIDsEnd+i)
# initialize results per item
df_cv_results_per_item = []
for column in df_cv_results.iloc[0]:
df_cv_results_per_item.append(column[i])
df_cv_results_per_row.append(df_cv_results_per_item)
# store the results into a pandas dataframe
df_cv_results_classifiers = pd.DataFrame(data = df_cv_results_per_row, columns= df_cv_results.columns)
# copy and filter in order to get only the metrics
metrics = df_cv_results_classifiers.copy()
metrics = metrics.filter(['mean_test_accuracy','mean_test_precision_micro','mean_test_precision_macro','mean_test_precision_weighted','mean_test_recall_micro','mean_test_recall_macro','mean_test_recall_weighted','mean_test_roc_auc_ovo_weighted'])
# concat parameters and performance
parametersPerformancePerModel = pd.DataFrame(df_cv_results_classifiers['params'])
parametersPerformancePerModel = parametersPerformancePerModel.to_json()
parametersLocal = json.loads(parametersPerformancePerModel)['params'].copy()
Models = []
for index, items in enumerate(parametersLocal):
Models.append(str(index))
parametersLocalNew = [ parametersLocal[your_key] for your_key in Models ]
permList = []
PerFeatureAccuracy = []
PerFeatureAccuracyAll = []
PerClassMetric = []
perModelProb = []
perModelPrediction = []
resultsMicro = []
resultsMacro = []
resultsWeighted = []
resultsCorrCoef = []
resultsMicroBeta5 = []
resultsMacroBeta5 = []
resultsWeightedBeta5 = []
resultsMicroBeta1 = []
resultsMacroBeta1 = []
resultsWeightedBeta1 = []
resultsMicroBeta2 = []
resultsMacroBeta2 = []
resultsWeightedBeta2 = []
resultsLogLoss = []
resultsLogLossFinal = []
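    # the first 8 columns of `metrics` are the mean_test_* scores, so the extra
    # metrics computed below are inserted starting at column position 8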
loop = 8
# influence calculation for all the instances
inputs = range(len(XData))
num_cores = multiprocessing.cpu_count()
#impDataInst = Parallel(n_jobs=num_cores)(delayed(processInput)(i,XData,yData,crossValidation,clf) for i in inputs)
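    # permutation importance and per-feature accuracy are computed only when the
    # frontend toggle is on, since they add substantial training time to this loop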
for eachModelParameters in parametersLocalNew:
clf.set_params(**eachModelParameters)
if (toggle == 1):
perm = PermutationImportance(clf, cv = None, refit = True, n_iter = 25).fit(XData, yData)
permList.append(perm.feature_importances_)
n_feats = XData.shape[1]
PerFeatureAccuracy = []
for i in range(n_feats):
scores = model_selection.cross_val_score(clf, XData.values[:, i].reshape(-1, 1), yData, cv=5)
PerFeatureAccuracy.append(scores.mean())
PerFeatureAccuracyAll.append(PerFeatureAccuracy)
else:
permList.append(0)
PerFeatureAccuracyAll.append(0)
clf.fit(XData, yData)
yPredict = clf.predict(XData)
yPredict = np.nan_to_num(yPredict)
perModelPrediction.append(yPredict)
# retrieve target names (class names)
PerClassMetric.append(classification_report(yData, yPredict, target_names=target_names, digits=2, output_dict=True))
yPredictProb = clf.predict_proba(XData)
yPredictProb = np.nan_to_num(yPredictProb)
perModelProb.append(yPredictProb.tolist())
resultsMicro.append(geometric_mean_score(yData, yPredict, average='micro'))
resultsMacro.append(geometric_mean_score(yData, yPredict, average='macro'))
resultsWeighted.append(geometric_mean_score(yData, yPredict, average='weighted'))
resultsCorrCoef.append(matthews_corrcoef(yData, yPredict))
resultsMicroBeta5.append(fbeta_score(yData, yPredict, average='micro', beta=0.5))
resultsMacroBeta5.append(fbeta_score(yData, yPredict, average='macro', beta=0.5))
resultsWeightedBeta5.append(fbeta_score(yData, yPredict, average='weighted', beta=0.5))
resultsMicroBeta1.append(fbeta_score(yData, yPredict, average='micro', beta=1))
resultsMacroBeta1.append(fbeta_score(yData, yPredict, average='macro', beta=1))
resultsWeightedBeta1.append(fbeta_score(yData, yPredict, average='weighted', beta=1))
resultsMicroBeta2.append(fbeta_score(yData, yPredict, average='micro', beta=2))
resultsMacroBeta2.append(fbeta_score(yData, yPredict, average='macro', beta=2))
resultsWeightedBeta2.append(fbeta_score(yData, yPredict, average='weighted', beta=2))
resultsLogLoss.append(log_loss(yData, yPredictProb, normalize=True))
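    # min-max normalize log loss across the trained models so it lies in [0, 1],
    # making it comparable with the other metrics gathered above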
maxLog = max(resultsLogLoss)
minLog = min(resultsLogLoss)
for each in resultsLogLoss:
resultsLogLossFinal.append((each-minLog)/(maxLog-minLog))
metrics.insert(loop,'geometric_mean_score_micro',resultsMicro)
metrics.insert(loop+1,'geometric_mean_score_macro',resultsMacro)
metrics.insert(loop+2,'geometric_mean_score_weighted',resultsWeighted)
metrics.insert(loop+3,'matthews_corrcoef',resultsCorrCoef)
metrics.insert(loop+4,'f5_micro',resultsMicroBeta5)
metrics.insert(loop+5,'f5_macro',resultsMacroBeta5)
metrics.insert(loop+6,'f5_weighted',resultsWeightedBeta5)
metrics.insert(loop+7,'f1_micro',resultsMicroBeta1)
metrics.insert(loop+8,'f1_macro',resultsMacroBeta1)
metrics.insert(loop+9,'f1_weighted',resultsWeightedBeta1)
metrics.insert(loop+10,'f2_micro',resultsMicroBeta2)
metrics.insert(loop+11,'f2_macro',resultsMacroBeta2)
metrics.insert(loop+12,'f2_weighted',resultsWeightedBeta2)
metrics.insert(loop+13,'log_loss',resultsLogLossFinal)
perModelPredPandas = pd.DataFrame(perModelPrediction)
perModelPredPandas = perModelPredPandas.to_json()
perModelProbPandas = pd.DataFrame(perModelProb)
perModelProbPandas = perModelProbPandas.to_json()
PerClassMetricPandas = pd.DataFrame(PerClassMetric)
del PerClassMetricPandas['accuracy']
del PerClassMetricPandas['macro avg']
del PerClassMetricPandas['weighted avg']
PerClassMetricPandas = PerClassMetricPandas.to_json()
perm_imp_eli5PD = pd.DataFrame(permList)
perm_imp_eli5PD = perm_imp_eli5PD.to_json()
PerFeatureAccuracyPandas = pd.DataFrame(PerFeatureAccuracyAll)
PerFeatureAccuracyPandas = PerFeatureAccuracyPandas.to_json()
bestfeatures = SelectKBest(score_func=chi2, k='all')
fit = bestfeatures.fit(XData,yData)
dfscores = pd.DataFrame(fit.scores_)
dfcolumns = pd.DataFrame(XData.columns)
featureScores = pd.concat([dfcolumns,dfscores],axis=1)
featureScores.columns = ['Specs','Score'] #naming the dataframe columns
featureScores = featureScores.to_json()
# gather the results and send them back
results.append(modelsIDs) # Position: 0 and so on
results.append(parametersPerformancePerModel) # Position: 1 and so on
results.append(PerClassMetricPandas) # Position: 2 and so on
results.append(PerFeatureAccuracyPandas) # Position: 3 and so on
results.append(perm_imp_eli5PD) # Position: 4 and so on
results.append(featureScores) # Position: 5 and so on
metrics = metrics.to_json()
results.append(metrics) # Position: 6 and so on
results.append(perModelProbPandas) # Position: 7 and so on
results.append(json.dumps(perModelPredPandas)) # Position: 8 and so on
return results
# Sending each model's results to frontend
@app.route('/data/PerformanceForEachModel', methods=["GET", "POST"])
def SendEachClassifiersPerformanceToVisualize():
response = {
'PerformancePerModel': allParametersPerformancePerModel,
}
return jsonify(response)
def Remove(duplicate):
final_list = []
for num in duplicate:
if num not in final_list:
if (isinstance(num, float)):
if np.isnan(num):
pass
else:
final_list.append(float(num))
else:
final_list.append(num)
return final_list
# Retrieve data from client
@cross_origin(origin='localhost',headers=['Content-Type','Authorization'])
@app.route('/data/SendBrushedParam', methods=["GET", "POST"])
def RetrieveModelsParam():
RetrieveModelsPar = request.get_data().decode('utf8').replace("'", '"')
RetrieveModelsPar = json.loads(RetrieveModelsPar)
counterKNN = 0
counterSVC = 0
counterGausNB = 0
counterMLP = 0
counterLR = 0
counterLDA = 0
counterQDA = 0
counterRF = 0
counterExtraT = 0
counterAdaB = 0
counterGradB = 0
global KNNModels
global SVCModels
global GausNBModels
global MLPModels
global LRModels
global LDAModels
global QDAModels
global RFModels
global ExtraTModels
global AdaBModels
global GradBModels
global algorithmsList
algorithmsList = RetrieveModelsPar['algorithms']
for index, items in enumerate(algorithmsList):
if (items == 'KNN'):
counterKNN += 1
KNNModels.append(int(RetrieveModelsPar['models'][index]))
elif (items == 'SVC'):
counterSVC += 1
SVCModels.append(int(RetrieveModelsPar['models'][index]))
elif (items == 'GauNB'):
counterGausNB += 1
GausNBModels.append(int(RetrieveModelsPar['models'][index]))
elif (items == 'MLP'):
counterMLP += 1
MLPModels.append(int(RetrieveModelsPar['models'][index]))
elif (items == 'LR'):
counterLR += 1
LRModels.append(int(RetrieveModelsPar['models'][index]))
elif (items == 'LDA'):
counterLDA += 1
LDAModels.append(int(RetrieveModelsPar['models'][index]))
elif (items == 'QDA'):
counterQDA += 1
QDAModels.append(int(RetrieveModelsPar['models'][index]))
elif (items == 'RF'):
counterRF += 1
RFModels.append(int(RetrieveModelsPar['models'][index]))
elif (items == 'ExtraT'):
counterExtraT += 1
ExtraTModels.append(int(RetrieveModelsPar['models'][index]))
elif (items == 'AdaB'):
counterAdaB += 1
AdaBModels.append(int(RetrieveModelsPar['models'][index]))
else:
counterGradB += 1
GradBModels.append(int(RetrieveModelsPar['models'][index]))
return 'Everything Okay'
# Retrieve data from client
@cross_origin(origin='localhost',headers=['Content-Type','Authorization'])
@app.route('/data/factors', methods=["GET", "POST"])
def RetrieveFactors():
global factors
global allParametersPerformancePerModel
Factors = request.get_data().decode('utf8').replace("'", '"')
FactorsInt = json.loads(Factors)
factors = FactorsInt['Factors']
# this is if we want to change the factors before running the search
#if (len(allParametersPerformancePerModel) == 0):
# pass
#else:
global sumPerClassifierSel
global ModelSpaceMDSNew
global ModelSpaceTSNENew
global metricsPerModel
sumPerClassifierSel = []
sumPerClassifierSel = preProcsumPerMetric(factors)
ModelSpaceMDSNew = []
ModelSpaceTSNENew = []
loopThroughMetrics = PreprocessingMetrics()
loopThroughMetrics = loopThroughMetrics.fillna(0)
metricsPerModel = preProcMetricsAllAndSel()
flagLocal = 0
countRemovals = 0
for l,el in enumerate(factors):
if el == 0:
loopThroughMetrics.drop(loopThroughMetrics.columns[[l-countRemovals]], axis=1, inplace=True)
countRemovals = countRemovals + 1
flagLocal = 1
if flagLocal == 1:
ModelSpaceMDSNew = FunMDS(loopThroughMetrics)
ModelSpaceTSNENew = FunTsne(loopThroughMetrics)
ModelSpaceTSNENew = ModelSpaceTSNENew.tolist()
return 'Everything Okay'
@app.route('/data/UpdateOverv', methods=["GET", "POST"])
def UpdateOverview():
ResultsUpdateOverview = []
ResultsUpdateOverview.append(sumPerClassifierSel)
ResultsUpdateOverview.append(ModelSpaceMDSNew)
ResultsUpdateOverview.append(ModelSpaceTSNENew)
ResultsUpdateOverview.append(metricsPerModel)
response = {
'Results': ResultsUpdateOverview
}
return jsonify(response)
def PreprocessingMetrics():
dicKNN = json.loads(allParametersPerformancePerModel[6])
dicSVC = json.loads(allParametersPerformancePerModel[15])
dicGausNB = json.loads(allParametersPerformancePerModel[24])
dicMLP = json.loads(allParametersPerformancePerModel[33])
dicLR = json.loads(allParametersPerformancePerModel[42])
dicLDA = json.loads(allParametersPerformancePerModel[51])
dicQDA = json.loads(allParametersPerformancePerModel[60])
dicRF = json.loads(allParametersPerformancePerModel[69])
dicExtraT = json.loads(allParametersPerformancePerModel[78])
dicAdaB = json.loads(allParametersPerformancePerModel[87])
dicGradB = json.loads(allParametersPerformancePerModel[96])
dfKNN = pd.DataFrame.from_dict(dicKNN)
dfSVC = pd.DataFrame.from_dict(dicSVC)
dfGausNB = pd.DataFrame.from_dict(dicGausNB)
dfMLP = pd.DataFrame.from_dict(dicMLP)
dfLR = pd.DataFrame.from_dict(dicLR)
dfLDA = pd.DataFrame.from_dict(dicLDA)
dfQDA = pd.DataFrame.from_dict(dicQDA)
dfRF = pd.DataFrame.from_dict(dicRF)
dfExtraT = pd.DataFrame.from_dict(dicExtraT)
dfAdaB = pd.DataFrame.from_dict(dicAdaB)
dfGradB = pd.DataFrame.from_dict(dicGradB)
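    # shift each algorithm's local row index by its global ID offset, then keep only
    # the models the user has currently selected (brushed) for that algorithm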
dfKNN.index = dfKNN.index.astype(int)
dfSVC.index = dfSVC.index.astype(int) + SVCModelsCount
dfGausNB.index = dfGausNB.index.astype(int) + GausNBModelsCount
dfMLP.index = dfMLP.index.astype(int) + MLPModelsCount
dfLR.index = dfLR.index.astype(int) + LRModelsCount
dfLDA.index = dfLDA.index.astype(int) + LDAModelsCount
dfQDA.index = dfQDA.index.astype(int) + QDAModelsCount
dfRF.index = dfRF.index.astype(int) + RFModelsCount
dfExtraT.index = dfExtraT.index.astype(int) + ExtraTModelsCount
dfAdaB.index = dfAdaB.index.astype(int) + AdaBModelsCount
dfGradB.index = dfGradB.index.astype(int) + GradBModelsCount
dfKNNFiltered = dfKNN.loc[KNNModels, :]
dfSVCFiltered = dfSVC.loc[SVCModels, :]
dfGausNBFiltered = dfGausNB.loc[GausNBModels, :]
dfMLPFiltered = dfMLP.loc[MLPModels, :]
dfLRFiltered = dfLR.loc[LRModels, :]
dfLDAFiltered = dfLDA.loc[LDAModels, :]
dfQDAFiltered = dfQDA.loc[QDAModels, :]
dfRFFiltered = dfRF.loc[RFModels, :]
dfExtraTFiltered = dfExtraT.loc[ExtraTModels, :]
dfAdaBFiltered = dfAdaB.loc[AdaBModels, :]
dfGradBFiltered = dfGradB.loc[GradBModels, :]
df_concatMetrics = pd.concat([dfKNNFiltered, dfSVCFiltered, dfGausNBFiltered, dfMLPFiltered, dfLRFiltered, dfLDAFiltered, dfQDAFiltered, dfRFFiltered, dfExtraTFiltered, dfAdaBFiltered, dfGradBFiltered])
return df_concatMetrics
def PreprocessingPred():
dicKNN = json.loads(allParametersPerformancePerModel[7])
dicSVC = json.loads(allParametersPerformancePerModel[16])
dicGausNB = json.loads(allParametersPerformancePerModel[25])
dicMLP = json.loads(allParametersPerformancePerModel[34])
dicLR = json.loads(allParametersPerformancePerModel[43])
dicLDA = json.loads(allParametersPerformancePerModel[52])
dicQDA = json.loads(allParametersPerformancePerModel[61])
dicRF = json.loads(allParametersPerformancePerModel[70])
dicExtraT = json.loads(allParametersPerformancePerModel[79])
dicAdaB = json.loads(allParametersPerformancePerModel[88])
dicGradB = json.loads(allParametersPerformancePerModel[97])
dfKNN = pd.DataFrame.from_dict(dicKNN)
dfSVC = pd.DataFrame.from_dict(dicSVC)
dfGausNB = pd.DataFrame.from_dict(dicGausNB)
dfMLP = pd.DataFrame.from_dict(dicMLP)
dfLR = pd.DataFrame.from_dict(dicLR)
dfLDA = pd.DataFrame.from_dict(dicLDA)
dfQDA = pd.DataFrame.from_dict(dicQDA)
dfRF = pd.DataFrame.from_dict(dicRF)
dfExtraT = pd.DataFrame.from_dict(dicExtraT)
dfAdaB = pd.DataFrame.from_dict(dicAdaB)
dfGradB = pd.DataFrame.from_dict(dicGradB)
dfKNN.index = dfKNN.index.astype(int)
dfSVC.index = dfSVC.index.astype(int) + SVCModelsCount
dfGausNB.index = dfGausNB.index.astype(int) + GausNBModelsCount
dfMLP.index = dfMLP.index.astype(int) + MLPModelsCount
dfLR.index = dfLR.index.astype(int) + LRModelsCount
dfLDA.index = dfLDA.index.astype(int) + LDAModelsCount
dfQDA.index = dfQDA.index.astype(int) + QDAModelsCount
dfRF.index = dfRF.index.astype(int) + RFModelsCount
dfExtraT.index = dfExtraT.index.astype(int) + ExtraTModelsCount
dfAdaB.index = dfAdaB.index.astype(int) + AdaBModelsCount
dfGradB.index = dfGradB.index.astype(int) + GradBModelsCount
dfKNNFiltered = dfKNN.loc[KNNModels, :]
dfSVCFiltered = dfSVC.loc[SVCModels, :]
dfGausNBFiltered = dfGausNB.loc[GausNBModels, :]
dfMLPFiltered = dfMLP.loc[MLPModels, :]
dfLRFiltered = dfLR.loc[LRModels, :]
dfLDAFiltered = dfLDA.loc[LDAModels, :]
dfQDAFiltered = dfQDA.loc[QDAModels, :]
dfRFFiltered = dfRF.loc[RFModels, :]
dfExtraTFiltered = dfExtraT.loc[ExtraTModels, :]
dfAdaBFiltered = dfAdaB.loc[AdaBModels, :]
dfGradBFiltered = dfGradB.loc[GradBModels, :]
df_concatProbs = pd.concat([dfKNNFiltered, dfSVCFiltered, dfGausNBFiltered, dfMLPFiltered, dfLRFiltered, dfLDAFiltered, dfQDAFiltered, dfRFFiltered, dfExtraTFiltered, dfAdaBFiltered, dfGradBFiltered])
predictions = []
for column, content in df_concatProbs.items():
el = [sum(x)/len(x) for x in zip(*content)]
predictions.append(el)
return predictions
def PreprocessingPredUpdate(Models):
Models = json.loads(Models)
ModelsList= []
for loop in Models['ClassifiersList']:
ModelsList.append(loop)
dicKNN = json.loads(allParametersPerformancePerModel[7])
dicSVC = json.loads(allParametersPerformancePerModel[16])
dicGausNB = json.loads(allParametersPerformancePerModel[25])
dicMLP = json.loads(allParametersPerformancePerModel[34])
dicLR = json.loads(allParametersPerformancePerModel[43])
dicLDA = json.loads(allParametersPerformancePerModel[52])
dicQDA = json.loads(allParametersPerformancePerModel[61])
dicRF = json.loads(allParametersPerformancePerModel[70])
dicExtraT = json.loads(allParametersPerformancePerModel[79])
dicAdaB = json.loads(allParametersPerformancePerModel[88])
dicGradB = json.loads(allParametersPerformancePerModel[97])
dfKNN = pd.DataFrame.from_dict(dicKNN)
dfSVC = pd.DataFrame.from_dict(dicSVC)
dfGausNB = pd.DataFrame.from_dict(dicGausNB)
dfMLP = pd.DataFrame.from_dict(dicMLP)
dfLR = pd.DataFrame.from_dict(dicLR)
dfLDA = pd.DataFrame.from_dict(dicLDA)
dfQDA = pd.DataFrame.from_dict(dicQDA)
dfRF = pd.DataFrame.from_dict(dicRF)
dfExtraT = pd.DataFrame.from_dict(dicExtraT)
dfAdaB = pd.DataFrame.from_dict(dicAdaB)
dfGradB = pd.DataFrame.from_dict(dicGradB)
dfKNN.index = dfKNN.index.astype(int)
dfSVC.index = dfSVC.index.astype(int) + SVCModelsCount
dfGausNB.index = dfGausNB.index.astype(int) + GausNBModelsCount
dfMLP.index = dfMLP.index.astype(int) + MLPModelsCount
dfLR.index = dfLR.index.astype(int) + LRModelsCount
dfLDA.index = dfLDA.index.astype(int) + LDAModelsCount
dfQDA.index = dfQDA.index.astype(int) + QDAModelsCount
dfRF.index = dfRF.index.astype(int) + RFModelsCount
dfExtraT.index = dfExtraT.index.astype(int) + ExtraTModelsCount
dfAdaB.index = dfAdaB.index.astype(int) + AdaBModelsCount
dfGradB.index = dfGradB.index.astype(int) + GradBModelsCount
dfKNNFiltered = dfKNN.loc[KNNModels, :]
dfSVCFiltered = dfSVC.loc[SVCModels, :]
dfGausNBFiltered = dfGausNB.loc[GausNBModels, :]
dfMLPFiltered = dfMLP.loc[MLPModels, :]
dfLRFiltered = dfLR.loc[LRModels, :]
dfLDAFiltered = dfLDA.loc[LDAModels, :]
dfQDAFiltered = dfQDA.loc[QDAModels, :]
dfRFFiltered = dfRF.loc[RFModels, :]
dfExtraTFiltered = dfExtraT.loc[ExtraTModels, :]
dfAdaBFiltered = dfAdaB.loc[AdaBModels, :]
dfGradBFiltered = dfGradB.loc[GradBModels, :]
df_concatProbs = pd.concat([dfKNNFiltered, dfSVCFiltered, dfGausNBFiltered, dfMLPFiltered, dfLRFiltered, dfLDAFiltered, dfQDAFiltered, dfRFFiltered, dfExtraTFiltered, dfAdaBFiltered, dfGradBFiltered])
listProbs = df_concatProbs.index.values.tolist()
deletedElements = 0
for index, element in enumerate(listProbs):
if element in ModelsList:
index = index - deletedElements
df_concatProbs = df_concatProbs.drop(df_concatProbs.index[index])
deletedElements = deletedElements + 1
df_concatProbsCleared = df_concatProbs
listIDsRemoved = df_concatProbsCleared.index.values.tolist()
predictionsAll = PreprocessingPred()
PredictionSpaceAll = FunMDS(predictionsAll)
PredictionSpaceAllComb = [list(a) for a in zip(PredictionSpaceAll[0], PredictionSpaceAll[1])]
predictionsSel = []
for column, content in df_concatProbsCleared.items():
el = [sum(x)/len(x) for x in zip(*content)]
predictionsSel.append(el)
PredictionSpaceSel = FunMDS(predictionsSel)
PredictionSpaceSelComb = [list(a) for a in zip(PredictionSpaceSel[0], PredictionSpaceSel[1])]
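    # Procrustes analysis aligns the projection of the remaining (selected) models
    # onto the projection of all models, so the two layouts stay visually comparable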
mtx2PredFinal = []
    _, mtx2Pred, disparityPred = procrustes(PredictionSpaceAllComb, PredictionSpaceSelComb)
a1, b1 = zip(*mtx2Pred)
mtx2PredFinal.append(a1)
mtx2PredFinal.append(b1)
return [mtx2PredFinal,listIDsRemoved]
def PreprocessingParam():
dicKNN = json.loads(allParametersPerformancePerModel[1])
dicSVC = json.loads(allParametersPerformancePerModel[10])
dicGausNB = json.loads(allParametersPerformancePerModel[19])
dicMLP = json.loads(allParametersPerformancePerModel[28])
dicLR = json.loads(allParametersPerformancePerModel[37])
dicLDA = json.loads(allParametersPerformancePerModel[46])
dicQDA = json.loads(allParametersPerformancePerModel[55])
dicRF = json.loads(allParametersPerformancePerModel[64])
dicExtraT = json.loads(allParametersPerformancePerModel[73])
dicAdaB = json.loads(allParametersPerformancePerModel[82])
dicGradB = json.loads(allParametersPerformancePerModel[91])
dicKNN = dicKNN['params']
dicSVC = dicSVC['params']
dicGausNB = dicGausNB['params']
dicMLP = dicMLP['params']
dicLR = dicLR['params']
dicLDA = dicLDA['params']
dicQDA = dicQDA['params']
dicRF = dicRF['params']
dicExtraT = dicExtraT['params']
dicAdaB = dicAdaB['params']
dicGradB = dicGradB['params']
dicKNN = {int(k):v for k,v in dicKNN.items()}
dicSVC = {int(k):v for k,v in dicSVC.items()}
dicGausNB = {int(k):v for k,v in dicGausNB.items()}
dicMLP = {int(k):v for k,v in dicMLP.items()}
dicLR = {int(k):v for k,v in dicLR.items()}
dicLDA = {int(k):v for k,v in dicLDA.items()}
dicQDA = {int(k):v for k,v in dicQDA.items()}
dicRF = {int(k):v for k,v in dicRF.items()}
dicExtraT = {int(k):v for k,v in dicExtraT.items()}
dicAdaB = {int(k):v for k,v in dicAdaB.items()}
dicGradB = {int(k):v for k,v in dicGradB.items()}
dfKNN = pd.DataFrame.from_dict(dicKNN)
dfSVC = pd.DataFrame.from_dict(dicSVC)
dfGausNB = pd.DataFrame.from_dict(dicGausNB)
dfMLP = pd.DataFrame.from_dict(dicMLP)
dfLR = pd.DataFrame.from_dict(dicLR)
dfLDA = pd.DataFrame.from_dict(dicLDA)
dfQDA = pd.DataFrame.from_dict(dicQDA)
dfRF = pd.DataFrame.from_dict(dicRF)
dfExtraT = pd.DataFrame.from_dict(dicExtraT)
dfAdaB = pd.DataFrame.from_dict(dicAdaB)
dfGradB = pd.DataFrame.from_dict(dicGradB)
dfKNN = dfKNN.T
dfSVC = dfSVC.T
dfGausNB = dfGausNB.T
dfMLP = dfMLP.T
dfLR = dfLR.T
dfLDA = dfLDA.T
dfQDA = dfQDA.T
dfRF = dfRF.T
dfExtraT = dfExtraT.T
dfAdaB = dfAdaB.T
dfGradB = dfGradB.T
dfKNN.index = dfKNN.index.astype(int)
dfSVC.index = dfSVC.index.astype(int) + SVCModelsCount
dfGausNB.index = dfGausNB.index.astype(int) + GausNBModelsCount
dfMLP.index = dfMLP.index.astype(int) + MLPModelsCount
dfLR.index = dfLR.index.astype(int) + LRModelsCount
dfLDA.index = dfLDA.index.astype(int) + LDAModelsCount
dfQDA.index = dfQDA.index.astype(int) + QDAModelsCount
dfRF.index = dfRF.index.astype(int) + RFModelsCount
dfExtraT.index = dfExtraT.index.astype(int) + ExtraTModelsCount
dfAdaB.index = dfAdaB.index.astype(int) + AdaBModelsCount
dfGradB.index = dfGradB.index.astype(int) + GradBModelsCount
dfKNNFiltered = dfKNN.loc[KNNModels, :]
dfSVCFiltered = dfSVC.loc[SVCModels, :]
dfGausNBFiltered = dfGausNB.loc[GausNBModels, :]
dfMLPFiltered = dfMLP.loc[MLPModels, :]
dfLRFiltered = dfLR.loc[LRModels, :]
dfLDAFiltered = dfLDA.loc[LDAModels, :]
dfQDAFiltered = dfQDA.loc[QDAModels, :]
dfRFFiltered = dfRF.loc[RFModels, :]
dfExtraTFiltered = dfExtraT.loc[ExtraTModels, :]
dfAdaBFiltered = dfAdaB.loc[AdaBModels, :]
dfGradBFiltered = dfGradB.loc[GradBModels, :]
df_params = pd.concat([dfKNNFiltered, dfSVCFiltered, dfGausNBFiltered, dfMLPFiltered, dfLRFiltered, dfLDAFiltered, dfQDAFiltered, dfRFFiltered, dfExtraTFiltered, dfAdaBFiltered, dfGradBFiltered])
return df_params
def PreprocessingParamSep():
dicKNN = json.loads(allParametersPerformancePerModel[1])
dicSVC = json.loads(allParametersPerformancePerModel[10])
dicGausNB = json.loads(allParametersPerformancePerModel[19])
dicMLP = json.loads(allParametersPerformancePerModel[28])
dicLR = json.loads(allParametersPerformancePerModel[37])
dicLDA = json.loads(allParametersPerformancePerModel[46])
dicQDA = json.loads(allParametersPerformancePerModel[55])
dicRF = json.loads(allParametersPerformancePerModel[64])
dicExtraT = json.loads(allParametersPerformancePerModel[73])
dicAdaB = json.loads(allParametersPerformancePerModel[82])
dicGradB = json.loads(allParametersPerformancePerModel[91])
dicKNN = dicKNN['params']
dicSVC = dicSVC['params']
dicGausNB = dicGausNB['params']
dicMLP = dicMLP['params']
dicLR = dicLR['params']
dicLDA = dicLDA['params']
dicQDA = dicQDA['params']
dicRF = dicRF['params']
dicExtraT = dicExtraT['params']
dicAdaB = dicAdaB['params']
dicGradB = dicGradB['params']
dicKNN = {int(k):v for k,v in dicKNN.items()}
dicSVC = {int(k):v for k,v in dicSVC.items()}
dicGausNB = {int(k):v for k,v in dicGausNB.items()}
dicMLP = {int(k):v for k,v in dicMLP.items()}
dicLR = {int(k):v for k,v in dicLR.items()}
dicLDA = {int(k):v for k,v in dicLDA.items()}
dicQDA = {int(k):v for k,v in dicQDA.items()}
dicRF = {int(k):v for k,v in dicRF.items()}
dicExtraT = {int(k):v for k,v in dicExtraT.items()}
dicAdaB = {int(k):v for k,v in dicAdaB.items()}
dicGradB = {int(k):v for k,v in dicGradB.items()}
dfKNN = pd.DataFrame.from_dict(dicKNN)
dfSVC = pd.DataFrame.from_dict(dicSVC)
dfGausNB = pd.DataFrame.from_dict(dicGausNB)
dfMLP = pd.DataFrame.from_dict(dicMLP)
dfLR = pd.DataFrame.from_dict(dicLR)
dfLDA = pd.DataFrame.from_dict(dicLDA)
dfQDA = pd.DataFrame.from_dict(dicQDA)
dfRF = pd.DataFrame.from_dict(dicRF)
dfExtraT = pd.DataFrame.from_dict(dicExtraT)
dfAdaB = pd.DataFrame.from_dict(dicAdaB)
dfGradB = pd.DataFrame.from_dict(dicGradB)
dfKNN = dfKNN.T
dfSVC = dfSVC.T
dfGausNB = dfGausNB.T
dfMLP = dfMLP.T
dfLR = dfLR.T
dfLDA = dfLDA.T
dfQDA = dfQDA.T
dfRF = dfRF.T
dfExtraT = dfExtraT.T
dfAdaB = dfAdaB.T
dfGradB = dfGradB.T
dfKNN.index = dfKNN.index.astype(int)
dfSVC.index = dfSVC.index.astype(int) + SVCModelsCount
dfGausNB.index = dfGausNB.index.astype(int) + GausNBModelsCount
dfMLP.index = dfMLP.index.astype(int) + MLPModelsCount
dfLR.index = dfLR.index.astype(int) + LRModelsCount
dfLDA.index = dfLDA.index.astype(int) + LDAModelsCount
dfQDA.index = dfQDA.index.astype(int) + QDAModelsCount
dfRF.index = dfRF.index.astype(int) + RFModelsCount
dfExtraT.index = dfExtraT.index.astype(int) + ExtraTModelsCount
dfAdaB.index = dfAdaB.index.astype(int) + AdaBModelsCount
dfGradB.index = dfGradB.index.astype(int) + GradBModelsCount
dfKNNFiltered = dfKNN.loc[KNNModels, :]
dfSVCFiltered = dfSVC.loc[SVCModels, :]
dfGausNBFiltered = dfGausNB.loc[GausNBModels, :]
dfMLPFiltered = dfMLP.loc[MLPModels, :]
dfLRFiltered = dfLR.loc[LRModels, :]
dfLDAFiltered = dfLDA.loc[LDAModels, :]
dfQDAFiltered = dfQDA.loc[QDAModels, :]
dfRFFiltered = dfRF.loc[RFModels, :]
dfExtraTFiltered = dfExtraT.loc[ExtraTModels, :]
dfAdaBFiltered = dfAdaB.loc[AdaBModels, :]
dfGradBFiltered = dfGradB.loc[GradBModels, :]
return [dfKNNFiltered, dfSVCFiltered, dfGausNBFiltered, dfMLPFiltered, dfLRFiltered, dfLDAFiltered, dfQDAFiltered, dfRFFiltered, dfExtraTFiltered, dfAdaBFiltered, dfGradBFiltered]
def preProcessPerClassM():
dicKNN = json.loads(allParametersPerformancePerModel[2])
dicSVC = json.loads(allParametersPerformancePerModel[11])
dicGausNB = json.loads(allParametersPerformancePerModel[20])
dicMLP = json.loads(allParametersPerformancePerModel[29])
dicLR = json.loads(allParametersPerformancePerModel[38])
dicLDA = json.loads(allParametersPerformancePerModel[47])
dicQDA = json.loads(allParametersPerformancePerModel[56])
dicRF = json.loads(allParametersPerformancePerModel[65])
dicExtraT = json.loads(allParametersPerformancePerModel[74])
dicAdaB = json.loads(allParametersPerformancePerModel[83])
dicGradB = json.loads(allParametersPerformancePerModel[92])
dfKNN = pd.DataFrame.from_dict(dicKNN)
dfSVC = pd.DataFrame.from_dict(dicSVC)
dfGausNB = pd.DataFrame.from_dict(dicGausNB)
dfMLP = pd.DataFrame.from_dict(dicMLP)
dfLR = pd.DataFrame.from_dict(dicLR)
dfLDA = pd.DataFrame.from_dict(dicLDA)
dfQDA = pd.DataFrame.from_dict(dicQDA)
dfRF = pd.DataFrame.from_dict(dicRF)
dfExtraT = pd.DataFrame.from_dict(dicExtraT)
dfAdaB = pd.DataFrame.from_dict(dicAdaB)
dfGradB = pd.DataFrame.from_dict(dicGradB)
dfKNN.index = dfKNN.index.astype(int)
dfSVC.index = dfSVC.index.astype(int) + SVCModelsCount
dfGausNB.index = dfGausNB.index.astype(int) + GausNBModelsCount
dfMLP.index = dfMLP.index.astype(int) + MLPModelsCount
dfLR.index = dfLR.index.astype(int) + LRModelsCount
dfLDA.index = dfLDA.index.astype(int) + LDAModelsCount
dfQDA.index = dfQDA.index.astype(int) + QDAModelsCount
dfRF.index = dfRF.index.astype(int) + RFModelsCount
dfExtraT.index = dfExtraT.index.astype(int) + ExtraTModelsCount
dfAdaB.index = dfAdaB.index.astype(int) + AdaBModelsCount
dfGradB.index = dfGradB.index.astype(int) + GradBModelsCount
dfKNNFiltered = dfKNN.loc[KNNModels, :]
dfSVCFiltered = dfSVC.loc[SVCModels, :]
dfGausNBFiltered = dfGausNB.loc[GausNBModels, :]
dfMLPFiltered = dfMLP.loc[MLPModels, :]
dfLRFiltered = dfLR.loc[LRModels, :]
dfLDAFiltered = dfLDA.loc[LDAModels, :]
dfQDAFiltered = dfQDA.loc[QDAModels, :]
dfRFFiltered = dfRF.loc[RFModels, :]
dfExtraTFiltered = dfExtraT.loc[ExtraTModels, :]
dfAdaBFiltered = dfAdaB.loc[AdaBModels, :]
dfGradBFiltered = dfGradB.loc[GradBModels, :]
df_concatParams = pd.concat([dfKNNFiltered, dfSVCFiltered, dfGausNBFiltered, dfMLPFiltered, dfLRFiltered, dfLDAFiltered, dfQDAFiltered, dfRFFiltered, dfExtraTFiltered, dfAdaBFiltered, dfGradBFiltered])
return df_concatParams
def preProcessFeatAcc():
dicKNN = json.loads(allParametersPerformancePerModel[3])
dicSVC = json.loads(allParametersPerformancePerModel[12])
dicGausNB = json.loads(allParametersPerformancePerModel[21])
dicMLP = json.loads(allParametersPerformancePerModel[30])
dicLR = json.loads(allParametersPerformancePerModel[39])
dicLDA = json.loads(allParametersPerformancePerModel[48])
dicQDA = json.loads(allParametersPerformancePerModel[57])
dicRF = json.loads(allParametersPerformancePerModel[66])
dicExtraT = json.loads(allParametersPerformancePerModel[75])
dicAdaB = json.loads(allParametersPerformancePerModel[84])
dicGradB = json.loads(allParametersPerformancePerModel[93])
dfKNN = pd.DataFrame.from_dict(dicKNN)
dfSVC = pd.DataFrame.from_dict(dicSVC)
dfGausNB = pd.DataFrame.from_dict(dicGausNB)
dfMLP = pd.DataFrame.from_dict(dicMLP)
dfLR = pd.DataFrame.from_dict(dicLR)
dfLDA = pd.DataFrame.from_dict(dicLDA)
dfQDA = pd.DataFrame.from_dict(dicQDA)
dfRF = pd.DataFrame.from_dict(dicRF)
dfExtraT = pd.DataFrame.from_dict(dicExtraT)
dfAdaB = pd.DataFrame.from_dict(dicAdaB)
dfGradB = pd.DataFrame.from_dict(dicGradB)
dfKNN.index = dfKNN.index.astype(int)
dfSVC.index = dfSVC.index.astype(int) + SVCModelsCount
dfGausNB.index = dfGausNB.index.astype(int) + GausNBModelsCount
dfMLP.index = dfMLP.index.astype(int) + MLPModelsCount
dfLR.index = dfLR.index.astype(int) + LRModelsCount
dfLDA.index = dfLDA.index.astype(int) + LDAModelsCount
dfQDA.index = dfQDA.index.astype(int) + QDAModelsCount
dfRF.index = dfRF.index.astype(int) + RFModelsCount
dfExtraT.index = dfExtraT.index.astype(int) + ExtraTModelsCount
dfAdaB.index = dfAdaB.index.astype(int) + AdaBModelsCount
dfGradB.index = dfGradB.index.astype(int) + GradBModelsCount
dfKNNFiltered = dfKNN.loc[KNNModels, :]
dfSVCFiltered = dfSVC.loc[SVCModels, :]
dfGausNBFiltered = dfGausNB.loc[GausNBModels, :]
dfMLPFiltered = dfMLP.loc[MLPModels, :]
dfLRFiltered = dfLR.loc[LRModels, :]
dfLDAFiltered = dfLDA.loc[LDAModels, :]
dfQDAFiltered = dfQDA.loc[QDAModels, :]
dfRFFiltered = dfRF.loc[RFModels, :]
dfExtraTFiltered = dfExtraT.loc[ExtraTModels, :]
dfAdaBFiltered = dfAdaB.loc[AdaBModels, :]
dfGradBFiltered = dfGradB.loc[GradBModels, :]
df_featAcc = pd.concat([dfKNNFiltered, dfSVCFiltered, dfGausNBFiltered, dfMLPFiltered, dfLRFiltered, dfLDAFiltered, dfQDAFiltered, dfRFFiltered, dfExtraTFiltered, dfAdaBFiltered, dfGradBFiltered])
return df_featAcc
def preProcessPerm():
dicKNN = json.loads(allParametersPerformancePerModel[4])
dicSVC = json.loads(allParametersPerformancePerModel[13])
dicGausNB = json.loads(allParametersPerformancePerModel[22])
dicMLP = json.loads(allParametersPerformancePerModel[31])
dicLR = json.loads(allParametersPerformancePerModel[40])
dicLDA = json.loads(allParametersPerformancePerModel[49])
dicQDA = json.loads(allParametersPerformancePerModel[58])
dicRF = json.loads(allParametersPerformancePerModel[67])
dicExtraT = json.loads(allParametersPerformancePerModel[76])
dicAdaB = json.loads(allParametersPerformancePerModel[85])
dicGradB = json.loads(allParametersPerformancePerModel[94])
dfKNN = pd.DataFrame.from_dict(dicKNN)
dfSVC = pd.DataFrame.from_dict(dicSVC)
dfGausNB = pd.DataFrame.from_dict(dicGausNB)
dfMLP = pd.DataFrame.from_dict(dicMLP)
dfLR = pd.DataFrame.from_dict(dicLR)
dfLDA = pd.DataFrame.from_dict(dicLDA)
dfQDA = pd.DataFrame.from_dict(dicQDA)
dfRF = pd.DataFrame.from_dict(dicRF)
dfExtraT = pd.DataFrame.from_dict(dicExtraT)
dfAdaB = pd.DataFrame.from_dict(dicAdaB)
dfGradB = pd.DataFrame.from_dict(dicGradB)
dfKNN.index = dfKNN.index.astype(int)
dfSVC.index = dfSVC.index.astype(int) + SVCModelsCount
dfGausNB.index = dfGausNB.index.astype(int) + GausNBModelsCount
dfMLP.index = dfMLP.index.astype(int) + MLPModelsCount
dfLR.index = dfLR.index.astype(int) + LRModelsCount
dfLDA.index = dfLDA.index.astype(int) + LDAModelsCount
dfQDA.index = dfQDA.index.astype(int) + QDAModelsCount
dfRF.index = dfRF.index.astype(int) + RFModelsCount
dfExtraT.index = dfExtraT.index.astype(int) + ExtraTModelsCount
dfAdaB.index = dfAdaB.index.astype(int) + AdaBModelsCount
dfGradB.index = dfGradB.index.astype(int) + GradBModelsCount
dfKNNFiltered = dfKNN.loc[KNNModels, :]
dfSVCFiltered = dfSVC.loc[SVCModels, :]
dfGausNBFiltered = dfGausNB.loc[GausNBModels, :]
dfMLPFiltered = dfMLP.loc[MLPModels, :]
dfLRFiltered = dfLR.loc[LRModels, :]
dfLDAFiltered = dfLDA.loc[LDAModels, :]
dfQDAFiltered = dfQDA.loc[QDAModels, :]
dfRFFiltered = dfRF.loc[RFModels, :]
dfExtraTFiltered = dfExtraT.loc[ExtraTModels, :]
dfAdaBFiltered = dfAdaB.loc[AdaBModels, :]
dfGradBFiltered = dfGradB.loc[GradBModels, :]
df_perm = pd.concat([dfKNNFiltered, dfSVCFiltered, dfGausNBFiltered, dfMLPFiltered, dfLRFiltered, dfLDAFiltered, dfQDAFiltered, dfRFFiltered, dfExtraTFiltered, dfAdaBFiltered, dfGradBFiltered])
return df_perm
def preProcessFeatSc():
dicKNN = json.loads(allParametersPerformancePerModel[5])
dfKNN = pd.DataFrame.from_dict(dicKNN)
return dfKNN
# TODO: this helper may be redundant and could probably be removed
def preProcsumPerMetric(factors):
sumPerClassifier = []
loopThroughMetrics = PreprocessingMetrics()
loopThroughMetrics = loopThroughMetrics.fillna(0)
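    # log loss is a "lower is better" metric, so invert it to make every column "higher is better"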
loopThroughMetrics.loc[:, 'log_loss'] = 1 - loopThroughMetrics.loc[:, 'log_loss']
    # with all factor weights set to zero there is nothing to average over
    if sum(factors) == 0:
        return 0
    for row in loopThroughMetrics.iterrows():
        rowSum = 0
        name, values = row
        for loop, elements in enumerate(values):
            rowSum = elements*factors[loop] + rowSum
        sumPerClassifier.append(rowSum/sum(factors) * 100)
    return sumPerClassifier
def preProcMetricsAllAndSel():
loopThroughMetrics = PreprocessingMetrics()
loopThroughMetrics = loopThroughMetrics.fillna(0)
global factors
metricsPerModelColl = []
metricsPerModelColl.append(loopThroughMetrics['mean_test_accuracy'])
metricsPerModelColl.append(loopThroughMetrics['geometric_mean_score_micro'])
metricsPerModelColl.append(loopThroughMetrics['geometric_mean_score_macro'])
metricsPerModelColl.append(loopThroughMetrics['geometric_mean_score_weighted'])
metricsPerModelColl.append(loopThroughMetrics['mean_test_precision_micro'])
metricsPerModelColl.append(loopThroughMetrics['mean_test_precision_macro'])
metricsPerModelColl.append(loopThroughMetrics['mean_test_precision_weighted'])
metricsPerModelColl.append(loopThroughMetrics['mean_test_recall_micro'])
metricsPerModelColl.append(loopThroughMetrics['mean_test_recall_macro'])
metricsPerModelColl.append(loopThroughMetrics['mean_test_recall_weighted'])
metricsPerModelColl.append(loopThroughMetrics['f5_micro'])
metricsPerModelColl.append(loopThroughMetrics['f5_macro'])
metricsPerModelColl.append(loopThroughMetrics['f5_weighted'])
metricsPerModelColl.append(loopThroughMetrics['f1_micro'])
metricsPerModelColl.append(loopThroughMetrics['f1_macro'])
metricsPerModelColl.append(loopThroughMetrics['f1_weighted'])
metricsPerModelColl.append(loopThroughMetrics['f2_micro'])
metricsPerModelColl.append(loopThroughMetrics['f2_macro'])
metricsPerModelColl.append(loopThroughMetrics['f2_weighted'])
metricsPerModelColl.append(loopThroughMetrics['matthews_corrcoef'])
metricsPerModelColl.append(loopThroughMetrics['mean_test_roc_auc_ovo_weighted'])
metricsPerModelColl.append(loopThroughMetrics['log_loss'])
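    # f clamps negative values to zero (Matthews correlation ranges over [-1, 1])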
f=lambda a: (abs(a)+a)/2
for index, metric in enumerate(metricsPerModelColl):
if (index == 19):
metricsPerModelColl[index] = ((f(metric))*factors[index]) * 100
elif (index == 21):
metricsPerModelColl[index] = ((1 - metric)*factors[index] ) * 100
else:
metricsPerModelColl[index] = (metric*factors[index]) * 100
metricsPerModelColl[index] = metricsPerModelColl[index].to_json()
return metricsPerModelColl
def preProceModels():
models = KNNModels + SVCModels + GausNBModels + MLPModels + LRModels + LDAModels + QDAModels + RFModels + ExtraTModels + AdaBModels + GradBModels
return models
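# The three helpers below project the high-dimensional model/prediction spaces down
# to 2-D (MDS, t-SNE, and UMAP) for the frontend scatterplots.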
def FunMDS (data):
mds = MDS(n_components=2, random_state=RANDOM_SEED)
XTransformed = mds.fit_transform(data).T
XTransformed = XTransformed.tolist()
return XTransformed
def FunTsne (data):
    tsne = TSNE(n_components=2, random_state=RANDOM_SEED).fit_transform(data)
    return tsne
def FunUMAP (data):
trans = umap.UMAP(n_neighbors=15, random_state=RANDOM_SEED).fit(data)
Xpos = trans.embedding_[:, 0].tolist()
Ypos = trans.embedding_[:, 1].tolist()
return [Xpos,Ypos]
def InitializeEnsemble():
XModels = PreprocessingMetrics()
global ModelSpaceMDS
global ModelSpaceTSNE
global allParametersPerformancePerModel
global impDataInst
XModels = XModels.fillna(0)
ModelSpaceMDS = FunMDS(XModels)
ModelSpaceTSNE = FunTsne(XModels)
ModelSpaceTSNE = ModelSpaceTSNE.tolist()
ModelSpaceUMAP = FunUMAP(XModels)
PredictionProbSel = PreprocessingPred()
PredictionSpaceMDS = FunMDS(PredictionProbSel)
PredictionSpaceTSNE = FunTsne(PredictionProbSel)
PredictionSpaceTSNE = PredictionSpaceTSNE.tolist()
PredictionSpaceUMAP = FunUMAP(PredictionProbSel)
ModelsIDs = preProceModels()
impDataInst = processDataInstance(ModelsIDs,allParametersPerformancePerModel)
callPreResults()
key = 0
EnsembleModel(ModelsIDs, key)
ReturnResults(ModelSpaceMDS,ModelSpaceTSNE,ModelSpaceUMAP,PredictionSpaceMDS,PredictionSpaceTSNE,PredictionSpaceUMAP)
def processDataInstance(ModelsIDs, allParametersPerformancePerModel):
dicKNN = json.loads(allParametersPerformancePerModel[8])
dicKNN = json.loads(dicKNN)
dicSVC = json.loads(allParametersPerformancePerModel[17])
dicSVC = json.loads(dicSVC)
dicGausNB = json.loads(allParametersPerformancePerModel[26])
dicGausNB = json.loads(dicGausNB)
dicMLP = json.loads(allParametersPerformancePerModel[35])
dicMLP = json.loads(dicMLP)
dicLR = json.loads(allParametersPerformancePerModel[44])
dicLR = json.loads(dicLR)
dicLDA = json.loads(allParametersPerformancePerModel[53])
dicLDA = json.loads(dicLDA)
dicQDA = json.loads(allParametersPerformancePerModel[62])
dicQDA = json.loads(dicQDA)
dicRF = json.loads(allParametersPerformancePerModel[71])
dicRF = json.loads(dicRF)
dicExtraT = json.loads(allParametersPerformancePerModel[80])
dicExtraT = json.loads(dicExtraT)
dicAdaB = json.loads(allParametersPerformancePerModel[89])
dicAdaB = json.loads(dicAdaB)
dicGradB = json.loads(allParametersPerformancePerModel[98])
dicGradB = json.loads(dicGradB)
dfKNN = pd.DataFrame.from_dict(dicKNN)
dfSVC = pd.DataFrame.from_dict(dicSVC)
dfGausNB = pd.DataFrame.from_dict(dicGausNB)
dfMLP = pd.DataFrame.from_dict(dicMLP)
dfLR = pd.DataFrame.from_dict(dicLR)
dfLDA = pd.DataFrame.from_dict(dicLDA)
dfQDA = pd.DataFrame.from_dict(dicQDA)
dfRF = pd.DataFrame.from_dict(dicRF)
dfExtraT = pd.DataFrame.from_dict(dicExtraT)
dfAdaB = pd.DataFrame.from_dict(dicAdaB)
dfGradB = pd.DataFrame.from_dict(dicGradB)
dfKNN.index = dfKNN.index.astype(int)
dfSVC.index = dfSVC.index.astype(int) + SVCModelsCount
dfGausNB.index = dfGausNB.index.astype(int) + GausNBModelsCount
dfMLP.index = dfMLP.index.astype(int) + MLPModelsCount
dfLR.index = dfLR.index.astype(int) + LRModelsCount
dfLDA.index = dfLDA.index.astype(int) + LDAModelsCount
dfQDA.index = dfQDA.index.astype(int) + QDAModelsCount
dfRF.index = dfRF.index.astype(int) + RFModelsCount
dfExtraT.index = dfExtraT.index.astype(int) + ExtraTModelsCount
dfAdaB.index = dfAdaB.index.astype(int) + AdaBModelsCount
dfGradB.index = dfGradB.index.astype(int) + GradBModelsCount
dfKNNFiltered = dfKNN.loc[KNNModels, :]
dfSVCFiltered = dfSVC.loc[SVCModels, :]
dfGausNBFiltered = dfGausNB.loc[GausNBModels, :]
dfMLPFiltered = dfMLP.loc[MLPModels, :]
dfLRFiltered = dfLR.loc[LRModels, :]
dfLDAFiltered = dfLDA.loc[LDAModels, :]
dfQDAFiltered = dfQDA.loc[QDAModels, :]
dfRFFiltered = dfRF.loc[RFModels, :]
dfExtraTFiltered = dfExtraT.loc[ExtraTModels, :]
dfAdaBFiltered = dfAdaB.loc[AdaBModels, :]
dfGradBFiltered = dfGradB.loc[GradBModels, :]
df_connect = pd.concat([dfKNNFiltered, dfSVCFiltered, dfGausNBFiltered, dfMLPFiltered, dfLRFiltered, dfLDAFiltered, dfQDAFiltered, dfRFFiltered, dfExtraTFiltered, dfAdaBFiltered, dfGradBFiltered])
global yData
global filterActionFinal
global dataSpacePointsIDs
lengthDF = len(df_connect.columns)
if (filterActionFinal == 'compose'):
getList = []
for index, row in df_connect.iterrows():
yDataSelected = []
for column in row[dataSpacePointsIDs]:
yDataSelected.append(column)
storeMode = mode(yDataSelected)
getList.append(storeMode)
df_connect[str(lengthDF)] = getList
countCorrect = []
length = len(df_connect.index)
for index, element in enumerate(yData):
countTemp = 0
dfPart = df_connect[[str(index)]]
for indexdf, row in dfPart.iterrows():
if (int(row.values[0]) == int(element)):
countTemp += 1
countCorrect.append(1 - (countTemp/length))
return countCorrect
def ReturnResults(ModelSpaceMDS,ModelSpaceTSNE,ModelSpaceUMAP,PredictionSpaceMDS,PredictionSpaceTSNE,PredictionSpaceUMAP):
global Results
global AllTargets
Results = []
parametersGen = PreprocessingParam()
PerClassMetrics = preProcessPerClassM()
FeatureAccuracy = preProcessFeatAcc()
perm_imp_eli5PDCon = preProcessPerm()
featureScoresCon = preProcessFeatSc()
metricsPerModel = preProcMetricsAllAndSel()
sumPerClassifier = preProcsumPerMetric(factors)
ModelsIDs = preProceModels()
parametersGenPD = parametersGen.to_json(orient='records')
PerClassMetrics = PerClassMetrics.to_json(orient='records')
FeatureAccuracy = FeatureAccuracy.to_json(orient='records')
perm_imp_eli5PDCon = perm_imp_eli5PDCon.to_json(orient='records')
featureScoresCon = featureScoresCon.to_json(orient='records')
XDataJSONEntireSet = XData.to_json(orient='records')
XDataJSON = XData.columns.tolist()
Results.append(json.dumps(sumPerClassifier)) # Position: 0
Results.append(json.dumps(ModelSpaceMDS)) # Position: 1
Results.append(json.dumps(parametersGenPD)) # Position: 2
Results.append(PerClassMetrics) # Position: 3
Results.append(json.dumps(target_names)) # Position: 4
Results.append(FeatureAccuracy) # Position: 5
Results.append(json.dumps(XDataJSON)) # Position: 6
Results.append(0) # Position: 7
Results.append(json.dumps(PredictionSpaceMDS)) # Position: 8
Results.append(json.dumps(metricsPerModel)) # Position: 9
Results.append(perm_imp_eli5PDCon) # Position: 10
Results.append(featureScoresCon) # Position: 11
Results.append(json.dumps(ModelSpaceTSNE)) # Position: 12
Results.append(json.dumps(ModelsIDs)) # Position: 13
Results.append(json.dumps(XDataJSONEntireSet)) # Position: 14
Results.append(json.dumps(yData)) # Position: 15
Results.append(json.dumps(AllTargets)) # Position: 16
Results.append(json.dumps(ModelSpaceUMAP)) # Position: 17
Results.append(json.dumps(PredictionSpaceTSNE)) # Position: 18
Results.append(json.dumps(PredictionSpaceUMAP)) # Position: 19
return Results
# Sending the overview classifiers' results to be visualized as a scatterplot
@app.route('/data/PlotClassifiers', methods=["GET", "POST"])
def SendToPlot():
while (len(DataResultsRaw) != DataRawLength): # busy-wait until all raw results have been collected
pass
InitializeEnsemble()
response = {
'OverviewResults': Results
}
return jsonify(response)
# Retrieve data from client
@cross_origin(origin='localhost',headers=['Content-Type','Authorization'])
@app.route('/data/ServerRemoveFromStack', methods=["GET", "POST"])
def RetrieveSelClassifiersIDandRemoveFromStack():
ClassifierIDsList = request.get_data().decode('utf8').replace("'", '"')
PredictionProbSelUpdate = PreprocessingPredUpdate(ClassifierIDsList)
global resultsUpdatePredictionSpace
resultsUpdatePredictionSpace = []
resultsUpdatePredictionSpace.append(json.dumps(PredictionProbSelUpdate[0])) # Position: 0
resultsUpdatePredictionSpace.append(json.dumps(PredictionProbSelUpdate[1]))
key = 3
EnsembleModel(ClassifierIDsList, key)
return 'Everything Okay'
# Sending the overview classifiers' results to be visualized as a scatterplot
@app.route('/data/UpdatePredictionsSpace', methods=["GET", "POST"])
def SendPredBacktobeUpdated():
response = {
'UpdatePredictions': resultsUpdatePredictionSpace
}
return jsonify(response)
# Retrieve data from client
@cross_origin(origin='localhost',headers=['Content-Type','Authorization'])
@app.route('/data/ServerRequestSelPoin', methods=["GET", "POST"])
def RetrieveSelClassifiersID():
ClassifierIDsList = request.get_data().decode('utf8').replace("'", '"')
#ComputeMetricsForSel(ClassifierIDsList)
ClassifierIDCleaned = json.loads(ClassifierIDsList)
global keySpecInternal
keySpecInternal = 1
keySpecInternal = ClassifierIDCleaned['keyNow']
EnsembleModel(ClassifierIDsList, 1)
return 'Everything Okay'
# Retrieve data from client
@cross_origin(origin='localhost',headers=['Content-Type','Authorization'])
@app.route('/data/ServerRequestSelPoinLocally', methods=["GET", "POST"])
def RetrieveSelClassifiersIDLocally():
ClassifierIDsList = request.get_data().decode('utf8').replace("'", '"')
ComputeMetricsForSel(ClassifierIDsList)
return 'Everything Okay'
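# Recompute the bar-chart metrics for the user-selected subset of models only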
def ComputeMetricsForSel(Models):
Models = json.loads(Models)
MetricsAlltoSel = PreprocessingMetrics()
listofModels = []
for loop in Models['ClassifiersList']:
listofModels.append(loop)
MetricsAlltoSel = MetricsAlltoSel.loc[listofModels,:]
global metricsPerModelCollSel
global factors
metricsPerModelCollSel = []
metricsPerModelCollSel.append(MetricsAlltoSel['mean_test_accuracy'])
metricsPerModelCollSel.append(MetricsAlltoSel['geometric_mean_score_micro'])
metricsPerModelCollSel.append(MetricsAlltoSel['geometric_mean_score_macro'])
metricsPerModelCollSel.append(MetricsAlltoSel['geometric_mean_score_weighted'])
metricsPerModelCollSel.append(MetricsAlltoSel['mean_test_precision_micro'])
metricsPerModelCollSel.append(MetricsAlltoSel['mean_test_precision_macro'])
metricsPerModelCollSel.append(MetricsAlltoSel['mean_test_precision_weighted'])
metricsPerModelCollSel.append(MetricsAlltoSel['mean_test_recall_micro'])
metricsPerModelCollSel.append(MetricsAlltoSel['mean_test_recall_macro'])
metricsPerModelCollSel.append(MetricsAlltoSel['mean_test_recall_weighted'])
metricsPerModelCollSel.append(MetricsAlltoSel['f5_micro'])
metricsPerModelCollSel.append(MetricsAlltoSel['f5_macro'])
metricsPerModelCollSel.append(MetricsAlltoSel['f5_weighted'])
metricsPerModelCollSel.append(MetricsAlltoSel['f1_micro'])
metricsPerModelCollSel.append(MetricsAlltoSel['f1_macro'])
metricsPerModelCollSel.append(MetricsAlltoSel['f1_weighted'])
metricsPerModelCollSel.append(MetricsAlltoSel['f2_micro'])
metricsPerModelCollSel.append(MetricsAlltoSel['f2_macro'])
metricsPerModelCollSel.append(MetricsAlltoSel['f2_weighted'])
metricsPerModelCollSel.append(MetricsAlltoSel['matthews_corrcoef'])
metricsPerModelCollSel.append(MetricsAlltoSel['mean_test_roc_auc_ovo_weighted'])
metricsPerModelCollSel.append(MetricsAlltoSel['log_loss'])
f=lambda a: (abs(a)+a)/2 # positive part: max(a, 0), applied elementwise
for index, metric in enumerate(metricsPerModelCollSel):
if (index == 19): # Matthews correlation: clamp negative values to zero before scaling
metricsPerModelCollSel[index] = ((f(metric))*factors[index]) * 100
elif (index == 21): # log loss: invert so that higher is better
metricsPerModelCollSel[index] = (1 - metric)*factors[index] * 100
else:
metricsPerModelCollSel[index] = metric*factors[index] * 100
metricsPerModelCollSel[index] = metricsPerModelCollSel[index].to_json()
return 'okay'
# function to get unique values
def unique(list1):
# initialize an empty list
unique_list = []
# traverse for all elements
for x in list1:
# check if exists in unique_list or not
if x not in unique_list:
unique_list.append(x)
return unique_list
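# Note: for hashable items this is equivalent to list(dict.fromkeys(list1)), which also preserves order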
# Sending the overview classifiers' results to be visualized as a scatterplot
@app.route('/data/BarChartSelectedModels', methods=["GET", "POST"])
def SendToUpdateBarChart():
response = {
'SelectedMetricsForModels': metricsPerModelCollSel
}
return jsonify(response)
# Retrieve data from client
@cross_origin(origin='localhost',headers=['Content-Type','Authorization'])
@app.route('/data/ServerRequestDataPoint', methods=["GET", "POST"])
def RetrieveSelDataPoints():
DataPointsSel = request.get_data().decode('utf8').replace("'", '"')
DataPointsSelClear = json.loads(DataPointsSel)
listofDataPoints = []
for loop in DataPointsSelClear['DataPointsSel']:
temp = [int(s) for s in re.findall(r'\b\d+\b', loop)]
listofDataPoints.append(temp[0])
global algorithmsList
global resultsMetrics
resultsMetrics = []
df_concatMetrics = []
metricsSelList = []
paramsListSepPD = []
paramsListSepPD = PreprocessingParamSep()
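# PreprocessingParamSep returns one hyperparameter DataFrame per algorithm, in the fixed order: KNN, SVC, GausNB, MLP, LR, LDA, QDA, RF, ExtraT, AdaB, GradB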
paramsListSeptoDicKNN = paramsListSepPD[0].to_dict(orient='list')
paramsListSeptoDicSVC = paramsListSepPD[1].to_dict(orient='list')
paramsListSeptoDicGausNB = paramsListSepPD[2].to_dict(orient='list')
paramsListSeptoDicMLP = paramsListSepPD[3].to_dict(orient='list')
paramsListSeptoDicLR = paramsListSepPD[4].to_dict(orient='list')
paramsListSeptoDicLDA = paramsListSepPD[5].to_dict(orient='list')
paramsListSeptoDicQDA = paramsListSepPD[6].to_dict(orient='list')
paramsListSeptoDicRF = paramsListSepPD[7].to_dict(orient='list')
paramsListSeptoDicExtraT = paramsListSepPD[8].to_dict(orient='list')
paramsListSeptoDicAdaB = paramsListSepPD[9].to_dict(orient='list')
paramsListSeptoDicGradB = paramsListSepPD[10].to_dict(orient='list')
RetrieveParamsCleared = {}
RetrieveParamsClearedListKNN = []
for key, value in paramsListSeptoDicKNN.items():
withoutDuplicates = Remove(value)
RetrieveParamsCleared[key] = withoutDuplicates
RetrieveParamsClearedListKNN.append(RetrieveParamsCleared)
RetrieveParamsCleared = {}
RetrieveParamsClearedListSVC = []
for key, value in paramsListSeptoDicSVC.items():
withoutDuplicates = Remove(value)
RetrieveParamsCleared[key] = withoutDuplicates
RetrieveParamsClearedListSVC.append(RetrieveParamsCleared)
RetrieveParamsCleared = {}
RetrieveParamsClearedListGausNB = []
for key, value in paramsListSeptoDicGausNB.items():
withoutDuplicates = Remove(value)
RetrieveParamsCleared[key] = withoutDuplicates
RetrieveParamsClearedListGausNB.append(RetrieveParamsCleared)
RetrieveParamsCleared = {}
RetrieveParamsClearedListMLP = []
for key, value in paramsListSeptoDicMLP.items():
withoutDuplicates = Remove(value)
RetrieveParamsCleared[key] = withoutDuplicates
RetrieveParamsClearedListMLP.append(RetrieveParamsCleared)
RetrieveParamsCleared = {}
RetrieveParamsClearedListLR = []
for key, value in paramsListSeptoDicLR.items():
withoutDuplicates = Remove(value)
RetrieveParamsCleared[key] = withoutDuplicates
RetrieveParamsClearedListLR.append(RetrieveParamsCleared)
RetrieveParamsCleared = {}
RetrieveParamsClearedListLDA = []
for key, value in paramsListSeptoDicLDA.items():
withoutDuplicates = Remove(value)
RetrieveParamsCleared[key] = withoutDuplicates
RetrieveParamsClearedListLDA.append(RetrieveParamsCleared)
RetrieveParamsCleared = {}
RetrieveParamsClearedListQDA = []
for key, value in paramsListSeptoDicQDA.items():
withoutDuplicates = Remove(value)
RetrieveParamsCleared[key] = withoutDuplicates
RetrieveParamsClearedListQDA.append(RetrieveParamsCleared)
RetrieveParamsCleared = {}
RetrieveParamsClearedListRF = []
for key, value in paramsListSeptoDicRF.items():
withoutDuplicates = Remove(value)
RetrieveParamsCleared[key] = withoutDuplicates
RetrieveParamsClearedListRF.append(RetrieveParamsCleared)
RetrieveParamsCleared = {}
RetrieveParamsClearedListExtraT = []
for key, value in paramsListSeptoDicExtraT.items():
withoutDuplicates = Remove(value)
RetrieveParamsCleared[key] = withoutDuplicates
RetrieveParamsClearedListExtraT.append(RetrieveParamsCleared)
RetrieveParamsCleared = {}
RetrieveParamsClearedListAdaB = []
for key, value in paramsListSeptoDicAdaB.items():
withoutDuplicates = Remove(value)
RetrieveParamsCleared[key] = withoutDuplicates
RetrieveParamsClearedListAdaB.append(RetrieveParamsCleared)
RetrieveParamsCleared = {}
RetrieveParamsClearedListGradB = []
for key, value in paramsListSeptoDicGradB.items():
withoutDuplicates = Remove(value)
RetrieveParamsCleared[key] = withoutDuplicates
RetrieveParamsClearedListGradB.append(RetrieveParamsCleared)
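# If an algorithm has no selected models (its key hyperparameter list is empty), clear its parameter grid so it contributes nothing below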
if (len(paramsListSeptoDicKNN['n_neighbors']) == 0):
RetrieveParamsClearedListKNN = []
if (len(paramsListSeptoDicSVC['C']) == 0):
RetrieveParamsClearedListSVC = []
if (len(paramsListSeptoDicGausNB['var_smoothing']) == 0):
RetrieveParamsClearedListGausNB = []
if (len(paramsListSeptoDicMLP['alpha']) == 0):
RetrieveParamsClearedListMLP = []
if (len(paramsListSeptoDicLR['C']) == 0):
RetrieveParamsClearedListLR = []
if (len(paramsListSeptoDicLDA['shrinkage']) == 0):
RetrieveParamsClearedListLDA = []
if (len(paramsListSeptoDicQDA['reg_param']) == 0):
RetrieveParamsClearedListQDA = []
if (len(paramsListSeptoDicRF['n_estimators']) == 0):
RetrieveParamsClearedListRF = []
if (len(paramsListSeptoDicExtraT['n_estimators']) == 0):
RetrieveParamsClearedListExtraT = []
if (len(paramsListSeptoDicAdaB['n_estimators']) == 0):
RetrieveParamsClearedListAdaB = []
if (len(paramsListSeptoDicGradB['n_estimators']) == 0):
RetrieveParamsClearedListGradB = []
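# Re-run the grid search for every algorithm family, restricted to the user-selected data points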
for eachAlgor in algorithms:
if (eachAlgor) == 'KNN':
clf = KNeighborsClassifier()
params = RetrieveParamsClearedListKNN
AlgorithmsIDsEnd = 0
elif (eachAlgor) == 'SVC':
clf = SVC(probability=True,random_state=RANDOM_SEED)
params = RetrieveParamsClearedListSVC
AlgorithmsIDsEnd = SVCModelsCount
elif (eachAlgor) == 'GauNB':
clf = GaussianNB()
params = RetrieveParamsClearedListGausNB
AlgorithmsIDsEnd = GausNBModelsCount
elif (eachAlgor) == 'MLP':
clf = MLPClassifier(random_state=RANDOM_SEED)
params = RetrieveParamsClearedListMLP
AlgorithmsIDsEnd = MLPModelsCount
elif (eachAlgor) == 'LR':
clf = LogisticRegression(random_state=RANDOM_SEED)
params = RetrieveParamsClearedListLR
AlgorithmsIDsEnd = LRModelsCount
elif (eachAlgor) == 'LDA':
clf = LinearDiscriminantAnalysis()
params = RetrieveParamsClearedListLDA
AlgorithmsIDsEnd = LDAModelsCount
elif (eachAlgor) == 'QDA':
clf = QuadraticDiscriminantAnalysis()
params = RetrieveParamsClearedListQDA
AlgorithmsIDsEnd = QDAModelsCount
elif (eachAlgor) == 'RF':
clf = RandomForestClassifier(random_state=RANDOM_SEED)
params = RetrieveParamsClearedListRF
AlgorithmsIDsEnd = RFModelsCount
elif (eachAlgor) == 'ExtraT':
clf = ExtraTreesClassifier(random_state=RANDOM_SEED)
params = RetrieveParamsClearedListExtraT
AlgorithmsIDsEnd = ExtraTModelsCount
elif (eachAlgor) == 'AdaB':
clf = AdaBoostClassifier(random_state=RANDOM_SEED)
params = RetrieveParamsClearedListAdaB
AlgorithmsIDsEnd = AdaBModelsCount
else:
clf = GradientBoostingClassifier(random_state=RANDOM_SEED)
params = RetrieveParamsClearedListGradB
AlgorithmsIDsEnd = GradBModelsCount
metricsSelList = GridSearchSel(clf, params, factors, AlgorithmsIDsEnd, listofDataPoints, crossValidation)
if all(len(metricsSelList[i]) != 0 for i in range(11)): # all 11 algorithms returned metrics
dicKNN = json.loads(metricsSelList[0])
dfKNN = pd.DataFrame.from_dict(dicKNN)
parametersSelDataPD = parametersSelData[0].apply(pd.Series)
set_diff_df = pd.concat([parametersSelDataPD, paramsListSepPD[0], paramsListSepPD[0]]).drop_duplicates(keep=False)
set_diff_df = set_diff_df.index.tolist()
if (len(set_diff_df) == 0):
dfKNNCleared = dfKNN
else:
dfKNNCleared = dfKNN.drop(dfKNN.index[set_diff_df])
dicSVC = json.loads(metricsSelList[1])
dfSVC = pd.DataFrame.from_dict(dicSVC)
parametersSelDataPD = parametersSelData[1].apply(pd.Series)
set_diff_df = pd.concat([parametersSelDataPD, paramsListSepPD[1], paramsListSepPD[1]]).drop_duplicates(keep=False)
set_diff_df = set_diff_df.index.tolist()
if (len(set_diff_df) == 0):
dfSVCCleared = dfSVC
else:
dfSVCCleared = dfSVC.drop(dfSVC.index[set_diff_df])
dicGausNB = json.loads(metricsSelList[2])
dfGausNB = pd.DataFrame.from_dict(dicGausNB)
parametersSelDataPD = parametersSelData[2].apply(pd.Series)
set_diff_df = pd.concat([parametersSelDataPD, paramsListSepPD[2], paramsListSepPD[2]]).drop_duplicates(keep=False)
set_diff_df = set_diff_df.index.tolist()
if (len(set_diff_df) == 0):
dfGausNBCleared = dfGausNB
else:
dfGausNBCleared = dfGausNB.drop(dfGausNB.index[set_diff_df])
dicMLP = json.loads(metricsSelList[3])
dfMLP = pd.DataFrame.from_dict(dicMLP)
parametersSelDataPD = parametersSelData[3].apply(pd.Series)
set_diff_df = pd.concat([parametersSelDataPD, paramsListSepPD[3], paramsListSepPD[3]]).drop_duplicates(keep=False)
set_diff_df = set_diff_df.index.tolist()
if (len(set_diff_df) == 0):
dfMLPCleared = dfMLP
else:
dfMLPCleared = dfMLP.drop(dfMLP.index[set_diff_df])
dicLR = json.loads(metricsSelList[4])
dfLR = pd.DataFrame.from_dict(dicLR)
parametersSelDataPD = parametersSelData[4].apply(pd.Series)
set_diff_df = pd.concat([parametersSelDataPD, paramsListSepPD[4], paramsListSepPD[4]]).drop_duplicates(keep=False)
set_diff_df = set_diff_df.index.tolist()
if (len(set_diff_df) == 0):
dfLRCleared = dfLR
else:
dfLRCleared = dfLR.drop(dfLR.index[set_diff_df])
dicLDA = json.loads(metricsSelList[5])
dfLDA = pd.DataFrame.from_dict(dicLDA)
parametersSelDataPD = parametersSelData[5].apply(pd.Series)
set_diff_df = pd.concat([parametersSelDataPD, paramsListSepPD[5], paramsListSepPD[5]]).drop_duplicates(keep=False)
set_diff_df = set_diff_df.index.tolist()
if (len(set_diff_df) == 0):
dfLDACleared = dfLDA
else:
dfLDACleared = dfLDA.drop(dfLDA.index[set_diff_df])
dicQDA = json.loads(metricsSelList[6])
dfQDA = pd.DataFrame.from_dict(dicQDA)
parametersSelDataPD = parametersSelData[6].apply(pd.Series)
set_diff_df = pd.concat([parametersSelDataPD, paramsListSepPD[6], paramsListSepPD[6]]).drop_duplicates(keep=False)
set_diff_df = set_diff_df.index.tolist()
if (len(set_diff_df) == 0):
dfQDACleared = dfQDA
else:
dfQDACleared = dfQDA.drop(dfQDA.index[set_diff_df])
dicRF = json.loads(metricsSelList[7])
dfRF = pd.DataFrame.from_dict(dicRF)
parametersSelDataPD = parametersSelData[7].apply(pd.Series)
set_diff_df = pd.concat([parametersSelDataPD, paramsListSepPD[7], paramsListSepPD[7]]).drop_duplicates(keep=False)
set_diff_df = set_diff_df.index.tolist()
if (len(set_diff_df) == 0):
dfRFCleared = dfRF
else:
dfRFCleared = dfRF.drop(dfRF.index[set_diff_df])
dicExtraT = json.loads(metricsSelList[8])
dfExtraT = pd.DataFrame.from_dict(dicExtraT)
parametersSelDataPD = parametersSelData[8].apply(pd.Series)
set_diff_df = pd.concat([parametersSelDataPD, paramsListSepPD[8], paramsListSepPD[8]]).drop_duplicates(keep=False)
set_diff_df = set_diff_df.index.tolist()
if (len(set_diff_df) == 0):
dfExtraTCleared = dfExtraT
else:
dfExtraTCleared = dfExtraT.drop(dfExtraT.index[set_diff_df])
dicAdaB = json.loads(metricsSelList[9])
dfAdaB = pd.DataFrame.from_dict(dicAdaB)
parametersSelDataPD = parametersSelData[9].apply(pd.Series)
set_diff_df = pd.concat([parametersSelDataPD, paramsListSepPD[9], paramsListSepPD[9]]).drop_duplicates(keep=False)
import pandas as pd
import numpy as np
import joblib
from sys import getsizeof
from chainladder.core.display import TriangleDisplay
from chainladder.core.dunders import TriangleDunders
from chainladder.core.pandas import TrianglePandas
from chainladder.core.slice import TriangleSlicer
class IO:
''' Class intended to allow persistence of triangle or estimator objects
to disk
'''
def to_pickle(self, path, protocol=None):
joblib.dump(self, filename=path, protocol=protocol)
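# Minimal usage sketch (hypothetical file name): persist with to_pickle and restore with joblib.load, e.g.
# triangle.to_pickle('triangle.pkl')
# restored = joblib.load('triangle.pkl')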
def __contains__(self, value):
if self.__dict__.get(value, None) is None:
return False
return True
@property
def memory_usage(self):
return sum([getsizeof(v) for k, v in self.__dict__.items()])
class TriangleBase(IO, TriangleDisplay, TriangleSlicer,
TriangleDunders, TrianglePandas):
def __init__(self, data=None, origin=None, development=None,
columns=None, index=None, *args, **kwargs):
# Sanitize inputs
index, columns, origin, development = self.str_to_list(
index, columns, origin, development)
key_gr = origin + self.flatten(development, index)
# Aggregate data
data_agg = data.groupby(key_gr).sum().reset_index()
if not index:
index = ['Total']
data_agg[index[0]] = 'Total'
# Initialize origin and development dates and grains
origin_date = TriangleBase.to_datetime(data_agg, origin)
self.origin_grain = TriangleBase._get_grain(origin_date)
if development:
development_date = TriangleBase.to_datetime(
data_agg, development, period_end=True)
self.development_grain = TriangleBase._get_grain(development_date)
col = 'development'
else:
development_date = origin_date
self.development_grain = self.origin_grain
col = None
# Prep the data for 4D Triangle
data_agg = self._get_axes(data_agg, index, columns,
origin_date, development_date)
data_agg = pd.pivot_table(data_agg, index=index+['origin'],
columns=col, values=columns,
aggfunc='sum')
# Assign object properties
self.kdims = np.array(data_agg.index.droplevel(-1).unique())
self.odims = np.array(data_agg.index.levels[-1].unique())
if development:
self.ddims = np.array(data_agg.columns.levels[-1].unique())
self.ddims = self.ddims*({'Y': 12, 'Q': 3, 'M': 1}
[self.development_grain])
self.vdims = np.array(data_agg.columns.levels[0].unique())
else:
self.ddims = np.array([None])
self.vdims = np.array(data_agg.columns.unique())
self.valuation_date = development_date.max()
self.key_labels = index
self.set_slicers()
# Create 4D Triangle
triangle = \
np.reshape(np.array(data_agg), (len(self.kdims), len(self.odims),
len(self.vdims), len(self.ddims)))
triangle = np.swapaxes(triangle, 1, 2)
# Set all 0s to NAN for nansafe ufunc arithmetic
triangle[triangle == 0] = np.nan
self.values = np.array(triangle, dtype=kwargs.get('dtype', None))
# Used to show NANs in lower part of triangle
self.nan_override = False
self.valuation = self._valuation_triangle()
def _len_check(self, x, y):
if len(x) != len(y):
raise ValueError(
'Length mismatch: Expected axis has {} elements, '
'new values have {} elements'.format(len(x), len(y)))
def _get_date_axes(self, origin_date, development_date):
''' Function to find any missing origin dates or development dates that
would otherwise mess up the origin/development dimensions.
'''
def complete_date_range(origin_date, development_date,
origin_grain, development_grain):
''' Determines origin/development combinations in full. Useful for
when the triangle has holes in it. '''
origin_unique = pd.period_range(
start=origin_date.min(),
end=origin_date.max(),
freq=origin_grain).to_timestamp()
development_unique = pd.period_range(
start=origin_date.min(),
end=development_date.max(),
freq=development_grain).to_timestamp()
development_unique = TriangleBase._period_end(development_unique)
# Let's get rid of any development periods before origin periods
cart_prod = TriangleBase._cartesian_product(
origin_unique, development_unique)
cart_prod = cart_prod[cart_prod[:, 0] <= cart_prod[:, 1], :]
return pd.DataFrame(cart_prod, columns=['origin', 'development'])
cart_prod_o = complete_date_range(
pd.Series(origin_date.min()), development_date,
self.origin_grain, self.development_grain)
cart_prod_d = complete_date_range(
origin_date, pd.Series(origin_date.max()),
self.origin_grain, self.development_grain)
cart_prod_t = pd.DataFrame({'origin': origin_date,
'development': development_date})
cart_prod = cart_prod_o.append(cart_prod_d, sort=True) \
.append(cart_prod_t, sort=True) \
.drop_duplicates()
cart_prod = cart_prod[cart_prod['development'] >= cart_prod['origin']]
return cart_prod
def _get_axes(self, data_agg, groupby, columns,
origin_date, development_date):
''' Preps axes for the 4D triangle
'''
date_axes = self._get_date_axes(origin_date, development_date)
kdims = data_agg[groupby].drop_duplicates()
kdims['key'] = date_axes['key'] = 1
all_axes = pd.merge(date_axes, kdims, on='key').drop('key', axis=1)
data_agg = all_axes.merge(
data_agg, how='left',
left_on=['origin', 'development'] + groupby,
right_on=[origin_date, development_date] + groupby).fillna(0)
data_agg = data_agg[['origin', 'development'] + groupby + columns]
data_agg['development'] = TriangleBase.development_lag(
data_agg['origin'], data_agg['development'])
return data_agg
def nan_triangle(self):
'''Given the current triangle shape and grain, it determines the
appropriate placement of NANs in the triangle for future valuations.
This becomes useful when managing array arithmetic.
'''
if self.values.shape[2] == 1 or \
self.values.shape[3] == 1 or \
self.nan_override:
# This is reserved for summary arrays, e.g. LDF, Diagonal, etc
# and does not need nan overrides
return np.ones(self.values.shape[2:], dtype='float16')
if len(self.valuation) != len(self.odims)*len(self.ddims) or not \
hasattr(self, '_nan_triangle'):
self.valuation = self._valuation_triangle()
val_array = self.valuation
val_array = val_array.to_timestamp().values.reshape(self.shape[-2:], order='f')
nan_triangle = np.array(
pd.DataFrame(val_array) > self.valuation_date)
nan_triangle = np.array(np.where(nan_triangle, np.nan, 1), dtype='float16')
self._nan_triangle = nan_triangle
return self._nan_triangle
def _valuation_triangle(self, ddims=None):
''' Given origin and development, develop a triangle of valuation
dates.
'''
ddims = self.ddims if ddims is None else ddims
if type(ddims) == pd.PeriodIndex:
return
if ddims[0] is None:
ddims = pd.Series([self.valuation_date]*len(self.origin))
return pd.DatetimeIndex(ddims.values)
# -*- coding: utf-8 -*-
'''
Documentation on clustering in Python:
http://scikit-learn.org/stable/modules/clustering.html
http://www.learndatasci.com/k-means-clustering-algorithms-python-intro/
http://hdbscan.readthedocs.io/en/latest/comparing_clustering_algorithms.html
https://joernhees.de/blog/2015/08/26/scipy-hierarchical-clustering-and-dendrogram-tutorial/
'''
import time
import csv
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import pandas as pd
import numpy as np
from sklearn import metrics
from sklearn import cluster
from math import floor
import seaborn as sns
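# Min-max normalization: rescale every column of the DataFrame to the [0, 1] range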
def norm_to_zero_one(df):
return (df - df.min()) * 1.0 / (df.max() - df.min())
censo = pd.read_csv('../mujeres_fecundidad_INE_2018.csv')
'''
for col in censo:
missing_count = sum(pd.isnull(censo[col]))
if missing_count > 0:
print(col,missing_count)
#'''
# Unknown values could instead be replaced with a constant:
#censo = censo.replace(np.NaN,0)
# Impute missing values with the column mean
for col in censo:
censo[col].fillna(censo[col].mean(), inplace=True)
# Select cases (filter rows)
subset = censo.loc[(censo['TRABAJAACT']==1) & (censo['NDESEOHIJO']<=10)
& (censo['NHOGAR']<=7)]
# Select variables (features)
usadas = ['NHBIOADOP', 'EDAD', 'NTRABA', 'TEMPRELA', 'NHOGAR']
X = subset[usadas]
X_normal = X.apply(norm_to_zero_one)
print('Tamaño de la población tras filtrado: ',len(X_normal.index))
for col in X:
missing_count = sum(pd.isnull(censo[col]))
if missing_count > 0:
print(col,missing_count, ' AFTER')
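# Clustering algorithms to compare, each with fixed hyperparameters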
algoritmos = (('KMeans', cluster.KMeans(init='k-means++', n_clusters=5, n_init=5)),
('MeanShift', cluster.MeanShift(cluster_all=False, min_bin_freq=3)),
('Ward', cluster.AgglomerativeClustering(n_clusters=5, linkage='ward')),
('DBScan', cluster.DBSCAN(eps=0.35, min_samples=5)),
('Birch', cluster.Birch(threshold=0.1,n_clusters=5)))
cluster_predict = {}
calinski = {}
silh = {}
times = {}
n_clusters = {}
clusters_fig, clusters_axis = plt.subplots(3, 2, figsize=(10,10))
clusters_colors = ['gold', 'yellowgreen', 'lightcoral', 'lightskyblue', '#ffb347']
ijs = [(0,0), (0,1), (1,0), (1,1), (2,0), (2,1)]
for i_alg, par in enumerate(algoritmos):
name, alg = par
print('----- Ejecutando ' + name,)
t = time.time()
cluster_predict[name] = alg.fit_predict(X_normal)
tiempo = time.time() - t
times[name] = tiempo
metric_CH = metrics.calinski_harabasz_score(X_normal, cluster_predict[name])
calinski[name] = metric_CH
# Computing the silhouette score can use a lot of RAM. With many samples,
# say more than 10k, a subsample can be used instead, e.g. 20%:
#if len(X) > 10000:
# muestra_silhoutte = 0.2
#else:
muestra_silhoutte = 1.0
metric_SC = metrics.silhouette_score(X_normal, cluster_predict[name], metric='euclidean', sample_size=floor(muestra_silhoutte*len(X)), random_state=123456)
silh[name] = metric_SC
# Assign cluster labels to a DataFrame
clusters = pd.DataFrame(cluster_predict[name],index=X.index,columns=['cluster'])
if (name == 'KMeans'):
clusters_kmeans = clusters
alg_kmeans = alg
print("Tamaño de cada cluster:")
size = clusters['cluster'].value_counts()
cluster_fractions = []
for num,i in size.iteritems():
print('%s: %5d (%5.2f%%)' % (num,i,100*i/len(clusters)))
cluster_fractions.append( 100*i/len(clusters) )
n_clusters[name] = len(size)
# Bar charts
if ( len(cluster_fractions) > 7 ):
cluster_fractions = cluster_fractions[0:6]
i, j = ijs[i_alg]
y_pos = np.arange(len(cluster_fractions))
labels = [ "Cluster " + str(i) for i in range(len(cluster_fractions)) ]
clusters_axis[i, j].bar(y_pos, cluster_fractions, tick_label=labels, color=clusters_colors)
clusters_axis[i, j].set_ylim(0, 100)
clusters_axis[i, j].set_title(name)
if (j == 0):
clusters_axis[i, j].set_ylabel("Cluster size (%)")
clusters_axis[2,1].remove()
# clusters_fig.savefig("clusters.png")
plt.show()
from prettytable import PrettyTable
header = ['Algoritmo', 'CH', 'Silh', 'Tiempo', 'Número de clusters']
tabla = PrettyTable(header)
for name, alg in algoritmos:
tabla.add_row([name,
"{0:.2f}".format(calinski[name]),
"{0:.2f}".format(silh[name]),
"{0:.2f}".format(times[name]),
n_clusters[name]])
print(tabla)
# Write the summary table to general.csv
'''
with open('general.csv', mode='w+', newline='') as file:
writer = csv.DictWriter(file, fieldnames=header)
writer.writeheader()
for name, _ in algoritmos:
writer.writerow({'Algoritmo': name,
'CH': "{0:.2f}".format(calinski[name]),
'Silh': "{0:.2f}".format(silh[name]),
'Tiempo': "{0:.2f}".format(times[name]),
'Número de clusters': n_clusters[name]})
#'''
# ------------------- BUBBLES ---------------------------
plt.clf()
all_colors = ['gold', 'yellowgreen', 'lightcoral', 'lightskyblue', '#ffb347']
buble_sizes_template =[i*50 for i in range(1,20)]
cluster_predict = {}
calinski = []
silh = []
buble_sizes = []
param = []
k_clusters = []
# DBScan
rad_values = [r/20 for r in range(1, 10)]
for rad in rad_values:
alg = cluster.DBSCAN(eps=rad, min_samples=20)
cluster_predict = alg.fit_predict(X_normal)
silh.append( float("{0:.2f}".format(
metrics.silhouette_score(X_normal, cluster_predict,
metric='euclidean', sample_size=floor(len(X)), random_state=123456))))
calinski.append( float("{0:.2f}".format(
metrics.calinski_harabasz_score(X_normal, cluster_predict))))
Bclusters = pd.DataFrame(cluster_predict, index=X.index, columns=['cluster'])
import telegram
from pytube import Playlist
import youtube_dl
import os
from telegram.ext import Updater, CallbackQueryHandler, CommandHandler, MessageHandler, ConversationHandler, Filters
from telegram import InlineKeyboardMarkup, InlineKeyboardButton
import requests
import pandas as pd
import random
import logging
PORT = int(os.environ.get("PORT", "ENTER YOUR PORT"))
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO)
logger = logging.getLogger(__name__)
TOKEN = "ENTER YOUR TOKEN"
bot = telegram.Bot(TOKEN)
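# Conversation states used by the ConversationHandler callbacks below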
ONE, TWO, THREE, FOUR, FIVE = range(5)
# ★━━━━━━━━━━━━━━━━━━━━★━━━━━━━━━━━━━━━━━━━━★━━━━━━━━━━━━━━━━━━━━★━━━━━━━━━━━━━━━━━━━━★━━━━━━━━━━━━━━━━━━━━★
def start(update, context):
update.message.reply_text("Thank for using this bot, in here, you can try every functions provided.To see more functions, type /help")
# ★━━━━━━━━━━━━━━━━━━━━★━━━━━━━━━━━━━━━━━━━━★━━━━━━━━━━━━━━━━━━━━★━━━━━━━━━━━━━━━━━━━━★━━━━━━━━━━━━━━━━━━━━★
def music(update, context):
update.message.reply_text("Send youtube link here to play")
return ONE
# ★━━━━━━━━━━━━━━━━━━━━★━━━━━━━━━━━━━━━━━━━━★━━━━━━━━━━━━━━━━━━━━★━━━━━━━━━━━━━━━━━━━━★━━━━━━━━━━━━━━━━━━━━★
def music_reply(update, context):
if update.message.text.startswith("/"):
return ConversationHandler.END
elif "youtu" not in update.message.text:
update.message.reply_text("This is not an effective youtube link")
else:
single_song_downloader(update, update.message.text)
# ★━━━━━━━━━━━━━━━━━━━━★━━━━━━━━━━━━━━━━━━━━★━━━━━━━━━━━━━━━━━━━━★━━━━━━━━━━━━━━━━━━━━★━━━━━━━━━━━━━━━━━━━━★
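# Download a single YouTube video as MP3 via youtube_dl and send it to the chat; if the URL is a playlist, ask how many of its songs to download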
def single_song_downloader(update, url):
try: # playlist
playlist = Playlist(url)
global links
links = playlist.video_urls # lists of url
update.message.reply_text(f"Songs in playlist: {len(links)}")
update.message.reply_text("How many songs would you like to download? You cannot use other function until songs are all downloaded",
reply_markup=InlineKeyboardMarkup([
[InlineKeyboardButton("5", callback_data="5"), InlineKeyboardButton("10", callback_data="10")],
[InlineKeyboardButton("15", callback_data="15"), InlineKeyboardButton("20", callback_data="20")],
[InlineKeyboardButton("Download all, I don't mind to wait", callback_data="all")],
]))
return ONE
except: # single song
global name
chat_id = update.message.from_user.id
if "youtu" in url:
try:
ydl_opts = {
'outtmpl': '%(title)s.%(ext)s',
'format': 'bestaudio',
'noplaylist': True,
'writethumbnail': True,
'postprocessors': [{
'key': 'FFmpegExtractAudio',
'preferredcodec': 'mp3',
'preferredquality': '190',
}],
}
with youtube_dl.YoutubeDL(ydl_opts) as ydl:
info = ydl.extract_info(url)
name = info['title']
audio = open(name + '.mp3', 'r+b')
bot.send_audio(chat_id=chat_id, audio=audio)
audio.close()
os.remove(name + '.mp3')
except:
update.message.reply_text(f"Maybe this not the good time to download this song: '\n{name}.mp3'")
else:
update.message.reply_text('This does not look like a YouTube link')
update.message.reply_text("Finish!")
# ★━━━━━━━━━━━━━━━━━━━━★━━━━━━━━━━━━━━━━━━━━★━━━━━━━━━━━━━━━━━━━━★━━━━━━━━━━━━━━━━━━━━★━━━━━━━━━━━━━━━━━━━━★
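# Download a randomly ordered subset of the playlist (5/10/15/20 songs or all, as chosen on the inline keyboard) and send each one as MP3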
def playlist_downloader(update, urls): # playlist
global name, num
query = update.callback_query
query.answer("Okay")
chat_id = query.from_user.id
orders = [i for i in range(len(links))]
for i in range(5):
random.shuffle(orders)
if query.data == "5":
num = 5
elif query.data == "10":
num = 10
elif query.data == "15":
num = 15
elif query.data == "20":
num = 20
elif query.data == "all":
num = len(links)
for i in range(num):
link = links[orders[i]]
if "youtu" in link:
try:
ydl_opts = {
'outtmpl': '%(title)s.%(ext)s',
'format': 'bestaudio',
'noplaylist': True,
'writethumbnail': True,
'postprocessors': [{
'key': 'FFmpegExtractAudio',
'preferredcodec': 'mp3',
'preferredquality': '190',
}],
}
with youtube_dl.YoutubeDL(ydl_opts) as ydl:
info = ydl.extract_info(link)
name = info['title']
# follows YouTube's file-naming convention
audio = open(name + '.mp3', 'r+b')
bot.send_audio(chat_id=chat_id, audio=audio)
audio.close()
os.remove(name + '.mp3')
except:
query.message.reply_text("Cannot download this song")
else:
query.message.reply_text('This does not look like a YouTube link')
# ★━━━━━━━━━━━━━━━━━━━━★━━━━━━━━━━━━━━━━━━━━★━━━━━━━━━━━━━━━━━━━━★━━━━━━━━━━━━━━━━━━━━★━━━━━━━━━━━━━━━━━━━━★
def test(update, context):
update.message.reply_text("Coming soon 🙇♂!!")
# ★━━━━━━━━━━━━━━━━━━━━★━━━━━━━━━━━━━━━━━━━━★━━━━━━━━━━━━━━━━━━━━★━━━━━━━━━━━━━━━━━━━━★━━━━━━━━━━━━━━━━━━━━★
def stock(update, context):
update.message.reply_text("Which searching engine would you like to use?",
reply_markup=InlineKeyboardMarkup([
[InlineKeyboardButton("Yahoo Finance", callback_data="Yahoo Finance")],
[InlineKeyboardButton("Investing.com", callback_data="Investing.com")],
[InlineKeyboardButton("AAStocks", callback_data="AAStocks")],
]))
return ONE
# ★━━━━━━━━━━━━━━━━━━━━★━━━━━━━━━━━━━━━━━━━━★━━━━━━━━━━━━━━━━━━━━★━━━━━━━━━━━━━━━━━━━━★━━━━━━━━━━━━━━━━━━━━★
def stock_agent(update, context):
global Agent
query = update.callback_query
query.answer("Okay")
if query.data == "Yahoo Finance":
Agent = "Yahoo Finance"
query.message.reply_text("Send me the code or name of the stock")
elif query.data == "Investing.com":
Agent = "Investing.com"
query.message.reply_text("Send me the code or name of the stock")
elif query.data == "AAStocks":
Agent = "AAStocks"
query.message.reply_text("Send me the code or name of the stock")
return TWO
# ★━━━━━━━━━━━━━━━━━━━━★━━━━━━━━━━━━━━━━━━━━★━━━━━━━━━━━━━━━━━━━━★━━━━━━━━━━━━━━━━━━━━★━━━━━━━━━━━━━━━━━━━━★
def stock_reply(update, context):
if not update.message.text.startswith("/"):
update.message.reply_text(f"Checking: {update.message.text}")
if Agent == "Yahoo Finance":
yahoo_stock(update, update.message.text)
elif Agent == "Investing.com":
investing_stock(update, update.message.text)
elif Agent == "AAStocks":
aastocks_stock(update, update.message.text)
else:
return ConversationHandler.END
# ★━━━━━━━━━━━━━━━━━━━━★━━━━━━━━━━━━━━━━━━━━★━━━━━━━━━━━━━━━━━━━━★━━━━━━━━━━━━━━━━━━━━★━━━━━━━━━━━━━━━━━━━━★
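# Query Yahoo Finance's chart API for intraday prices of the requested ticker and reply with a price summary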
def yahoo_stock(update, stock_code):
try:
url = f"https://partner-query.finance.yahoo.com/v8/finance/chart/{stock_code}?range=1d&comparisons=undefined&includePrePost=false&interval=2m&corsDomain=tw.stock.yahoo.com&.tsrc=yahoo-tw"
# Spoof a browser User-Agent to avoid HTTP 403 errors
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko)'}
response = requests.get(url, headers=headers)
json_data = response.json()
open_ = json_data['chart']['result'][0]['meta']['chartPreviousClose'] # opening price
close_ = json_data['chart']['result'][0]['meta']['regularMarketPrice'] # closing price
current_price_ = json_data['chart']['result'][0]['indicators']['quote'][0]['close'][-1] # current price
high_ = max(json_data['chart']['result'][0]['indicators']['quote'][0]['high']) # highest price
low_ = min(json_data['chart']['result'][0]['indicators']['quote'][0]['low']) # lowest price
update.message.reply_text(
f"Current: ${round(current_price_, 2)}, Open: $ {round(open_, 2)}, Close: ${round(close_, 2)}, \nHighest price: ${round(high_, 2)}, Lowest price: ${round(low_, 2)}")
price_level = json_data['chart']['result'][0]['indicators']['quote'][0]['close'] # price levels
timestamp = json_data['chart']['result'][0]['timestamp'] # timestamps
df = pd.DataFrame({'timestamp': timestamp, 'price_level': price_level})
df['timestamp'] = pd.to_datetime(df['timestamp'] + 3600 * 8, unit='s')
import argparse
import json
import os
import shutil
import sys
from os import path as osp
import h5py
import numpy as np
import pandas
sys.path.append(osp.join(osp.dirname(osp.abspath(__file__)), '..'))
from preprocessing.gen_dataset_v2 import compute_output_time, process_data_source
'''
HDF5 data format
* data.hdf5
|---raw
| |---tango
| |---gyro, gyro_uncalib, acce, magnet, game_rv, gravity, linacce, step, tango_pose, rv,
pressure, (optional) wifi, gps, magnetic_rv, magnet_uncalib
| |--- imu
| |---gyro, gyro_uncalib, acce, magnet, game_rv, gravity, linacce, step. rv, pressure, (optional) wifi,
gps, magnetic_rv, magnet_uncalib
|--synced
| |---gyro, gyro_uncalib, acce, magnet, game_rv, rv, gravity, linacce, step
|---pose
| |---tango_pos, tango_ori, (optional)ekf_ori
The HDF5 file stores all data. The "raw" subgroup stores all unprocessed data. The "synced" subgroup stores synchronized data
(previously stored as "processed/data.pkl"). The "pose" subgroup stores all pose information, including the corrected tango pose
and (optional) EKF orientation.
* info.json
Stores meta information, such as reference time, synchronization, calibration and orientation errors.
To read a HDF5 dataset:
import h5py
with h5py.File(<path-to-hdf5-file>) as f:
gyro = f['synced/gyro']
acce = f['synced/acce']
tango_pos = f['pose/tango_pos']
tango_ori = f['pose/tango_ori']
.....
NOTICE: the HDF5 library will not read the data until it's actually used. For example, all data domains in the
above code are NOT actually read from the disk. This means that if you try to access "gyro" or "acce"
etc. after the "with" closure is released, an error will occur. To avoid this issue, use:
gyro = np.copy(f['synced/gyro'])
to force reading.
'''
_raw_data_sources = ['gyro', 'gyro_uncalib', 'acce', 'magnet', 'game_rv', 'linacce', 'gravity', 'step', 'rv',
'pressure']
_optional_data_sources = ['wifi', 'gps', 'magnetic_rv', 'magnet_uncalib']
_synced_columns = {'time': 'time',
'gyro': ['gyro_x', 'gyro_y', 'gyro_z'],
'gyro_uncalib': ['gyro_uncalib_x', 'gyro_uncalib_y', 'gyro_uncalib_z'],
'acce': ['acce_x', 'acce_y', 'acce_z'],
'magnet': ['magnet_x', 'magnet_y', 'magnet_z'],
'game_rv': ['game_rv_w', 'game_rv_x', 'game_rv_y', 'game_rv_z'],
'rv': ['rv_w', 'rv_x', 'rv_y', 'rv_z'],
'grav': ['grav_x', 'grav_y', 'grav_z'],
'linacce': ['linacce_x', 'linacce_y', 'linacce_z']}
_device_list = ['asus1', 'asus2', 'asus3', 'pixel', 'samsung1', 'samsung2']
_nano_to_sec = 1e09
_micro_to_nano = 1000
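# Parse a wifi scan log into a DataFrame with scan id, timestamp, BSSID and signal level columns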
def load_wifi_dataset(path):
columns = ['scan', 'last_timestamp', 'BSSID', 'level']
df = pandas.DataFrame(columns=columns)
from numpy import nan
from pandas import DataFrame, Timestamp
from pandas.testing import assert_frame_equal
from shapely.geometry import Point
from pymove import MoveDataFrame, conversions
from pymove.utils.constants import (
DATETIME,
DIST_TO_PREV,
GEOMETRY,
LATITUDE,
LONGITUDE,
SPEED_TO_PREV,
TIME_TO_PREV,
TRAJ_ID,
)
list_data = [
[39.984094, 116.319236, '2008-10-23 05:53:05', 1],
[39.984198, 116.319322, '2008-10-23 05:53:06', 1],
[39.984224, 116.319402, '2008-10-23 05:53:11', 1],
[39.984224, 116.319402, '2008-10-23 05:53:11', 1],
]
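# Build the small MoveDataFrame fixture shared by the conversion tests below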
def _default_move_df():
return MoveDataFrame(
data=list_data,
latitude=LATITUDE,
longitude=LONGITUDE,
datetime=DATETIME,
traj_id=TRAJ_ID,
)
def test_lat_meters():
expected = 98224.0229295811
lat_in_meters = conversions.lat_meters(39.984094)
assert(lat_in_meters == expected)
def test_list_to_str():
expected = 'banana,maca,laranja'
joined_list = conversions.list_to_str(['banana', 'maca', 'laranja'])
assert(joined_list == expected)
def test_list_to_csv_str():
expected = 'banana 1:maca 2:laranja'
joined_list = conversions.list_to_svm_line(['banana', 'maca', 'laranja'])
assert(joined_list == expected)
def test_lon_to_x_spherical():
expected = -4285978.172767829
assert(conversions.lon_to_x_spherical(-38.501597) == expected)
def test_lat_to_y_spherical():
expected = -423086.2213610324
assert(conversions.lat_to_y_spherical(-3.797864) == expected)
def test_x_to_lon_spherical():
expected = -38.50159697513617
assert(conversions.x_to_lon_spherical(-4285978.17) == expected)
def test_y_to_lat_spherical():
expected = -35.89350841198311
assert(conversions.y_to_lat_spherical(-4285978.17) == expected)
def test_geometry_points_to_lat_and_lon():
move_df = DataFrame(
data=[['1', Point(116.36184, 39.77529)],
['2', Point(116.36298, 39.77564)],
['3', Point(116.33767, 39.83148)]],
columns=[TRAJ_ID, GEOMETRY],
)
expected_geometry_drop = DataFrame(
data=[['1', 116.36184, 39.77529],
['2', 116.36298, 39.77564],
['3', 116.33767, 39.83148]],
columns=[TRAJ_ID, LONGITUDE, LATITUDE]
)
expected_with_geometry = DataFrame(
data=[['1', Point(116.36184, 39.77529), 116.36184, 39.77529],
['2', Point(116.36298, 39.77564), 116.36298, 39.77564],
['3', Point(116.33767, 39.83148), 116.33767, 39.83148]],
columns=[TRAJ_ID, GEOMETRY, LONGITUDE, LATITUDE]
)
new_move_df = conversions.geometry_points_to_lat_and_lon(
move_df, inplace=False, drop_geometry=True
)
assert_frame_equal(new_move_df, expected_geometry_drop)
new_move_df2 = conversions.geometry_points_to_lat_and_lon(
move_df, inplace=False, drop_geometry=False
)
assert_frame_equal(new_move_df2, expected_with_geometry)
def test_lat_and_lon_decimal_degrees_to_decimal():
move_df = DataFrame(
data=[['0', '28.0N', '94.8W'],
['1', '41.3N', '50.4W'],
['1', '40.8N', '47.5W']],
columns=['id', 'lat', 'lon']
)
expected = DataFrame(
data=[['0', 28.0, -94.8],
['1', 41.3, -50.4],
['1', 40.8, -47.5]],
columns=['id', 'lat', 'lon'],
)
new_move_df = conversions.lat_and_lon_decimal_degrees_to_decimal(move_df)
assert_frame_equal(new_move_df, expected)
def test_ms_to_kmh():
move_df = _default_move_df()
expected = DataFrame(
data=[
[
1,
39.984094,
116.319236,
Timestamp('2008-10-23 05:53:05'),
nan,
nan,
nan
],
[
1,
39.984198,
116.319322,
Timestamp('2008-10-23 05:53:06'),
13.690153,
1.0,
49.284551
],
[
1,
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
7.403788,
5.0,
5.330727
],
[
1,
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
0.000000,
0.0,
nan],
],
columns=[TRAJ_ID,
LATITUDE,
LONGITUDE,
DATETIME,
DIST_TO_PREV,
TIME_TO_PREV,
SPEED_TO_PREV],
index=[0, 1, 2, 3],
)
new_move_df = conversions.ms_to_kmh(move_df, inplace=False)
assert_frame_equal(new_move_df, expected)
conversions.ms_to_kmh(move_df, new_label='converted_speed', inplace=True)
expected.rename(columns={SPEED_TO_PREV: 'converted_speed'}, inplace=True)
assert_frame_equal(move_df, expected)
def test_kmh_to_ms():
move_df = _default_move_df()
expected = DataFrame(
data=[
[
1,
39.984094,
116.319236,
Timestamp('2008-10-23 05:53:05'),
nan,
nan,
nan
],
[
1,
39.984198,
116.319322,
Timestamp('2008-10-23 05:53:06'),
13.690153,
1.0,
13.690153
],
[
1,
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
7.403788,
5.0,
1.480758
],
[
1,
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
0.000000,
0.0,
nan],
],
columns=[TRAJ_ID,
LATITUDE,
LONGITUDE,
DATETIME,
DIST_TO_PREV,
TIME_TO_PREV,
SPEED_TO_PREV],
index=[0, 1, 2, 3],
)
new_move_df = conversions.kmh_to_ms(move_df, inplace=False)
assert_frame_equal(new_move_df, expected)
conversions.kmh_to_ms(move_df, new_label='converted_speed', inplace=True)
expected.rename(columns={SPEED_TO_PREV: 'converted_speed'}, inplace=True)
assert_frame_equal(move_df, expected)
def test_meters_to_kilometers():
move_df = _default_move_df()
expected = DataFrame(
data=[
[
1,
39.984094,
116.319236,
Timestamp('2008-10-23 05:53:05'),
nan,
nan,
nan
],
[
1,
39.984198,
116.319322,
Timestamp('2008-10-23 05:53:06'),
0.013690153134343689,
1.0,
13.690153
],
[
1,
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
0.007403787866531697,
5.0,
1.480758
],
[
1,
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
0.0,
0.0,
nan],
],
columns=[TRAJ_ID,
LATITUDE,
LONGITUDE,
DATETIME,
DIST_TO_PREV,
TIME_TO_PREV,
SPEED_TO_PREV],
index=[0, 1, 2, 3],
)
new_move_df = conversions.meters_to_kilometers(move_df, inplace=False)
assert_frame_equal(new_move_df, expected)
conversions.meters_to_kilometers(
move_df, new_label='converted_distance', inplace=True
)
expected.rename(columns={DIST_TO_PREV: 'converted_distance'}, inplace=True)
assert_frame_equal(move_df, expected)
def test_kilometers_to_meters():
move_df = _default_move_df()
expected = DataFrame(
data=[
[
1,
39.984094,
116.319236,
Timestamp('2008-10-23 05:53:05'),
nan,
nan,
nan
],
[
1,
39.984198,
116.319322,
Timestamp('2008-10-23 05:53:06'),
13.690153134343689,
1.0,
13.690153
],
[
1,
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
7.403787866531697,
5.0,
1.480758
],
[
1,
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
0.0,
0.0,
nan],
],
columns=[TRAJ_ID,
LATITUDE,
LONGITUDE,
DATETIME,
DIST_TO_PREV,
TIME_TO_PREV,
SPEED_TO_PREV],
index=[0, 1, 2, 3],
)
new_move_df = conversions.kilometers_to_meters(move_df, inplace=False)
assert_frame_equal(new_move_df, expected)
conversions.kilometers_to_meters(
move_df, new_label='converted_distance', inplace=True
)
expected.rename(columns={DIST_TO_PREV: 'converted_distance'}, inplace=True)
assert_frame_equal(move_df, expected)
def test_seconds_to_minutes():
move_df = _default_move_df()
expected = DataFrame(
data=[
[
1,
39.984094,
116.319236,
Timestamp('2008-10-23 05:53:05'),
nan,
nan,
nan
],
[
1,
39.984198,
116.319322,
Timestamp('2008-10-23 05:53:06'),
13.690153134343689,
0.016666666666666666,
13.690153
],
[
1,
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
7.403787866531697,
0.08333333333333333,
1.480758
],
[
1,
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
0.0,
0.0,
nan],
],
columns=[TRAJ_ID,
LATITUDE,
LONGITUDE,
DATETIME,
DIST_TO_PREV,
TIME_TO_PREV,
SPEED_TO_PREV],
index=[0, 1, 2, 3],
)
new_move_df = conversions.seconds_to_minutes(move_df, inplace=False)
assert_frame_equal(new_move_df, expected)
conversions.seconds_to_minutes(move_df, new_label='converted_time', inplace=True)
expected.rename(columns={TIME_TO_PREV: 'converted_time'}, inplace=True)
assert_frame_equal(move_df, expected)
def test_minute_to_seconds():
move_df = _default_move_df()
expected = DataFrame(
data=[
[
1,
39.984094,
116.319236,
Timestamp('2008-10-23 05:53:05'),
nan,
nan,
nan
],
[
1,
39.984198,
116.319322,
Timestamp('2008-10-23 05:53:06'),
13.690153134343689,
1.0,
13.690153
],
[
1,
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
7.403787866531697,
5.0,
1.480758
],
[
1,
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
0.0,
0.0,
nan],
],
columns=[TRAJ_ID,
LATITUDE,
LONGITUDE,
DATETIME,
DIST_TO_PREV,
TIME_TO_PREV,
SPEED_TO_PREV],
index=[0, 1, 2, 3],
)
new_move_df = conversions.minute_to_seconds(move_df, inplace=False)
assert_frame_equal(new_move_df, expected)
conversions.minute_to_seconds(move_df, new_label='converted_time', inplace=True)
expected.rename(columns={TIME_TO_PREV: 'converted_time'}, inplace=True)
assert_frame_equal(move_df, expected)
def test_minute_to_hours():
move_df = _default_move_df()
expected = DataFrame(
data=[
[
1,
39.984094,
116.319236,
Timestamp('2008-10-23 05:53:05'),
nan,
nan,
nan
],
[
1,
39.984198,
116.319322,
Timestamp('2008-10-23 05:53:06'),
13.690153134343689,
0.0002777777777777778,
13.690153
],
[
1,
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
7.403787866531697,
0.0013888888888888887,
1.480758
],
[
1,
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
0.0,
0.0,
nan],
],
columns=[TRAJ_ID,
LATITUDE,
LONGITUDE,
DATETIME,
DIST_TO_PREV,
TIME_TO_PREV,
SPEED_TO_PREV],
index=[0, 1, 2, 3],
)
new_move_df = conversions.minute_to_hours(move_df, inplace=False)
assert_frame_equal(new_move_df, expected)
conversions.minute_to_hours(move_df, new_label='converted_time', inplace=True)
expected.rename(columns={TIME_TO_PREV: 'converted_time'}, inplace=True)
assert_frame_equal(move_df, expected)
def test_hours_to_minute():
move_df = _default_move_df()
expected = DataFrame(
data=[
[
1,
39.984094,
116.319236,
Timestamp('2008-10-23 05:53:05'),
nan,
nan,
nan
],
[
1,
39.984198,
116.319322,
Timestamp('2008-10-23 05:53:06'),
13.690153134343689,
0.016666666666666666,
13.690153
],
[
1,
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
7.403787866531697,
0.08333333333333334,
1.480758
],
[
1,
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
0.0,
0.0,
nan],
],
columns=[TRAJ_ID,
LATITUDE,
LONGITUDE,
DATETIME,
DIST_TO_PREV,
TIME_TO_PREV,
SPEED_TO_PREV],
index=[0, 1, 2, 3],
)
new_move_df = conversions.hours_to_minute(move_df, inplace=False)
assert_frame_equal(new_move_df, expected)
conversions.hours_to_minute(move_df, new_label='converted_time', inplace=True)
expected.rename(columns={TIME_TO_PREV: 'converted_time'}, inplace=True)
assert_frame_equal(move_df, expected)
def test_seconds_to_hours():
move_df = _default_move_df()
expected = DataFrame(
data=[
[
1,
39.984094,
116.319236,
Timestamp('2008-10-23 05:53:05'),
nan,
nan,
nan
],
[
1,
39.984198,
116.319322,
Timestamp('2008-10-23 05:53:06'),
13.690153134343689,
0.0002777777777777778,
13.690153
],
[
1,
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
7.403787866531697,
0.001388888888888889,
1.480758
],
[
1,
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
0.0,
0.0,
nan],
],
columns=[TRAJ_ID,
LATITUDE,
LONGITUDE,
DATETIME,
DIST_TO_PREV,
TIME_TO_PREV,
SPEED_TO_PREV],
index=[0, 1, 2, 3],
)
new_move_df = conversions.seconds_to_hours(move_df, inplace=False)
assert_frame_equal(new_move_df, expected)
import os
import sys
sys.path.append('.')
import time
import glob
import argparse
import cv2
import pytesseract
import datetime
import pandas as pd
import numpy as np
from PIL import Image, ImageOps, ImageFilter, ImageEnhance
import tensorflow as tf
###################################################
# Data Management Class #
# #
# 1. Load h5 files must be stored in train and #
# test folders under the data_dir. #
# 2. Mode must be delivered in order to get #
# ground truth. #
###################################################
class DataManagement(object):
def __init__(self, cfg, brighten_range=0.4):
super(DataManagement, self).__init__()
# init parameters
self.cfg = cfg
self.brighten_range = brighten_range
# load and preprocess data
self.load_data()
self.dilute_data()
self.crop_and_resize_images()
# change images format to numpy
self.transform_images_to_arrays()
self.define_ground_truth()
if self.cfg.data.use_fk or self.cfg.data.use_fd: # prepare arm lengths for forward kinematics loss function
self.compute_arm_lengths()
self.prepare_samples()
if self.cfg.use_dev:
self.prepare_device_data()
else:
self.remove_device_data()
if self.cfg.data.weighted_sampling:
self.prepare_samples_weighting()
self.shuffle_data()
if not self.cfg.data.single_test:
print('Total samples for train: {}.\nTotal samples for test: {}.'.format(len(self.train_df),len(self.test_df)))
else:
print('Total samples for single test: {}.'.format(len(self.test_df)))
# perform normalization
self.normalize_data()
# just a sanity check
self.remove_nan_values()
# generator output types
if self.cfg.mode == 'us2conf2multimidi' or self.cfg.mode == 'us2conf2multikey':
output_types = (tf.float64, tf.float64, tf.float64, tf.int32)
elif self.cfg.mode == 'us2multimidi' or self.cfg.mode == 'us2multikey':
output_types = (tf.float64, tf.int32)
else:
output_types = (tf.float64, tf.float64, tf.float64)
# create generators
self.train_gen = tf.data.Dataset.from_generator(self.train_generator, output_types=output_types).batch(self.cfg.training.batch_size)
self.test_gen = tf.data.Dataset.from_generator(self.test_generator, output_types=output_types).batch(self.cfg.training.batch_size)
# load all the h5 files for training and testing
def load_data(self):
print('Loading dataset...')
train_dfs, test_dfs = [], []
# load train h5 files
for h5_file in [os.path.join(self.cfg.data.path, x) for x in self.cfg.data.train_files]:
# check if file exist
if os.path.isfile(h5_file):
train_dfs.append(pd.read_hdf(path_or_buf=h5_file,key='df'))
train_dfs[-1]['sample_index'] = train_dfs[-1].index
else:
print(f'{h5_file} was not found!')
# load test hf files. make sure to load only one of them if asked
for h5_file in [os.path.join(self.cfg.data.path, x) for x in self.cfg.data.test_files]:
if os.path.isfile(h5_file):
test_dfs.append(pd.read_hdf(path_or_buf=h5_file,key='df'))
test_dfs[-1]['sample_index'] = test_dfs[-1].index
else:
print(f'{h5_file} was not found!')
# share test sessions with train sessions by moving part of each dataframe to train if asked
if self.cfg.data.share_train > 0.0:
# define k-fold
if self.cfg.data.kfold is not None:
k_fold_size = 1 - self.cfg.data.share_train
for i in range(len(test_dfs)):
# define fold start and end indices
fold_start_idx = int(self.cfg.data.kfold * (k_fold_size * len(test_dfs[i])))
fold_end_idx = int((self.cfg.data.kfold + 1) * (k_fold_size * len(test_dfs[i])))
# separate training samples and add them to the list
train_df_bfold = test_dfs[i].iloc[:fold_start_idx]
train_df_afold = test_dfs[i].iloc[fold_end_idx:]
if len(train_df_bfold) > 0:
train_dfs.append(train_df_bfold)
if len(train_df_afold) > 0:
train_dfs.append(train_df_afold)
test_dfs[i] = test_dfs[i].iloc[fold_start_idx:fold_end_idx]
else:
for i in range(len(test_dfs)):
train_dfs.append(test_dfs[i].iloc[:int(len(test_dfs[i]) * self.cfg.data.share_train)])
test_dfs[i] = test_dfs[i].iloc[int(len(test_dfs[i]) * self.cfg.data.share_train):]
# leave samples out of test session if asked
if self.cfg.data.leave_out_test > 0.0:
for i in range(len(test_dfs)):
test_dfs[i] = test_dfs[i].iloc[int(len(test_dfs[i]) * self.cfg.data.leave_out_test):]
# concatenate them into single dataframes
if not self.cfg.data.single_test:
self.train_df = pd.concat(train_dfs)
else:
self.train_df = pd.DataFrame()
self.test_df = pd.concat(test_dfs)
# dilute data by keeping only every stride-th sample
def dilute_data(self):
if self.cfg.data.stride > 1:
if not self.cfg.data.single_test:
self.train_df = self.train_df.iloc[::self.cfg.data.stride,:]
self.test_df = self.test_df.iloc[::self.cfg.data.stride,:]
# resize images in dataset, after cropping them if they were not squared
def crop_and_resize_images(self):
if self.cfg.use_imgs:
print('Cropping and resizing images...')
# crop image if needed - width
if len(self.train_df) > 0:
img_size = self.train_df.img.iloc[0].size
else:
img_size = self.test_df.img.iloc[0].size
if img_size[0] > img_size[1]:
offset = img_size[0] - img_size[1]
if not self.cfg.data.single_test:
self.train_df['img'] = self.train_df['img'].apply(lambda x: x.crop((offset//2, 0, img_size[0] - offset//2, img_size[1])))
self.test_df['img'] = self.test_df['img'].apply(lambda x: x.crop((offset//2, 0, img_size[0] - offset//2, img_size[1])))
# crop image if needed - height
if img_size[0] < img_size[1]:
offset = img_size[1] - img_size[0]
if not self.cfg.data.single_test:
self.train_df['img'] = self.train_df['img'].apply(lambda x: x.crop((0, offset//2, img_size[0], img_size[1] - offset//2)))
self.test_df['img'] = self.test_df['img'].apply(lambda x: x.crop((0, offset//2, img_size[0], img_size[1] - offset//2)))
# resize image if asked
if self.cfg.data.res > 0:
if not self.cfg.data.single_test:
self.train_df['img'] = self.train_df['img'].apply(lambda x: x.resize((self.cfg.data.res,self.cfg.data.res)))
self.test_df['img'] = self.test_df['img'].apply(lambda x: x.resize((self.cfg.data.res,self.cfg.data.res)))
else:
print('Removing images...')
# remove images from all dataframes in case training does not require images
if not self.cfg.data.single_test:
self.train_df.drop(columns=['img'], inplace=True)
self.test_df.drop(columns=['img'], inplace=True)
def compute_arm_lengths(self):
# compute mean arm lengths
if self.cfg.data.joints_version == '3' or self.cfg.data.joints_version == '4':
self.arm_lengths = np.array([[self.get_arm_length('finger41','finger42'), self.get_arm_length('finger42','finger43'), self.get_arm_length('finger43','finger44')],
[self.get_arm_length('finger31','finger32'), self.get_arm_length('finger32','finger33'), self.get_arm_length('finger33','finger34')],
[self.get_arm_length('finger21','finger22'), self.get_arm_length('finger22','finger23'), self.get_arm_length('finger23','finger24')],
[self.get_arm_length('finger11','finger12'), self.get_arm_length('finger12','finger13'), self.get_arm_length('finger13','finger14')],
[self.get_arm_length('thumb5','thumb6'), self.get_arm_length('thumb6','thumb7'), 0.0]])
else: # self.cfg.data.joints_version == 1:
self.arm_lengths = np.array([[self.get_arm_length('finger41','finger42'), self.get_arm_length('finger42','finger43')],
[self.get_arm_length('finger31','finger32'), self.get_arm_length('finger32','finger33')],
[self.get_arm_length('finger21','finger22'), self.get_arm_length('finger22','finger23')],
[self.get_arm_length('finger11','finger12'), self.get_arm_length('finger12','finger13')],
[self.get_arm_length('thumb2','thumb3'), self.get_arm_length('thumb3','thumb4')]])
# compute mean arm length given two link names
def get_arm_length(self, link_1, link_2):
if not self.cfg.data.single_test:
return self.train_df.apply(lambda x: np.linalg.norm(x[link_1] - x[link_2]), axis=1).mean()
return self.test_df.apply(lambda x: np.linalg.norm(x[link_1] - x[link_2]), axis=1).mean()
# append images and labels to create stacked inputs and outputs
def prepare_samples(self):
print('Preparing samples...')
# prepare columns for concatenation
concatenated_labels = self.labels_names.copy()
if self.cfg.use_imgs:
concatenated_labels += ['img']
if not self.cfg.data.single_test:
self.train_df[concatenated_labels] = self.train_df[concatenated_labels].applymap(lambda x: [x])
self.test_df[concatenated_labels] = self.test_df[concatenated_labels].applymap(lambda x: [x])
# create new column for stacked images
if self.cfg.use_imgs:
if not self.cfg.data.single_test:
self.train_df['imgs'] = self.train_df['img']
self.test_df['imgs'] = self.test_df['img']
# define labels names for stacked and derived output
self.labels_names_stacked = [x+'s' for x in self.labels_names]
# create timestamp as x-space for gradient computation
if not self.cfg.data.single_test:
self.train_df['timestamp'] = (self.train_df.datetime.astype(int) / 1e6).astype(int).apply(lambda x: [x])
self.test_df['timestamp'] = (self.test_df.datetime.astype(int) / 1e6).astype(int).apply(lambda x: [x])
# create new columns for stacked ground truth
if len(self.labels_names_stacked) > 0:
if not self.cfg.data.single_test:
self.train_df[self.labels_names_stacked] = self.train_df[self.labels_names]
train_df_temp = self.train_df.copy()
self.test_df[self.labels_names_stacked] = self.test_df[self.labels_names]
test_df_temp = self.test_df.copy()
# shift by the step size to append samples together
for i in range(self.cfg.data.step,self.cfg.data.append*self.cfg.data.step,self.cfg.data.step):
# append images
if self.cfg.use_imgs:
if not self.cfg.data.single_test:
self.train_df['imgs'] = self.train_df.shift(i)['img'] + self.train_df['imgs']
self.test_df['imgs'] = self.test_df.shift(i)['img'] + self.test_df['imgs']
# append labels
if len(self.labels_names_stacked) > 0:
if not self.cfg.data.single_test:
self.train_df[self.labels_names_stacked] = train_df_temp.shift(i)[self.labels_names_stacked] + self.train_df[self.labels_names_stacked]
self.test_df[self.labels_names_stacked] = test_df_temp.shift(i)[self.labels_names_stacked] + self.test_df[self.labels_names_stacked]
# append timestamps
if not self.cfg.data.single_test:
self.train_df['timestamp'] = train_df_temp.shift(i)['timestamp'] + self.train_df['timestamp']
self.test_df['timestamp'] = test_df_temp.shift(i)['timestamp'] + self.test_df['timestamp']
# drop rows with missing information
if not self.cfg.data.single_test:
self.train_df = self.train_df.iloc[self.cfg.data.append*self.cfg.data.step-1:]
self.test_df = self.test_df.iloc[self.cfg.data.append*self.cfg.data.step-1:]
# convert labels to numpy for future computations
np_labels = self.labels_names_stacked + ['timestamp']
if not self.cfg.data.single_test:
self.train_df[np_labels] = self.train_df[np_labels].applymap(lambda x: np.array(x))
self.test_df[np_labels] = self.test_df[np_labels].applymap(lambda x: np.array(x))
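    # Sketch of the stacking done in prepare_samples above, assuming append=3 and
    # step=2: after the loop each row's stacked label holds [y(t-4), y(t-2), y(t)],
    # because shift(i) prepends the single-element lists of earlier rows to the
    # current row's list, and the first append*step-1 rows (incomplete stacks) are dropped.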
def prepare_device_data(self):
# set device input names and choose label name based on requested device
self.device_input_pos_names = ['thumb3_dev', 'thumb4_dev']
self.device_input_vel_names = ['thumb3_der', 'thumb4_der']
self.device_input_acc_names = ['thumb3_der_der', 'thumb4_der_der']
self.device_input_names = self.device_input_pos_names + self.device_input_vel_names + self.device_input_acc_names
if self.cfg.mode == 'us2multimidi' or self.cfg.mode == 'us2conf2multimidi':
self.device_label_name = 'notes_multi'
else: # self.cfg.mode == 'us2multikey' or self.cfg.mode == 'us2conf2multikey':
self.device_label_name = 'keys_multi'
self.device_label_name_stacked = self.device_label_name + 's'
self.device_input_pos_names_stacked = [x+'s' for x in self.device_input_pos_names]
self.device_input_vel_names_stacked = [x+'s' for x in self.device_input_vel_names]
self.device_input_acc_names_stacked = [x+'s' for x in self.device_input_acc_names]
self.device_input_names_stacked = [x+'s' for x in self.device_input_names]
# prepare columns for concatenation
if not self.cfg.data.single_test:
self.train_df[self.device_label_name] = self.train_df[self.device_label_name].apply(lambda x: [x])
self.train_df[self.device_input_names] = self.train_df[self.device_input_names].applymap(lambda x: [x])
self.test_df[self.device_label_name] = self.test_df[self.device_label_name].apply(lambda x: [x])
self.test_df[self.device_input_names] = self.test_df[self.device_input_names].applymap(lambda x: [x])
# create new column for stacked notes and backup dataframes
if not self.cfg.data.single_test:
self.train_df[self.device_label_name_stacked] = self.train_df[self.device_label_name]
self.train_df[self.device_input_names_stacked] = self.train_df[self.device_input_names]
train_df_temp = self.train_df.copy()
self.test_df[self.device_label_name_stacked] = self.test_df[self.device_label_name]
self.test_df[self.device_input_names_stacked] = self.test_df[self.device_input_names]
test_df_temp = self.test_df.copy()
# shift by the step size to append device labels together
for i in range(self.cfg.data.step,self.cfg.data.append*self.cfg.data.step,self.cfg.data.step):
# append device labels
if not self.cfg.data.single_test:
self.train_df[self.device_label_name_stacked] = train_df_temp.shift(i)[self.device_label_name_stacked] + self.train_df[self.device_label_name_stacked]
self.test_df[self.device_label_name_stacked] = test_df_temp.shift(i)[self.device_label_name_stacked] + self.test_df[self.device_label_name_stacked]
# append device inputs
if not self.cfg.data.single_test:
self.train_df[self.device_input_names_stacked] = train_df_temp.shift(i)[self.device_input_names_stacked] + self.train_df[self.device_input_names_stacked]
self.test_df[self.device_input_names_stacked] = test_df_temp.shift(i)[self.device_input_names_stacked] + self.test_df[self.device_input_names_stacked]
# drop the old column
if not self.cfg.data.single_test:
self.train_df = self.train_df.drop(columns=[self.device_label_name] + self.device_input_names)
self.test_df = self.test_df.drop(columns=[self.device_label_name] + self.device_input_names)
# backfill device label to save early samples
self.device_data_names = [self.device_label_name_stacked] + self.device_input_names_stacked
if not self.cfg.data.single_test:
self.train_df[self.device_data_names] = self.train_df[self.device_data_names].fillna(method='bfill')
self.test_df[self.device_data_names] = self.test_df[self.device_data_names].fillna(method='bfill')
# convert device label to numpy array
if not self.cfg.data.single_test:
self.train_df[self.device_data_names] = self.train_df[self.device_data_names].applymap(lambda x: np.array(x))
self.test_df[self.device_data_names] = self.test_df[self.device_data_names].applymap(lambda x: np.array(x))
# don't keep what you don't need
def remove_device_data(self):
labels_to_remove = []
if 'keys' in self.train_df.columns:
labels_to_remove.append('keys')
if 'keys_multi' in self.train_df.columns:
labels_to_remove.append('keys_multi')
if 'notes' in self.train_df.columns:
labels_to_remove.append('notes')
if 'notes_multi' in self.train_df.columns:
labels_to_remove.append('notes_multi')
# remove device labels
if not self.cfg.data.single_test:
self.train_df.drop(columns=labels_to_remove, inplace=True)
self.test_df.drop(columns=labels_to_remove, inplace=True)
    # initialize a column with sampling weights to be used by the train generator
def prepare_samples_weighting(self):
self.train_df['weight'] = self.train_df.apply(lambda x: 1 / (x[self.labels_names_stacked].mean()[-1]), axis=1)
# shuffle datasets
def shuffle_data(self):
if not self.cfg.data.single_test and self.cfg.data.shuffle: # do not shuffle if single test is required
self.train_df = self.train_df.sample(frac=1.0)
self.test_df = self.test_df.sample(frac=1.0)
# transform images from PIL images to numpy arrays
def transform_images_to_arrays(self):
if self.cfg.use_imgs:
print('Transforming images back to arrays...')
if not self.cfg.data.single_test:
self.train_df.img = self.train_df.img.apply(lambda x: np.asarray(x))
self.test_df.img = self.test_df.img.apply(lambda x: np.asarray(x))
# extract labels as an array of shape [NC] for train and test
def define_ground_truth(self):
        if self.cfg.data.joints_version == '0' or not self.cfg.use_conf: # other modes that do not use joints
self.labels_names = []
if self.cfg.data.joints_version == '1s':
self.labels_names = ['joint41', 'joint31', 'joint21', 'joint11']
elif self.cfg.data.joints_version == '2':
self.labels_names = ['joint42', 'joint32', 'joint22', 'joint12',\
'joint43', 'joint33', 'joint23', 'joint13',\
'joint44', 'joint34','joint24', 'joint14',\
'jointt3', 'jointt4', 'jointwy']
elif self.cfg.data.joints_version == '2c':
self.labels_names = ['joint4234', 'joint3234', 'joint2234', 'joint1234',\
'jointt34', 'jointwy']
elif self.cfg.data.joints_version == '3':
self.labels_names = ['joint41', 'joint31', 'joint21', 'joint11',\
'joint42', 'joint32', 'joint22', 'joint12',\
'joint43', 'joint33', 'joint23', 'joint13',\
'jointt5', 'jointt6', 'wristy']
elif self.cfg.data.joints_version == '4':
self.labels_names = ['joint41', 'joint31', 'joint21', 'joint11',\
'joint42', 'joint32', 'joint22', 'joint12',\
'joint43', 'joint33', 'joint23', 'joint13',\
'jointt5', 'jointt6',\
'jointwr', 'jointwp', 'jointwy']
elif self.cfg.data.joints_version == '1':
self.labels_names = ['joint41', 'joint31', 'joint21', 'joint11', 'joint42','joint32',\
'joint22', 'joint12', 'jointt2', 'jointt3', 'jointt1']
else: # self.cfg.data.joints_version == 'custom'
self.labels_names = self.cfg.data.joint_names
# data normalization function. choose from ['min_max', 'z_score']
    # image normalization is applied on the fly inside the generators
def normalize_data(self, auto=False, old_min_x=0.0, old_max_x=255.0, old_min_y=0.0, old_max_y=np.pi,\
new_min_x=0.0, new_max_x=1.0, new_min_y=0.0, new_max_y=1.0):
print('Normalizing data...')
if len(self.labels_names_stacked) > 0 and self.cfg.data.normalization == 'min_max':
# define min-max ranges for Y
if auto:
if not self.cfg.data.single_test:
old_min_y = np.vstack(self.train_df[self.labels_names_stacked].stack().values).min()
old_max_y = np.vstack(self.train_df[self.labels_names_stacked].stack().values).max()
else:
old_min_y = np.vstack(self.test_df[self.labels_names_stacked].stack().values).min()
old_max_y = np.vstack(self.test_df[self.labels_names_stacked].stack().values).max()
y_old_range = old_max_y - old_min_y
y_new_range = new_max_y - new_min_y
# normalize Y using min-max
if not self.cfg.data.single_test:
self.train_df[self.labels_names_stacked] = new_min_y + y_new_range * (self.train_df[self.labels_names_stacked] - old_min_y) / y_old_range
self.test_df[self.labels_names_stacked] = new_min_y + y_new_range * (self.test_df[self.labels_names_stacked] - old_min_y) / y_old_range
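            # i.e. y_norm = new_min_y + (new_max_y - new_min_y) * (y - old_min_y) / (old_max_y - old_min_y)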
elif len(self.labels_names_stacked) > 0: # self.cfg.data.normalization == 'z_score':
if not self.cfg.data.single_test:
self.Y_train_mean = self.train_df[self.labels_names_stacked].values.mean().mean()
self.Y_train_std = self.train_df[self.labels_names_stacked].values.std().mean()
self.train_df[self.labels_names_stacked] = (self.train_df[self.labels_names_stacked] - self.Y_train_mean) / self.Y_train_std
# save mean and std for later use
                stats_df = pd.DataFrame(data={'y_train_mean': [self.Y_train_mean], 'y_train_std': [self.Y_train_std]})
# -*- coding: utf-8 -*-
"""
Created on Tue Jan 21 13:04:58 2020
@author: xavier.mouy
"""
from ecosound.core.annotation import Annotation
import ecosound.core.tools
import pandas as pd
import xarray as xr
import os
class Measurement(Annotation):
def __init__(self, measurer_name=None, measurer_version=None, measurements_name=None):
""" Measurement object.
Object to "store" sound measurements. Inheritate all methods from the
ecosound Annotaion class.
Parameters
----------
measurer_name : str, optional
Name of the measurer that was used to calculate the measurements.
The default is None.
measurer_version : str, optional
Version of the measurer that was used to calculate the measurements.
The default is None.
measurements_name : list of str, optional
List with the name of each measurement. The default is None.
Returns
-------
        None. Creates an ecosound Measurement object with .data and .metadata dataframes.
"""
super(Measurement, self).__init__()
metadata = {'measurer_name': measurer_name,
'measurer_version': measurer_version,
'measurements_name': [measurements_name],
}
self._metadata = pd.DataFrame(metadata)
        self.data = pd.concat([self.data, pd.DataFrame(columns=metadata['measurements_name'][0])])
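# Minimal usage sketch (hedged - the measurer and measurement names below are
# illustrative, not part of the library):
#   m = Measurement(measurer_name='SpectralFeatures', measurer_version='1.0',
#                   measurements_name=['snr', 'duration', 'peak_freq'])
#   m.data       # annotation columns plus the three (empty) measurement columns
#   m._metadata  # one-row dataframe with the measurer name/version and measurement names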
import streamlit as st
import numpy as np
import pandas as pd
from numpy import ndarray
from typing import List, Dict, Any, Optional
from .tree_models import *
from .cv_score import cv_score
def get_best_model(
datasets: List[Dict[str, ndarray]],
mode: str='reg',
random_state: int=42,
n_jobs: Optional[int]=None,
) -> Dict[str, Any]:
# Initialize progress bar
progress = 0.0
bar = st.progress(progress)
# Get best model
if mode == 'reg':
best_model = None
history = []
score = np.inf
for model_func in [lgb_reg, xgb_reg, rf_reg, et_reg]:
output = cv_score(
model_func = model_func,
datasets = datasets,
random_state = random_state,
n_jobs = n_jobs
)
# write history
history.append(
pd.DataFrame(
{
output['name']: output['score']
}
).T
)
# update best_model
if output['score']['mae'] < score:
score = output['score']['mae']
best_model = output
# update progress bar
progress += 0.25
bar.progress(progress)
        history = pd.concat(history, axis=0)
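        # Note: the loop above keeps the model with the lowest cross-validated MAE among
        # the four tree-ensemble builders tried (lgb_reg, xgb_reg, rf_reg, et_reg,
        # presumably LightGBM, XGBoost, random forest and extra trees); `history`
        # collects one score row per model.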
import numpy as np
import pytest
from pandas.errors import NullFrequencyError
import pandas as pd
from pandas import (
DatetimeIndex,
Index,
NaT,
Series,
TimedeltaIndex,
date_range,
offsets,
)
import pandas._testing as tm
from pandas.tseries.offsets import BDay
class TestShift:
@pytest.mark.parametrize(
"ser",
[
Series([np.arange(5)]),
date_range("1/1/2011", periods=24, freq="H"),
Series(range(5), index=date_range("2017", periods=5)),
],
)
@pytest.mark.parametrize("shift_size", [0, 1, 2])
def test_shift_always_copy(self, ser, shift_size):
# GH22397
assert ser.shift(shift_size) is not ser
@pytest.mark.parametrize("move_by_freq", [pd.Timedelta("1D"), pd.Timedelta("1min")])
def test_datetime_shift_always_copy(self, move_by_freq):
# GH#22397
ser = Series(range(5), index=date_range("2017", periods=5))
assert ser.shift(freq=move_by_freq) is not ser
def test_shift(self, datetime_series):
shifted = datetime_series.shift(1)
unshifted = shifted.shift(-1)
tm.assert_index_equal(shifted.index, datetime_series.index)
tm.assert_index_equal(unshifted.index, datetime_series.index)
tm.assert_numpy_array_equal(
unshifted.dropna().values, datetime_series.values[:-1]
)
offset = BDay()
shifted = datetime_series.shift(1, freq=offset)
unshifted = shifted.shift(-1, freq=offset)
tm.assert_series_equal(unshifted, datetime_series)
unshifted = datetime_series.shift(0, freq=offset)
tm.assert_series_equal(unshifted, datetime_series)
shifted = datetime_series.shift(1, freq="B")
unshifted = shifted.shift(-1, freq="B")
tm.assert_series_equal(unshifted, datetime_series)
# corner case
unshifted = datetime_series.shift(0)
tm.assert_series_equal(unshifted, datetime_series)
# Shifting with PeriodIndex
ps = tm.makePeriodSeries()
shifted = ps.shift(1)
unshifted = shifted.shift(-1)
tm.assert_index_equal(shifted.index, ps.index)
        tm.assert_index_equal(unshifted.index, ps.index)
import re
import os
import pdb
import glob
import pprint
import logging
import subprocess
import bs4 as bs
import numpy as np
import pandas as pd
import xarray as xr
import linecache as lc
import urllib.request
from functools import reduce
from geopy.distance import distance
from time import perf_counter, sleep
from contextlib import contextmanager
from .parsing_instructions import fort15_instructions
from .adcirc_param_parser import ParamParser
P_CONFIGS = {}
pd.options.display.float_format = "{:,.10f}".format
logger = logging.getLogger()
@contextmanager
def timing(label: str):
t0 = perf_counter()
yield lambda: (label, t1 - t0)
t1 = perf_counter()
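# Minimal usage sketch for timing() above: the yielded callable must be invoked
# after the with-block exits, since t1 is only assigned on exit.
#   with timing("parse fort.14") as elapsed:
#       do_work()                      # placeholder for the timed code
#   label, seconds = elapsed()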
def get_def(param: str):
try:
desc = P_CONFIGS[param]
pprint.pp(f"{param} = {desc}")
except:
print(f"Did not find parameter {param}'")
pass
def update_param_configs(url: str):
global P_CONFIGS
P_CONFIGS = pull_param_configs(url)
def pull_param_configs(
url: str = "https://adcirc.org/home/documentation/users-manual-v53/parameter-definitions",
):
params = {}
try:
source = urllib.request.urlopen(url).read()
rows = bs.BeautifulSoup(source, "lxml").findAll("p", {"class": "MsoNormal"})
for row in rows:
p_name = row.text.split()[0]
if "(" in p_name:
p_name = p_name.split("(")[0]
params[p_name] = " ".join(row.text.split()[2:])
except:
pass
return params
def read_param_line(out, params, f, ln=None, dtypes=None):
if ln:
line = lc.getline(f, ln)
else:
line = f.readline().strip()
logger.info(",".join(params) + " : " + line)
vals = [x for x in re.split("\\s+", line) if x != ""]
for i in range(len(params)):
try:
if dtypes:
out.attrs[params[i]] = dtypes[i](vals[i])
else:
out.attrs[params[i]] = vals[i]
except ValueError:
out.attrs[params[i]] = np.nan
if ln:
ln += 1
return out, ln
else:
return out
def read_text_line(out, param, f, ln=None):
if ln:
line = lc.getline(f, ln).strip()
else:
line = f.readline().strip()
logger.info(param + " : " + line)
out.attrs[param] = line
if ln:
ln += 1
return out, ln
else:
return out
def write_numeric_line(vals, f):
line = " ".join([str(v) for v in vals])
f.write(line + "\n")
def write_param_line(ds, params, f):
if type(ds) == xr.Dataset:
line = " ".join([str(ds.attrs[p]) for p in params])
else:
line = " ".join([str(x) for x in ds])
if len(line) < 80:
line += ((80 - len(line)) * " ") + "! " + ",".join(params)
else:
logger.warning("WARNING - fort config files shouldn't be wider than 80 cols!")
logger.info("Writing param line for " + ",".join(params) + " - " + line)
f.write(line + "\n")
def write_text_line(ds, param, f):
if type(ds) == xr.Dataset:
line = ds.attrs[param]
else:
line = ds
logger.info("Writing text line for " + param + " - " + line)
if len(line) < 80:
if param != "":
line += ((80 - len(line)) * " ") + "! " + param
else:
logger.warning("WARNING - fort config files shouldn't be wider than 80 cols!")
f.write(line + "\n")
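# The write helpers above pad each line to 80 columns and append "! PARAM1,PARAM2"
# as a trailing annotation; read_param_line only consumes the leading whitespace-split
# values, so that trailing annotation is ignored on read-back.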
def find_closest(x, y, x_t, y_t):
closest = 1e20
closest_idx = None
logger.info(f"Searching {len(x)} nodes for closest node to {x_t},{y_t}")
for i, v in enumerate(zip(x, y)):
dis = distance(v, (x_t, y_t)).km
if dis < closest:
closest = dis
closest_idx = i
logger.info(
f"Found closest at index {closest_idx} and coordiante {x.item(closest_idx)}, {y.item(closest_idx)} with distance :{closest}"
)
return closest_idx
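# Note: find_closest is a brute-force O(N) scan over all nodes using geopy's
# geodesic distance in kilometres.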
def get_latest_ts(path: str):
if "\n" in path:
tail = path
else:
res = subprocess.run(["tail", path], stdout=subprocess.PIPE)
if res.returncode != 0:
raise Exception("Unable to access adcirc.log file.")
tail = res.stdout.decode("utf-8")
for line in reversed(tail.split("\n")):
split = re.split("TIME STEP = ", line)
if len(split) > 1:
ts = split[1].split(" ")[1]
return ts
raise Exception("adcirc.log found but time stepping hasn't started yet.")
def snatch_fort14_params(fname):
"""Read a few key parameters from the fort.14 file - not the actual mesh
"""
res = {}
with open(fname, "r") as fp:
fp.readline()
res["NE"], res["NP"] = map(int, fp.readline().strip().split()[:2])
for i in range(res["NE"] + res["NP"]): fp.readline()
res["NOPE"] = int(fp.readline().split()[0])
res["NETA"] = int(fp.readline().split()[0])
return res
def read_fort14(f14_file, ds=None):
"""read_fort14.
Reads in ADCIRC fort.14 f14_file
:param f14_file: Path to Python file.
"""
if type(ds) != xr.Dataset:
ds = xr.Dataset()
# 1 : AGRID = alpha-numeric grid identification (<=24 characters).
ds, ln = read_text_line(ds, "AGRID", f14_file, ln=1)
# 2 : NE, NP = number of elements, nodes in horizontal grid
ds, ln = read_param_line(ds, ["NE", "NP"], f14_file, ln=ln, dtypes=2 * [int])
# 3-NP : NODES
# for k=1 to NP
# JN, X(JN), Y(JN), DP(JN)
# end k loop
logger.info("Reading Node Map.")
ds = xr.merge(
[
ds,
pd.read_csv(
f14_file,
delim_whitespace=True,
nrows=ds.attrs["NP"],
skiprows=ln - 1,
header=None,
names=["JN", "X", "Y", "DP"],
)
.set_index("JN")
.to_xarray(),
],
combine_attrs="override",
)
ln += ds.attrs["NP"]
# (2+NP)-(2+NP+NE) : ELEMENTS
# for k=1 to NE
# JE, NHY, NM(JE,1),NM(JE,2), NM(JE,3)
# end k loop
logger.info("Reading Element Map.")
ds = xr.merge(
[
ds,
pd.read_csv(
f14_file,
delim_whitespace=True,
nrows=ds.attrs["NE"],
skiprows=ln - 1,
header=None,
names=["JE", "NHEY", "NM_1", "NM_2", "NM_3"],
)
.set_index("JE")
.to_xarray(),
],
combine_attrs="override",
)
ln += ds.attrs["NE"]
# (3+NP+NE) : NOPE = number of elevation specified boundary forcing segments.
ds, ln = read_param_line(ds, ["NOPE"], f14_file, ln=ln, dtypes=[int])
# (4+NP+NE) : NETA = total number of elevation specified boundary nodes
ds, ln = read_param_line(ds, ["NETA"], f14_file, ln=ln, dtypes=[int])
# Rest of the file contains boundary information. Read all at once
bounds = pd.read_csv(
f14_file, delim_whitespace=True, header=None, skiprows=ln - 1, usecols=[0]
)
bounds["BOUNDARY"] = None
bounds["IBTYPEE"] = None
bounds["IBTYPE"] = None
bounds = bounds.rename(columns={0: "BOUNDARY_NODES"})
    # Get elevation specified boundary forcing segments
bnd_idx = 0
for i in range(ds.attrs["NOPE"]):
sub = xr.Dataset()
logger.info("Reading NOPE #" + str(i))
# NVDLL(k), IBTYPEE(k) = number of nodes, and boundary type
sub, ln = read_param_line(
sub, ["NVDLL", "IBTYPEE"], f14_file, ln=ln, dtypes=2 * [int]
)
bounds = bounds.drop(bnd_idx)
bounds.loc[bnd_idx : bnd_idx + sub.attrs["NVDLL"], "BOUNDARY"] = i
bounds.loc[bnd_idx : bnd_idx + sub.attrs["NVDLL"], "IBTYPEE"] = sub.attrs[
"IBTYPEE"
]
ln += sub.attrs["NVDLL"]
bnd_idx += sub.attrs["NVDLL"] + 1
bounds["BOUNDARY_NODES"] = bounds["BOUNDARY_NODES"].astype(int)
elev_bounds = bounds[["BOUNDARY", "BOUNDARY_NODES", "IBTYPEE"]].dropna()
elev_bounds["ELEV_BOUNDARY"] = elev_bounds["BOUNDARY"].astype(int)
elev_bounds["ELEV_BOUNDARY_NODES"] = elev_bounds["BOUNDARY_NODES"].astype(int)
elev_bounds["IBTYPEE"] = elev_bounds["IBTYPEE"].astype(int)
elev_bounds = elev_bounds.drop(["BOUNDARY", "BOUNDARY_NODES"], axis=1)
ds = xr.merge(
[ds, elev_bounds.set_index("ELEV_BOUNDARY").to_xarray()],
combine_attrs="override",
)
# NBOU = number of normal flow (discharge) specified boundary segments
bounds = bounds.drop(bnd_idx)
bnd_idx += 1
ds, ln = read_param_line(ds, ["NBOU"], f14_file, ln=ln, dtypes=[int])
# NVEL = total number of normal flow specified boundary nodes
bounds = bounds.drop(bnd_idx)
bnd_idx += 1
ds, ln = read_param_line(ds, ["NVEL"], f14_file, ln=ln, dtypes=[int])
    # Get flow specified boundary segments
for i in range(ds.attrs["NBOU"]):
logger.info("Reading NBOU #" + str(i))
# NVELL(k), IBTYPE(k)
sub, ln = read_param_line(
sub, ["NVELL", "IBTYPE"], f14_file, ln=ln, dtypes=2 * [int]
)
bounds = bounds.drop(bnd_idx)
bounds.loc[bnd_idx : bnd_idx + sub.attrs["NVELL"], "BOUNDARY"] = (
i + ds.attrs["NOPE"]
)
bounds.loc[bnd_idx : bnd_idx + sub.attrs["NVELL"], "IBTYPE"] = sub.attrs[
"IBTYPE"
]
ln += sub.attrs["NVELL"]
bnd_idx += sub.attrs["NVELL"] + 1
normal_bounds = bounds[["BOUNDARY", "BOUNDARY_NODES", "IBTYPE"]].dropna()
normal_bounds["NORMAL_BOUNDARY"] = (
normal_bounds["BOUNDARY"].astype(int) - ds.attrs["NOPE"]
)
normal_bounds["NORMAL_BOUNDARY_NODES"] = normal_bounds["BOUNDARY_NODES"].astype(int)
normal_bounds["IBTYPE"] = normal_bounds["IBTYPE"].astype(int)
normal_bounds = normal_bounds.drop(["BOUNDARY", "BOUNDARY_NODES"], axis=1)
ds = xr.merge(
[ds, normal_bounds.set_index("NORMAL_BOUNDARY").to_xarray()],
combine_attrs="override",
)
return ds
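# Minimal usage sketch for read_fort14 (the path is hypothetical):
#   mesh = read_fort14('adcirc_run/fort.14')
#   mesh.attrs['NE'], mesh.attrs['NP']   # element and node counts
#   mesh['DP']                           # nodal depths indexed by JN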
def read_fort15(f15_file, ds=None):
"""read_fort15.
Reads in ADCIRC fort.15 f15_file
Args:
f15_file (str) - Path to parameter file.
ds (dict) - a dictionary with parameter data
"""
if ds is None:
ds = {}
if "NETA" not in ds:
# we need NETA in order to parse the fort15 file
# Try to find the fort.14 file
dirname = os.path.dirname(f15_file)
f14_file = dirname + "/fort.14" if dirname else "fort.14"
if os.path.exists(f14_file):
ds.update(snatch_fort14_params(f14_file))
else:
raise ValueError("NETA must be provided in order to parse the fort.15 file!"
f"Tried and failed to find the fort.14 file in the directory '{dirname}'!")
parser = ParamParser(fort15_instructions)
return parser.parse(f15_file, starting_params=ds)
def read_fort13(f13_file, ds=None):
if type(ds) != xr.Dataset:
ds = xr.Dataset()
ds, ln = read_param_line(ds, ["AGRID"], f13_file, ln=1)
# Note this must match NP
ds, ln = read_param_line(ds, ["NumOfNodes"], f13_file, ln=ln, dtypes=[int])
# Note this must be >= NWP
ds, ln = read_param_line(ds, ["NAttr"], f13_file, ln=ln, dtypes=[int])
# Read Nodal Attribute info
nodals = []
for i in range(ds.attrs["NAttr"]):
tmp, ln = read_param_line(xr.Dataset(), ["AttrName"], f13_file, ln=ln)
tmp, ln = read_param_line(tmp, ["Units"], f13_file, ln=ln)
tmp, ln = read_param_line(tmp, ["ValuesPerNode"], f13_file, ln=ln, dtypes=[int])
tmp, ln = read_param_line(
tmp,
["v" + str(i) for i in range(tmp.attrs["ValuesPerNode"])],
f13_file,
ln=ln,
dtypes=tmp.attrs["ValuesPerNode"] * [float],
)
nodals.append(tmp.attrs)
ds = xr.merge(
[ds, pd.DataFrame(nodals).set_index("AttrName").to_xarray()],
combine_attrs="override",
)
# Read Non Default Nodal Attribute Values
non_default = []
line = lc.getline(f13_file, ln)
while line != "":
tmp, ln = read_param_line(tmp, ["AttrName"], f13_file, ln=ln)
tmp, ln = read_param_line(tmp, ["NumND"], f13_file, ln=ln, dtypes=[int])
num_vals = ds["ValuesPerNode"][ds["AttrName"] == tmp.attrs["AttrName"]].values[
0
]
cols = ["JN"] + [
"_".join([tmp.attrs["AttrName"], str(x)]) for x in range(num_vals)
]
tmp_df = pd.read_csv(
f13_file,
skiprows=ln - 1,
nrows=tmp.attrs["NumND"],
delim_whitespace=True,
names=cols,
)
non_default.append(tmp_df)
ln += tmp.attrs["NumND"]
line = lc.getline(f13_file, ln)
ds = xr.merge(
[
ds,
reduce(lambda x, y: x.merge(y, how="outer"), non_default)
.set_index("JN")
.to_xarray(),
],
combine_attrs="override",
)
return ds
def read_fort22(f22_file, NWS=12, ds=None):
if type(ds) == xr.Dataset:
if "NWS" in ds.attrs.keys():
NWS = ds.attrs["NWS"]
else:
ds = xr.Dataset()
if NWS in [12, 12012]:
ds, _ = read_param_line(ds, ["NWSET"], f22_file, ln=1, dtypes=[float])
ds, _ = read_param_line(ds, ["NWBS"], f22_file, ln=2, dtypes=[float])
ds, _ = read_param_line(ds, ["DWM"], f22_file, ln=3, dtypes=[float])
else:
msg = f"NWS {NWS} Not yet implemented!"
logger.error(msg)
raise Exception(msg)
return ds
def read_fort24(f22_file, ds=None):
if type(ds) != xr.Dataset:
ds = xr.Dataset()
data = pd.read_csv(
f22_file,
delim_whitespace=True,
names=["JN", "SALTAMP", "SALTPHA"],
low_memory=False,
header=None,
)
tides = data[data["SALTPHA"].isna()]
all_tmp = []
for i in range(int(tides.shape[0] / 4)):
stop = (tides.index[(i + 1) * 4] - 1) if i != 7 else data.index[-1]
tmp = data.loc[(tides.index[i * 4 + 3] + 1) : stop][
["JN", "SALTAMP", "SALTPHA"]
].copy()
tmp["JN"] = tmp["JN"].astype(int)
tmp["SALTAMP"] = tmp["SALTAMP"].astype(float)
tmp["SALTPHA"] = tmp["SALTPHA"].astype(float)
tmp["SALTFREQ"] = float(tides["JN"].iloc[i * 4 + 1])
tmp = tmp.set_index("JN").to_xarray()
tmp = tmp.expand_dims(dim={"SALTNAMEFR": [tides["JN"].iloc[i * 4 + 3]]})
all_tmp.append(tmp)
ds = xr.merge([ds, xr.concat(all_tmp, "SALTNAMEFR")], combine_attrs="override")
return ds
def read_fort25(f25_file, NWS=12, ds=None):
if ds != None:
if "NWS" in ds.attrs.keys():
NWS = ds.attrs["NWS"]
else:
ds = xr.Dataset()
if NWS in [12, 12012]:
ds, _ = read_param_line(ds, ["NUM_ICE_FIELDS"], f25_file, ln=1, dtypes=[float])
ds, _ = read_param_line(
ds, ["NUM_BLANK_ICE_SNAPS"], f25_file, ln=2, dtypes=[float]
)
else:
msg = f"NWS {NWS} Not yet implemented!"
logger.error(msg)
raise Exception(msg)
return ds
def read_fort221(f221_file, NWS=12, times=[], ds=None):
if ds != None:
if "NWS" in ds.attrs.keys():
NWS = ds.attrs["NWS"]
else:
ds = xr.Dataset()
if NWS in [12, 12012]:
pressure_data = read_owi_met(f221_file, vals=["press"], times=times)
else:
msg = f"NWS {NWS} Not yet implemented!"
logger.error(msg)
raise Exception(msg)
attrs = {"press_" + str(key): val for key, val in pressure_data.attrs.items()}
pressure_data.attrs = attrs
return xr.merge([ds, pressure_data], combine_attrs="no_conflicts")
def read_fort222(f222_file, NWS=12, times=[], ds=None):
if ds != None:
if "NWS" in ds.attrs.keys():
NWS = ds.attrs["NWS"]
else:
ds = xr.Dataset()
if NWS in [12, 12012]:
wind_data = read_owi_met(f222_file, vals=["u_wind", "v_wind"], times=times)
else:
msg = f"NWS {NWS} Not yet implemented!"
logger.error(msg)
raise Exception(msg)
attrs = {"wind_" + str(key): val for key, val in wind_data.attrs.items()}
wind_data.attrs = attrs
return xr.merge([ds, wind_data], combine_attrs="no_conflicts")
def read_fort225(f225_file, NWS=12, times=[], ds=None):
if ds != None:
if "NWS" in ds.attrs.keys():
NWS = ds.attrs["NWS"]
else:
ds = xr.Dataset()
if NWS in [12, 12012]:
ice_data = read_owi_met(f225_file, vals=["ice_cov"], times=times)
else:
msg = f"NWS {NWS} Not yet implemented!"
logger.error(msg)
raise Exception(msg)
attrs = {"ice_" + str(key): val for key, val in ice_data.attrs.items()}
ice_data.attrs = attrs
return xr.merge([ds, ice_data], combine_attrs="no_conflicts")
def read_owi_met(path, vals=["v1"], times=[0]):
# NWS 12 - Ocean Weather Inc (OWI) met data
attrs = {}
# Title line:
# 10 format (t56,i10,t71,i10)
# read (20,10) date1,date2
line = lc.getline(path, 1)
attrs["source"] = line[0:56]
attrs["start_ts"] = pd.to_datetime(line[55:66].strip(), format="%Y%m%d%H")
attrs["end_ts"] = pd.to_datetime(line[70:80].strip(), format="%Y%m%d%H")
if len(lc.getline(path, 2)) > 79:
tf = "%Y%m%d%H%M%S"
ti_idx = 67
else:
tf = "%Y%m%d%H"
ti_idx = 68
cur_line = 2
all_data = []
line = lc.getline(path, cur_line)
for t in times:
if line == "":
break
# Grid Spec Line:
# 11 format (t6,i4,t16,i4,t23,f6.0,t32,f6.0,t44,f8.0,t58,f8.0,t69,i10,i2)
# read (20,11) iLat, iLong, dx, dy, swlat, swlong, lCYMDH, iMin
grid_spec = re.sub("[^\-0-9=.]", "", line)[1:].split("=")
ilat = int(grid_spec[0])
ilon = int(grid_spec[1])
dx = float(grid_spec[2])
dy = float(grid_spec[3])
swlat = float(grid_spec[4])
swlon = float(grid_spec[5])
        ts = pd.to_datetime(grid_spec[6], format=tf)
import base64
import io
import textwrap
import dash
import dash_core_components as dcc
import dash_html_components as html
import gunicorn
import plotly.graph_objs as go
from dash.dependencies import Input, Output, State
import flask
import pandas as pd
import urllib.parse
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from sklearn.decomposition import PCA
import numpy as np
import math
import scipy.stats
import dash_table
from dash_table.Format import Format, Scheme
from colour import Color
import dash_bootstrap_components as dbc
# from waitress import serve
external_stylesheets = [dbc.themes.BOOTSTRAP, 'https://codepen.io/chriddyp/pen/bWLwgP.css',
"https://codepen.io/sutharson/pen/dyYzEGZ.css",
"https://fonts.googleapis.com/css2?family=Raleway&display=swap",
"https://codepen.io/chriddyp/pen/brPBPO.css"]
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
server = app.server
# "external_url": "https://codepen.io/chriddyp/pen/brPBPO.css"
# https://raw.githubusercontent.com/aaml-analytics/pca-explorer/master/LoadingStatusStyleSheet.css
styles = {
'pre': {
'border': 'thin lightgrey solid',
'overflowX': 'scroll'
}
}
tabs_styles = {'height': '40px', 'font-family': 'Raleway', 'fontSize': 14}
tab_style = {
'borderBottom': '1px solid #d6d6d6',
'padding': '6px',
'Weight': 'bold'
}
tab_selected_style = {
'borderTop': '3px solid #333333',
'borderBottom': '1px solid #d6d6d6 ',
'backgroundColor': '#f6f6f6',
'color': '#333333',
# 'fontColor': '#004a4a',
'fontWeight': 'bold',
'padding': '6px'
}
# APP ABOUT DESCRIPTION
MOF_tool_about = textwrap.wrap(' These tools aim to provide a reproducible and consistent data visualisation platform '
'where experimental and computational researchers can use big data and statistical '
'analysis to find the best materials for specific applications. Principal Component '
'Analysis (PCA) is a dimension reduction technique that can be used to reduce a large '
'set of observable variables to a smaller set of latent variables that still contain '
'most of the information in the large set (feature extraction). This is done by '
'transforming a number of (possibly) correlated variables into some number of orthogonal '
'(uncorrelated) variables called principal components to find the directions of maximal '
'variance. PCA can be used to ease data visualisation by having fewer dimensions to plot '
'or be used as a pre-processing step before using another Machine Learning (ML)'
' algorithm for regression '
'and classification tasks. PCA can be used to improve an ML algorithm performance, '
'reduce overfitting and reduce noise in data.',
width=50)
Scree_plot_about = textwrap.wrap(' The Principal Component Analysis Visualisation Tools run PCA for the user and '
                                 'populate a Scree plot. This plot allows the user to determine if PCA is suitable '
                                 'for '
                                 'their dataset and if they can accept an X% drop in explained variance to '
                                 'have fewer dimensions.', width=50)
Feature_correlation_filter = textwrap.wrap("Feature correlation heatmaps provide users with feature analysis and "
"feature principal component analysis. This tool will allow users to see the"
" correlation between variables and the"
" covariances/correlations between original variables and the "
"principal components (loadings)."
, width=50)
plots_analysis = textwrap.wrap('Users can keep all variables as features or drop certain variables to produce a '
'Biplot, cos2 plot and contribution plot. The score plot is used to look for clusters, '
'trends, and outliers in the first two principal components. The loading plot is used to'
' visually interpret the first two principal components. The biplot overlays the score '
'plot and the loading plot on the same graph. The squared cosine (cos2) plot shows '
'the importance of a component for a given observation i.e. measures '
'how much a variable is represented in a component. The contribution plot contains the '
'contributions (%) of the variables to the principal components', width=50, )
data_table_download = textwrap.wrap("The user's inputs from the 'Plots' tab will provide the output of the data tables."
" The user can download the scores, eigenvalues, explained variance, "
"cumulative explained variance, loadings, "
"cos2 and contributions from the populated data tables. "
"Note: Wait for user inputs to be"
" computed (faded tab app will return to the original colour) before downloading the"
" data tables. ", width=50)
MOF_GH = textwrap.wrap(" to explore AAML's sample data and read more on"
" AAML's Principal Component Analysis Visualisation Tool Manual, FAQ's & Troubleshooting"
" on GitHub... ", width=50)
####################
# APP LAYOUT #
####################
fig = go.Figure()
fig1 = go.Figure()
app.layout = html.Div([
html.Div([
html.Img(
src='https://raw.githubusercontent.com/aaml-analytics/mof-explorer/master/UOC.png',
height='35', width='140', style={'display': 'inline-block', 'padding-left': '1%'}),
html.Img(src='https://raw.githubusercontent.com/aaml-analytics/mof-explorer/master/A2ML-logo.png',
height='50', width='125', style={'float': 'right', 'display': 'inline-block', 'padding-right': '2%'}),
html.H1("Principal Component Analysis Visualisation Tools",
style={'display': 'inline-block', 'padding-left': '11%', 'text-align': 'center', 'fontSize': 36,
'color': 'white', 'font-family': 'Raleway'}),
html.H1("...", style={'fontColor': '#3c3c3c', 'fontSize': 6})
], style={'backgroundColor': '#333333'}),
html.Div([html.A('Refresh', href='/')], style={}),
html.Div([
html.H2("Upload Data", style={'fontSize': 24, 'font-family': 'Raleway', 'color': '#333333'}, ),
html.H3("Upload .txt, .csv or .xls files to starting exploring data...", style={'fontSize': 16,
'font-family': 'Raleway'}),
dcc.Store(id='csv-data', storage_type='session', data=None),
html.Div([dcc.Upload(
id='data-table-upload',
children=html.Div([html.Button('Upload File')],
style={'height': "60px", 'borderWidth': '1px',
'borderRadius': '5px',
'textAlign': 'center',
}),
multiple=False
),
html.Div(id='output-data-upload'),
]), ], style={'display': 'inline-block', 'padding-left': '1%', }),
html.Div([dcc.Tabs([
dcc.Tab(label='About', style=tab_style, selected_style=tab_selected_style,
children=[html.Div([html.H2(" What are AAML's Principal Component Analysis Visualisation Tools?",
style={'fontSize': 18, 'font-family': 'Raleway', 'font-weight': 'bold'
}),
html.Div([' '.join(MOF_tool_about)]
, style={'font-family': 'Raleway'}),
html.H2(["Scree Plot"],
style={'fontSize': 18,
'font-family': 'Raleway', 'font-weight': 'bold'}),
html.Div([' '.join(Scree_plot_about)], style={'font-family': 'Raleway'}),
html.H2(["Feature Correlation"], style={'fontSize': 18,
'font-weight': 'bold',
'font-family': 'Raleway'}),
html.Div([' '.join(Feature_correlation_filter)], style={'font-family': 'Raleway', }),
html.H2(["Plots"],
style={'fontSize': 18, 'font-weight': 'bold',
'font-family': 'Raleway'}),
html.Div([' '.join(plots_analysis)], style={'font-family': 'Raleway'}),
html.H2(["Data tables"],
style={'fontSize': 18, 'font-weight': 'bold',
'font-family': 'Raleway'}),
html.Div([' '.join(data_table_download)], style={'font-family': 'Raleway'}),
# ADD LINK
html.Div([html.Plaintext(
[' Click ', html.A('here ',
href='https://github.com/aaml-analytics/pca-explorer')],
style={'display': 'inline-block',
'fontSize': 14, 'font-family': 'Raleway'}),
html.Div([' '.join(MOF_GH)], style={'display': 'inline-block',
'fontSize': 14,
'font-family': 'Raleway'}),
html.Img(
src='https://raw.githubusercontent.com/aaml-analytics/mof'
'-explorer/master/github.png',
height='40', width='40',
style={'display': 'inline-block', 'float': "right"
})
]
, style={'display': 'inline-block'})
], style={'backgroundColor': '#ffffff', 'padding-left': '1%'}
)]),
dcc.Tab(label='Scree Plot', style=tab_style, selected_style=tab_selected_style,
children=[
html.Div([dcc.Graph(id='PC-Eigen-plot')
],
style={'display': 'inline-block',
'width': '49%'}),
html.Div([dcc.Graph(id='PC-Var-plot')
], style={'display': 'inline-block', 'float': 'right',
'width': '49%'}),
html.Div(
[html.Label(["Remove outliers (if any) in analysis:", dcc.RadioItems(
id='outlier-value',
options=[{'label': 'Yes', 'value': 'Yes'},
{'label': 'No', 'value': 'No'}],
value='No')
])
], style={'display': 'inline-block',
'width': '49%', 'padding-left': '1%'}),
html.Div([html.Label(["Select the type of matrix used to calculate the principal components:",
dcc.RadioItems(
id='matrix-type-scree',
options=[{'label': 'Correlation', 'value': 'Correlation'},
{'label': 'Covariance', 'value': 'Covariance'}],
value='Correlation')
])], style={'display': 'inline-block',
'width': '49%', }),
html.Div([html.P(
"Note: Use a correlation matrix when your variables have different scales and you want to weight "
"all the variables equally. Use a covariance matrix when your variables have different scales and"
" you want to give more emphasis to variables with higher variances. When unsure"
" use a correlation matrix.")],
style={'padding-left': '1%'}),
html.Div([
html.Label(["You should attempt to use at least..."
, html.Div(id='var-output-container-filter')])
], style={'padding-left': '1%'}
),
html.Div([
html.Label(["As a rule of thumb for the Scree Plot"
" Eigenvalues, the point where the slope of the curve "
"is clearly "
"leveling off (the elbow), indicates the number of "
"components that "
"should be retained as significant."])
], style={'padding-left': '1%'}),
]),
dcc.Tab(label='Feature correlation', style=tab_style,
selected_style=tab_selected_style,
children=[html.Div([html.Div([dcc.Graph(id='PC-feature-heatmap')
], style={'width': '47%',
'display': 'inline-block',
'float': 'right'}),
html.Div([dcc.Graph(id='feature-heatmap')
], style={'width': '51%',
'display': 'inline-block',
'float': 'left'}),
html.Div([html.Label(["Loading colour bar range:"
, html.Div(
id='color-range-container')])
], style={
'fontSize': 12,
'float': 'right',
'width': '100%',
'padding-left': '85%'}
),
html.Div(
[html.Label(
["Remove outliers (if any) in analysis:",
dcc.RadioItems(
id='PC-feature-outlier-value',
options=[{'label': 'Yes', 'value': 'Yes'},
{'label': 'No', 'value': 'No'}],
value='No')
])
], style={'display': 'inline-block',
'width': '29%', 'padding-left': '1%'}),
html.Div([html.Label(
["Select the type of matrix used to calculate the principal components:",
dcc.RadioItems(
id='matrix-type-heatmap',
options=[{'label': 'Correlation', 'value': 'Correlation'},
{'label': 'Covariance', 'value': 'Covariance'}],
value='Correlation')
])], style={'display': 'inline-block',
'width': '39%', }),
html.Div([html.Label(["Select color scale:",
dcc.RadioItems(
id='colorscale',
options=[{'label': i, 'value': i}
for i in
['Viridis', 'Plasma']],
value='Plasma'
)]),
], style={'display': 'inline-block',
'width': '29%', 'padding-left': '1%'}),
html.Div([html.P(
"Note: Use a correlation matrix when your variables have different scales and you want to weight "
"all the variables equally. Use a covariance matrix when your variables have different scales and"
" you want to give more emphasis to variables with higher variances. When unsure"
" use a correlation matrix.")],
style={'padding-left': '1%'}),
html.Div([
html.P("There are usually two ways multicollinearity, "
"which is when there are a number of variables "
"that are highly correlated, is dealt with:"),
html.P("1) Use PCA to obtain a set of orthogonal ("
"not correlated) variables to analyse."),
html.P("2) Use correlation of determination (R²) to "
"determine which variables are highly "
"correlated and use only 1 in analysis. "
"Cut off for highly correlated variables "
"is ~0.7."),
html.P(
"In any case, it depends on the machine learning algorithm you may apply later. For correlation robust algorithms,"
" such as Random Forest, correlation of features will not be a concern. For non-correlation robust algorithms such as Linear Discriminant Analysis, "
"all high correlation variables should be removed.")
], style={'padding-left': '1%'}
),
html.Div([
html.Label(["Note: Data has been standardised (scale)"])
], style={'padding-left': '1%'})
])
]),
dcc.Tab(label='Plots', style=tab_style,
selected_style=tab_selected_style,
children=[html.Div([
html.Div([html.P("Selecting Features")], style={'padding-left': '1%',
'font-weight': 'bold'}),
html.Div([
html.P("Input here affects all plots, datatables and downloadable data output"),
html.Label([
"Would you like to analyse all variables or choose custom variables to "
"analyse:",
dcc.RadioItems(
id='all-custom-choice',
options=[{'label': 'All',
'value': 'All'},
{'label': 'Custom',
'value': 'Custom'}],
value='All'
)])
], style={'padding-left': '1%'}),
html.Div([
html.P("For custom variables input variables you would not like as features in your PCA:"),
html.Label(
[
"Note: Only input numerical variables (non-numerical variables have already "
"been removed from your dataframe)",
dcc.Dropdown(id='feature-input',
multi=True,
)])
], style={'padding': 10, 'padding-left': '1%'}),
]), dcc.Tabs(id='sub-tabs1', style=tabs_styles,
children=[
dcc.Tab(label='Biplot (Scores + loadings)', style=tab_style,
selected_style=tab_selected_style,
children=[
html.Div([dcc.Graph(id='biplot', figure=fig)
], style={'height': '100%', 'width': '75%',
'padding-left': '20%'},
),
html.Div(
[html.Label(
["Remove outliers (if any) in analysis:",
dcc.RadioItems(
id='outlier-value-biplot',
options=[
{'label': 'Yes', 'value': 'Yes'},
{'label': 'No', 'value': 'No'}],
value='No')
])
], style={'display': 'inline-block',
'width': '29%', 'padding-left': '1%'}),
html.Div([html.Label([
"Select the type of matrix used to calculate the principal components:",
dcc.RadioItems(
id='matrix-type-biplot',
options=[{'label': 'Correlation',
'value': 'Correlation'},
{'label': 'Covariance',
'value': 'Covariance'}],
value='Correlation')
])], style={'display': 'inline-block',
'width': '39%', }),
html.Div([
html.Label([
"Graph Update to show either loadings (Loading Plot) or "
"scores and loadings (Biplot):",
dcc.RadioItems(
id='customvar-graph-update',
options=[{'label': 'Biplot',
'value': 'Biplot'},
{'label': 'Loadings',
'value': 'Loadings'}],
value='Biplot')
])
], style={'display': 'inline-block',
'width': '29%', 'padding-left': '1%'}),
html.Div([html.P(
"Note: Use a correlation matrix when your variables have different scales and you want to weight "
"all the variables equally. Use a covariance matrix when your variables have different scales and"
" you want to give more emphasis to variables with higher variances. When unsure"
" use a correlation matrix. PCA is an unsupervised machine learning technique - it only "
"looks at the input features and does not take "
"into account the output or the target"
" (response) variable.")],
style={'padding-left': '1%'}),
html.Div([
html.P("For variables you have dropped..."),
html.Label([
"Would you like to introduce a first target variable"
" into your data visualisation?"
" (Graph type must be Biplot): "
"",
dcc.RadioItems(
id='radio-target-item',
options=[{'label': 'Yes',
'value': 'Yes'},
{'label': 'No',
'value': 'No'}],
value='No'
)])
], style={'width': '49%', 'padding-left': '1%',
'display': 'inline-block'}),
html.Div([
html.Label([
"Select first target variable for color scale of scores: ",
dcc.Dropdown(
id='color-scale-scores',
)])
], style={'width': '49%', 'padding-left': '1%',
'display': 'inline-block'}),
html.Div([
html.Label([
"Would you like to introduce a second target variable"
" into your data visualisation??"
" (Graph type must be Biplot):",
dcc.RadioItems(
id='radio-target-item-second',
options=[{'label': 'Yes',
'value': 'Yes'},
{'label': 'No',
'value': 'No'}],
value='No'
)])
], style={'width': '49%', 'padding-left': '1%',
'display': 'inline-block'}),
html.Div([
html.Label([
"Select second target variable for size scale of scores:",
dcc.Dropdown(
id='size-scale-scores',
)])
], style={'width': '49%', 'padding-left': '1%',
'display': 'inline-block'}),
html.Div([html.Label(["Size range:"
, html.Div(
id='size-second-target-container')])
], style={'display': 'inline-block',
'float': 'right',
'padding-right': '5%'}
),
html.Div([
html.Br(),
html.P(
"A loading plot shows how "
"strongly each characteristic (variable)"
" influences a principal component. The angles between the vectors"
" tell us how characteristics correlate with one another: "),
html.P("1) When two vectors are close, forming a small angle, the two "
"variables they represent are positively correlated. "),
html.P(
"2) If they meet each other at 90°, they are not likely to be correlated. "),
html.P(
"3) When they diverge and form a large angle (close to 180°), they are negative correlated."),
html.P(
"The Score Plot involves the projection of the data onto the PCs in two dimensions."
"The plot contains the original data but in the rotated (PC) coordinate system"),
html.P(
"A biplot merges a score plot and loading plot together.")
], style={'padding-left': '1%'}
),
]),
dcc.Tab(label='Cos2', style=tab_style,
selected_style=tab_selected_style,
children=[
html.Div([dcc.Graph(id='cos2-plot', figure=fig)
], style={'width': '65%',
'padding-left': '25%'},
),
html.Div(
[html.Label(["Remove outliers (if any) in analysis:",
dcc.RadioItems(
id='outlier-value-cos2',
options=[
{'label': 'Yes', 'value': 'Yes'},
{'label': 'No', 'value': 'No'}],
value='No')
])
], style={'display': 'inline-block',
'padding-left': '1%',
'width': '49%'}),
html.Div([html.Label([
"Select the type of matrix used to calculate the principal components:",
dcc.RadioItems(
id='matrix-type-cos2',
options=[{'label': 'Correlation',
'value': 'Correlation'},
{'label': 'Covariance',
'value': 'Covariance'}],
value='Correlation')
])], style={'display': 'inline-block',
'width': '49%', }),
html.Div([html.P(
"Note: Use a correlation matrix when your variables have different scales and you want to weight "
"all the variables equally. Use a covariance matrix when your variables have different scales and"
" you want to give more emphasis to variables with higher variances. When unsure"
" use a correlation matrix.")],
style={'padding-left': '1%'}),
html.Div([
html.P("The squared cosine shows the importance of a "
"component for a given observation i.e. "
"measures "
" how much a variable is represented in a "
"component")
], style={'padding-left': '1%'}),
]),
dcc.Tab(label='Contribution', style=tab_style,
selected_style=tab_selected_style,
children=[
html.Div([dcc.Graph(id='contrib-plot', figure=fig)
], style={'width': '65%',
'padding-left': '25%'},
),
html.Div(
[html.Label(["Remove outliers (if any) in analysis:",
dcc.RadioItems(
id='outlier-value-contrib',
options=[
{'label': 'Yes', 'value': 'Yes'},
{'label': 'No', 'value': 'No'}],
value='No')
], style={'padding-left': '1%'})
], style={'display': 'inline-block',
'width': '49%'}),
html.Div([html.Label([
"Select the type of matrix used to calculate the principal components:",
dcc.RadioItems(
id='matrix-type-contrib',
options=[{'label': 'Correlation',
'value': 'Correlation'},
{'label': 'Covariance',
'value': 'Covariance'}],
value='Correlation')
])], style={'display': 'inline-block',
'width': '49%', }),
html.Div([html.P(
"Note: Use a correlation matrix when your variables have different scales and you want to weight "
"all the variables equally. Use a covariance matrix when your variables have different scales and"
" you want to give more emphasis to variables with higher variances. When unsure"
" use a correlation matrix.")],
style={'padding-left': '1%'}),
html.Div([
html.P("The contribution plot contains the "
"contributions (in percentage) of the "
"variables to the principal components")
], style={'padding-left': '1%'}),
])
])
]),
dcc.Tab(label='Data tables', style=tab_style,
selected_style=tab_selected_style,
children=[html.Div([
html.Div([
html.Label(
["Note: Input in 'Plots' tab will provide output of data tables and the"
" downloadable PCA data"])
], style={'font-weight': 'bold', 'padding-left': '1%'}),
html.Div([html.A(
'Download PCA Data (scores for each principal component)',
id='download-link',
href="",
target="_blank"
)], style={'padding-left': '1%'}),
html.Div([html.Label(["Remove outliers (if any) in analysis:",
dcc.RadioItems(id="eigenA-outlier",
options=[{'label': 'Yes',
'value': 'Yes'},
{'label': 'No',
'value': 'No'}],
value='No'
)])], style={'padding-left': '1%',
'display': 'inline-block', 'width': '49%'}),
html.Div([html.Label(["Select the type of matrix used to calculate the principal components:",
dcc.RadioItems(
id='matrix-type-data-table',
options=[{'label': 'Correlation', 'value': 'Correlation'},
{'label': 'Covariance', 'value': 'Covariance'}],
value='Correlation')
])], style={'display': 'inline-block',
'width': '49%', }),
html.Div([html.P(
"Note: Use a correlation matrix when your variables have different scales and you want to weight "
"all the variables equally. Use a covariance matrix when your variables have different scales and"
" you want to give more emphasis to variables with higher variances. When unsure"
" use a correlation matrix.")],
style={'padding-left': '1%'}),
html.Div([
html.Div([
html.Label(["Correlation between Features"])
], style={'font-weight': 'bold'}),
html.Div([
dash_table.DataTable(id='data-table-correlation',
editable=False,
filter_action='native',
sort_action='native',
sort_mode='multi',
selected_columns=[],
selected_rows=[],
page_action='native',
column_selectable='single',
page_current=0,
page_size=20,
style_data={'height': 'auto'},
style_table={'overflowX': 'scroll',
'maxHeight': '300px',
'overflowY': 'scroll'},
style_cell={
'minWidth': '0px', 'maxWidth': '220px',
'whiteSpace': 'normal',
}
),
html.Div(id='data-table-correlation-container'),
]),
html.Div([html.A(
'Download Feature Correlation data',
id='download-link-correlation',
href="",
target="_blank"
)]),
], style={'padding': 20}),
html.Div([
html.Div([
html.Label(["Eigen Analysis of the correlation matrix"]),
], style={'font-weight': 'bold'}),
html.Div([
dash_table.DataTable(id='data-table-eigenA',
editable=False,
filter_action='native',
sort_action='native',
sort_mode='multi',
selected_columns=[],
selected_rows=[],
page_action='native',
column_selectable='single',
page_current=0,
page_size=20,
style_data={'height': 'auto'},
style_table={'overflowX': 'scroll',
'maxHeight': '300px',
'overflowY': 'scroll'},
style_cell={
'minWidth': '0px', 'maxWidth': '220px',
'whiteSpace': 'normal',
}
),
html.Div(id='data-table-eigenA-container'),
]),
html.Div([html.A(
'Download Eigen Analysis data',
id='download-link-eigenA',
href="",
download='Eigen_Analysis_data.csv',
target="_blank"
)]),
], style={'padding': 20}),
html.Div([
html.Div([
html.Label(["Loadings (Feature and PC correlation) from PCA"]),
], style={'font-weight': 'bold'}),
html.Div([
dash_table.DataTable(id='data-table-loadings',
editable=False,
filter_action='native',
sort_action='native',
sort_mode='multi',
selected_columns=[],
selected_rows=[],
page_action='native',
column_selectable='single',
page_current=0,
page_size=20,
style_data={'height': 'auto'},
style_table={'overflowX': 'scroll',
'maxHeight': '300px',
'overflowY': 'scroll'},
style_cell={
'minWidth': '0px', 'maxWidth': '220px',
'whiteSpace': 'normal',
}
),
html.Div(id='data-table-loadings-container'),
]),
html.Div([html.A(
'Download Loadings data',
id='download-link-loadings',
download='Loadings_data.csv',
href="",
target="_blank"
)]),
], style={'padding': 20}),
html.Div([
html.Div([
html.Label(["Cos2 from PCA"])
], style={'font-weight': 'bold'}),
html.Div([
dash_table.DataTable(id='data-table-cos2',
editable=False,
filter_action='native',
sort_action='native',
sort_mode='multi',
selected_columns=[],
selected_rows=[],
page_action='native',
column_selectable='single',
page_current=0,
page_size=20,
style_data={'height': 'auto'},
style_table={'overflowX': 'scroll',
'maxHeight': '300px',
'overflowY': 'scroll'},
style_cell={
'minWidth': '0px', 'maxWidth': '220px',
'whiteSpace': 'normal',
}
),
html.Div(id='data-table-cos2-container'),
]),
html.Div([html.A(
'Download Cos2 data',
id='download-link-cos2',
download='Cos2_data.csv',
href="",
target="_blank"
)]),
], style={'padding': 20}),
html.Div([
html.Div([
html.Label(["Contributions from PCA"])
], style={'font-weight': 'bold'}),
html.Div([
dash_table.DataTable(id='data-table-contrib',
editable=False,
filter_action='native',
sort_action='native',
sort_mode='multi',
selected_columns=[],
selected_rows=[],
page_action='native',
column_selectable='single',
page_current=0,
page_size=20,
style_data={'height': 'auto'},
style_table={'overflowX': 'scroll',
'maxHeight': '300px',
'overflowY': 'scroll'},
style_cell={
'minWidth': '0px', 'maxWidth': '220px',
'whiteSpace': 'normal',
}
),
html.Div(id='data-table-contrib-container'),
]),
html.Div([html.A(
'Download Contributions data',
id='download-link-contrib',
download='Contributions_data.csv',
href="",
target="_blank"
)]),
], style={'padding': 20}),
])])
])
], style={'font-family': 'Raleway'})])
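# Aside (illustrative sketch, not used by the app): the correlation- vs covariance-matrix note in the
# layout above corresponds to whether features are standardized before PCA. Assuming a numeric
# DataFrame `dff`, as used in the callbacks below:
# pca_corr = PCA().fit(StandardScaler().fit_transform(dff.values))  # correlation-matrix PCA
# pca_cov = PCA().fit(dff.values)                                   # covariance-matrix PCA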
# READ FILE
def parse_contents(contents, filename):
content_type, content_string = contents.split(',')
decoded = base64.b64decode(content_string)
try:
if 'csv' in filename:
# Assume that the user uploaded a CSV file
df = pd.read_csv(io.StringIO(decoded.decode('utf-8')))
            df = df.fillna(0)
        elif 'xls' in filename:
            # Assume that the user uploaded an Excel file
            df = pd.read_excel(io.BytesIO(decoded))
            df = df.fillna(0)
        elif 'txt' in filename or 'tsv' in filename:
            # Assume that the user uploaded a whitespace-delimited text file
            df = pd.read_csv(io.StringIO(decoded.decode('utf-8')), delimiter=r'\s+')
            df = df.fillna(0)
except Exception as e:
print(e)
return html.Div([
'There was an error processing this file.'
])
return df
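# Usage sketch (hypothetical, not executed): dcc.Upload supplies `contents` as a base64 data-URL of the
# form 'data:text/csv;base64,<encoded bytes>', which is why parse_contents splits off the prefix at the
# comma before decoding.
# example_contents = 'data:text/csv;base64,' + base64.b64encode(b'a,b\n1,2\n').decode()
# example_df = parse_contents(example_contents, 'example.csv')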
@app.callback(Output('csv-data', 'data'),
[Input('data-table-upload', 'contents')],
[State('data-table-upload', 'filename')])
def parse_uploaded_file(contents, filename):
if not filename:
return dash.no_update
df = parse_contents(contents, filename)
    df = df.fillna(0)
return df.to_json(date_format='iso', orient='split')
@app.callback(Output('PC-Var-plot', 'figure'),
[Input('outlier-value', 'value'),
Input('matrix-type-scree', 'value'),
Input('csv-data', 'data')],
)
def update_graph_stat(outlier, matrix_type, data):
traces = []
if not data:
return dash.no_update
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
if outlier == 'No' and matrix_type == 'Correlation':
features1 = dff.columns
features = list(features1)
x = dff.loc[:, features].values
# Separating out the target (if any)
# Standardizing the features to {mean, variance} = {0, 1}
x = StandardScaler().fit_transform(x)
pca = PCA(n_components=len(features))
principalComponents = pca.fit_transform(x)
principalDf = pd.DataFrame(data=principalComponents
, columns=['PC' + str(i + 1) for i in range(len(features))])
finalDf = pd.concat([df[[df.columns[0]]], principalDf], axis=1)
loading = pca.components_.T * np.sqrt(pca.explained_variance_)
loading_df = pd.DataFrame(data=loading[0:, 0:], index=features,
columns=['PC' + str(i + 1) for i in range(loading.shape[1])])
Var = pca.explained_variance_ratio_
PC_df = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features))], columns=['Principal Component'])
Var_df = pd.DataFrame(data=Var, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum = Var_df.cumsum()
Var_dff = pd.concat([PC_df, (Var_cumsum * 100)], axis=1)
data = Var_dff
elif outlier == 'Yes' and matrix_type == 'Correlation':
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier = outlier_dff.columns
features_outlier = list(features1_outlier)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
x_outlier = outlier_dff.loc[:, features_outlier].values
# Standardizing the features
x_outlier = StandardScaler().fit_transform(x_outlier)
pca_outlier = PCA(n_components=len(features_outlier))
principalComponents_outlier = pca_outlier.fit_transform(x_outlier)
principalDf_outlier = pd.DataFrame(data=principalComponents_outlier
, columns=['PC' + str(i + 1) for i in range(len(features_outlier))])
        # combining principal components and target
finalDf_outlier = pd.concat([outlier_names, principalDf_outlier], axis=1)
# calculating loading
loading_outlier = pca_outlier.components_.T * np.sqrt(pca_outlier.explained_variance_)
loading_df_outlier = pd.DataFrame(data=loading_outlier[0:, 0:], index=features_outlier,
columns=['PC' + str(i + 1) for i in range(loading_outlier.shape[1])])
Var_outlier = pca_outlier.explained_variance_ratio_
PC_df_outlier = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_outlier))],
columns=['Principal Component'])
Var_df_outlier = pd.DataFrame(data=Var_outlier, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum_outlier = Var_df_outlier.cumsum()
Var_dff_outlier = pd.concat([PC_df_outlier, (Var_cumsum_outlier * 100)], axis=1)
data = Var_dff_outlier
elif outlier == 'No' and matrix_type == 'Covariance':
features1_covar = dff.columns
features_covar = list(features1_covar)
x = dff.loc[:, features_covar].values
pca_covar = PCA(n_components=len(features_covar))
principalComponents_covar = pca_covar.fit_transform(x)
principalDf_covar = pd.DataFrame(data=principalComponents_covar
, columns=['PC' + str(i + 1) for i in range(len(features_covar))])
        finalDf_covar = pd.concat([df[[df.columns[0]]], principalDf_covar], axis=1)
#%%
from pymaid_creds import url, name, password, token
import pymaid
rm = pymaid.CatmaidInstance(url, token, name, password)
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
import pandas as pd
import numpy.random as random
import gzip
import csv
import connectome_tools.celltype as ct
import connectome_tools.process_matrix as pm
import connectome_tools.process_graph as pg
from tqdm import tqdm
from joblib import Parallel, delayed
import networkx as nx
# allows text to be editable in Illustrator
plt.rcParams['pdf.fonttype'] = 42
plt.rcParams['ps.fonttype'] = 42
# font settings
plt.rcParams['font.size'] = 5
plt.rcParams['font.family'] = 'arial'
# load pairs
pairs = pm.Promat.get_pairs()
ipsi_pair_ids = pm.Promat.load_pairs_from_annotation('mw ipsilateral axon', pairs, return_type='all_pair_ids')
bilateral_pair_ids = pm.Promat.load_pairs_from_annotation('mw bilateral axon', pairs, return_type='all_pair_ids')
contra_pair_ids = pm.Promat.load_pairs_from_annotation('mw contralateral axon', pairs, return_type='all_pair_ids')
dVNC_pair_ids = pm.Promat.load_pairs_from_annotation('mw dVNC', pairs, return_type='all_pair_ids')
dSEZ_pair_ids = pm.Promat.load_pairs_from_annotation('mw dSEZ', pairs, return_type='all_pair_ids')
RGN_pair_ids = pm.Promat.load_pairs_from_annotation('mw RGN', pairs, return_type='all_pair_ids')
sensories_pair_ids = [pm.Promat.load_pairs_from_annotation(x, pairs, return_type='all_pair_ids') for x in pymaid.get_annotated('mw brain inputs').name]
all_sensories = [x for sublist in sensories_pair_ids for x in sublist]
# %%
# EXPERIMENT 1: removing edges from contralateral and bilateral neurons -> effect on path length?
# load previously generated paired edge list
all_edges_combined = pd.read_csv('data/edges_threshold/ad_all-paired-edges.csv', index_col=0)
# iterations for random edge removal as control
n_init = 40
# excise edges and generate graphs
e_contra_contra, e_contra_contra_control = pg.Prograph.excise_edge_experiment(all_edges_combined, np.setdiff1d(contra_pair_ids, all_sensories), 'contralateral', n_init, seed=0, exclude_nodes=(all_sensories+dVNC_pair_ids))
e_bi_contra, e_bi_contra_control = pg.Prograph.excise_edge_experiment(all_edges_combined, np.setdiff1d(bilateral_pair_ids, all_sensories), 'contralateral', n_init, seed=0, exclude_nodes=(all_sensories+dVNC_pair_ids))
e_bi_ipsi, e_bi_ipsi_control = pg.Prograph.excise_edge_experiment(all_edges_combined, np.setdiff1d(bilateral_pair_ids, all_sensories), 'ipsilateral', n_init, seed=0, exclude_nodes=(all_sensories+dVNC_pair_ids))
e_all_contra, e_all_contra_control = pg.Prograph.excise_edge_experiment(all_edges_combined, np.setdiff1d(bilateral_pair_ids + contra_pair_ids, all_sensories), 'contralateral', n_init, seed=0, exclude_nodes=(all_sensories+dVNC_pair_ids))
# %%
# this chunk is incomplete
# write all graphs to graphml
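# A minimal sketch of the missing write step (assumption: `shuffled` is a list of networkx DiGraphs to
# persist, one file per iteration, matching the read below):
# import os
# os.makedirs('interhemisphere/csv/shuffled_graphs', exist_ok=True)
# for i, G_shuffled in enumerate(shuffled):
#     nx.write_graphml(G_shuffled, f'interhemisphere/csv/shuffled_graphs/iteration-{i}.graphml')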
# read all graphs from graphml
graph = pg.Analyze_Nx_G(all_edges_combined, graph_type='directed')
shuffled_graphs = Parallel(n_jobs=-1)(delayed(nx.readwrite.graphml.read_graphml)(f'interhemisphere/csv/shuffled_graphs/iteration-{i}.graphml', node_type=int, edge_key_type=str) for i in tqdm(range(n_init)))
shuffled_graphs = [pg.Analyze_Nx_G(edges=x.edges, graph=x) for x in shuffled_graphs]
# %%
# generate and save paths
cutoff=5
# generate and save paths for experimental
save_path = [f'interhemisphere/csv/paths/excised-graph_all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-contra-contra',
f'interhemisphere/csv/paths/excised-graph_all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-bilateral-contra',
f'interhemisphere/csv/paths/excised-graph_all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-bilateral-ipsi',
f'interhemisphere/csv/paths/excised-graph_all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-all-contra']
experimental = [e_contra_contra, e_bi_contra, e_bi_ipsi, e_all_contra]
Parallel(n_jobs=-1)(delayed(pg.Prograph.generate_save_simple_paths)(experimental[i].G, all_sensories, dVNC_pair_ids, cutoff=cutoff, save_path=save_path[i]) for i in tqdm((range(len(experimental)))))
# generate and save paths for controls
save_path = f'interhemisphere/csv/paths/excised-graph_all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-contra-contra_CONTROL-N'
Parallel(n_jobs=-1)(delayed(pg.Prograph.generate_save_simple_paths)(e_contra_contra_control[i].G, all_sensories, dVNC_pair_ids, cutoff=cutoff, save_path=f'{save_path}{i}') for i in tqdm((range(n_init))))
save_path = f'interhemisphere/csv/paths/excised-graph_all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-bilateral-contra_CONTROL-N'
Parallel(n_jobs=-1)(delayed(pg.Prograph.generate_save_simple_paths)(e_bi_contra_control[i].G, all_sensories, dVNC_pair_ids, cutoff=cutoff, save_path=f'{save_path}{i}') for i in tqdm((range(n_init))))
save_path = f'interhemisphere/csv/paths/excised-graph_all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-bilateral-ipsi_CONTROL-N'
Parallel(n_jobs=-1)(delayed(pg.Prograph.generate_save_simple_paths)(e_bi_ipsi_control[i].G, all_sensories, dVNC_pair_ids, cutoff=cutoff, save_path=f'{save_path}{i}') for i in tqdm((range(n_init))))
save_path = f'interhemisphere/csv/paths/excised-graph_all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-all-contra_CONTROL-N'
Parallel(n_jobs=-1)(delayed(pg.Prograph.generate_save_simple_paths)(e_all_contra_control[i].G, all_sensories, dVNC_pair_ids, cutoff=cutoff, save_path=f'{save_path}{i}') for i in tqdm((range(n_init))))
# %%
# analyze paths: total and count per # hops
def process_paths(excise_paths, control_paths, edges_removed):
excise_count = len(excise_paths)
control_counts = [len(x) for x in control_paths]
path_counts_data = []
for row in zip(control_counts, [f'control-{edges_removed}']*len(control_counts)):
path_counts_data.append(row)
path_counts_data.append([excise_count, f'excised-{edges_removed}'])
path_counts_data = pd.DataFrame(path_counts_data, columns=['count', 'condition'])
path_counts_data.to_csv(f'interhemisphere/csv/paths/processed/excised_graph_{edges_removed}.csv')
# count per # hops
excise_path_counts = [len(x) for x in excise_paths]
control_path_counts = [[len(x) for x in path] for path in control_paths]
path_counts_length_data = []
for i, path_length in enumerate(control_path_counts):
for row in zip(path_length, [f'control-{edges_removed}']*len(path_length), [i]*len(path_length)):
path_counts_length_data.append(row)
for row in zip(excise_path_counts, [f'excised-{edges_removed}']*len(excise_path_counts), [0]*len(excise_path_counts)):
path_counts_length_data.append(row)
path_counts_length_data = pd.DataFrame(path_counts_length_data, columns=['path_length', 'condition', 'N'])
path_counts_length_data['value'] = [1]*len(path_counts_length_data) # just adding [1] so that groupby has something to count
path_counts_length_data_counts = path_counts_length_data.groupby(['condition', 'N', 'path_length']).count()
path_counts_length_data_counts.to_csv(f'interhemisphere/csv/paths/processed/excised_graph_{edges_removed}_path_lengths.csv')
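# Aside (equivalent sketch): the dummy 'value' column above is only there so that groupby().count()
# has something to count; groupby().size() would give the same result directly:
# counts = path_counts_length_data.groupby(['condition', 'N', 'path_length']).size().rename('value')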
cutoff=5
n_init = 40
excise_Cc_paths = pg.Prograph.open_simple_paths(f'interhemisphere/csv/paths/excised-graph_all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-contra-contra.csv.gz')
control_Cc_paths = Parallel(n_jobs=-1)(delayed(pg.Prograph.open_simple_paths)(f'interhemisphere/csv/paths/excised-graph_all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-contra-contra_CONTROL-N{i}.csv.gz') for i in tqdm(range(n_init)))
process_paths(excise_Cc_paths, control_Cc_paths, edges_removed='Contra-contra')
excise_Bc_paths = pg.Prograph.open_simple_paths(f'interhemisphere/csv/paths/excised-graph_all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-bilateral-contra.csv.gz')
control_Bc_paths = Parallel(n_jobs=-1)(delayed(pg.Prograph.open_simple_paths)(f'interhemisphere/csv/paths/excised-graph_all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-bilateral-contra_CONTROL-N{i}.csv.gz') for i in tqdm(range(n_init)))
process_paths(excise_Bc_paths, control_Bc_paths, edges_removed='Bilateral-contra')
excise_Bi_paths = pg.Prograph.open_simple_paths(f'interhemisphere/csv/paths/excised-graph_all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-bilateral-ipsi.csv.gz')
control_Bi_paths = Parallel(n_jobs=-1)(delayed(pg.Prograph.open_simple_paths)(f'interhemisphere/csv/paths/excised-graph_all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-bilateral-ipsi_CONTROL-N{i}.csv.gz') for i in tqdm(range(n_init)))
process_paths(excise_Bi_paths, control_Bi_paths, edges_removed='Bilateral-ipsi')
excise_Ac_paths = pg.Prograph.open_simple_paths(f'interhemisphere/csv/paths/excised-graph_all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-all-contra.csv.gz')
control_Ac_paths = Parallel(n_jobs=-1)(delayed(pg.Prograph.open_simple_paths)(f'interhemisphere/csv/paths/excised-graph_all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-all-contra_CONTROL-N{i}.csv.gz') for i in tqdm(range(n_init)))
process_paths(excise_Ac_paths, control_Ac_paths, edges_removed='All-contra')
# wildtype paths
graph_paths = pg.Prograph.open_simple_paths(f'interhemisphere/csv/paths/all_paths_sens-to-dVNC_cutoff{cutoff}.csv.gz')
excise_count = len(graph_paths)
path_counts_data = []
path_counts_data.append([excise_count, f'wildtype'])
path_counts_data = pd.DataFrame(path_counts_data, columns=['count', 'condition'])
path_counts_data.to_csv(f'interhemisphere/csv/paths/processed/wildtype.csv')
path_counts_length_data = []
excise_path_counts = [len(x) for x in graph_paths]
for row in zip(excise_path_counts, [f'wildtype']*len(excise_path_counts), [0]*len(excise_path_counts)):
path_counts_length_data.append(row)
path_counts_length_data = pd.DataFrame(path_counts_length_data, columns=['path_length', 'condition', 'N'])
path_counts_length_data['value'] = [1]*len(path_counts_length_data) # just adding [1] so that groupby has something to count
path_counts_length_data_counts = path_counts_length_data.groupby(['condition', 'N', 'path_length']).count()
path_counts_length_data_counts.to_csv(f'interhemisphere/csv/paths/processed/wildtype_path_lengths.csv')
# %%
##########
# EXPERIMENT 2: removing a random sample of ipsilateral vs. contralateral edges; effect on sensory-to-dVNC paths
#
# load previously generated paired edge list
all_edges_combined = pd.read_csv('data/edges_threshold/ad_all-paired-edges.csv', index_col=0)
# iterations for random edge removal as control
n_init = 8
# excise edges and generate graphs
random_ipsi500, random_contra500 = pg.Prograph.excise_ipsi_contra_edge_experiment(all_edges_combined, 500, n_init, seed=0, exclude_nodes=(all_sensories+dVNC_pair_ids))
random_ipsi1000, random_contra1000 = pg.Prograph.excise_ipsi_contra_edge_experiment(all_edges_combined, 1000, n_init, seed=0, exclude_nodes=(all_sensories+dVNC_pair_ids))
random_ipsi2000, random_contra2000 = pg.Prograph.excise_ipsi_contra_edge_experiment(all_edges_combined, 2000, n_init, seed=0, exclude_nodes=(all_sensories+dVNC_pair_ids))
random_ipsi4000, random_contra4000 = pg.Prograph.excise_ipsi_contra_edge_experiment(all_edges_combined, 4000, n_init, seed=0, exclude_nodes=(all_sensories+dVNC_pair_ids))
# %%
# generate and save paths
cutoff=5
# generate and save paths for controls
save_path = f'interhemisphere/csv/paths/random-ipsi-contra-edges/all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-ipsi-500-N'
Parallel(n_jobs=-1)(delayed(pg.Prograph.generate_save_simple_paths)(random_ipsi500[i].G, all_sensories, dVNC_pair_ids, cutoff=cutoff, save_path=f'{save_path}{i}') for i in tqdm((range(n_init))))
save_path = f'interhemisphere/csv/paths/random-ipsi-contra-edges/all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-contra-500-N'
Parallel(n_jobs=-1)(delayed(pg.Prograph.generate_save_simple_paths)(random_contra500[i].G, all_sensories, dVNC_pair_ids, cutoff=cutoff, save_path=f'{save_path}{i}') for i in tqdm((range(n_init))))
save_path = f'interhemisphere/csv/paths/random-ipsi-contra-edges/all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-ipsi-1000-N'
Parallel(n_jobs=-1)(delayed(pg.Prograph.generate_save_simple_paths)(random_ipsi1000[i].G, all_sensories, dVNC_pair_ids, cutoff=cutoff, save_path=f'{save_path}{i}') for i in tqdm((range(n_init))))
save_path = f'interhemisphere/csv/paths/random-ipsi-contra-edges/all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-contra-1000-N'
Parallel(n_jobs=-1)(delayed(pg.Prograph.generate_save_simple_paths)(random_contra1000[i].G, all_sensories, dVNC_pair_ids, cutoff=cutoff, save_path=f'{save_path}{i}') for i in tqdm((range(n_init))))
save_path = f'interhemisphere/csv/paths/random-ipsi-contra-edges/all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-ipsi-2000-N'
Parallel(n_jobs=-1)(delayed(pg.Prograph.generate_save_simple_paths)(random_ipsi2000[i].G, all_sensories, dVNC_pair_ids, cutoff=cutoff, save_path=f'{save_path}{i}') for i in tqdm((range(n_init))))
save_path = f'interhemisphere/csv/paths/random-ipsi-contra-edges/all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-contra-2000-N'
Parallel(n_jobs=-1)(delayed(pg.Prograph.generate_save_simple_paths)(random_contra2000[i].G, all_sensories, dVNC_pair_ids, cutoff=cutoff, save_path=f'{save_path}{i}') for i in tqdm((range(n_init))))
save_path = f'interhemisphere/csv/paths/random-ipsi-contra-edges/all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-ipsi-4000-N'
Parallel(n_jobs=-1)(delayed(pg.Prograph.generate_save_simple_paths)(random_ipsi4000[i].G, all_sensories, dVNC_pair_ids, cutoff=cutoff, save_path=f'{save_path}{i}') for i in tqdm((range(n_init))))
save_path = f'interhemisphere/csv/paths/random-ipsi-contra-edges/all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-contra-4000-N'
Parallel(n_jobs=-1)(delayed(pg.Prograph.generate_save_simple_paths)(random_contra4000[i].G, all_sensories, dVNC_pair_ids, cutoff=cutoff, save_path=f'{save_path}{i}') for i in tqdm((range(n_init))))
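# Aside (refactor sketch, behaviour unchanged; uses the graph lists defined above):
# for count, ipsi_graphs, contra_graphs in [(500, random_ipsi500, random_contra500),
#                                           (1000, random_ipsi1000, random_contra1000),
#                                           (2000, random_ipsi2000, random_contra2000),
#                                           (4000, random_ipsi4000, random_contra4000)]:
#     for label, graphs in [('ipsi', ipsi_graphs), ('contra', contra_graphs)]:
#         save_path = f'interhemisphere/csv/paths/random-ipsi-contra-edges/all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-{label}-{count}-N'
#         Parallel(n_jobs=-1)(delayed(pg.Prograph.generate_save_simple_paths)(graphs[i].G, all_sensories, dVNC_pair_ids, cutoff=cutoff, save_path=f'{save_path}{i}') for i in tqdm(range(n_init)))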
# %%
# analyze paths: total and count per # hops
def process_paths_ipsi_contra(ipsi_paths, contra_paths, count_removed):
ipsi_counts = [len(x) for x in ipsi_paths]
contra_counts = [len(x) for x in contra_paths]
path_counts_data = []
for row in zip(ipsi_counts, [f'ipsi-{count_removed}']*len(ipsi_counts)):
path_counts_data.append(row)
for row in zip(contra_counts, [f'contra-{count_removed}']*len(contra_counts)):
path_counts_data.append(row)
path_counts_data = pd.DataFrame(path_counts_data, columns=['count', 'condition'])
path_counts_data.to_csv(f'interhemisphere/csv/paths/random-ipsi-contra-edges/processed/excised_graph_random-ipsi-contra_{count_removed}-removed.csv')
# count per # hops
ipsi_path_counts = [[len(x) for x in path] for path in ipsi_paths]
contra_path_counts = [[len(x) for x in path] for path in contra_paths]
path_counts_length_data = []
for i, path_length in enumerate(ipsi_path_counts):
for row in zip(path_length, [f'ipsi-{count_removed}']*len(path_length), [i]*len(path_length)):
path_counts_length_data.append(row)
for i, path_length in enumerate(contra_path_counts):
for row in zip(path_length, [f'contra-{count_removed}']*len(path_length), [i]*len(path_length)):
path_counts_length_data.append(row)
path_counts_length_data = pd.DataFrame(path_counts_length_data, columns=['path_length', 'condition', 'N'])
path_counts_length_data['value'] = [1]*len(path_counts_length_data) # just adding [1] so that groupby has something to count
path_counts_length_data_counts = path_counts_length_data.groupby(['condition', 'N', 'path_length']).count()
path_counts_length_data_counts.to_csv(f'interhemisphere/csv/paths/random-ipsi-contra-edges/processed/excised_graph_random-ipsi-contra_{count_removed}-removed_path-lengths.csv')
cutoff=5
n_init = 8
count_removed = 500
random_ipsi_paths = Parallel(n_jobs=-1)(delayed(pg.Prograph.open_simple_paths)(f'interhemisphere/csv/paths/random-ipsi-contra-edges/all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-ipsi-{count_removed}-N{i}.csv.gz') for i in tqdm(range(n_init)))
random_contra_paths = Parallel(n_jobs=-1)(delayed(pg.Prograph.open_simple_paths)(f'interhemisphere/csv/paths/random-ipsi-contra-edges/all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-contra-{count_removed}-N{i}.csv.gz') for i in tqdm(range(n_init)))
process_paths_ipsi_contra(random_ipsi_paths, random_contra_paths, count_removed)
count_removed = 1000
random_ipsi_paths = Parallel(n_jobs=-1)(delayed(pg.Prograph.open_simple_paths)(f'interhemisphere/csv/paths/random-ipsi-contra-edges/all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-ipsi-{count_removed}-N{i}.csv.gz') for i in tqdm(range(n_init)))
random_contra_paths = Parallel(n_jobs=-1)(delayed(pg.Prograph.open_simple_paths)(f'interhemisphere/csv/paths/random-ipsi-contra-edges/all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-contra-{count_removed}-N{i}.csv.gz') for i in tqdm(range(n_init)))
process_paths_ipsi_contra(random_ipsi_paths, random_contra_paths, count_removed)
count_removed = 2000
random_ipsi_paths = Parallel(n_jobs=-1)(delayed(pg.Prograph.open_simple_paths)(f'interhemisphere/csv/paths/random-ipsi-contra-edges/all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-ipsi-{count_removed}-N{i}.csv.gz') for i in tqdm(range(n_init)))
random_contra_paths = Parallel(n_jobs=-1)(delayed(pg.Prograph.open_simple_paths)(f'interhemisphere/csv/paths/random-ipsi-contra-edges/all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-contra-{count_removed}-N{i}.csv.gz') for i in tqdm(range(n_init)))
process_paths_ipsi_contra(random_ipsi_paths, random_contra_paths, count_removed)
count_removed = 4000
random_ipsi_paths = Parallel(n_jobs=-1)(delayed(pg.Prograph.open_simple_paths)(f'interhemisphere/csv/paths/random-ipsi-contra-edges/all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-ipsi-{count_removed}-N{i}.csv.gz') for i in tqdm(range(n_init)))
random_contra_paths = Parallel(n_jobs=-1)(delayed(pg.Prograph.open_simple_paths)(f'interhemisphere/csv/paths/random-ipsi-contra-edges/all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-contra-{count_removed}-N{i}.csv.gz') for i in tqdm(range(n_init)))
process_paths_ipsi_contra(random_ipsi_paths, random_contra_paths, count_removed)
# %%
##########
# EXPERIMENT 3: removing a random sample of ipsilateral vs. contralateral edges; effect on paths restricted to one hemisphere (left sensory -> left dVNC)
#
# load previously generated edge list (split by hemisphere)
all_edges_combined_split = pd.read_csv('data/edges_threshold/pairwise-threshold_ad_all-edges.csv', index_col=0)
left = pm.Promat.get_hemis('left')
right = pm.Promat.get_hemis('right')
# iterations for random edge removal as control
n_init = 8
dVNC = pymaid.get_skids_by_annotation('mw dVNC')
dVNC_left = list(np.intersect1d(dVNC, left))
dVNC_right = list(np.intersect1d(dVNC, right))
all_sensories = ct.Celltype_Analyzer.get_skids_from_meta_meta_annotation('mw brain sensory modalities')
all_sensories_left = list(np.intersect1d(all_sensories, left))
all_sensories_right = list(np.intersect1d(all_sensories, right))
# generate wildtype graph
split_graph = pg.Analyze_Nx_G(all_edges_combined_split, graph_type='directed', split_pairs=True)
# excise edges and generate graphs
random_ipsi500_left, random_ipsi500_right, random_contra500 = pg.Prograph.excise_ipsi_contra_edge_experiment(all_edges_combined_split, 500, n_init, 0, left, right, exclude_nodes=(all_sensories+dVNC), split_pairs=True)
random_ipsi1000_left, random_ipsi1000_right, random_contra1000 = pg.Prograph.excise_ipsi_contra_edge_experiment(all_edges_combined_split, 1000, n_init, 0, left, right, exclude_nodes=(all_sensories+dVNC), split_pairs=True)
random_ipsi2000_left, random_ipsi2000_right, random_contra2000 = pg.Prograph.excise_ipsi_contra_edge_experiment(all_edges_combined_split, 2000, n_init, 0, left, right, exclude_nodes=(all_sensories+dVNC), split_pairs=True)
random_ipsi4000_left, random_ipsi4000_right, random_contra4000 = pg.Prograph.excise_ipsi_contra_edge_experiment(all_edges_combined_split, 4000, n_init, 0, left, right, exclude_nodes=(all_sensories+dVNC), split_pairs=True)
random_ipsi8000_left, random_ipsi8000_right, random_contra8000 = pg.Prograph.excise_ipsi_contra_edge_experiment(all_edges_combined_split, 8000, n_init, 0, left, right, exclude_nodes=(all_sensories+dVNC), split_pairs=True)
#random_ipsi8764_left, random_ipsi8764_right, random_contra8764 = pg.Prograph.excise_ipsi_contra_edge_experiment(all_edges_combined_split, 8764, n_init, 0, left, right, exclude_nodes=(all_sensories+dVNC), split_pairs=True)
# %%
# generate and save paths
cutoff=5
# generate wildtype paths
save_path = f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC_cutoff{cutoff}_wildtype'
pg.Prograph.generate_save_simple_paths(split_graph.G, all_sensories_left, dVNC_left, cutoff=cutoff, save_path=save_path)
# generate and save paths
count = 500
save_path = f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-ipsi-left-{count}-N'
Parallel(n_jobs=-1)(delayed(pg.Prograph.generate_save_simple_paths)(random_ipsi500_left[i].G, all_sensories_left, dVNC_left, cutoff=cutoff, save_path=f'{save_path}{i}') for i in tqdm((range(n_init))))
save_path = f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-ipsi-right-{count}-N'
Parallel(n_jobs=-1)(delayed(pg.Prograph.generate_save_simple_paths)(random_ipsi500_right[i].G, all_sensories_left, dVNC_left, cutoff=cutoff, save_path=f'{save_path}{i}') for i in tqdm((range(n_init))))
save_path = f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-contra-{count}-N'
Parallel(n_jobs=-1)(delayed(pg.Prograph.generate_save_simple_paths)(random_contra500[i].G, all_sensories_left, dVNC_left, cutoff=cutoff, save_path=f'{save_path}{i}') for i in tqdm((range(n_init))))
count = 1000
save_path = f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-ipsi-left-{count}-N'
Parallel(n_jobs=-1)(delayed(pg.Prograph.generate_save_simple_paths)(random_ipsi1000_left[i].G, all_sensories_left, dVNC_left, cutoff=cutoff, save_path=f'{save_path}{i}') for i in tqdm((range(n_init))))
save_path = f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-ipsi-right-{count}-N'
Parallel(n_jobs=-1)(delayed(pg.Prograph.generate_save_simple_paths)(random_ipsi1000_right[i].G, all_sensories_left, dVNC_left, cutoff=cutoff, save_path=f'{save_path}{i}') for i in tqdm((range(n_init))))
save_path = f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-contra-{count}-N'
Parallel(n_jobs=-1)(delayed(pg.Prograph.generate_save_simple_paths)(random_contra1000[i].G, all_sensories_left, dVNC_left, cutoff=cutoff, save_path=f'{save_path}{i}') for i in tqdm((range(n_init))))
count = 2000
save_path = f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-ipsi-left-{count}-N'
Parallel(n_jobs=-1)(delayed(pg.Prograph.generate_save_simple_paths)(random_ipsi2000_left[i].G, all_sensories_left, dVNC_left, cutoff=cutoff, save_path=f'{save_path}{i}') for i in tqdm((range(n_init))))
save_path = f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-ipsi-right-{count}-N'
Parallel(n_jobs=-1)(delayed(pg.Prograph.generate_save_simple_paths)(random_ipsi2000_right[i].G, all_sensories_left, dVNC_left, cutoff=cutoff, save_path=f'{save_path}{i}') for i in tqdm((range(n_init))))
save_path = f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-contra-{count}-N'
Parallel(n_jobs=-1)(delayed(pg.Prograph.generate_save_simple_paths)(random_contra2000[i].G, all_sensories_left, dVNC_left, cutoff=cutoff, save_path=f'{save_path}{i}') for i in tqdm((range(n_init))))
count = 4000
save_path = f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-ipsi-left-{count}-N'
Parallel(n_jobs=-1)(delayed(pg.Prograph.generate_save_simple_paths)(random_ipsi4000_left[i].G, all_sensories_left, dVNC_left, cutoff=cutoff, save_path=f'{save_path}{i}') for i in tqdm((range(n_init))))
save_path = f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-ipsi-right-{count}-N'
Parallel(n_jobs=-1)(delayed(pg.Prograph.generate_save_simple_paths)(random_ipsi4000_right[i].G, all_sensories_left, dVNC_left, cutoff=cutoff, save_path=f'{save_path}{i}') for i in tqdm((range(n_init))))
save_path = f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-contra-{count}-N'
Parallel(n_jobs=-1)(delayed(pg.Prograph.generate_save_simple_paths)(random_contra4000[i].G, all_sensories_left, dVNC_left, cutoff=cutoff, save_path=f'{save_path}{i}') for i in tqdm((range(n_init))))
count = 8000
save_path = f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-ipsi-left-{count}-N'
Parallel(n_jobs=-1)(delayed(pg.Prograph.generate_save_simple_paths)(random_ipsi8000_left[i].G, all_sensories_left, dVNC_left, cutoff=cutoff, save_path=f'{save_path}{i}') for i in tqdm((range(n_init))))
save_path = f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-ipsi-right-{count}-N'
Parallel(n_jobs=-1)(delayed(pg.Prograph.generate_save_simple_paths)(random_ipsi8000_right[i].G, all_sensories_left, dVNC_left, cutoff=cutoff, save_path=f'{save_path}{i}') for i in tqdm((range(n_init))))
save_path = f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-contra-{count}-N'
Parallel(n_jobs=-1)(delayed(pg.Prograph.generate_save_simple_paths)(random_contra8000[i].G, all_sensories_left, dVNC_left, cutoff=cutoff, save_path=f'{save_path}{i}') for i in tqdm((range(n_init))))
'''
count = 8764
save_path = f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-ipsi-left-{count}-N'
Parallel(n_jobs=-1)(delayed(pg.Prograph.generate_save_simple_paths)(random_ipsi8764_left[i].G, all_sensories_left, dVNC_left, cutoff=cutoff, save_path=f'{save_path}{i}') for i in tqdm((range(n_init))))
save_path = f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-ipsi-right-{count}-N'
Parallel(n_jobs=-1)(delayed(pg.Prograph.generate_save_simple_paths)(random_ipsi8764_right[i].G, all_sensories_left, dVNC_left, cutoff=cutoff, save_path=f'{save_path}{i}') for i in tqdm((range(n_init))))
save_path = f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-contra-{count}-N'
Parallel(n_jobs=-1)(delayed(pg.Prograph.generate_save_simple_paths)(random_contra8764[i].G, all_sensories_left, dVNC_left, cutoff=cutoff, save_path=f'{save_path}{i}') for i in tqdm((range(n_init))))
'''
# %%
# analyze paths: total and count per # hops
def process_paths_ipsi_contra(ipsi_left_paths, ipsi_right_paths, contra_paths, count_removed):
ipsi_left_counts = [len(x) for x in ipsi_left_paths]
ipsi_right_counts = [len(x) for x in ipsi_right_paths]
contra_counts = [len(x) for x in contra_paths]
path_counts_data = []
for row in zip(ipsi_left_counts, [f'ipsi-left']*len(ipsi_left_counts), [count_removed]*len(ipsi_left_counts)):
path_counts_data.append(row)
for row in zip(ipsi_right_counts, [f'ipsi-right']*len(ipsi_right_counts), [count_removed]*len(ipsi_right_counts)):
path_counts_data.append(row)
for row in zip(contra_counts, [f'contra']*len(contra_counts), [count_removed]*len(contra_counts)):
path_counts_data.append(row)
path_counts_data = pd.DataFrame(path_counts_data, columns=['count', 'condition', 'edges_removed'])
path_counts_data.to_csv(f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/processed/excised_graph_random-ipsi-contra_{count_removed}-removed.csv')
# count per # hops
ipsi_left_path_counts = [[len(x) for x in path] for path in ipsi_left_paths]
ipsi_right_path_counts = [[len(x) for x in path] for path in ipsi_right_paths]
contra_path_counts = [[len(x) for x in path] for path in contra_paths]
path_counts_length_data = []
for i, path_length in enumerate(ipsi_left_path_counts):
for row in zip(path_length, [f'ipsi-left']*len(path_length), [count_removed]*len(path_length), [i]*len(path_length)):
path_counts_length_data.append(row)
for i, path_length in enumerate(ipsi_right_path_counts):
for row in zip(path_length, [f'ipsi-right']*len(path_length), [count_removed]*len(path_length), [i]*len(path_length)):
path_counts_length_data.append(row)
for i, path_length in enumerate(contra_path_counts):
for row in zip(path_length, [f'contra']*len(path_length), [count_removed]*len(path_length), [i]*len(path_length)):
path_counts_length_data.append(row)
path_counts_length_data = pd.DataFrame(path_counts_length_data, columns=['path_length', 'condition', 'edges_removed', 'N'])
path_counts_length_data['value'] = [1]*len(path_counts_length_data) # just adding [1] so that groupby has something to count
path_counts_length_data_counts = path_counts_length_data.groupby(['condition', 'N', 'edges_removed', 'path_length']).count()
path_counts_length_data_counts.to_csv(f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/processed/excised_graph_random-ipsi-contra_{count_removed}-removed_path-lengths.csv')
cutoff=5
n_init = 8
count_removed = 500
random_ipsi_left_paths = Parallel(n_jobs=-1)(delayed(pg.Prograph.open_simple_paths)(f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-ipsi-left-{count_removed}-N{i}.csv.gz') for i in tqdm(range(n_init)))
random_ipsi_right_paths = Parallel(n_jobs=-1)(delayed(pg.Prograph.open_simple_paths)(f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-ipsi-right-{count_removed}-N{i}.csv.gz') for i in tqdm(range(n_init)))
random_contra_paths = Parallel(n_jobs=-1)(delayed(pg.Prograph.open_simple_paths)(f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-contra-{count_removed}-N{i}.csv.gz') for i in tqdm(range(n_init)))
process_paths_ipsi_contra(random_ipsi_left_paths, random_ipsi_right_paths, random_contra_paths, count_removed)
count_removed = 1000
random_ipsi_left_paths = Parallel(n_jobs=-1)(delayed(pg.Prograph.open_simple_paths)(f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-ipsi-left-{count_removed}-N{i}.csv.gz') for i in tqdm(range(n_init)))
random_ipsi_right_paths = Parallel(n_jobs=-1)(delayed(pg.Prograph.open_simple_paths)(f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-ipsi-right-{count_removed}-N{i}.csv.gz') for i in tqdm(range(n_init)))
random_contra_paths = Parallel(n_jobs=-1)(delayed(pg.Prograph.open_simple_paths)(f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-contra-{count_removed}-N{i}.csv.gz') for i in tqdm(range(n_init)))
process_paths_ipsi_contra(random_ipsi_left_paths, random_ipsi_right_paths, random_contra_paths, count_removed)
count_removed = 2000
random_ipsi_left_paths = Parallel(n_jobs=-1)(delayed(pg.Prograph.open_simple_paths)(f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-ipsi-left-{count_removed}-N{i}.csv.gz') for i in tqdm(range(n_init)))
random_ipsi_right_paths = Parallel(n_jobs=-1)(delayed(pg.Prograph.open_simple_paths)(f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-ipsi-right-{count_removed}-N{i}.csv.gz') for i in tqdm(range(n_init)))
random_contra_paths = Parallel(n_jobs=-1)(delayed(pg.Prograph.open_simple_paths)(f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-contra-{count_removed}-N{i}.csv.gz') for i in tqdm(range(n_init)))
process_paths_ipsi_contra(random_ipsi_left_paths, random_ipsi_right_paths, random_contra_paths, count_removed)
count_removed = 4000
random_ipsi_left_paths = Parallel(n_jobs=-1)(delayed(pg.Prograph.open_simple_paths)(f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-ipsi-left-{count_removed}-N{i}.csv.gz') for i in tqdm(range(n_init)))
random_ipsi_right_paths = Parallel(n_jobs=-1)(delayed(pg.Prograph.open_simple_paths)(f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-ipsi-right-{count_removed}-N{i}.csv.gz') for i in tqdm(range(n_init)))
random_contra_paths = Parallel(n_jobs=-1)(delayed(pg.Prograph.open_simple_paths)(f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-contra-{count_removed}-N{i}.csv.gz') for i in tqdm(range(n_init)))
process_paths_ipsi_contra(random_ipsi_left_paths, random_ipsi_right_paths, random_contra_paths, count_removed)
count_removed = 8000
random_ipsi_left_paths = Parallel(n_jobs=-1)(delayed(pg.Prograph.open_simple_paths)(f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-ipsi-left-{count_removed}-N{i}.csv.gz') for i in tqdm(range(n_init)))
random_ipsi_right_paths = Parallel(n_jobs=-1)(delayed(pg.Prograph.open_simple_paths)(f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-ipsi-right-{count_removed}-N{i}.csv.gz') for i in tqdm(range(n_init)))
random_contra_paths = Parallel(n_jobs=-1)(delayed(pg.Prograph.open_simple_paths)(f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-contra-{count_removed}-N{i}.csv.gz') for i in tqdm(range(n_init)))
process_paths_ipsi_contra(random_ipsi_left_paths, random_ipsi_right_paths, random_contra_paths, count_removed)
# wildtype paths
graph_paths = pg.Prograph.open_simple_paths(f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC_cutoff{cutoff}_wildtype.csv.gz')
wt_count = len(graph_paths)
path_counts_data = []
path_counts_data.append([wt_count, f'wildtype', 0])
path_counts_data = pd.DataFrame(path_counts_data, columns=['count', 'condition', 'edges_removed'])
path_counts_data.to_csv(f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/processed/wildtype.csv')
path_counts_length_data = []
path_counts = [len(x) for x in graph_paths]
for row in zip(path_counts, [f'wildtype']*len(path_counts), [0]*len(path_counts), [0]*len(path_counts)):
path_counts_length_data.append(row)
path_counts_length_data = pd.DataFrame(path_counts_length_data, columns=['path_length', 'condition', 'edges_removed', 'N'])
path_counts_length_data['value'] = [1]*len(path_counts_length_data) # just adding [1] so that groupby has something to count
path_counts_length_data_counts = path_counts_length_data.groupby(['condition', 'N', 'path_length']).count()
path_counts_length_data_counts.to_csv(f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/processed/wildtype_path_lengths.csv')
# %%
# plot total paths per condition from left -> left paths
total_paths = pd.concat([pd.read_csv('interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/processed/excised_graph_random-ipsi-contra_500-removed.csv', index_col=0),
pd.read_csv('interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/processed/excised_graph_random-ipsi-contra_1000-removed.csv', index_col=0),
pd.read_csv('interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/processed/excised_graph_random-ipsi-contra_2000-removed.csv', index_col=0),
pd.read_csv('interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/processed/excised_graph_random-ipsi-contra_4000-removed.csv', index_col=0),
pd.read_csv('interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/processed/excised_graph_random-ipsi-contra_8000-removed.csv', index_col=0)], axis=0)
wildtype = pd.read_csv('interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/processed/wildtype.csv', index_col=0)
total_paths = pd.concat([total_paths, pd.DataFrame([[wildtype['count'].values[0], 'contra', 0]], columns = total_paths.columns),
pd.DataFrame([[wildtype['count'].values[0], 'ipsi-left', 0]], columns = total_paths.columns),
pd.DataFrame([[wildtype['count'].values[0], 'ipsi-right', 0]], columns = total_paths.columns)], axis=0)
# plot raw number of paths (all lengths), after removing edges of different types
fig, ax = plt.subplots(1,1, figsize=(2,2))
sns.lineplot(data = total_paths, x='edges_removed', y='count', hue='condition', err_style='bars', linewidth=0.75, err_kws={'elinewidth':0.75}, ax=ax)
ax.set(ylim=(0, 1100000))
plt.savefig('interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/processed/path-counts_left-to-left_removing-edge-types.pdf', format='pdf', bbox_inches='tight')
# normalized plot of all paths (all lengths), after removing edges of different types
max_control_paths = total_paths[total_paths.edges_removed==0].iloc[0, 0]
total_paths.loc[:, 'count'] = total_paths.loc[:, 'count']/max_control_paths
fig, ax = plt.subplots(1,1, figsize=(2,2))
sns.lineplot(data = total_paths, x='edges_removed', y='count', hue='condition', err_style='bars', linewidth=0.75, err_kws={'elinewidth':0.75}, ax=ax)
ax.set(ylim=(0, 1.05))
plt.savefig('interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/processed/path-counts_left-to-left_removing-edge-types_normalized.pdf', format='pdf', bbox_inches='tight')
# plot total paths per path length from left -> left paths
total_paths = pd.concat([pd.read_csv('interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/processed/excised_graph_random-ipsi-contra_500-removed_path-lengths.csv'),
pd.read_csv('interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/processed/excised_graph_random-ipsi-contra_1000-removed_path-lengths.csv'),
pd.read_csv('interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/processed/excised_graph_random-ipsi-contra_2000-removed_path-lengths.csv'),
pd.read_csv('interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/processed/excised_graph_random-ipsi-contra_4000-removed_path-lengths.csv'),
pd.read_csv('interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/processed/excised_graph_random-ipsi-contra_8000-removed_path-lengths.csv')], axis=0)
wildtype = pd.read_csv('interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/processed/wildtype_path_lengths.csv')
total_paths_normalized = []
for i in range(len(total_paths.index)):
length = total_paths.iloc[i].path_length
row = [total_paths.iloc[i].condition, total_paths.iloc[i].N,
total_paths.iloc[i].edges_removed, total_paths.iloc[i].path_length,
total_paths.iloc[i].value/wildtype[wildtype.path_length==length].value.values[0]] # normalized path counts by wildtype
total_paths_normalized.append(row)
total_paths_normalized = pd.DataFrame(total_paths_normalized, columns = total_paths.columns)
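# Aside (equivalent sketch): the normalization loop above could also be written as a merge on
# path_length, dividing each count by the wildtype count for that path length:
# merged = total_paths.merge(wildtype[['path_length', 'value']], on='path_length', suffixes=('', '_wt'))
# merged['value'] = merged['value'] / merged['value_wt']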
for removed in [500, 1000, 2000, 4000, 8000]:
fig, ax = plt.subplots(1,1, figsize=(2,2))
sns.lineplot(data=total_paths_normalized[total_paths_normalized.edges_removed==removed], x='path_length', y='value', hue='condition', err_style='bars', linewidth=0.75, err_kws={'elinewidth':0.75}, ax=ax)
ax.set(ylim=(0, 1.1))
plt.savefig(f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/processed/path-length-counts_left-to-left_removing-{removed}-edge-types.pdf', format='pdf', bbox_inches='tight')
# %%
# how many nodes are in each type of path?
# %%
##########
# EXPERIMENT 4: removing a random sample of ipsilateral vs. contralateral edges; effect on paths crossing from one hemisphere to the other (left sensory -> right dVNC)
#
# load previously generated edge list (split by hemisphere)
all_edges_combined_split = pd.read_csv('data/edges_threshold/pairwise-threshold_ad_all-edges.csv', index_col=0)
left = pm.Promat.get_hemis('left')
right = pm.Promat.get_hemis('right')
# iterations for random edge removal as control
n_init = 8
dVNC = pymaid.get_skids_by_annotation('mw dVNC')
dVNC_left = list(np.intersect1d(dVNC, left))
dVNC_right = list(np.intersect1d(dVNC, right))
all_sensories = ct.Celltype_Analyzer.get_skids_from_meta_meta_annotation('mw brain sensory modalities')
all_sensories_left = list(np.intersect1d(all_sensories, left))
all_sensories_right = list(np.intersect1d(all_sensories, right))
# generate wildtype graph
split_graph = pg.Analyze_Nx_G(all_edges_combined_split, graph_type='directed', split_pairs=True)
# excise edges and generate graphs
random_ipsi500_left, random_ipsi500_right, random_contra500 = pg.Prograph.excise_ipsi_contra_edge_experiment(all_edges_combined_split, 500, n_init, 0, left, right, exclude_nodes=(all_sensories+dVNC), split_pairs=True)
random_ipsi1000_left, random_ipsi1000_right, random_contra1000 = pg.Prograph.excise_ipsi_contra_edge_experiment(all_edges_combined_split, 1000, n_init, 0, left, right, exclude_nodes=(all_sensories+dVNC), split_pairs=True)
random_ipsi2000_left, random_ipsi2000_right, random_contra2000 = pg.Prograph.excise_ipsi_contra_edge_experiment(all_edges_combined_split, 2000, n_init, 0, left, right, exclude_nodes=(all_sensories+dVNC), split_pairs=True)
random_ipsi4000_left, random_ipsi4000_right, random_contra4000 = pg.Prograph.excise_ipsi_contra_edge_experiment(all_edges_combined_split, 4000, n_init, 0, left, right, exclude_nodes=(all_sensories+dVNC), split_pairs=True)
random_ipsi8000_left, random_ipsi8000_right, random_contra8000 = pg.Prograph.excise_ipsi_contra_edge_experiment(all_edges_combined_split, 8000, n_init, 0, left, right, exclude_nodes=(all_sensories+dVNC), split_pairs=True)
# %%
# generate and save paths
cutoff=5
# generate wildtype paths
save_path = f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC-right_cutoff{cutoff}_wildtype'
pg.Prograph.generate_save_simple_paths(split_graph.G, all_sensories_left, dVNC_right, cutoff=cutoff, save_path=save_path)
# generate and save paths
count = 500
save_path = f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC-right_cutoff{cutoff}_minus-edges-ipsi-left-{count}-N'
Parallel(n_jobs=-1)(delayed(pg.Prograph.generate_save_simple_paths)(random_ipsi500_left[i].G, all_sensories_left, dVNC_right, cutoff=cutoff, save_path=f'{save_path}{i}') for i in tqdm((range(n_init))))
save_path = f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC-right_cutoff{cutoff}_minus-edges-ipsi-right-{count}-N'
Parallel(n_jobs=-1)(delayed(pg.Prograph.generate_save_simple_paths)(random_ipsi500_right[i].G, all_sensories_left, dVNC_right, cutoff=cutoff, save_path=f'{save_path}{i}') for i in tqdm((range(n_init))))
save_path = f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC-right_cutoff{cutoff}_minus-edges-contra-{count}-N'
Parallel(n_jobs=-1)(delayed(pg.Prograph.generate_save_simple_paths)(random_contra500[i].G, all_sensories_left, dVNC_right, cutoff=cutoff, save_path=f'{save_path}{i}') for i in tqdm((range(n_init))))
count = 1000
save_path = f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC-right_cutoff{cutoff}_minus-edges-ipsi-left-{count}-N'
Parallel(n_jobs=-1)(delayed(pg.Prograph.generate_save_simple_paths)(random_ipsi1000_left[i].G, all_sensories_left, dVNC_right, cutoff=cutoff, save_path=f'{save_path}{i}') for i in tqdm((range(n_init))))
save_path = f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC-right_cutoff{cutoff}_minus-edges-ipsi-right-{count}-N'
Parallel(n_jobs=-1)(delayed(pg.Prograph.generate_save_simple_paths)(random_ipsi1000_right[i].G, all_sensories_left, dVNC_right, cutoff=cutoff, save_path=f'{save_path}{i}') for i in tqdm((range(n_init))))
save_path = f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC-right_cutoff{cutoff}_minus-edges-contra-{count}-N'
Parallel(n_jobs=-1)(delayed(pg.Prograph.generate_save_simple_paths)(random_contra1000[i].G, all_sensories_left, dVNC_right, cutoff=cutoff, save_path=f'{save_path}{i}') for i in tqdm((range(n_init))))
count = 2000
save_path = f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC-right_cutoff{cutoff}_minus-edges-ipsi-left-{count}-N'
Parallel(n_jobs=-1)(delayed(pg.Prograph.generate_save_simple_paths)(random_ipsi2000_left[i].G, all_sensories_left, dVNC_right, cutoff=cutoff, save_path=f'{save_path}{i}') for i in tqdm((range(n_init))))
save_path = f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC-right_cutoff{cutoff}_minus-edges-ipsi-right-{count}-N'
Parallel(n_jobs=-1)(delayed(pg.Prograph.generate_save_simple_paths)(random_ipsi2000_right[i].G, all_sensories_left, dVNC_right, cutoff=cutoff, save_path=f'{save_path}{i}') for i in tqdm((range(n_init))))
save_path = f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC-right_cutoff{cutoff}_minus-edges-contra-{count}-N'
Parallel(n_jobs=-1)(delayed(pg.Prograph.generate_save_simple_paths)(random_contra2000[i].G, all_sensories_left, dVNC_right, cutoff=cutoff, save_path=f'{save_path}{i}') for i in tqdm((range(n_init))))
count = 4000
save_path = f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC-right_cutoff{cutoff}_minus-edges-ipsi-left-{count}-N'
Parallel(n_jobs=-1)(delayed(pg.Prograph.generate_save_simple_paths)(random_ipsi4000_left[i].G, all_sensories_left, dVNC_right, cutoff=cutoff, save_path=f'{save_path}{i}') for i in tqdm((range(n_init))))
save_path = f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC-right_cutoff{cutoff}_minus-edges-ipsi-right-{count}-N'
Parallel(n_jobs=-1)(delayed(pg.Prograph.generate_save_simple_paths)(random_ipsi4000_right[i].G, all_sensories_left, dVNC_right, cutoff=cutoff, save_path=f'{save_path}{i}') for i in tqdm((range(n_init))))
save_path = f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC-right_cutoff{cutoff}_minus-edges-contra-{count}-N'
Parallel(n_jobs=-1)(delayed(pg.Prograph.generate_save_simple_paths)(random_contra4000[i].G, all_sensories_left, dVNC_right, cutoff=cutoff, save_path=f'{save_path}{i}') for i in tqdm((range(n_init))))
count = 8000
save_path = f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC-right_cutoff{cutoff}_minus-edges-ipsi-left-{count}-N'
Parallel(n_jobs=-1)(delayed(pg.Prograph.generate_save_simple_paths)(random_ipsi8000_left[i].G, all_sensories_left, dVNC_right, cutoff=cutoff, save_path=f'{save_path}{i}') for i in tqdm((range(n_init))))
save_path = f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC-right_cutoff{cutoff}_minus-edges-ipsi-right-{count}-N'
Parallel(n_jobs=-1)(delayed(pg.Prograph.generate_save_simple_paths)(random_ipsi8000_right[i].G, all_sensories_left, dVNC_right, cutoff=cutoff, save_path=f'{save_path}{i}') for i in tqdm((range(n_init))))
save_path = f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC-right_cutoff{cutoff}_minus-edges-contra-{count}-N'
Parallel(n_jobs=-1)(delayed(pg.Prograph.generate_save_simple_paths)(random_contra8000[i].G, all_sensories_left, dVNC_right, cutoff=cutoff, save_path=f'{save_path}{i}') for i in tqdm((range(n_init))))
# %%
# analyze paths: total and count per # hops
def process_paths_ipsi_contra(ipsi_left_paths, ipsi_right_paths, contra_paths, count_removed):
ipsi_left_counts = [len(x) for x in ipsi_left_paths]
ipsi_right_counts = [len(x) for x in ipsi_right_paths]
contra_counts = [len(x) for x in contra_paths]
path_counts_data = []
for row in zip(ipsi_left_counts, [f'ipsi-left']*len(ipsi_left_counts), [count_removed]*len(ipsi_left_counts)):
path_counts_data.append(row)
for row in zip(ipsi_right_counts, [f'ipsi-right']*len(ipsi_right_counts), [count_removed]*len(ipsi_right_counts)):
path_counts_data.append(row)
for row in zip(contra_counts, [f'contra']*len(contra_counts), [count_removed]*len(contra_counts)):
path_counts_data.append(row)
path_counts_data = pd.DataFrame(path_counts_data, columns=['count', 'condition', 'edges_removed'])
path_counts_data.to_csv(f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/processed/excised_graph-to-dVNC-right_random-ipsi-contra_{count_removed}-removed.csv')
# count per # hops
ipsi_left_path_counts = [[len(x) for x in path] for path in ipsi_left_paths]
ipsi_right_path_counts = [[len(x) for x in path] for path in ipsi_right_paths]
contra_path_counts = [[len(x) for x in path] for path in contra_paths]
path_counts_length_data = []
for i, path_length in enumerate(ipsi_left_path_counts):
for row in zip(path_length, [f'ipsi-left']*len(path_length), [count_removed]*len(path_length), [i]*len(path_length)):
path_counts_length_data.append(row)
for i, path_length in enumerate(ipsi_right_path_counts):
for row in zip(path_length, [f'ipsi-right']*len(path_length), [count_removed]*len(path_length), [i]*len(path_length)):
path_counts_length_data.append(row)
for i, path_length in enumerate(contra_path_counts):
for row in zip(path_length, [f'contra']*len(path_length), [count_removed]*len(path_length), [i]*len(path_length)):
path_counts_length_data.append(row)
path_counts_length_data = pd.DataFrame(path_counts_length_data, columns=['path_length', 'condition', 'edges_removed', 'N'])
path_counts_length_data['value'] = [1]*len(path_counts_length_data) # just adding [1] so that groupby has something to count
path_counts_length_data_counts = path_counts_length_data.groupby(['condition', 'N', 'edges_removed', 'path_length']).count()
path_counts_length_data_counts.to_csv(f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/processed/excised_graph-to-dVNC-right_random-ipsi-contra_{count_removed}-removed_path-lengths.csv')
cutoff=5
n_init = 8
# load and process the saved paths for each number of randomly removed edges
for count_removed in [500, 1000, 2000, 4000, 8000]:
    random_ipsi_left_paths = Parallel(n_jobs=-1)(delayed(pg.Prograph.open_simple_paths)(f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC-right_cutoff{cutoff}_minus-edges-ipsi-left-{count_removed}-N{i}.csv.gz') for i in tqdm(range(n_init)))
    random_ipsi_right_paths = Parallel(n_jobs=-1)(delayed(pg.Prograph.open_simple_paths)(f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC-right_cutoff{cutoff}_minus-edges-ipsi-right-{count_removed}-N{i}.csv.gz') for i in tqdm(range(n_init)))
    random_contra_paths = Parallel(n_jobs=-1)(delayed(pg.Prograph.open_simple_paths)(f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC-right_cutoff{cutoff}_minus-edges-contra-{count_removed}-N{i}.csv.gz') for i in tqdm(range(n_init)))
    process_paths_ipsi_contra(random_ipsi_left_paths, random_ipsi_right_paths, random_contra_paths, count_removed)
# wildtype paths
graph_paths = pg.Prograph.open_simple_paths(f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC-right_cutoff{cutoff}_wildtype.csv.gz')
wt_count = len(graph_paths)
path_counts_data = []
path_counts_data.append([wt_count, 'wildtype', 0])
path_counts_data = pd.DataFrame(path_counts_data, columns=['count', 'condition', 'edges_removed'])
path_counts_data.to_csv(f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/processed/wildtype_to-dVNC-right.csv')
path_counts_length_data = []
path_counts = [len(x) for x in graph_paths]
for row in zip(path_counts, ['wildtype']*len(path_counts), [0]*len(path_counts), [0]*len(path_counts)):
path_counts_length_data.append(row)
path_counts_length_data = pd.DataFrame(path_counts_length_data, columns=['path_length', 'condition', 'edges_removed', 'N'])
path_counts_length_data['value'] = [1]*len(path_counts_length_data) # just adding [1] so that groupby has something to count
path_counts_length_data_counts = path_counts_length_data.groupby(['condition', 'N', 'path_length']).count()
path_counts_length_data_counts.to_csv(f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/processed/wildtype_path_lengths_to-dVNC-right.csv')
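# Illustrative sketch (not part of the original analysis; the toy values are assumptions):
# the dummy 'value' column + groupby().count() pattern above simply tallies how many paths
# of each length occur per condition and initialisation.
def _example_path_length_tally():
    import pandas as pd
    toy = pd.DataFrame({'condition': ['contra', 'contra', 'ipsi-left'],
                        'N': [0, 0, 0],
                        'path_length': [3, 3, 4]})
    toy['value'] = 1  # dummy column so count() has something to count
    return toy.groupby(['condition', 'N', 'path_length']).count()  # contra/3 -> 2, ipsi-left/4 -> 1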
# %%
# plot total paths per condition from left -> right paths
total_paths = pd.concat([pd.read_csv('interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/processed/excised_graph-to-dVNC-right_random-ipsi-contra_500-removed.csv', index_col=0),
| pd.read_csv('interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/processed/excised_graph-to-dVNC-right_random-ipsi-contra_1000-removed.csv', index_col=0) | pandas.read_csv |
from unittest import TestCase
import numpy as np
import pandas as pd
from pandas.testing import assert_series_equal
from datavalid.field_checkers import (
MatchRegexFieldChecker, TitleCaseFieldChecker, UniqueFieldChecker, NoNAFieldChecker, OptionsFieldChecker,
IntegerFieldChecker, FloatFieldChecker, RangeFieldChecker
)
class UniqueFieldCheckerTestCase(TestCase):
def test_check(self):
c = UniqueFieldChecker()
self.assertIsNone(c.check( | pd.Series([1, 2, 3]) | pandas.Series |
import numpy as np
import pandas as pd
from sklearn.linear_model import LogisticRegression
def train_model_stacking(trX, trY, vaX, vaY, teX=None, teY=None, penalty='l2',
C=2 ** np.arange(-8, 1).astype(float), seed=42):
scores = []
for i, c in enumerate(C):
model = LogisticRegression(C=c, penalty=penalty, random_state=seed + i, solver='lbfgs')
model.fit(trX, trY)
score = model.score(vaX, vaY)
scores.append(score)
c = C[np.argmax(scores)]
model = LogisticRegression(C=c, penalty=penalty, random_state=seed + len(C), solver='lbfgs')
model.fit(trX, trY)
nnotzero = np.sum(model.coef_ != 0)
if teX is not None and teY is not None:
score = model.score(teX, teY) * 100.
else:
score = model.score(vaX, vaY) * 100.
return score, c, nnotzero, model
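# Hedged usage sketch (not part of the original script; the synthetic arrays are assumptions
# for illustration only and this helper is never called by the module):
def _example_train_model_stacking():
    rng = np.random.RandomState(0)
    trX, vaX = rng.randn(200, 8), rng.randn(50, 8)
    trY, vaY = rng.randint(0, 2, 200), rng.randint(0, 2, 50)
    # returns (validation accuracy in %, best C, number of non-zero coefficients, fitted model)
    return train_model_stacking(trX, trY, vaX, vaY)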
training_char_rnn_df = pd.read_pickle('training_char_rnn_df.pickle')
dev_char_rnn_df = | pd.read_pickle('dev_char_rnn_df.pickle') | pandas.read_pickle |
from abc import ABC, abstractmethod
from hyperopt import STATUS_OK
import numpy as np
import logging
import pandas as pd
import shap
import matplotlib.pyplot as plt
import seaborn as sns
from crosspredict.iterator import Iterator
class CrossModelFabric(ABC):
def __init__(self,
iterator: Iterator,
params,
feature_name,
col_target,
cols_cat='auto',
num_boost_round=99999,
early_stopping_rounds=50,
valid=True,
random_state=0,
cross_target_encoder=None
):
self.params = params
self.feature_name = feature_name
self.cols_cat = cols_cat
self.num_boost_round = num_boost_round
self.early_stopping_rounds = early_stopping_rounds
self.valid = valid
self.col_target = col_target
self.random_state = random_state
self.iterator = iterator
self.cross_target_encoder = cross_target_encoder
self.models = {}
self.scores = None
self.score_max = None
self.num_boost_optimal = None
self.std = None
@abstractmethod
def get_hyperopt_space(self, params, random_state):
pass
@abstractmethod
def get_dataset(self, data, label, categorical_feature, **kwargs):
pass
@abstractmethod
def train(
self,
params,
train_set,
train_name,
valid_sets,
valid_name,
num_boost_round,
evals_result,
categorical_feature,
early_stopping_rounds,
verbose_eval):
pass
def fit(self, df):
log = logging.getLogger(__name__)
scores = {}
scores_avg = []
log.info(self.params)
self.iterator.fit(df=df)
for fold, (train, val) in enumerate(self.iterator.split(df)):
if self.cross_target_encoder is not None:
encoded_train, encoded_test = self.cross_target_encoder.transform(
fold=fold, train=train, test=val)
train = pd.concat([train, encoded_train], axis=1)
val = pd.concat([val, encoded_test], axis=1)
X_train, X_val = train[self.feature_name], val[self.feature_name]
y_train, y_val = train[self.col_target], val[self.col_target]
dtrain = self.get_dataset(
data=X_train.astype(float),
label=y_train,
categorical_feature=self.cols_cat)
dvalid = self.get_dataset(data=X_val.astype(float), label=y_val,
categorical_feature=self.cols_cat)
if fold % self.iterator.n_splits == 0:
log.info(f'REPEAT FOLDS {fold//self.iterator.n_splits} START')
# Training
evals_result = {}
if self.valid:
model = self.train(
params=self.params,
train_set=dtrain,
train_name='train',
valid_set=dvalid,
valid_name='eval',
num_boost_round=self.num_boost_round,
evals_result=evals_result,
categorical_feature=self.cols_cat,
early_stopping_rounds=self.early_stopping_rounds,
verbose_eval=False)
else:
model = self.train(params=self.params,
train_set=dtrain,
num_boost_round=self.num_boost_round,
categorical_feature=self.cols_cat,
verbose_eval=False)
self.models[fold] = model
if self.valid:
# Build predictions for different types of interaction
scores[fold] = evals_result['eval'][self.params['metric']]
best_auc = np.max(evals_result['eval'][self.params['metric']])
scores_avg.append(best_auc)
log.info(f'\tCROSSVALIDATION FOLD {fold%self.iterator.n_splits} ENDS with best `{self.params["metric"]}` = {best_auc}')
if self.valid:
self.scores = pd.DataFrame(
dict([(k, pd.Series(v)) for k, v in scores.items()]))
mask = self.scores.isnull().sum(axis=1) == 0
self.num_boost_optimal = np.argmax(
self.scores[mask].mean(axis=1).values)
self.score_max = self.scores[mask].mean(
axis=1)[self.num_boost_optimal]
# self.score_max = np.mean(scores_avg)
self.std = self.scores[mask].std(axis=1)[self.num_boost_optimal]
# self.std = np.std(scores_avg)
result = {'loss': -self.score_max,
'status': STATUS_OK,
'std': self.std,
'score_max': self.score_max,
'scores_all': scores_avg,
'num_boost': int(self.num_boost_optimal),
}
log.info(result)
return result
return self
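# fit() above trains one model per CV fold (optionally adding out-of-fold target-encoded
# features) and, when valid=True, picks the boosting round with the best mean fold metric.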
def transform(self, df):
x = df[self.feature_name]
y = df[self.col_target]
predict = pd.Series(index=df.index, data=np.zeros(df.shape[0]))
for fold, (train, val) in enumerate(self.iterator.split(df)):
if self.cross_target_encoder is not None:
encoded_train, encoded_test = self.cross_target_encoder.transform(
fold=fold, train=train, test=val)
train = | pd.concat([train, encoded_train], axis=1) | pandas.concat |
import pandas as pd
df1 = pd.read_csv('data//alexander_algoaddition_adddate.csv')
df2 = pd.read_csv('data//michael_algoaddition_adddate.csv')
df3 = pd.read_csv('data//randyll_algoaddition_adddate.csv')
df = | pd.concat([df1, df2, df3], ignore_index=True) | pandas.concat |
# Data Management
import pandas
# External Interfaces
import glob
import kaggle
import os
from zipfile import ZipFile
# Evaluation
from sklearn.metrics import roc_auc_score
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.model_selection import train_test_split
# Processing
import numpy
import scipy
from scipy.stats import chi2
# Modeling
from sklearn.linear_model import LinearRegression
from sklearn.decomposition import PCA
from sklearn import svm
from sklearn.svm import OneClassSVM
from sklearn.neighbors import KNeighborsClassifier
from sklearn.cluster import KMeans
from sklearn.neighbors import LocalOutlierFactor
from sklearn.ensemble import IsolationForest
# Experiment 1
mahalanobis_predictions = numpy.load('../data/mahalanobis-predictions.npy')
mahalanobis_targets = numpy.load('../data/mahalanobis-targets.npy')
# Experiment 2
isoforest_predictions = numpy.load('../data/isoforest-predictions.npy')
isoforest_targets = numpy.load('../data/isoforest-targets.npy')
# Experiment 3
regression_predictions = numpy.load('../data/multiple-linear-regression-predictions.npy')
regression_targets = numpy.load('../data/multiple-linear-regression-targets.npy')
# Experiment 4
pca_knn_predictions = numpy.load('../data/pca-knn-predictions.npy')
pca_knn_targets = numpy.load('../data/pca-knn-targets.npy')
# Experiment 5
pca_lof_predictions = numpy.load('../data/pca-lof-predictions.npy')
pca_lof_targets = numpy.load('../data/pca-lof-targets.npy')
for index in range(0, len(isoforest_predictions)):
if isoforest_predictions[index] == -1:
isoforest_predictions[index] = 0
for index in range(0, len(regression_predictions)):
if regression_predictions[index] >= 0.5:
regression_predictions[index] = 1
else:
regression_predictions[index] = 0
for index in range(0, len(pca_lof_predictions)):
if pca_lof_predictions[index] == -1:
pca_lof_predictions[index] = 0
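# Note (assuming the arrays above are 1-D numpy arrays): the relabelling loops could equivalently
# use boolean indexing, e.g. isoforest_predictions[isoforest_predictions == -1] = 0, and
# numpy.where(regression_predictions >= 0.5, 1, 0) for the threshold step.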
mahalanobis_recall = recall_score(mahalanobis_targets, mahalanobis_predictions)
isoforest_recall = recall_score(isoforest_targets, isoforest_predictions)
regression_recall = recall_score(regression_targets, regression_predictions)
pca_knn_recall = recall_score(pca_knn_targets, pca_knn_predictions)
pca_lof_recall = recall_score(pca_lof_targets, pca_lof_predictions)
mahalanobis_precision = precision_score(mahalanobis_targets, mahalanobis_predictions)
isoforest_precision = precision_score(isoforest_targets, isoforest_predictions)
regression_precision = precision_score(regression_targets, regression_predictions)
pca_knn_precision = precision_score(pca_knn_targets, pca_knn_predictions)
pca_lof_precision = precision_score(pca_lof_targets, pca_lof_predictions)
mahalanobis_auroc = roc_auc_score(mahalanobis_targets, mahalanobis_predictions)
isoforest_auroc = roc_auc_score(isoforest_targets, isoforest_predictions)
regression_auroc = roc_auc_score(regression_targets, regression_predictions)
pca_knn_auroc = roc_auc_score(pca_knn_targets, pca_knn_predictions)
pca_lof_auroc = roc_auc_score(pca_lof_targets, pca_lof_predictions)
name_series = pandas.Series(['Mahalanobis Distances', 'Isolation Forests', 'Multiple Linear Regression', 'PCA K-Nearest Neighbors', 'PCA Local Outlier Factor'])
precision_series = | pandas.Series([mahalanobis_precision, isoforest_precision, regression_precision, pca_knn_precision, pca_lof_precision ]) | pandas.Series |
#!/usr/bin/env python3
import json
import os
from tqdm import tqdm
import shlex, subprocess
import time, datetime
import pandas as pd
''' Data frame functions '''
def dumpdf(df):
print(df.shape)
print(df.head(5))
print(df.dtypes)
def getColumns(fn):
for fdf in pd.read_csv(fn, chunksize=1):
return list(fdf.columns)
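# getColumns() above streams only the first one-row chunk to recover the header without
# loading the whole file; pd.read_csv(fn, nrows=0).columns.tolist() is an equivalent shortcut.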
def getHead(fn):
for fdf in | pd.read_csv(fn, chunksize=5) | pandas.read_csv |
from logging import disable
import os
import json
import glob
import yaml
from pathlib import Path
from typing import List
import collections
import music_trees as mt
import numpy as np
import pandas as pd
import tqdm
from tqdm.contrib.concurrent import process_map
"""
record utils (for records that only contain one event)
"""
def make_entry(signal, dataset: str, uuid: str, format: str, example_length: float, hop_length: float,
sample_rate: int, label: str, **extra):
""" create a new dataset entry
"""
assert signal.sample_rate == sample_rate
return dict(dataset=dataset, uuid=uuid, format=format, example_length=example_length,
hop_length=hop_length, sample_rate=sample_rate, label=label, **extra)
def get_path(entry):
""" returns an entry's path without suffix
add .wav for audio, .json for metadata
"""
return mt.DATA_DIR / entry['dataset'] / entry['label'] / entry['uuid']
def list_subdir(path):
""" list all subdirectories given a directory"""
return [o for o in os.listdir(path) if os.path.isdir(path / o)]
def get_classlist(records):
""" iterate through records and get the set
of all labels
"""
all_labels = [entry['label'] for entry in records]
classlist = list(set(all_labels))
classlist.sort()
return classlist
def get_one_hot(label: str, classes: List[str]):
"""
given a label and its classlist,
returns an np array one-hot of it
"""
if label not in classes:
raise ValueError(f"{label} is not in {classes}")
return np.array([1 if label == c else 0 for c in classes])
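# Illustrative example (the class names are made up): get_one_hot('violin', ['cello', 'violin'])
# returns np.array([0, 1]); a label that is missing from the classlist raises ValueError.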
def get_class_frequencies(records: List[dict]):
""" counts the number of examples belonging to each label, and returns a dict
"""
all_labels = [entry['label'] for entry in records]
counter = collections.Counter(all_labels)
return dict(counter)
def filter_records_by_class_subset(records: List[dict], class_subset: List[str]):
""" remove all records that don't belong to the provided class subset"""
subset = [entry for entry in records if entry['label'] in class_subset]
return subset
def filter_unwanted_classes(records, unwanted_classlist):
""" given a list of unwanted classes, remove all records that match those """
subset = [entry for entry in records if not entry['label']
in unwanted_classlist]
return subset
"""
glob
"""
def glob_all_metadata_entries(root_dir, pattern='**/*.json'):
""" reads all metadata files recursively and loads them into
a list of dicts
"""
pattern = os.path.join(root_dir, pattern)
filepaths = glob.glob(pattern, recursive=True)
# metadata = tqdm.contrib.concurrent.process_map(load_yaml, filepaths, max_workers=20, chunksize=20)
# records = [load_entry(path) for path in tqdm.tqdm(
# filepaths, disable=mt.TQDM_DISABLE)]
records = process_map(
load_entry, filepaths, disable=mt.TQDM_DISABLE, max_workers=os.cpu_count())
return records
"""
json and yaml
"""
def _add_file_format_to_filename(path: str, file_format: str):
if '.' not in file_format:
file_format = f'.{file_format}'
if Path(path).suffix != file_format:
path = Path(path).with_suffix(file_format)
return str(path)
def save_entry(entry, path, format='json'):
""" save to json (or yaml) """
os.makedirs(Path(path).parent, exist_ok=True)
path = _add_file_format_to_filename(path, format)
if format == 'json':
with open(path, 'w') as f:
json.dump(entry, f)
elif format == 'yaml':
with open(path, 'w') as f:
yaml.dump(entry, f)
def load_entry(path, format='json'):
""" load json (or yaml) """
entry = None
if format == 'json':
with open(path, 'r') as f:
entry = json.load(f)
elif format == 'yaml':
with open(path, 'r') as f:
entry = yaml.load(f, Loader=yaml.FullLoader)  # newer PyYAML versions require an explicit Loader
else:
raise ValueError(f'unsupported format: {format}')
return entry
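# Hedged round-trip sketch (not part of the original module; the tmp_dir path is hypothetical):
def _example_entry_roundtrip(tmp_dir='/tmp/music_trees_example'):
    entry = {'dataset': 'demo', 'label': 'cello', 'uuid': '0000'}
    path = os.path.join(tmp_dir, 'entry')
    save_entry(entry, path, format='json')  # writes <tmp_dir>/entry.json
    return load_entry(path + '.json', format='json')  # returns the same dict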
"""
csv
"""
def save_records_csv(records, path_to_records):
pd.DataFrame(records).to_csv(path_to_records, index=False)
def load_records_csv(path_to_records):
assert os.path.exists(path_to_records), f"{path_to_records} does not exist"
records = | pd.read_csv(path_to_records) | pandas.read_csv |
import pandas as pd
import numpy as np
from sklearn.cross_validation import KFold, train_test_split
from sklearn import metrics
from sklearn.ensemble import RandomForestRegressor, ExtraTreesRegressor, GradientBoostingRegressor
from sklearn.linear_model import RidgeCV, LogisticRegressionCV, LogisticRegression, Ridge, LassoCV
from datetime import datetime
from sklearn.preprocessing import StandardScaler, MinMaxScaler
import xgboost as xgb
from gini_score import *
from sklearn.externals import joblib
from xgbex import PropertyInspectionXGBRegressor
# This solution is based on:
# "Bench-Stacked-Generalization" (https://www.kaggle.com/justfor/liberty-mutual-group-property-inspection-prediction/bench-stacked-generalization)
# with ideas taken from
# "Blah-XGB" (https://www.kaggle.com/soutik/liberty-mutual-group-property-inspection-prediction/blah-xgb)
seed = 42
nthread = 10
silent = 1
xgb_n_estimators = 10000
n_folds = 12
esr = 360
def get_ranks(x):
ind = x.argsort()
ranks = np.empty(len(x), int)
ranks[ind] = np.arange(len(x))
return ranks
def get_data(training_file, test_file):
drop_out = ['T1_V10', 'T1_V13', 'T2_V7', 'T2_V10']
train = | pd.read_csv('../input/' + training_file) | pandas.read_csv |
from datetime import datetime, timedelta
import unittest
from pandas.core.datetools import (
bday, BDay, BQuarterEnd, BMonthEnd, BYearEnd, MonthEnd,
DateOffset, Week, YearBegin, YearEnd, Hour, Minute, Second,
format, ole2datetime, to_datetime, normalize_date,
getOffset, getOffsetName, inferTimeRule, hasOffsetName)
from nose.tools import assert_raises
####
## Misc function tests
####
def test_format():
actual = format(datetime(2008, 1, 15))
assert actual == '20080115'
def test_ole2datetime():
actual = ole2datetime(60000)
assert actual == datetime(2064, 4, 8)
assert_raises(Exception, ole2datetime, 60)
def test_to_datetime1():
actual = to_datetime(datetime(2008, 1, 15))
assert actual == datetime(2008, 1, 15)
actual = to_datetime('20080115')
assert actual == datetime(2008, 1, 15)
# unparseable
s = 'Month 1, 1999'
assert to_datetime(s) == s
def test_normalize_date():
actual = normalize_date(datetime(2007, 10, 1, 1, 12, 5, 10))
assert actual == datetime(2007, 10, 1)
#####
### DateOffset Tests
#####
class TestDateOffset(object):
def setUp(self):
self.d = datetime(2008, 1, 2)
def test_repr(self):
repr(DateOffset())
repr(DateOffset(2))
repr(2 * DateOffset())
repr(2 * DateOffset(months=2))
def test_mul(self):
assert DateOffset(2) == 2 * | DateOffset(1) | pandas.core.datetools.DateOffset |
from datetime import datetime
import numpy as np
import pandas as pd
import pytest
from numba import njit
import vectorbt as vbt
from tests.utils import record_arrays_close
from vectorbt.generic.enums import range_dt, drawdown_dt
from vectorbt.portfolio.enums import order_dt, trade_dt, log_dt
day_dt = np.timedelta64(86400000000000)
example_dt = np.dtype([
('id', np.int64),
('col', np.int64),
('idx', np.int64),
('some_field1', np.float64),
('some_field2', np.float64)
], align=True)
records_arr = np.asarray([
(0, 0, 0, 10, 21),
(1, 0, 1, 11, 20),
(2, 0, 2, 12, 19),
(3, 1, 0, 13, 18),
(4, 1, 1, 14, 17),
(5, 1, 2, 13, 18),
(6, 2, 0, 12, 19),
(7, 2, 1, 11, 20),
(8, 2, 2, 10, 21)
], dtype=example_dt)
records_nosort_arr = np.concatenate((
records_arr[0::3],
records_arr[1::3],
records_arr[2::3]
))
group_by = pd.Index(['g1', 'g1', 'g2', 'g2'])
wrapper = vbt.ArrayWrapper(
index=['x', 'y', 'z'],
columns=['a', 'b', 'c', 'd'],
ndim=2,
freq='1 days'
)
wrapper_grouped = wrapper.replace(group_by=group_by)
records = vbt.records.Records(wrapper, records_arr)
records_grouped = vbt.records.Records(wrapper_grouped, records_arr)
records_nosort = records.replace(records_arr=records_nosort_arr)
records_nosort_grouped = vbt.records.Records(wrapper_grouped, records_nosort_arr)
# ############# Global ############# #
def setup_module():
vbt.settings.numba['check_func_suffix'] = True
vbt.settings.caching.enabled = False
vbt.settings.caching.whitelist = []
vbt.settings.caching.blacklist = []
def teardown_module():
vbt.settings.reset()
# ############# col_mapper.py ############# #
class TestColumnMapper:
def test_col_arr(self):
np.testing.assert_array_equal(
records['a'].col_mapper.col_arr,
np.array([0, 0, 0])
)
np.testing.assert_array_equal(
records.col_mapper.col_arr,
np.array([0, 0, 0, 1, 1, 1, 2, 2, 2])
)
def test_get_col_arr(self):
np.testing.assert_array_equal(
records.col_mapper.get_col_arr(),
records.col_mapper.col_arr
)
np.testing.assert_array_equal(
records_grouped['g1'].col_mapper.get_col_arr(),
np.array([0, 0, 0, 0, 0, 0])
)
np.testing.assert_array_equal(
records_grouped.col_mapper.get_col_arr(),
np.array([0, 0, 0, 0, 0, 0, 1, 1, 1])
)
def test_col_range(self):
np.testing.assert_array_equal(
records['a'].col_mapper.col_range,
np.array([
[0, 3]
])
)
np.testing.assert_array_equal(
records.col_mapper.col_range,
np.array([
[0, 3],
[3, 6],
[6, 9],
[-1, -1]
])
)
def test_get_col_range(self):
np.testing.assert_array_equal(
records.col_mapper.get_col_range(),
np.array([
[0, 3],
[3, 6],
[6, 9],
[-1, -1]
])
)
np.testing.assert_array_equal(
records_grouped['g1'].col_mapper.get_col_range(),
np.array([[0, 6]])
)
np.testing.assert_array_equal(
records_grouped.col_mapper.get_col_range(),
np.array([[0, 6], [6, 9]])
)
def test_col_map(self):
np.testing.assert_array_equal(
records['a'].col_mapper.col_map[0],
np.array([0, 1, 2])
)
np.testing.assert_array_equal(
records['a'].col_mapper.col_map[1],
np.array([3])
)
np.testing.assert_array_equal(
records.col_mapper.col_map[0],
np.array([0, 1, 2, 3, 4, 5, 6, 7, 8])
)
np.testing.assert_array_equal(
records.col_mapper.col_map[1],
np.array([3, 3, 3, 0])
)
def test_get_col_map(self):
np.testing.assert_array_equal(
records.col_mapper.get_col_map()[0],
records.col_mapper.col_map[0]
)
np.testing.assert_array_equal(
records.col_mapper.get_col_map()[1],
records.col_mapper.col_map[1]
)
np.testing.assert_array_equal(
records_grouped['g1'].col_mapper.get_col_map()[0],
np.array([0, 1, 2, 3, 4, 5])
)
np.testing.assert_array_equal(
records_grouped['g1'].col_mapper.get_col_map()[1],
np.array([6])
)
np.testing.assert_array_equal(
records_grouped.col_mapper.get_col_map()[0],
np.array([0, 1, 2, 3, 4, 5, 6, 7, 8])
)
np.testing.assert_array_equal(
records_grouped.col_mapper.get_col_map()[1],
np.array([6, 3])
)
def test_is_sorted(self):
assert records.col_mapper.is_sorted()
assert not records_nosort.col_mapper.is_sorted()
# ############# mapped_array.py ############# #
mapped_array = records.map_field('some_field1')
mapped_array_grouped = records_grouped.map_field('some_field1')
mapped_array_nosort = records_nosort.map_field('some_field1')
mapped_array_nosort_grouped = records_nosort_grouped.map_field('some_field1')
mapping = {x: 'test_' + str(x) for x in pd.unique(mapped_array.values)}
mp_mapped_array = mapped_array.replace(mapping=mapping)
mp_mapped_array_grouped = mapped_array_grouped.replace(mapping=mapping)
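# mapping relabels each unique mapped value (10.0-14.0) as 'test_<value>'; it is exercised
# below by value_counts(mapping=...) and the mapping-aware stats() tests.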
class TestMappedArray:
def test_config(self, tmp_path):
assert vbt.MappedArray.loads(mapped_array.dumps()) == mapped_array
mapped_array.save(tmp_path / 'mapped_array')
assert vbt.MappedArray.load(tmp_path / 'mapped_array') == mapped_array
def test_mapped_arr(self):
np.testing.assert_array_equal(
mapped_array['a'].values,
np.array([10., 11., 12.])
)
np.testing.assert_array_equal(
mapped_array.values,
np.array([10., 11., 12., 13., 14., 13., 12., 11., 10.])
)
def test_id_arr(self):
np.testing.assert_array_equal(
mapped_array['a'].id_arr,
np.array([0, 1, 2])
)
np.testing.assert_array_equal(
mapped_array.id_arr,
np.array([0, 1, 2, 3, 4, 5, 6, 7, 8])
)
def test_col_arr(self):
np.testing.assert_array_equal(
mapped_array['a'].col_arr,
np.array([0, 0, 0])
)
np.testing.assert_array_equal(
mapped_array.col_arr,
np.array([0, 0, 0, 1, 1, 1, 2, 2, 2])
)
def test_idx_arr(self):
np.testing.assert_array_equal(
mapped_array['a'].idx_arr,
np.array([0, 1, 2])
)
np.testing.assert_array_equal(
mapped_array.idx_arr,
np.array([0, 1, 2, 0, 1, 2, 0, 1, 2])
)
def test_is_sorted(self):
assert mapped_array.is_sorted()
assert mapped_array.is_sorted(incl_id=True)
assert not mapped_array_nosort.is_sorted()
assert not mapped_array_nosort.is_sorted(incl_id=True)
def test_sort(self):
assert mapped_array.sort().is_sorted()
assert mapped_array.sort().is_sorted(incl_id=True)
assert mapped_array.sort(incl_id=True).is_sorted(incl_id=True)
assert mapped_array_nosort.sort().is_sorted()
assert mapped_array_nosort.sort().is_sorted(incl_id=True)
assert mapped_array_nosort.sort(incl_id=True).is_sorted(incl_id=True)
def test_apply_mask(self):
mask_a = mapped_array['a'].values >= mapped_array['a'].values.mean()
np.testing.assert_array_equal(
mapped_array['a'].apply_mask(mask_a).id_arr,
np.array([1, 2])
)
mask = mapped_array.values >= mapped_array.values.mean()
filtered = mapped_array.apply_mask(mask)
np.testing.assert_array_equal(
filtered.id_arr,
np.array([2, 3, 4, 5, 6])
)
np.testing.assert_array_equal(filtered.col_arr, mapped_array.col_arr[mask])
np.testing.assert_array_equal(filtered.idx_arr, mapped_array.idx_arr[mask])
assert mapped_array_grouped.apply_mask(mask).wrapper == mapped_array_grouped.wrapper
assert mapped_array_grouped.apply_mask(mask, group_by=False).wrapper.grouper.group_by is None
def test_map_to_mask(self):
@njit
def every_2_nb(inout, idxs, col, mapped_arr):
inout[idxs[::2]] = True
np.testing.assert_array_equal(
mapped_array.map_to_mask(every_2_nb),
np.array([True, False, True, True, False, True, True, False, True])
)
def test_top_n_mask(self):
np.testing.assert_array_equal(
mapped_array.top_n_mask(1),
np.array([False, False, True, False, True, False, True, False, False])
)
def test_bottom_n_mask(self):
np.testing.assert_array_equal(
mapped_array.bottom_n_mask(1),
np.array([True, False, False, True, False, False, False, False, True])
)
def test_top_n(self):
np.testing.assert_array_equal(
mapped_array.top_n(1).id_arr,
np.array([2, 4, 6])
)
def test_bottom_n(self):
np.testing.assert_array_equal(
mapped_array.bottom_n(1).id_arr,
np.array([0, 3, 8])
)
def test_to_pd(self):
target = pd.DataFrame(
np.array([
[10., 13., 12., np.nan],
[11., 14., 11., np.nan],
[12., 13., 10., np.nan]
]),
index=wrapper.index,
columns=wrapper.columns
)
pd.testing.assert_series_equal(
mapped_array['a'].to_pd(),
target['a']
)
pd.testing.assert_frame_equal(
mapped_array.to_pd(),
target
)
pd.testing.assert_frame_equal(
mapped_array.to_pd(fill_value=0.),
target.fillna(0.)
)
mapped_array2 = vbt.MappedArray(
wrapper,
records_arr['some_field1'].tolist() + [1],
records_arr['col'].tolist() + [2],
idx_arr=records_arr['idx'].tolist() + [2]
)
with pytest.raises(Exception):
_ = mapped_array2.to_pd()
pd.testing.assert_series_equal(
mapped_array['a'].to_pd(ignore_index=True),
pd.Series(np.array([10., 11., 12.]), name='a')
)
pd.testing.assert_frame_equal(
mapped_array.to_pd(ignore_index=True),
pd.DataFrame(
np.array([
[10., 13., 12., np.nan],
[11., 14., 11., np.nan],
[12., 13., 10., np.nan]
]),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array.to_pd(fill_value=0, ignore_index=True),
pd.DataFrame(
np.array([
[10., 13., 12., 0.],
[11., 14., 11., 0.],
[12., 13., 10., 0.]
]),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array_grouped.to_pd(ignore_index=True),
pd.DataFrame(
np.array([
[10., 12.],
[11., 11.],
[12., 10.],
[13., np.nan],
[14., np.nan],
[13., np.nan],
]),
columns=pd.Index(['g1', 'g2'], dtype='object')
)
)
def test_apply(self):
@njit
def cumsum_apply_nb(idxs, col, a):
return np.cumsum(a)
np.testing.assert_array_equal(
mapped_array['a'].apply(cumsum_apply_nb).values,
np.array([10., 21., 33.])
)
np.testing.assert_array_equal(
mapped_array.apply(cumsum_apply_nb).values,
np.array([10., 21., 33., 13., 27., 40., 12., 23., 33.])
)
np.testing.assert_array_equal(
mapped_array_grouped.apply(cumsum_apply_nb, apply_per_group=False).values,
np.array([10., 21., 33., 13., 27., 40., 12., 23., 33.])
)
np.testing.assert_array_equal(
mapped_array_grouped.apply(cumsum_apply_nb, apply_per_group=True).values,
np.array([10., 21., 33., 46., 60., 73., 12., 23., 33.])
)
assert mapped_array_grouped.apply(cumsum_apply_nb).wrapper == \
mapped_array.apply(cumsum_apply_nb, group_by=group_by).wrapper
assert mapped_array.apply(cumsum_apply_nb, group_by=False).wrapper.grouper.group_by is None
def test_reduce(self):
@njit
def mean_reduce_nb(col, a):
return np.mean(a)
assert mapped_array['a'].reduce(mean_reduce_nb) == 11.
pd.testing.assert_series_equal(
mapped_array.reduce(mean_reduce_nb),
pd.Series(np.array([11., 13.333333333333334, 11., np.nan]), index=wrapper.columns).rename('reduce')
)
pd.testing.assert_series_equal(
mapped_array.reduce(mean_reduce_nb, fill_value=0.),
pd.Series(np.array([11., 13.333333333333334, 11., 0.]), index=wrapper.columns).rename('reduce')
)
pd.testing.assert_series_equal(
mapped_array.reduce(mean_reduce_nb, fill_value=0., wrap_kwargs=dict(dtype=np.int_)),
pd.Series(np.array([11., 13.333333333333334, 11., 0.]), index=wrapper.columns).rename('reduce')
)
pd.testing.assert_series_equal(
mapped_array.reduce(mean_reduce_nb, wrap_kwargs=dict(to_timedelta=True)),
pd.Series(np.array([11., 13.333333333333334, 11., np.nan]), index=wrapper.columns).rename('reduce') * day_dt
)
pd.testing.assert_series_equal(
mapped_array_grouped.reduce(mean_reduce_nb),
pd.Series([12.166666666666666, 11.0], index=pd.Index(['g1', 'g2'], dtype='object')).rename('reduce')
)
assert mapped_array_grouped['g1'].reduce(mean_reduce_nb) == 12.166666666666666
pd.testing.assert_series_equal(
mapped_array_grouped[['g1']].reduce(mean_reduce_nb),
pd.Series([12.166666666666666], index=pd.Index(['g1'], dtype='object')).rename('reduce')
)
pd.testing.assert_series_equal(
mapped_array.reduce(mean_reduce_nb),
mapped_array_grouped.reduce(mean_reduce_nb, group_by=False)
)
pd.testing.assert_series_equal(
mapped_array.reduce(mean_reduce_nb, group_by=group_by),
mapped_array_grouped.reduce(mean_reduce_nb)
)
def test_reduce_to_idx(self):
@njit
def argmin_reduce_nb(col, a):
return np.argmin(a)
assert mapped_array['a'].reduce(argmin_reduce_nb, returns_idx=True) == 'x'
pd.testing.assert_series_equal(
mapped_array.reduce(argmin_reduce_nb, returns_idx=True),
pd.Series(np.array(['x', 'x', 'z', np.nan], dtype=object), index=wrapper.columns).rename('reduce')
)
pd.testing.assert_series_equal(
mapped_array.reduce(argmin_reduce_nb, returns_idx=True, to_index=False),
pd.Series(np.array([0, 0, 2, -1], dtype=int), index=wrapper.columns).rename('reduce')
)
pd.testing.assert_series_equal(
mapped_array_grouped.reduce(argmin_reduce_nb, returns_idx=True, to_index=False),
pd.Series(np.array([0, 2], dtype=int), index=pd.Index(['g1', 'g2'], dtype='object')).rename('reduce')
)
def test_reduce_to_array(self):
@njit
def min_max_reduce_nb(col, a):
return np.array([np.min(a), np.max(a)])
pd.testing.assert_series_equal(
mapped_array['a'].reduce(min_max_reduce_nb, returns_array=True,
wrap_kwargs=dict(name_or_index=['min', 'max'])),
pd.Series([10., 12.], index=pd.Index(['min', 'max'], dtype='object'), name='a')
)
pd.testing.assert_frame_equal(
mapped_array.reduce(min_max_reduce_nb, returns_array=True, wrap_kwargs=dict(name_or_index=['min', 'max'])),
pd.DataFrame(
np.array([
[10., 13., 10., np.nan],
[12., 14., 12., np.nan]
]),
index=pd.Index(['min', 'max'], dtype='object'),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array.reduce(min_max_reduce_nb, returns_array=True, fill_value=0.),
pd.DataFrame(
np.array([
[10., 13., 10., 0.],
[12., 14., 12., 0.]
]),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array.reduce(min_max_reduce_nb, returns_array=True, wrap_kwargs=dict(to_timedelta=True)),
pd.DataFrame(
np.array([
[10., 13., 10., np.nan],
[12., 14., 12., np.nan]
]),
columns=wrapper.columns
) * day_dt
)
pd.testing.assert_frame_equal(
mapped_array_grouped.reduce(min_max_reduce_nb, returns_array=True),
pd.DataFrame(
np.array([
[10., 10.],
[14., 12.]
]),
columns=pd.Index(['g1', 'g2'], dtype='object')
)
)
pd.testing.assert_frame_equal(
mapped_array.reduce(min_max_reduce_nb, returns_array=True),
mapped_array_grouped.reduce(min_max_reduce_nb, returns_array=True, group_by=False)
)
pd.testing.assert_frame_equal(
mapped_array.reduce(min_max_reduce_nb, returns_array=True, group_by=group_by),
mapped_array_grouped.reduce(min_max_reduce_nb, returns_array=True)
)
pd.testing.assert_series_equal(
mapped_array_grouped['g1'].reduce(min_max_reduce_nb, returns_array=True),
pd.Series([10., 14.], name='g1')
)
pd.testing.assert_frame_equal(
mapped_array_grouped[['g1']].reduce(min_max_reduce_nb, returns_array=True),
pd.DataFrame([[10.], [14.]], columns=pd.Index(['g1'], dtype='object'))
)
def test_reduce_to_idx_array(self):
@njit
def idxmin_idxmax_reduce_nb(col, a):
return np.array([np.argmin(a), np.argmax(a)])
pd.testing.assert_series_equal(
mapped_array['a'].reduce(
idxmin_idxmax_reduce_nb,
returns_array=True,
returns_idx=True,
wrap_kwargs=dict(name_or_index=['min', 'max'])
),
pd.Series(
np.array(['x', 'z'], dtype=object),
index=pd.Index(['min', 'max'], dtype='object'),
name='a'
)
)
pd.testing.assert_frame_equal(
mapped_array.reduce(
idxmin_idxmax_reduce_nb,
returns_array=True,
returns_idx=True,
wrap_kwargs=dict(name_or_index=['min', 'max'])
),
pd.DataFrame(
{
'a': ['x', 'z'],
'b': ['x', 'y'],
'c': ['z', 'x'],
'd': [np.nan, np.nan]
},
index=pd.Index(['min', 'max'], dtype='object')
)
)
pd.testing.assert_frame_equal(
mapped_array.reduce(
idxmin_idxmax_reduce_nb,
returns_array=True,
returns_idx=True,
to_index=False
),
pd.DataFrame(
np.array([
[0, 0, 2, -1],
[2, 1, 0, -1]
]),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array_grouped.reduce(
idxmin_idxmax_reduce_nb,
returns_array=True,
returns_idx=True,
to_index=False
),
pd.DataFrame(
np.array([
[0, 2],
[1, 0]
]),
columns=pd.Index(['g1', 'g2'], dtype='object')
)
)
def test_nth(self):
assert mapped_array['a'].nth(0) == 10.
pd.testing.assert_series_equal(
mapped_array.nth(0),
pd.Series(np.array([10., 13., 12., np.nan]), index=wrapper.columns).rename('nth')
)
assert mapped_array['a'].nth(-1) == 12.
pd.testing.assert_series_equal(
mapped_array.nth(-1),
pd.Series(np.array([12., 13., 10., np.nan]), index=wrapper.columns).rename('nth')
)
with pytest.raises(Exception):
_ = mapped_array.nth(10)
pd.testing.assert_series_equal(
mapped_array_grouped.nth(0),
pd.Series(np.array([10., 12.]), index=pd.Index(['g1', 'g2'], dtype='object')).rename('nth')
)
def test_nth_index(self):
assert mapped_array['a'].nth(0) == 10.
pd.testing.assert_series_equal(
mapped_array.nth_index(0),
pd.Series(
np.array(['x', 'x', 'x', np.nan], dtype='object'),
index=wrapper.columns
).rename('nth_index')
)
assert mapped_array['a'].nth(-1) == 12.
pd.testing.assert_series_equal(
mapped_array.nth_index(-1),
pd.Series(
np.array(['z', 'z', 'z', np.nan], dtype='object'),
index=wrapper.columns
).rename('nth_index')
)
with pytest.raises(Exception):
_ = mapped_array.nth_index(10)
pd.testing.assert_series_equal(
mapped_array_grouped.nth_index(0),
pd.Series(
np.array(['x', 'x'], dtype='object'),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('nth_index')
)
def test_min(self):
assert mapped_array['a'].min() == mapped_array['a'].to_pd().min()
pd.testing.assert_series_equal(
mapped_array.min(),
mapped_array.to_pd().min().rename('min')
)
pd.testing.assert_series_equal(
mapped_array_grouped.min(),
pd.Series([10., 10.], index=pd.Index(['g1', 'g2'], dtype='object')).rename('min')
)
def test_max(self):
assert mapped_array['a'].max() == mapped_array['a'].to_pd().max()
pd.testing.assert_series_equal(
mapped_array.max(),
mapped_array.to_pd().max().rename('max')
)
pd.testing.assert_series_equal(
mapped_array_grouped.max(),
pd.Series([14., 12.], index=pd.Index(['g1', 'g2'], dtype='object')).rename('max')
)
def test_mean(self):
assert mapped_array['a'].mean() == mapped_array['a'].to_pd().mean()
pd.testing.assert_series_equal(
mapped_array.mean(),
mapped_array.to_pd().mean().rename('mean')
)
pd.testing.assert_series_equal(
mapped_array_grouped.mean(),
pd.Series([12.166667, 11.], index=pd.Index(['g1', 'g2'], dtype='object')).rename('mean')
)
def test_median(self):
assert mapped_array['a'].median() == mapped_array['a'].to_pd().median()
pd.testing.assert_series_equal(
mapped_array.median(),
mapped_array.to_pd().median().rename('median')
)
pd.testing.assert_series_equal(
mapped_array_grouped.median(),
pd.Series([12.5, 11.], index=pd.Index(['g1', 'g2'], dtype='object')).rename('median')
)
def test_std(self):
assert mapped_array['a'].std() == mapped_array['a'].to_pd().std()
pd.testing.assert_series_equal(
mapped_array.std(),
mapped_array.to_pd().std().rename('std')
)
pd.testing.assert_series_equal(
mapped_array.std(ddof=0),
mapped_array.to_pd().std(ddof=0).rename('std')
)
pd.testing.assert_series_equal(
mapped_array_grouped.std(),
pd.Series([1.4719601443879746, 1.0], index=pd.Index(['g1', 'g2'], dtype='object')).rename('std')
)
def test_sum(self):
assert mapped_array['a'].sum() == mapped_array['a'].to_pd().sum()
pd.testing.assert_series_equal(
mapped_array.sum(),
mapped_array.to_pd().sum().rename('sum')
)
pd.testing.assert_series_equal(
mapped_array_grouped.sum(),
pd.Series([73.0, 33.0], index=pd.Index(['g1', 'g2'], dtype='object')).rename('sum')
)
def test_count(self):
assert mapped_array['a'].count() == mapped_array['a'].to_pd().count()
pd.testing.assert_series_equal(
mapped_array.count(),
mapped_array.to_pd().count().rename('count')
)
pd.testing.assert_series_equal(
mapped_array_grouped.count(),
pd.Series([6, 3], index=pd.Index(['g1', 'g2'], dtype='object')).rename('count')
)
def test_idxmin(self):
assert mapped_array['a'].idxmin() == mapped_array['a'].to_pd().idxmin()
pd.testing.assert_series_equal(
mapped_array.idxmin(),
mapped_array.to_pd().idxmin().rename('idxmin')
)
pd.testing.assert_series_equal(
mapped_array_grouped.idxmin(),
pd.Series(
np.array(['x', 'z'], dtype=object),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('idxmin')
)
def test_idxmax(self):
assert mapped_array['a'].idxmax() == mapped_array['a'].to_pd().idxmax()
pd.testing.assert_series_equal(
mapped_array.idxmax(),
mapped_array.to_pd().idxmax().rename('idxmax')
)
pd.testing.assert_series_equal(
mapped_array_grouped.idxmax(),
pd.Series(
np.array(['y', 'x'], dtype=object),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('idxmax')
)
def test_describe(self):
pd.testing.assert_series_equal(
mapped_array['a'].describe(),
mapped_array['a'].to_pd().describe()
)
pd.testing.assert_frame_equal(
mapped_array.describe(percentiles=None),
mapped_array.to_pd().describe(percentiles=None)
)
pd.testing.assert_frame_equal(
mapped_array.describe(percentiles=[]),
mapped_array.to_pd().describe(percentiles=[])
)
pd.testing.assert_frame_equal(
mapped_array.describe(percentiles=np.arange(0, 1, 0.1)),
mapped_array.to_pd().describe(percentiles=np.arange(0, 1, 0.1))
)
pd.testing.assert_frame_equal(
mapped_array_grouped.describe(),
pd.DataFrame(
np.array([
[6., 3.],
[12.16666667, 11.],
[1.47196014, 1.],
[10., 10.],
[11.25, 10.5],
[12.5, 11.],
[13., 11.5],
[14., 12.]
]),
columns=pd.Index(['g1', 'g2'], dtype='object'),
index=mapped_array.describe().index
)
)
def test_value_counts(self):
pd.testing.assert_series_equal(
mapped_array['a'].value_counts(),
pd.Series(
np.array([1, 1, 1]),
index=pd.Float64Index([10.0, 11.0, 12.0], dtype='float64'),
name='a'
)
)
pd.testing.assert_series_equal(
mapped_array['a'].value_counts(mapping=mapping),
pd.Series(
np.array([1, 1, 1]),
index=pd.Index(['test_10.0', 'test_11.0', 'test_12.0'], dtype='object'),
name='a'
)
)
pd.testing.assert_frame_equal(
mapped_array.value_counts(),
pd.DataFrame(
np.array([
[1, 0, 1, 0],
[1, 0, 1, 0],
[1, 0, 1, 0],
[0, 2, 0, 0],
[0, 1, 0, 0]
]),
index=pd.Float64Index([10.0, 11.0, 12.0, 13.0, 14.0], dtype='float64'),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array_grouped.value_counts(),
pd.DataFrame(
np.array([
[1, 1],
[1, 1],
[1, 1],
[2, 0],
[1, 0]
]),
index=pd.Float64Index([10.0, 11.0, 12.0, 13.0, 14.0], dtype='float64'),
columns=pd.Index(['g1', 'g2'], dtype='object')
)
)
mapped_array2 = mapped_array.replace(mapped_arr=[4, 4, 3, 2, np.nan, 4, 3, 2, 1])
pd.testing.assert_frame_equal(
mapped_array2.value_counts(sort_uniques=False),
pd.DataFrame(
np.array([
[2, 1, 0, 0],
[1, 0, 1, 0],
[0, 1, 1, 0],
[0, 0, 1, 0],
[0, 1, 0, 0]
]),
index=pd.Float64Index([4.0, 3.0, 2.0, 1.0, None], dtype='float64'),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array2.value_counts(sort_uniques=True),
pd.DataFrame(
np.array([
[0, 0, 1, 0],
[0, 1, 1, 0],
[1, 0, 1, 0],
[2, 1, 0, 0],
[0, 1, 0, 0]
]),
index=pd.Float64Index([1.0, 2.0, 3.0, 4.0, None], dtype='float64'),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array2.value_counts(sort=True),
pd.DataFrame(
np.array([
[2, 1, 0, 0],
[0, 1, 1, 0],
[1, 0, 1, 0],
[0, 0, 1, 0],
[0, 1, 0, 0]
]),
index=pd.Float64Index([4.0, 2.0, 3.0, 1.0, np.nan], dtype='float64'),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array2.value_counts(sort=True, ascending=True),
pd.DataFrame(
np.array([
[0, 0, 1, 0],
[0, 1, 0, 0],
[0, 1, 1, 0],
[1, 0, 1, 0],
[2, 1, 0, 0]
]),
index=pd.Float64Index([1.0, np.nan, 2.0, 3.0, 4.0], dtype='float64'),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array2.value_counts(sort=True, normalize=True),
pd.DataFrame(
np.array([
[0.2222222222222222, 0.1111111111111111, 0.0, 0.0],
[0.0, 0.1111111111111111, 0.1111111111111111, 0.0],
[0.1111111111111111, 0.0, 0.1111111111111111, 0.0],
[0.0, 0.0, 0.1111111111111111, 0.0],
[0.0, 0.1111111111111111, 0.0, 0.0]
]),
index=pd.Float64Index([4.0, 2.0, 3.0, 1.0, np.nan], dtype='float64'),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array2.value_counts(sort=True, normalize=True, dropna=True),
pd.DataFrame(
np.array([
[0.25, 0.125, 0.0, 0.0],
[0.0, 0.125, 0.125, 0.0],
[0.125, 0.0, 0.125, 0.0],
[0.0, 0.0, 0.125, 0.0]
]),
index=pd.Float64Index([4.0, 2.0, 3.0, 1.0], dtype='float64'),
columns=wrapper.columns
)
)
@pytest.mark.parametrize(
"test_nosort",
[False, True],
)
def test_indexing(self, test_nosort):
if test_nosort:
ma = mapped_array_nosort
ma_grouped = mapped_array_nosort_grouped
else:
ma = mapped_array
ma_grouped = mapped_array_grouped
np.testing.assert_array_equal(
ma['a'].id_arr,
np.array([0, 1, 2])
)
np.testing.assert_array_equal(
ma['a'].col_arr,
np.array([0, 0, 0])
)
pd.testing.assert_index_equal(
ma['a'].wrapper.columns,
pd.Index(['a'], dtype='object')
)
np.testing.assert_array_equal(
ma['b'].id_arr,
np.array([3, 4, 5])
)
np.testing.assert_array_equal(
ma['b'].col_arr,
np.array([0, 0, 0])
)
pd.testing.assert_index_equal(
ma['b'].wrapper.columns,
pd.Index(['b'], dtype='object')
)
np.testing.assert_array_equal(
ma[['a', 'a']].id_arr,
np.array([0, 1, 2, 0, 1, 2])
)
np.testing.assert_array_equal(
ma[['a', 'a']].col_arr,
np.array([0, 0, 0, 1, 1, 1])
)
pd.testing.assert_index_equal(
ma[['a', 'a']].wrapper.columns,
pd.Index(['a', 'a'], dtype='object')
)
np.testing.assert_array_equal(
ma[['a', 'b']].id_arr,
np.array([0, 1, 2, 3, 4, 5])
)
np.testing.assert_array_equal(
ma[['a', 'b']].col_arr,
np.array([0, 0, 0, 1, 1, 1])
)
pd.testing.assert_index_equal(
ma[['a', 'b']].wrapper.columns,
pd.Index(['a', 'b'], dtype='object')
)
with pytest.raises(Exception):
_ = ma.iloc[::2, :] # changing time not supported
pd.testing.assert_index_equal(
ma_grouped['g1'].wrapper.columns,
pd.Index(['a', 'b'], dtype='object')
)
assert ma_grouped['g1'].wrapper.ndim == 2
assert ma_grouped['g1'].wrapper.grouped_ndim == 1
pd.testing.assert_index_equal(
ma_grouped['g1'].wrapper.grouper.group_by,
pd.Index(['g1', 'g1'], dtype='object')
)
pd.testing.assert_index_equal(
ma_grouped['g2'].wrapper.columns,
pd.Index(['c', 'd'], dtype='object')
)
assert ma_grouped['g2'].wrapper.ndim == 2
assert ma_grouped['g2'].wrapper.grouped_ndim == 1
pd.testing.assert_index_equal(
ma_grouped['g2'].wrapper.grouper.group_by,
pd.Index(['g2', 'g2'], dtype='object')
)
pd.testing.assert_index_equal(
ma_grouped[['g1']].wrapper.columns,
pd.Index(['a', 'b'], dtype='object')
)
assert ma_grouped[['g1']].wrapper.ndim == 2
assert ma_grouped[['g1']].wrapper.grouped_ndim == 2
pd.testing.assert_index_equal(
ma_grouped[['g1']].wrapper.grouper.group_by,
pd.Index(['g1', 'g1'], dtype='object')
)
pd.testing.assert_index_equal(
ma_grouped[['g1', 'g2']].wrapper.columns,
pd.Index(['a', 'b', 'c', 'd'], dtype='object')
)
assert ma_grouped[['g1', 'g2']].wrapper.ndim == 2
assert ma_grouped[['g1', 'g2']].wrapper.grouped_ndim == 2
pd.testing.assert_index_equal(
ma_grouped[['g1', 'g2']].wrapper.grouper.group_by,
pd.Index(['g1', 'g1', 'g2', 'g2'], dtype='object')
)
def test_magic(self):
a = vbt.MappedArray(
wrapper,
records_arr['some_field1'],
records_arr['col'],
id_arr=records_arr['id'],
idx_arr=records_arr['idx']
)
a_inv = vbt.MappedArray(
wrapper,
records_arr['some_field1'][::-1],
records_arr['col'][::-1],
id_arr=records_arr['id'][::-1],
idx_arr=records_arr['idx'][::-1]
)
b = records_arr['some_field2']
a_bool = vbt.MappedArray(
wrapper,
records_arr['some_field1'] > np.mean(records_arr['some_field1']),
records_arr['col'],
id_arr=records_arr['id'],
idx_arr=records_arr['idx']
)
b_bool = records_arr['some_field2'] > np.mean(records_arr['some_field2'])
assert a ** a == a ** 2
with pytest.raises(Exception):
_ = a * a_inv
# binary ops
# comparison ops
np.testing.assert_array_equal((a == b).values, a.values == b)
np.testing.assert_array_equal((a != b).values, a.values != b)
np.testing.assert_array_equal((a < b).values, a.values < b)
np.testing.assert_array_equal((a > b).values, a.values > b)
np.testing.assert_array_equal((a <= b).values, a.values <= b)
np.testing.assert_array_equal((a >= b).values, a.values >= b)
# arithmetic ops
np.testing.assert_array_equal((a + b).values, a.values + b)
np.testing.assert_array_equal((a - b).values, a.values - b)
np.testing.assert_array_equal((a * b).values, a.values * b)
np.testing.assert_array_equal((a ** b).values, a.values ** b)
np.testing.assert_array_equal((a % b).values, a.values % b)
np.testing.assert_array_equal((a // b).values, a.values // b)
np.testing.assert_array_equal((a / b).values, a.values / b)
# __r*__ is only called if the left object does not have an __*__ method
np.testing.assert_array_equal((10 + a).values, 10 + a.values)
np.testing.assert_array_equal((10 - a).values, 10 - a.values)
np.testing.assert_array_equal((10 * a).values, 10 * a.values)
np.testing.assert_array_equal((10 ** a).values, 10 ** a.values)
np.testing.assert_array_equal((10 % a).values, 10 % a.values)
np.testing.assert_array_equal((10 // a).values, 10 // a.values)
np.testing.assert_array_equal((10 / a).values, 10 / a.values)
# mask ops
np.testing.assert_array_equal((a_bool & b_bool).values, a_bool.values & b_bool)
np.testing.assert_array_equal((a_bool | b_bool).values, a_bool.values | b_bool)
np.testing.assert_array_equal((a_bool ^ b_bool).values, a_bool.values ^ b_bool)
np.testing.assert_array_equal((True & a_bool).values, True & a_bool.values)
np.testing.assert_array_equal((True | a_bool).values, True | a_bool.values)
np.testing.assert_array_equal((True ^ a_bool).values, True ^ a_bool.values)
# unary ops
np.testing.assert_array_equal((-a).values, -a.values)
np.testing.assert_array_equal((+a).values, +a.values)
np.testing.assert_array_equal((abs(-a)).values, abs((-a.values)))
def test_stats(self):
stats_index = pd.Index([
'Start', 'End', 'Period', 'Count', 'Mean', 'Std', 'Min', 'Median', 'Max', 'Min Index', 'Max Index'
], dtype='object')
pd.testing.assert_series_equal(
mapped_array.stats(),
pd.Series([
'x', 'z', pd.Timedelta('3 days 00:00:00'),
2.25, 11.777777777777779, 0.859116756396542, 11.0, 11.666666666666666, 12.666666666666666
],
index=stats_index[:-2],
name='agg_func_mean'
)
)
pd.testing.assert_series_equal(
mapped_array.stats(column='a'),
pd.Series([
'x', 'z', pd.Timedelta('3 days 00:00:00'),
3, 11.0, 1.0, 10.0, 11.0, 12.0, 'x', 'z'
],
index=stats_index,
name='a'
)
)
pd.testing.assert_series_equal(
mapped_array.stats(column='g1', group_by=group_by),
pd.Series([
'x', 'z', pd.Timedelta('3 days 00:00:00'),
6, 12.166666666666666, 1.4719601443879746, 10.0, 12.5, 14.0, 'x', 'y'
],
index=stats_index,
name='g1'
)
)
pd.testing.assert_series_equal(
mapped_array['c'].stats(),
mapped_array.stats(column='c')
)
pd.testing.assert_series_equal(
mapped_array['c'].stats(),
mapped_array.stats(column='c', group_by=False)
)
pd.testing.assert_series_equal(
mapped_array_grouped['g2'].stats(),
mapped_array_grouped.stats(column='g2')
)
pd.testing.assert_series_equal(
mapped_array_grouped['g2'].stats(),
mapped_array.stats(column='g2', group_by=group_by)
)
stats_df = mapped_array.stats(agg_func=None)
assert stats_df.shape == (4, 11)
pd.testing.assert_index_equal(stats_df.index, mapped_array.wrapper.columns)
pd.testing.assert_index_equal(stats_df.columns, stats_index)
def test_stats_mapping(self):
stats_index = pd.Index([
'Start', 'End', 'Period', 'Count', 'Value Counts: test_10.0',
'Value Counts: test_11.0', 'Value Counts: test_12.0',
'Value Counts: test_13.0', 'Value Counts: test_14.0'
], dtype='object')
pd.testing.assert_series_equal(
mp_mapped_array.stats(),
pd.Series([
'x',
'z',
pd.Timedelta('3 days 00:00:00'),
2.25, 0.5, 0.5, 0.5, 0.5, 0.25
],
index=stats_index,
name='agg_func_mean'
)
)
pd.testing.assert_series_equal(
mp_mapped_array.stats(column='a'),
pd.Series([
'x',
'z',
pd.Timedelta('3 days 00:00:00'),
3, 1, 1, 1, 0, 0
],
index=stats_index,
name='a'
)
)
pd.testing.assert_series_equal(
mp_mapped_array.stats(column='g1', group_by=group_by),
pd.Series([
'x',
'z',
pd.Timedelta('3 days 00:00:00'),
6, 1, 1, 1, 2, 1
],
index=stats_index,
name='g1'
)
)
pd.testing.assert_series_equal(
mp_mapped_array.stats(),
mapped_array.stats(settings=dict(mapping=mapping))
)
pd.testing.assert_series_equal(
mp_mapped_array['c'].stats(settings=dict(incl_all_keys=True)),
mp_mapped_array.stats(column='c')
)
pd.testing.assert_series_equal(
mp_mapped_array['c'].stats(settings=dict(incl_all_keys=True)),
mp_mapped_array.stats(column='c', group_by=False)
)
pd.testing.assert_series_equal(
mp_mapped_array_grouped['g2'].stats(settings=dict(incl_all_keys=True)),
mp_mapped_array_grouped.stats(column='g2')
)
pd.testing.assert_series_equal(
mp_mapped_array_grouped['g2'].stats(settings=dict(incl_all_keys=True)),
mp_mapped_array.stats(column='g2', group_by=group_by)
)
stats_df = mp_mapped_array.stats(agg_func=None)
assert stats_df.shape == (4, 9)
pd.testing.assert_index_equal(stats_df.index, mp_mapped_array.wrapper.columns)
pd.testing.assert_index_equal(stats_df.columns, stats_index)
# ############# base.py ############# #
class TestRecords:
def test_config(self, tmp_path):
assert vbt.Records.loads(records['a'].dumps()) == records['a']
assert vbt.Records.loads(records.dumps()) == records
records.save(tmp_path / 'records')
assert vbt.Records.load(tmp_path / 'records') == records
def test_records(self):
pd.testing.assert_frame_equal(
records.records,
pd.DataFrame.from_records(records_arr)
)
def test_recarray(self):
np.testing.assert_array_equal(records['a'].recarray.some_field1, records['a'].values['some_field1'])
np.testing.assert_array_equal(records.recarray.some_field1, records.values['some_field1'])
def test_records_readable(self):
pd.testing.assert_frame_equal(
records.records_readable,
pd.DataFrame([
[0, 'a', 'x', 10.0, 21.0], [1, 'a', 'y', 11.0, 20.0], [2, 'a', 'z', 12.0, 19.0],
[3, 'b', 'x', 13.0, 18.0], [4, 'b', 'y', 14.0, 17.0], [5, 'b', 'z', 13.0, 18.0],
[6, 'c', 'x', 12.0, 19.0], [7, 'c', 'y', 11.0, 20.0], [8, 'c', 'z', 10.0, 21.0]
], columns=pd.Index(['Id', 'Column', 'Timestamp', 'some_field1', 'some_field2'], dtype='object'))
)
def test_is_sorted(self):
assert records.is_sorted()
assert records.is_sorted(incl_id=True)
assert not records_nosort.is_sorted()
assert not records_nosort.is_sorted(incl_id=True)
def test_sort(self):
assert records.sort().is_sorted()
assert records.sort().is_sorted(incl_id=True)
assert records.sort(incl_id=True).is_sorted(incl_id=True)
assert records_nosort.sort().is_sorted()
assert records_nosort.sort().is_sorted(incl_id=True)
assert records_nosort.sort(incl_id=True).is_sorted(incl_id=True)
def test_apply_mask(self):
mask_a = records['a'].values['some_field1'] >= records['a'].values['some_field1'].mean()
record_arrays_close(
records['a'].apply_mask(mask_a).values,
np.array([
(1, 0, 1, 11., 20.), (2, 0, 2, 12., 19.)
], dtype=example_dt)
)
mask = records.values['some_field1'] >= records.values['some_field1'].mean()
filtered = records.apply_mask(mask)
record_arrays_close(
filtered.values,
np.array([
(2, 0, 2, 12., 19.), (3, 1, 0, 13., 18.), (4, 1, 1, 14., 17.),
(5, 1, 2, 13., 18.), (6, 2, 0, 12., 19.)
], dtype=example_dt)
)
assert records_grouped.apply_mask(mask).wrapper == records_grouped.wrapper
def test_map_field(self):
np.testing.assert_array_equal(
records['a'].map_field('some_field1').values,
np.array([10., 11., 12.])
)
np.testing.assert_array_equal(
records.map_field('some_field1').values,
np.array([10., 11., 12., 13., 14., 13., 12., 11., 10.])
)
assert records_grouped.map_field('some_field1').wrapper == \
records.map_field('some_field1', group_by=group_by).wrapper
assert records_grouped.map_field('some_field1', group_by=False).wrapper.grouper.group_by is None
def test_map(self):
@njit
def map_func_nb(record):
return record['some_field1'] + record['some_field2']
np.testing.assert_array_equal(
records['a'].map(map_func_nb).values,
np.array([31., 31., 31.])
)
np.testing.assert_array_equal(
records.map(map_func_nb).values,
np.array([31., 31., 31., 31., 31., 31., 31., 31., 31.])
)
assert records_grouped.map(map_func_nb).wrapper == \
records.map(map_func_nb, group_by=group_by).wrapper
assert records_grouped.map(map_func_nb, group_by=False).wrapper.grouper.group_by is None
def test_map_array(self):
arr = records_arr['some_field1'] + records_arr['some_field2']
np.testing.assert_array_equal(
records['a'].map_array(arr[:3]).values,
np.array([31., 31., 31.])
)
np.testing.assert_array_equal(
records.map_array(arr).values,
np.array([31., 31., 31., 31., 31., 31., 31., 31., 31.])
)
assert records_grouped.map_array(arr).wrapper == \
records.map_array(arr, group_by=group_by).wrapper
assert records_grouped.map_array(arr, group_by=False).wrapper.grouper.group_by is None
def test_apply(self):
@njit
def cumsum_apply_nb(records):
return np.cumsum(records['some_field1'])
np.testing.assert_array_equal(
records['a'].apply(cumsum_apply_nb).values,
np.array([10., 21., 33.])
)
np.testing.assert_array_equal(
records.apply(cumsum_apply_nb).values,
np.array([10., 21., 33., 13., 27., 40., 12., 23., 33.])
)
np.testing.assert_array_equal(
records_grouped.apply(cumsum_apply_nb, apply_per_group=False).values,
np.array([10., 21., 33., 13., 27., 40., 12., 23., 33.])
)
np.testing.assert_array_equal(
records_grouped.apply(cumsum_apply_nb, apply_per_group=True).values,
np.array([10., 21., 33., 46., 60., 73., 12., 23., 33.])
)
assert records_grouped.apply(cumsum_apply_nb).wrapper == \
records.apply(cumsum_apply_nb, group_by=group_by).wrapper
assert records_grouped.apply(cumsum_apply_nb, group_by=False).wrapper.grouper.group_by is None
def test_count(self):
assert records['a'].count() == 3
pd.testing.assert_series_equal(
records.count(),
pd.Series(
np.array([3, 3, 3, 0]),
index=wrapper.columns
).rename('count')
)
assert records_grouped['g1'].count() == 6
pd.testing.assert_series_equal(
records_grouped.count(),
pd.Series(
np.array([6, 3]),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('count')
)
@pytest.mark.parametrize(
"test_nosort",
[False, True],
)
def test_indexing(self, test_nosort):
if test_nosort:
r = records_nosort
r_grouped = records_nosort_grouped
else:
r = records
r_grouped = records_grouped
record_arrays_close(
r['a'].values,
np.array([
(0, 0, 0, 10., 21.), (1, 0, 1, 11., 20.), (2, 0, 2, 12., 19.)
], dtype=example_dt)
)
pd.testing.assert_index_equal(
r['a'].wrapper.columns,
pd.Index(['a'], dtype='object')
)
pd.testing.assert_index_equal(
r['b'].wrapper.columns,
pd.Index(['b'], dtype='object')
)
record_arrays_close(
r[['a', 'a']].values,
np.array([
(0, 0, 0, 10., 21.), (1, 0, 1, 11., 20.), (2, 0, 2, 12., 19.),
(0, 1, 0, 10., 21.), (1, 1, 1, 11., 20.), (2, 1, 2, 12., 19.)
], dtype=example_dt)
)
pd.testing.assert_index_equal(
r[['a', 'a']].wrapper.columns,
pd.Index(['a', 'a'], dtype='object')
)
record_arrays_close(
r[['a', 'b']].values,
np.array([
(0, 0, 0, 10., 21.), (1, 0, 1, 11., 20.), (2, 0, 2, 12., 19.),
(3, 1, 0, 13., 18.), (4, 1, 1, 14., 17.), (5, 1, 2, 13., 18.)
], dtype=example_dt)
)
pd.testing.assert_index_equal(
r[['a', 'b']].wrapper.columns,
pd.Index(['a', 'b'], dtype='object')
)
with pytest.raises(Exception):
_ = r.iloc[::2, :] # changing time not supported
pd.testing.assert_index_equal(
r_grouped['g1'].wrapper.columns,
pd.Index(['a', 'b'], dtype='object')
)
assert r_grouped['g1'].wrapper.ndim == 2
assert r_grouped['g1'].wrapper.grouped_ndim == 1
pd.testing.assert_index_equal(
r_grouped['g1'].wrapper.grouper.group_by,
pd.Index(['g1', 'g1'], dtype='object')
)
pd.testing.assert_index_equal(
r_grouped['g2'].wrapper.columns,
pd.Index(['c', 'd'], dtype='object')
)
assert r_grouped['g2'].wrapper.ndim == 2
assert r_grouped['g2'].wrapper.grouped_ndim == 1
pd.testing.assert_index_equal(
r_grouped['g2'].wrapper.grouper.group_by,
pd.Index(['g2', 'g2'], dtype='object')
)
pd.testing.assert_index_equal(
r_grouped[['g1']].wrapper.columns,
pd.Index(['a', 'b'], dtype='object')
)
assert r_grouped[['g1']].wrapper.ndim == 2
assert r_grouped[['g1']].wrapper.grouped_ndim == 2
pd.testing.assert_index_equal(
r_grouped[['g1']].wrapper.grouper.group_by,
pd.Index(['g1', 'g1'], dtype='object')
)
pd.testing.assert_index_equal(
r_grouped[['g1', 'g2']].wrapper.columns,
pd.Index(['a', 'b', 'c', 'd'], dtype='object')
)
assert r_grouped[['g1', 'g2']].wrapper.ndim == 2
assert r_grouped[['g1', 'g2']].wrapper.grouped_ndim == 2
pd.testing.assert_index_equal(
r_grouped[['g1', 'g2']].wrapper.grouper.group_by,
pd.Index(['g1', 'g1', 'g2', 'g2'], dtype='object')
)
def test_filtering(self):
filtered_records = vbt.Records(wrapper, records_arr[[0, -1]])
record_arrays_close(
filtered_records.values,
np.array([(0, 0, 0, 10., 21.), (8, 2, 2, 10., 21.)], dtype=example_dt)
)
# a
record_arrays_close(
filtered_records['a'].values,
np.array([(0, 0, 0, 10., 21.)], dtype=example_dt)
)
np.testing.assert_array_equal(
filtered_records['a'].map_field('some_field1').id_arr,
np.array([0])
)
assert filtered_records['a'].map_field('some_field1').min() == 10.
assert filtered_records['a'].count() == 1.
# b
record_arrays_close(
filtered_records['b'].values,
np.array([], dtype=example_dt)
)
np.testing.assert_array_equal(
filtered_records['b'].map_field('some_field1').id_arr,
np.array([])
)
assert np.isnan(filtered_records['b'].map_field('some_field1').min())
assert filtered_records['b'].count() == 0.
# c
record_arrays_close(
filtered_records['c'].values,
np.array([(8, 0, 2, 10., 21.)], dtype=example_dt)
)
np.testing.assert_array_equal(
filtered_records['c'].map_field('some_field1').id_arr,
np.array([8])
)
assert filtered_records['c'].map_field('some_field1').min() == 10.
assert filtered_records['c'].count() == 1.
# d
record_arrays_close(
filtered_records['d'].values,
np.array([], dtype=example_dt)
)
np.testing.assert_array_equal(
filtered_records['d'].map_field('some_field1').id_arr,
np.array([])
)
assert np.isnan(filtered_records['d'].map_field('some_field1').min())
assert filtered_records['d'].count() == 0.
def test_stats(self):
stats_index = pd.Index([
'Start', 'End', 'Period', 'Count'
], dtype='object')
pd.testing.assert_series_equal(
records.stats(),
pd.Series([
'x', 'z', pd.Timedelta('3 days 00:00:00'), 2.25
],
index=stats_index,
name='agg_func_mean'
)
)
pd.testing.assert_series_equal(
records.stats(column='a'),
pd.Series([
'x', 'z', pd.Timedelta('3 days 00:00:00'), 3
],
index=stats_index,
name='a'
)
)
pd.testing.assert_series_equal(
records.stats(column='g1', group_by=group_by),
pd.Series([
'x', 'z', pd.Timedelta('3 days 00:00:00'), 6
],
index=stats_index,
name='g1'
)
)
pd.testing.assert_series_equal(
records['c'].stats(),
records.stats(column='c')
)
pd.testing.assert_series_equal(
records['c'].stats(),
records.stats(column='c', group_by=False)
)
pd.testing.assert_series_equal(
records_grouped['g2'].stats(),
records_grouped.stats(column='g2')
)
pd.testing.assert_series_equal(
records_grouped['g2'].stats(),
records.stats(column='g2', group_by=group_by)
)
stats_df = records.stats(agg_func=None)
assert stats_df.shape == (4, 4)
pd.testing.assert_index_equal(stats_df.index, records.wrapper.columns)
pd.testing.assert_index_equal(stats_df.columns, stats_index)
# ############# ranges.py ############# #
ts = pd.DataFrame({
'a': [1, -1, 3, -1, 5, -1],
'b': [-1, -1, -1, 4, 5, 6],
'c': [1, 2, 3, -1, -1, -1],
'd': [-1, -1, -1, -1, -1, -1]
}, index=[
datetime(2020, 1, 1),
datetime(2020, 1, 2),
datetime(2020, 1, 3),
datetime(2020, 1, 4),
datetime(2020, 1, 5),
datetime(2020, 1, 6)
])
ranges = vbt.Ranges.from_ts(ts, wrapper_kwargs=dict(freq='1 days'))
ranges_grouped = vbt.Ranges.from_ts(ts, wrapper_kwargs=dict(freq='1 days', group_by=group_by))
class TestRanges:
def test_mapped_fields(self):
for name in range_dt.names:
np.testing.assert_array_equal(
getattr(ranges, name).values,
ranges.values[name]
)
def test_from_ts(self):
record_arrays_close(
ranges.values,
np.array([
(0, 0, 0, 1, 1), (1, 0, 2, 3, 1), (2, 0, 4, 5, 1), (3, 1, 3, 5, 0), (4, 2, 0, 3, 1)
], dtype=range_dt)
)
assert ranges.wrapper.freq == day_dt
pd.testing.assert_index_equal(
ranges_grouped.wrapper.grouper.group_by,
group_by
)
def test_records_readable(self):
records_readable = ranges.records_readable
np.testing.assert_array_equal(
records_readable['Range Id'].values,
np.array([
0, 1, 2, 3, 4
])
)
np.testing.assert_array_equal(
records_readable['Column'].values,
np.array([
'a', 'a', 'a', 'b', 'c'
])
)
np.testing.assert_array_equal(
records_readable['Start Timestamp'].values,
np.array([
'2020-01-01T00:00:00.000000000', '2020-01-03T00:00:00.000000000',
'2020-01-05T00:00:00.000000000', '2020-01-04T00:00:00.000000000',
'2020-01-01T00:00:00.000000000'
], dtype='datetime64[ns]')
)
np.testing.assert_array_equal(
records_readable['End Timestamp'].values,
np.array([
'2020-01-02T00:00:00.000000000', '2020-01-04T00:00:00.000000000',
'2020-01-06T00:00:00.000000000', '2020-01-06T00:00:00.000000000',
'2020-01-04T00:00:00.000000000'
], dtype='datetime64[ns]')
)
np.testing.assert_array_equal(
records_readable['Status'].values,
np.array([
'Closed', 'Closed', 'Closed', 'Open', 'Closed'
])
)
def test_to_mask(self):
pd.testing.assert_series_equal(
ranges['a'].to_mask(),
ts['a'] != -1
)
pd.testing.assert_frame_equal(
ranges.to_mask(),
ts != -1
)
pd.testing.assert_frame_equal(
ranges_grouped.to_mask(),
pd.DataFrame(
[
[True, True],
[False, True],
[True, True],
[True, False],
[True, False],
[True, False]
],
index=ts.index,
columns=pd.Index(['g1', 'g2'], dtype='object')
)
)
def test_duration(self):
np.testing.assert_array_equal(
ranges['a'].duration.values,
np.array([1, 1, 1])
)
np.testing.assert_array_equal(
ranges.duration.values,
np.array([1, 1, 1, 3, 3])
)
def test_avg_duration(self):
assert ranges['a'].avg_duration() == pd.Timedelta('1 days 00:00:00')
pd.testing.assert_series_equal(
ranges.avg_duration(),
pd.Series(
np.array([86400000000000, 259200000000000, 259200000000000, 'NaT'], dtype='timedelta64[ns]'),
index=wrapper.columns
).rename('avg_duration')
)
pd.testing.assert_series_equal(
ranges_grouped.avg_duration(),
pd.Series(
np.array([129600000000000, 259200000000000], dtype='timedelta64[ns]'),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('avg_duration')
)
def test_max_duration(self):
assert ranges['a'].max_duration() == pd.Timedelta('1 days 00:00:00')
pd.testing.assert_series_equal(
ranges.max_duration(),
pd.Series(
np.array([86400000000000, 259200000000000, 259200000000000, 'NaT'], dtype='timedelta64[ns]'),
index=wrapper.columns
).rename('max_duration')
)
pd.testing.assert_series_equal(
ranges_grouped.max_duration(),
pd.Series(
np.array([259200000000000, 259200000000000], dtype='timedelta64[ns]'),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('max_duration')
)
def test_coverage(self):
assert ranges['a'].coverage() == 0.5
pd.testing.assert_series_equal(
ranges.coverage(),
pd.Series(
np.array([0.5, 0.5, 0.5, np.nan]),
index=ts2.columns
).rename('coverage')
)
pd.testing.assert_series_equal(
ranges.coverage(),
ranges.replace(records_arr=np.repeat(ranges.values, 2)).coverage()
)
pd.testing.assert_series_equal(
ranges.replace(records_arr=np.repeat(ranges.values, 2)).coverage(overlapping=True),
pd.Series(
np.array([1.0, 1.0, 1.0, np.nan]),
index=ts2.columns
).rename('coverage')
)
pd.testing.assert_series_equal(
ranges.coverage(normalize=False),
pd.Series(
np.array([3.0, 3.0, 3.0, np.nan]),
index=ts2.columns
).rename('coverage')
)
pd.testing.assert_series_equal(
ranges.replace(records_arr=np.repeat(ranges.values, 2)).coverage(overlapping=True, normalize=False),
pd.Series(
np.array([3.0, 3.0, 3.0, np.nan]),
index=ts2.columns
).rename('coverage')
)
pd.testing.assert_series_equal(
ranges_grouped.coverage(),
pd.Series(
np.array([0.4166666666666667, 0.25]),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('coverage')
)
pd.testing.assert_series_equal(
ranges_grouped.coverage(),
ranges_grouped.replace(records_arr=np.repeat(ranges_grouped.values, 2)).coverage()
)
def test_stats(self):
stats_index = pd.Index([
'Start', 'End', 'Period', 'Coverage', 'Overlap Coverage',
'Total Records', 'Duration: Min', 'Duration: Median', 'Duration: Max',
'Duration: Mean', 'Duration: Std'
], dtype='object')
pd.testing.assert_series_equal(
ranges.stats(),
pd.Series([
pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-06 00:00:00'),
pd.Timedelta('6 days 00:00:00'), pd.Timedelta('3 days 00:00:00'),
pd.Timedelta('0 days 00:00:00'), 1.25, pd.Timedelta('2 days 08:00:00'),
pd.Timedelta('2 days 08:00:00'), pd.Timedelta('2 days 08:00:00'),
                pd.Timedelta('2 days 08:00:00'),
import pandas as pd
import matplotlib.pyplot as plt
def import_data(file, print_head = False):
df = pd.read_csv('data/{}.csv'.format(file))
print(file)
if print_head == True:
print("First few values:")
print(df.head())
return df
def import_data_date_index(file, print_head = False):
df = pd.read_csv('data/{}.csv'.format(file), index_col = "Date", parse_dates = True)
print(file)
if print_head == True:
print("First few values:")
print(df.head())
return df
def import_data_date_index_one_col(file, print_head = False):
df = pd.read_csv('data/{}.csv'.format(file),
index_col = 'Date',
parse_dates = True,
usecols = ['Date', 'Adj Close'],
na_values = ['nan']
)
print(file)
if print_head == True:
print("First few values:")
print(df.head())
return df
def print_max_close(df):
    close_prices = df['Close']
max_close = close_prices.max()
print(max_close)
def print_mean_volume(df):
volume = df['Volume']
mean_volume = volume.mean()
print(mean_volume)
def plot_adj_close(df):
df['Adj Close'].plot()
plt.show()
def plot_high_price(df):
df['High'].plot()
plt.show()
def plot_two_columns(df):
df[['High', 'Adj Close']].plot()
plt.show()
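# Hedged usage sketch of the helpers above. 'AAPL' is a hypothetical file name:
# it assumes a data/AAPL.csv with Date, High, Adj Close and Volume columns,
# matching the layout the import helpers expect.
def example_stock_usage():
    df = import_data_date_index('AAPL', print_head = True)
    print_mean_volume(df)
    plot_two_columns(df)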
def creating_empty_df():
start_date = '2010-01-22'
end_date = '2010-01-26'
    dates = pd.date_range(start_date, end_date)
"""
Transformations that primarily involve numerical computation on variables.
"""
import math
import numpy as np
import pandas as pd
from bids.utils import listify
from .base import Transformation
from bids.modeling import hrf
from bids.variables import SparseRunVariable, DenseRunVariable
def _fractional_gcd(vals, res=0.001):
from functools import reduce
from math import gcd
return reduce(gcd, (int(np.round(val / res)) for val in vals)) * res
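# Illustrative sketch of _fractional_gcd (made-up numbers): onsets/durations of
# 1.5 s, 3.0 s and 0.75 s share a 0.75 s grid, so any sampling grid aligned to
# 0.75 s hits every event boundary. Convolve (below) uses this to pick its
# oversampling factor.
def _example_fractional_gcd():
    return _fractional_gcd([1.5, 3.0, 0.75])  # -> 0.75 (at the default 1 ms resolution)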
class Convolve(Transformation):
"""Convolve the input variable with an HRF.
Parameters
----------
var : Variable
The variable to convolve.
model : str
The name of the HRF model to apply. Must be one of 'spm',
'glover', or 'fir'.
derivative : bool
Whether or not to include the temporal derivative.
dispersion : bool
Whether or not to include the dispersion derivative.
fir_delays : iterable
A list or iterable of delays to use if model is
'fir' (ignored otherwise). Spacing between delays must be fixed.
Notes
-----
Uses the HRF convolution functions implemented in nistats.
"""
_input_type = 'variable'
_return_type = 'variable'
def _transform(self, var, model='spm', derivative=False, dispersion=False,
fir_delays=None):
model = model.lower()
df = var.to_df(entities=False)
if isinstance(var, SparseRunVariable):
sampling_rate = self.collection.sampling_rate
dur = var.get_duration()
resample_frames = np.linspace(
0, dur, int(math.ceil(dur * sampling_rate)), endpoint=False)
safety = 2 # Double frequency to resolve events
else:
resample_frames = df['onset'].values
sampling_rate = var.sampling_rate
safety = 1 # Maximum signal resolution is already 0.5 * SR
vals = df[['onset', 'duration', 'amplitude']].values.T
if model in ['spm', 'glover']:
if derivative:
model += ' + derivative'
if dispersion:
model += ' + dispersion'
elif model != 'fir':
raise ValueError("Model must be one of 'spm', 'glover', or 'fir'.")
# Sampling at >100Hz will never be useful, but can be wildly expensive
max_freq, min_interval = 100, 0.01
# Sampling at <1Hz can degrade signals
min_freq, max_interval = 1, 1
# Given the sampling rate, determine an oversampling factor to ensure that
# events can be modeled with reasonable precision
unique_onsets = np.unique(df.onset)
unique_durations = np.unique(df.duration)
        # Align existing data ticks with event onsets and offsets, up to ms resolution
# Note that GCD ignores zeros, so 0 onsets and impulse responses (0 durations) do
# not harm this.
required_resolution = _fractional_gcd(
np.concatenate((unique_onsets, unique_durations)),
res=min_interval)
# Bound the effective sampling rate between min_freq and max_freq
effective_sr = max(min_freq, min(safety / required_resolution, max_freq))
convolved = hrf.compute_regressor(
vals, model, resample_frames, fir_delays=fir_delays, min_onset=0,
oversampling=np.ceil(effective_sr / sampling_rate)
)
return DenseRunVariable(
name=var.name, values=convolved[0], run_info=var.run_info,
source=var.source, sampling_rate=sampling_rate)
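# Standalone numeric sketch of the sampling-rate bounding used in Convolve above
# (values are made up): with a required resolution of 0.5 s and safety=2, the
# effective rate is 4 Hz, well inside the [1, 100] Hz clamp.
def _example_effective_sr():
    required_resolution = 0.5  # seconds, as returned by _fractional_gcd
    safety, min_freq, max_freq = 2, 1, 100
    return max(min_freq, min(safety / required_resolution, max_freq))  # -> 4.0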
class Demean(Transformation):
def _transform(self, data):
return data - data.mean()
class Orthogonalize(Transformation):
_variables_used = ('variables', 'other')
_densify = ('variables', 'other')
_aligned_required = 'force_dense'
_aligned_variables = ('other')
def _transform(self, var, other):
other = listify(other)
# Set up X matrix and slice into it based on target variable indices
X = np.array([self._variables[c].values.values.squeeze()
for c in other]).T
X = X[var.index, :]
assert len(X) == len(var)
y = var.values
_aX = np.c_[np.ones(len(y)), X]
coefs, resids, rank, s = np.linalg.lstsq(_aX, y, rcond=None)
result = pd.DataFrame(y - X.dot(coefs[1:]), index=var.index)
return result
class Product(Transformation):
_loopable = False
_groupable = False
_aligned_required = True
_output_required = True
def _transform(self, data):
data = pd.concat(data, axis=1, sort=True)
return data.product(1)
class Scale(Transformation):
"""Scale a variable.
Parameters
----------
data : :obj:`pandas.Series` or :obj:`pandas.DataFrame`
The variables to scale.
demean : bool
If True, demean each column.
rescale : bool
If True, divide variables by their standard deviation.
replace_na : str
Whether/when to replace missing values with 0. If
None, no replacement is performed. If 'before', missing values are
replaced with 0's before scaling. If 'after', missing values are
replaced with 0 after scaling.
Notes
-----
If a constant column is passed in, and replace_na is None or 'before', an
exception will be raised.
"""
def _transform(self, data, demean=True, rescale=True, replace_na=None):
if data.nunique() == 1 and replace_na in {None, 'before'}:
val = data.unique()[0]
raise ValueError("Cannot scale a column with constant value ({})! "
"If you want a constant column of 0's returned, "
"set replace_na to 'after'.".format(val))
if replace_na == 'before':
data = data.fillna(0.)
if demean:
data -= data.mean()
if rescale:
data /= data.std()
if replace_na == 'after':
data = data.fillna(0.)
return data
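# Standalone sketch (not part of the BIDS API) of what Scale does to a small
# Series with demean=True, rescale=True and replace_na='after'; the values are
# made up purely for illustration.
def _example_scale_behavior():
    s = pd.Series([1.0, 2.0, np.nan, 4.0])
    out = (s - s.mean()) / s.std()  # demean, then divide by the standard deviation
    return out.fillna(0.)           # replace_na='after' zeroes the missing entry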
class Sum(Transformation):
_loopable = False
_groupable = False
_aligned_required = True
_output_required = True
def _transform(self, data, weights=None):
data = pd.concat(data, axis=1, sort=True)
if weights is None:
weights = np.ones(data.shape[1])
else:
weights = np.array(weights)
if len(weights.ravel()) != data.shape[1]:
raise ValueError("If weights are passed to sum(), the number "
"of elements must equal number of variables"
" being summed.")
return (data * weights).sum(axis=1)
class Threshold(Transformation):
"""Threshold and/or binarize a variable.
Parameters
----------
data :obj:`pandas.Series` or :obj:`pandas.DataFrame`
The pandas structure to threshold.
threshold : float
The value to binarize around (values above will
be assigned 1, values below will be assigned 0).
binarize : bool
If True, binarizes all non-zero values (i.e., every
non-zero value will be set to 1).
above : bool
Specifies which values to retain with respect to the
cut-off. If True, all value above the threshold will be kept; if
False, all values below the threshold will be kept. Defaults to
True.
signed : bool
Specifies whether to treat the threshold as signed
(default) or unsigned. For example, when passing above=True and
threshold=3, if signed=True, all and only values above +3 would be
retained. If signed=False, all absolute values > 3 would be retained
        (i.e., values in the range -3 < X < 3 would be set to 0).
"""
_groupable = False
def _transform(self, data, threshold=0., binarize=False, above=True,
signed=True):
if not signed:
threshold = np.abs(threshold)
data = data.abs()
keep = data >= threshold if above else data <= threshold
data[~keep] = 0
if binarize:
data[keep] = 1
return data
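# Standalone sketch of the signed=False branch described above (made-up values):
# with threshold=3 and above=True, entries whose absolute value is below 3 are
# zeroed, and the surviving entries are returned as absolute values.
def _example_threshold_behavior():
    data = pd.Series([-5.0, -1.0, 2.0, 4.0]).abs()  # signed=False takes abs first
    keep = data >= 3.0                              # above=True keeps values >= threshold
    data[~keep] = 0
    return data  # -> [5.0, 0.0, 0.0, 4.0]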
class And_(Transformation):
"""Logical AND on two or more variables.
Parameters
----------
dfs : list of :obj:`pandas.DataFrame`
variables to enter into the conjunction.
"""
_loopable = False
_groupable = False
_output_required = True
_aligned_required = True
def _transform(self, dfs):
df = pd.concat(dfs, axis=1, sort=True)
return df.all(axis=1).astype(int)
class Not(Transformation):
"""Logical negation of a variable.
Parameters
----------
var : :obj:`pandas.Series`
Variable to negate. Must be convertible to bool.
"""
_loopable = True
_groupable = False
def _transform(self, var):
return ~var.astype(bool)
class Or_(Transformation):
"""Logical OR (inclusive) on two or more variables.
Parameters
----------
dfs : list of :obj:`pandas.DataFrame`
variables to enter into the disjunction.
"""
_loopable = False
_groupable = False
_output_required = True
_aligned_required = True
def _transform(self, dfs):
        df = pd.concat(dfs, axis=1, sort=True)
        return df.any(axis=1).astype(int)
#########
#File: c:\Users\digan\Dropbox\Dynamic_Networks\repos\ScoreDrivenExponentialRandomGraphs\_research\analysis_for_paper_revision\applic_reddit\0_load_reddit_pre_process.py
#Created Date: Tuesday May 4th 2021
#Author: <NAME>, <<EMAIL>>
#-----
#Last Modified: Thursday May 6th 2021 1:46:42 pm
#Modified By: <NAME>
#-----
#Description: preprocess reddit hyperlink data downloaded from https://snap.stanford.edu/data/soc-RedditHyperlinks.html
#-----
########
#%%
import pandas as pd
import numpy as np
import os
import sys
from matplotlib import pyplot as plt
#%%
# load data and rename columns
data_path = "../../../data/reddit_hyperlinks/raw_data/"
os.listdir(data_path)
col_names = ["source", "target", "post_id", "time", "sentiment", "properties"]
df_orig = pd.read_csv(f"{data_path}soc-redditHyperlinks-body.tsv", names = col_names, sep="\t", header = 0)
df_orig["datetime"] = pd.to_datetime(df_orig.time)
df_orig = df_orig.set_index("datetime")
df_orig = df_orig.sort_values(by="datetime")
#%% EDA
# check aggregate number of obs
df_count = df_orig.time.resample("W").count()
plt.plot(df_count, ".")
plt.plot(df_count[df_count==0], ".r")
# number of nodes appearing at least once
pd.concat((df_orig.source, df_orig.target)).unique().shape[0]
# how many nodes appear n-times ?
plt.plot(pd.concat((df_orig.source, df_orig.target)).value_counts().value_counts(), ".")  # count of nodes by number of appearances
import pandas as pd
import re
# Input: pandas DataFrame for total_milk_yield (per cow)
# Output: Average daily milk yield per cow
def avg_daily_milk_per_cow(total_milk_yield):
return round(total_milk_yield.sum()/len(total_milk_yield), 2)
# Input: pandas DataFrames for start_time, Animal_ID and kickOff
# Output: Average kickOffs relative to the distinct dates and cows present in the data
def avg_kickOffs(start_time, Animal_ID, kickOff):
    # denominator assumed to combine the number of distinct dates and distinct cows
    return round(kickOff.sum()/(len(pd.unique(start_time)) + len(pd.unique(Animal_ID))), 2)
# Input: pandas DataFrame columns for date, robot, action and milk_yield
# Output: List containing each robot name followed by its average daily received milk yield,
# then the latest date used for the calculation and the sum of the robots' daily averages
def avg_milk_from_robots(date, robot, action, milk_yield):
# Concatenate data
data = pd.concat([date, robot, action, milk_yield], axis = 1)
# Drop rows which are not milking
data = data.drop(data[data['Action'] != 1].index)
# Get the unique dates and robot names
dates = pd.unique(data['Date'])
robot_names = pd.unique(data['Robot'])
# Calculate the daily average milk yield received by each robot
avg_milk_by_robot = []
avg_total = 0
for robot in robot_names:
robot_data = data[data['Robot'] == robot]
avg_milk = robot_data['Milk_yield'].sum()/len(dates)
avg_total += avg_milk
avg_milk_by_robot.append(robot)
avg_milk_by_robot.append(round(avg_milk, 2))
# Add the latest date used for calculations and add the average milk from all robots
avg_milk_by_robot.append(data['Date'].iloc[-1])
avg_milk_by_robot.append(round(avg_total, 2))
return avg_milk_by_robot
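# Hedged usage sketch of avg_milk_from_robots with a made-up DataFrame. The
# Series names ('Date', 'Robot', 'Action', 'Milk_yield') must match the column
# names the function looks up after concatenation.
def example_avg_milk_from_robots():
    demo = pd.DataFrame({
        'Date': ['2021-01-01', '2021-01-01', '2021-01-02'],
        'Robot': ['R1', 'R2', 'R1'],
        'Action': [1, 1, 1],
        'Milk_yield': [10.0, 12.0, 8.0]
    })
    # -> ['R1', 9.0, 'R2', 6.0, '2021-01-02', 15.0]
    return avg_milk_from_robots(demo['Date'], demo['Robot'], demo['Action'], demo['Milk_yield'])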
# Input: pandas DataFrame for nr_of_milkings (per cow)
# Output: Average daily number of milkings per cow
def avg_nr_of_milkings_per_cow(nr_of_milkings):
return round(nr_of_milkings.sum()/len(nr_of_milkings), 2)
# Input: pandas DataFrame for animal_id, date and the result from communicating with the smartgate
# Output: Average number of smartgate passes based on the number of dates present in the data
def avg_nr_pass_smartgate(animal_id, date, result):
# Concatenate data
data = pd.concat([animal_id, date, result], axis = 1)
# Get rows which resulted in pass
data = data[data['Result'] == 1]
# Get the unique dates
dates = pd.unique(data['Date'])
# For each date, compute how many cows have passed the smartgate
active_cows = 0
for date in dates:
active_cows += len(pd.unique(data[data['Date'] == date]['Animal_ID']))
return round(len(data)/active_cows, 2)
# Input: pandas DataFrame for milking_time
# Output: Average milking time per milking event logged
def avg_time_in_robot(timeInRobot):
times=[]
for i in timeInRobot:
temp = re.findall(r'\d+', i)
times.append(float(temp[0])+float(temp[1])/60)
return round(pd.DataFrame(times).mean()[0],2)
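# Hedged usage sketch of avg_time_in_robot with made-up strings. The parser
# above treats the first number as whole units and the second as sixtieths,
# e.g. '7:30' -> 7.5, so the exact time format is an assumption here.
def example_avg_time_in_robot():
    return avg_time_in_robot(pd.Series(['7:30', '8:45']))  # mean of 7.5 and 8.75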
# Input: pandas DataFrame for start_time, animal_id, action and milk_weight,
# pandas DataFrame for animal_id and official ID
# pandas DataFrame for official ID and number of days in milk
# Output: Average milking volume per cow who has been in milk up to 100 days based on the number of dates present in the data
def avg_milking_volume_lact_0_100(start_time, animal_id, action, milk, animal_id2, off_id, off_id2, lact):
# Concatenate data
data = pd.concat([start_time, animal_id, action, milk], axis = 1)
data2 = pd.concat([animal_id2, off_id], axis = 1)
data3 = pd.concat([off_id2, lact], axis = 1)
# Removes all cows who doesn't fit the criteria
data3 = data3[data3['Days In Milk'] <= 100]
data2 = data2[data2['Official Reg. No. (ORN)'].isin(data3['Official Reg. No. (ORN)'])]
data = data[data['Animal_ID'].isin(data2['Animal Number'])]
# Get the unique dates and cows
dates = pd.unique(data['Date'])
    cow_id = pd.unique(data['Animal_ID'])
# Copyright (c) 2019-2020, NVIDIA CORPORATION.
"""
Test related to MultiIndex
"""
import re
import cupy as cp
import numpy as np
import pandas as pd
import pytest
import cudf
from cudf.core.column import as_column
from cudf.core.index import as_index
from cudf.tests.utils import assert_eq, assert_neq
def test_multiindex_levels_codes_validation():
levels = [["a", "b"], ["c", "d"]]
# Codes not a sequence of sequences
with pytest.raises(TypeError):
pd.MultiIndex(levels, [0, 1])
with pytest.raises(TypeError):
cudf.MultiIndex(levels, [0, 1])
# Codes don't match levels
with pytest.raises(ValueError):
pd.MultiIndex(levels, [[0], [1], [1]])
with pytest.raises(ValueError):
cudf.MultiIndex(levels, [[0], [1], [1]])
# Largest code greater than number of levels
with pytest.raises(ValueError):
pd.MultiIndex(levels, [[0, 1], [0, 2]])
with pytest.raises(ValueError):
cudf.MultiIndex(levels, [[0, 1], [0, 2]])
# Unequal code lengths
with pytest.raises(ValueError):
pd.MultiIndex(levels, [[0, 1], [0]])
with pytest.raises(ValueError):
cudf.MultiIndex(levels, [[0, 1], [0]])
# Didn't pass levels and codes
with pytest.raises(TypeError):
pd.MultiIndex()
with pytest.raises(TypeError):
cudf.MultiIndex()
# Didn't pass non zero levels and codes
with pytest.raises(ValueError):
pd.MultiIndex([], [])
with pytest.raises(ValueError):
cudf.MultiIndex([], [])
def test_multiindex_construction():
levels = [["a", "b"], ["c", "d"]]
codes = [[0, 1], [1, 0]]
pmi = pd.MultiIndex(levels, codes)
mi = cudf.MultiIndex(levels, codes)
assert_eq(pmi, mi)
pmi = pd.MultiIndex(levels, codes)
mi = cudf.MultiIndex(levels=levels, codes=codes)
assert_eq(pmi, mi)
def test_multiindex_types():
codes = [[0, 1], [1, 0]]
levels = [[0, 1], [2, 3]]
pmi = pd.MultiIndex(levels, codes)
mi = cudf.MultiIndex(levels, codes)
assert_eq(pmi, mi)
levels = [[1.2, 2.1], [1.3, 3.1]]
pmi = pd.MultiIndex(levels, codes)
mi = cudf.MultiIndex(levels, codes)
assert_eq(pmi, mi)
levels = [["a", "b"], ["c", "d"]]
pmi = pd.MultiIndex(levels, codes)
mi = cudf.MultiIndex(levels, codes)
assert_eq(pmi, mi)
def test_multiindex_df_assignment():
pdf = pd.DataFrame({"x": [1, 2, 3]})
gdf = cudf.from_pandas(pdf)
pdf.index = pd.MultiIndex([["a", "b"], ["c", "d"]], [[0, 1, 0], [1, 0, 1]])
gdf.index = cudf.MultiIndex(
levels=[["a", "b"], ["c", "d"]], codes=[[0, 1, 0], [1, 0, 1]]
)
assert_eq(pdf, gdf)
def test_multiindex_series_assignment():
ps = pd.Series([1, 2, 3])
gs = cudf.from_pandas(ps)
ps.index = pd.MultiIndex([["a", "b"], ["c", "d"]], [[0, 1, 0], [1, 0, 1]])
gs.index = cudf.MultiIndex(
levels=[["a", "b"], ["c", "d"]], codes=[[0, 1, 0], [1, 0, 1]]
)
assert_eq(ps, gs)
def test_string_index():
from cudf.core.index import StringIndex
pdf = pd.DataFrame(np.random.rand(5, 5))
gdf = cudf.from_pandas(pdf)
stringIndex = ["a", "b", "c", "d", "e"]
pdf.index = stringIndex
gdf.index = stringIndex
assert_eq(pdf, gdf)
stringIndex = np.array(["a", "b", "c", "d", "e"])
pdf.index = stringIndex
gdf.index = stringIndex
assert_eq(pdf, gdf)
stringIndex = StringIndex(["a", "b", "c", "d", "e"], name="name")
pdf.index = stringIndex.to_pandas()
gdf.index = stringIndex
assert_eq(pdf, gdf)
stringIndex = as_index(as_column(["a", "b", "c", "d", "e"]), name="name")
pdf.index = stringIndex.to_pandas()
gdf.index = stringIndex
assert_eq(pdf, gdf)
def test_multiindex_row_shape():
pdf = pd.DataFrame(np.random.rand(0, 5))
gdf = cudf.from_pandas(pdf)
pdfIndex = pd.MultiIndex([["a", "b", "c"]], [[0]])
pdfIndex.names = ["alpha"]
gdfIndex = cudf.from_pandas(pdfIndex)
assert_eq(pdfIndex, gdfIndex)
with pytest.raises(ValueError):
pdf.index = pdfIndex
with pytest.raises(ValueError):
gdf.index = gdfIndex
@pytest.fixture
def pdf():
return pd.DataFrame(np.random.rand(7, 5))
@pytest.fixture
def gdf(pdf):
return cudf.from_pandas(pdf)
@pytest.fixture
def pdfIndex():
pdfIndex = pd.MultiIndex(
[
["a", "b", "c"],
["house", "store", "forest"],
["clouds", "clear", "storm"],
["fire", "smoke", "clear"],
[
np.datetime64("2001-01-01", "ns"),
np.datetime64("2002-01-01", "ns"),
np.datetime64("2003-01-01", "ns"),
],
],
[
[0, 0, 0, 0, 1, 1, 2],
[1, 1, 1, 1, 0, 0, 2],
[0, 0, 2, 2, 2, 0, 1],
[0, 0, 0, 1, 2, 0, 1],
[1, 0, 1, 2, 0, 0, 1],
],
)
pdfIndex.names = ["alpha", "location", "weather", "sign", "timestamp"]
return pdfIndex
@pytest.fixture
def pdfIndexNulls():
pdfIndex = pd.MultiIndex(
[
["a", "b", "c"],
["house", "store", "forest"],
["clouds", "clear", "storm"],
],
[
[0, 0, 0, -1, 1, 1, 2],
[1, -1, 1, 1, 0, 0, -1],
[-1, 0, 2, 2, 2, 0, 1],
],
)
pdfIndex.names = ["alpha", "location", "weather"]
return pdfIndex
def test_from_pandas(pdf, pdfIndex):
pdf.index = pdfIndex
gdf = cudf.from_pandas(pdf)
assert_eq(pdf, gdf)
def test_multiindex_transpose(pdf, pdfIndex):
pdf.index = pdfIndex
gdf = cudf.from_pandas(pdf)
assert_eq(pdf.transpose(), gdf.transpose())
def test_from_pandas_series():
pdf = pd.DataFrame(
{"a": [1, 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]}
).set_index(["a", "b"])
result = cudf.from_pandas(pdf)
assert_eq(pdf, result)
test_pdf = pdf["c"]
result = cudf.from_pandas(test_pdf)
assert_eq(test_pdf, result)
def test_series_multiindex(pdfIndex):
ps = pd.Series(np.random.rand(7))
gs = cudf.from_pandas(ps)
ps.index = pdfIndex
gs.index = cudf.from_pandas(pdfIndex)
assert_eq(ps, gs)
def test_multiindex_take(pdf, gdf, pdfIndex):
gdfIndex = cudf.from_pandas(pdfIndex)
pdf.index = pdfIndex
gdf.index = gdfIndex
assert_eq(pdf.index.take([0]), gdf.index.take([0]))
assert_eq(pdf.index.take(np.array([0])), gdf.index.take(np.array([0])))
from cudf import Series
assert_eq(pdf.index.take(pd.Series([0])), gdf.index.take(Series([0])))
assert_eq(pdf.index.take([0, 1]), gdf.index.take([0, 1]))
assert_eq(
pdf.index.take(np.array([0, 1])), gdf.index.take(np.array([0, 1]))
)
assert_eq(
pdf.index.take(pd.Series([0, 1])), gdf.index.take(Series([0, 1]))
)
def test_multiindex_getitem(pdf, gdf, pdfIndex):
gdfIndex = cudf.from_pandas(pdfIndex)
pdf.index = pdfIndex
gdf.index = gdfIndex
assert_eq(pdf.index[0], gdf.index[0])
@pytest.mark.parametrize(
"key_tuple",
[
# return 2 rows, 0 remaining keys = dataframe with entire index
("a", "store", "clouds", "fire"),
(("a", "store", "clouds", "fire"), slice(None)),
# return 2 rows, 1 remaining key = dataframe with n-k index columns
("a", "store", "storm"),
(("a", "store", "storm"), slice(None)),
# return 2 rows, 2 remaining keys = dataframe with n-k index columns
("a", "store"),
(("a", "store"), slice(None)),
# return 2 rows, n-1 remaining keys = dataframe with n-k index columns
("a",),
(("a",), slice(None)),
# return 1 row, 0 remaining keys = dataframe with entire index
("a", "store", "storm", "smoke"),
(("a", "store", "storm", "smoke"), slice(None)),
# return 1 row and 1 remaining key = series
("c", "forest", "clear"),
(("c", "forest", "clear"), slice(None)),
],
)
def test_multiindex_loc(pdf, gdf, pdfIndex, key_tuple):
gdfIndex = cudf.from_pandas(pdfIndex)
assert_eq(pdfIndex, gdfIndex)
pdf.index = pdfIndex
gdf.index = gdfIndex
assert_eq(pdf.loc[key_tuple], gdf.loc[key_tuple])
def test_multiindex_loc_slice(pdf, gdf, pdfIndex):
gdf = cudf.from_pandas(pdf)
gdfIndex = cudf.from_pandas(pdfIndex)
pdf.index = pdfIndex
gdf.index = gdfIndex
assert_eq(
pdf.loc[("a", "store"):("b", "house")],
gdf.loc[("a", "store"):("b", "house")],
)
def test_multiindex_loc_then_column(pdf, gdf, pdfIndex):
gdfIndex = cudf.from_pandas(pdfIndex)
assert_eq(pdfIndex, gdfIndex)
pdf.index = pdfIndex
gdf.index = gdfIndex
assert_eq(
pdf.loc[("a", "store", "clouds", "fire"), :][0],
gdf.loc[("a", "store", "clouds", "fire"), :][0],
)
def test_multiindex_loc_rows_0(pdf, gdf, pdfIndex):
gdfIndex = cudf.from_pandas(pdfIndex)
pdf.index = pdfIndex
gdf.index = gdfIndex
with pytest.raises(KeyError):
print(pdf.loc[("d",), :].to_pandas())
with pytest.raises(KeyError):
print(gdf.loc[("d",), :].to_pandas())
assert_eq(pdf, gdf)
def test_multiindex_loc_rows_1_2_key(pdf, gdf, pdfIndex):
gdfIndex = cudf.from_pandas(pdfIndex)
pdf.index = pdfIndex
gdf.index = gdfIndex
print(pdf.loc[("c", "forest"), :])
print(gdf.loc[("c", "forest"), :].to_pandas())
assert_eq(pdf.loc[("c", "forest"), :], gdf.loc[("c", "forest"), :])
def test_multiindex_loc_rows_1_1_key(pdf, gdf, pdfIndex):
gdfIndex = cudf.from_pandas(pdfIndex)
pdf.index = pdfIndex
gdf.index = gdfIndex
print(pdf.loc[("c",), :])
print(gdf.loc[("c",), :].to_pandas())
assert_eq(pdf.loc[("c",), :], gdf.loc[("c",), :])
def test_multiindex_column_shape():
pdf = pd.DataFrame(np.random.rand(5, 0))
gdf = cudf.from_pandas(pdf)
pdfIndex = pd.MultiIndex([["a", "b", "c"]], [[0]])
pdfIndex.names = ["alpha"]
gdfIndex = cudf.from_pandas(pdfIndex)
assert_eq(pdfIndex, gdfIndex)
with pytest.raises(ValueError):
pdf.columns = pdfIndex
with pytest.raises(ValueError):
gdf.columns = gdfIndex
@pytest.mark.parametrize(
"query",
[
("a", "store", "clouds", "fire"),
("a", "store", "storm", "smoke"),
("a", "store"),
("b", "house"),
("a", "store", "storm"),
("a",),
("c", "forest", "clear"),
],
)
def test_multiindex_columns(pdf, gdf, pdfIndex, query):
pdf = pdf.T
gdf = cudf.from_pandas(pdf)
gdfIndex = cudf.from_pandas(pdfIndex)
assert_eq(pdfIndex, gdfIndex)
pdf.columns = pdfIndex
gdf.columns = gdfIndex
assert_eq(pdf[query], gdf[query])
def test_multiindex_from_tuples():
arrays = [["a", "a", "b", "b"], ["house", "store", "house", "store"]]
tuples = list(zip(*arrays))
pmi = pd.MultiIndex.from_tuples(tuples)
gmi = cudf.MultiIndex.from_tuples(tuples)
assert_eq(pmi, gmi)
def test_multiindex_from_dataframe():
if not hasattr(pd.MultiIndex([[]], [[]]), "codes"):
pytest.skip()
pdf = pd.DataFrame(
[["a", "house"], ["a", "store"], ["b", "house"], ["b", "store"]]
)
gdf = cudf.from_pandas(pdf)
    pmi = pd.MultiIndex.from_frame(pdf, names=["alpha", "location"])
import os
import sys
import multiprocessing as mp
import string
import platform
import shutil
import time
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
from mpl_toolkits.axes_grid1 import make_axes_locatable
import calendar
import pyemu
import flopy
# some global config for plotting
SMALL_SIZE = 8
MEDIUM_SIZE = 10
BIGGER_SIZE = 12
plt.rc('font', size=SMALL_SIZE) # controls default text sizes
plt.rc('axes', titlesize=MEDIUM_SIZE) # fontsize of the axes title
plt.rc('axes', labelsize=MEDIUM_SIZE) # fontsize of the x and y labels
plt.rc('xtick', labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc('ytick', labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc('legend', fontsize=SMALL_SIZE) # legend fontsize
plt.rc('figure', titlesize=BIGGER_SIZE) # fontsize of the figure title
abet = string.ascii_uppercase
# some global config for path/directory structure
old_h_dir = os.path.join("..", "ver")
h_nam_file = "eaa_ver.nam"
h_dir = "history"
h_start_datetime = "1-1-2001"
h_end_datetime = "12-31-2015"
old_s_dir = os.path.join("..", "pred")
s_dir = "scenario"
s_nam_file = "eaa_pred.nam"
# history and scenario simulation start datetimes
s_start_datetime = "1-1-1947"
s_end_datetime = "12-31-1958"
# files with history and scenario observation locations and states
h_hds_file = os.path.join("_data", "reformatted_head_obs.smp")
h_drn_file = os.path.join("_data", "springflow_obs.smp")
h_crd_file = os.path.join("_data", "head_obs.crd")
s_hds_file = os.path.join("_data", "pred_head_obs.smp")
s_drn_file = os.path.join("_data", "pred_springflow_obs.smp")
s_crd_file = os.path.join("_data", "pred_head_obs.crd")
# value of dry cells
hdry = -1.0e+20
# platform-specific binary information
exe_name = "mf2005"
ies_name = "pestpp-ies"
if "window" in platform.platform().lower():
bin_path = os.path.join("bin", "win")
exe_name = exe_name + ".exe"
ies_name = ies_name + ".exe"
elif "darwin" in platform.platform().lower():
bin_path = os.path.join("bin", "mac")
else:
bin_path = os.path.join("bin", "linux")
# the numeric IDs of J-17 and J-27
j17_id = 6837203
j27_id = 6950302
def _setup_model(old_dir, new_dir, start_datetime, nam_file, run=False):
"""load an existing model (either history or scenario) and configure it for
PEST interface construction
Args:
old_dir (str): directory location where the original model resides
new_dir (str): directory location where the new model files will be written
start_datetime (str): string rep of model starting datetime
nam_file (str): MODFLOW-2005 nam file
run (bool): flag to run the model once it is written to new_dir. Default is False
"""
# load the existing model and set some attributes
m = flopy.modflow.Modflow.load(nam_file, model_ws=old_dir, check=False,
verbose=True, forgive=False)
m.start_datetime = start_datetime
m.lpf.hdry = hdry
m.bas6.hnoflo = hdry
# change the workspace to new_dir
m.change_model_ws(new_dir, reset_external=True)
# set the external path so that arrays and lists are outside of the
# terrible MODFLOW file formats
m.external_path = "."
# write the inputs
m.write_input()
# run?
if run:
shutil.copy2(os.path.join(bin_path, exe_name), os.path.join(new_dir, exe_name))
pyemu.os_utils.run("{0} {1}".format(exe_name, nam_file), cwd=new_dir)
def _rectify_wel(model_ws, nam_file, run=True):
"""rectify the stress period WEL file entries so that every
stress period has the same entries (filling missing wells with
"dummy" entries with zero pumping)
Args:
model_ws (str): model workspace
nam_file (str): MODFLOW-2005 nam file
run (bool): flag to run model once the WEL file has been rectified.
Default is True.
"""
# load the model
m = flopy.modflow.Modflow.load(nam_file, model_ws=model_ws, check=False,
verbose=True, forgive=False)
# get the current WEL file datasets
spd = m.wel.stress_period_data
df_dict = {}
all_kij = set()
# run thru all stress periods to get the union of well locations
for kper in range(m.nper):
ra = spd[kper]
df = pd.DataFrame.from_records(ra)
df.loc[:, "kij"] = df.apply(lambda x: (x.k, x.i, x.j), axis=1)
df.loc[:, "kij_str"] = df.kij.apply(lambda x: "{0:01.0f}_{1:03.0f}_{2:03.0f}".format(*x))
df.index = df.kij_str
all_kij.update(set(df.kij_str.tolist()))
print(kper)
df_dict[kper] = df
# work up fast-lookup containers for well location indices
new_index = list(all_kij)
new_k = {s: int(s.split('_')[0]) for s in new_index}
new_i = {s: int(s.split('_')[1]) for s in new_index}
new_j = {s: int(s.split('_')[2]) for s in new_index}
new_index.sort()
# process each stress period
new_spd = {}
for kper, df in df_dict.items():
# reindex with the full kij locations index
df = df.reindex(new_index)
# map the new kijs to the old kijs
for f, d in zip(["k", "i", "j"], [new_k, new_i, new_j]):
isna = df.loc[:, f].isna()
df.loc[isna, f] = [d[kij] for kij in df.loc[isna, :].index.values]
# fill the nans with 0.0
isna = df.flux.isna()
df.loc[isna, "flux"] = 0.0
# deal with the platform numpy int casting issue
if "window" in platform.platform():
df.loc[:, "i"] = df.i.astype(np.int32)
df.loc[:, "j"] = df.j.astype(np.int32)
df.loc[:, "k"] = df.k.astype(np.int32)
else:
df.loc[:, "i"] = df.i.astype(np.int)
df.loc[:, "j"] = df.j.astype(np.int)
df.loc[:, "k"] = df.k.astype(np.int)
spd[kper] = df.loc[:, ["k", "i", "j", "flux"]].to_records(index=False)
# create a new WEL package and replace the old one
flopy.modflow.ModflowWel(m, stress_period_data=spd, ipakcb=m.wel.ipakcb)
# write to a new model_ws with a "_wel" suffix
m.change_model_ws("{0}_wel".format(model_ws))
m.external_path = '.'
m.write_input()
# run?
if run:
shutil.copy2(os.path.join(bin_path, exe_name), os.path.join("{0}_wel".format(model_ws), exe_name))
pyemu.os_utils.run("{0} {1}".format(exe_name, nam_file), cwd="{0}_wel".format(model_ws))
# just to make sure the model ran
new_lst = flopy.utils.MfListBudget(os.path.join("{0}_wel".format(model_ws), nam_file.replace(".nam", ".list")))
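# Toy sketch of the reindex-and-fill idea used in _rectify_wel above (locations
# and fluxes are made up): each stress period is expanded to the union of well
# locations and missing wells are given a zero flux.
def _example_rectify_reindex():
    all_kij = ["0_010_020", "0_015_025"]
    kper0 = pd.DataFrame({"flux": [-100.0]}, index=["0_010_020"])
    kper0 = kper0.reindex(all_kij)
    kper0.loc[kper0.flux.isna(), "flux"] = 0.0
    return kper0  # both locations present; the missing well pumps 0.0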
def build_rch_zone_array(model_ws, nam_file, plot=False):
"""build a recharge zone integer array for zone-based parameters
    using unique values in the recharge arrays
Args:
model_ws (str): model workspace
nam_file (str): MODFLOW-2005 nam file
plot (bool): flag to plot the zone array. Default is False
"""
m = flopy.modflow.Modflow.load(nam_file, model_ws=model_ws, load_only=["rch"], check=False,
                                       verbose=True, forgive=False)
arr = m.rch.rech[0].array
full_arr = m.rch.rech.array
mn = full_arr.mean(axis=0)[0, :, :]
mn_u, mn_c = np.unique(mn, return_counts=True)
zn_arr = np.zeros_like(arr, dtype=np.int)
for i, u_val in enumerate(mn_u):
        # this conditional makes sure we keep zeros as zero in the zone array
if u_val == 0.0:
continue
zn_arr[mn == u_val] = i
np.savetxt(os.path.join("_data", "rch_zn_arr.dat"), zn_arr, fmt="%3d")
if plot:
zn_arr = zn_arr.astype(np.float)
zn_arr[zn_arr == 0] = np.NaN
cb = plt.imshow(zn_arr)
plt.colorbar(cb)
plt.show()
def _setup_pst(org_model_ws, new_model_ws, nam_file):
"""construct the PEST interface, set parameter bounds and
generate the prior ensemble
Args:
org_model_ws (str): original model workspace
new_model_ws (str): new model workspace/directory where the
PEST interface will be constructed
nam_file (str): MODFLOW-2005 nam file
"""
# make sure the model simulated heads file exists - need this for observations
if not os.path.exists(os.path.join(org_model_ws, nam_file.replace(".nam", ".hds"))):
raise Exception("need to call _setup_model()")
# load the model from org_model_ws
m= flopy.modflow.Modflow.load(nam_file, model_ws=org_model_ws,
load_only=["dis"], check=False,
verbose=True, forgive=False)
# load the recharge zone array
rch_zn_arr = np.loadtxt(os.path.join("_data", "rch_zn_arr.dat"), dtype=np.int)
# array-based model inputs to parameterize by layer (zero-based)
props = [["lpf.hk", 0], ["lpf.ss", 0], ["lpf.sy", 0], ["bas6.strt", 0]]
# copy to constant (global props)
const_props = props.copy()
# fill a zone-based array inputs container with recharge
# zone pars for each stress period
zone_props = []
zone_props.extend([["rch.rech", kper] for kper in range(m.nper)])
# extend the global parameter container with recharge for each stress period
const_props.extend([["rch.rech", kper] for kper in range(m.nper)])
# include the final simulated groundwater level in every active
# model cell as an "observation" in PEST interface
hds_kperk = [[m.nper - 1, 0]]
# parameterize WEL flux and DRN cond spatially (one par for each entry)
spatial_bc_props = [["wel.flux", 0], ["drn.cond", 0]]
# parameterize WEL flux with a single global multiplier for ecah stress period
temporal_bc_props = [["wel.flux", kper] for kper in range(m.nper)]
#create the pest interface...
ph = pyemu.helpers.PstFromFlopyModel(nam_file, org_model_ws=org_model_ws, new_model_ws=new_model_ws,
grid_props=props,
hds_kperk=hds_kperk, zone_props=zone_props, hfb_pars=True,
remove_existing=True, build_prior=False, k_zone_dict={0: rch_zn_arr},
spatial_bc_props=spatial_bc_props, temporal_bc_props=temporal_bc_props,
model_exe_name=exe_name, pp_props=props, pp_space=30, const_props=const_props)
# set the parameter bounds to Edwards-based physically-plausible values
_set_par_bounds(ph.pst, nam_file)
# geostatistcal draws from the prior
pe = ph.draw(num_reals=300, use_specsim=True)
#add the control file initial values as a realization
pe.add_base()
# enforce parameter bounds on the ensemble
pe.enforce()
# save the ensemble to compressed (PEST extended binary) format
pe.to_binary(os.path.join(new_model_ws, "prior.jcb"))
# save the control file
ph.pst.write(os.path.join(new_model_ws, nam_file.replace(".nam", ".pst")))
# read the array parameter multiplier config file and set a hard upper bound
# on specific yield
df = pd.read_csv(os.path.join(new_model_ws, "arr_pars.csv"))
df.loc[:, "upper_bound"] = np.NaN
df.loc[:, "lower_bound"] = np.NaN
df.loc[df.org_file.apply(lambda x: "sy_" in x), "upper_bound"] = 0.25
df.to_csv(os.path.join(new_model_ws, "arr_pars.csv"))
# put the MODFLOW-2005 and PESTPP-IES binaries in the new_model_ws
shutil.copy2(os.path.join(bin_path, exe_name), os.path.join(new_model_ws, exe_name))
shutil.copy2(os.path.join(bin_path, ies_name), os.path.join(new_model_ws, ies_name))
def _set_par_bounds(pst, nam_file):
"""set the parameter bounds to expert-knowledge-based
ranges
Args:
pst (pyemu.Pst): PEST control file instance
nam_file (str): MODFLOW-2005 nam file
"""
par = pst.parameter_data
# special case for WEL flux pars: more recent time has metering, so less uncertainty
names = par.loc[par.pargp.apply(lambda x: "welflux" in x), "parnme"]
if nam_file == h_nam_file:
par.loc[names, "parlbnd"] = 0.9
par.loc[names, "parubnd"] = 1.1
else:
par.loc[names, "parlbnd"] = 0.7
par.loc[names, "parubnd"] = 1.3
# DRN conductance
names = par.loc[par.pargp.apply(lambda x: "drncond" in x), "parnme"]
par.loc[names, "parlbnd"] = 0.5
par.loc[names, "parubnd"] = 1.5
# initial conditions
names = par.loc[par.pargp.apply(lambda x: "strt" in x), "parnme"]
par.loc[names, "parlbnd"] = 0.9
par.loc[names, "parubnd"] = 1.1
# recharge
names = par.loc[par.pargp.apply(lambda x: "rech" in x), "parnme"]
par.loc[names, "parlbnd"] = 0.8
par.loc[names, "parubnd"] = 1.2
# HK
names = par.loc[par.pargp.apply(lambda x: "hk" in x), "parnme"]
par.loc[names, "parlbnd"] = 0.01
par.loc[names, "parubnd"] = 100
def _add_smp_obs_to_pst(org_model_ws, new_model_ws, pst_name, nam_file, hds_crd_file):
"""add observations to the control file for the locations where groundwater levels
have been measured. The actual value of the observations will be set elsewhere
Args:
org_model_ws (str): original model workspace
new_model_ws (str): new model workspace
pst_name (str): PEST control file name
nam_file (str): MODFLOW-2005 nam file
hds_crd_file (str): PEST-style coordinate file that has been processed
to include k,i,j indices
"""
# make sure the control file exists
pst_name = os.path.join(new_model_ws, pst_name)
assert os.path.exists(pst_name)
# load the model
m = flopy.modflow.Modflow.load(nam_file, model_ws=new_model_ws,
load_only=["dis"], check=False,
forgive=False)
# load the control file
pst = pyemu.Pst(pst_name)
# load GW level location dataframe
crd_df = pd.read_csv(hds_crd_file + ".csv")
#load DRN location dataframe
drn_df = pd.read_csv(os.path.join("_data", "DRN_dict.csv"), delim_whitespace=True,
header=None, names=["name", "k", "i", "j"])
# build a dict of name-index location for DRN locations
kij_dict = {n: [0, i, j] for n, i, j in zip(drn_df.name, drn_df.i, drn_df.j)}
# the name of the DRN budget file
cbd_file = nam_file.replace(".nam", ".cbd")
# get one from the org model workspace and update the path to it
shutil.copy2(os.path.join(org_model_ws, cbd_file), os.path.join(new_model_ws, cbd_file))
cbd_file = os.path.join(new_model_ws, cbd_file)
# setup the forward run DRN budget post processor
prec = "double"
if "win" not in platform.platform().lower(): # not win or darwin
prec = "singl"
cbd_frun, cbd_df = pyemu.gw_utils.setup_hds_timeseries(cbd_file, kij_dict, prefix="drn",
include_path=True, fill=-1.0e+30,
text="drains", precision=prec,
model=m)
# make sure the new DRN instruction file exists
ins_file = "{0}_timeseries.processed.ins".format(cbd_file)
assert os.path.exists(ins_file), ins_file
# add the new DRN observations to the control file
pst.add_observations(ins_file=ins_file, pst_path=".")
# set meaningful obs group names
pst.observation_data.loc[cbd_df.index, "obgnme"] = cbd_df.obgnme
    # build a dict of name-index locations for the GW level observation locations
kij_dict = {n: [0, i, j] for n, i, j in zip(crd_df.name, crd_df.i, crd_df.j)}
# setup GW level post processor
hds_file = os.path.join(new_model_ws, nam_file.replace(".nam", ".hds"))
assert os.path.exists(hds_file)
hds_frun, hds_df = pyemu.gw_utils.setup_hds_timeseries(hds_file, kij_dict, prefix="hds",
include_path=True, fill=-1.0e+30, model=m)
# make sure the GW level instruction file exists
ins_file = "{0}_timeseries.processed.ins".format(hds_file)
assert os.path.exists(ins_file), ins_file
# add the GW level obs to the control file and set meaningful
# obs group names
pst.add_observations(ins_file=ins_file, pst_path=".")
pst.observation_data.loc[hds_df.index, "obgnme"] = hds_df.obgnme
# write the updated control file
pst.write(pst_name)
# add the post processor commands to the forward run script
frun_file = os.path.join(new_model_ws, "forward_run.py")
with open(frun_file, 'r') as f:
lines = f.readlines()
idx = None
for i, line in enumerate(lines):
if "__name__" in line:
idx = i
assert idx is not None
lines.insert(idx, " " + cbd_frun + '\n')
lines.insert(idx, " " + hds_frun + '\n')
with open(frun_file, 'w') as f:
for line in lines:
f.write(line)
def add_ij_to_hds_smp(crd_file):
"""intersect the GW level observation coordinates against the
model grid to get k,i,j index information
Args:
crd_file (str): PEST-style "bore coordinates" file
"""
from shapely.geometry import Point
# read the bore coord file
crd_df = pd.read_csv(crd_file, delim_whitespace=True, header=None, names=["name", "x", "y", "layer"])
# set a shapely point attribute
crd_df.loc[:, "pt"] = crd_df.apply(lambda x: Point(x.x, x.y), axis=1)
# load the history model
m = flopy.modflow.Modflow.load(h_nam_file, model_ws=h_dir,
load_only=["dis"], check=False,
forgive=False)
# use the flopy grid intersect functionality
gi = flopy.utils.GridIntersect(m.modelgrid)
crd_df.loc[:, 'ij'] = crd_df.pt.apply(lambda x: gi.intersect_point(x)[0][0])
# split out the i and j indices
crd_df.loc[:, 'i'] = crd_df.ij.apply(lambda x: x[0])
crd_df.loc[:, 'j'] = crd_df.ij.apply(lambda x: x[1])
# remove extra columns
crd_df.pop("ij")
crd_df.pop("pt")
# save the new dataframe to a CSV file
crd_df.to_csv(crd_file + ".csv")
def _set_obsvals(d, nam_file, hds_file, drn_file, pst_file, run=True):
"""samples the groundwater and spring discharge observations to
the model stress periods and sets the "obsval" attribute in the control
file. Also plots up org obs and sampled obs in a multipage pdf
Args:
d (str): directory where the control file exists
nam_file (str): MODFLOW-2005 nam file
hds_file (str): PEST-style site sample file with groundwater
level observations
drn_file (str): PEST-style site sample file with spring discharge
observations
pst_file (str): PEST control file
run (bool): flag to run PESTPP-IES with NOPTMAX=0 after the
observation values have been updated. Default is True.
"""
# load the model
m = flopy.modflow.Modflow.load(nam_file, model_ws=d, load_only=["dis"],
check=False, forgive=False)
# work out the stress period ending datetime
sp_end_dts = pd.to_datetime(m.start_datetime) + pd.to_timedelta(np.cumsum(m.dis.perlen.array), unit='d')
# cast the model start_datetime from a str to a datetime instance
start_datetime = pd.to_datetime(m.start_datetime)
# load the gw level and spring discharge site sample files
# into pandas dataframes
hds_df = pyemu.smp_utils.smp_to_dataframe(hds_file)
drn_df = pyemu.smp_utils.smp_to_dataframe(drn_file)
# plotting limits
xmn, xmx = pd.to_datetime(start_datetime), pd.to_datetime(sp_end_dts[-1])
ymn, ymx = hds_df.value.min(), hds_df.value.max()
# containers for the sampled observation series
hds_sampled_dfs = []
drn_sampled_dfs = []
# a function to sample each observation in a given site
# dataframe to the model stress period ending datetimes
# uses nearest neighbor
def sample_to_model(udf):
d, v = [], []
for dt, val in zip(udf.index.values, udf.value.values):
# difference between this obs datetime and the
# stress period end datetime
diff = (sp_end_dts - dt).map(np.abs).values
# the index of the minimum diff (nearest neighbor)
idxmin = np.argmin(diff)
# minimum diff in days
day_diff = diff[idxmin].astype('timedelta64[D]')
# the diff is greater than a month, something is wrong...
if day_diff > np.timedelta64(31, 'D'):
print(idxmin, sp_end_dts[idxmin], dt, day_diff)
continue
# save the datetime and value
d.append(sp_end_dts[idxmin])
v.append(val)
# form a new dataframe and return
udf_mod = pd.DataFrame({"value": v}, index=d)
return udf_mod
# save a multipage PDF for inspection
with PdfPages(os.path.join("_data", "obs.pdf")) as pdf:
ax_per_page = 10
fig, axes = plt.subplots(ax_per_page, 1, figsize=(8.5, 11))
ax_count = 0
# process each unique GW level site entry
for usite in hds_df.name.unique():
print(usite)
# get a dataframe of just this site
udf = hds_df.loc[hds_df.name == usite, ["datetime", "value"]].copy()
# set the index to datetime
udf.index = udf.pop("datetime")
# sample to stress period ending datetimes
udf_mod = sample_to_model(udf)
#set a name attribute
udf_mod.loc[:, "name"] = usite
# store new sample site dataframe
hds_sampled_dfs.append(udf_mod)
# plot
ax = axes[ax_count]
ax.plot(udf.index, udf.value, lw=0.5, marker='.', color='0.5', ms=5, alpha=0.5)
ax.plot(udf_mod.index, udf_mod.value, lw=0.5, marker='.', color='b', ms=5, alpha=0.5)
ax.set_title("site:{0}, org count:{1}, reindexed count:{2}".format(usite, udf.shape[0], udf_mod.shape[0]),
loc="left")
ax.set_xlim(xmn, xmx)
# ax.set_ylim(ymn,ymx)
ax_count += 1
if ax_count >= ax_per_page:
plt.tight_layout()
pdf.savefig()
plt.close(fig)
fig, axes = plt.subplots(ax_per_page, 1, figsize=(8.5, 11))
ax_count = 0
        # process each unique DRN site entry
for usite in drn_df.name.unique():
print(usite)
# get a dataframe of just this site
udf = drn_df.loc[drn_df.name == usite, ["datetime", "value"]].copy()
# use the datetime as the index
udf.index = udf.pop("datetime")
# sample to stress period ending datetime
udf_mod = sample_to_model(udf)
# set a name attribute
udf_mod.loc[:, "name"] = usite
# store
drn_sampled_dfs.append(udf_mod)
# plot
ax = axes[ax_count]
ax.plot(udf.index, udf.value, lw=0.5, marker='.', color='0.5', ms=5, alpha=0.5)
ax.plot(udf_mod.index, udf_mod.value, lw=0.5, marker='.', color='b', ms=5, alpha=0.5)
ax.set_title("site:{0}, org count:{1}, reindexed count:{2}".format(usite, udf.shape[0], udf_mod.shape[0]),
loc="left")
ax.set_xlim(xmn, xmx)
ax_count += 1
if ax_count >= ax_per_page:
plt.tight_layout()
pdf.savefig()
plt.close(fig)
fig, axes = plt.subplots(ax_per_page, 1, figsize=(8.5, 11))
ax_count = 0
plt.tight_layout()
pdf.savefig()
# concatenate the sampled GW level dataframes into one large dataframe
    hds_df = pd.concat(hds_sampled_dfs)
'''
@Description: code
@Author: MiCi
@Date: 2020-03-12 15:04:12
@LastEditTime: 2020-03-13 11:27:07
@LastEditors: MiCi
'''
import pandas as pd
import numpy as np
class Basic2(object):
def __init__(self):
return
def basic_use(self):
df = pd.DataFrame({'A': np.array([1, np.nan, 2, 3, 6, np.nan]),
'B': np.array([np.nan, 4, np.nan, 5, 9, np.nan]),
'C': 'test'})
# 重命名列名
df.columns = ['a', 'b', 'c']
print(df)
# 检查df中为null的情况
        print(pd.isnull(df))
"""
module for mathematical modeling
"""
from typing import *
import numpy as np
import pandas as pd
from scipy import stats
from sklearn.base import BaseEstimator
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import GridSearchCV
## for show_tree
from io import StringIO
from sklearn.tree import export_graphviz
from sklearn.tree import DecisionTreeRegressor, DecisionTreeClassifier
from IPython.display import Image
import pydot
## for ROCCurve class
from sklearn.metrics import roc_curve, roc_auc_score
from sklearn.metrics import recall_score, precision_score, f1_score
import matplotlib.pyplot as plt
## Hyperparameters for GridSearchCV
## Keep them simple. They are only for first simple analysis.
grid_params = {
"ElasticNet": {
"alpha": [0.01, 0.1, 1.0],
"l1_ratio": [0.1, 0.4, 0.7, 1]
},
"LogisticRegression": {
"C": [0.1, 1.0, 10],
"l1_ratio": [0.1, 0.5, 0.9]
},
"DecisionTree": {
'max_leaf_nodes': [3, 6, 12, 24]
},
"RandomForest": {
'n_estimators': [10, 30, 50],
'max_depth': [3, 5]
},
"XGB": {
'learning_rate': [0.01, 0.1],
'n_estimators': [5, 10, 20],
'max_depth': [5, 10]}
}
def add_prefix_to_param(prefix:str, param_grid:dict) -> Dict[str,list]:
"""
Create a param_grid for Pipeline from an "ordinary" param_grid.
:param prefix: name of the step
:param param_grid: ordinary grid_param
:return: modified dict
"""
return { "%s__%s" % (prefix,k): v for k,v in param_grid.items()}
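def _example_add_prefix_to_param() -> Dict[str,list]:
    """
    Illustrative sketch: for a Pipeline step named "rf",
    {"max_depth": [3, 5]} becomes {"rf__max_depth": [3, 5]},
    i.e. the "step__param" convention GridSearchCV expects.
    """
    return add_prefix_to_param("rf", {"max_depth": [3, 5]})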
def simple_pipeline_cv(name:str, model:BaseEstimator, param_grid:Dict[str,list],
cv:int=5, scoring:Any="accuracy", scaler:BaseEstimator=None,
return_train_score=True, **kwarg) -> GridSearchCV:
"""
Create a pipeline with only one scaler and an estimator
:param name: name of your model (e.g. rf).
:param model: Estimator (Classifier/Regressor) instance
    :param param_grid: grid parameters for cross validation, in the same form as an ordinary param_grid for the estimator (step prefixes are added automatically)
:param cv: number of folds in CV
:param scoring: See https://scikit-learn.org/stable/modules/model_evaluation.html
    :param scaler: Transformer instance. The default value is MinMaxScaler()
:param return_train_score: if self.cv_results_ contains the average training scores
:param kwarg: arguments for GridSearchCV
:return: GridSearchCV instance
"""
import sklearn
if scaler is None:
scaler = MinMaxScaler()
pipeline = Pipeline([("scaler", scaler), (name, model)])
param_grid = add_prefix_to_param(name, param_grid)
## TODO: remove this if-statement in the future.
if sklearn.__version__ > "0.24":
## new version does not have the parameter iid
model = GridSearchCV(pipeline, param_grid, cv=cv, scoring=scoring, refit=True,
return_train_score=return_train_score, **kwarg)
elif kwarg.get("iid") is not None:
## if it is explicitly given
model = GridSearchCV(pipeline, param_grid, cv=cv, scoring=scoring, refit=True,
return_train_score=return_train_score, **kwarg)
else:
## if nothing is given
model = GridSearchCV(pipeline, param_grid, cv=cv, scoring=scoring, refit=True,
return_train_score=return_train_score, iid=False, **kwarg)
return model
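## Illustrative usage sketch (not part of the original module; assumes X_train and
## y_train already exist and uses an arbitrary estimator choice):
##
##   from sklearn.ensemble import RandomForestClassifier
##   grid = simple_pipeline_cv("rf", RandomForestClassifier(), grid_params["RandomForest"], cv=5)
##   grid.fit(X_train, y_train)
##   summary = cv_results_summary(grid)   # helper defined below
##
## The returned object is an ordinary GridSearchCV, so best_params_ and
## best_estimator_ are available after fitting.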
def cv_results_summary(grid:GridSearchCV, alpha:float=0.05) -> pd.DataFrame:
"""
Make the result of CV more smaller and add confidence interval of
cross-validation scores.
:param grid: fitted instance of GridSearchCV
:param alpha: significance level (default = 0.05)
:return: DataFrame of the results of the cross-validation.
"""
## compute the confidence interval of validation scores
## Assumption: scores follow a normal distribution and its mean and standard deviation
## are not known.
df = pd.DataFrame(grid.cv_results_)
n_fold = grid.cv ## number of folds in CV
delta = df["std_test_score"]*stats.t.ppf(1-alpha/2, n_fold-1)/np.sqrt(n_fold)
df["test_CI_low"] = df["mean_test_score"] - delta
df["test_CI_high"] = df["mean_test_score"] + delta
## select columns
param_list = [k for k in grid.cv_results_.keys() if k.startswith("param_")]
cols = ["rank_test_score", "mean_test_score", "std_test_score",
"test_CI_low", "test_CI_high"]
if grid.return_train_score:
cols.append("mean_train_score")
cols.extend(param_list)
df = df[cols].copy()
df.set_index(cols[0], inplace=True)
return df.sort_index()
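## Worked example of the interval above (illustrative): with cv=5 folds and
## alpha=0.05, stats.t.ppf(0.975, 4) is roughly 2.776, so the half-width is
## std_test_score * 2.776 / sqrt(5), i.e. about 1.24 * std_test_score.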
def pick_the_last_estimator(grid:GridSearchCV) -> BaseEstimator:
"""
Pick the "last" component of the Pipeline instance in the given GridSearchCV instance.
If the estimator of the GridSearchCV is not a Pipeline, then we give the estimator.
:param grid: fitted GridSearchCV instance
:return: estimator instance
"""
if isinstance(grid.best_estimator_, Pipeline):
## pick the last estimator
return grid.best_estimator_.steps[-1][1]
else:
return grid.best_estimator_
def show_coefficients(grid:GridSearchCV, columns:List[str]) -> pd.DataFrame:
"""
show the regression coefficients of the trained model.
WARNING: If you have a fitted Pipeline instance, then we pick the last estimator in it.
And the coefficients you see are coefficients after other Transformers such as MinMaxScaler.
While you can safely compare fields by looking at the coefficients, you cannot interpret
the coefficients on the original scale.
This function works if the last component of the pipeline has .coef_ and intercept_ as
attributes.
:param grid: fitted GridSearchCV instance
:param columns: list of columns
:return: DataFrame of coefficients
"""
## TODO: change the name of the method, because show_* must return a figure
model = pick_the_last_estimator(grid)
if not hasattr(model, "coef_") or not hasattr(model, "intercept_"):
raise Exception("You probably have no linear model")
if hasattr(model,"classes_"):
## classification
## If we have a binary classifier, model.coef_ is an array of shape (1,p),
## but model.class_ contains two classes. Thus we need to pick the positive class
labels = model.classes_ if len(model.classes_) > 2 else [model.classes_[1]]
else:
## regression
labels = ["coefficient"]
df_coef = pd.DataFrame(model.coef_.T, index=columns, columns=labels)
df_intercept = | pd.DataFrame([model.intercept_], index=["intercept"], columns=labels) | pandas.DataFrame |
from strategy.rebalance import get_relative_to_expiry_rebalance_dates, \
get_fixed_frequency_rebalance_dates, \
get_relative_to_expiry_instrument_weights
from strategy.calendar import get_mtm_dates
import pandas as pd
import pytest
from pandas.util.testing import assert_index_equal, assert_frame_equal
def assert_dict_of_frames(dict1, dict2):
assert dict1.keys() == dict2.keys()
for key in dict1:
assert_frame_equal(dict1[key], dict2[key], check_names=False)
def test_tradeables_dates():
# no CME holidays in this date range
sd = pd.Timestamp("2015-01-02")
ed = pd.Timestamp("2015-03-23")
exchanges = ["CME"]
tradeable_dates = get_mtm_dates(sd, ed, exchanges)
exp_tradeable_dates = pd.date_range(
"2015-01-02", "2015-03-23", freq="B"
)
assert_index_equal(tradeable_dates, exp_tradeable_dates)
# with an ad hoc holiday
holidays = [pd.Timestamp("2015-01-02")]
tradeable_dates = get_mtm_dates(sd, ed, exchanges, holidays=holidays)
exp_tradeable_dates = pd.date_range(
"2015-01-03", "2015-03-23", freq="B"
)
assert_index_equal(tradeable_dates, exp_tradeable_dates)
# with a CME holiday (New Year's Day)
sd = pd.Timestamp("2015-01-01")
ed = pd.Timestamp("2015-01-02")
tradeable_dates = get_mtm_dates(sd, ed, exchanges)
exp_tradeable_dates = pd.DatetimeIndex([pd.Timestamp("2015-01-02")])
assert_index_equal(tradeable_dates, exp_tradeable_dates)
def test_relative_to_expiry_rebalance_dates():
# each contract rolling individually, same offset
# change to ES and TY
sd = pd.Timestamp("2015-01-02")
ed = pd.Timestamp("2015-03-23")
expiries = pd.DataFrame(
[["2015ESH", "2015-03-20", "2015-03-20"],
["2015ESM", "2015-06-19", "2015-06-19"],
["2015TYH", "2015-02-27", "2015-03-20"],
["2015TYM", "2015-05-29", "2015-06-19"]],
columns=["contract", "first_notice", "last_trade"]
)
offsets = -3
rebal_dates = get_relative_to_expiry_rebalance_dates(
sd, ed, expiries, offsets, all_monthly=False, holidays=None
)
exp_rebal_dates = pd.DatetimeIndex(
["2015-01-02", "2015-02-24", "2015-03-17"]
)
assert_index_equal(rebal_dates, exp_rebal_dates)
# rolling all monthly contracts together, same offset
rebal_dates = get_relative_to_expiry_rebalance_dates(
sd, ed, expiries, offsets, all_monthly=True, holidays=None
)
exp_rebal_dates = pd.DatetimeIndex(["2015-01-02", "2015-02-24"])
assert_index_equal(rebal_dates, exp_rebal_dates)
# rolling each contract individually, different offset
offsets = {"ES": -3, "TY": -4}
rebal_dates = get_relative_to_expiry_rebalance_dates(
sd, ed, expiries, offsets, all_monthly=False, holidays=None
)
exp_rebal_dates = pd.DatetimeIndex(
["2015-01-02", "2015-02-23", "2015-03-17"]
)
assert_index_equal(rebal_dates, exp_rebal_dates)
def test_relative_to_expiry_weights():
expiries = pd.DataFrame(
[["2015ESH", "2015-03-20", "2015-03-20"],
["2015ESM", "2015-06-19", "2015-06-19"],
["2015ESU", "2015-09-18", "2015-09-18"],
["2015TYH", "2015-03-16", "2015-03-20"],
["2015TYM", "2015-05-29", "2015-06-19"],
["2015TYU", "2015-08-31", "2015-09-21"]],
columns=["contract", "first_notice", "last_trade"]
)
# one generic and one product
dts = pd.date_range("2015-03-17", "2015-03-18", freq="B")
offsets = -3
root_gnrcs = {"ES": ["ES1"]}
wts = get_relative_to_expiry_instrument_weights(
dts, root_gnrcs, expiries, offsets
)
exp_wts = {
"ES": pd.DataFrame(
[1.0, 1.0],
index=pd.MultiIndex.from_tuples(
[(pd.Timestamp("2015-03-17"), "2015ESH"),
(pd.Timestamp("2015-03-18"), "2015ESM")],
names=("date", "contract")),
columns=["ES1"]
)
}
assert_dict_of_frames(wts, exp_wts)
# multiple products
dts = pd.date_range("2015-03-13", "2015-03-20", freq="B")
offsets = -1
root_gnrcs = {"ES": ["ES1"], "TY": ["TY1"]}
wts = get_relative_to_expiry_instrument_weights(
dts, root_gnrcs, expiries, offsets
)
exp_wts = {
"ES": pd.DataFrame([1.0, 1.0, 1.0, 1.0, 1.0, 1.0],
index=pd.MultiIndex.from_tuples(
[(pd.Timestamp("2015-03-13"), "2015ESH"),
(pd.Timestamp("2015-03-16"), "2015ESH"),
(pd.Timestamp("2015-03-17"), "2015ESH"),
(pd.Timestamp("2015-03-18"), "2015ESH"),
(pd.Timestamp("2015-03-19"), "2015ESH"),
(pd.Timestamp("2015-03-20"), "2015ESM"),],
names=("date", "contract")),
columns=["ES1"]
),
"TY": pd.DataFrame([1.0, 1.0, 1.0, 1.0, 1.0, 1.0],
index=pd.MultiIndex.from_tuples(
[( | pd.Timestamp("2015-03-13") | pandas.Timestamp |
import numpy as np
import pandas as pd
from scipy.stats import norm, rankdata
from scipy import spatial
class GeostatsDataFrame(object):
"""A class to load an transform a table of xy + feature values into
a compliant dataframe for variogram calculations"""
coord_cols = {'x':'x', 'y':'y'}
random_seed = np.random.seed(73073)
nscore_epsilon = 1.0e-20
def __init__(self, filepath = None, pd_df = None):
if filepath is not None:
self.input = | pd.read_csv(filepath) | pandas.read_csv |
import email
import pandas as pd
def extract(data, structured_fields=[], extract_payload=True):
r"""This function extracts data for the given header list from the Enron email dataset.
It provides flexibilty to choose which fields needs to be extracted.
The header list provided by the user are the tags in the email of Enron dataset, eg. Date, Subject etc.
By default, if no header is provided, this function returns only the email text body of the Enron dataset.
Arguments:
1) data: Dataframe It is the Enron dataset with column headings. This argument can not be kept empty.
2) structured_fields: List It is a of tags for which data needs to be extracted. Example: ['Date', 'Subject', 'X-To']. This argument can be droppped if not required.
3) extract_pyload: Boolean True if email text body is required. False in case only structured_fields needs to be extracted. This field can alo be dropped while calling the function. In case nothing is specified, default boolean value True is used.
return: Dataframe A dataframe with specified fields along with the original columns passsed as the data argument.
This function is created to take off the burden of extracting desired fields from the Enron dataset. However, this does not clean the data, eg. it does not remove the empty rows or columns. Neither it does the pre-processing of data like lowercase and removal of unwanted characters.
In order to make it more powerful, above functions can be added.
"""
headers=data.columns
emails = data.rename(columns={headers[0]:'email_path', headers[1]:'email'})
#getting structured text
def create_dict(dictionary, key, value):
if key in dictionary:
values = dictionary.get(key)
values.append(value)
dictionary[key] = values
else:
dictionary[key] = [value]
return dictionary
def get_structured_data(df, fields):
structured_data = {}
messages = df["email"]
for message in messages:
e = email.message_from_string(message)
for header in fields:
header_data = e.get(header)
create_dict(dictionary = structured_data, key = header, value = header_data)
return | pd.DataFrame(structured_data) | pandas.DataFrame |
from typing import Dict, List, Tuple
import pandas as pd
import requests
from dash import dash_table as dt
from codecarbon.core.emissions import Emissions
from codecarbon.input import DataSource, DataSourceException
class Data:
def __init__(self):
self._data_source = DataSource()
self._emissions = Emissions(self._data_source)
@staticmethod
def get_project_data(df: pd.DataFrame, project_name) -> dt.DataTable:
project_df = df[df.project_name == project_name]
project_df = project_df.sort_values(by="timestamp")
project_data = project_df.to_dict("rows")
columns = [{"name": column, "id": column} for column in project_df.columns]
return dt.DataTable(data=project_data, columns=columns)
@staticmethod
def get_project_summary(project_data: List[Dict]):
last_run = project_data[-1]
project_summary = {
"last_run": {
"timestamp": last_run["timestamp"],
"duration": last_run["duration"],
"emissions": round(last_run["emissions"], 1),
"energy_consumed": round((last_run["energy_consumed"]), 1),
},
"total": {
"duration": sum(
map(lambda experiment: experiment["duration"], project_data)
),
"emissions": sum(
map(lambda experiment: experiment["emissions"], project_data)
),
"energy_consumed": sum(
map(lambda experiment: experiment["energy_consumed"], project_data)
),
},
"country_name": last_run["country_name"],
"country_iso_code": last_run["country_iso_code"],
"region": last_run["region"],
"on_cloud": last_run["on_cloud"],
"cloud_provider": last_run["cloud_provider"],
"cloud_region": last_run["cloud_region"],
}
return project_summary
def get_car_miles(self, project_carbon_equivalent: float):
"""
8.89 × 10^-3 metric tons CO2/gallon gasoline ×
1/22.0 miles per gallon car/truck average ×
1 CO2, CH4, and N2O/0.988 CO2
= 4.09 × 10^-4 metric tons CO2E/mile
= 0.409 kg CO2E/mile
Source: EPA
:param project_carbon_equivalent: total project emissions in kg CO2E
:return: number of miles driven by avg car
"""
return "{:.0f}".format(project_carbon_equivalent / 0.409)
def get_tv_time(self, project_carbon_equivalent: float):
"""
Gives the amount of time for which
a 32-inch LCD flat-screen TV would have to run to emit
an equivalent amount of carbon
Ratio is 0.097 kg CO2 / 1 hour tv
:param project_carbon_equivalent: total project emissions in kg CO2E
:return: equivalent TV time
"""
time_in_minutes = project_carbon_equivalent * (1 / 0.097) * 60
formated_value = "{:.0f} minutes".format(time_in_minutes)
if time_in_minutes >= 60:
time_in_hours = time_in_minutes / 60
formated_value = "{:.0f} hours".format(time_in_hours)
if time_in_hours >= 24:
time_in_days = time_in_hours / 24
formated_value = "{:.0f} days".format(time_in_days)
return formated_value
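# Worked example (illustrative): 10 kg CO2E -> 10 / 0.097 ~= 103 hours of TV,
# which is at least 24 hours, so the formatted result is "4 days".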
def get_household_fraction(self, project_carbon_equivalent: float):
"""
Total CO2 emissions for energy use per home: 5.734 metric tons CO2 for electricity
+ 2.06 metric tons CO2 for natural gas + 0.26 metric tons CO2 for liquid petroleum gas
+ 0.30 metric tons CO2 for fuel oil = 8.35 metric tons CO2 per home per year / 52 weeks
= 160.58 kg CO2/week on average
Source: EPA
:param project_carbon_equivalent: total project emissions in kg CO2E
:return: % of weekly emissions re: an average American household
"""
return "{:.2f}".format((project_carbon_equivalent / 160.58) * 100)
def get_global_emissions_choropleth_data(
self, net_energy_consumed: float
) -> List[Dict]:
def formatted_energy_percentage(energy_type: float, total: float) -> float:
return float("{:.1f}".format((energy_type / total) * 100))
global_energy_mix = self._data_source.get_global_energy_mix_data()
choropleth_data = []
for country_iso_code in global_energy_mix.keys():
country_name = global_energy_mix[country_iso_code]["country_name"]
if country_iso_code not in ["_define", "ATA"]:
from codecarbon.core.units import Energy
energy_consumed = Energy.from_energy(kWh=net_energy_consumed)
from codecarbon.external.geography import GeoMetadata
country_emissions = self._emissions.get_country_emissions(
energy_consumed,
GeoMetadata(
country_name=country_name, country_iso_code=country_iso_code
),
)
total = global_energy_mix[country_iso_code]["total_TWh"]
choropleth_data.append(
{
"iso_code": country_iso_code,
"emissions": country_emissions,
"country": country_name,
"fossil": formatted_energy_percentage(
global_energy_mix[country_iso_code]["fossil_TWh"], total
),
"geothermal": formatted_energy_percentage(
global_energy_mix[country_iso_code]["geothermal_TWh"], total
),
"hydroelectricity": formatted_energy_percentage(
global_energy_mix[country_iso_code]["hydroelectricity_TWh"],
total,
),
"nuclear": formatted_energy_percentage(
global_energy_mix[country_iso_code]["nuclear_TWh"], total
),
"solar": formatted_energy_percentage(
global_energy_mix[country_iso_code]["solar_TWh"], total
),
"wind": formatted_energy_percentage(
global_energy_mix[country_iso_code]["wind_TWh"], total
),
}
)
return choropleth_data
def get_regional_emissions_choropleth_data(
self, net_energy_consumed: float, country_iso_code: str
) -> List[Dict]:
# add country codes here to render for different countries
if country_iso_code.upper() not in ["USA", "CAN"]:
return [{"region_code": "", "region_name": "", "emissions": ""}]
try:
region_emissions = self._data_source.get_country_emissions_data(
country_iso_code.lower()
)
except DataSourceException: # This country has regional data at the energy mix level, not the emissions level
country_energy_mix = self._data_source.get_country_energy_mix_data(
country_iso_code.lower()
)
region_emissions = {
region: {"regionCode": region}
for region, energy_mix in country_energy_mix.items()
}
choropleth_data = []
for region_name in region_emissions.keys():
region_code = region_emissions[region_name]["regionCode"]
if region_name not in ["_unit"]:
from codecarbon.core.units import Energy
energy_consumed = Energy.from_energy(kWh=net_energy_consumed)
from codecarbon.external.geography import GeoMetadata
emissions = self._emissions.get_region_emissions(
energy_consumed,
GeoMetadata(country_iso_code=country_iso_code, region=region_name),
)
choropleth_data.append(
{
"region_code": region_code,
"region_name": region_name.upper(),
"emissions": emissions,
}
)
return choropleth_data
def get_cloud_emissions_barchart_data(
self,
net_energy_consumed: float,
on_cloud: str,
cloud_provider: str,
cloud_region: str,
) -> Tuple[str, pd.DataFrame]:
if on_cloud == "N":
return (
"",
pd.DataFrame(data={"region": [], "emissions": [], "country_name": []}),
)
cloud_emissions = self._data_source.get_cloud_emissions_data()
cloud_emissions = cloud_emissions[
["provider", "providerName", "region", "impact", "country_name"]
]
from codecarbon.core.units import EmissionsPerKWh
cloud_emissions["emissions"] = cloud_emissions.apply(
lambda row: EmissionsPerKWh.from_g_per_kWh(row.impact).kgs_per_kWh
* net_energy_consumed,
axis=1,
)
cloud_emissions_project_region = cloud_emissions[
cloud_emissions.region == cloud_region
]
cloud_emissions = cloud_emissions[
(cloud_emissions.provider == cloud_provider)
& (cloud_emissions.region != cloud_region)
].sort_values(by="emissions")
return (
cloud_emissions_project_region.iloc[0, :].providerName,
| pd.concat([cloud_emissions_project_region, cloud_emissions]) | pandas.concat |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 10 00:27:23 2018
@author: amal
"""
import pandas as pd
import os.path
import requests
import time
from google.transit import gtfs_realtime_pb2
route_name_subset = ['Green-B',
'Green-C',
'Green-D',
'Green-E',
'Orange',
'Red',
'Blue']
def make_trip_object(t, updated):
trip = {}
trip['trip_id'] = t.trip_id
trip['start_date'] = t.start_date
trip['route_id'] = t.route_id
trip['updated'] = updated
return trip
def make_stop_time_update_object(t, stu, updated):
stop_time_update = {}
stop_time_update['trip_id'] = t.trip_id
if stu.departure:
stop_time_update['departure'] = stu.departure.time
if stu.arrival:
stop_time_update['arrival'] = stu.arrival.time
stop_time_update['stop_id'] = stu.stop_id
stop_time_update['updated'] = updated
return stop_time_update
def make_vehicle_object(v, updated):
vehicle = {}
vehicle['vehicle_id'] = v.vehicle.id
vehicle['trip_id'] = v.trip.trip_id
vehicle['schedule_relationship'] = v.trip.schedule_relationship
vehicle['route_id'] = v.trip.route_id
vehicle['direction_id'] = v.trip.direction_id
vehicle['latitude'] = v.position.latitude
vehicle['longitude'] = v.position.longitude
#vehicle['bearing'] = v.position.bearing
vehicle['curr_stop_sequence'] = v.current_stop_sequence
vehicle['curr_status'] = v.current_status
vehicle['server_time'] = v.timestamp
vehicle['stop_id'] = v.stop_id
vehicle['system_time'] = updated
return vehicle
def get_gtfs_realtime_vehicle_positions(vehicle_position_url):
feed = gtfs_realtime_pb2.FeedMessage()
vehicle_position_data = requests.get(vehicle_position_url).content
feed.ParseFromString(vehicle_position_data)
trips = []
stop_time_updates = []
vehicles = []
updated = int(time.time())
for entity in feed.entity:
if entity.HasField('trip_update'):
trips.append(make_trip_object(entity.trip_update.trip, updated))
for s in entity.trip_update.stop_time_update:
stop_time_updates.append(make_stop_time_update_object(entity.trip_update.trip, s, updated))
elif entity.HasField('vehicle'):
vehicles.append(make_vehicle_object(entity.vehicle, updated))
current_vehicle_positions = pd.DataFrame(vehicles)
current_vehicle_positions = current_vehicle_positions.loc[current_vehicle_positions['route_id'].isin(route_name_subset)]
return(current_vehicle_positions)
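# Illustrative usage (the URL below is a placeholder, not taken from this script):
#   df = get_gtfs_realtime_vehicle_positions("https://example.com/VehiclePositions.pb")
# The result is a DataFrame of current vehicle positions restricted to the
# subway routes listed in route_name_subset above.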
def get_weather():
CITY_ID = '4930956'
START = '1541177994'
END = '1541625811'
API_URL = 'http://history.openweathermap.org/data/2.5/history/'
API_KEY = '&APPID=f2945dde296e86ae509e15d26ded0bb1'
CITY_COMP = '&city?id=' + CITY_ID
TYPE_COMP = '&type=hour'
START_COMP = '&start=' + START
END_COMP = '&end='+ END
URL = API_URL + API_KEY + CITY_COMP + TYPE_COMP + START_COMP + END_COMP
data = pd.read_csv('../datasets.nosync/weather_info.csv')
def interpolate_data_intermediate():
one_week_data = | pd.read_pickle('../datasets.nosync/data_intermediate.pkl') | pandas.read_pickle |
import asyncio
import copy
import json
from datetime import date, timedelta
from typing import Dict, Tuple
import pandas as pd
from pytz import timezone
from tqdm import tqdm
from liualgotrader.common import config
from liualgotrader.common.data_loader import DataLoader # type: ignore
from liualgotrader.common.database import fetch_as_dataframe
from liualgotrader.common.tlog import tlog
from liualgotrader.models.accounts import Accounts
from liualgotrader.models.optimizer import OptimizerRun
from liualgotrader.models.portfolio import Portfolio
from liualgotrader.trading.trader_factory import trader_factory
est = timezone("America/New_York")
def portfolio_return(
env: str, start_date: date
) -> Tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame]:
df = load_trades_for_period(
env, start_date, date.today() + timedelta(days=1)
)
# day X strategy = total return
table: Dict = {}
invested_table: Dict = {}
percentages_table: Dict = {}
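# table accumulates per-day, per-strategy net cash flow (sell proceeds minus buy cost),
# invested_table the per-day buy cost, and percentages_table the resulting
# return in percent (100 * net cash flow / invested).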
for index, row in df.iterrows():
d = pd.Timestamp(pd.to_datetime(row["client_time"]).date(), tz=est)
if d not in table:
table[d] = {}
invested_table[d] = {}
percentages_table[d] = {}
table[d]["revenue"] = 0.0
invested_table[d]["invested"] = 0.0
strat = row["algo_name"]
if strat not in table[d]:
table[d][strat] = 0.0
invested_table[d][strat] = 0.0
percentages_table[d][strat] = 0.0
delta = (
(1.0 if row["operation"] == "sell" and row["qty"] > 0 else -1.0)
* float(row["price"])
* row["qty"]
)
table[d][strat] += delta
table[d]["revenue"] += delta
if row["operation"] == "buy":
invested_table[d][strat] += row["qty"] * float(row["price"])
invested_table[d]["invested"] += row["qty"] * float(row["price"])
percentages_table[d][strat] = (
(100.0 * table[d][strat] / invested_table[d][strat])
if invested_table[d][strat] != 0
else 0.0
)
percentages_table[d]["revenue"] = (
(100.0 * table[d]["revenue"] / invested_table[d]["invested"])
if invested_table[d][strat] != 0
else 0.0
)
return (
pd.DataFrame.from_dict(table, orient="index").sort_index(),
pd.DataFrame.from_dict(invested_table, orient="index").sort_index(),
pd.DataFrame.from_dict(percentages_table, orient="index").sort_index(),
)
def load_trades_for_period(
env: str, from_date: date, to_date: date
) -> pd.DataFrame:
query = f"""
SELECT client_time, symbol, operation, qty, price, algo_name
FROM
new_trades as t, algo_run as a
WHERE
t.algo_run_id = a.algo_run_id AND
t.tstamp >= '{from_date}' AND
t.tstamp < '{to_date}' AND
t.expire_tstamp is null AND
a.algo_env = '{env}'
ORDER BY symbol, tstamp
"""
loop = asyncio.get_event_loop()
return loop.run_until_complete(fetch_as_dataframe(query))
def load_trades(day: date, end_date: date = None) -> pd.DataFrame:
query = f"""
SELECT t.*, a.batch_id, a.algo_name
FROM
new_trades as t, algo_run as a
WHERE
t.algo_run_id = a.algo_run_id AND
t.tstamp >= '{day}' AND
t.tstamp < '{day + timedelta(days=1) if not end_date else end_date}' AND
t.expire_tstamp is null
ORDER BY symbol, tstamp
"""
loop = asyncio.get_event_loop()
return loop.run_until_complete(fetch_as_dataframe(query))
def load_client_trades(day: date, end_date: date = None) -> pd.DataFrame:
query = f"""
SELECT t.*, a.batch_id, a.algo_name
FROM
new_trades as t, algo_run as a
WHERE
t.algo_run_id = a.algo_run_id AND
t.tstamp >= '{day}' AND
t.tstamp < '{day + timedelta(days=1) if not end_date else end_date}' AND
t.expire_tstamp is null
ORDER BY symbol, tstamp
"""
loop = asyncio.get_event_loop()
return loop.run_until_complete(fetch_as_dataframe(query))
async def aload_trades_by_batch_id(batch_id: str) -> pd.DataFrame:
query = f"""
SELECT
t.*, a.batch_id, a.start_time, a.algo_name
FROM
new_trades as t, algo_run as a
WHERE
t.algo_run_id = a.algo_run_id AND
a.batch_id = '{batch_id}' AND
t.expire_tstamp is null
ORDER BY symbol, tstamp
"""
df: pd.DataFrame = await fetch_as_dataframe(query)
try:
if not df.empty:
df["client_time"] = | pd.to_datetime(df["client_time"]) | pandas.to_datetime |
"""
Note: Utils in this file are meant to be imported into the clustering notebooks (typically as `cu`, see Usage below).
Usage:
If the first cell ran correctly (changing the CWD to the Jupyter directory and adding '.' to the sys path), then:
import Notebooks.Clustering.cluster_utils as cu
Otherwise:
from google.colab import drive
drive.mount('/content/drive')
import os
FIELDDAY_DIR = '/content/drive/My Drive/Field Day' # the field day directory on the mounted drive
JUPYTER_DIR = os.path.join(FIELDDAY_DIR,'Research and Writing Projects/2020 Lakeland EDM/Jupyter')
os.chdir(JUPYTER_DIR)
import sys
sys.path.append('.')
import Notebooks.Clustering.cluster_utils as cu
"""
import os
import numpy as np
import pandas as pd
import seaborn as sns
sns.set()
import urllib.request
import utils as utils
import ipywidgets as widgets
from collections import namedtuple
from io import BytesIO
from matplotlib import pyplot as plt
from scipy import stats
from typing import Optional, List, Iterable
from zipfile import ZipFile
from sklearn.metrics import confusion_matrix, classification_report
from sklearn.metrics import plot_precision_recall_curve, plot_confusion_matrix, plot_roc_curve
from sklearn.metrics import f1_score, roc_auc_score, roc_curve, accuracy_score
from datetime import datetime
import pickle
from collections import Counter
def print_options(meta):
"""
Takes in meta text and outputs text for an options group.
:param meta: meta text. Expected format will be like:
Metadata:
Import from https://opengamedata.fielddaylab.wisc.edu/data/LAKELAND/LAKELAND_20191201_to_20191231_de09c18_proc.zip
Import from Data/Raw Log Data/LAKELAND_20200101_to_20200131_a9720c1_proc.zip
*arg* filter_args = {'query_list': ['debug == 0', 'sess_ActiveEventCount >= 10', 'sessDuration >= 300', '_continue == 0'], 'one_query': False, 'fillna': 0, 'verbose': True}
Query: Intial Shape, output_shape: (32227, 1647)
Query: debug == 0, output_shape: (32221, 1647)
Query: sess_ActiveEventCount >= 10, output_shape: (26934, 1647)
Query: sessDuration >= 300, output_shape: (16109, 1647)
Query: _continue == 0, output_shape: (10591, 1647)
Filled NaN with 0
*arg* new_feat_args = {'verbose': False, 'avg_tile_hover_lvl_range': None}
*arg* lvlfeats = ['count_blooms', 'count_deaths', 'count_farmfails', 'count_food_produced', 'count_milk_produced']
*arg* lvlrange = range(0, 2)
Describe Level Feats lvls 0 to 1. Assuming WINDOW_SIZE_SECONDS=300 and WINDOW_OVERLAP_SECONDS=30, filtered by (sessDuration > 570)
*arg* finalfeats = ['avg_lvl_0_to_1_count_deaths', 'avg_lvl_0_to_1_count_farmfails', 'avg_lvl_0_to_1_count_food_produced', 'avg_lvl_0_to_1_count_milk_produced']
Original Num Rows: 6712
*arg* zthresh = 3
Removed points with abs(ZScore) >= 3. Reduced num rows: 6497
where all args are denoted by an initial *arg* and values are after =.
"""
if type(meta) == str:
meta = meta.split('\n')
inner = ',\n\t'.join(["'GAME'", "'NAME'"] + [l[6:].split(' = ')[1]
for l in meta if l.startswith('*arg*')] + ['[]'])
print(f'options({inner}\n)')
def openZipFromURL(url):
"""
:param url: url pointing to a zipfile
:return: zipfile object, list of metadata lines
"""
metadata = [f'Import from {url}']
resp = urllib.request.urlopen(url)
zipfile = ZipFile(BytesIO(resp.read()))
return zipfile, metadata
def openZipFromPath(path):
"""
:param path: path pointing to a zipfile
:return: zipfile object, list of metadata lines
"""
metadata = [f'Import from {path}']
zipfile = ZipFile(path)
return zipfile, metadata
def readCSVFromPath(path, index_cols):
"""
:param path: path pointing to a csv
:return: dataframe, List[str] of metadata lines
"""
import os
print(os.getcwd())
metadata = [f'Import from {path}']
df = pd.read_csv(path, index_col=index_cols, comment='#')
return df, metadata
def getZippedLogDFbyURL(proc_zip_urls, index_cols=['sessionID']):
"""
:param proc_zip_urls: List of urls to proc data file zips.
:param index_cols: List of columns to be treated as index columns.
:return: (df, metadata List[str])
"""
# get the data
metadata = []
df = pd.DataFrame()
for next_url in proc_zip_urls:
zf, meta = openZipFromURL(next_url)
# put the data into a dataframe
with zf.open(zf.namelist()[0]) as f:
df = pd.concat(
[df, pd.read_csv(f, index_col=index_cols, comment='#')], sort=True)
metadata.extend(meta)
if len(index_cols) > 1:
for i, col_name in enumerate(index_cols):
df[col_name] = [x[i] for x in df.index]
else:
df[index_cols[0]] = [x for x in df.index]
return df, metadata
def getLogDFbyPath(proc_paths, zipped=True, index_cols=['sessionID']):
"""
:param proc_paths: List of paths to proc data files.
:param zipped: True if files are zipped, false if just CSVs (default True).
:param index_cols: List of columns to be treated as index columns.
:return: (df, metadata List[str])
"""
# get the data
metadata = []
df = pd.DataFrame()
for next_path in proc_paths:
if zipped:
next_file, meta = openZipFromPath(next_path)
# put the data into a dataframe
with next_file.open(next_file.namelist()[0]) as f:
df = pd.concat(
[df, pd.read_csv(f, index_col=index_cols, comment='#')], sort=True)
else: # CSVs, not zips
next_file, meta = readCSVFromPath(next_path, index_cols)
# put the data into a dataframe
df = pd.concat([df, next_file], sort=True)
metadata.extend(meta)
if len(index_cols) > 1:
for i, col_name in enumerate(index_cols):
df[col_name] = [x[i] for x in df.index]
else:
df[index_cols[0]] = [x for x in df.index]
return df, metadata
# consider making a general version with parameter for filename, index columns
# def getLakelandDecJanLogDF():
# """
# :return: (df, metadata List[str])
# """
# # define paths for DecJanLog
# _proc_zip_url_dec = 'https://opengamedata.fielddaylab.wisc.edu/data/LAKELAND/LAKELAND_20191201_to_20191231_de09c18_proc.zip'
# _proc_zip_path_jan = 'Data/Raw Log Data/LAKELAND_20200101_to_20200131_a9720c1_proc.zip'
# # get the data
# metadata = []
# zipfile_dec, meta = openZipFromURL(_proc_zip_url_dec)
# metadata.extend(meta)
# zipfile_jan, meta = openZipFromPath(_proc_zip_path_jan)
# metadata.extend(meta)
# # put the data into a dataframe
# df = pd.DataFrame()
# for zf in [zipfile_dec, zipfile_jan]:
# with zf.open(zf.namelist()[0]) as f:
# df = pd.concat([df, pd.read_csv(f, index_col=['sessID', 'num_play'], comment='#')], sort=True)
# df['sessID'] = [x[0] for x in df.index]
# df['num_play'] = [x[1] for x in df.index]
# return df, metadata
def get_lakeland_default_filter(lvlstart: Optional[int] = None, lvlend: Optional[bool] = None, no_debug: Optional[bool] = True,
min_sessActiveEventCount: Optional[int] = 10,
min_lvlstart_ActiveEventCount: Optional[int] = 3,
min_lvlend_ActiveEventCount: Optional[int] = 3, min_sessDuration: Optional[int] = 300, max_sessDuration: Optional[int] = None, cont: Optional[bool] = False) -> List[str]:
"""
:param lvlstart: levelstart to be used for other parameters (None if not used)
:param lvlend: levelend to be used for other parameters (None if not used)
:param no_debug: if True, keep only sessions where debug mode (SPYPARTY) was not used; if False or None, no debug filter is applied
:param min_sessActiveEventCount: (None if not used)
:param min_lvlstart_ActiveEventCount: (None if not used)
:param min_lvlend_ActiveEventCount: (None if not used)
:param min_sessDuration: (None if not used)
:param max_sessDuration: (None if not used)
:param cont: (None if not used)
:return:
"""
query_list = []
if no_debug:
query_list.append('debug == 0')
if min_sessActiveEventCount is not None:
query_list.append(
f'sess_ActiveEventCount >= {min_sessActiveEventCount}')
if lvlstart is not None and min_lvlstart_ActiveEventCount is not None:
query_list.append(
f'lvl{lvlstart}_ActiveEventCount >= {min_lvlstart_ActiveEventCount}')
if lvlend is not None and min_lvlend_ActiveEventCount is not None:
query_list.append(
f'lvl{lvlend}_ActiveEventCount >= {min_lvlend_ActiveEventCount}')
if min_sessDuration is not None:
query_list.append(f'sessDuration >= {min_sessDuration}')
if max_sessDuration is not None:
query_list.append(f'sessDuration <= {max_sessDuration}')
if cont is not None:
query_list.append(f'_continue == {int(cont)}')
return query_list
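# Illustrative example: with the defaults above, the returned query list is
# ['debug == 0', 'sess_ActiveEventCount >= 10', 'sessDuration >= 300', '_continue == 0'],
# which filter_df() below then applies one query at a time.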
# split out query creation per-game
def filter_df(df: pd.DataFrame, query_list: List[str], one_query: bool = False, fillna: object = 0, verbose: bool = True) -> (pd.DataFrame, List[str]):
"""
:param df: dataframe to filter
:param query_list: list of queries for filter
:param one_query: bool to run the filter (faster) as one combined query or as separate queries (slower, gives more info)
:param fillna: value to fill NaNs with
:param verbose: whether to print information
:return: (df, List[str])
"""
df = df.rename({'continue': '_continue'}, axis=1)
filter_args = locals()
filter_args.pop('df')
filter_meta = [f'*arg* filter_args = {filter_args}']
def append_meta_str(q, shape):
outstr = f'Query: {q}, output_shape: {shape}'
filter_meta.append(outstr)
if verbose:
print(outstr)
append_meta_str('Intial Shape', df.shape)
if not one_query:
for q in query_list:
df = df.query(q)
append_meta_str(q, df.shape)
else: # do the whole query at once
full_query = ' & '.join([f"({q})" for q in query_list])
print('full_query:', full_query)
df = df.query(full_query)
append_meta_str(full_query, df.shape)
if fillna is not None:
df = df.fillna(fillna)
filter_meta.append(f'Filled NaN with {fillna}')
return df.rename({'_continue': 'continue'}, axis=1), filter_meta
def create_new_base_features(df, verbose=False):
"""
Currently a stub. Used to create new features from existing ones. See create_new_base_features_lakeland for example.
:param df:
:param verbose:
:return:
"""
new_base_feature_args = locals()
new_base_feature_args.pop('df')
new_feat_meta = [f'*arg* new_feat_args = {new_base_feature_args}']
return df, new_feat_meta
# def describe_lvl_feats_lakeland(df, fbase_list, lvl_range, level_time=300, level_overlap=30):
# """
# Calculates sum/avg of given level base features (fnames without lvlN_ prefix) in the level range.
# Will automatically filter out players who did not complete the given level range in the df
# May have a bug.
# :param level_time: number of seconds per level (window)
# :param level_overlap: number of overlap seconds per level (window)
# :rtype: (df, List[str]) where the new df includes sum_ and avg_lvl_A_to_B.
# :param df: dataframe to pull from and append to
# :param fbase_list: list of feature bases (fnames without lvlN_ prefix)
# :param lvl_range: range of levels to choose. typically range(min_level, max_level+1)
# """
# metadata = []
# metadata.append(f'*arg* lvlfeats = {fbase_list}')
# metadata.append(f'*arg* lvlrange = {lvl_range}')
# if not fbase_list:
# return df, metadata
# lvl_start, lvl_end = lvl_range[0], lvl_range[-1]
# query = f'sessDuration > {(level_time - level_overlap) * (lvl_end) + level_time}'
# df = df.query(query)
# metadata.append(
# f'Describe Level Feats lvls {lvl_start} to {lvl_end}. Assuming WINDOW_SIZE_SECONDS={level_time} and WINDOW_OVERLAP_SECONDS={level_overlap}, filtered by ({query})')
# fromlvl, tolvl = lvl_range[0], lvl_range[-1]
# sum_prefix = f'sum_lvl_{fromlvl}_to_{tolvl}_'
# avg_prefix = f'avg_lvl_{fromlvl}_to_{tolvl}_'
# for fn in fbase_list:
# tdf = df[[f'lvl{i}_{fn}' for i in lvl_range]].fillna(0).copy()
# df[sum_prefix + fn] = tdf.sum(axis=1)
# df[avg_prefix + fn] = tdf.mean(axis=1)
# return df, metadata
def describe_lvl_feats(df, fbase_list, lvl_range):
"""
Calculates sum/avg of given level base features (fnames without lvlN_ prefix) in the level range.
May have a bug.
:rtype: (df, List[str]) where the new df includes sum_ and avg_lvl_A_to_B
:param df: dataframe to pull from and append to
:param fbase_list: list of feature bases (fnames without lvlN_ prefix)
:param lvl_range: range of levels to choose. typically range(min_level, max_level+1)
"""
metadata = []
metadata.append(f'*arg* lvlfeats = {fbase_list}')
metadata.append(f'*arg* lvlrange = {lvl_range}')
if not fbase_list:
return df, metadata
# TODO: Add filter for levels we don't want, like the one from lakeland
# query = f'sessDuration > {(level_time - level_overlap) * (lvl_end) + level_time}'
# df = df.query(query)
# metadata.append(
# f'Describe Level Feats lvls {lvl_start} to {lvl_end}. Assuming WINDOW_SIZE_SECONDS={level_time} and WINDOW_OVERLAP_SECONDS={level_overlap}, filtered by ({query})')
fromlvl, tolvl = lvl_range[0], lvl_range[-1]
sum_prefix = f'sum_lvl_{fromlvl}_to_{tolvl}_'
avg_prefix = f'avg_lvl_{fromlvl}_to_{tolvl}_'
for fn in fbase_list:
tdf = df[[f'lvl{i}_{fn}' for i in lvl_range]].fillna(0)
df[sum_prefix + fn] = tdf.sum(axis=1)
df[avg_prefix + fn] = tdf.mean(axis=1)
return df, metadata
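# Illustrative example: describe_lvl_feats(df, ['count_deaths'], range(0, 2))
# combines lvl0_count_deaths and lvl1_count_deaths into the new columns
# sum_lvl_0_to_1_count_deaths and avg_lvl_0_to_1_count_deaths.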
def describe_range_feats(df, range_feats_and_range, cc_prefix_max_list):
"""
Calculates sum/avg of given level base features (fnames without lvlN_ prefix) in the level range.
May have a bug.
:rtype: (df, List[str]) where the new df includes sum_ and avg_lvl_A_to_B
:param df: dataframe to pull from and append to
:param range_feats_and_range: list of (feature-base list, value range) pairs, one per prefix group ('lvl' first, then each entry of cc_prefix_max_list)
:param cc_prefix_max_list: list of (prefix, max value) pairs for the additional per-count prefixes
"""
metadata = []
metadata.append(f'*arg* range_feats_and_range = {range_feats_and_range}')
metadata.append(f'*arg* cc_prefix_max_list = {cc_prefix_max_list}')
if not range_feats_and_range:
return df, metadata
# TODO: Add filter for levels we don't want, like the one from lakeland
# query = f'sessDuration > {(level_time - level_overlap) * (lvl_end) + level_time}'
# df = df.query(query)
# metadata.append(
# f'Describe Level Feats lvls {lvl_start} to {lvl_end}. Assuming WINDOW_SIZE_SECONDS={level_time} and WINDOW_OVERLAP_SECONDS={level_overlap}, filtered by ({query})')
range_prefix_max_list = [('lvl', None)]+cc_prefix_max_list
for i in range(len(range_feats_and_range)):
range_feats, rang = range_feats_and_range[i]
prefix, _ = range_prefix_max_list[i]
fromval, toval = rang[0], rang[-1]
sum_prefix = f'sum_{prefix}_{fromval}_to_{toval}_'
avg_prefix = f'avg_{prefix}_{fromval}_to_{toval}_'
for fn in range_feats:
tdf = df[[f'{prefix}{i}_{fn}' for i in rang]].fillna(0)
df[sum_prefix + fn] = tdf.sum(axis=1)
df[avg_prefix + fn] = tdf.mean(axis=1)
return df, metadata
def get_feat_selection_lakeland(df, max_lvl=9):
"""
Gets the feature selection widget.
:param df:
:param max_lvl:
:return:
"""
start_level = widgets.IntSlider(value=0, min=0, max=max_lvl, step=1, description='Start Level:',
disabled=False, continuous_update=False, orientation='horizontal', readout=True, readout_format='d')
end_level = widgets.IntSlider(value=0, min=0, max=max_lvl, step=1, description='End Level:',
disabled=False, continuous_update=False, orientation='horizontal', readout=True, readout_format='d')
level_selection = widgets.GridBox([start_level, end_level])
def change_start_level(change):
end_level.min = start_level.value
if end_level.value < start_level.value:
end_level.value = start_level.value
start_level.observe(change_start_level, names="value")
lvl_feats = sorted(set([f[5:] for f in df.columns if f.startswith('lvl')]))
sess_feats = sorted(
set([f[5:] for f in df.columns if f.startswith('sess_')]))
other_feats = sorted(set([f for f in df.columns if not f.startswith(
'lvl') and not f.startswith('sess_')]))
selection_widget = widgets.GridBox([multi_checkbox_widget(lvl_feats, 'lvl'),
multi_checkbox_widget(
sess_feats, 'sess'),
multi_checkbox_widget(
other_feats, 'other'),
level_selection],
layout=widgets.Layout(grid_template_columns=f"repeat(3, 500px)"))
return selection_widget
def get_feat_selection(df, session_prefix, max_lvl, cc_prefix_max_list=None):
"""
Gets the feature selection widget.
:param df:
:param max_lvl:
:return:
"""
cc_prefix_max_list = cc_prefix_max_list or []
checkbox_widgets = []
slider_widgets = []
feats = set()
for prefix, max_val in [('lvl', max_lvl)] + cc_prefix_max_list:
start_val = widgets.IntSlider(value=0, min=0, max=max_val, step=1, description=f'Start {prefix}:',
disabled=False, continuous_update=False, orientation='horizontal', readout=True,
readout_format='d')
end_val = widgets.IntSlider(value=0, min=0, max=max_val, step=1, description=f'End {prefix}:',
disabled=False, continuous_update=False, orientation='horizontal', readout=True,
readout_format='d')
val_selection = widgets.GridBox([start_val, end_val])
slider_widgets.append(val_selection)
val_feats_set = set(['_'.join(f.split('_')[1:])
for f in df.columns if f.startswith(prefix)])
feats = feats.union(
[f'{prefix}{n}_{v}' for n in range(max_val+1) for v in val_feats_set])
val_feats = sorted(val_feats_set)
val_feats_checkbox = multi_checkbox_widget(val_feats, prefix)
checkbox_widgets.append(val_feats_checkbox)
other_feats = sorted(set(df.columns).difference(feats))
selection_widget = widgets.GridBox(checkbox_widgets+slider_widgets+[multi_checkbox_widget(other_feats, 'other')],
layout=widgets.Layout(grid_template_columns=f"repeat({len(slider_widgets)}, 500px)"))
return selection_widget
def get_feat_selection_waves(df, max_lvl=34):
"""
Gets the feature selection widget.
:param df:
:param max_lvl:
:return:
"""
start_level = widgets.IntSlider(value=0, min=0, max=max_lvl, step=1, description='Start Level:',
disabled=False, continuous_update=False, orientation='horizontal', readout=True,
readout_format='d')
end_level = widgets.IntSlider(value=0, min=0, max=max_lvl, step=1, description='End Level:',
disabled=False, continuous_update=False, orientation='horizontal', readout=True,
readout_format='d')
level_selection = widgets.GridBox([start_level, end_level])
def change_start_level(change):
end_level.min = start_level.value
if end_level.value < start_level.value:
end_level.value = start_level.value
start_level.observe(change_start_level, names="value")
lvl_feats = sorted(set([''.join(f.split('_')[1:])
for f in df.columns if f.startswith('lvl')]))
sess_feats = sorted(
set([f[7:] for f in df.columns if f.startswith('session')]))
other_feats = sorted(set([f for f in df.columns if not f.startswith(
'lvl') and not f.startswith('session')]))
selection_widget = widgets.GridBox([multi_checkbox_widget(lvl_feats, 'lvl'),
multi_checkbox_widget(
sess_feats, 'sess'),
multi_checkbox_widget(
other_feats, 'other'),
level_selection],
layout=widgets.Layout(grid_template_columns=f"repeat(3, 500px)"))
return selection_widget
def get_selected_feature_list(selection_widget, session_prefix, cc_prefix_max_list=None):
"""
:param selection_widget:
:return: list of features selected
"""
cc_prefix_max_list = cc_prefix_max_list or []
prefix_list = ['lvl']+[prefix_max[0] for prefix_max in cc_prefix_max_list]
other_feats = [
s.description for s in selection_widget.children[-1].children[1].children if s.value]
range_feats_and_range = get_range_feats_and_range(selection_widget)
range_feats_list = []
for i in range(len(range_feats_and_range)):
prefix = prefix_list[i]
feats = range_feats_and_range[i][0]
rang = range_feats_and_range[i][1]
range_feats_list += [f'{prefix}{n}_{f}' for f in feats for n in rang]
return range_feats_list + other_feats
def get_range_feats_and_range(selection_widget) -> (List[str], Iterable):
"""
:param selection_widget:
:return: List of fbases from selection_widget and level range
"""
ret = []
widgets = selection_widget.children
assert len(widgets) % 2 == 1
num_range_groups = (len(widgets)-1)//2
for i in range(num_range_groups):
checkbox = widgets[i]
slider = widgets[i+num_range_groups]
start_widget = slider.children[0]
end_widget = slider.children[1]
feat_list = [
s.description for s in checkbox.children[1].children if s.value]
val_range = range(start_widget.value, end_widget.value + 1)
ret.append((feat_list, val_range))
return ret
def multi_checkbox_widget(descriptions, category):
""" Widget with a search field and lots of checkboxes """
search_widget = widgets.Text(
layout={'width': '400px'}, description=f'Search {category}:')
options_dict = {description: widgets.Checkbox(description=description, value=False,
layout={'overflow-x': 'scroll', 'width': '400px'}, indent=False) for
description in descriptions}
options = [options_dict[description] for description in descriptions]
options_widget = widgets.VBox(
options, layout={'overflow': 'scroll', 'height': '400px'})
multi_select = widgets.VBox([search_widget, options_widget])
# Wire the search field to the checkboxes
def on_text_change(change):
search_input = change['new']
if search_input == '':
# Reset search field
for d in descriptions:
options_dict[d].layout.visibility = 'visible'
options_dict[d].layout.height = 'auto'
elif search_input[-1] == '$':
search_input = search_input[:-1]
# Filter by the search field (simple substring match; the difflib variant is commented out below).
for d in descriptions:
if search_input in d:
options_dict[d].layout.visibility = 'visible'
options_dict[d].layout.height = 'auto'
else:
options_dict[d].layout.visibility = 'hidden'
options_dict[d].layout.height = '0px'
# close_matches = [d for d in descriptions if search_input in d] #difflib.get_close_matches(search_input, descriptions, cutoff=0.0)
# new_options = [options_dict[description] for description in close_matches]
# options_widget.children = new_options
search_widget.observe(on_text_change, names='value')
return multi_select
def reduce_feats(df, featlist):
"""
Takes in a df and outputs only the given columns in featlist
:param df:
:param featlist:
:return:
"""
return df[featlist].copy(), [f'*arg* finalfeats = {featlist}']
def reduce_outliers(df, z_thresh, show_graphs=True, outpath=None):
"""
Takes in df and z_thresh, shows box plots, and outputs graph with points of zscore>z_thresh removed.
Does not always work properly. Does not seem to tolerate NaNs.
TODO: fix.
:param df:
:param z_thresh:
:param show_graphs:
:return:
"""
meta = []
meta.append(f"Original Num Rows: {len(df)}")
meta.append(f"*arg* zthresh = {z_thresh}")
title = f'Raw Boxplot Original Data n={len(df)}'
df.plot(kind='box', title=title, figsize=(20, 5))
if outpath:
savepath = os.path.join(outpath, f'Raw Boxplot Original.png')
plt.savefig(savepath)
plt.close()
if z_thresh is None:
return df, meta
z = np.abs(stats.zscore(df))
no_outlier_df = df[(z < z_thresh).all(axis=1)]
meta.append(
f'Removed points with abs(ZScore) >= {z_thresh}. Reduced num rows: {len(no_outlier_df)}')
title = f'Raw Boxplot ZThresh={z_thresh} n={len(no_outlier_df)}'
no_outlier_df.plot(kind='box', title=title, figsize=(20, 5))
if outpath:
savepath = os.path.join(outpath, f'Raw Boxplot Zthresh Removed.png')
plt.savefig(savepath)
plt.close()
return no_outlier_df, meta
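# Note (illustrative): a row is dropped as soon as ANY of its columns has an
# absolute z-score >= z_thresh, so reduce_outliers(df, 3) keeps only rows whose
# values all lie within 3 standard deviations of their column means.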
jw_cc_max = [('obj', 80), ('int', 188), ('Q', 18)]
def full_filter(df, import_meta, options, outpath) -> (pd.DataFrame, List[str]):
"""
Takes in a df, metadata, and options group.
Outputs the filtered df and the meta.
:param df: dataframe to filter
:param import_meta: metadata lines from the import step
:param options: options group (see print_options)
:param outpath: directory where diagnostic plots are saved
:return:
"""
# df, import_meta = get_df_func()
filtered_df, filter_meta = filter_df(df, **options.filter_args)
game = options.game.upper()
# if game == 'LAKELAND':
# new_feat_df, new_feat_meta = create_new_base_features_lakeland(filtered_df, **options.new_feat_args)
# aggregate_df, aggregate_meta = describe_lvl_feats_lakeland(new_feat_df, options.lvlfeats, options.lvlrange)
# elif game == 'CRYSTAL':
# new_feat_df, new_feat_meta = create_new_base_features_crystal(filtered_df, **options.new_feat_args)
# aggregate_df, aggregate_meta = describe_lvl_feats_crystal(new_feat_df, options.lvlfeats, options.lvlrange)
# elif game == 'WAVES':
# new_feat_df, new_feat_meta = create_new_base_features_waves(filtered_df, **options.new_feat_args)
# aggregate_df, aggregate_meta = describe_lvl_feats_waves(new_feat_df, options.lvlfeats, options.lvlrange)
# else:
# assert False
new_feat_df, new_feat_meta = create_new_base_features(
filtered_df, **options.new_feat_args)
aggregate_df, aggregate_meta = describe_lvl_feats(
new_feat_df, options.lvlfeats, options.lvlrange)
reduced_df, reduced_meta = reduce_feats(aggregate_df, options.finalfeats)
# hack while NaNs are popping up in aggregate df or newfeatdf TODO: Fix this. It never used to be an issue.
reduced_df = reduced_df.fillna(0)
final_df, outlier_meta = reduce_outliers(
reduced_df, options.zthresh, outpath=outpath)
final_meta = import_meta + filter_meta + new_feat_meta + \
aggregate_meta + reduced_meta + outlier_meta
return final_df, final_meta
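# Illustrative call sketch (assumption: `options` carries the fields used above --
# game, filter_args, new_feat_args, lvlfeats, lvlrange, finalfeats, zthresh -- and
# the output directory and file name are placeholders):
#   final_df, final_meta = full_filter(df, import_meta, options, outpath='output/')
#   save_csv_and_meta(final_df, final_meta, 'output/', 'filtered.csv')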
def save_csv_and_meta(df, meta_list, save_dir, csv_name, meta_name=None, permissions='w+', add_columns=True):
if csv_name.endswith(('.tsv', '.csv')):
extension = csv_name[-4:]
csv_name = csv_name[:-4]
else:
extension = '.csv'
separator = '\t' if extension == '.tsv' else ','
# hardcopy
meta_list = [x for x in meta_list]
meta_list.append(f'OUTPUT_SHAPE: {df.shape}')
meta_list.append(f'OUTPUT_FILE: {csv_name}')
meta_list.append(f'CSV OUTPUT_DATE: {datetime.now()}')
if add_columns:
meta_list.append(f'OUTPUT_COLUMNS: {sorted(list(df.columns))}')
meta_name = meta_name or csv_name + '_meta.txt'
meta_text = save_meta(meta_list, save_dir, meta_name, permissions=permissions)
with open(os.path.join(save_dir, csv_name)+extension, permissions) as f:
for l in meta_text.splitlines():
f.write(f'# {l}\n')
f.write('\n')
df.to_csv(f, sep=separator)
return None, []
def save_meta(meta_list, save_dir, meta_name, permissions='w+'):
meta_text = 'Metadata:\n'+'\n'.join(meta_list+[
f'META OUTPUT_DATE: {datetime.now()}'
])
with open(os.path.join(save_dir, meta_name), permissions) as f:
f.write(meta_text)
return meta_text
def open_csv_from_path_with_meta(csv_fpath, index_col=0):
metadata = []
with open(csv_fpath) as f:
for line in f.readlines():
if line.startswith('#'):
metadata.append(line[2:].strip())
else:
break
df = | pd.read_csv(csv_fpath, comment='#', index_col=index_col) | pandas.read_csv |