import math
import interpolators
import scipy.interpolate
import numpy as np
import pandas as pd
from ..core.status import Status
from empirical_turbulence import AugmentedTurbulenceCorrection
class Relaxation(object):
def __init__(self, correction):
self.correction = correction
def relax(self, wind_speed, turbulence):
return self.correction * turbulence
class NoRelaxation(object):
def relax(self, wind_speed, turbulence):
# suppress unused parameter message in PyCharm
_ = wind_speed
return turbulence
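# Minimal usage sketch (illustrative values only):
#   NoRelaxation().relax(10.0, 0.12)   # -> 0.12 (turbulence unchanged)
#   Relaxation(0.5).relax(10.0, 0.12)  # -> 0.06 (turbulence halved)
# The relaxation object scales the measured turbulence intensity before it is
# used in the zero-turbulence power curve calculation further below.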
class PowerCurve(object):
def __init__(self,
rotor_geometry,
reference_density,
data_frame,
wind_speed_column,
turbulence_column,
power_column,
count_column=None,
rated_power=None,
name='Undefined',
interpolation_mode='Cubic Spline',
zero_ti_pc_required=False,
x_limits=None,
sub_power=None,
relaxation=NoRelaxation()):
self.name = name
self.interpolation_mode = interpolation_mode
self.reference_density = reference_density
self.data_frame = data_frame
self.wind_speed_column = wind_speed_column
self.turbulence_column = turbulence_column
self.power_column = power_column
self.count_column = count_column
self.x_limits = x_limits
self.sub_power = sub_power
self.rotor_geometry = rotor_geometry
if self.count_column is not None:
self.hours = self.data_frame[count_column].sum()*1.0/6.0
else:
self.hours = None
wind_data = data_frame[self.wind_speed_column]
power_data = data_frame[self.power_column]
self.first_wind_speed = min(wind_data)
self.cut_in_wind_speed = self.calculate_cut_in_wind_speed()
self.cut_out_wind_speed = self.calculate_cut_out_wind_speed()
self.wind_speed_points, self.power_points = self.extract_points(wind_data, power_data)
self.turbulence_function = self.create_one_dimensional_function(self.wind_speed_column,
self.turbulence_column,
supress_negative=True)
self.available_power = AvailablePower(self.rotor_geometry, self.reference_density)
Status.add("calculating power function ({0})".format(self.interpolation_mode), verbosity=3)
self.power_function = self.create_power_function(self.wind_speed_points, self.power_points)
Status.add("power function calculated ({0})".format(type(self.power_function)), verbosity=3)
self.rated_power = self.get_rated_power(rated_power, data_frame[self.power_column])
self._reverted_relaxation = None
self._reverted_simulated_power = None
self._reverted_zero_turbulence_power_curve = None
self.relaxation = relaxation
self.zero_ti_pc_required = zero_ti_pc_required
@property
def zero_ti_pc_required(self):
return self._zero_ti_pc_required
@zero_ti_pc_required.setter
def zero_ti_pc_required(self, value):
if hasattr(self, '_zero_ti_pc_required'):
update = (self._zero_ti_pc_required != value)
else:
update = True
if update:
if value and (self.reference_density is None):
raise Exception("Zero Turbulence Curve cannot be calculated"
" if turbine does not have a well defined density")
self._zero_ti_pc_required = value
self.update_zero_ti()
def get_raw_levels(self):
padded_levels = (self.data_frame['Is Extrapolation'] == True)
return self.data_frame[~padded_levels]
def revert_zero_ti(self):
if self._reverted_zero_turbulence_power_curve is None:
raise Exception('Cannot revert zero turbulence power curve')
self.relaxation = self._reverted_relaxation
self.simulatedPower = self._reverted_simulated_power
self.zeroTurbulencePowerCurve = self._reverted_zero_turbulence_power_curve
self._reverted_relaxation = None
self._reverted_simulated_power = None
self._reverted_zero_turbulence_power_curve = None
def update_zero_ti(self, relaxation=None):
self._reverted_relaxation = self.relaxation
if hasattr(self, 'simulatedPower'):
self._reverted_simulated_power = self.simulatedPower
if hasattr(self, 'zeroTurbulencePowerCurve'):
self._reverted_zero_turbulence_power_curve = self.zeroTurbulencePowerCurve
Status.add("Zero TI Required: {0}".format(self.zero_ti_pc_required), verbosity=3)
if relaxation is not None:
self.relaxation = relaxation
if self.zero_ti_pc_required:
Status.add("Calculating zero turbulence curve for {0} Power Curve".format(self.name), verbosity=3)
try:
self.calculate_zero_turbulence_power_curve()
Status.add("Calculation of zero turbulence curve for {0}"
" Power Curve successful".format(self.name), verbosity=3)
except Exception as error:
err_msg = "Calculation of zero turbulence curve for {0}" \
" Power Curve unsuccessful: {1}".format(self.name, error)
raise Exception(err_msg)
else:
self.zeroTurbulencePowerCurve = None
self.simulatedPower = None
Status.add("Turbine Created Successfully", verbosity=3)
def get_level(self, wind_speed, tolerance=0.00001):
for i in range(len(self.wind_speed_points)):
diff = abs(self.wind_speed_points[i] - wind_speed)
if diff < tolerance:
return self.power_points[i]
raise Exception("Cannot find level: {0}".format(wind_speed))
def calculate_zero_turbulence_power_curve(self):
integration_range = IntegrationRange(0.0, 100.0, 0.1)
wind_speeds = []
powers = []
turbulence_values = []
for index in self.data_frame.index:
wind_speed = self.data_frame.loc[index, self.wind_speed_column]
power = self.data_frame.loc[index, self.power_column]
turbulence = self.data_frame.loc[index, self.turbulence_column]
if not np.isnan(wind_speed) and \
not np.isnan(power) and \
not np.isnan(turbulence) and \
wind_speed >= 0.0 and \
power >= 0.0 and \
turbulence > 0:
wind_speeds.append(wind_speed)
turbulence_values.append(turbulence)
powers.append(power)
self.zeroTurbulencePowerCurve = ZeroTurbulencePowerCurve(wind_speeds,
powers,
turbulence_values,
integration_range,
self.available_power,
self.reference_density,
self.relaxation)
self.simulatedPower = SimulatedPower(self.zeroTurbulencePowerCurve, integration_range)
def get_rated_power(self, rated_power, power_curve_levels):
if rated_power is None:
return power_curve_levels.max()
else:
return rated_power
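# Threshold wind speed heuristic: the power curve is inverted (power -> wind
# speed), the wind speed at 85% of rated power is interpolated, and the
# result is scaled by 1.5.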
def get_threshold_wind_speed(self):
return float(interpolators.LinearPowerCurveInterpolator(self.power_points, self.wind_speed_points,
self.rated_power)(0.85 * self.rated_power) * 1.5)
def get_turbulence_levels(self, power_curve_levels, turbulence_levels, fixed_turbulence):
if fixed_turbulence is not None:
turbulence_levels = pd.Series(index=power_curve_levels.index)
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 13 15:21:55 2019
@author: raryapratama
"""
#%%
#Step (1): Import Python libraries, set land conversion scenarios general parameters
import numpy as np
import matplotlib.pyplot as plt
from scipy.integrate import quad
import seaborn as sns
import pandas as pd
#PF_PO Scenario
##Set parameters
#Parameters for primary forest
initAGB = 233 #t-C #source: van Beijma et al. (2018)
initAGB_min = 233-72 #t-C
initAGB_max = 233 + 72 #t-C
#parameters for oil palm plantation. Source: Khasanah et al. (2015)
tf_palmoil = 26 #years
a_nucleus = 2.8167
b_nucleus = 6.8648
a_plasma = 2.5449
b_plasma = 5.0007
c_cont_po_nucleus = 0.5448 #fraction of carbon content in biomass
c_cont_po_plasma = 0.5454 #fraction of carbon content in biomass
tf = 201 #years
a = 0.082
b = 2.53
#%%
#Step (2_1): C loss from the harvesting/clear cut
df2nu = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO.xlsx', 'PF_PO_S2nu')
df2pl = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO.xlsx', 'PF_PO_S2pl')
df3nu = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO.xlsx', 'PF_PO_Enu')
df3pl = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO.xlsx', 'PF_PO_Epl')
t = range(0,tf,1)
c_firewood_energy_S2nu = df2nu['Firewood_other_energy_use'].values
c_firewood_energy_S2pl = df2pl['Firewood_other_energy_use'].values
c_firewood_energy_Enu = df3nu['Firewood_other_energy_use'].values
c_firewood_energy_Epl = df3pl['Firewood_other_energy_use'].values
#%%
#Step (2_2): C loss from the harvesting/clear cut as wood pellets
dfEnu = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO.xlsx', 'PF_PO_Enu')
dfEpl = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO.xlsx', 'PF_PO_Epl')
c_pellets_Enu = dfEnu['Wood_pellets'].values
c_pellets_Epl = dfEpl['Wood_pellets'].values
#%%
#Step (3): Aboveground biomass (AGB) decomposition
df = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO.xlsx', 'PF_PO_S2nu')
tf = 201
t = np.arange(tf)
decomp_emissions = df['C_remainAGB'].values
#%%
#Step (4): Dynamic stock model of in-use wood materials
from dynamic_stock_model import DynamicStockModel
df2nu = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO.xlsx', 'PF_PO_S2nu')
df2pl = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO.xlsx', 'PF_PO_S2pl')
df3nu = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO.xlsx', 'PF_PO_Enu')
df3pl = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO.xlsx', 'PF_PO_Epl')
#product lifetime
#building materials
B = 35
TestDSM2nu = DynamicStockModel(t = df2nu['Year'].values, i = df2nu['Input_PF'].values, lt = {'Type': 'Normal', 'Mean': np.array([B]), 'StdDev': np.array([0.3*B])})
TestDSM2pl = DynamicStockModel(t = df2pl['Year'].values, i = df2pl['Input_PF'].values, lt = {'Type': 'Normal', 'Mean': np.array([B]), 'StdDev': np.array([0.3*B])})
TestDSM3nu = DynamicStockModel(t = df3nu['Year'].values, i = df3nu['Input_PF'].values, lt = {'Type': 'Normal', 'Mean': np.array([B]), 'StdDev': np.array([0.3*B])})
TestDSM3pl = DynamicStockModel(t = df3pl['Year'].values, i = df3pl['Input_PF'].values, lt = {'Type': 'Normal', 'Mean': np.array([B]), 'StdDev': np.array([0.3*B])})
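# All four models above share the same product-lifetime assumption: cohorts of
# harvested-wood inputs leave the in-use stock following a normal lifetime
# distribution with mean B = 35 years and standard deviation 0.3*B = 10.5 years,
# so roughly half of a cohort installed in year t has left the stock by t + 35.
# The inflow-driven calls below propagate each annual input cohort through that
# survival curve to obtain stocks (S), outflows (O) and stock changes (DS).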
CheckStr2nu, ExitFlag2nu = TestDSM2nu.dimension_check()
CheckStr2pl, ExitFlag2pl = TestDSM2pl.dimension_check()
CheckStr3nu, ExitFlag3nu = TestDSM3nu.dimension_check()
CheckStr3pl, ExitFlag3pl = TestDSM3pl.dimension_check()
Stock_by_cohort2nu, ExitFlag2nu = TestDSM2nu.compute_s_c_inflow_driven()
Stock_by_cohort2pl, ExitFlag2pl = TestDSM2pl.compute_s_c_inflow_driven()
Stock_by_cohort3nu, ExitFlag3nu = TestDSM3nu.compute_s_c_inflow_driven()
Stock_by_cohort3pl, ExitFlag3pl = TestDSM3pl.compute_s_c_inflow_driven()
S2nu, ExitFlag2nu = TestDSM2nu.compute_stock_total()
S2pl, ExitFlag2pl = TestDSM2pl.compute_stock_total()
S3nu, ExitFlag3nu = TestDSM3nu.compute_stock_total()
S3pl, ExitFlag3pl = TestDSM3pl.compute_stock_total()
O_C2nu, ExitFlag2nu = TestDSM2nu.compute_o_c_from_s_c()
O_C2pl, ExitFlag2pl = TestDSM2pl.compute_o_c_from_s_c()
O_C3nu, ExitFlag3nu = TestDSM3nu.compute_o_c_from_s_c()
O_C3pl, ExitFlag3pl = TestDSM3pl.compute_o_c_from_s_c()
O2nu, ExitFlag2nu = TestDSM2nu.compute_outflow_total()
O2pl, ExitFlag2pl = TestDSM2pl.compute_outflow_total()
O3nu, ExitFlag3nu = TestDSM3nu.compute_outflow_total()
O3pl, ExitFlag3pl = TestDSM3pl.compute_outflow_total()
DS2nu, ExitFlag2nu = TestDSM2nu.compute_stock_change()
DS2pl, ExitFlag2pl = TestDSM2pl.compute_stock_change()
DS3nu, ExitFlag3nu = TestDSM3nu.compute_stock_change()
DS3pl, ExitFlag3pl = TestDSM3pl.compute_stock_change()
Bal2nu, ExitFlag2nu = TestDSM2nu.check_stock_balance()
Bal2pl, ExitFlag2pl = TestDSM2pl.check_stock_balance()
Bal3nu, ExitFlag3nu = TestDSM3nu.check_stock_balance()
Bal3pl, ExitFlag3pl = TestDSM3pl.check_stock_balance()
#print output flow
print(TestDSM2nu.o)
print(TestDSM2pl.o)
print(TestDSM3nu.o)
print(TestDSM3pl.o)
#%%
#Step (5): Biomass growth
#Model I Oil Palm Biomass Growth (Khasanah et al. (2015))
A = range(0,tf_palmoil,1)
#calculate the biomass and carbon content of palm oil trees over time
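# Both functions below implement the linear AGB growth model AGB(A) = a*A + b
# from Khasanah et al. (2015); the biomass is converted to carbon via the
# carbon-content fraction c_cont_po_* and to CO2-equivalents via the 44/12
# molar-mass ratio (the factor 1000 is presumably a unit conversion).
# Worked example for illustration only: at A = 10 years,
#   Y_nucleus(10) = 44/12 * 1000 * 0.5448 * (2.8167*10 + 6.8648) ≈ 7.0e4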
def Y_nucleus(A):
return (44/12*1000*c_cont_po_nucleus*(a_nucleus*A + b_nucleus))
output_Y_nucleus = np.array([Y_nucleus(Ai) for Ai in A])
print(output_Y_nucleus)
def Y_plasma(A):
return (44/12*1000*c_cont_po_plasma*(a_plasma*A + b_plasma))
output_Y_plasma = np.array([Y_plasma(Ai) for Ai in A])
print(output_Y_plasma)
##8 times 25-year cycle of new AGB of oil palm, one year gap between the cycle
#nucleus
counter = range(0,8,1)
y_nucleus = []
for i in counter:
y_nucleus.append(output_Y_nucleus)
flat_list_nucleus = []
for sublist in y_nucleus:
for item in sublist:
flat_list_nucleus.append(item)
#the length of the list is now 208, so we remove the last 7 elements of the list to make the len=tf
flat_list_nucleus = flat_list_nucleus[:len(flat_list_nucleus)-7]
#plasma
y_plasma = []
for i in counter:
y_plasma.append(output_Y_plasma)
flat_list_plasma = []
for sublist in y_plasma:
for item in sublist:
flat_list_plasma.append(item)
#the length of the list is now 208, so we remove the last 7 elements of the list to make the len=tf
flat_list_plasma = flat_list_plasma[:len(flat_list_plasma)-7]
#plotting
t = range (0,tf,1)
plt.xlim([0, 200])
plt.plot(t, flat_list_nucleus)
plt.plot(t, flat_list_plasma, color='seagreen')
plt.fill_between(t, flat_list_nucleus, flat_list_plasma, color='darkseagreen', alpha=0.4)
plt.xlabel('Time (year)')
plt.ylabel('AGB (tCO2-eq/ha)')
plt.show()
###Yearly Sequestration
###Nucleus
#find the yearly sequestration by calculating the differences between consecutive elements in list 'flat_list_nucleus' (https://stackoverflow.com/questions/5314241/difference-between-consecutive-elements-in-list)
flat_list_nucleus = [p - q for q, p in zip(flat_list_nucleus, flat_list_nucleus[1:])]
#since there is no sequestration between the replanting year (e.g., year 25 to 26), we have to replace negative numbers in 'flat_list_nucleus' with 0 values
flat_list_nucleus = [0 if i < 0 else i for i in flat_list_nucleus]
#insert 0 value to the list as the first element, because there is no sequestration in year 0
var = 0
flat_list_nucleus.insert(0,var)
#make 'flat_list_nucleus' elements negative numbers to denote sequestration
flat_list_nucleus = [ -x for x in flat_list_nucleus]
print(flat_list_nucleus)
#Plasma
#find the yearly sequestration by calculating the differences between consecutive elements in list 'flat_list_plasma' (https://stackoverflow.com/questions/5314241/difference-between-consecutive-elements-in-list)
flat_list_plasma = [t - u for u, t in zip(flat_list_plasma, flat_list_plasma[1:])]
#since there is no sequestration between the replanting year (e.g., year 25 to 26), we have to replace negative numbers in 'flat_list_plasma' with 0 values (https://stackoverflow.com/questions/36310897/how-do-i-change-all-negative-numbers-to-zero-in-python/36310913)
flat_list_plasma = [0 if i < 0 else i for i in flat_list_plasma]
#insert 0 value to the list as the first element, because there is no sequestration in year 0
var = 0
flat_list_plasma.insert(0,var)
#make 'flat_list_plasma' elements negative numbers to denote sequestration
flat_list_plasma = [ -x for x in flat_list_plasma]
print(flat_list_plasma)
#%%
#Step(6): post-harvest processing of wood/palm oil
#post-harvest wood processing
df2nu = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO.xlsx', 'PF_PO_S2nu')
df2pl = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO.xlsx', 'PF_PO_S2pl')
dfEnu = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO.xlsx', 'PF_PO_Enu')
dfEpl = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO.xlsx', 'PF_PO_Epl')
t = range(0,tf,1)
PH_Emissions_HWP_S2nu = df2nu['PH_Emissions_HWP'].values
PH_Emissions_HWP_S2pl = df2pl['PH_Emissions_HWP'].values
PH_Emissions_HWP_Enu = dfEnu['PH_Emissions_HWP'].values
PH_Emissions_HWP_Epl = dfEpl['PH_Emissions_HWP'].values
#post-harvest palm oil processing
df2nu = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO.xlsx', 'PF_PO_S2nu')
df2pl = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO.xlsx', 'PF_PO_S2pl')
dfEnu = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO.xlsx', 'PF_PO_Enu')
dfEpl = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO.xlsx', 'PF_PO_Epl')
import pandas as pd
import os
import time
try: from ethnicolr import census_ln, pred_census_ln, pred_wiki_name, pred_fl_reg_name
except ImportError: os.system('pip install ethnicolr'); from ethnicolr import census_ln, pred_census_ln, pred_wiki_name, pred_fl_reg_name
import seaborn as sns
import matplotlib.pylab as plt
import scipy
from itertools import permutations
import numpy as np
import matplotlib.gridspec as gridspec
from igraph import VertexClustering
from itertools import combinations
plt.rcParams['pdf.fonttype'] = 42
plt.rcParams['ps.fonttype'] = 42
plt.rcParams['font.sans-serif'] = "Palatino"
plt.rcParams['font.serif'] = "Palatino"
plt.rcParams['mathtext.fontset'] = 'custom'
plt.rcParams['mathtext.it'] = 'Palatino:italic'
plt.rcParams['mathtext.bf'] = 'Palatino:bold'
plt.rcParams['mathtext.cal'] = 'Palatino'
from matplotlib.ticker import FormatStrFormatter
from matplotlib import ticker
from sklearn.ensemble import RandomForestClassifier,RandomForestRegressor
from sklearn.neural_network import MLPClassifier,MLPRegressor
from sklearn.linear_model import RidgeClassifierCV
from sklearn.multioutput import MultiOutputRegressor
from sklearn.linear_model import RidgeCV
from sklearn.decomposition import PCA
from statsmodels.stats.multitest import multipletests
import multiprocessing
from multiprocessing import Pool
import tqdm
import igraph
from scipy.stats import pearsonr
global paper_df
global main_df
global g
global graphs
global pal
global homedir
global method
global node_2_a
global a_2_node
global a_2_paper
global control
global matrix_idxs
global prs
# matrix_idxs = {'white_M':0,'white_W':1,'white_U':2,'api_M':3,'api_W':4,'api_U':5,'hispanic_M':6,'hispanic_W':7,'hispanic_U':8,'black_M':9,'black_W':10,'black_U':11}
pal = np.array([[72,61,139],[82,139,139],[180,205,205],[205,129,98]])/255.
# global us_only
# us_only = True
"""
AF = author names, with the format LastName, FirstName; LastName, FirstName; etc..
SO = journal
DT = document type (review or article)
CR = reference list
TC = total citations received (at time of downloading about a year ago)
PD = month of publication
PY = year of publication
DI = DOI
"""
import argparse
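# Illustrative parsing of the AF field documented above (the author string is
# invented purely for demonstration):
#   af = 'Smith, Jane; Lee, Alex; Garcia, Maria'
#   fa = af.split(';')[0]                 # first author: 'Smith, Jane'
#   la = af.split(';')[-1]                # last author:  ' Garcia, Maria'
#   fa_lname, fa_fname = fa.split(', ')   # ('Smith', 'Jane')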
def str2bool(v):
if isinstance(v, bool):
return v
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
return v
parser = argparse.ArgumentParser()
parser.add_argument('-homedir',action='store',dest='homedir',default='/Users/maxwell/Dropbox/Bertolero_Bassett_Projects/citations/')
parser.add_argument('-method',action='store',dest='method',default='wiki')
parser.add_argument('-continent',type=str2bool,action='store',dest='continent',default=False)
parser.add_argument('-continent_only',type=str2bool,action='store',dest='continent_only',default=False)
parser.add_argument('-control',type=str2bool,action='store',dest='control',default=False)
parser.add_argument('-within_poc',type=str2bool,action='store',dest='within_poc',default=False)
parser.add_argument('-walk_length',type=str,action='store',dest='walk_length',default='cited')
parser.add_argument('-walk_papers',type=str2bool,action='store',dest='walk_papers',default=False)
r = parser.parse_args()
locals().update(r.__dict__)
globals().update(r.__dict__)
wiki_2_race = {"Asian,GreaterEastAsian,EastAsian":'api', "Asian,GreaterEastAsian,Japanese":'api',
"Asian,IndianSubContinent":'api', "GreaterAfrican,Africans":'black', "GreaterAfrican,Muslim":'black',
"GreaterEuropean,British":'white', "GreaterEuropean,EastEuropean":'white',
"GreaterEuropean,Jewish":'white', "GreaterEuropean,WestEuropean,French":'white',
"GreaterEuropean,WestEuropean,Germanic":'white', "GreaterEuropean,WestEuropean,Hispanic":'hispanic',
"GreaterEuropean,WestEuropean,Italian":'white', "GreaterEuropean,WestEuropean,Nordic":'white'}
matrix_idxs = {'white_M':0,'api_M':1,'hispanic_M':2,'black_M':3,'white_W':4,'api_W':5,'hispanic_W':6,'black_W':7}
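# The eight categories span a gender-by-race outer product: indices 0-3 are
# male authors (white, api, hispanic, black) and 4-7 the female counterparts.
# In the prs arrays built in make_pr_df below, prs[paper, i, j] is the joint
# probability that the first author is in category i and the last author in
# category j. Illustrative example (made-up probabilities):
#   np.outer([1, 0], [0.7, 0.1, 0.1, 0.1]).flatten()
#   # -> array([0.7, 0.1, 0.1, 0.1, 0. , 0. , 0. , 0. ])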
def log_p_value(p):
if p == 0.0:
p = "-log10($\it{p}$)>250"
elif p > 0.001:
p = np.around(p,3)
p = "$\it{p}$=%s"%(p)
else:
p = (-1) * np.log10(p)
p = "-log10($\it{p}$)=%s"%(np.around(p,0).astype(int))
return p
def convert_r_p(r,p):
return "$\it{r}$=%s\n%s"%(np.around(r,2),log_p_value(p))
def nan_pearsonr(x,y):
xmask = np.isnan(x)
ymask = np.isnan(y)
mask = (xmask==False) & (ymask==False)
return pearsonr(x[mask],y[mask])
def mean_confidence_interval(data, confidence=0.95):
a = 1.0 * np.array(data)
n = len(a)
m, se = np.mean(a), scipy.stats.sem(a)
h = se * scipy.stats.t.ppf((1 + confidence) / 2., n-1)
return m, m-h, m+h
def make_df(method=method):
"""
this makes the actual data by pulling the race from the census or wiki data
"""
# if os.path.exists('/%s/data/result_df_%s.csv'%(homedir,method)):
# df = pd.read_csv('/%s/data/result_df_%s.csv'%(homedir,method))
# return df
main_df = pd.read_csv('/%s/article_data/NewArticleData2019_filtered.csv'%(homedir),header=0)
result_df = pd.DataFrame(columns=['fa_race','la_race','citation_count'])
store_fa_race = []
store_la_race = []
store_citations = []
store_year = []
store_journal = []
store_fa_g = []
store_la_g = []
store_fa_category = []
store_la_category = []
for entry in tqdm.tqdm(main_df.iterrows(),total=len(main_df)):
store_year.append(entry[1]['PY'])
store_journal.append(entry[1]['SO'])
fa = entry[1].AF.split(';')[0]
la = entry[1].AF.split(';')[-1]
fa_lname,fa_fname = fa.split(', ')
la_lname,la_fname = la.split(', ')
try:store_citations.append(len(entry[1].cited.split(',')))
except:store_citations.append(0)
##wiki
if method =='wiki':
names = [{'lname': fa_lname,'fname':fa_fname}]
fa_df = pd.DataFrame(names,columns=['lname','fname'])
fa_race = wiki_2_race[pred_wiki_name(fa_df,'lname','fname').race.values[0]]
names = [{'lname': la_lname,'fname':la_fname}]
la_df = pd.DataFrame(names,columns=['lname','fname'])
la_race = wiki_2_race[pred_wiki_name(la_df,'lname','fname').race.values[0]]
if method =='florida':
names = [{'lname': fa_lname,'fname':fa_fname}]
fa_df = pd.DataFrame(names,columns=['lname','fname'])
fa_race = pred_fl_reg_name(fa_df,'lname','fname').race.values[0].split('_')[-1]
names = [{'lname': la_lname,'fname':la_fname}]
la_df = pd.DataFrame(names,columns=['lname','fname'])
la_race = pred_fl_reg_name(la_df,'lname','fname').race.values[0].split('_')[-1]
#census
if method =='census':
names = [{'name': fa_lname},{'name':la_lname}]
la_df = pd.DataFrame(names)
r = pred_census_ln(la_df,'name')
fa_race,la_race= r.race.values
if method =='combined':
##wiki
names = [{'lname': fa_lname,'fname':fa_fname}]
fa_df = pd.DataFrame(names,columns=['fname','lname'])
fa_race_wiki = wiki_2_race[pred_wiki_name(fa_df,'fname','lname').race.values[0]]
names = [{'lname': la_lname,'fname':la_fname}]
la_df = pd.DataFrame(names,columns=['fname','lname'])
la_race_wiki = wiki_2_race[pred_wiki_name(la_df,'fname','lname').race.values[0]]
names = [{'name': fa_lname},{'name':la_lname}]
la_df = pd.DataFrame(names)
r = pred_census_ln(la_df,'name')
fa_race_census,la_race_census= r.race.values
if la_race_census != la_race_wiki:
if la_race_wiki == 'white':
la_race = la_race_census
if la_race_census == 'white':
la_race = la_race_wiki
elif (la_race_census != 'white') & (la_race_wiki != 'white'): la_race = la_race_wiki
elif la_race_census == la_race_wiki: la_race = la_race_wiki
if fa_race_census != fa_race_wiki:
if fa_race_wiki == 'white':
fa_race = fa_race_census
if fa_race_census == 'white':
fa_race = fa_race_wiki
elif (fa_race_census != 'white') & (fa_race_wiki != 'white'): fa_race = fa_race_wiki
elif fa_race_census == fa_race_wiki: fa_race = fa_race_wiki
store_la_race.append(la_race)
store_fa_race.append(fa_race)
store_fa_g.append(entry[1].AG[0])
store_la_g.append(entry[1].AG[1])
store_fa_category.append('%s_%s' %(fa_race,entry[1].AG[0]))
store_la_category.append('%s_%s' %(la_race,entry[1].AG[1]))
result_df['fa_race'] = store_fa_race
result_df['la_race'] = store_la_race
result_df['fa_g'] = store_fa_g
result_df['la_g'] = store_la_g
result_df['journal'] = store_journal
result_df['year'] = store_year
result_df['citation_count'] = store_citations
result_df['fa_category'] = store_fa_category
result_df['la_category'] = store_la_category
# result_df.citation_count = result_df.citation_count.values.astype(int)
result_df.to_csv('/%s/data/result_df_%s.csv'%(homedir,method),index=False)
return result_df
def make_pr_df(method=method):
"""
this makes the actual data by pulling the race from the census or wiki data
"""
main_df = pd.read_csv('/%s/article_data/NewArticleData2019.csv'%(homedir),header=0)
prs = np.zeros((main_df.shape[0],8,8))
gender_base = {}
for year in np.unique(main_df.PY.values):
ydf = main_df[main_df.PY==year].AG
fa = np.array([x[0] for x in ydf.values])
la = np.array([x[1] for x in ydf.values])
fa_m = len(fa[fa=='M'])/ len(fa[fa!='U'])
fa_w = len(fa[fa=='W'])/ len(fa[fa!='U'])
la_m = len(la[la=='M'])/ len(la[la!='U'])
la_w = len(la[la=='W'])/ len(la[la!='U'])
gender_base[year] = [fa_m,fa_w,la_m,la_w]
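# gender_base[year] stores the observed first-/last-author gender base rates
# [fa_m, fa_w, la_m, la_w] for each publication year; they are used further
# below to split authors with unknown gender ('U') probabilistically between
# the male and female categories.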
asian = [0,1,2]
black = [3,4]
white = [5,6,7,8,9,11,12]
hispanic = [10]
if method =='wiki_black':
black = [3]
for entry in tqdm.tqdm(main_df.iterrows(),total=len(main_df)):
fa = entry[1].AF.split(';')[0]
la = entry[1].AF.split(';')[-1]
fa_lname,fa_fname = fa.split(', ')
la_lname,la_fname = la.split(', ')
fa_g = entry[1].AG[0]
la_g = entry[1].AG[1]
paper_matrix = np.zeros((2,8))
# 1/0
##wiki
if method =='wiki' or method == 'wiki_black':
names = [{'lname': fa_lname,'fname':fa_fname}]
fa_df = pd.DataFrame(names,columns=['lname','fname'])
fa_race = pred_wiki_name(fa_df,'lname','fname').values[0][3:]
fa_race = [np.sum(fa_race[white]),np.sum(fa_race[asian]),np.sum(fa_race[hispanic]),np.sum(fa_race[black])]
names = [{'lname': la_lname,'fname':la_fname}]
la_df = pd.DataFrame(names,columns=['lname','fname'])
la_race = pred_wiki_name(la_df,'lname','fname').values[0][3:]
la_race = [np.sum(la_race[white]),np.sum(la_race[asian]),np.sum(la_race[hispanic]),np.sum(la_race[black])]
# #census
if method =='census':
names = [{'name': fa_lname},{'name':la_lname}]
la_df = pd.DataFrame(names)
r = pred_census_ln(la_df,'name')
fa_race = [r.iloc[0]['white'],r.iloc[0]['api'],r.iloc[0]['hispanic'],r.iloc[0]['black']]
la_race = [r.iloc[1]['white'],r.iloc[1]['api'],r.iloc[1]['hispanic'],r.iloc[1]['black']]
if method =='florida':
names = [{'lname': fa_lname,'fname':fa_fname}]
fa_df = pd.DataFrame(names,columns=['lname','fname'])
asian, hispanic, black, white = pred_fl_reg_name(fa_df,'lname','fname').values[0][3:]
fa_race = [white,asian,hispanic,black]
names = [{'lname': la_lname,'fname':la_fname}]
la_df = pd.DataFrame(names,columns=['lname','fname'])
asian, hispanic, black, white = pred_fl_reg_name(la_df,'lname','fname').values[0][3:]
la_race = [white,asian,hispanic,black]
if method == 'combined':
names = [{'lname': fa_lname,'fname':fa_fname}]
fa_df = pd.DataFrame(names,columns=['fname','lname'])
fa_race_wiki = pred_wiki_name(fa_df,'lname','fname').values[0][3:]
fa_race_wiki = [np.sum(fa_race_wiki[white]),np.sum(fa_race_wiki[asian]),np.sum(fa_race_wiki[hispanic]),np.sum(fa_race_wiki[black])]
names = [{'lname': la_lname,'fname':la_fname}]
la_df = pd.DataFrame(names,columns=['fname','lname'])
la_race_wiki = pred_wiki_name(la_df,'lname','fname').values[0][3:]
la_race_wiki = [np.sum(la_race_wiki[white]),np.sum(la_race_wiki[asian]),np.sum(la_race_wiki[hispanic]),np.sum(la_race_wiki[black])]
names = [{'name': fa_lname},{'name':la_lname}]
la_df = pd.DataFrame(names)
r = pred_census_ln(la_df,'name')
fa_race_census = [r.iloc[0]['white'],r.iloc[0]['api'],r.iloc[0]['hispanic'],r.iloc[0]['black']]
la_race_census = [r.iloc[1]['white'],r.iloc[1]['api'],r.iloc[1]['hispanic'],r.iloc[1]['black']]
if fa_race_census[0] < fa_race_wiki[0]: fa_race = fa_race_census
else: fa_race = fa_race_wiki
if la_race_census[0] < la_race_wiki[0]: la_race = la_race_census
else: la_race = la_race_wiki
gender_b = gender_base[entry[1].PY]
if fa_g == 'M': paper_matrix[0] = np.outer([1,0],fa_race).flatten()
if fa_g == 'W': paper_matrix[0] = np.outer([0,1],fa_race).flatten()
if fa_g == 'U': paper_matrix[0] = np.outer([gender_b[0],gender_b[1]],fa_race).flatten()
if la_g == 'M': paper_matrix[1] = np.outer([1,0],la_race).flatten()
if la_g == 'W': paper_matrix[1] = np.outer([0,1],la_race).flatten()
if la_g == 'U': paper_matrix[1] = np.outer([gender_b[2],gender_b[3]],la_race).flatten()
paper_matrix = np.outer(paper_matrix[0],paper_matrix[1])
paper_matrix = paper_matrix / np.sum(paper_matrix)
prs[entry[0]] = paper_matrix
np.save('/%s/data/result_pr_df_%s.npy'%(homedir,method),prs)
def make_all_author_race():
"""
this makes the actual data by pulling the race from the census or wiki data,
but this version include middle authors, which we use for the co-authorship networks
"""
main_df = pd.read_csv('/%s/article_data/NewArticleData2019.csv'%(homedir),header=0)
names = []
lnames = []
fnames = []
for entry in main_df.iterrows():
for a in entry[1].AF.split('; '):
a_lname,a_fname = a.split(', ')
lnames.append(a_lname.strip())
fnames.append(a_fname.strip())
names.append(a)
df = pd.DataFrame(np.array([names,fnames,lnames]).swapaxes(0,1),columns=['name','fname','lname'])
df = df.drop_duplicates('name')
if method =='florida':
# 1/0
r = pred_fl_reg_name(df,'lname','fname')
r = r.rename(columns={'nh_black':'black','nh_white':'white'})
r.to_csv('/%s/data/result_df_%s_all.csv'%(homedir,method),index=False)
if method =='census':
r = pred_census_ln(df,'lname')
r.to_csv('/%s/data/result_df_%s_all.csv'%(homedir,method),index=False)
all_races = []
r = dict(zip(df.name.values,df.race.values))
for idx,paper in tqdm.tqdm(main_df.iterrows(),total=main_df.shape[0]):
races = []
for a in paper.AF.split('; '):
a_lname,a_fname = a.split(', ')
races.append(r[a_lname.strip()])
all_races.append('_'.join(str(x) for x in races))
main_df['all_races'] = all_races
main_df.to_csv('/%s/data/all_data_%s.csv'%(homedir,method),index=False)
race2wiki = {'api': ["Asian,GreaterEastAsian,EastAsian","Asian,GreaterEastAsian,Japanese", "Asian,IndianSubContinent"],
'black':["GreaterAfrican,Africans", "GreaterAfrican,Muslim"],
'white':["GreaterEuropean,British", "GreaterEuropean,EastEuropean", "GreaterEuropean,Jewish", "GreaterEuropean,WestEuropean,French",
"GreaterEuropean,WestEuropean,Germanic", "GreaterEuropean,WestEuropean,Nordic", "GreaterEuropean,WestEuropean,Italian"],
'hispanic':["GreaterEuropean,WestEuropean,Hispanic"]}
if method =='wiki':
r = pred_wiki_name(df,'lname','fname')
for race in ['api','black','hispanic','white']:
r[race] = 0.0
for e in race2wiki[race]:
r[race] = r[race] + r[e]
for race in ['api','black','hispanic','white']:
for e in race2wiki[race]:
r = r.drop(columns=[e])
r.to_csv('/%s/data/result_df_%s_all.csv'%(homedir,method),index=False)
all_races = []
for idx,paper in tqdm.tqdm(main_df.iterrows(),total=main_df.shape[0]):
races = []
for a in paper.AF.split('; '):
races.append(r[r.name==a].race.values[0])
all_races.append('_'.join(str(x) for x in races))
main_df['all_races'] = all_races
main_df.to_csv('/%s/data/all_data_%s.csv'%(homedir,method),index=False)
if method =='combined':
r_wiki = pred_wiki_name(df,'lname','fname')
for race in ['api','black','hispanic','white']:
r_wiki[race] = 0.0
for e in race2wiki[race]:
r_wiki[race] = r_wiki[race] + r_wiki[e]
for race in ['api','black','hispanic','white']:
for e in race2wiki[race]:
r_wiki = r_wiki.drop(columns=[e])
r_census = pred_census_ln(df,'lname')
census = r_census.white < r_wiki.white
wiki = r_census.white > r_wiki.white
r = r_census.copy()
r[census] = r_census
r[wiki] = r_wiki
r.to_csv('/%s/data/result_df_%s_all.csv'%(homedir,method),index=False)
def figure_1_pr_authors():
df = pd.read_csv('/%s/data/result_df_%s_all.csv'%(homedir,method))
paper_df = pd.read_csv('/%s/article_data/NewArticleData2019.csv'%(homedir),header=0)
results = []
for year in np.unique(paper_df.PY.values):
print (year)
ydf = paper_df[paper_df.PY==year]
names = []
for p in ydf.iterrows():
for n in p[1].AF.split(';'):
names.append(n.strip())
names = np.unique(names)
result = np.zeros((len(names),4))
for idx,name in enumerate(names):
try:result[idx] = df[df.name==name].values[0][-4:]
except:result[idx] = np.nan
results.append(np.nansum(result,axis=0))
results = np.array(results)
plt.close()
sns.set(style='white',font='Palatino')
# pal = sns.color_palette("Set2")
# pal = sns.color_palette("vlag",4)
fig = plt.figure(figsize=(7.5,4),constrained_layout=False)
gs = gridspec.GridSpec(15, 14, figure=fig,wspace=.75,hspace=0,left=.1,right=.9,top=.9,bottom=.1)
ax1 = fig.add_subplot(gs[:15,:7])
ax1_plot = plt.stackplot(np.unique(paper_df.PY),np.flip(results.transpose()[[3,0,2,1]],axis=0), labels=['Black','Hispanic','Asian','White'],colors=np.flip(pal,axis=0), alpha=1)
handles, labels = plt.gca().get_legend_handles_labels()
labels.reverse()
handles.reverse()
leg = plt.legend(loc=2,frameon=False,labels=labels,handles=handles,fontsize=8)
for text in leg.get_texts():
plt.setp(text, color = 'black')
plt.margins(0,0)
plt.ylabel('sum of predicted author race')
plt.xlabel('publication year')
ax1.tick_params(axis='y', which='major', pad=0)
plt.title('a',{'fontweight':'bold'},'left',pad=2)
# 1/0
ax2 = fig.add_subplot(gs[:15,8:])
ax2_plot = plt.stackplot(np.unique(paper_df.PY),np.flip(np.divide(results.transpose()[[3,0,2,1]],np.sum(results,axis=1)),axis=0)*100, labels=['Black','Hispanic','Asian','White'],colors=np.flip(pal,axis=0),alpha=1)
handles, labels = plt.gca().get_legend_handles_labels()
labels.reverse()
handles.reverse()
leg = plt.legend(loc=2,frameon=False,labels=labels,handles=handles,fontsize=8)
for text in leg.get_texts():
plt.setp(text, color = 'white')
plt.margins(0,0)
plt.ylabel('percentage of predicted author race',labelpad=-5)
plt.xlabel('publication year')
ax2.yaxis.set_major_formatter(ticker.PercentFormatter())
ax2.tick_params(axis='y', which='major', pad=0)
plt.title('b',{'fontweight':'bold'},'left',pad=2)
plt.savefig('authors.pdf')
def figure_1_pr():
n_iters = 1000
df =pd.read_csv('/%s/article_data/NewArticleData2019.csv'%(homedir),header=0).rename({'PY':'year','SO':'journal'},axis='columns')
matrix = np.load('/%s/data/result_pr_df_%s.npy'%(homedir,method))
results = np.zeros((len(np.unique(df.year)),4))
if within_poc == False:
labels = ['white author & white author','white author & author of color','author of color & white author','author of color &\nauthor of color']
groups = [np.vectorize(matrix_idxs.get)(['white_M','white_W',]),
np.vectorize(matrix_idxs.get)(['api_M','api_W','hispanic_M','hispanic_W','black_M','black_W',])]
names = ['white-white','white-poc','poc-white','poc-poc']
plot_matrix = np.zeros((matrix.shape[0],len(groups),len(groups)))
plot_base_matrix = np.zeros((matrix.shape[0],len(groups),len(groups)))
for i in range(len(groups)):
for j in range(len(groups)):
plot_matrix[:,i,j] = np.nansum(matrix[:,groups[i]][:,:,groups[j]].reshape(matrix.shape[0],-1),axis=1)
for yidx,year in enumerate(np.unique(df.year)):
papers = df[df.year==year].index
r = np.mean(plot_matrix[papers],axis=0).flatten()
results[yidx,0] = r[0]
results[yidx,1] = r[1]
results[yidx,2] = r[2]
results[yidx,3] = r[3]
if within_poc == True:
names = ['white author','Asian author','Hispanic author','Black author']
groups = [[0,4],[1,5],[2,6],[3,7]]
labels = names
plot_matrix = np.zeros((matrix.shape[0],len(groups)))
for i in range(4):
plot_matrix[:,i] = plot_matrix[:,i] + np.nansum(np.nanmean(matrix[:,groups[i],:],axis=-1),axis=-1)
plot_matrix[:,i] = plot_matrix[:,i] + np.nansum(np.nanmean(matrix[:,:,groups[i]],axis=-1),axis=-1)
for yidx,year in enumerate(np.unique(df.year)):
papers = df[df.year==year].index
r = np.mean(plot_matrix[papers],axis=0).flatten()
results[yidx,0] = r[0]
results[yidx,1] = r[1]
results[yidx,2] = r[2]
results[yidx,3] = r[3]
plt.close()
sns.set(style='white',font='Palatino')
# pal = sns.color_palette("Set2")
# pal = sns.color_palette("vlag",4)
fig = plt.figure(figsize=(7.5,4),constrained_layout=False)
gs = gridspec.GridSpec(15, 16, figure=fig,wspace=.75,hspace=0,left=.1,right=.9,top=.9,bottom=.1)
ax1 = fig.add_subplot(gs[:15,:5])
plt.sca(ax1)
ax1_plot = plt.stackplot(np.unique(df.year),np.flip(results.transpose(),axis=0)*100, labels=np.flip(labels),colors=np.flip(pal,axis=0), alpha=1)
handles, labels = plt.gca().get_legend_handles_labels()
labels.reverse()
handles.reverse()
leg = plt.legend(loc=9,frameon=False,labels=labels,handles=handles,fontsize=8)
for text in leg.get_texts():
plt.setp(text, color = 'w')
plt.margins(0,0)
plt.ylabel('percentage of publications')
plt.xlabel('publication year')
ax1.tick_params(axis='x', which='major', pad=-1)
ax1.tick_params(axis='y', which='major', pad=0)
i,j,k,l = np.flip(results[0]*100)
i,j,k,l = [i,(i+j),(i+j+k),(i+j+k+l)]
i,j,k,l = [np.mean([0,i]),np.mean([i,j]),np.mean([j,k]),np.mean([k,l])]
# i,j,k,l = np.array([100]) - np.array([i,j,k,l])
plt.sca(ax1)
ax1.yaxis.set_major_formatter(ticker.PercentFormatter())
ax1.set_yticks([i,j,k,l])
ax1.set_yticklabels(np.flip(np.around(results[0]*100,0).astype(int)))
ax2 = ax1_plot[0].axes.twinx()
plt.sca(ax2)
i,j,k,l = np.flip(results[-1]*100)
i,j,k,l = [i,(i+j),(i+j+k),(i+j+k+l)]
i,j,k,l = [np.mean([0,i]),np.mean([i,j]),np.mean([j,k]),np.mean([k,l])]
plt.ylim(0,100)
ax2.yaxis.set_major_formatter(ticker.PercentFormatter())
ax2.set_yticks([i,j,k,l])
ax2.set_yticklabels(np.flip(np.around(results[-1]*100,0)).astype(int))
plt.xticks([1995., 2000., 2005., 2010., 2015., 2019],np.array([1995., 2000., 2005., 2010., 2015., 2019]).astype(int))
ax2.tick_params(axis='y', which='major', pad=0)
plt.title('a',{'fontweight':'bold'},'left',pad=2)
plot_df = pd.DataFrame(columns=['year','percentage','iteration'])
for yidx,year in enumerate(np.unique(df.year)):
for i in range(n_iters):
data = df[(df.year==year)]
papers = data.sample(int(len(data)),replace=True).index
r = np.mean(plot_matrix[papers],axis=0).flatten()
total = r.sum()
r = np.array(r[1:])/total
r = r.sum()
tdf = pd.DataFrame(np.array([r,year,i]).reshape(1,-1),columns=['percentage','year','iteration'])
plot_df = plot_df.append(tdf,ignore_index=True)
plot_df.percentage = plot_df.percentage.astype(float)
plot_df.iteration = plot_df.iteration.astype(int)
plot_df.percentage = plot_df.percentage.astype(float) * 100
pct_df = pd.DataFrame(columns=['year','percentage','iteration'])
plot_df = plot_df.sort_values('year')
for i in range(n_iters):
a = plot_df[(plot_df.iteration==i)].percentage.values
# change = np.diff(a) / a[:-1] * 100.
change = np.diff(a)
tdf = pd.DataFrame(columns=['year','percentage','iteration'])
tdf.year = range(1997,2020)
tdf.percentage = change[1:]
tdf.iteration = i
pct_df = pct_df.append(tdf,ignore_index=True)
pct_df = pct_df.dropna()
pct_df = pct_df[np.isinf(pct_df.percentage)==False]
ci = mean_confidence_interval(pct_df.percentage)
ci = np.around(ci,2)
print ("Across 1000 bootstraps, the mean percent increase per year was %s%% (95%% CI:%s%%,%s%%)"%(ci[0],ci[1],ci[2]))
plt.text(.5,.48,"Increasing at %s%% per year\n(95%% CI:%s%%,%s%%)"%(ci[0],ci[1],ci[2]),{'fontsize':8,'color':'white'},horizontalalignment='center',verticalalignment='bottom',rotation=9,transform=ax2.transAxes)
axes = []
jidx = 3
for makea in range(5):
axes.append(fig.add_subplot(gs[jidx-3:jidx,6:10]))
jidx=jidx+3
for aidx,journal in enumerate(np.unique(df.journal)):
ax = axes[aidx]
plt.sca(ax)
if aidx == 2: ax.set_ylabel('percentage of publications')
if aidx == 4: ax.set_xlabel('publication\nyear',labelpad=-10)
results = np.zeros(( len(np.unique(df[(df.journal==journal)].year)),4))
for yidx,year in enumerate(np.unique(df[(df.journal==journal)].year)):
papers = df[(df.year==year)&(df.journal==journal)].index
r = np.mean(plot_matrix[papers],axis=0).flatten()
results[yidx,0] = r[0]
results[yidx,1] = r[1]
results[yidx,2] = r[2]
results[yidx,3] = r[3]
data = df[df.journal==journal]
if journal == 'NATURE NEUROSCIENCE':
for i in range(3): results = np.concatenate([[[0,0,0,0]],results],axis=0)
ax1_plot = plt.stackplot(np.unique(df.year),np.flip(results.transpose(),axis=0)*100, labels=np.flip(labels,axis=0),colors=np.flip(pal,axis=0), alpha=1)
plt.margins(0,0)
ax.set_yticks([])
if aidx != 4:
ax.set_xticks([])
else: plt.xticks(np.array([1996.5,2017.5]),np.array([1995.,2019]).astype(int))
plt.title(journal.title(), pad=-10,color='w',fontsize=8)
if aidx == 0: plt.text(0,1,'b',{'fontweight':'bold'},horizontalalignment='left',verticalalignment='bottom',transform=ax.transAxes)
journals = np.unique(df.journal)
plot_df = pd.DataFrame(columns=['journal','year','percentage','iteration'])
for j in journals:
for yidx,year in enumerate(np.unique(df.year)):
for i in range(n_iters):
data = df[(df.year==year)&(df.journal==j)]
papers = data.sample(int(len(data)),replace=True).index
r = np.mean(plot_matrix[papers],axis=0).flatten()
total = r.sum()
r = np.array(r[1:])/total
r = r.sum()
tdf = pd.DataFrame(np.array([j,r,year,i]).reshape(1,-1),columns=['journal','percentage','year','iteration'])
plot_df = plot_df.append(tdf,ignore_index=True)
plot_df.percentage = plot_df.percentage.astype(float)
plot_df.iteration = plot_df.iteration.astype(int)
plot_df.percentage = plot_df.percentage.astype(float) * 100
pct_df = pd.DataFrame(columns=['journal','year','percentage','iteration'])
plot_df = plot_df.sort_values('year')
for i in range(n_iters):
for j in journals:
a = plot_df[(plot_df.iteration==i)&(plot_df.journal==j)].percentage.values
# change = np.diff(a) / a[:-1] * 100.
change = np.diff(a)
tdf = pd.DataFrame(columns=['journal','year','percentage','iteration'])
tdf.year = range(1997,2020)
tdf.percentage = change[1:]
tdf.journal = j
tdf.iteration = i
pct_df = pct_df.append(tdf,ignore_index=True)
pct_df = pct_df.dropna()
pct_df = pct_df[np.isinf(pct_df.percentage)==False]
ci = pct_df.groupby(['journal']).percentage.agg(mean_confidence_interval).values
axes = []
jidx = 3
for makea in range(5):
axes.append(fig.add_subplot(gs[jidx-3:jidx,11:]))
jidx=jidx+3
for i,ax,journal,color in zip(range(5),axes,journals,sns.color_palette("rocket_r", 5)):
plt.sca(ax)
ax.clear()
#
# plot_df[np.isnan(plot_df.percentage)] = 0.0
if i == 0: plt.text(0,1,'c',{'fontweight':'bold'},horizontalalignment='left',verticalalignment='bottom',transform=ax.transAxes)
lp = sns.lineplot(data=plot_df[plot_df.journal==journal],y='percentage',x='year',color=color,ci='sd')
plt.margins(0,0)
thisdf = plot_df[plot_df.journal==journal]
minp = int(np.around(thisdf.mean()['percentage'],0))
thisdf = thisdf[thisdf.year==thisdf.year.max()]
maxp = int(np.around(thisdf.mean()['percentage'],0))
plt.text(-0.01,.5,'%s'%(minp),horizontalalignment='right',verticalalignment='top', transform=ax.transAxes,fontsize=10)
plt.text(1.01,.9,'%s'%(maxp),horizontalalignment='left',verticalalignment='top', transform=ax.transAxes,fontsize=10)
ax.set_yticks([])
# ax.set_xticks([])
ax.set_ylabel('')
plt.margins(0,0)
ax.set_yticks([])
if i == 2:
ax.set_ylabel('percentage of publications',labelpad=12)
if i != 4: ax.set_xticks([])
else: plt.xticks(np.array([1.5,22.5]),np.array([1995.,2019]).astype(int))
mean_pc,min_pc,max_pc = np.around(ci[i],2)
if i == 4: ax.set_xlabel('publication\nyear',labelpad=-10)
else: ax.set_xlabel('')
plt.text(.99,0,'95%' + "CI: %s<%s<%s"%(min_pc,mean_pc,max_pc),horizontalalignment='right',verticalalignment='bottom', transform=ax.transAxes,fontsize=8)
if journal == 'NATURE NEUROSCIENCE':
plt.xlim(-3,21)
plt.savefig('/%s/figures/figure1_pr_%s_%s.pdf'%(homedir,method,within_poc))
def validate():
black_names = pd.read_csv('%s/data/Black scientists - Faculty.csv'%(homedir))['Name'].values[1:]
fnames = []
lnames = []
all_names =[]
for n in black_names:
try:
fn,la = n.split(' ')[:2]
fnames.append(fn.strip())
lnames.append(la.strip())
all_names.append('%s_%s'%(fn.strip(),la.strip()))
except:continue
black_df = pd.DataFrame(np.array([all_names,fnames,lnames]).swapaxes(0,1),columns=['name','fname','lname'])
main_df = pd.read_csv('/%s/article_data/NewArticleData2019.csv'%(homedir),header=0)
names = []
lnames = []
fnames = []
for entry in main_df.iterrows():
for a in entry[1].AF.split('; '):
a_lname,a_fname = a.split(', ')
lnames.append(a_lname.strip())
fnames.append(a_fname.strip())
names.append('%s_%s'%(a_fname,a_lname))
main_df = pd.DataFrame(np.array([names,fnames,lnames]).swapaxes(0,1),columns=['name','fname','lname'])
main_df = main_df.drop_duplicates('name')
if method == 'wiki':
black_r = pred_wiki_name(black_df,'lname','fname')
all_r = pred_wiki_name(main_df,'lname','fname')
asian = [0,1,2]
black = [3,4]
white = [5,6,7,8,9,11,12]
hispanic = [10]
all_df = pd.DataFrame(columns=['probability','sample'])
all_df['probability'] = all_r.values[:,4:][:,black].sum(axis=1)
all_df['sample'] = 'papers'
black_df = pd.DataFrame(columns=['probability','sample'])
black_df['probability'] = black_r.values[:,4:][:,black].sum(axis=1)
black_df['sample'] = 'Black-in-STEM'
if method == 'florida':
black_r = pred_fl_reg_name(black_df,'lname','fname')
all_r = pred_fl_reg_name(main_df,'lname','fname')
asian = [0,1,2]
black = [3,4]
white = [5,6,7,8,9,11,12]
hispanic = [10]
all_df = pd.DataFrame(columns=['probability','sample'])
all_df['probability'] = all_r.values[:,-2]
all_df['sample'] = 'papers'
black_df = pd.DataFrame(columns=['probability','sample'])
black_df['probability'] = black_r.values[:,-2]
black_df['sample'] = 'Black-in-STEM'
if method == 'census':
black_r = pred_census_ln(black_df,'lname')
all_r = pred_census_ln(main_df,'lname')
all_df = pd.DataFrame(columns=['probability','sample'])
all_df['probability'] = all_r.values[:,-3]
all_df['sample'] = 'papers'
black_df = pd.DataFrame(columns=['probability','sample'])
black_df['probability'] = black_r.values[:,-3]
black_df['sample'] = 'Black-in-STEM'
data = all_df.append(black_df,ignore_index=True)
data.probability = data.probability.astype(float)
plt.close()
sns.set(style='white',font='Palatino')
fig = plt.figure(figsize=(7.5,3),constrained_layout=True)
gs = gridspec.GridSpec(6,6, figure=fig)
ax1 = fig.add_subplot(gs[:6,:3])
plt.sca(ax1)
sns.histplot(data=data,x='probability',hue="sample",stat='density',common_norm=False,bins=20)
ax2 = fig.add_subplot(gs[:6,3:])
plt.sca(ax2)
sns.histplot(data=data,x='probability',hue="sample",stat='density',common_norm=False,bins=20)
plt.ylim(0,2.5)
plt.savefig('Black-in-STEM_%s.pdf'%(method))
plt.close()
sns.set(style='white',font='Palatino')
fig = plt.figure(figsize=(7.5,3),constrained_layout=True)
gs = gridspec.GridSpec(6,6, figure=fig)
ax1 = fig.add_subplot(gs[:6,:3])
plt.sca(ax1)
sns.histplot(data=data[data['sample']=='papers'],x='probability',stat='density',common_norm=False,bins=20)
ax2 = fig.add_subplot(gs[:6,3:])
plt.sca(ax2)
sns.histplot(data=data[data['sample']=='Black-in-STEM'],x='probability',hue="sample",stat='density',common_norm=False,bins=20)
# plt.ylim(0,2.5)
plt.savefig('Black-in-STEM_2.pdf')
def make_pr_control():
"""
control for features of citing article
"""
# 1) the year of publication
# 2) the journal in which it was published
# 3) the number of authors
# 4) whether the paper was a review article
# 5) the seniority of the paper’s first and last authors.
# 6) paper location
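# Sketch of the approach implemented below: the controls listed above are
# one-hot encoded with pd.get_dummies, a cross-validated ridge regression
# (one output per cell of the flattened 8x8 gender/race matrix) is fit to the
# per-paper author probabilities, and the renormalised predictions are saved
# as the control-adjusted probabilities.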
df = pd.read_csv('/%s/article_data/NewArticleData2019.csv'%(homedir),header=0)
prs = np.load('/%s/data/result_pr_df_%s.npy'%(homedir,method))
cont = pd.read_csv('/%s/article_data/CountryAndContData.csv'%(homedir))
df = df.merge(cont,how='outer',left_index=True, right_index=True)
df = df.merge(pd.read_csv('/%s/article_data/SeniorityData.csv'%(homedir)),left_index=True, right_index=True)
reg_df = pd.DataFrame(columns=['year','n_authors','journal','paper_type','senior','location'])
for entry in tqdm.tqdm(df.iterrows(),total=len(df)):
idx = entry[0]
paper = entry[1]
year = entry[1].PY
n_authors = len(paper.AF.split(';'))
journal = entry[1].SO
paper_type = paper.DT
senior = entry[1].V4
try: loc = entry[1]['FirstListed.Cont'].split()[0]
except: loc = 'None'
reg_df.loc[len(reg_df)] = [year,n_authors,journal,paper_type,senior,loc]
reg_df["n_authors"] = pd.to_numeric(reg_df["n_authors"])
reg_df["year"] = pd.to_numeric(reg_df["year"])
reg_df["senior"] = pd.to_numeric(reg_df["senior"])
skl_df = pd.get_dummies(reg_df).values
ridge = MultiOutputRegressor(RidgeCV(alphas=[1e-5,1e-4,1e-3, 1e-2, 1e-1, 1,10,25,50,75,100])).fit(skl_df,prs.reshape(prs.shape[0],-1))
ridge_probabilities = ridge.predict(skl_df)
ridge_probabilities = np.divide((ridge_probabilities), np.sum(ridge_probabilities,axis=1).reshape(-1,1))
ridge_probabilities = ridge_probabilities.reshape(ridge_probabilities.shape[0],8,8)
np.save('/%s/data/probabilities_pr_%s.npy'%(homedir,method),ridge_probabilities)
def make_pr_control_jn():
"""
control for features of citing article
"""
# 1) the year of publication
# 2) the journal in which it was published
# 3) the number of authors
# 4) whether the paper was a review article
# 5) the seniority of the paper’s first and last authors.
# 6) paper location
# 6) paper sub-field
df = pd.read_csv('/%s/article_data/NewArticleData2019.csv'%(homedir),header=0)
prs = np.load('/%s/data/result_pr_df_%s.npy'%(homedir,method))
cont = pd.read_csv('/%s/article_data/CountryAndContData.csv'%(homedir))
df = df.merge(cont,how='outer',left_index=True, right_index=True)
df = df.merge(pd.read_csv('/%s/article_data/SeniorityData.csv'%(homedir)),left_index=True, right_index=True)
df = df.rename(columns={'DI':'doi'})
df['category'] = 'none'
sub = pd.read_csv('/%s/article_data/JoNcategories_no2019.csv'%(homedir))
for cat,doi in zip(sub.category,sub.doi):
df.iloc[np.where(df.doi==doi)[0],-1] = cat
reg_df = pd.DataFrame(columns=['year','n_authors','journal','paper_type','senior','location','category'])
for entry in tqdm.tqdm(df.iterrows(),total=len(df)):
idx = entry[0]
paper = entry[1]
year = entry[1].PY
n_authors = len(paper.AF.split(';'))
journal = entry[1].SO
paper_type = paper.DT
senior = entry[1].V4
cat = entry[1].category
try: loc = entry[1]['FirstListed.Cont'].split()[0]
except: loc = 'None'
reg_df.loc[len(reg_df)] = [year,n_authors,journal,paper_type,senior,loc,cat]
reg_df["n_authors"] = pd.to_numeric(reg_df["n_authors"])
reg_df["year"] = pd.to_numeric(reg_df["year"])
reg_df["senior"] = pd.to_numeric(reg_df["senior"])
skl_df = pd.get_dummies(reg_df).values
ridge = MultiOutputRegressor(RidgeCV(alphas=[1e-5,1e-4,1e-3, 1e-2, 1e-1, 1,10,25,50,75,100])).fit(skl_df,prs.reshape(prs.shape[0],-1))
ridge_probabilities = ridge.predict(skl_df)
ridge_probabilities = np.divide((ridge_probabilities), np.sum(ridge_probabilities,axis=1).reshape(-1,1))
ridge_probabilities = ridge_probabilities.reshape(ridge_probabilities.shape[0],8,8)
np.save('/%s/data/probabilities_pr_%s_jn.npy'%(homedir,method),ridge_probabilities)
def write_matrix():
main_df = pd.read_csv('/%s/data/ArticleDataNew.csv'%(homedir))
prs = np.load('/%s/data/result_pr_df_%s.npy'%(homedir,method))
small_matrix = np.zeros((2,2))
matrix_idxs = {'white':0,'api':1,'hispanic':2,'black':3}
small_idxs = {'white':0,'api':1,'hispanic':1,'black':1}
for fa_r in ['white','api','hispanic','black']:
for la_r in ['white','api','hispanic','black']:
small_matrix[small_idxs[fa_r],small_idxs[la_r]] += np.sum(prs[:,matrix_idxs[fa_r],matrix_idxs[la_r]],axis=0)
np.save('/Users/maxwell/Documents/GitHub/unbiasedciter/expected_matrix_%s.npy'%(method),np.sum(prs,axis=0))
np.save('//Users/maxwell/Documents/GitHub/unbiasedciter/expected_small_matrix_%s.npy'%(method),small_matrix)
def convert_df():
main_df = pd.read_csv('/%s/article_data/NewArticleData2019.csv'%(homedir),header=0)
race_df = pd.read_csv('/%s/data/result_df_%s.csv'%(homedir,method))
"""
A feature store client. This module exposes an API for interacting with feature stores in Hopsworks.
It hides complexity and provides utility methods such as:
- `project_featurestore()`.
- `get_featuregroup()`.
- `get_feature()`.
- `get_features()`.
- `sql()`
- `insert_into_featuregroup()`
- `get_featurestore_metadata()`
- `get_project_featurestores()`
- `get_featuregroups()`
- `get_training_datasets()`
Below is some example usages of this API (assuming you have two featuregroups called
'trx_graph_summary_features' and 'trx_summary_features' with schemas:
|-- cust_id: integer (nullable = true)
|-- pagerank: float (nullable = true)
|-- triangle_count: float (nullable = true)
and
|-- cust_id: integer (nullable = true)
|-- min_trx: float (nullable = true)
|-- max_trx: float (nullable = true)
|-- avg_trx: float (nullable = true)
|-- count_trx: long (nullable = true)
, respectively.
>>> from hops import featurestore
>>> # Get feature group example
>>> #The API will default to version 1 for the feature group and the project's own feature store
>>> trx_summary_features = featurestore.get_featuregroup("trx_summary_features")
>>> #You can also explicitly define version and feature store:
>>> trx_summary_features = featurestore.get_featuregroup("trx_summary_features", featurestore=featurestore.project_featurestore(), featuregroup_version = 1)
>>>
>>> # Get single feature example
>>> #The API will infer the featuregroup and default to version 1 for the feature group with this and the project's own feature store
>>> max_trx_feature = featurestore.get_feature("max_trx")
>>> #You can also explicitly define feature group,version and feature store:
>>> max_trx_feature = featurestore.get_feature("max_trx", featurestore=featurestore.project_featurestore(), featuregroup="trx_summary_features", featuregroup_version = 1)
>>> # When you want to get features from different feature groups the API will infer how to join the features together
>>>
>>> # Get list of features example
>>> # The API will default to version 1 for feature groups and the project's feature store
>>> features = featurestore.get_features(["pagerank", "triangle_count", "avg_trx"], featurestore=featurestore.project_featurestore())
>>> #You can also explicitly define feature group, version, feature store, and join-key:
>>> features = featurestore.get_features(["pagerank", "triangle_count", "avg_trx"], featurestore=featurestore.project_featurestore(), featuregroups_version_dict={"trx_graph_summary_features": 1, "trx_summary_features": 1}, join_key="cust_id")
>>>
>>> # Run SQL query against feature store example
>>> # The API will default to the project's feature store
>>> featurestore.sql("SELECT * FROM trx_graph_summary_features_1 WHERE triangle_count > 5").show(5)
>>> # You can also explicitly define the feature store
>>> featurestore.sql("SELECT * FROM trx_graph_summary_features_1 WHERE triangle_count > 5", featurestore=featurestore.project_featurestore()).show(5)
>>>
>>> # Insert into featuregroup example
>>> # The API will default to the project's feature store, featuregroup version 1, and write mode 'append'
>>> featurestore.insert_into_featuregroup(sampleDf, "trx_graph_summary_features")
>>> # You can also explicitly define the feature store, the featuregroup version, and the write mode (only append and overwrite are supported)
>>> featurestore.insert_into_featuregroup(sampleDf, "trx_graph_summary_features", featurestore=featurestore.project_featurestore(), featuregroup_version=1, mode="append", descriptive_statistics=True, feature_correlation=True, feature_histograms=True, cluster_analysis=True, stat_columns=None)
>>>
>>> # Get featurestore metadata example
>>> # The API will default to the project's feature store
>>> featurestore.get_featurestore_metadata()
>>> # You can also explicitly define the feature store
>>> featurestore.get_featurestore_metadata(featurestore=featurestore.project_featurestore())
>>>
>>> # List all Feature Groups in a Feature Store
>>> featurestore.get_featuregroups()
>>> # By default `get_featuregroups()` will use the project's feature store, but this can also be specified with the optional argument `featurestore`
>>> featurestore.get_featuregroups(featurestore=featurestore.project_featurestore())
>>>
>>> # List all Training Datasets in a Feature Store
>>> featurestore.get_training_datasets()
>>> # By default `get_training_datasets()` will use the project's feature store, but this can also be specified with the optional argument featurestore
>>> featurestore.get_training_datasets(featurestore=featurestore.project_featurestore())
>>>
>>> # Get list of featurestores accessible by the current project example
>>> featurestore.get_project_featurestores()
>>> # By default `get_featurestore_metadata` will use the project's feature store, but this can also be specified with the optional argument featurestore
>>> featurestore.get_featurestore_metadata(featurestore=featurestore.project_featurestore())
>>>
>>> # Compute featuregroup statistics (feature correlation, descriptive stats, feature distributions etc) with Spark that will show up in the Featurestore Registry in Hopsworks
>>> # The API will default to the project's featurestore, featuregroup version 1, and compute all statistics for all columns
>>> featurestore.update_featuregroup_stats("trx_summary_features")
>>> # You can also be explicitly specify featuregroup details and what statistics to compute:
>>> featurestore.update_featuregroup_stats("trx_summary_features", featuregroup_version=1, featurestore=featurestore.project_featurestore(), descriptive_statistics=True,feature_correlation=True, feature_histograms=True, cluster_analysis=True, stat_columns=None)
>>> # If you only want to compute statistics for certain set of columns and exclude surrogate key-columns for example, you can use the optional argument stat_columns to specify which columns to include:
>>> featurestore.update_featuregroup_stats("trx_summary_features", featuregroup_version=1, featurestore=featurestore.project_featurestore(), descriptive_statistics=True, feature_correlation=True, feature_histograms=True, cluster_analysis=True, stat_columns=['avg_trx', 'count_trx', 'max_trx', 'min_trx'])
>>>
>>> # Create featuregroup from an existing dataframe
>>> # In most cases it is recommended that featuregroups are created in the UI on Hopsworks and that care is taken in documenting the featuregroup.
>>> # However, sometimes it is practical to create a featuregroup directly from a spark dataframe and fill in the metadata about the featuregroup later in the UI.
>>> # This can be done through the create_featuregroup API function
>>>
>>> # By default the new featuregroup will be created in the project's featurestore and the statistics for the new featuregroup will be computed based on the provided spark dataframe.
>>> featurestore.create_featuregroup(trx_summary_df1, "trx_summary_features_2", description="trx_summary_features without the column count_trx")
>>> # You can also explicitly specify featuregroup details and what statistics to compute:
>>> featurestore.create_featuregroup(trx_summary_df1, "trx_summary_features_2_2", description="trx_summary_features without the column count_trx",featurestore=featurestore.project_featurestore(),featuregroup_version=1, job_name=None, dependencies=[], descriptive_statistics=False, feature_correlation=False, feature_histograms=False, cluster_analysis=False, stat_columns=None)
>>>
>>> # After you have found the features you need in the featurestore you can materialize the features into a training dataset
>>> # so that you can train a machine learning model using the features. Just as for featuregroups,
>>> # it is useful to version and document training datasets. For this reason, HopsML supports managed training datasets, which enable
>>> # you to easily version, document and automate the materialization of training datasets.
>>>
>>> # First we select the features (and/or labels) that we want
>>> dataset_df = featurestore.get_features(["pagerank", "triangle_count", "avg_trx", "count_trx", "max_trx", "min_trx","balance", "number_of_accounts"],featurestore=featurestore.project_featurestore())
>>> # Now we can create a training dataset from the dataframe with some extended metadata such as schema (automatically inferred).
>>> # By default when you create a training dataset it will be in "tfrecords" format and statistics will be computed for all features.
>>> # After the dataset has been created you can view and/or update the metadata about the training dataset from the Hopsworks featurestore UI
>>> featurestore.create_training_dataset(dataset_df, "AML_dataset")
>>> # You can override the default configuration if necessary:
>>> featurestore.create_training_dataset(dataset_df, "TestDataset", description="", featurestore=featurestore.project_featurestore(), data_format="csv", training_dataset_version=1, job_name=None, dependencies=[], descriptive_statistics=False, feature_correlation=False, feature_histograms=False, cluster_analysis=False, stat_columns=None)
>>>
>>> # Once a dataset has been created, its metadata is browsable in the featurestore registry in the Hopsworks UI.
>>> # If you don't want to create a new training dataset but just overwrite or insert new data into an existing training dataset,
>>> # you can use the API function 'insert_into_training_dataset'
>>> featurestore.insert_into_training_dataset(dataset_df, "TestDataset")
>>> # By default the insert_into_training_dataset will use the project's featurestore, version 1,
>>> # and update the training dataset statistics; this configuration can be overridden:
>>> featurestore.insert_into_training_dataset(dataset_df,"TestDataset", featurestore=featurestore.project_featurestore(), training_dataset_version=1, descriptive_statistics=True, feature_correlation=True, feature_histograms=True, cluster_analysis=True, stat_columns=None)
>>>
>>> # After a managed dataset has been created, it is easy to share it and re-use it for training various models.
>>> # For example, if the dataset has been materialized in tf-records format you can call the method get_training_dataset_path(training_dataset)
>>> # to get the HDFS path and read it directly in your tensorflow code.
>>> featurestore.get_training_dataset_path("AML_dataset")
>>> # By default the library will look for the training dataset in the project's featurestore and use version 1, but this can be overridden if required:
>>> featurestore.get_training_dataset_path("AML_dataset", featurestore=featurestore.project_featurestore(), training_dataset_version=1)
"""
from hops import util
from hops import hdfs
from hops import tls
import pydoop.hdfs as pydoop
from hops import constants
import math
from pyspark.sql.utils import AnalysisException
import json
from pyspark.mllib.stat import Statistics
import pandas as pd
from pyspark.ml.feature import VectorAssembler
from pyspark.ml.clustering import KMeans
from pyspark.ml.feature import PCA
from pyspark.ml.linalg import Vectors
import re
import numpy as np
from pyspark.sql import DataFrame
from pyspark.rdd import RDD
from pyspark.sql import SQLContext
from tempfile import TemporaryFile
import pyarrow as pa
from petastorm.etl.dataset_metadata import materialize_dataset
# for backwards compatibility
try:
import h5py
except ImportError:
pass
# for backwards compatibility
try:
import tensorflow as tf
except ImportError:
pass
metadata_cache = None
def project_featurestore():
"""
Gets the project's featurestore name (project_featurestore)
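Example usage:
>>> # Returns e.g. "demo_featurestore" for a project named "demo" (the exact suffix comes from the constants module)
>>> featurestore.project_featurestore()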
Returns:
the project's featurestore name
"""
project_name = hdfs.project_name()
featurestore_name = project_name.lower() + constants.FEATURE_STORE.FEATURESTORE_SUFFIX
return featurestore_name
def _log(x):
"""
Generic log function (in case logging is changed from stdout later)
Args:
:x: the argument to log
Returns:
None
"""
print(x)
def _run_and_log_sql(spark, sql_str):
"""
Runs and logs an SQL query with sparkSQL
Args:
:spark: the spark session
:sql_str: the query to run
Returns:
the result of the SQL query
"""
_log("Running sql: {}".format(sql_str))
return spark.sql(sql_str)
def _get_table_name(featuregroup, version):
"""
Gets the Hive table name of a featuregroup and version
Args:
:featuregroup: the featuregroup to get the table name of
:version: the version of the featuregroup
Returns:
The Hive table name of the featuregroup with the specified version
"""
return featuregroup + "_" + str(version)
def _get_featurestore_metadata(featurestore=None, update_cache=False):
"""
Makes a REST call to the appservice in hopsworks to get all featuregroups and training datasets for
the provided featurestore, authenticating with keystore and password.
Args:
:featurestore: the name of the database, defaults to the project's featurestore
:update_cache: if true the cache is updated
Returns:
JSON list of featuregroups
"""
if featurestore is None:
featurestore = project_featurestore()
global metadata_cache
if metadata_cache is None or update_cache:
method = constants.HTTP_CONFIG.HTTP_GET
connection = util._get_http_connection(https=True)
resource_url = (constants.DELIMITERS.SLASH_DELIMITER +
constants.REST_CONFIG.HOPSWORKS_REST_RESOURCE + constants.DELIMITERS.SLASH_DELIMITER +
constants.REST_CONFIG.HOPSWORKS_PROJECT_RESOURCE + constants.DELIMITERS.SLASH_DELIMITER +
hdfs.project_id() + constants.DELIMITERS.SLASH_DELIMITER +
constants.REST_CONFIG.HOPSWORKS_FEATURESTORES_RESOURCE + constants.DELIMITERS.SLASH_DELIMITER +
constants.REST_CONFIG.HOPSWORKS_FEATURESTORE_RESOURCE + constants.DELIMITERS.SLASH_DELIMITER +
featurestore)
response = util.send_request(connection, method, resource_url)
resp_body = response.read()
response_object = json.loads(resp_body)
metadata_cache = response_object
return metadata_cache
def _parse_featuregroups_json(featuregroups):
"""
Parses the list of JSON featuregroups into a list of dicts, one per featuregroup, with name, features and version
Args:
:featuregroups: a list of JSON featuregroups
Returns:
A list of [{featuregroupName, features, version}]
"""
parsed_featuregroups = []
for fg in featuregroups:
parsed_fg = {}
parsed_fg[constants.REST_CONFIG.JSON_FEATUREGROUPNAME] = fg[constants.REST_CONFIG.JSON_FEATUREGROUPNAME]
parsed_fg[constants.REST_CONFIG.JSON_FEATUREGROUP_FEATURES] = fg[
constants.REST_CONFIG.JSON_FEATUREGROUP_FEATURES]
parsed_fg[constants.REST_CONFIG.JSON_FEATUREGROUP_VERSION] = fg[constants.REST_CONFIG.JSON_FEATUREGROUP_VERSION]
parsed_featuregroups.append(parsed_fg)
return parsed_featuregroups
def _find_featuregroup_that_contains_feature(featuregroups, feature):
"""
Go through list of featuregroups and find the ones that contain the feature
Args:
:featuregroups: featuregroups to search through
:feature: the feature to look for
Returns:
a list of featuregroup names and versions for featuregroups that contain the given feature
"""
matches = []
for fg in featuregroups:
for f in fg[constants.REST_CONFIG.JSON_FEATUREGROUP_FEATURES]:
fg_table_name = _get_table_name(fg[constants.REST_CONFIG.JSON_FEATUREGROUPNAME],
fg[constants.REST_CONFIG.JSON_FEATUREGROUP_VERSION])
full_name = fg_table_name + "." + f[constants.REST_CONFIG.JSON_FEATURE_NAME]
if f[constants.REST_CONFIG.JSON_FEATURE_NAME] == feature or full_name == feature:
matches.append(fg)
break
return matches
def _use_featurestore(spark, featurestore=None):
"""
Selects the featurestore database in Spark
Args:
:spark: the spark session
:featurestore: the name of the database, defaults to the project's featurestore
Returns:
None
"""
if featurestore is None:
featurestore = project_featurestore()
try:
sql_str = "use " + featurestore
_run_and_log_sql(spark, sql_str)
except AnalysisException as e:
raise AssertionError((
"A hive database for the featurestore {} was not found, have you enabled the featurestore service in your project?".format(
featurestore)))
def _return_dataframe_type(dataframe, dataframe_type):
"""
Helper method for returning the dataframe in spark/pandas/numpy/python, depending on user preferences
Args:
:dataframe: the spark dataframe to convert
:dataframe_type: the type to convert to (spark,pandas,numpy,python)
Returns:
The dataframe converted to either spark, pandas, numpy or python.
"""
if dataframe_type == constants.FEATURE_STORE.DATAFRAME_TYPE_SPARK:
return dataframe
if dataframe_type == constants.FEATURE_STORE.DATAFRAME_TYPE_PANDAS:
return dataframe.toPandas()
if dataframe_type == constants.FEATURE_STORE.DATAFRAME_TYPE_NUMPY:
return np.array(dataframe.collect())
if dataframe_type == constants.FEATURE_STORE.DATAFRAME_TYPE_PYTHON:
return dataframe.collect()
def _convert_dataframe_to_spark(dataframe):
"""
Helper method for converting a user-provided dataframe into a spark dataframe
Args:
:dataframe: the input dataframe (supported types are spark rdds, spark dataframes, pandas dataframes, python 2D lists, and numpy 2D arrays)
Returns:
the dataframe converted to a spark dataframe
"""
spark = util._find_spark()
if isinstance(dataframe, pd.DataFrame):
sc = spark.sparkContext
sql_context = SQLContext(sc)
return sql_context.createDataFrame(dataframe)
if isinstance(dataframe, list):
dataframe = np.array(dataframe)
if isinstance(dataframe, np.ndarray):
if dataframe.ndim != 2:
raise AssertionError(
"Cannot convert numpy array that do not have two dimensions to a dataframe. The number of dimensions are: {}".format(
dataframe.ndim))
num_cols = dataframe.shape[1]
dataframe_dict = {}
for n_col in list(range(num_cols)):
col_name = "col_" + str(n_col)
dataframe_dict[col_name] = dataframe[:, n_col]
pandas_df = pd.DataFrame(dataframe_dict)
sc = spark.sparkContext
sql_context = SQLContext(sc)
return sql_context.createDataFrame(pandas_df)
if isinstance(dataframe, RDD):
return dataframe.toDF()
if isinstance(dataframe, DataFrame):
return dataframe
raise AssertionError(
"The provided dataframe type is not recognized. Supported types are: spark rdds, spark dataframes, pandas dataframes, python 2D lists, and numpy 2D arrays. The provided dataframe has type: {}".format(
type(dataframe)))
def get_featuregroup(featuregroup, featurestore=None, featuregroup_version=1, dataframe_type="spark"):
"""
Gets a featuregroup from a featurestore as a spark dataframe
Example usage:
>>> #The API will default to version 1 for the feature group and the project's own feature store
>>> trx_summary_features = featurestore.get_featuregroup("trx_summary_features")
>>> #You can also explicitly define version and feature store:
>>> trx_summary_features = featurestore.get_featuregroup("trx_summary_features", featurestore=featurestore.project_featurestore(), featuregroup_version = 1)
Args:
:featuregroup: the featuregroup to get
:featurestore: the featurestore where the featuregroup resides, defaults to the project's featurestore
:featuregroup_version: (Optional) the version of the featuregroup
:dataframe_type: the type of the returned dataframe (spark, pandas, python or numpy)
Returns:
a spark dataframe with the contents of the featurestore
"""
if featurestore is None:
featurestore = project_featurestore()
spark = util._find_spark()
spark.sparkContext.setJobGroup("Fetching Featuregroup",
"Getting feature group: {} from the featurestore {}".format(featuregroup,
featurestore))
_use_featurestore(spark, featurestore)
sql_str = "SELECT * FROM " + _get_table_name(featuregroup, featuregroup_version)
result = _run_and_log_sql(spark, sql_str)
spark.sparkContext.setJobGroup("", "")
return _return_dataframe_type(result, dataframe_type)
def _find_feature(feature, featurestore, featuregroups_parsed):
"""
Looks if a given feature can be uniquely found in a list of featuregroups and returns that featuregroup.
Otherwise it throws an exception
Args:
:feature: the feature to search for
:featurestore: the featurestore where the featuregroups resides
:featuregroups_parsed: the featuregroups to look through
Returns:
the featuregroup that contains the feature
"""
featuregroups_matched = _find_featuregroup_that_contains_feature(featuregroups_parsed, feature)
if (len(featuregroups_matched) == 0):
raise AssertionError(
"Could not find the feature with name '{}' in any of the featuregroups of the featurestore: '{}'".format(
feature, featurestore))
if (len(featuregroups_matched) > 1):
featuregroups_matched_str_list = map(lambda fg: _get_table_name(fg[constants.REST_CONFIG.JSON_FEATUREGROUPNAME],
fg[
constants.REST_CONFIG.JSON_FEATUREGROUP_VERSION]),
featuregroups_matched)
featuregroups_matched_str = ",".join(featuregroups_matched_str_list)
raise AssertionError("Found the feature with name '{}' "
"in more than one of the featuregroups of the featurestore: '{}', "
"please specify the optional argument 'featuregroup=', "
"the matched featuregroups were: {}".format(feature, featurestore,
featuregroups_matched_str))
return featuregroups_matched[0]
def get_feature(feature, featurestore=None, featuregroup=None, featuregroup_version=1, dataframe_type="spark"):
"""
Gets a particular feature (column) from a featurestore, if no featuregroup is specified it queries hopsworks metastore
to see if the feature exists in any of the featuregroups in the featurestore. If the user knows which featuregroup
contains the feature, it should be specified as it will improve performance of the query. Will first try to construct the query
from the cached metadata, if that fails, it retries after updating the cache
Example usage:
>>> #The API will infer the featuregroup, default to version 1 for the feature group, and use the project's own feature store
>>> max_trx_feature = featurestore.get_feature("max_trx")
>>> #You can also explicitly define feature group,version and feature store:
>>> max_trx_feature = featurestore.get_feature("max_trx", featurestore=featurestore.project_featurestore(), featuregroup="trx_summary_features", featuregroup_version = 1)
Args:
:feature: the feature name to get
:featurestore: the featurestore where the featuregroup resides, defaults to the project's featurestore
:featuregroup: (Optional) the featuregroup where the feature resides
:featuregroup_version: (Optional) the version of the featuregroup
:dataframe_type: the type of the returned dataframe (spark, pandas, python or numpy)
Returns:
A spark dataframe with the feature
"""
try: # try with cached metadata
return _do_get_feature(feature, _get_featurestore_metadata(featurestore, update_cache=False), featurestore=featurestore, featuregroup=featuregroup, featuregroup_version=featuregroup_version, dataframe_type=dataframe_type)
except: # Try again after updating cache
return _do_get_feature(feature, _get_featurestore_metadata(featurestore, update_cache=True), featurestore=featurestore, featuregroup=featuregroup, featuregroup_version=featuregroup_version, dataframe_type=dataframe_type)
def _do_get_feature(feature, featurestore_metadata, featurestore=None, featuregroup=None, featuregroup_version=1, dataframe_type="spark"):
"""
Gets a particular feature (column) from a featurestore, if no featuregroup is specified it queries hopsworks metastore
to see if the feature exists in any of the featuregroups in the featurestore. If the user knows which featuregroup
contains the feature, it should be specified as it will improve performance of the query.
Args:
:feature: the feature name to get
:featurestore: the featurestore where the featuregroup resides, defaults to the project's featurestore
:featuregroup: (Optional) the featuregroup where the feature resides
:featuregroup_version: (Optional) the version of the featuregroup
:dataframe_type: the type of the returned dataframe (spark, pandas, python or numpy)
:featurestore_metadata: the metadata of the featurestore to query
Returns:
A spark dataframe with the feature
"""
if featurestore is None:
featurestore = project_featurestore()
spark = util._find_spark()
_use_featurestore(spark, featurestore)
spark.sparkContext.setJobGroup("Fetching Feature",
"Getting feature: {} from the featurestore {}".format(feature, featurestore))
if (featuregroup != None):
sql_str = "SELECT " + feature + " FROM " + _get_table_name(featuregroup, featuregroup_version)
result = _run_and_log_sql(spark, sql_str)
return _return_dataframe_type(result, dataframe_type)
else:
# make REST call to find out where the feature is located and return them
# if the feature exists in multiple tables return an error message specifying this
featuregroups_json = featurestore_metadata["featuregroups"]
featuregroups_parsed = _parse_featuregroups_json(featuregroups_json)
if (len(featuregroups_parsed) == 0):
raise AssertionError("Could not find any featuregroups in the metastore, " \
"please explicitly supply featuregroups as an argument to the API call")
featuregroup_matched = _find_feature(feature, featurestore, featuregroups_parsed)
sql_str = "SELECT " + feature + " FROM " + _get_table_name(
featuregroup_matched[constants.REST_CONFIG.JSON_FEATUREGROUPNAME],
featuregroup_matched[constants.REST_CONFIG.JSON_FEATUREGROUP_VERSION])
result = _run_and_log_sql(spark, sql_str)
spark.sparkContext.setJobGroup("Fetching Feature",
"Getting feature: {} from the featurestore {}".format(feature, featurestore))
return _return_dataframe_type(result, dataframe_type)
def _get_join_str(featuregroups, join_key):
"""
Constructs the JOIN ... ON ... string from a list of tables (featuregroups) and a join column
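Example return value (illustrative, assuming two parsed featuregroups "trx_summary_features" v1 and
"trx_graph_summary_features" v1 joined on the shared column "cust_id"):
"JOIN trx_graph_summary_features_1 ON trx_summary_features_1.`cust_id`=trx_graph_summary_features_1.`cust_id`"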
Args:
:featuregroups: the featuregroups to join
:join_key: the key to join on
Returns:
SQL join string to join a set of feature groups together
"""
join_str = ""
for idx, fg in enumerate(featuregroups):
if (idx != 0):
join_str = join_str + "JOIN " + _get_table_name(fg[constants.REST_CONFIG.JSON_FEATUREGROUPNAME],
fg[constants.REST_CONFIG.JSON_FEATUREGROUP_VERSION]) + " "
join_str = join_str + "ON "
for idx, fg in enumerate(featuregroups):
if (idx != 0 and idx < (len(featuregroups) - 1)):
join_str = join_str + _get_table_name(featuregroups[0][constants.REST_CONFIG.JSON_FEATUREGROUPNAME],
featuregroups[0][
constants.REST_CONFIG.JSON_FEATUREGROUP_VERSION]) + ".`" + join_key + "`=" + \
_get_table_name(fg[constants.REST_CONFIG.JSON_FEATUREGROUPNAME],
fg[constants.REST_CONFIG.JSON_FEATUREGROUP_VERSION]) + ".`" + join_key + "` AND "
elif (idx != 0 and idx == (len(featuregroups) - 1)):
join_str = join_str + _get_table_name(featuregroups[0][constants.REST_CONFIG.JSON_FEATUREGROUPNAME],
featuregroups[0][
constants.REST_CONFIG.JSON_FEATUREGROUP_VERSION]) + ".`" + join_key + "`=" + \
_get_table_name(fg[constants.REST_CONFIG.JSON_FEATUREGROUPNAME],
fg[constants.REST_CONFIG.JSON_FEATUREGROUP_VERSION]) + ".`" + join_key + "`"
return join_str
def _get_col_that_is_primary(common_cols, featuregroups):
"""
Helper method that returns the column, among the columns shared between featuregroups, that is most often marked as
'primary' in the hive schema.
Args:
:common_cols: the list of columns shared between all featuregroups
:featuregroups: the list of featuregroups
Returns:
the shared column that is most often marked as 'primary' in the hive schema
"""
primary_counts = []
for col in common_cols:
primary_count = 0
for fg in featuregroups:
for feature in fg[constants.REST_CONFIG.JSON_FEATUREGROUP_FEATURES]:
if feature[constants.REST_CONFIG.JSON_FEATURE_NAME] == col and feature[
constants.REST_CONFIG.JSON_FEATURE_PRIMARY]:
primary_count = primary_count + 1
primary_counts.append(primary_count)
max_no_primary = max(primary_counts)
if max(primary_counts) == 0:
return common_cols[0]
else:
return common_cols[primary_counts.index(max_no_primary)]
def _validate_metadata(name, dtypes, dependencies, description):
"""
Function for validating metadata when creating new feature groups and training datasets.
Raises an assertion error if there is some error in the metadata.
Args:
:name: the name of the feature group/training dataset
:dtypes: the dtypes in the provided spark dataframe
:dependencies: the list of data dependencies
:description: the description
Returns:
None
"""
name_pattern = re.compile("^[a-zA-Z0-9_]+$")
if len(name) > 256 or name == "" or not name_pattern.match(name):
raise AssertionError("Name of feature group/training dataset cannot be empty, cannot exceed 256 characters," \
" and must match the regular expression: ^[a-zA-Z0-9_]+$, the provided name: {} is not valid".format(
name))
if len(dtypes) == 0:
raise AssertionError("Cannot create a feature group from an empty spark dataframe")
for dtype in dtypes:
if len(dtype[0]) > 767 or dtype[0] == "" or not name_pattern.match(dtype[0]):
raise AssertionError("Name of feature column cannot be empty, cannot exceed 767 characters," \
" and must match the regular expression: ^[a-zA-Z0-9_]+$, the provided feature name: {} is not valid".format(
dtype[0]))
if not len(set(dependencies)) == len(dependencies):
dependencies_str = ",".join(dependencies)
raise AssertionError("The list of data dependencies contains duplicates: {}".format(dependencies_str))
if len(description) > 2000:
raise AssertionError(
"Feature group/Training dataset description should not exceed the maximum length of 2000 characters, the provided description has length: {}".format(
len(description)))
def _get_join_col(featuregroups):
"""
Finds a common JOIN column among featuregroups (hive tables)
Args:
:featuregroups: a list of featuregroups with version and features
Returns:
name of the join column
"""
feature_sets = []
for fg in featuregroups:
columns = fg[constants.REST_CONFIG.JSON_FEATUREGROUP_FEATURES]
columns_names = map(lambda col: col[constants.REST_CONFIG.JSON_FEATURE_NAME], columns)
feature_set = set(columns_names)
feature_sets.append(feature_set)
common_cols = list(set.intersection(*feature_sets))
if (len(common_cols) == 0):
featuregroups_str = ", ".join(
list(map(lambda x: x[constants.REST_CONFIG.JSON_FEATUREGROUPNAME], featuregroups)))
raise AssertionError("Could not find any common columns in featuregroups to join on, " \
"searched through featuregroups: " \
"{}".format(featuregroups_str))
return _get_col_that_is_primary(common_cols, featuregroups)
def _convert_featuregroup_version_dict(featuregroups_version_dict):
"""
Converts a featuregroup->version dict into a list of {name: name, version: version}
Args:
:featuregroups_version_dict: a dict mapping featuregroup name to version
Returns:
a list of {featuregroup_name: name, version: version}
"""
parsed_featuregroups = []
for i, (name, version) in enumerate(featuregroups_version_dict.items()):
parsed_featuregroup = {}
parsed_featuregroup[constants.REST_CONFIG.JSON_FEATUREGROUPNAME] = name
parsed_featuregroup[constants.REST_CONFIG.JSON_FEATUREGROUP_VERSION] = version
parsed_featuregroups.append(parsed_featuregroup)
return parsed_featuregroups
def get_features(features, featurestore=None, featuregroups_version_dict={}, join_key=None, dataframe_type="spark"):
"""
Gets a list of features (columns) from the featurestore. If no featuregroup is specified it will query hopsworks
metastore to find where the features are stored. It will try to construct the query first from the cached metadata;
if that fails it will retry after reloading the cache
Example usage:
>>> # The API will default to version 1 for feature groups and the project's feature store
>>> features = featurestore.get_features(["pagerank", "triangle_count", "avg_trx"], featurestore=featurestore.project_featurestore())
>>> #You can also explicitly define feature group, version, feature store, and join-key:
>>> features = featurestore.get_features(["pagerank", "triangle_count", "avg_trx"], featurestore=featurestore.project_featurestore(), featuregroups_version_dict={"trx_graph_summary_features": 1, "trx_summary_features": 1}, join_key="cust_id")
Args:
:features: a list of features to get from the featurestore
:featurestore: the featurestore where the featuregroup resides, defaults to the project's featurestore
:featuregroups_version_dict: (Optional) a dict with (featuregroup --> version) for all the featuregroups where the features reside
:join_key: (Optional) column name to join on
:dataframe_type: the type of the returned dataframe (spark, pandas, python or numpy)
Returns:
A spark dataframe with all the features
"""
# try with cached metadata
try:
return _do_get_features(features, _get_featurestore_metadata(featurestore, update_cache=False), featurestore=featurestore, featuregroups_version_dict=featuregroups_version_dict, join_key=join_key, dataframe_type=dataframe_type)
# Try again after updating cache
except:
return _do_get_features(features, _get_featurestore_metadata(featurestore, update_cache=True), featurestore=featurestore, featuregroups_version_dict=featuregroups_version_dict, join_key=join_key, dataframe_type=dataframe_type)
def _do_get_features(features, featurestore_metadata, featurestore=None, featuregroups_version_dict={}, join_key=None, dataframe_type="spark"):
"""
Gets a list of features (columns) from the featurestore. If no featuregroup is specified it will query hopsworks
metastore to find where the features are stored.
Args:
:features: a list of features to get from the featurestore
:featurestore: the featurestore where the featuregroup resides, defaults to the project's featurestore
:featuregroups_version_dict: (Optional) a dict with (featuregroup --> version) for all the featuregroups where the features reside
:join_key: (Optional) column name to join on
:dataframe_type: the type of the returned dataframe (spark, pandas, python or numpy)
:featurestore_metadata: the metadata of the featurestore
Returns:
A spark dataframe with all the features
"""
if featurestore is None:
featurestore = project_featurestore()
features = list(set(features))
spark = util._find_spark()
_use_featurestore(spark, featurestore)
spark.sparkContext.setJobGroup("Fetching Features",
"Getting features: {} from the featurestore {}".format(features, featurestore))
featuresStr = ", ".join(features)
featuregroupsStrings = []
for fg in featuregroups_version_dict:
featuregroupsStrings.append(_get_table_name(fg, featuregroups_version_dict[fg]))
featuregroupssStr = ", ".join(featuregroupsStrings)
if (len(featuregroups_version_dict) == 1):
sql_str = "SELECT " + featuresStr + " FROM " + featuregroupssStr
result = _run_and_log_sql(spark, sql_str)
return _return_dataframe_type(result, dataframe_type)
if (len(featuregroups_version_dict) > 1):
if (join_key != None):
featuregroups_parsed_filtered = _convert_featuregroup_version_dict(featuregroups_version_dict)
join_str = _get_join_str(featuregroups_parsed_filtered, join_key)
else:
featuregroups_json = featurestore_metadata[constants.REST_CONFIG.JSON_FEATUREGROUPS]
featuregroups_parsed = _parse_featuregroups_json(featuregroups_json)
if (len(featuregroups_parsed) == 0):
raise AssertionError("Could not find any featuregroups in the metastore, " \
"please explicitly supply featuregroups as an argument to the API call")
featuregroups_parsed_filtered = list(filter(
lambda fg: fg[constants.REST_CONFIG.JSON_FEATUREGROUPNAME] in featuregroups_version_dict and
featuregroups_version_dict[
fg[constants.REST_CONFIG.JSON_FEATUREGROUPNAME]] == fg[
constants.REST_CONFIG.JSON_FEATUREGROUP_VERSION],
featuregroups_parsed))
join_col = _get_join_col(featuregroups_parsed_filtered)
join_str = _get_join_str(featuregroups_parsed_filtered, join_col)
sql_str = "SELECT " + featuresStr + " FROM " + _get_table_name(
featuregroups_parsed_filtered[0][constants.REST_CONFIG.JSON_FEATUREGROUPNAME],
featuregroups_parsed_filtered[0][constants.REST_CONFIG.JSON_FEATUREGROUP_VERSION]) \
+ " " + join_str
result = _run_and_log_sql(spark, sql_str)
spark.sparkContext.setJobGroup("", "")
return _return_dataframe_type(result, dataframe_type)
if (len(featuregroups_version_dict) == 0):
# make REST call to find out where the feature is located and return them
# if the feature exists in multiple tables return an error message specifying this
featuregroups_json = featurestore_metadata[constants.REST_CONFIG.JSON_FEATUREGROUPS]
featuregroups_parsed = _parse_featuregroups_json(featuregroups_json)
if (len(featuregroups_parsed) == 0):
raise AssertionError("Could not find any featuregroups in the metastore, " \
"please explicitly supply featuregroups as an argument to the API call")
feature_to_featuregroup = {}
feature_featuregroups = []
for feature in features:
featuregroup_matched = _find_feature(feature, featurestore, featuregroups_parsed)
feature_to_featuregroup[feature] = featuregroup_matched
if not _check_if_list_of_featuregroups_contains_featuregroup(feature_featuregroups,
featuregroup_matched[
constants.REST_CONFIG.JSON_FEATUREGROUPNAME],
featuregroup_matched[
constants.REST_CONFIG.JSON_FEATUREGROUP_VERSION]):
feature_featuregroups.append(featuregroup_matched)
if len(feature_featuregroups) == 1:
sql_str = "SELECT " + featuresStr + " FROM " + _get_table_name(
feature_featuregroups[0][constants.REST_CONFIG.JSON_FEATUREGROUPNAME],
feature_featuregroups[0][constants.REST_CONFIG.JSON_FEATUREGROUP_VERSION])
else:
join_col = _get_join_col(feature_featuregroups)
join_str = _get_join_str(feature_featuregroups, join_col)
sql_str = "SELECT " + featuresStr + " FROM " + _get_table_name(
feature_featuregroups[0][constants.REST_CONFIG.JSON_FEATUREGROUPNAME],
feature_featuregroups[0][constants.REST_CONFIG.JSON_FEATUREGROUP_VERSION]) + " " \
+ join_str
result = _run_and_log_sql(spark, sql_str)
return _return_dataframe_type(result, dataframe_type)
def _check_if_list_of_featuregroups_contains_featuregroup(featuregroups, featuregroupname, version):
"""
Check if a list of featuregroup contains a featuregroup with a particular name and version
Args:
:featuregroups: the list of featuregroups to search through
:featuregroupname: the name of the featuregroup
:version: the featuregroup version
Returns:
boolean indicating whether the featuregroup name and version exists in the list
"""
match_bool = False
for fg in featuregroups:
if (fg[constants.REST_CONFIG.JSON_FEATUREGROUPNAME] == featuregroupname and fg[
constants.REST_CONFIG.JSON_FEATUREGROUP_VERSION] == version):
match_bool = True
return match_bool
def sql(query, featurestore=None, dataframe_type="spark"):
"""
Executes a generic SQL query on the featurestore
Example usage:
>>> # The API will default to the project's feature store
>>> featurestore.sql("SELECT * FROM trx_graph_summary_features_1 WHERE triangle_count > 5").show(5)
>>> # You can also explicitly define the feature store
>>> featurestore.sql("SELECT * FROM trx_graph_summary_features_1 WHERE triangle_count > 5", featurestore=featurestore.project_featurestore()).show(5)
Args:
:query: SQL query
:featurestore: the featurestore to query, defaults to the project's featurestore
:dataframe_type: the type of the returned dataframe (spark, pandas, python or numpy)
Returns:
A dataframe with the query results
"""
if featurestore is None:
featurestore = project_featurestore()
spark = util._find_spark()
spark.sparkContext.setJobGroup("Running SQL query against feature store",
"Running query: {} on the featurestore {}".format(query, featurestore))
_use_featurestore(spark, featurestore)
result = _run_and_log_sql(spark, query)
spark.sparkContext.setJobGroup("", "")
return _return_dataframe_type(result, dataframe_type)
def _delete_table_contents(featurestore, featuregroup, featuregroup_version):
"""
Sends a request to clear the contents of a featuregroup by dropping the featuregroup and recreating it with
the same metadata.
Args:
:featurestore: the featurestore where the featuregroup resides
:featuregroup: the featuregroup to clear
:featuregroup_version: the version of the featuregroup
Returns:
The JSON response
"""
json_contents = {constants.REST_CONFIG.JSON_FEATURESTORENAME: featurestore,
constants.REST_CONFIG.JSON_FEATUREGROUPNAME: featuregroup,
constants.REST_CONFIG.JSON_FEATUREGROUP_VERSION: featuregroup_version}
json_embeddable = json.dumps(json_contents)
headers = {constants.HTTP_CONFIG.HTTP_CONTENT_TYPE: constants.HTTP_CONFIG.HTTP_APPLICATION_JSON}
method = constants.HTTP_CONFIG.HTTP_POST
connection = util._get_http_connection(https=True)
resource_url = (constants.DELIMITERS.SLASH_DELIMITER +
constants.REST_CONFIG.HOPSWORKS_REST_RESOURCE + constants.DELIMITERS.SLASH_DELIMITER +
constants.REST_CONFIG.HOPSWORKS_PROJECT_RESOURCE + constants.DELIMITERS.SLASH_DELIMITER +
hdfs.project_id() + constants.DELIMITERS.SLASH_DELIMITER +
constants.REST_CONFIG.HOPSWORKS_FEATURESTORES_RESOURCE + constants.DELIMITERS.SLASH_DELIMITER +
constants.REST_CONFIG.HOPSWORKS_CLEAR_FEATUREGROUP_RESOURCE)
response = util.send_request(connection, method, resource_url, json_embeddable, headers)
resp_body = response.read()
response_object = json.loads(resp_body)
try: # for python 3
if response.code != 200:
error_code, error_msg, user_msg = util._parse_rest_error(response_object)
raise AssertionError("Could not clear featuregroup contents, server response: \n "
"HTTP code: {}, HTTP reason: {}, error code: {}, error msg: {}, user msg: {}".format(
response.code, response.reason, error_code, error_msg, user_msg))
except AttributeError: # for python 2
if response.status != 200:
error_code, error_msg, user_msg = util._parse_rest_error(response_object)
raise AssertionError("Could not clear featuregroup contents, server response: \n " \
"HTTP code: {}, HTTP reason: {}, error code: {}, error msg: {}, user msg: {}".format(
response.status, response.reason, error_code, error_msg, user_msg))
return response_object
def _get_featurestores():
"""
Sends a REST request to get all featurestores for the project
Returns:
a list of Featurestore JSON DTOs
"""
method = constants.HTTP_CONFIG.HTTP_GET
connection = util._get_http_connection(https=True)
resource_url = (constants.DELIMITERS.SLASH_DELIMITER +
constants.REST_CONFIG.HOPSWORKS_REST_RESOURCE + constants.DELIMITERS.SLASH_DELIMITER +
constants.REST_CONFIG.HOPSWORKS_PROJECT_RESOURCE + constants.DELIMITERS.SLASH_DELIMITER +
hdfs.project_id() + constants.DELIMITERS.SLASH_DELIMITER +
constants.REST_CONFIG.HOPSWORKS_FEATURESTORES_RESOURCE)
response = util.send_request(connection, method, resource_url)
resp_body = response.read()
response_object = json.loads(resp_body)
try: # for python 3
if response.code != 200:
error_code, error_msg, user_msg = util._parse_rest_error(response_object)
raise AssertionError("Could not fetch feature stores, server response: \n " \
"HTTP code: {}, HTTP reason: {}, error code: {}, error msg: {}, user msg: {}".format(
response.code, response.reason, error_code, error_msg, user_msg))
except AttributeError: # for python 2
if response.status != 200:
error_code, error_msg, user_msg = util._parse_rest_error(response_object)
raise AssertionError("Could not fetch feature stores, server response: \n " \
"HTTP code: {}, HTTP reason: {}, error code: {}, error msg: {}, user msg: {}".format(
response.status, response.reason, error_code, error_msg, user_msg))
return response_object
def _write_featuregroup_hive(spark_df, featuregroup, featurestore, featuregroup_version, mode):
"""
Writes the contents of a spark dataframe to a feature group Hive table
Args:
:spark_df: the data to write
:featuregroup: the featuregroup to write to
:featurestore: the featurestore where the featuregroup resides
:featuregroup_version: the version of the featuregroup
:mode: the write mode (append or overwrite)
Returns:
None
"""
spark = util._find_spark()
spark.sparkContext.setJobGroup("Inserting dataframe into featuregroup",
"Inserting into featuregroup: {} in the featurestore {}".format(featuregroup,
featurestore))
_use_featurestore(spark, featurestore)
tbl_name = _get_table_name(featuregroup, featuregroup_version)
if mode == constants.FEATURE_STORE.FEATURE_GROUP_INSERT_OVERWRITE_MODE:
_delete_table_contents(featurestore, featuregroup, featuregroup_version)
if not mode == constants.FEATURE_STORE.FEATURE_GROUP_INSERT_APPEND_MODE and not mode == constants.FEATURE_STORE.FEATURE_GROUP_INSERT_OVERWRITE_MODE:
raise AssertionError(
"The provided write mode {} does not match the supported modes: ['{}', '{}']".format(mode,
constants.FEATURE_STORE.FEATURE_GROUP_INSERT_APPEND_MODE,
constants.FEATURE_STORE.FEATURE_GROUP_INSERT_OVERWRITE_MODE))
# overwrite is not supported because it will drop the table and create a new one,
# this means that all the featuregroup metadata will be dropped due to ON DELETE CASCADE
# to simulate "overwrite" we call appservice to drop featuregroup and re-create with the same metadata
mode = constants.FEATURE_STORE.FEATURE_GROUP_INSERT_APPEND_MODE
# Specify format hive as it is managed table
format = "hive"
spark_df.write.format(format).mode(mode).saveAsTable(tbl_name)
spark.sparkContext.setJobGroup("", "")
def insert_into_featuregroup(df, featuregroup, featurestore=None, featuregroup_version=1, mode="append",
descriptive_statistics=True, feature_correlation=True, feature_histograms=True,
cluster_analysis=True, stat_columns=None, num_bins=20, corr_method='pearson',
num_clusters=5):
"""
Saves the given dataframe to the specified featuregroup. Defaults to the project's featurestore.
This will append to the featuregroup. To overwrite a featuregroup, create a new version of the featuregroup
from the UI and append to that table.
Example usage:
>>> # The API will default to the project's feature store, featuregroup version 1, and write mode 'append'
>>> featurestore.insert_into_featuregroup(sampleDf, "trx_graph_summary_features")
>>> # You can also explicitly define the feature store, the featuregroup version, and the write mode (only append and overwrite are supported)
>>> featurestore.insert_into_featuregroup(sampleDf, "trx_graph_summary_features", featurestore=featurestore.project_featurestore(), featuregroup_version=1, mode="append", descriptive_statistics=True, feature_correlation=True, feature_histograms=True, cluster_analysis=True, stat_columns=None)
Args:
:df: the dataframe containing the data to insert into the featuregroup
:featuregroup: the name of the featuregroup (hive table name)
:featurestore: the featurestore to save the featuregroup to (hive database)
:featuregroup_version: the version of the featuregroup (defaults to 1)
:mode: the write mode, only 'overwrite' and 'append' are supported
:descriptive_statistics: a boolean flag whether to compute descriptive statistics (min,max,mean etc) for the featuregroup
:feature_correlation: a boolean flag whether to compute a feature correlation matrix for the numeric columns in the featuregroup
:feature_histograms: a boolean flag whether to compute histograms for the numeric columns in the featuregroup
:cluster_analysis: a boolean flag whether to compute cluster analysis for the numeric columns in the featuregroup
:stat_columns: a list of columns to compute statistics for (defaults to all columns that are numeric)
:num_bins: number of bins to use for computing histograms
:num_clusters: number of clusters to use for cluster analysis
:corr_method: the method to compute feature correlation with (pearson or spearman)
Returns:
None
"""
try:
spark_df = _convert_dataframe_to_spark(df)
except Exception as e:
raise AssertionError("Could not convert the provided dataframe to a spark dataframe which is required in order to save it to the Feature Store, error: {}".format(str(e)))
if featurestore is None:
featurestore = project_featurestore()
feature_corr_data, featuregroup_desc_stats_data, features_histogram_data, cluster_analysis_data = _compute_dataframe_stats(
featuregroup, spark_df=spark_df, version=featuregroup_version, featurestore=featurestore,
descriptive_statistics=descriptive_statistics, feature_correlation=feature_correlation,
feature_histograms=feature_histograms, cluster_analysis=cluster_analysis, stat_columns=stat_columns,
num_bins=num_bins, corr_method=corr_method,
num_clusters=num_clusters)
_write_featuregroup_hive(spark_df, featuregroup, featurestore, featuregroup_version, mode)
_update_featuregroup_stats_rest(featuregroup, featurestore, featuregroup_version, feature_corr_data,
featuregroup_desc_stats_data, features_histogram_data, cluster_analysis_data)
def _convert_spark_dtype_to_hive_dtype(spark_dtype):
"""
Helper function to convert a spark data type into a hive datatype
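Example usage (illustrative; the exact type strings returned are defined in the constants module):
>>> _convert_spark_dtype_to_hive_dtype("long")    # e.g. 'BIGINT'
>>> _convert_spark_dtype_to_hive_dtype("string")  # e.g. 'STRING'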
Args:
:spark_dtype: the spark datatype to convert
Returns:
the hive datatype or None
"""
if type(spark_dtype) is dict:
if spark_dtype[constants.SPARK_CONFIG.SPARK_SCHEMA_FIELD_TYPE].lower() == constants.SPARK_CONFIG.SPARK_ARRAY:
return spark_dtype[constants.SPARK_CONFIG.SPARK_SCHEMA_FIELD_TYPE] + "<" + _convert_spark_dtype_to_hive_dtype(spark_dtype[constants.SPARK_CONFIG.SPARK_SCHEMA_ELEMENT_TYPE]) + ">"
if spark_dtype[constants.SPARK_CONFIG.SPARK_SCHEMA_FIELD_TYPE].lower() == constants.SPARK_CONFIG.SPARK_STRUCT:
struct_nested_fields = list(map(
lambda field: field[constants.SPARK_CONFIG.SPARK_SCHEMA_FIELD_NAME] +
constants.DELIMITERS.COLON_DELIMITER + _convert_spark_dtype_to_hive_dtype(field[constants.SPARK_CONFIG.SPARK_SCHEMA_FIELD_TYPE]),
spark_dtype[constants.SPARK_CONFIG.SPARK_SCHEMA_FIELDS]))
return spark_dtype[constants.SPARK_CONFIG.SPARK_SCHEMA_FIELD_TYPE] + "<" + constants.DELIMITERS.COMMA_DELIMITER.join(struct_nested_fields) + ">"
if spark_dtype.upper() in constants.HIVE_CONFIG.HIVE_DATA_TYPES:
return spark_dtype.upper()
if spark_dtype.lower() == constants.SPARK_CONFIG.SPARK_LONG_TYPE:
return constants.HIVE_CONFIG.HIVE_BIGINT_TYPE
if spark_dtype.lower() == constants.SPARK_CONFIG.SPARK_SHORT_TYPE:
return constants.HIVE_CONFIG.HIVE_INT_TYPE
if spark_dtype.lower() == constants.SPARK_CONFIG.SPARK_BYTE_TYPE:
return constants.HIVE_CONFIG.HIVE_CHAR_TYPE
if spark_dtype.lower() == constants.SPARK_CONFIG.SPARK_INTEGER_TYPE:
return constants.HIVE_CONFIG.HIVE_INT_TYPE
if constants.SPARK_CONFIG.SPARK_DECIMAL_TYPE in spark_dtype.lower():
return spark_dtype
raise AssertionError("Dataframe data type: {} not recognized.".format(spark_dtype))
def _convert_field_to_feature(field_dict, primary_key):
"""
Helper function that converts a field in a spark dataframe to a feature dict that is compatible with the
featurestore API
Args:
:field_dict: the dict of spark field to convert
:primary_key: name of the primary key feature
Returns:
a feature dict that is compatible with the featurestore API
"""
f_name = field_dict[constants.SPARK_CONFIG.SPARK_SCHEMA_FIELD_NAME]
f_type = _convert_spark_dtype_to_hive_dtype(field_dict[constants.SPARK_CONFIG.SPARK_SCHEMA_FIELD_TYPE])
f_desc = ""
if (f_name == primary_key):
f_primary = True
else:
f_primary = False
if constants.REST_CONFIG.JSON_FEATURE_DESCRIPTION in field_dict[constants.SPARK_CONFIG.SPARK_SCHEMA_FIELD_METADATA]:
f_desc = field_dict[constants.SPARK_CONFIG.SPARK_SCHEMA_FIELD_METADATA][
constants.REST_CONFIG.JSON_FEATURE_DESCRIPTION]
if f_desc == "":
f_desc = "-" # comment must be non-empty
return {
constants.REST_CONFIG.JSON_FEATURE_NAME: f_name,
constants.REST_CONFIG.JSON_FEATURE_TYPE: f_type,
constants.REST_CONFIG.JSON_FEATURE_DESCRIPTION: f_desc,
constants.REST_CONFIG.JSON_FEATURE_PRIMARY: f_primary
}
def _parse_spark_features_schema(spark_schema, primary_key):
"""
Helper function for parsing the schema of a spark dataframe into a list of feature-dicts
Args:
:spark_schema: the spark schema to parse
:primary_key: the column in the dataframe that should be the primary key
Returns:
A list of the parsed features
"""
raw_schema = json.loads(spark_schema.json())
raw_fields = raw_schema[constants.SPARK_CONFIG.SPARK_SCHEMA_FIELDS]
parsed_features = list(map(lambda field: _convert_field_to_feature(field, primary_key), raw_fields))
return parsed_features
def _compute_corr_matrix(spark_df, corr_method='pearson'):
"""
A helper function for computing a correlation matrix of a spark dataframe (works only with numeric columns).
The correlation matrix represents the pair correlation of all the variables. By default the method will use
Pearson correlation (a measure of the linear correlation between two variables X and Y,
it has a value between +1 and -1, where 1 is total positive linear correlation,
0 is no linear correlation, and -1 is total negative linear correlation).
The correlation matrix is computed with Spark.
Args:
:spark_df: the spark dataframe to compute the correlation matrix for
:corr_method: the correlation method, defaults to pearson (spearman supported as well)
Returns:
a pandas dataframe with the correlation matrix
"""
numeric_columns = spark_df.dtypes
if (len(numeric_columns) == 0):
raise AssertionError("The provided spark dataframe does not contain any numeric columns. " \
"Cannot compute feature correlation on categorical columns. The numeric datatypes are: {}" \
" and the number of numeric datatypes in the dataframe is: {} ({})".format(
constants.SPARK_CONFIG.SPARK_NUMERIC_TYPES, len(spark_df.dtypes), spark_df.dtypes))
if (len(numeric_columns) == 1):
raise AssertionError("The provided spark dataframe only contains one numeric column. " \
"Cannot compute feature correlation on just one column. The numeric datatypes are: {}" \
"and the number of numeric datatypes in the dataframe is: {} ({})".format(
constants.SPARK_CONFIG.SPARK_NUMERIC_TYPES, len(spark_df.dtypes), spark_df.dtypes))
if (len(numeric_columns) > constants.FEATURE_STORE.MAX_CORRELATION_MATRIX_COLUMNS):
raise AssertionError(
"The provided dataframe contains {} columns, feature correlation can only be computed for dataframes with < {} columns due to scalability reasons (number of correlatons grows quadratically with the number of columns)".format(
len(numeric_columns), constants.FEATURE_STORE.MAX_CORRELATION_MATRIX_COLUMNS))
spark_df_rdd = spark_df.rdd.map(lambda row: row[0:])
corr_mat = Statistics.corr(spark_df_rdd, method=corr_method)
pd_df_corr_mat = pd.DataFrame(corr_mat, columns=spark_df.columns, index=spark_df.columns)
return pd_df_corr_mat
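# Example usage (illustrative sketch; assumes `numeric_df` is a spark dataframe containing only numeric columns):
#   corr_df = _compute_corr_matrix(numeric_df, corr_method="spearman")
#   corr_df.loc["avg_trx", "max_trx"]  # pairwise correlation between two hypothetical features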
import pandas as pd
HDNames= ['Cement','BFS','FLA','Water','SP','CA','FA','Age','CCS']
Data = pd.read_excel('ConcreteData.xlsx', names=HDNames)
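# A minimal illustrative continuation (an assumption: 'CCS', the concrete compressive strength,
# is treated as the prediction target and the remaining columns as inputs):
print(Data.shape)
print(Data.describe())
X = Data.drop('CCS', axis=1)
y = Data['CCS']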
import logging
import time
from contextlib import contextmanager
import numpy as np
import pandas as pd
import scipy.stats
from openml import datasets, runs
from sklearn.model_selection import train_test_split
logger = logging.getLogger("dashboard")
logger.setLevel(logging.DEBUG)
def get_run_df(run_id: int):
run = runs.get_run(int(run_id), ignore_cache=True)
df = pd.DataFrame(run.fold_evaluations.items(), columns=["evaluations", "results"])
# Evaluations table
result_list = []
result_string = []
for result in df["results"]:
k_folds = list(result[0].values())
mean = str(np.round(np.mean(np.array(k_folds)), 3))
std = str(np.round(np.std(np.array(k_folds)), 3))
result_list.append(k_folds)
result_string.append(mean + " \u00B1 " + std)
df.drop(["results"], axis=1, inplace=True)
df["results"] = result_list
df["values"] = result_string
# Add some more rows indicating output prediction file name
df2 = pd.DataFrame(run.output_files.items(), columns=["evaluations", "results"])
df2["values"] = ""
df3 = pd.DataFrame(
{"task_type": run.task_type}.items(), columns=["evaluations", "results"]
)
df2["values"] = ""
df = df.append(df2)
df = df.append(df3)
df.to_pickle("cache/run" + str(run_id) + ".pkl")
return run, df
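# Example usage (illustrative; the run id and the evaluation name are hypothetical):
#   run, df = get_run_df(10464400)
#   df[df["evaluations"] == "predictive_accuracy"]["values"]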
def clean_dataset(df):
df = df.loc[:, df.isnull().mean() < 0.8]
out = df.fillna(df.mode().iloc[0])
return out
def get_metadata(data_id: int):
data = datasets.get_dataset(data_id, download_data=False)
features = pd.DataFrame(
[vars(data.features[i]) for i in range(0, len(data.features))]
)
is_target = [
"true" if name == data.default_target_attribute else "false"
for name in features["name"]
]
features["Target"] = is_target
# Extract #categories
size = [
str(len(value)) if value is not None else " "
for value in features["nominal_values"]
]
features["nominal_values"].replace({None: " "}, inplace=True)
features["# categories"] = size
# choose features to be displayed
meta_features = features[
["name", "data_type", "number_missing_values", "# categories", "Target"]
]
meta_features.rename(
columns={
"name": "Attribute",
"data_type": "DataType",
"number_missing_values": "Missing values",
},
inplace=True,
)
meta_features.sort_values(by="Target", ascending=False, inplace=True)
if meta_features.shape[0] > 1000:
meta_features = meta_features[:1000]
return meta_features, data, (vars(data)["name"])
def get_data_metadata(data_id):
"""Download the dataset and get metadata
:param data_id: ID of the OpenML dataset
:return: dataframe of the data, meta-features dataframe, list of numerical feature names, list of nominal feature names
"""
# Get data in pandas df format
start = time.time()
meta_features, data, _ = get_metadata(data_id)
x, y, categorical, attribute_names = data.get_data()
df = pd.DataFrame(x, columns=attribute_names)
if x.shape[0] < 50000:
df.to_pickle("cache/df" + str(data_id) + ".pkl")
else:
# create a subsample of data for large datasets
try:
target_feat = meta_features[meta_features["Target"] == "true"][
"Attribute"
].values[0]
except IndexError:
target_feat = None
pass
if x.shape[0] >= 50000 and target_feat:
df = clean_dataset(df)
if x.shape[0] < 100000:
sample_size = 0.5
elif 100000 <= x.shape[0] < 500000:
sample_size = 0.25
elif 500000 <= x.shape[0] < 1e6:
sample_size = 0.1
else:
sample_size = 0.05
x = df.drop(target_feat, axis=1)
y = df[target_feat]
try:
X_train, X_test, y_train, y_test = train_test_split(
x, y, stratify=y, test_size=sample_size
)
except ValueError:
X_train, X_test, y_train, y_test = train_test_split(
x, y, stratify=None, test_size=sample_size
)
x = X_test
x[target_feat] = y_test
df = pd.DataFrame(x, columns=attribute_names)
df.to_pickle("cache/df" + str(data_id) + ".pkl")
else:
df.to_pickle("cache/df" + str(data_id) + ".pkl")
meta_features = meta_features[
meta_features["Attribute"].isin(pd.Series(df.columns))
]
# Add entropy
numerical_features = list(
meta_features["Attribute"][meta_features["DataType"] == "numeric"]
)
nominal_features = list(
meta_features["Attribute"][meta_features["DataType"] == "nominal"]
)
entropy = []
for column in meta_features["Attribute"]:
if column in nominal_features:
count = df[column].value_counts()
ent = round(scipy.stats.entropy(count), 2)
entropy.append(ent)
else:
entropy.append(" ")
meta_features["Entropy"] = entropy
meta_features["Target"].replace({"false": " "}, inplace=True)
end = time.time()
logger.debug("time taken download data and find entropy " + str(end - start))
return df, meta_features, numerical_features, nominal_features
def get_highest_rank(df, leaderboard):
df.sort_values(by=["upload_time"], inplace=True)
scores = []
# highest_rank = {}
highest_score = {}
setup_ids = []
for index, row in df.iterrows():
users = list(highest_score.keys())
new_user = row["uploader_name"] not in users
if row["setup_id"] not in setup_ids or new_user:
setup_ids.append(row["setup_id"])
score = row["value"]
if new_user or (score not in scores):
scores.append(score)
scores.sort(reverse=True)
# rank = scores.index(score) + 1
if new_user or (highest_score[row["uploader_name"]] < score):
# highest_rank[row['uploader_name']] = rank
highest_score[row["uploader_name"]] = score
# if highest_rank[row['uploader_name']] > row['Rank']:
# highest_rank[row['uploader_name']] = row['Rank']
# leaderboard['highest_rank'] = list(highest_rank.values())
leaderboard["Top Score"] = list(highest_score.values())
return leaderboard
def splitDataFrameList(df, target_column):
"""df = dataframe to split,
target_column = the column containing the list of values to split
returns: a dataframe with each entry for the target column separated,
with each element moved into a new row.
The values in the other columns are duplicated across the newly divided rows.
"""
def splitListToRows(row, row_accumulator, target_column):
split_row = row[target_column]
for s in split_row:
new_row = row.to_dict()
new_row[target_column] = s
row_accumulator.append(new_row)
new_rows = []
df.apply(splitListToRows, axis=1, args=(new_rows, target_column))
new_df = pd.DataFrame(new_rows)
return new_df
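# Example usage (illustrative):
#   df = pd.DataFrame({"run_id": [1, 2], "flows": [["A", "B"], ["C"]]})
#   splitDataFrameList(df, "flows")
#   # -> three rows: (1, "A"), (1, "B"), (2, "C")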
import datetime as dt
import matplotlib.pyplot as plt
import numpy as np
import os
class HobsHeader(object):
sim_head = '"SIMULATED EQUIVALENT"'
obs_head = '"OBSERVED VALUE"'
obs_name = '"OBSERVATION NAME"'
date = 'DATE'
dyear = 'DECIMAL_YEAR'
header = {sim_head: None,
obs_head: None,
obs_name: None,
date: None,
dyear: None}
class HobsOut(dict):
"""
Reads output data from Hobs file and prepares it for post processing.
Class sets observations to an ordered dictionary based on observation name
If observation name is consistant for a site, a time series is created
for plotting!
Parameters
----------
filename : str
hobs filename
strip_after : str
flag to indicate a character to strip the hobs label after for
grouping wells.
Example: OBS_1
OBS_2
strip_after could be set to "_" and then all OBS observations will
be stored under the OBS key. This is extremely useful for plotting
and calculating statistics
"""
def __init__(self, filename, strip_after=""):
super(HobsOut, self).__init__()
self.name = filename
self._strip_after = strip_after
self._dataframe = None
self.__read_hobs_output()
def __read_hobs_output(self):
"""
Method to read a hobs output file. Dynamically sets header information
and reads associated values.
Sets values to HobsOut dictionary
"""
with open(self.name) as hobout:
for ix, line in enumerate(hobout):
if ix == 0:
self.__set_header(line)
else:
self.__set_dictionary_values(line)
def __set_dictionary_values(self, line):
"""
Method to set incoming hobs line to dictionary data values
Args:
line: (str)
"""
t = line.strip().split()
obsname = t[HobsHeader.header[HobsHeader.obs_name]]
dict_name = obsname
if self._strip_after:
dict_name = obsname.split(self._strip_after)[0]
simval = float(t[HobsHeader.header[HobsHeader.sim_head]])
obsval = float(t[HobsHeader.header[HobsHeader.obs_head]])
residual = simval - obsval
date = self.__set_datetime_object(t[HobsHeader.header[HobsHeader.date]])
decimal_date = float(t[HobsHeader.header[HobsHeader.dyear]])
if dict_name in self:
self[dict_name]['simval'].append(simval)
self[dict_name]['obsval'].append(obsval)
self[dict_name]['date'].append(date)
self[dict_name]['decimal_date'].append(decimal_date)
self[dict_name]['residual'].append(residual)
self[dict_name]["obsname"].append(obsname)
else:
self[dict_name] = {"obsname": [obsname], "date": [date],
"decimal_date": [decimal_date],
"simval": [simval], "obsval": [obsval],
"residual": [residual]}
def __set_header(self, line):
"""
Reads header line and sets header index
Parameters
----------
line : str
first line of the HOB file
"""
n = 0
s = ""
for i in line:
s += i
if s in HobsHeader.header:
HobsHeader.header[s] = n
n += 1
s = ""
elif s in (" ", "\t", "\n"):
s = ""
else:
pass
for key, value in HobsHeader.header.items():
if value is None:
raise AssertionError("HobsHeader headings must be updated")
def __set_datetime_object(self, s):
"""
Reformats a string of YYYY-mm-dd to a datetime object
Parameters
----------
s : str
string of YYYY-mm-dd
Returns
-------
datetime.date
"""
return dt.datetime.strptime(s, "%Y-%m-%d")
def __get_date_string(self, date):
"""
Parameters
----------
date: datetime.datetime object
Returns
-------
string
"""
return date.strftime("%Y/%m/%d")
@property
def obsnames(self):
"""
Return a list of obsnames from the HobsOut dictionary
"""
return self.keys()
def to_dataframe(self):
"""
Method to get a pandas dataframe object of the
HOBs data.
Returns
-------
pd.DataFrame
"""
import pandas as pd
if self._dataframe is None:
df = None
for hobsname, d in self.items():
t = pd.DataFrame(d)
if df is None:
df = t
else:
                    df = pd.concat([df, t], ignore_index=True)
            self._dataframe = df
        return self._dataframe
# Import all the required libraries: numpy, pandas and math
from graphlib.financialGraph import Data
import numpy as np
import pandas as pd
from pandas import DataFrame , Series
import math
# All the indicators are defined and arranged in Alphabetical order
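# NOTE (usage sketch, not part of the original library): every indicator below
# expects an OHLCV pandas DataFrame with lower-case column names, e.g.
#
#   ohlc = pd.DataFrame({
#       "open":   [1.0, 1.1, 1.2],
#       "high":   [1.2, 1.3, 1.4],
#       "low":    [0.9, 1.0, 1.1],
#       "close":  [1.1, 1.2, 1.3],
#       "volume": [100, 120, 90],
#   })
#   atr(ohlc, period=14)
#
# The column names "open", "high", "low", "close" and "volume" are inferred
# from the functions below; the sample values above are illustrative only.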
# ------------------> A <------------------------
# [0] __ Average True Range (ATR)
# Moving Average of True Range(TR)
def atr(data: DataFrame, period: int = 14) -> Series:
TR = tr(data)
return pd.Series(
TR.rolling(center=False, window=period,
min_periods=1).mean(),
name=f'{period} ATR'
)
# [0] __ Adaptive Price Zone (APZ)
# TODO
def apz(data: DataFrame,period: int = 21,dev_factor: int = 2,
MA: Series = None,adjust: bool = True,) -> DataFrame:
if not isinstance(MA, pd.Series):
MA = dema(data, period)
price_range = pd.Series(
(data["high"] - data["low"]).ewm(span=period, adjust=adjust).mean()
)
volatility_value = pd.Series(
price_range.ewm(span=period, adjust=adjust).mean(), name="vol_val"
)
upper_band = pd.Series((volatility_value * dev_factor) + MA, name="UPPER")
lower_band = pd.Series(MA - (volatility_value * dev_factor), name="LOWER")
return pd.concat([upper_band, lower_band], axis=1)
# ------------------> B <------------------------
# [0] __ Bollinger Bands (BBANDS)
# TODO
def bbands(data: DataFrame,period: int = 20,MA: Series = None,
column: str = "close",std_multiplier: float = 2,) -> DataFrame:
std = data[column].rolling(window=period).std()
if not isinstance(MA, pd.core.series.Series):
middle_band = pd.Series(sma(data, period), name="BB_MIDDLE")
else:
middle_band = pd.Series(MA, name="BB_MIDDLE")
upper_bb = pd.Series(middle_band + (std_multiplier * std), name="BB_UPPER")
lower_bb = pd.Series(middle_band - (std_multiplier * std), name="BB_LOWER")
return pd.concat([upper_bb, middle_band, lower_bb], axis=1)
# [0] __ Bollinger Bands Width (BBWidth)
# TODO
def bbwidth(
data: DataFrame, period: int = 20, MA: Series = None, column: str = "close"
) -> Series:
BB = bbands(data, period, MA, column)
return pd.Series(
(BB["BB_UPPER"] - BB["BB_LOWER"]) / BB["BB_MIDDLE"],
name="{0} period BBWITH".format(period),
)
# ------------------> D <------------------------
# [0] __ Double Exponential Moving Average (DEMA)
# 2 * EWMA - ewm(EWMA)
def dema(data : DataFrame,period: int = 10,column: str ='close',adjust: bool = True) -> Series:
DEMA = (
2*ema(data,period) - ema(data,period).ewm(span=period , adjust=adjust).mean()
)
return pd.Series(
DEMA ,
name = f'{period}_DEMA'
)
# [0] __ Directional Movement Index (DMI)
# TODO
def dmi(data: DataFrame, column: str = "close", adjust: bool = True) -> Series:
def _get_time(close):
sd = close.rolling(5).std()
asd = sd.rolling(10).mean()
v = sd / asd
t = 14 / v.round()
t[t.isna()] = 0
t = t.map(lambda x: int(min(max(x, 5), 30)))
return t
def _dmi(index):
time = t.iloc[index]
if (index - time) < 0:
subset = data.iloc[0:index]
else:
subset = data.iloc[(index - time) : index]
return rsi(subset, period=time, adjust=adjust).values[-1]
dates = Series(data.index)
periods = Series(range(14, len(dates)), index=dates.index[14:].values)
t = _get_time(data[column])
return periods.map(lambda x: _dmi(x))
# ------------------> E <------------------------
# [0] __ Exponential Weighted Moving Average (EWMA) or Exponential Moving Average(EMA)
# Exponential average of prev n day prices
def ema(data : DataFrame,period: int = 10,column: str ='close',adjust: bool = True) -> Series:
return pd.Series(
data[column].ewm(span=period, adjust=adjust).mean(),
name = f'{period}_EMA'
)
# [0] __ Kaufman Efficiency indicator (KER) or (ER)
# Change in price divided by volatility, where both the change and the volatility are absolute values
def er(data : DataFrame,period: int = 10,column: str ='close') -> Series:
change = data[column].diff(period).abs()
volatility = data[column].diff().abs().rolling(window=period,min_periods=1).sum()
return pd.Series(change / volatility,
name=f'{period}_ER'
)
# [0] __ TODO (EVSTC)
# TODO
def evstc(data: DataFrame,period_fast: int = 12,period_slow: int = 30,
k_period: int = 10,d_period: int = 3,adjust: bool = True) -> Series:
ema_slow = evwma(data, period_slow)
ema_fast = evwma(data, period_fast)
macd = ema_fast - ema_slow
STOK = pd.Series((
(macd - macd.rolling(window=k_period).min())
/ (macd.rolling(window=k_period).max() - macd.rolling(window=k_period).min())
) * 100)
STOD = STOK.rolling(window=d_period).mean()
STOD_DoubleSmooth = STOD.rolling(window=d_period).mean()
return pd.Series(STOD_DoubleSmooth, name="{0} period EVSTC".format(k_period))
# [0] __ Elastic Volume Weighted Moving Average (EVWMA)
# x is ((volume sum for n period) - volume ) divided by (volume sum for n period)
# y is volume * close / (volume sum for n period)
def evwma(data, period: int = 20) -> Series:
vol_sum = (data["volume"].rolling(window=period,min_periods=1).sum())
x = (vol_sum - data["volume"]) / vol_sum
y = (data["volume"] * data["close"]) / vol_sum
evwma = [0]
for x, y in zip(x.fillna(0).iteritems(), y.iteritems()):
if x[1] == 0 or y[1] == 0:
evwma.append(0)
else:
evwma.append(evwma[-1] * x[1] + y[1])
return pd.Series(
evwma[1:], index=data.index,
name=f'{period}_EVWMA'
)
# [0] __ Elastic Volume Weighted Moving average convergence divergence (EV_MACD)
# MACD calculation on basis of Elastic Volume Weighted Moving average (EVWMA)
def ev_macd(data: DataFrame,period_fast: int = 20,period_slow: int = 40,
signal: int = 9,adjust: bool = True,) -> DataFrame:
evwma_slow = evwma(data, period_slow)
evwma_fast = evwma(data, period_fast)
MACD = pd.Series(evwma_fast - evwma_slow, name="EV MACD")
MACD_signal = pd.Series(
MACD.ewm(ignore_na=False, span=signal, adjust=adjust).mean(), name="SIGNAL"
)
return pd.concat([MACD, MACD_signal], axis=1)
# ------------------> F <------------------------
# [0] __ Fisher Transform
# TODO
def fish(data: DataFrame, period: int = 10, adjust: bool = True) -> Series:
from numpy import log, seterr
seterr(divide="ignore")
med = (data["high"] + data["low"]) / 2
ndaylow = med.rolling(window=period).min()
ndayhigh = med.rolling(window=period).max()
raw = (2 * ((med - ndaylow) / (ndayhigh - ndaylow))) - 1
smooth = raw.ewm(span=5, adjust=adjust).mean()
_smooth = smooth.fillna(0)
return pd.Series(
(log((1 + _smooth) / (1 - _smooth))).ewm(span=3, adjust=adjust).mean(),
name="{0} period FISH.".format(period),
)
# [0] __ Fractal Adaptive Moving Average (FRAMA)
# TODO
def FRAMA(data: DataFrame, period: int = 16, batch: int=10) -> Series:
    assert period % 2 == 0, "FRAMA period must be even"
c = data.close.copy()
window = batch * 2
hh = c.rolling(batch).max()
ll = c.rolling(batch).min()
n1 = (hh - ll) / batch
n2 = n1.shift(batch)
hh2 = c.rolling(window).max()
ll2 = c.rolling(window).min()
n3 = (hh2 - ll2) / window
# calculate fractal dimension
D = (np.log(n1 + n2) - np.log(n3)) / np.log(2)
alp = np.exp(-4.6 * (D - 1))
alp = np.clip(alp, .01, 1).values
filt = c.values
for i, x in enumerate(alp):
cl = c.values[i]
if i < window:
continue
filt[i] = cl * x + (1 - x) * filt[i - 1]
return pd.Series(filt, index=data.index,
name= f'{period} FRAMA'
)
# [0] __ Finite Volume Element (FVE)
# TODO
def fve(data: DataFrame, period: int = 22, factor: float = 0.3) -> Series:
hl2 = (data["high"] + data["low"]) / 2
tp_ = tp(data)
smav = data["volume"].rolling(window=period).mean()
mf = pd.Series((data["close"] - hl2 + tp_.diff()), name="mf")
_mf = pd.concat([data["close"], data["volume"], mf], axis=1)
def vol_shift(row):
if row["mf"] > factor * row["close"] / 100:
return row["volume"]
elif row["mf"] < -factor * row["close"] / 100:
return -row["volume"]
else:
return 0
_mf["vol_shift"] = _mf.apply(vol_shift, axis=1)
_sum = _mf["vol_shift"].rolling(window=period).sum()
return pd.Series((_sum / smav) / period * 100)
# ------------------> H <------------------------
# [0] __ Hull Moving Average (HMA)
# wma of change in wma where change in wma is 2 * (wma half period) - (wma full period)
def hma(data, period: int = 16) -> Series:
half_length = int(period / 2)
sqrt_length = int(math.sqrt(period))
wmaf = wma(data, period=half_length)
wmas = wma(data, period=period)
data["deltawma"] = 2 * wmaf - wmas
hma = wma(data, column="deltawma", period=sqrt_length)
return pd.Series(hma, name=f'{period}_HMA')
# ------------------> I <------------------------
# [0] __ Ichimoku Cloud
# TODO
def ichimoku(data: DataFrame,tenkan_period: int = 9,kijun_period: int = 26,
senkou_period: int = 52,chikou_period: int = 26,) -> DataFrame:
tenkan_sen = pd.Series(
(
data["high"].rolling(window=tenkan_period).max()
+ data["low"].rolling(window=tenkan_period).min()
)
/ 2,
name="TENKAN",
) ## conversion line
kijun_sen = pd.Series(
(
data["high"].rolling(window=kijun_period).max()
+ data["low"].rolling(window=kijun_period).min()
)
/ 2,
name="KIJUN",
) ## base line
senkou_span_a = pd.Series(
((tenkan_sen + kijun_sen) / 2), name="senkou_span_a"
) .shift(kijun_period) ## Leading span
senkou_span_b = pd.Series(
(
(
data["high"].rolling(window=senkou_period).max()
+ data["low"].rolling(window=senkou_period).min()
)
/ 2
),
name="SENKOU",
).shift(kijun_period)
chikou_span = pd.Series(
data["close"].shift(-chikou_period),
name="CHIKOU",
)
return pd.concat(
[tenkan_sen, kijun_sen, senkou_span_a, senkou_span_b, chikou_span], axis=1
)
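# Usage sketch (illustrative only): ichimoku(ohlc) returns a DataFrame with the
# columns 'TENKAN', 'KIJUN', 'senkou_span_a', 'SENKOU' and 'CHIKOU' built above.
#
#   cloud = ichimoku(ohlc)
#   in_cloud = ohlc['close'].between(cloud[['senkou_span_a', 'SENKOU']].min(axis=1),
#                                    cloud[['senkou_span_a', 'SENKOU']].max(axis=1))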
# [0] __ Inverse Fisher Transform (IFTRSI)
# TODO
def ift_rsi(data: DataFrame,column: str = "close",rsi_period: int = 5,
wma_period: int = 9,) -> Series:
v1 = pd.Series(0.1 * (rsi(data, rsi_period) - 50), name="v1")
d = (wma_period * (wma_period + 1)) / 2
weights = np.arange(1, wma_period + 1)
def linear(w):
def _compute(x):
return (w * x).sum() / d
return _compute
_wma = v1.rolling(wma_period, min_periods=wma_period)
v2 = _wma.apply(linear(weights), raw=True)
return pd.Series(
((v2 ** 2 - 1) / (v2 ** 2 + 1)),
name="IFT_RSI"
)
# ------------------> K <------------------------
# [0] __ Kaufman's Adaptive Moving Average (KAMA)
# first KAMA is SMA
# Current KAMA = Previous KAMA + smoothing_constant * (Price - Previous KAMA)
def kama(data,er_: int = 10,ema_fast: int = 2,
ema_slow: int = 30,period: int = 20,
column: str ='close') -> Series:
    er_ = er(data, er_, column)
fast_alpha = 2 / (ema_fast + 1)
slow_alpha = 2 / (ema_slow + 1)
sc = pd.Series(
(er_ * (fast_alpha - slow_alpha) + slow_alpha) ** 2,
name="smoothing_constant",
)
sma = pd.Series(
data[column].rolling(period).mean(), name="SMA"
)
kama = []
for s, ma, price in zip(
sc.iteritems(), sma.shift().iteritems(), data[column].iteritems()
):
try:
kama.append(kama[-1] + s[1] * (price[1] - kama[-1]))
except (IndexError, TypeError):
if pd.notnull(ma[1]):
kama.append(ma[1] + s[1] * (price[1] - ma[1]))
else:
kama.append(None)
sma["KAMA"] = pd.Series(
kama, index=sma.index, name=f'{period}_KAMA')
return sma['KAMA']
# [0] __ Keltner Channels (KC)
# TODO
def kc(ohlc: DataFrame,period: int = 20,atr_period: int = 10,
MA: Series = None,kc_mult: float = 2,) -> DataFrame:
if not isinstance(MA, pd.core.series.Series):
middle = pd.Series(ema(ohlc, period), name="KC_MIDDLE")
else:
middle = pd.Series(MA, name="KC_MIDDLE")
up = pd.Series(middle + (kc_mult * atr(ohlc, atr_period)), name="KC_UPPER")
down = pd.Series(
middle - (kc_mult * atr(ohlc, atr_period)), name="KC_LOWER"
)
return pd.concat([up, down], axis=1)
# ------------------> M <------------------------
# [0] __ Moving average convergence divergence (MACD)
# MACD is Difference of ema fast and ema slow
# Here fast period is 12 and slow period is 26
# MACD Signal is ewm of MACD
def macd(data,period_fast: int = 12,period_slow: int = 26,
signal: int = 9,column: str = "close",adjust: bool = True
) -> DataFrame:
EMA_fast = pd.Series(
data[column].ewm(ignore_na=False, span=period_fast, adjust=adjust).mean(),
name=f'{period_fast}_EMA_fast')
EMA_slow = pd.Series(
data[column].ewm(ignore_na=False, span=period_slow, adjust=adjust).mean(),
name=f'{period_slow}_EMA_slow')
MACD = pd.Series(EMA_fast - EMA_slow,name='MACD')
MACD_signal = pd.Series(
MACD.ewm(ignore_na=False, span=signal, adjust=adjust).mean(),name=f'{signal}_SIGNAL'
)
DIFF = pd.Series(
MACD - MACD_signal,
name="diff MACD_MSIGNAL"
)
return pd.concat(
[DIFF, MACD, MACD_signal ],
axis=1
)
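# Usage sketch (illustrative only): calling macd() on an OHLCV DataFrame named
# `ohlc` (a placeholder) returns a DataFrame with the columns
# 'diff MACD_MSIGNAL', 'MACD' and '9_SIGNAL' produced above, e.g.
#
#   macd_df = macd(ohlc, period_fast=12, period_slow=26, signal=9)
#   bullish = macd_df['diff MACD_MSIGNAL'] > 0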
# [0] __ Moving Standard Deviation (MSD)
# Standard deviation of a given period for the column passed as arguement
def msd(data: DataFrame, period: int = 21, column: str = "close") -> Series:
return pd.Series(data[column].rolling(period).std(), name="MSD")
# Momentum Breakout Bands (MOBO)
# TODO
def mobo(data: DataFrame,period: int = 10,std_multiplier: float = 0.8,
column: str = "close",) -> DataFrame:
    BB = bbands(data, period=period, std_multiplier=std_multiplier, column=column)
return BB
# [0] __ Market momentum (MOM)
def mom(data: DataFrame, period: int = 10, column: str = "close") -> Series:
return pd.Series(data[column].diff(period),
name=f'{period}_MOM'
)
# [0] __ Moving Volume Weighted Average Price (MVWAP)
# SMA of (close * volume ) divided by SMA of volume
def mvwap(data: DataFrame, period:int = 9) -> Series:
data["cv"] =(data["close"] * data["volume"])
return pd.Series(
(sma(data,period = period,column = "cv")/sma(data,period=period,column="volume")),
name="MVWAP."
)
# ------------------> P <------------------------
# ------------|| Pivot ||------------------------
# [0] __ Pivot Camarilla
# TODO
def pivot_camarilla(data: DataFrame) -> DataFrame:
df_ = data.shift()
pivot = pd.Series(tp(df_), name="pivot")
    s1 = df_['close']-(1.1*(df_['high']-df_['low'])/12)
s2 = df_['close']-(1.1*(df_['high']-df_['low'])/6)
s3 = df_['close']-(1.1*(df_['high']-df_['low'])/4)
s4 =df_['close']-(1.1*(df_['high']-df_['low'])/2)
r1 = df_['close']+(1.1*(df_['high']-df_['low'])/12)
r2 = df_['close']+(1.1*(df_['high']-df_['low'])/6)
r3 =df_['close']+(1.1*(df_['high']-df_['low'])/4)
r4 = df_['close']+(1.1*(df_['high']-df_['low'])/2)
return pd.concat(
[
pivot,
pd.Series(s1, name="s1"),
pd.Series(s2, name="s2"),
pd.Series(s3, name="s3"),
pd.Series(s4, name="s4"),
pd.Series(r1, name="r1"),
pd.Series(r2, name="r2"),
pd.Series(r3, name="r3"),
pd.Series(r4, name="r4"),
],
axis=1,
)
# [0] __ Pivot Classic
# TODO
def pivot_classic(data: DataFrame) -> DataFrame:
df_ = data.shift()
pivot = pd.Series(tp(df_), name="pivot")
s1 = (pivot * 2) - df_["high"]
s2 = pivot - (df_["high"] - df_["low"])
s3 = pivot - 2*(df_["high"] - df_["low"])
s4 = pivot - 3*(df_["high"] - df_["low"])
r1 = (pivot * 2) - df_["low"]
r2 = pivot + (df_["high"] - df_["low"])
r3 = pivot + 2*(df_["high"] - df_["low"])
r4 = pivot + 3*(df_["high"] - df_["low"])
return pd.concat(
[
pivot,
pd.Series(s1, name="s1"),
pd.Series(s2, name="s2"),
pd.Series(s3, name="s3"),
pd.Series(s4, name="s4"),
pd.Series(r1, name="r1"),
pd.Series(r2, name="r2"),
pd.Series(r3, name="r3"),
pd.Series(r4, name="r4"),
],
axis=1,
)
# [0] __ Pivot Demark
# TODO
def pivot_demark(data: DataFrame) -> DataFrame:
df_ = data.shift()
pivot,s1,r1=[],[],[]
for i in range(len(df_)):
if df_['open'][i]==df_['close'][i]:
x=df_['high'][i]+df_['low'][i]+2*df_['close'][i]
elif df_['close'][i]>df_['open'][i]:
x=2*df_['high'][i]+df_['low'][i]+df_['close'][i]
else:
x=df_['high'][i]+2*df_['low'][i]+df_['close'][i]
pivot.append(x/4)
s1.append(x/2 - df_["high"][i])
r1.append(x/2 - df_["low"][i])
data_ = pd.DataFrame(pivot,columns=['pivot'])
data_['s1']=s1
data_['r1']=r1
return data_
# [0] __ Pivot Fibonacci
# TODO
def pivot_fibonacci(data: DataFrame) -> DataFrame:
df_ = data.shift()
pivot = pd.Series(tp(df_), name="pivot")
s1 = pivot - ((df_["high"] - df_["low"])*0.382)
s2 = pivot - ((df_["high"] - df_["low"])*0.618)
s3 = pivot - (df_["high"] - df_["low"])
    s4 = pivot - ((df_["high"] - df_["low"])*1.382)
r1 = pivot + ((df_["high"] - df_["low"])*0.382)
r2 = pivot + ((df_["high"] - df_["low"])*0.618)
r3 =pivot + (df_["high"] - df_["low"])
r4 = pivot + (df_["high"] - df_["low"])*1.382
return pd.concat(
[
pivot,
pd.Series(s1, name="s1"),
pd.Series(s2, name="s2"),
            pd.Series(s3, name="s3"),
            pd.Series(s4, name="s4"),
            pd.Series(r1, name="r1"),
            pd.Series(r2, name="r2"),
            pd.Series(r3, name="r3"),
            pd.Series(r4, name="r4"),
        ],
        axis=1,
    )
import os
import sys
import json
import datetime
import numpy as np
import pandas as pd
import statistics
import cv2
import skimage.draw
import tensorflow as tf
import keras
import time
import glob
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import confusion_matrix, accuracy_score
from sklearn.model_selection import RandomizedSearchCV
import joblib
from .mrcnn.config import Config
from .mrcnn import model as modellib, utils
from .mask_saving import Masked_Image
from .Preprocessing_opt import PreprocessImages
class Sputum:
"""
Class Sputum contains all the necessary methods for the Mask RCNN Implementation,
including the training, inference and prediction. Then the Machine Learning Model
for evalutation and a scoring method for the final evaluation.
Parameters
----------
root_dir : str
Path to the root directory. In this directory the algorithm will be saving all the important files
such as coco weight file, then inside the log folder, the trained weight files will be saved after
every epoch. It is mandatory to set the root_dir.
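    Examples
    --------
    A hypothetical end-to-end sketch (all paths below are placeholders):

    >>> sp = Sputum(root_dir="./mrcnn_workspace")                      # doctest: +SKIP
    >>> sp.train_mask_rcnn(dataset_path="./dataset", weights="coco",
    ...                    epochs=10)                                  # doctest: +SKIP
    >>> sp.crop_maskrcnn("./mrcnn_workspace/logs", "./weights.h5",
    ...                  "./images", "sample.jpg", "./crops")          # doctest: +SKIP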
"""
def __init__(self,root_dir=None):
self.root_dir = root_dir
def train_mask_rcnn(self,dataset_path,weights,epochs):
"""
This function trains the mask rcnn model based on the dataset and the weights.
Parameters
----------
dataset_path : str
Path to the dataset. Inside this path there should be two folders: train and val.
Inside this train and val folders the annotation file with respect to it should be
saved as "train.json" only in both the folers.
weights : str, options= "coco" or path to the saved weight file
            Path to the weight file. If a pretrained weight file exists give that path, else give "coco",
            which will download the COCO weight file from the internet and save it inside the root directory.
epochs : int
Number of epochs for training.
Returns
-------
Weight Files
Trained weight files will be saved inside the root directory.
"""
# Root directory of the project
ROOT_DIR = os.path.abspath(self.root_dir)
print("Root Directory is: ",ROOT_DIR)
# Import Mask RCNN
sys.path.append(ROOT_DIR) # To find local version of the library
MODEL_DIR = os.path.join(ROOT_DIR, "logs")
print("Model Directory is: ",MODEL_DIR)
# Path to trained weights file
if weights=='coco':
COCO_WEIGHTS_PATH = os.path.join(ROOT_DIR, "mask_rcnn_coco.h5")
# Download COCO trained weights from Releases if needed
if not os.path.exists(COCO_WEIGHTS_PATH):
utils.download_trained_weights(COCO_WEIGHTS_PATH)
else:
COCO_WEIGHTS_PATH=weights
if not os.path.exists(COCO_WEIGHTS_PATH):
print("Invalid Path to weights file")
# Directory to save logs and model checkpoints, if not provided
DEFAULT_LOGS_DIR = os.path.join(ROOT_DIR, "logs")
############################################################
# Configurations
############################################################
class SputumConfig(Config):
"""Configuration for training on the hail, hand dataset.
Derives from the base Config class and overrides some values.
"""
# Give the configuration a recognizable name
NAME = "Sputum"
# We use a GPU with 12GB memory, which can fit two images.
# Adjust down if you use a smaller GPU.
IMAGES_PER_GPU = 1
# Number of classes (including background)
            NUM_CLASSES = 1 + 1  # Background + sputum
# Number of training steps per epoch
STEPS_PER_EPOCH = 100
# Skip detections with < 80% confidence
DETECTION_MIN_CONFIDENCE = 0.9
config = SputumConfig()
############################################################
# Dataset
############################################################
class SputumDataset(utils.Dataset):
def load_sputum(self, dataset_dir, subset):
"""Load a subset of the hail dataset.
dataset_dir: Root directory of the dataset.
subset: Subset to load: train or val
"""
                # Add classes. We have only one class to add.
self.add_class("Sputum", 1, "sputum")
# Train or validation dataset?
assert subset in ["train", "val"]
dataset_dir = os.path.join(dataset_dir, subset)
annotations = json.load(open(os.path.join(dataset_dir, "train.json")))
annotations = list(annotations.values()) # don't need the dict keys
# The VIA tool saves images in the JSON even if they don't have any
# annotations. Skip unannotated images.
annotations = [a for a in annotations if a['regions']]
# Add images
for a in annotations:
polygons = [r['shape_attributes'] for r in a['regions']]
objects = [s['region_attributes'] for s in a['regions']]
class_ids = [int(n['Sputum']) for n in objects]
image_path = os.path.join(dataset_dir, a['filename'])
image = skimage.io.imread(image_path)
height, width = image.shape[:2]
###changed###
for i,p in enumerate(polygons):
all_p_x=np.array(p['all_points_x'])
all_p_y=np.array(p['all_points_y'])
all_p_x[all_p_x>=width]=width-1
all_p_y[all_p_y>=height]=height-1
polygons[i]['all_points_x']=list(all_p_x)
polygons[i]['all_points_y']=list(all_p_y)
self.add_image(
"Sputum",
image_id=a['filename'], # use file name as a unique image id
path=image_path,
width=width, height=height,
polygons=polygons,
class_ids=class_ids)
def load_mask(self, image_id):
"""Generate instance masks for an image.
Returns:
masks: A bool array of shape [height, width, instance count] with
one mask per instance.
class_ids: a 1D array of class IDs of the instance masks.
"""
                # If not a sputum dataset image, delegate to parent class.
image_info = self.image_info[image_id]
if image_info["source"] != "Sputum":
return super(self.__class__, self).load_mask(image_id)
class_ids = image_info['class_ids']
# Convert polygons to a bitmap mask of shape
# [height, width, instance_count]
info = self.image_info[image_id]
mask = np.zeros([info["height"], info["width"], len(info["polygons"])],
dtype=np.uint8)
for i, p in enumerate(info["polygons"]):
# Get indexes of pixels inside the polygon and set them to 1
rr, cc = skimage.draw.polygon(p['all_points_y'], p['all_points_x'])
mask[rr, cc, i] = 1
                # Return mask, and array of class IDs of each instance.
                print("info['class_ids']=", info['class_ids'])
                class_ids = np.array(class_ids, dtype=np.int32)
                return mask, class_ids
def image_reference(self, image_id):
info = self.image_info[image_id]
if info["source"] == "Sputum":
return info["path"]
else:
super(self.__class__, self).image_reference(image_id)
# Training dataset.
dataset_train = SputumDataset()
dataset_train.load_sputum(dataset_path, "train")
dataset_train.prepare()
# Validation dataset
dataset_val = SputumDataset()
dataset_val.load_sputum(dataset_path, "val")
dataset_val.prepare()
# Create model in training mode
model = modellib.MaskRCNN(mode="training", config=config,
model_dir=MODEL_DIR)
print("Training network heads")
model.train(dataset_train, dataset_val,
learning_rate=config.LEARNING_RATE,
epochs=epochs,
layers='heads')
print("Training is Complete.")
print("Model Saved at :",MODEL_DIR)
def crop_maskrcnn(self,model_dir, coco_model_path, image_path, image_name, output_path):
"""This function crops the bounding box image based on the maskrcnn model output. Mask RCNN
algorithm detects the object of interest in the imagine and the shaded part detected is cropped and
saved to the output_path.
Parameters
----------
model_dir : str
Path to the model directory.
        coco_model_path : str
Path to the trained model or coco model.
image_path : str
Path to the image for which cropping is to be done.
image_name : str
Name of the image.
output_path : str
Path to the output directory where the image is to be saved.
Returns
-------
Images
The Predicted part will be cropped and saved in the output directory.
"""
class SputumConfig(Config):
"""Configuration for training on the hail, hand dataset.
Derives from the base Config class and overrides some values.
"""
# Give the configuration a recognizable name
NAME = "Sputum"
# We use a GPU with 12GB memory, which can fit two images.
# Adjust down if you use a smaller GPU.
IMAGES_PER_GPU = 1
# Number of classes (including background)
            NUM_CLASSES = 1 + 1  # Background + sputum
# Number of training steps per epoch
STEPS_PER_EPOCH = 100
# Skip detections with < 80% confidence
DETECTION_MIN_CONFIDENCE = 0.9
class InferenceConfig(SputumConfig):
# Set batch size to 1 since we'll be running inference on
# one image at a time. Batch size = GPU_COUNT * IMAGES_PER_GPU
GPU_COUNT = 1
IMAGES_PER_GPU = 1
session = tf.Session()
keras.backend.set_session(session)
configs = InferenceConfig()
with modellib.MaskRCNN(mode="inference", model_dir=model_dir, config=configs) as models:
models.load_weights(coco_model_path, by_name=True)
print('Sputum weight file loaded')
preprocess_obj = PreprocessImages()
preprocess_obj.crop_run(image_path,image_name,output_path,models,session)
def mask_saving(self,model_dir, coco_model_path, image_path, image_name, output_path):
"""The Mask RCNN model when given a image detects the area of interest. This method
saves that predictions as a image file which can be used for evaluating how good the
model is. The Image is saved in the output_path given.
Parameters
----------
model_dir : str
Path to the model directory.
        coco_model_path : str
Path to the trained model or coco model.
image_path : str
Path to the image for which cropping is to be done.
image_name : str
Name of the image.
output_path : str
Path to the output directory where the image is to be saved.
Returns
-------
Images
The Predicted part will be saved in the output directory.
"""
class SputumConfig(Config):
"""Configuration for training on the hail, hand dataset.
Derives from the base Config class and overrides some values.
"""
# Give the configuration a recognizable name
NAME = "Sputum"
# We use a GPU with 12GB memory, which can fit two images.
# Adjust down if you use a smaller GPU.
IMAGES_PER_GPU = 1
# Number of classes (including background)
            NUM_CLASSES = 1 + 1  # Background + sputum
# Number of training steps per epoch
STEPS_PER_EPOCH = 100
# Skip detections with < 80% confidence
DETECTION_MIN_CONFIDENCE = 0.9
class InferenceConfig(SputumConfig):
# Set batch size to 1 since we'll be running inference on
# one image at a time. Batch size = GPU_COUNT * IMAGES_PER_GPU
GPU_COUNT = 1
IMAGES_PER_GPU = 1
session = tf.Session()
keras.backend.set_session(session)
configs = InferenceConfig()
with modellib.MaskRCNN(mode="inference", model_dir=model_dir, config=configs) as models:
models.load_weights(coco_model_path, by_name=True)
print('Sputum weight file loaded')
preprocess_obj = Masked_Image()
preprocess_obj.sputum_mask_segment(image_path,image_name,output_path,models,session)
def sputum_ml_feature_extract(self, path_to_csv, crop_dir, feature_folder):
"""Based on the cropped images, this method extracts the features from the images. This
        feature extraction method is for training the Machine Learning model. There are 24 features
        extracted, not including the score.
Parameters
----------
path_to_csv : str
Path to csv files containing the labels and other attributes.
crop_dir : str
Path to the directory where the cropped images are saved.
feature_folder : str
Path to the directory where the features are to be saved.
Returns
-------
Parquet file
A Parquet file will be saved inside the feature folder containing various features.
The File format will be having a timestamp.
"""
df=pd.read_csv(path_to_csv)
result=[]
for i in range(len(df)):
s = df['Score'][i]
image_name=df['image_name'][i]
img_path=os.path.join(crop_dir,image_name)
img=cv2.imread(img_path)
img_resized=cv2.resize(img,(110,70))
b,g,r=cv2.split(img_resized)
b_list = []
for i in range(len(b)):
for j in range(len(b[i])):
b_list.append(b[i][j])
g_list = []
for i in range(len(g)):
for j in range(len(g[i])):
g_list.append(g[i][j])
r_list = []
for i in range(len(r)):
for j in range(len(r[i])):
                    r_list.append(r[i][j])
b_a = np.array(b_list)
g_a = np.array(g_list)
r_a = np.array(r_list)
if len(b_a)!=0:
b_mean = cv2.mean(b)
b_max = np.max(b_a)
b_median = statistics.median(b_a)
b_10 = np.percentile(b_a, 10)
b_25 = np.percentile(b_a, 25)
b_50 = np.percentile(b_a, 50)
b_75 = np.percentile(b_a, 75)
b_100 = np.percentile(b_a, 100)
else:
b_mean = [0]
b_max = 0
b_median = 0
b_10 = 0
b_25 = 0
b_50 = 0
b_75 = 0
b_100 = 0
if len(g_a)!=0:
g_mean = cv2.mean(g)
g_median = statistics.median(g_a)
g_max = np.max(g_a)
g_10 = np.percentile(g_a, 10)
g_25 = np.percentile(g_a, 25)
g_50 = np.percentile(g_a, 50)
g_75 = np.percentile(g_a, 75)
g_100 = np.percentile(g_a, 100)
else:
g_mean = [0]
g_median = 0
g_max = 0
g_10 = 0
g_25 = 0
g_50 = 0
g_75 = 0
g_100 = 0
if len(r_a)!=0:
r_mean = cv2.mean(r)
r_max = np.max(r_a)
r_median = statistics.median(r_a)
r_10 = np.percentile(r_a, 10)
r_25 = np.percentile(r_a, 25)
r_50 = np.percentile(r_a, 50)
r_75 = np.percentile(r_a, 75)
r_100 = np.percentile(r_a, 100)
else:
r_mean = [0]
r_max = 0
r_median = 0
r_10 = 0
r_25 = 0
r_50 = 0
r_75 = 0
r_100 = 0
result.append({'img':image_name,'B_Mean':b_mean[0],'G_Mean':g_mean[0],'R_Mean':r_mean[0],
'B_Max':b_max,'G_Max':g_max,'R_Max':r_max,
'B_Median':b_median,'G_Median':g_median,'R_Median':r_median,
'B_10':b_10,'B_25':b_25,'B_50':b_50,'B_75':b_75,'B_100':b_100,
'G_10': g_10, 'G_25': g_25, 'G_50': g_50, 'G_75': g_75, 'G_100': g_100,
'R_10': r_10, 'R_25': r_25, 'R_50': r_50, 'R_75': r_75, 'R_100': r_100,'Score':s})
        features_df = pd.DataFrame(result)
        # NOTE: the exact output file name below is an assumption; the docstring only
        # states that a timestamped parquet file is written to `feature_folder`.
        timestamp = time.strftime("%Y%m%d_%H%M%S")
        features_df.to_parquet(os.path.join(feature_folder, "features_{}.parquet".format(timestamp)))
import pandas as pd
import numpy as np
from copy import deepcopy
from collections import Counter
from sklearn.metrics import calinski_harabasz_score
from sklearn.cluster import (
KMeans,
AgglomerativeClustering,
MiniBatchKMeans
)
from minisom import MiniSom
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import (
StratifiedKFold,
StratifiedShuffleSplit
)
from imblearn.under_sampling.base import BaseCleaningSampler
from .utils import get_2Dcoordinates_matrix
from sklearn.ensemble import IsolationForest
################################################################################
# iForest
################################################################################
class PerClassiForest(BaseCleaningSampler):
def __init__(self,
n_estimators=100,
max_samples='auto',
contamination=0.1,
max_features=1.0,
bootstrap=False,
n_jobs=None,
behaviour='new',
random_state=None,
verbose=0,
warm_start=False
):
self.n_estimators = n_estimators
self.max_samples = max_samples
self.contamination = contamination
self.max_features = max_features
self.bootstrap = bootstrap
self.n_jobs = n_jobs
self.behaviour = behaviour
self.random_state = random_state
self.verbose = verbose
self.warm_start = warm_start
self.iForest_ = IsolationForest(
n_estimators = self.n_estimators,
max_samples = self.max_samples,
contamination = self.contamination,
max_features = self.max_features,
bootstrap = self.bootstrap,
n_jobs = self.n_jobs,
behaviour = self.behaviour,
random_state = self.random_state,
verbose = self.verbose,
warm_start = self.warm_start
)
def fit(self, X, y):
self.iforests = {}
#outcome = np.zeros(X.shape[0])
for label in np.unique(y):
iforest = deepcopy(self.iForest_)
#outcome[y==label] = iforest.fit_predict(X[y==label])
self.iforests[label] = iforest.fit(X[y==label])
return self
def resample(self, X, y):
outcome = np.zeros(X.shape[0])
for label in np.unique(y):
outcome[y==label] = self.iforests[label].predict(X[y==label])
return X[outcome==1], y[outcome==1]
def _fit_resample(self, X, y):
self.iforests = {}
outcome = np.zeros(X.shape[0])
for label in np.unique(y):
iforest = deepcopy(self.iForest_)
outcome[y==label] = iforest.fit_predict(X[y==label])
self.iforests[label] = iforest.fit(X[y==label])
return X[outcome==1], y[outcome==1]
def fit_resample(self, X, y):
return self._fit_resample(X, y)
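# Usage sketch for PerClassiForest (illustrative, with made-up data): fit one
# IsolationForest per class and drop the observations each per-class forest
# marks as outliers.
#
#   X = np.random.rand(200, 4)
#   y = np.repeat([0, 1], 100)
#   X_clean, y_clean = PerClassiForest(contamination=0.1).fit_resample(X, y)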
################################################################################
# Paris new
################################################################################
class ParisDataFiltering(BaseCleaningSampler):
def __init__(self, k_max=6, random_state=None):
self.k_max = k_max
self.random_state = random_state
def fit(self, X, y, ids=None):
return self
def resample(self, X, y, ids=None):
if ids is None:
ids=y
status = np.zeros(y.shape)*np.nan
cluster = np.zeros(y.shape)*np.nan
for pol_id in np.unique(ids):
_labels = _find_optimal_k_and_cluster(X=X[ids==pol_id], k_max=self.k_max, random_state=self.random_state)
cluster[ids==pol_id] = _labels
status[ids==pol_id] = get_dominant_pixels(_labels)
final_status = np.zeros(y.shape).astype(bool)
for label in np.unique(y):
_final_status = final_status[y==label]
_clu = cluster[y==label][status[y==label].astype(bool)]
_ids = ids[y==label][status[y==label].astype(bool)]
_ban = X[y==label][status[y==label].astype(bool)]
unique_ids = np.unique(_ids)
b_dist = np.zeros(unique_ids.shape)*np.nan
for i, polygon_cluster_id in enumerate(unique_ids):
b = _ban[_ids==polygon_cluster_id]
b_dist[i] = Bhattacharyya(_ban, b)
ranks = b_dist.argsort().argsort()
accepted = unique_ids[ranks<int(np.ceil(ranks.shape[0]*.65))]
_final_status[status[y==label].astype(bool)] = np.isin(_ids, accepted)
final_status[y==label] = _final_status
        return X[final_status], y[final_status]
def _fit_resample(self, X, y, ids=None):
        return self.resample(X, y, ids)
def fit_resample(self, X, y, ids=None):
return self.resample(X, y, ids)
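# Usage sketch for ParisDataFiltering (illustrative only): `ids` groups pixels by
# polygon; if it is omitted, the class labels themselves are used as groups.
#
#   X_clean, y_clean = ParisDataFiltering(k_max=6).fit_resample(X, y, ids=polygon_ids)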
def _find_optimal_k_and_cluster(X, k_max=12, random_state=None):
label_list = []
CH_score = []
for k in range(2,k_max+1):
if X.shape[0] > k:
labels = KMeans(n_clusters=k, n_init=10, max_iter=300, random_state=random_state, n_jobs=None).fit_predict(X)
score = calinski_harabasz_score(X, labels)
label_list.append(labels)
CH_score.append(score)
return label_list[np.argmax(CH_score)]
def get_dominant_pixels(labels):
return labels==Counter(labels).most_common(1)[0][0]
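# Bhattacharyya distance between Gaussian approximations of samples a and b:
# D_B = 1/8 (mu_a - mu_b)^T Sigma^-1 (mu_a - mu_b)
#       + 1/2 ln( det(Sigma) / sqrt(det(Sigma_a) det(Sigma_b)) ),
# with Sigma = (Sigma_a + Sigma_b) / 2. Only the first (Mahalanobis-like) term
# is returned below; the log-determinant term is left commented out.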
def Bhattacharyya(a, b):
a_mean = np.expand_dims(a.mean(axis=0), 1)
a_cov = np.cov(a.T)
b_mean = np.expand_dims(b.mean(axis=0), 1)
b_cov = np.cov(b.T)
sigma = (a_cov + b_cov)/2
sigma_inv = np.linalg.inv(sigma)
term_1 = (1/8)*np.dot(np.dot((a_mean-b_mean).T,sigma_inv),(a_mean-b_mean))
#term_2 = (1/2)*np.log(np.linalg.det(sigma)/np.sqrt(np.linalg.det(a_cov)*np.linalg.det(b_cov)))
#return float(np.squeeze(term_1+term_2))
return term_1
################################################################################
# Filter based methods
################################################################################
class MBKMeansFilter(BaseCleaningSampler):
"""My own method"""
def __init__(self, n_splits=5, granularity=5, method='obs_percent', threshold=0.5, random_state=None):
assert method in ['obs_percent', 'mislabel_rate'], 'method must be either \'obs_percent\', \'mislabel_rate\''
super().__init__(sampling_strategy='all')
self.n_splits = n_splits
self.granularity = granularity
self.method = method
self.threshold = threshold
self.random_state = random_state
def _fit_resample(self, X, y, filters):
#assert X.shape[0]==y.shape[0], 'X and y must have the same length.'
## cluster data
#print('n_splits:', self.n_splits, ', granularity:', self.granularity, ', method:', self.method, ', threshold:', self.threshold, ', random_state:', self.random_state)
self.filters = deepcopy(filters)
index = np.arange(len(y))
clusters_list = []
index_list = []
self.kmeans = {}
for analysis_label in np.unique(y):
label_indices = index[y==analysis_label]
X_label = X[y==analysis_label]
clusters, kmeans = self._KMeans_clustering(X_label)
self.kmeans[analysis_label] = kmeans
index_list.append(label_indices)
clusters_list.append(clusters)
## cluster labels
cluster_col = pd.Series(
data=np.concatenate(clusters_list),
index=np.concatenate(index_list),
name='cluster')\
.sort_index()
## apply filters
label_encoder = LabelEncoder()
y_ = label_encoder.fit_transform(y)
self.stratifiedkfold = StratifiedKFold(n_splits = self.n_splits, shuffle=True, random_state=self.random_state)
self.filter_list = {}
filter_outputs = {}
for n, (_, split) in enumerate(self.stratifiedkfold.split(X, y_)):
for name, clf in self.filters:
classifier = deepcopy(clf)
classifier.fit(X[split], y_[split])
filter_outputs[f'filter_{n}_{name}'] = classifier.predict(X)
self.filter_list[f'{n}_{name}'] = classifier
## mislabel rate
total_filters = len(filter_outputs.keys())
mislabel_rate = (total_filters - \
np.apply_along_axis(
lambda x: x==y_, 0, pd.DataFrame(filter_outputs).values)\
.astype(int).sum(axis=1)
)/total_filters
## crunch data
mislabel_col = pd.Series(data=mislabel_rate, index=index, name='mislabel_rate')
y_col = pd.Series(data=y, index=index, name='y')
df = cluster_col.to_frame().join(y_col).join(mislabel_col)
df['count'] = 1
df_cluster_info_grouped = df.groupby(['y', 'cluster'])\
.agg({'mislabel_rate':np.mean, 'count':'count'})\
.sort_values(['mislabel_rate'])
df_cluster_info_A = df_cluster_info_grouped.groupby(['y']).cumsum()\
.rename(columns={'count':'cumsum'}).drop(columns=['mislabel_rate'])
df_cluster_info = df_cluster_info_grouped.join(df_cluster_info_A)
if self.method=='mislabel_rate':
df_cluster_info['status'] = df_cluster_info['mislabel_rate']<=self.threshold
elif self.method=='obs_percent':
thresholds = df_cluster_info.groupby('y').max()['cumsum']*self.threshold
actual_thresholds = df_cluster_info[
df_cluster_info['cumsum']/thresholds>=1
]['cumsum'].groupby('y').min()
df_cluster_info['status'] = df_cluster_info['cumsum']/actual_thresholds<=1
# always accept cluster with lowest mislabel rate for each class by default
index_keys = df_cluster_info.reset_index().groupby('y').apply(
lambda x: x.sort_values('mislabel_rate').iloc[0]
)[['y','cluster']].values
df_cluster_info.loc[[tuple(i) for i in index_keys], 'status'] = True
results = df.join(df_cluster_info['status'], on=['y','cluster'])
self.status = results['status'].values
return X[self.status], y[self.status]
def fit(self, X, y, filters):
"""Fits filter to X, y."""
self._fit_resample(X, y, filters)
return self
def resample(self, X, y):
index = np.arange(len(y))
clusters_list = []
index_list = []
for analysis_label in np.unique(y):
label_indices = index[y==analysis_label]
X_label = X[y==analysis_label]
clusters = self.kmeans[analysis_label].predict(X_label)
index_list.append(label_indices)
clusters_list.append(clusters)
## cluster labels
cluster_col = pd.Series(
data=np.concatenate(clusters_list),
index=np.concatenate(index_list),
name='cluster')\
.sort_index()
## apply filters
label_encoder = LabelEncoder()
y_ = label_encoder.fit_transform(y)
filter_outputs = {}
for name, classifier in self.filter_list.items():
filter_outputs[f'filter_{name}'] = classifier.predict(X)
## mislabel rate
total_filters = len(filter_outputs.keys())
mislabel_rate = (total_filters - \
np.apply_along_axis(
lambda x: x==y_, 0, pd.DataFrame(filter_outputs).values)\
.astype(int).sum(axis=1)
)/total_filters
## crunch data
mislabel_col = pd.Series(data=mislabel_rate, index=index, name='mislabel_rate')
y_col = pd.Series(data=y, index=index, name='y')
df = cluster_col.to_frame().join(y_col).join(mislabel_col)
df['count'] = 1
df_cluster_info_grouped = df.groupby(['y', 'cluster'])\
.agg({'mislabel_rate':np.mean, 'count':'count'})\
.sort_values(['mislabel_rate'])
df_cluster_info_A = df_cluster_info_grouped.groupby(['y']).cumsum()\
.rename(columns={'count':'cumsum'}).drop(columns=['mislabel_rate'])
df_cluster_info = df_cluster_info_grouped.join(df_cluster_info_A)
if self.method=='mislabel_rate':
df_cluster_info['status'] = df_cluster_info['mislabel_rate']<=self.threshold
elif self.method=='obs_percent':
thresholds = df_cluster_info.groupby('y').max()['cumsum']*self.threshold
actual_thresholds = df_cluster_info[
df_cluster_info['cumsum']/thresholds>=1
]['cumsum'].groupby('y').min()
df_cluster_info['status'] = df_cluster_info['cumsum']/actual_thresholds<=1
# always accept cluster with lowest mislabel rate for each class by default
index_keys = df_cluster_info.reset_index().groupby('y').apply(
lambda x: x.sort_values('mislabel_rate').iloc[0]
)[['y','cluster']].values
df_cluster_info.loc[[tuple(i) for i in index_keys], 'status'] = True
results = df.join(df_cluster_info['status'], on=['y','cluster'])
self.status = results['status'].values
return X[self.status], y[self.status]
def fit_resample(self, X, y, filters):
return self._fit_resample(X, y, filters)
def _KMeans_clustering(self, X):
"""Private function to..."""
if self.granularity>=np.sqrt(X.shape[0]):
self.granularity=int(np.sqrt(X.shape[0]))-1
k = int(self.granularity*np.sqrt(X.shape[0]))
k = k if k>=1 else 1
kmeans = MiniBatchKMeans(k, max_iter=5*k, tol=0, max_no_improvement=400, random_state=self.random_state)
labels = kmeans.fit_predict(X).astype(str)
return labels, kmeans
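# Usage sketch for MBKMeansFilter (illustrative only): `filters` is a list of
# (name, estimator) tuples; the estimators here are arbitrary examples.
#
#   from sklearn.linear_model import LogisticRegression
#   from sklearn.tree import DecisionTreeClassifier
#   filters = [('LR', LogisticRegression()), ('DT', DecisionTreeClassifier())]
#   X_clean, y_clean = MBKMeansFilter(n_splits=5, granularity=5,
#                                     threshold=0.5).fit_resample(X, y, filters)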
class EnsembleFilter(BaseCleaningSampler):
"""Identifying Mislabeled Training Data, by <NAME> Friedl (1999)"""
def __init__(self, n_splits=4, threshold=0.5, random_state=None):
super().__init__(sampling_strategy='all')
self.n_splits = n_splits
self.threshold = threshold
self.random_state = random_state
def _fit_resample(self, X, y, filters):
self.filters = deepcopy(filters)
label_encoder = LabelEncoder()
y_ = label_encoder.fit_transform(y)
## run filter
self.filter_list = {}
filter_outputs = {f'filter_{name}':np.zeros((y.shape))-1 for name, _ in self.filters}
self.stratifiedkfold = StratifiedKFold(n_splits = self.n_splits, shuffle=True, random_state=self.random_state)
for n, (train_indices, test_indices) in enumerate(self.stratifiedkfold.split(X, y_)):
for name, clf in self.filters:
classifier = deepcopy(clf)
classifier.fit(X[train_indices], y_[train_indices])
filter_outputs[f'filter_{name}'][test_indices] = classifier.predict(X[test_indices])
self.filter_list[f'{n}_{name}'] = classifier
## mislabel rate
total_filters = len(filter_outputs.keys())
mislabel_rate = (total_filters - \
np.apply_along_axis(
lambda x: x==y_, 0, pd.DataFrame(filter_outputs).values)\
.astype(int).sum(axis=1)
)/total_filters
## filter data
self.status = mislabel_rate<=self.threshold
return X[self.status], y[self.status]
def fit(self, X, y, filters):
self._fit_resample(X, y, filters)
return self
def resample(self, X, y):
label_encoder = LabelEncoder()
y_ = label_encoder.fit_transform(y)
## run filter
indices = []
filter_outputs = {f'filter_{name}':np.zeros((y.shape))-1 for name, _ in self.filters}
for n, (train_indices, test_indices) in enumerate(self.stratifiedkfold.split(X, y_)):
for name in dict(self.filters).keys():
                filter_outputs[f'filter_{name}'][test_indices] = self.filter_list[f'{n}_{name}'].predict(X[test_indices])
## mislabel rate
total_filters = len(filter_outputs.keys())
mislabel_rate = (total_filters - \
np.apply_along_axis(
lambda x: x==y_, 0, pd.DataFrame(filter_outputs).values)\
.astype(int).sum(axis=1)
)/total_filters
## filter data
self.status = mislabel_rate<=self.threshold
return X[self.status], y[self.status]
def fit_resample(self, X, y, filters):
return self._fit_resample(X, y, filters)
class ChainFilter(BaseCleaningSampler):
"""Own method"""
def __init__(self, filter_obj, stopping_criteria='manual', tol=None, max_iter=40, random_state=None):
assert stopping_criteria in ['auto', 'manual'], '`stopping_criteria` must be either `auto` or `manual`'
if stopping_criteria=='auto': assert tol, '`tol` must be defined while `stopping_criteria` is defined as `auto`'
self.filter_methods = [deepcopy(filter_obj) for _ in range(max_iter)]
self.random_state = random_state
self.tol = tol
self.max_iter = max_iter
self.stopping_criteria = stopping_criteria
def _fit_resample(self, X, y, filters):
self.filters = deepcopy(filters)
X_nnf, y_nnf = X.copy(), y.copy()
self.filter_list = {}
for n, filter in enumerate(self.filter_methods):
filter = filter.fit(X_nnf, y_nnf, self.filters)
X_nnf, y_nnf = filter.resample(X, y)
self.filter_list[n] = filter
if n!=0 and self.stopping_criteria=='auto':
not_changed = dict(Counter(self.filter_list[n-1].status == self.filter_list[n].status))
percent_changes = not_changed[False]/sum(not_changed.values())
print(f'Percentage of status changes: {percent_changes*100}%')
if percent_changes<=self.tol:
break
self.final_filter = filter
return X_nnf, y_nnf
def fit_resample(self, X, y, filters):
return self._fit_resample(X, y, filters)
class ConsensusFilter(EnsembleFilter):
"""Identifying Mislabeled Training Data, by Brodley and Friedl (1999)"""
def __init__(self, n_splits=4, random_state=None):
super().__init__(n_splits=n_splits, threshold=1-.9e-15, random_state=random_state)
class MajorityVoteFilter(EnsembleFilter):
"""Identifying Mislabeled Training Data, by Brodley and Friedl (1999)"""
def __init__(self, n_splits=4, random_state=None):
super().__init__(n_splits=n_splits, threshold=.5, random_state=random_state)
class SingleFilter(EnsembleFilter):
"""Identifying Mislabeled Training Data, by <NAME> (1999)"""
def __init__(self, n_splits=4, random_state=None):
super().__init__(n_splits=n_splits, threshold=.5, random_state=random_state)
def fit_resample(self, X, y, filters):
if type(filters)==list: filters = [(filters[0].__class__.__name__,filters[0])]
else: filters = [(filters.__class__.__name__,filters)]
return super()._fit_resample(X, y, filters)
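# Usage sketch for the ensemble-based filters (illustrative only):
#
#   from sklearn.naive_bayes import GaussianNB
#   from sklearn.tree import DecisionTreeClassifier
#   from sklearn.neighbors import KNeighborsClassifier
#   filters = [('NB', GaussianNB()), ('DT', DecisionTreeClassifier()),
#              ('3NN', KNeighborsClassifier(n_neighbors=3))]
#   X_mv, y_mv = MajorityVoteFilter(n_splits=4).fit_resample(X, y, filters)
#   X_sf, y_sf = SingleFilter(n_splits=4).fit_resample(X, y, DecisionTreeClassifier())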
class MBKMeansFilter_reversed(BaseCleaningSampler):
"""My own method"""
def __init__(self, n_splits=5, granularity=5, method='obs_percent', threshold=0.5, random_state=None):
assert method in ['obs_percent', 'mislabel_rate'], 'method must be either \'obs_percent\', \'mislabel_rate\''
super().__init__(sampling_strategy='all')
self.n_splits = n_splits
self.granularity = granularity
self.method = method
self.threshold = threshold
self.random_state = random_state
def _fit_resample(self, X, y, filters):
#assert X.shape[0]==y.shape[0], 'X and y must have the same length.'
## cluster data
#print('n_splits:', self.n_splits, ', granularity:', self.granularity, ', method:', self.method, ', threshold:', self.threshold, ', random_state:', self.random_state)
self.filters = deepcopy(filters)
index = np.arange(len(y))
clusters_list = []
index_list = []
self.kmeans = {}
for analysis_label in np.unique(y):
label_indices = index[y==analysis_label]
X_label = X[y==analysis_label]
clusters, kmeans = self._KMeans_clustering(X_label)
self.kmeans[analysis_label] = kmeans
index_list.append(label_indices)
clusters_list.append(clusters)
## cluster labels
cluster_col = pd.Series(
data=np.concatenate(clusters_list),
index=np.concatenate(index_list),
name='cluster')\
.sort_index()
## apply filters
label_encoder = LabelEncoder()
y_ = label_encoder.fit_transform(y)
self.stratifiedkfold = StratifiedKFold(n_splits = self.n_splits, shuffle=True, random_state=self.random_state)
self.filter_list = {}
filter_outputs = {f'filter_{name}':np.zeros((y.shape))-1 for name, _ in self.filters}
for n, (train_indices, test_indices) in enumerate(self.stratifiedkfold.split(X, y_)):
for name, clf in self.filters:
classifier = deepcopy(clf)
classifier.fit(X[train_indices], y_[train_indices])
filter_outputs[f'filter_{name}'][test_indices] = classifier.predict(X[test_indices])
self.filter_list[f'{n}_{name}'] = classifier
## mislabel rate
total_filters = len(filter_outputs.keys())
mislabel_rate = (total_filters - \
np.apply_along_axis(
lambda x: x==y_, 0, pd.DataFrame(filter_outputs).values)\
.astype(int).sum(axis=1)
)/total_filters
## crunch data
mislabel_col = pd.Series(data=mislabel_rate, index=index, name='mislabel_rate')
y_col = pd.Series(data=y, index=index, name='y')
df = cluster_col.to_frame().join(y_col).join(mislabel_col)
df['count'] = 1
df_cluster_info_grouped = df.groupby(['y', 'cluster'])\
.agg({'mislabel_rate':np.mean, 'count':'count'})\
.sort_values(['mislabel_rate'])
df_cluster_info_A = df_cluster_info_grouped.groupby(['y']).cumsum()\
.rename(columns={'count':'cumsum'}).drop(columns=['mislabel_rate'])
df_cluster_info = df_cluster_info_grouped.join(df_cluster_info_A)
if self.method=='mislabel_rate':
df_cluster_info['status'] = df_cluster_info['mislabel_rate']<=self.threshold
elif self.method=='obs_percent':
thresholds = df_cluster_info.groupby('y').max()['cumsum']*self.threshold
actual_thresholds = df_cluster_info[
df_cluster_info['cumsum']/thresholds>=1
]['cumsum'].groupby('y').min()
df_cluster_info['status'] = df_cluster_info['cumsum']/actual_thresholds<=1
# always accept cluster with lowest mislabel rate for each class by default
index_keys = df_cluster_info.reset_index().groupby('y').apply(
lambda x: x.sort_values('mislabel_rate').iloc[0]
)[['y','cluster']].values
df_cluster_info.loc[[tuple(i) for i in index_keys], 'status'] = True
results = df.join(df_cluster_info['status'], on=['y','cluster'])
self.status = results['status'].values
return X[self.status], y[self.status]
def fit(self, X, y, filters):
"""Fits filter to X, y."""
self._fit_resample(X, y, filters)
return self
def resample(self, X, y):
index = np.arange(len(y))
clusters_list = []
index_list = []
for analysis_label in np.unique(y):
label_indices = index[y==analysis_label]
X_label = X[y==analysis_label]
clusters = self.kmeans[analysis_label].predict(X_label)
index_list.append(label_indices)
clusters_list.append(clusters)
## cluster labels
cluster_col = pd.Series(
data=np.concatenate(clusters_list),
index=np.concatenate(index_list),
name='cluster')\
.sort_index()
## apply filters
label_encoder = LabelEncoder()
y_ = label_encoder.fit_transform(y)
filter_outputs = {}
for name, classifier in self.filter_list.items():
filter_outputs[f'filter_{name}'] = classifier.predict(X)
## mislabel rate
total_filters = len(filter_outputs.keys())
mislabel_rate = (total_filters - \
np.apply_along_axis(
lambda x: x==y_, 0, pd.DataFrame(filter_outputs).values)\
.astype(int).sum(axis=1)
)/total_filters
## crunch data
mislabel_col = pd.Series(data=mislabel_rate, index=index, name='mislabel_rate')
y_col = pd.Series(data=y, index=index, name='y')
df = cluster_col.to_frame().join(y_col).join(mislabel_col)
df['count'] = 1
df_cluster_info_grouped = df.groupby(['y', 'cluster'])\
.agg({'mislabel_rate':np.mean, 'count':'count'})\
.sort_values(['mislabel_rate'])
df_cluster_info_A = df_cluster_info_grouped.groupby(['y']).cumsum()\
.rename(columns={'count':'cumsum'}).drop(columns=['mislabel_rate'])
df_cluster_info = df_cluster_info_grouped.join(df_cluster_info_A)
if self.method=='mislabel_rate':
df_cluster_info['status'] = df_cluster_info['mislabel_rate']<=self.threshold
elif self.method=='obs_percent':
thresholds = df_cluster_info.groupby('y').max()['cumsum']*self.threshold
actual_thresholds = df_cluster_info[
df_cluster_info['cumsum']/thresholds>=1
]['cumsum'].groupby('y').min()
df_cluster_info['status'] = df_cluster_info['cumsum']/actual_thresholds<=1
# always accept cluster with lowest mislabel rate for each class by default
index_keys = df_cluster_info.reset_index().groupby('y').apply(
lambda x: x.sort_values('mislabel_rate').iloc[0]
)[['y','cluster']].values
df_cluster_info.loc[[tuple(i) for i in index_keys], 'status'] = True
results = df.join(df_cluster_info['status'], on=['y','cluster'])
self.status = results['status'].values
return X[self.status], y[self.status]
def fit_resample(self, X, y, filters):
return self._fit_resample(X, y, filters)
def _KMeans_clustering(self, X):
"""Private function to..."""
if self.granularity>=np.sqrt(X.shape[0]):
self.granularity=int(np.sqrt(X.shape[0]))-1
k = int(self.granularity*np.sqrt(X.shape[0]))
k = k if k>=1 else 1
kmeans = MiniBatchKMeans(k, max_iter=5*k, tol=0, max_no_improvement=400, random_state=self.random_state)
labels = kmeans.fit_predict(X).astype(str)
return labels, kmeans
## Algorithms that require testing/debugging/edition
class YuanGuanZhu(BaseCleaningSampler):
"""
    Novel mislabeled training data detection algorithm, Yuan et al. (2018)
Filters used in paper: naive Bayes, decision tree, and 3-Nearest Neighbor
"""
def __init__(self, n_splits=3, t=40, method='majority', random_state=None):
"""method: `majority` or `consensus`"""
        assert method in ['majority', 'consensus'], '`method` must be either `majority` or `consensus`.'
if method == 'majority': method = 'MFMF'
elif method == 'consensus': method = 'CFMF'
super().__init__(sampling_strategy='all')
self.t = t
self.method = method
        self.n_splits = n_splits
self.random_state = random_state
self.composite_filter = CompositeFilter(
method=self.method,
n_splits=self.n_splits,
random_state=self.random_state
)
def _fit_resample(self, X, y, filters):
self.filters = deepcopy(filters)
_sfk = StratifiedKFold(n_splits = self.t, shuffle=True, random_state=self.random_state)
statuses = np.zeros(y.shape)
for _, subset in _sfk.split(X, y):
compfilter = deepcopy(self.composite_filter)
compfilter.fit(X[subset],y[subset], self.filters)
statuses[subset] = compfilter.status
        self.status = statuses.astype(bool)
return X[self.status], y[self.status]
def fit_resample(self, X, y, filters):
return self._fit_resample(X, y, filters)
class CompositeFilter(BaseCleaningSampler):
"""
Based on "Novel mislabeled training data detection algorithm",
    Yuan et al. (2018).
`method`: `MFMF`, `CFCF`, `CFMF`, `MFCF`
"""
def __init__(self, method='MFMF', n_splits=4, random_state=None):
assert len(method)==4\
and method[-2:] in ['MF', 'CF']\
and method[:2] in ['MF', 'CF'], \
'Invalid `method` value passed.'
super().__init__(sampling_strategy='all')
self.method = method
self.n_splits = n_splits
self.random_state = random_state
def _fit_resample(self, X, y, filters):
self.filters = deepcopy(filters)
if self.method.startswith('MF'): self.threshold_1 = .5
else: self.threshold_1 = 1-.9e-15
if self.method.endswith('MF'): self.threshold_2 = .5
else: self.threshold_2 = 1-.9e-15
label_encoder = LabelEncoder()
y_ = label_encoder.fit_transform(y)
## run filter
self.filter_list = {}
voted_outputs_1 = {}
indices = []
self.stratifiedkfold = StratifiedKFold(n_splits = self.n_splits, shuffle=True, random_state=self.random_state)
for n, (train_indices, test_indices) in enumerate(self.stratifiedkfold.split(X, y_)):
filter_outputs = {}
for name, clf in self.filters:
classifier = deepcopy(clf)
classifier.fit(X[train_indices], y_[train_indices])
filter_outputs[f'filter_{name}'] = classifier.predict(X)
self.filter_list[f'{n}_{name}'] = classifier
total_filters = len(filter_outputs.keys())
voted_outputs_1[n] = ((total_filters - \
np.apply_along_axis(
lambda x: x==y_, 0, pd.DataFrame(filter_outputs).values)\
.astype(int).sum(axis=1)
)/total_filters) <= self.threshold_1
## mislabel rate
total_votes = len(voted_outputs_1.keys())
        # fraction of rounds in which the instance was flagged as mislabeled
        mislabel_rate = 1 - (pd.DataFrame(voted_outputs_1).values\
                .astype(int).sum(axis=1))/total_votes
## filter data
self.status = mislabel_rate<=self.threshold_2
return X[self.status], y[self.status]
def fit(self, X, y, filters):
self._fit_resample(X, y, filters)
return self
def resample(self, X, y):
if self.method.startswith('MF'): self.threshold_1 = .5
else: self.threshold_1 = 1-.9e-15
if self.method.endswith('MF'): self.threshold_2 = .5
else: self.threshold_2 = 1-.9e-15
label_encoder = LabelEncoder()
y_ = label_encoder.fit_transform(y)
## run filter
voted_outputs_1 = {}
for n, (train_indices, test_indices) in enumerate(self.stratifiedkfold.split(X, y_)):
filter_outputs = {}
for name, clf in self.filters:
filter_outputs[f'filter_{name}'] = self.filter_list[f'{n}_{name}'].predict(X)
total_filters = len(filter_outputs.keys())
voted_outputs_1[n] = ((total_filters - \
np.apply_along_axis(
lambda x: x==y_, 0, pd.DataFrame(filter_outputs).values)\
.astype(int).sum(axis=1)
)/total_filters) <= self.threshold_1
## mislabel rate
total_votes = len(voted_outputs_1.keys())
        # fraction of rounds in which the instance was flagged as mislabeled
        mislabel_rate = 1 - (pd.DataFrame(voted_outputs_1).values\
                .astype(int).sum(axis=1))/total_votes
## filter data
self.status = mislabel_rate<=self.threshold_2
return X[self.status], y[self.status]
def fit_resample(self, X, y, filters):
return self._fit_resample(X, y, filters)
################################################################################
# OLD PARIS VERSION (DO NOT USE)
################################################################################
class pixel_selection:
def __init__(self, df, polygon_id_col, class_col=None, som_architecture=None, k_max=12):
"""df must have only band values, polygon_id_col and class_col"""
assert type(df)==pd.DataFrame, 'df needs to be of type `pd.DataFrame`.'
assert type(polygon_id_col)==str and type(class_col) in [str, type(None)], 'Both polygon_id_col and class_col need to be of type `str`.'
assert polygon_id_col in df.columns, f'{polygon_id_col} not in dataframe.'
self.methods = ['som', 'bhattacharyya', 'kmeans', 'hierarchical']
if not hasattr(self, '_previous_cluster_col'): self._previous_cluster_col = False
self._polygon_id = polygon_id_col
self.class_col = class_col
self.df = df.sort_values(by=self._polygon_id)
self.k = k_max
if som_architecture:
self.som_architectures = np.expand_dims(np.array(som_architecture), 0)
else:
self.som_architectures = get_2Dcoordinates_matrix((5,5)).reshape((2,-1))
self.som_architectures = self.som_architectures[:,np.apply_along_axis(lambda x: (x!=0).all() and (x!=1).any(), 0, self.som_architectures)].T
if self.df[self._polygon_id].dtype == np.dtype('O'):
self.is_string_identifier = True
            self.label_encoder = LabelEncoder().fit(self.df[self._polygon_id])
self.df[self._polygon_id] = self.label_encoder.transform(self.df[self._polygon_id])
else:
self.is_string_identifier = False
if class_col: drop_cols = [self._polygon_id, self.class_col]
else: drop_cols = [self._polygon_id]
polygon_list = np.split(self.df.drop(columns=drop_cols), np.where(np.diff(self.df[self._polygon_id]))[0]+1)
# drop polygons with too few pixels to be relevant for classification
self._polygon_list = [x for x in polygon_list]# if len(x)>=10]
def get_clusters(self, method='som', cluster_col='clusters', identify_dominant_cluster=False, random_state=None):
"""stage 1"""
assert method in self.methods, f'Method {method} not implemented. Possible options are {self.methods}'
assert self._previous_cluster_col or method!='bhattacharyya', f'bhattacharyya method should only be used for consistency analysis.'
if method == 'som':
# Polygon clustering (SOM)
self.som_list = []
labels = []
indices = []
total = len(self._polygon_list) # testing
i=1
for polygon in self._polygon_list:
print(f'Clustering process: {i}/{total}'); i+=1
indices.append(polygon.index)
_labels, som = SOM_find_optimal_architecture_and_cluster(polygon.values, self.som_architectures, random_state)
self.som_list.append(som)
# generally you will want to use get_dominant_pixels only if the majority cluster is being passed for consistency analysis
if identify_dominant_cluster:
labels.append(get_dominant_pixels(_labels))
else:
labels.append(_labels)
elif method == 'bhattacharyya':
labels = []
indices = []
for polygon in self._polygon_list:
a = self._df[self._df[self._previous_cluster_col].isin(polygon.index)]\
.drop(columns=[self._polygon_id, self._previous_polygon_id, self._previous_cluster_col])\
.values
clusters_per_label = list(polygon.index)
pre_indices = []
pre_labels = []
for clust in clusters_per_label:
b = self._df[self._df[self._previous_cluster_col]==clust]\
.drop(columns=[self._polygon_id, self._previous_polygon_id, self._previous_cluster_col])\
.values
distance = Bhattacharyya(a, b)
pre_indices.append([clust])
pre_labels.append([distance])
indices_labels = np.array([pre_indices,pre_labels]).squeeze().T
indices_labels = indices_labels[indices_labels[:,1].astype(float).argsort()]
percentile_65 = int(indices_labels.shape[0]*.65)
indices_labels[:percentile_65,1] = True
indices_labels[percentile_65:,1] = False
labels.append(indices_labels[:,1])
indices.append(indices_labels[:,0].astype(str))
self.labels = labels
self.indices = indices
#indices = np.expand_dims(indices_labels[:,0], 1).astype(str).tolist()
#labels = np.expand_dims(indices_labels[:,1], 1).tolist()
elif method in ['kmeans', 'hierarchical']:
labels = []
indices = []
total = len(self._polygon_list) # testing
i = 1
for polygon in self._polygon_list:
print(f'Clustering process: {i}/{total}')
i += 1
indices.append(polygon.index)
_labels = find_optimal_k_and_cluster(X=polygon.values, k_max=self.k, method=method, random_state=random_state)
# generally you will want to use get_dominant_pixels only if the majority cluster is being passed for consistency analysis
if identify_dominant_cluster:
labels.append(get_dominant_pixels(_labels))
else:
labels.append(_labels)
else:
raise ValueError('method not yet implemented')
self.cluster_col = cluster_col
clusters = pd.Series(data=np.concatenate(labels), index=np.concatenate(indices), name=self.cluster_col)
self.df = self.df.join(clusters)
self.df[self.cluster_col] = self.df[self._polygon_id].astype(str)+'_'+self.df[self.cluster_col].astype(str)
return self.df
def get_consistency_analysis(self, consistency_col, method='som', class_col=None, cluster_col=None, random_state=None, som_architecture=None, k_max=None):
"""
stage 2
- SOM: Runs clustering based on Kohonen self-organizing maps
- Bhattacharyya: Distance based selection (keeps 65% of the clusters closest to the "centroid of centroids")
"""
if class_col: self.class_col = class_col
if cluster_col: self.cluster_col = cluster_col
assert self.cluster_col in self.df.columns, f'No columns with cluster id detected ({self.cluster_col}). Run self.get_clusters or manually add column with cluster values (pass column name on `cluster_col`)'
assert type(self.cluster_col)!=type(None), '`cluster_col` is not defined.'
assert method in self.methods, f'Method {method} not implemented. Possible options are {self.methods}'
assert self.class_col in self.df.columns, f'{self.class_col} not in dataframe.'
self._previous_polygon_id = deepcopy(self._polygon_id)
self._previous_cluster_col = deepcopy(self.cluster_col)
self._df = deepcopy(self.df)
pre_class_wide_clusters = self.df[[self.cluster_col, self.class_col]].drop_duplicates().set_index(self.cluster_col)
class_wide_clusters = self.df.drop(columns=[self._polygon_id, self.class_col]).groupby([self.cluster_col]).mean()
class_wide_clusters = class_wide_clusters.join(pre_class_wide_clusters)
self.__init__(class_wide_clusters, self.class_col)
if som_architecture:
self.som_architectures = np.expand_dims(np.array(som_architecture), 0)
else:
self.som_architectures = get_2Dcoordinates_matrix((5,5)).reshape((2,-1))
self.som_architectures = self.som_architectures[:,np.apply_along_axis(lambda x: (x!=0).all() and (x!=1).any(), 0, self.som_architectures)].T
if k_max:
self.k = k_max
else:
self.k = 2
cluster_info = self.get_clusters(method=method, cluster_col=consistency_col, identify_dominant_cluster=True, random_state=random_state)
mapper = cluster_info[consistency_col].apply(lambda x: x.split('_')[-1]=='True')\
.astype(int).to_dict()
self._df[consistency_col] = self._df[self._previous_cluster_col].map(mapper)
return self._df, cluster_info
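# Hypothetical two-stage usage of the class above (a sketch only: the dataframe,
# column names and parameters are made up, and helpers defined elsewhere in this
# module, e.g. get_2Dcoordinates_matrix / find_optimal_k_and_cluster, are assumed
# to be importable):
#   demo = pd.DataFrame(np.random.rand(200, 4), columns=['b1', 'b2', 'b3', 'b4'])
#   demo['polygon_id'] = np.repeat(np.arange(20), 10)
#   demo['label'] = np.repeat(np.arange(4), 50)
#   ps = pixel_selection(demo, polygon_id_col='polygon_id', class_col='label', k_max=4)
#   clustered = ps.get_clusters(method='kmeans', identify_dominant_cluster=False)   # stage 1
#   filtered, info = ps.get_consistency_analysis('consistent', method='kmeans',
#                                                class_col='label')                 # stage 2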
def SOM_clustering(X, grid_shape, random_state=None):
# setup SOM
som = MiniSom(grid_shape[0],grid_shape[1],X.shape[1],sigma=0.8,learning_rate=0.6,random_seed=random_state)
# fit SOM
som.train_random(X, 2000)
# assign labels to node
labels = np.apply_along_axis(som.winner, 1, X).astype(str)
return np.char.add(labels[:,0], labels[:,1]), som
def SOM_find_optimal_architecture_and_cluster(X, nodes, random_state=None):
label_list = []
CH_score = []
som_list = []
for architecture in nodes:
if X.shape[0]>=architecture[0]*architecture[1]:
labels, som = SOM_clustering(X,architecture,random_state=random_state)
# Paris et al. 2019 uses the Calinski Harabasz score to identify the number of clusters to use
score = calinski_harabasz_score(X, labels)
label_list.append(labels)
CH_score.append(score)
som_list.append(som)
while len(label_list)==0:
nodes = np.clip(nodes-1, 1, None)
for architecture in nodes:
if X.shape[0]>=architecture[0]*architecture[1]:
labels, som = SOM_clustering(X,architecture,random_state=random_state)
label_list.append(labels)
CH_score.append(0)
som_list.append(som)
return label_list[np.argmax(CH_score)], som_list[np.argmax(CH_score)]
def find_optimal_k_and_cluster(X, k_max=12, method='kmeans', random_state=None):
label_list = []
CH_score = []
for k in range(2,k_max+1):
if X.shape[0] > k:
if method == 'kmeans':
                labels = KMeans(n_clusters=k, n_init=10, max_iter=300, random_state=random_state).fit_predict(X)
elif method == 'hierarchical':
labels = AgglomerativeClustering(n_clusters=k, linkage='single').fit_predict(X)
score = calinski_harabasz_score(X, labels)
label_list.append(labels)
CH_score.append(score)
return label_list[np.argmax(CH_score)]
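# Quick illustration of the model-selection loop above (a sketch, not part of the
# original pipeline): the k with the highest Calinski-Harabasz score is kept, e.g.
#   from sklearn.datasets import make_blobs
#   X_demo, _ = make_blobs(n_samples=300, centers=4, random_state=0)
#   demo_labels = find_optimal_k_and_cluster(X_demo, k_max=8, method='kmeans', random_state=0)
#   len(np.unique(demo_labels))   # expected to be close to the 4 generated blobs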
def get_dominant_pixels(labels):
l = pd.Series(labels)
labels_premapper = l\
.groupby(labels)\
.size()\
.sort_values(ascending=False)\
.to_frame()
labels_premapper['labels_choice'] = [True]+[False for i in range(len(labels_premapper)-1)]
mapper = labels_premapper[['labels_choice']].to_dict()['labels_choice']
return l.map(mapper)
def Bhattacharyya(a, b):
a_mean = np.expand_dims(a.mean(axis=0), 1)
a_cov = np.cov(a.T)
b_mean = np.expand_dims(b.mean(axis=0), 1)
b_cov = np.cov(b.T)
sigma = (a_cov + b_cov)/2
sigma_inv = np.linalg.inv(sigma)
term_1 = (1/8)*np.dot(np.dot((a_mean-b_mean).T,sigma_inv),(a_mean-b_mean))
term_2 = (1/2)*np.log(np.linalg.det(sigma)/np.sqrt(np.linalg.det(a_cov)*np.linalg.det(b_cov)))
return float(np.squeeze(term_1+term_2))
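# Sanity check of the Bhattacharyya distance above (illustrative only, guarded so it
# does not run on import): samples drawn from the same Gaussian give a distance near
# zero, while shifting the mean of one sample gives a clearly larger value.
if __name__ == '__main__':
    _rng = np.random.default_rng(0)
    _a = _rng.normal(0, 1, size=(500, 3))
    _b = _rng.normal(0, 1, size=(500, 3))
    _c = _rng.normal(3, 1, size=(500, 3))
    print('same distribution :', Bhattacharyya(_a, _b))   # close to 0
    print('shifted mean      :', Bhattacharyya(_a, _c))   # substantially larger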
## Own methods
def KMeans_filtering(X, y, filters, n_splits, granularity, keep_rate, random_state=None):
assert X.shape[0]==y.shape[0], 'X and y must have the same length.'
## cluster data
index = np.arange(len(y))
clusters_list = []
index_list = []
for analysis_label in np.unique(y):
print(f'Label: {analysis_label}')
label_indices = index[y==analysis_label]
X_label = X[y==analysis_label]
clusters, kmeans = _KMeans_outlier_detection(X_label, granularity, random_state)
index_list.append(label_indices)
clusters_list.append(clusters)
## cluster labels
cluster_col = pd.Series(
data=np.concatenate(clusters_list),
index=np.concatenate(index_list),
name='cluster')\
.sort_index()
## apply filters
label_encoder = LabelEncoder()
y_ = label_encoder.fit_transform(y)
skf = StratifiedKFold(n_splits = n_splits, shuffle=True, random_state=random_state)
splits = []
for _, split_indices in skf.split(X, y_):
splits.append(split_indices)
filter_outputs = {}
for n, split in enumerate(splits):
print(f'Applying filter {n}')
for name, clf in filters:
classifier = deepcopy(clf)
classifier.fit(X[split], y_[split])
filter_outputs[f'filter_{n}_{name}'] = classifier.predict(X)
print(f'Applied classifier {name} (part of filter {n})')
## mislabel rate
total_filters = len(filter_outputs.keys())
mislabel_rate = (total_filters - \
np.apply_along_axis(
            lambda x: x==y_, 0, pd.DataFrame(filter_outputs).values)\
        .astype(int).sum(axis=1))/total_filters
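    # Sketch (not from the original source): the mislabel rate computed above can be
    # turned into a keep/drop mask exactly as in the class-based filter earlier in
    # this file, e.g.
    #   status = mislabel_rate <= threshold      # threshold derived from keep_rate
    #   X_filtered, y_filtered = X[status], y[status]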
import gym
from gym import spaces
from gym.utils import seeding
from empyrical import max_drawdown, alpha_beta, sharpe_ratio, annual_return
import numpy as np
import pandas as pd
from tqdm import tqdm
import sys
#read in the number of lookback days as an argument
Lookback = int(sys.argv[1])
#get ray with alternative temp directory to avoid hpc permission issues
import ray
# Start up Ray. This must be done before we instantiate any RL agents.
ray.init(num_cpus=10, ignore_reinit_error=True, log_to_driver=False,_temp_dir="/rds/general/user/asm119/ephemeral")
#<NAME>' data loader and environment
def load_data(price_source='csvdata',tickers=['EEM','QQQ'],start='2008-01-02',end='2010-01-02'):
'''Returned price data to use in gym environment'''
# Load data
# Each dataframe will have columns date and a collection of fields
# TODO: DataLoader from mongoDB
# Raw price from DB, forward impute on the trading days for missing date
# calculate the features (log return, volatility)
if price_source in ['csvdata']:
feature_df = []
price_tensor = []
for t in tickers:
df1 = pd.read_csv('/rds/general/user/asm119/home/reinforcement_learning/Thomas/csvdata/{}.csv'.format(t)).set_index('date').loc[start:end]
feature_df.append(df1)
price_tensor.append(df1['return']) # assumed to the be log return of the ref price
ref_df_columns = df1.columns
# assume all the price_df are aligned and cleaned in the DataLoader
        merged_df = pd.concat(feature_df, axis=1, join='outer')
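        # Sketch of the forward-impute / feature steps described in the comments
        # above (illustrative only; the 'close' column name and the window are assumed):
        #   df1 = df1.ffill()                                    # forward impute missing trading days
        #   df1['return'] = np.log(df1['close']).diff()          # log return of the reference price
        #   df1['vol'] = df1['return'].rolling(Lookback).std()   # rolling volatility feature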
import json
import plotly
#import plotly.express as px
import pandas as pd
from nltk.stem import WordNetLemmatizer
from nltk.tokenize import word_tokenize
from flask import Flask
from flask import render_template, request, jsonify
from plotly.graph_objs import Bar
from sklearn.externals import joblib
from sqlalchemy import create_engine
app = Flask(__name__)
def fn_labels_report(dataset,
max_c=False,
data_ret=False,
label_filter=False,
verbose=False):
    '''Reporting function for label counts.
    Inputs:
    - dataset (mandatory) - the target dataset for reporting about
    - max_c (optional) - maximum number of labels to show - if you want to
      count for all elements, set it as False (default=False)
    - data_ret (optional) - if you want to return a Pandas Dataframe with
      the results (default=False)
    - label_filter (optional) - restrict the report to one label group
      ('main', 'related', 'expand', 'aid', 'weather' or 'infra');
      False reports on all labels (default=False)
    - verbose (optional) - if you want some verbosity (default=False)
    Output:
    - prints the label counts; returns a Dataframe when data_ret=True
    '''
expand_lst = ['related', 'request', 'offer', 'aid_related',
'infrastructure_related', 'weather_related',
'direct_report']
aid_lst = ['food', 'shelter', 'water', 'death', 'refugees', 'money',
'security', 'military', 'clothing', 'tools', 'missing_people',
'child_alone', 'search_and_rescue', 'medical_help',
'medical_products', 'aid_centers', 'other_aid']
weather_lst = ['earthquake', 'storm', 'floods', 'fire', 'cold',
'other_weather']
infrastructure_lst = ['buildings', 'transport', 'hospitals', 'electricity',
'shops', 'other_infrastructure']
if not label_filter: #all the labels
expand_list = expand_lst + aid_lst + weather_lst + infrastructure_lst
elif label_filter == 'main':
expand_list = ['related', 'request', 'offer', 'direct_report']
elif label_filter == 'related':
expand_list = ['aid_related', 'infrastructure_related', 'weather_related']
elif label_filter == 'expand':
expand_list = expand_lst
elif label_filter == 'aid':
expand_list = aid_lst
elif label_filter == 'weather':
expand_list = weather_lst
elif label_filter == "infra":
expand_list = infrastructure_lst
else:
        raise Exception('invalid label_filter parameter')
total = dataset.shape[0]
counts = []
#count for labels - not yet ordered!
for field in expand_list:
count = fn_count_valids(dataset=dataset, field=field)
percent = 100. * (count / total)
counts.append((count, field, percent))
#sort it as sorted tuples
sorted_tuples = sorted(counts, key=fn_getKey, reverse=True)
i=1
c=2
tuples_lst=[]
for cat in sorted_tuples:
count, field, percent = cat
print('{}-{}:{} ({:.1f}%)'.format(i, field, count, percent))
tuples_lst.append((field, count, percent))
if max_c:
if c > max_c:
break
i += 1
c += 1
df_report = pd.DataFrame(tuples_lst, columns = ['label', 'count', 'percentage'])
if data_ret:
return df_report
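# Example call (illustrative only -- `df` is the messages dataframe loaded further
# below): report the 5 most frequent aid-related labels and keep them as a dataframe.
#   df_aid = fn_labels_report(df, max_c=5, data_ret=True, label_filter='aid')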
def fn_getKey(item):
    '''Elementary helper that returns the sort key (first element) of an item
    from a list.
    Input:
    - an item (tuple) from a list
    Output:
    - its key value (the count), used by the sorted() call in fn_labels_report
'''
return item[0]
def fn_count_valids(dataset,
field,
criteria=1,
verbose=False):
'''This function count all valids for a field in a dataset
Inputs:
- dataset (mandatory) - the dataset to be processed
- field (mandatory) - the field to be counted
- criteria (optional) - what counts as a valid one (defauld=1)
- verbose (optional) - if you want some verbosity (default=False)
Output:
- number of valid counts (Integer)
'''
if verbose:
print('###counting function initiated')
result = dataset[field][dataset[field] == criteria].sum()
return result
# load data
engine = create_engine('sqlite:///../data/Messages.db')
df = pd.read_sql_table('Messages', engine)
import datetime
import itertools
import uuid
import numpy as np
import pandas as pd
import sklearn
import sklearn.linear_model
import sklearn.metrics
import sklearn.model_selection
import tqdm.auto as tqdm
import bb_network_decomposition.constants
import bb_network_decomposition.data
import bb_network_decomposition.normalization
import bb_network_decomposition.projection
import bb_network_decomposition.regression
import bb_network_decomposition.spectral
def dummy_iterator_wrapper(iterator):
yield from iterator
def evaluate_network_factors(
df,
model=None,
n_splits=25,
groupby=None,
labels=bb_network_decomposition.constants.location_labels,
factors=bb_network_decomposition.constants.default_factors,
scoring=None,
):
def get_factor(factor):
if "+" in factor:
return np.stack([get_factor(f) for f in factor.split("+")], axis=1)
is_log = "log" in factor
if is_log:
factor = factor[4:]
X = df[factor].values
if is_log:
X = np.log1p(X)
return X
if model is None:
model = sklearn.linear_model.LinearRegression()
if scoring is None:
scoring = sklearn.metrics.make_scorer(sklearn.metrics.r2_score)
if groupby is None:
groups = None
cv = sklearn.model_selection.ShuffleSplit(n_splits=n_splits)
else:
cv = sklearn.model_selection.GroupShuffleSplit(n_splits=n_splits)
if groupby == "days":
mapper = dict(zip(df.day.unique(), np.arange(len(df.day.unique()))))
groups = df.day.apply(lambda day: mapper[day])
elif groupby == "bees":
mapper = dict(zip(df.bee_id.unique(), range(len(df.bee_id.unique()))))
groups = df.bee_id.apply(lambda bee_id: mapper[bee_id])
else:
assert False
regression_results_df = []
for factor in factors:
for label in labels:
X = get_factor(factor)
Y = df[label].values
if X.ndim == 1:
X = X[:, None]
scores = sklearn.model_selection.cross_val_score(
model, X, Y, cv=cv, groups=groups, scoring=scoring, n_jobs=-1
)
for score in scores:
regression_results_df.append(
dict(variable=factor, target=label, r_squared=score)
)
    regression_results_df = pd.DataFrame(regression_results_df)
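    # A typical way to summarise the cross-validated scores collected above
    # (sketch only, not necessarily what the original function returns):
    #   regression_results_df.groupby(['variable', 'target']).r_squared.median()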
import pytest
import sys
import numpy as np
import swan_vis as swan
import networkx as nx
import math
import pandas as pd
import anndata
###########################################################################
################# Related to input/error handling #########################
###########################################################################
###########################################################################
############################## Colors ###################################
###########################################################################
class TestColors(object):
# tests set_metadata_colors
# test set_metadata_colors - vanilla
def test_set_metadata_colors_1(self):
sg = get_die_test_sg()
cmap = {'GM12878': 'red', 'K562': 'blue'}
test = sg.set_metadata_colors('sample', cmap)
assert sg.adata.uns.sample_colors == ['red', 'blue']
# test set_metadata_colors - obs_col does not exist
    def test_set_metadata_colors_2(self):
sg = get_die_test_sg()
cmap = {1: 'red', 2: 'blue'}
with pytest.raises(Exception) as e:
test = sg.set_metadata_colors('stage', cmap)
assert 'Metadata column' in str(e.value)
###########################################################################
################# Related to plotting Swan Plots ##########################
###########################################################################
class TestPlotting(object):
# done: test_new_gene, calc_pos_sizes, calc_edge_curves, plot_graph,
# plot_transcript_path
    # init_plot_settings tests do not check for indicate_novel / indicate settings
# init_plot_settings tests do not check for new dataset addition
# test init_plot_settings - https://github.com/mortazavilab/swan_vis/issues/8
# gene summary -> transcript path (same gene) -> gene summary (same gene)
def test_init_9(self):
sg = swan.SwanGraph()
sg.add_transcriptome('files/test_full.gtf')
sg.plot_graph('test2_gid', display=False)
sg.plot_transcript_path('test5', display=False)
sg.plot_graph('test2_gid', display=False)
# edge_df
sg.pg.edge_df.drop('curve', axis=1, inplace=True) # not checking this
data = [[9, '-', 'exon', 5, 6, 'exon', None],
[8, '-', 'intron', 4, 5, 'intron', None],
[12, '-', 'exon', 4, 5, 'exon', None],
[14, '-', 'intron', 3, 5, 'intron', None],
[7, '-', 'exon', 2, 4, 'exon', None],
[13, '-', 'exon', 2, 3, 'exon', None],
[11, '-', 'intron', 1, 4, 'intron', None],
[6, '-', 'intron', 1, 2, 'intron', None],
[5, '-', 'exon', 0, 1, 'exon', None]]
cols = ['edge_id', 'strand', 'edge_type', 'v1', 'v2', 'color', 'line']
ctrl_edge_df = pd.DataFrame(data=data, columns=cols)
ctrl_edge_df = swan.create_dupe_index(ctrl_edge_df, 'edge_id')
ctrl_edge_df = swan.set_dupe_index(ctrl_edge_df, 'edge_id')
# loc_df
data = [[0, 'chr2', 100, False, True, False, 'TSS', None, None],
[1, 'chr2', 80, True, False, False, 'internal', None, None],
[2, 'chr2', 75, True, False, False, 'internal', None, None],
[3, 'chr2', 65, True, False, False, 'internal', None, None],
[4, 'chr2', 60, True, False, False, 'internal', None, None],
[5, 'chr2', 50, True, False, True, 'TES'],
[6, 'chr2', 45, False, False, True, 'TES']]
cols = ['vertex_id', 'chrom', 'coord', 'internal', 'TSS', 'TES', \
'color', 'edgecolor', 'linewidth']
ctrl_loc_df = pd.DataFrame(data=data, columns=cols)
ctrl_loc_df = swan.create_dupe_index(ctrl_loc_df, 'vertex_id')
ctrl_loc_df = swan.set_dupe_index(ctrl_loc_df, 'vertex_id')
check_dfs(sg.pg.loc_df, ctrl_loc_df, sg.pg.edge_df, ctrl_edge_df)
# test init_plot_settings - going from plotting gene summary to
# gene summary (same gene), also tests working from gene name
def test_init_8(self):
sg = swan.SwanGraph()
sg.add_transcriptome('files/test_full.gtf')
sg.plot_graph('test2_gid', display=False)
sg.plot_graph('test2_gname', display=False)
# edge_df
sg.pg.edge_df.drop('curve', axis=1, inplace=True) # not checking this
data = [[9, '-', 'exon', 5, 6, 'exon', None],
[8, '-', 'intron', 4, 5, 'intron', None],
[12, '-', 'exon', 4, 5, 'exon', None],
[14, '-', 'intron', 3, 5, 'intron', None],
[7, '-', 'exon', 2, 4, 'exon', None],
[13, '-', 'exon', 2, 3, 'exon', None],
[11, '-', 'intron', 1, 4, 'intron', None],
[6, '-', 'intron', 1, 2, 'intron', None],
[5, '-', 'exon', 0, 1, 'exon', None]]
cols = ['edge_id', 'strand', 'edge_type', 'v1', 'v2', 'color', 'line']
ctrl_edge_df = pd.DataFrame(data=data, columns=cols)
ctrl_edge_df = swan.create_dupe_index(ctrl_edge_df, 'edge_id')
ctrl_edge_df = swan.set_dupe_index(ctrl_edge_df, 'edge_id')
# loc_df
data = [[0, 'chr2', 100, False, True, False, 'TSS', None, None],
[1, 'chr2', 80, True, False, False, 'internal', None, None],
[2, 'chr2', 75, True, False, False, 'internal', None, None],
[3, 'chr2', 65, True, False, False, 'internal', None, None],
[4, 'chr2', 60, True, False, False, 'internal', None, None],
[5, 'chr2', 50, True, False, True, 'TES'],
[6, 'chr2', 45, False, False, True, 'TES']]
cols = ['vertex_id', 'chrom', 'coord', 'internal', 'TSS', 'TES', \
'color', 'edgecolor', 'linewidth']
ctrl_loc_df = pd.DataFrame(data=data, columns=cols)
ctrl_loc_df = swan.create_dupe_index(ctrl_loc_df, 'vertex_id')
ctrl_loc_df = swan.set_dupe_index(ctrl_loc_df, 'vertex_id')
check_dfs(sg.pg.loc_df, ctrl_loc_df, sg.pg.edge_df, ctrl_edge_df)
# test init_plot_settings - going from plotting gene summary to
# gene summary (different gene)
def test_init_7(self):
sg = swan.SwanGraph()
sg.add_transcriptome('files/test_full.gtf')
sg.plot_graph('test4_gid', display=False)
sg.plot_graph('test2_gid', display=False)
# edge_df
sg.pg.edge_df.drop('curve', axis=1, inplace=True) # not checking this
data = [[9, '-', 'exon', 5, 6, 'exon', None],
[8, '-', 'intron', 4, 5, 'intron', None],
[12, '-', 'exon', 4, 5, 'exon', None],
[14, '-', 'intron', 3, 5, 'intron', None],
[7, '-', 'exon', 2, 4, 'exon', None],
[13, '-', 'exon', 2, 3, 'exon', None],
[11, '-', 'intron', 1, 4, 'intron', None],
[6, '-', 'intron', 1, 2, 'intron', None],
[5, '-', 'exon', 0, 1, 'exon', None]]
cols = ['edge_id', 'strand', 'edge_type', 'v1', 'v2', 'color', 'line']
ctrl_edge_df = pd.DataFrame(data=data, columns=cols)
ctrl_edge_df = swan.create_dupe_index(ctrl_edge_df, 'edge_id')
ctrl_edge_df = swan.set_dupe_index(ctrl_edge_df, 'edge_id')
# loc_df
data = [[0, 'chr2', 100, False, True, False, 'TSS', None, None],
[1, 'chr2', 80, True, False, False, 'internal', None, None],
[2, 'chr2', 75, True, False, False, 'internal', None, None],
[3, 'chr2', 65, True, False, False, 'internal', None, None],
[4, 'chr2', 60, True, False, False, 'internal', None, None],
[5, 'chr2', 50, True, False, True, 'TES'],
[6, 'chr2', 45, False, False, True, 'TES']]
cols = ['vertex_id', 'chrom', 'coord', 'internal', 'TSS', 'TES', \
'color', 'edgecolor', 'linewidth']
ctrl_loc_df = pd.DataFrame(data=data, columns=cols)
ctrl_loc_df = swan.create_dupe_index(ctrl_loc_df, 'vertex_id')
ctrl_loc_df = swan.set_dupe_index(ctrl_loc_df, 'vertex_id')
check_dfs(sg.pg.loc_df, ctrl_loc_df, sg.pg.edge_df, ctrl_edge_df)
# test init_plot_settings - going from plotting transcript path
# to transcript path (same gene)
def test_init_6(self):
sg = swan.SwanGraph()
sg.add_transcriptome('files/test_full.gtf')
sg.plot_transcript_path('test3', display=False)
sg.plot_transcript_path('test2', display=False)
# edge_df
sg.pg.edge_df.drop(['curve'], axis=1, inplace=True) # not checking this
data = [[9, '-', 'exon', 5, 6, 'exon', None],
[8, '-', 'intron', 4, 5, 'intron', None],
[12, '-', 'exon', 4, 5, 'exon_gray', None],
[14, '-', 'intron', 3, 5, 'intron_gray', None],
[7, '-', 'exon', 2, 4, 'exon', None],
[13, '-', 'exon', 2, 3, 'exon_gray', None],
[11, '-', 'intron', 1, 4, 'intron_gray', None],
[6, '-', 'intron', 1, 2, 'intron', None],
[5, '-', 'exon', 0, 1, 'exon', None]]
cols = ['edge_id', 'strand', 'edge_type', 'v1', 'v2', 'color', 'line']
ctrl_edge_df = pd.DataFrame(data=data, columns=cols)
ctrl_edge_df = swan.create_dupe_index(ctrl_edge_df, 'edge_id')
ctrl_edge_df = swan.set_dupe_index(ctrl_edge_df, 'edge_id')
# loc_df
# sg.pg.loc_df.drop(['annotation'], axis=1, inplace=True)
data = [[0, 'chr2', 100, False, True, False, 'TSS', None, None],
[1, 'chr2', 80, True, False, False, 'internal', None, None],
[2, 'chr2', 75, True, False, False, 'internal', None, None],
[3, 'chr2', 65, True, False, False, 'internal_gray', None, None],
[4, 'chr2', 60, True, False, False, 'internal', None, None],
[5, 'chr2', 50, True, False, True, 'internal', None, None],
[6, 'chr2', 45, False, False, True, 'TES', None, None]]
cols = ['vertex_id', 'chrom', 'coord', 'internal', 'TSS', 'TES', \
'color', 'edgecolor', 'linewidth']
ctrl_loc_df = pd.DataFrame(data=data, columns=cols)
ctrl_loc_df = swan.create_dupe_index(ctrl_loc_df, 'vertex_id')
ctrl_loc_df = swan.set_dupe_index(ctrl_loc_df, 'vertex_id')
check_dfs(sg.pg.loc_df, ctrl_loc_df, sg.pg.edge_df, ctrl_edge_df)
# test init_plot_settings - going from plotting transcript path
# to transcript path (different gene)
def test_init_5(self):
sg = swan.SwanGraph()
sg.add_transcriptome('files/test_full.gtf')
sg.plot_transcript_path('test5', display=False)
sg.plot_transcript_path('test2', display=False)
# edge_df
sg.pg.edge_df.drop(['curve'], axis=1, inplace=True) # not checking this
data = [[9, '-', 'exon', 5, 6, 'exon', None],
[8, '-', 'intron', 4, 5, 'intron', None],
[12, '-', 'exon', 4, 5, 'exon_gray', None],
[14, '-', 'intron', 3, 5, 'intron_gray', None],
[7, '-', 'exon', 2, 4, 'exon', None],
[13, '-', 'exon', 2, 3, 'exon_gray', None],
[11, '-', 'intron', 1, 4, 'intron_gray', None],
[6, '-', 'intron', 1, 2, 'intron', None],
[5, '-', 'exon', 0, 1, 'exon', None]]
cols = ['edge_id', 'strand', 'edge_type', 'v1', 'v2', 'color', 'line']
ctrl_edge_df = pd.DataFrame(data=data, columns=cols)
ctrl_edge_df = swan.create_dupe_index(ctrl_edge_df, 'edge_id')
ctrl_edge_df = swan.set_dupe_index(ctrl_edge_df, 'edge_id')
# loc_df
# sg.pg.loc_df.drop(['annotation'], axis=1, inplace=True)
data = [[0, 'chr2', 100, False, True, False, 'TSS', None, None],
[1, 'chr2', 80, True, False, False, 'internal', None, None],
[2, 'chr2', 75, True, False, False, 'internal', None, None],
[3, 'chr2', 65, True, False, False, 'internal_gray', None, None],
[4, 'chr2', 60, True, False, False, 'internal', None, None],
[5, 'chr2', 50, True, False, True, 'internal', None, None],
[6, 'chr2', 45, False, False, True, 'TES', None, None]]
cols = ['vertex_id', 'chrom', 'coord', 'internal', 'TSS', 'TES', \
'color', 'edgecolor', 'linewidth']
ctrl_loc_df = pd.DataFrame(data=data, columns=cols)
ctrl_loc_df = swan.create_dupe_index(ctrl_loc_df, 'vertex_id')
ctrl_loc_df = swan.set_dupe_index(ctrl_loc_df, 'vertex_id')
check_dfs(sg.pg.loc_df, ctrl_loc_df, sg.pg.edge_df, ctrl_edge_df)
# test init_plot_settings - going from plotting transcript path
# to gene summary (same gene)
def test_init_4(self):
sg = swan.SwanGraph()
sg.add_transcriptome('files/test_full.gtf')
sg.plot_transcript_path('test2', display=False)
sg.plot_graph('test2_gid', display=False)
# edge_df
sg.pg.edge_df.drop('curve', axis=1, inplace=True) # not checking this
data = [[9, '-', 'exon', 5, 6, 'exon', None],
[8, '-', 'intron', 4, 5, 'intron', None],
[12, '-', 'exon', 4, 5, 'exon', None],
[14, '-', 'intron', 3, 5, 'intron', None],
[7, '-', 'exon', 2, 4, 'exon', None],
[13, '-', 'exon', 2, 3, 'exon', None],
[11, '-', 'intron', 1, 4, 'intron', None],
[6, '-', 'intron', 1, 2, 'intron', None],
[5, '-', 'exon', 0, 1, 'exon', None]]
cols = ['edge_id', 'strand', 'edge_type', 'v1', 'v2', 'color', 'line']
ctrl_edge_df = pd.DataFrame(data=data, columns=cols)
ctrl_edge_df = swan.create_dupe_index(ctrl_edge_df, 'edge_id')
ctrl_edge_df = swan.set_dupe_index(ctrl_edge_df, 'edge_id')
# loc_df
data = [[0, 'chr2', 100, False, True, False, 'TSS', None, None],
[1, 'chr2', 80, True, False, False, 'internal', None, None],
[2, 'chr2', 75, True, False, False, 'internal', None, None],
[3, 'chr2', 65, True, False, False, 'internal', None, None],
[4, 'chr2', 60, True, False, False, 'internal', None, None],
[5, 'chr2', 50, True, False, True, 'TES'],
[6, 'chr2', 45, False, False, True, 'TES']]
cols = ['vertex_id', 'chrom', 'coord', 'internal', 'TSS', 'TES', \
'color', 'edgecolor', 'linewidth']
ctrl_loc_df = pd.DataFrame(data=data, columns=cols)
ctrl_loc_df = swan.create_dupe_index(ctrl_loc_df, 'vertex_id')
ctrl_loc_df = swan.set_dupe_index(ctrl_loc_df, 'vertex_id')
check_dfs(sg.pg.loc_df, ctrl_loc_df, sg.pg.edge_df, ctrl_edge_df)
# test init_plot_settings - going from plotting transcript path
# to gene summary (different gene)
def test_init_3(self):
sg = swan.SwanGraph()
sg.add_transcriptome('files/test_full.gtf')
sg.plot_transcript_path('test1', display=False)
sg.plot_graph('test2_gid', display=False)
# edge_df
sg.pg.edge_df.drop('curve', axis=1, inplace=True) # not checking this
data = [[9, '-', 'exon', 5, 6, 'exon', None],
[8, '-', 'intron', 4, 5, 'intron', None],
[12, '-', 'exon', 4, 5, 'exon', None],
[14, '-', 'intron', 3, 5, 'intron', None],
[7, '-', 'exon', 2, 4, 'exon', None],
[13, '-', 'exon', 2, 3, 'exon', None],
[11, '-', 'intron', 1, 4, 'intron', None],
[6, '-', 'intron', 1, 2, 'intron', None],
[5, '-', 'exon', 0, 1, 'exon', None]]
cols = ['edge_id', 'strand', 'edge_type', 'v1', 'v2', 'color', 'line']
ctrl_edge_df = pd.DataFrame(data=data, columns=cols)
ctrl_edge_df = swan.create_dupe_index(ctrl_edge_df, 'edge_id')
ctrl_edge_df = swan.set_dupe_index(ctrl_edge_df, 'edge_id')
# loc_df
data = [[0, 'chr2', 100, False, True, False, 'TSS', None, None],
[1, 'chr2', 80, True, False, False, 'internal', None, None],
[2, 'chr2', 75, True, False, False, 'internal', None, None],
[3, 'chr2', 65, True, False, False, 'internal', None, None],
[4, 'chr2', 60, True, False, False, 'internal', None, None],
[5, 'chr2', 50, True, False, True, 'TES'],
[6, 'chr2', 45, False, False, True, 'TES']]
cols = ['vertex_id', 'chrom', 'coord', 'internal', 'TSS', 'TES', \
'color', 'edgecolor', 'linewidth']
ctrl_loc_df = pd.DataFrame(data=data, columns=cols)
ctrl_loc_df = swan.create_dupe_index(ctrl_loc_df, 'vertex_id')
ctrl_loc_df = swan.set_dupe_index(ctrl_loc_df, 'vertex_id')
check_dfs(sg.pg.loc_df, ctrl_loc_df, sg.pg.edge_df, ctrl_edge_df)
# test init_plot_settings - going from plotting gene summary to
# transcript path (same gene)
def test_init_1(self):
sg = swan.SwanGraph()
sg.add_transcriptome('files/test_full.gtf')
sg.plot_graph('test2_gid', display=False)
sg.plot_transcript_path('test2', display=False)
# edge_df
sg.pg.edge_df.drop(['curve'], axis=1, inplace=True) # not checking this
data = [[9, '-', 'exon', 5, 6, 'exon', None],
[8, '-', 'intron', 4, 5, 'intron', None],
[12, '-', 'exon', 4, 5, 'exon_gray', None],
[14, '-', 'intron', 3, 5, 'intron_gray', None],
[7, '-', 'exon', 2, 4, 'exon', None],
[13, '-', 'exon', 2, 3, 'exon_gray', None],
[11, '-', 'intron', 1, 4, 'intron_gray', None],
[6, '-', 'intron', 1, 2, 'intron', None],
[5, '-', 'exon', 0, 1, 'exon', None]]
cols = ['edge_id', 'strand', 'edge_type', 'v1', 'v2', 'color', 'line']
ctrl_edge_df = pd.DataFrame(data=data, columns=cols)
ctrl_edge_df = swan.create_dupe_index(ctrl_edge_df, 'edge_id')
ctrl_edge_df = swan.set_dupe_index(ctrl_edge_df, 'edge_id')
# loc_df
# sg.pg.loc_df.drop(['annotation'], axis=1, inplace=True)
data = [[0, 'chr2', 100, False, True, False, 'TSS', None, None],
[1, 'chr2', 80, True, False, False, 'internal', None, None],
[2, 'chr2', 75, True, False, False, 'internal', None, None],
[3, 'chr2', 65, True, False, False, 'internal_gray', None, None],
[4, 'chr2', 60, True, False, False, 'internal', None, None],
[5, 'chr2', 50, True, False, True, 'internal', None, None],
[6, 'chr2', 45, False, False, True, 'TES', None, None]]
cols = ['vertex_id', 'chrom', 'coord', 'internal', 'TSS', 'TES', \
'color', 'edgecolor', 'linewidth']
ctrl_loc_df = pd.DataFrame(data=data, columns=cols)
ctrl_loc_df = swan.create_dupe_index(ctrl_loc_df, 'vertex_id')
ctrl_loc_df = swan.set_dupe_index(ctrl_loc_df, 'vertex_id')
check_dfs(sg.pg.loc_df, ctrl_loc_df, sg.pg.edge_df, ctrl_edge_df)
# test init_plot_settings - going from plotting gene summary to
# transcript path (different gene)
def test_init_2(self):
sg = swan.SwanGraph()
sg.add_transcriptome('files/test_full.gtf')
sg.plot_graph('test4_gid', display=False)
sg.plot_transcript_path('test2', display=False)
# edge_df
sg.pg.edge_df.drop(['curve'], axis=1, inplace=True) # not checking this
data = [[9, '-', 'exon', 5, 6, 'exon', None],
[8, '-', 'intron', 4, 5, 'intron', None],
[12, '-', 'exon', 4, 5, 'exon_gray', None],
[14, '-', 'intron', 3, 5, 'intron_gray', None],
[7, '-', 'exon', 2, 4, 'exon', None],
[13, '-', 'exon', 2, 3, 'exon_gray', None],
[11, '-', 'intron', 1, 4, 'intron_gray', None],
[6, '-', 'intron', 1, 2, 'intron', None],
[5, '-', 'exon', 0, 1, 'exon', None]]
cols = ['edge_id', 'strand', 'edge_type', 'v1', 'v2', 'color', 'line']
ctrl_edge_df = pd.DataFrame(data=data, columns=cols)
ctrl_edge_df = swan.create_dupe_index(ctrl_edge_df, 'edge_id')
ctrl_edge_df = swan.set_dupe_index(ctrl_edge_df, 'edge_id')
# loc_df
# sg.pg.loc_df.drop(['annotation'], axis=1, inplace=True)
data = [[0, 'chr2', 100, False, True, False, 'TSS', None, None],
[1, 'chr2', 80, True, False, False, 'internal', None, None],
[2, 'chr2', 75, True, False, False, 'internal', None, None],
[3, 'chr2', 65, True, False, False, 'internal_gray', None, None],
[4, 'chr2', 60, True, False, False, 'internal', None, None],
[5, 'chr2', 50, True, False, True, 'internal', None, None],
[6, 'chr2', 45, False, False, True, 'TES', None, None]]
cols = ['vertex_id', 'chrom', 'coord', 'internal', 'TSS', 'TES', \
'color', 'edgecolor', 'linewidth']
ctrl_loc_df = pd.DataFrame(data=data, columns=cols)
ctrl_loc_df = swan.create_dupe_index(ctrl_loc_df, 'vertex_id')
ctrl_loc_df = swan.set_dupe_index(ctrl_loc_df, 'vertex_id')
check_dfs(sg.pg.loc_df, ctrl_loc_df, sg.pg.edge_df, ctrl_edge_df)
# test - indicate_dataset
def test_summary_1(self):
sg = swan.SwanGraph()
sg.add_transcriptome('files/test_full.gtf')
sg.datasets = ['dataset_1', 'dataset_2']
sg.t_df['dataset_1'] = [False, False, False, False, True]
sg.t_df['dataset_2'] = [True, True, True, True, True]
sg.edge_df['dataset_1'] = False
sg.edge_df['dataset_2'] = True
edges = [5,11,12]
sg.edge_df.loc[edges, 'dataset_1'] = True
sg.loc_df['dataset_1'] = False
sg.loc_df['dataset_2'] = True
locs = [12,11,8,7]
sg.loc_df.loc[locs, 'dataset_1'] = True
sg.plot_graph('test2_gid', display=False, indicate_dataset='dataset_1')
# edge_df
sg.pg.edge_df.drop(['dataset_1', 'dataset_2', 'curve'], axis=1, inplace=True) # not checking this
data = [[9, '-', 'exon', 5, 6, 'exon', None],
[8, '-', 'intron', 4, 5, 'intron', None],
[12, '-', 'exon', 4, 5, 'exon', 'dashed'],
[14, '-', 'intron', 3, 5, 'intron', None],
[7, '-', 'exon', 2, 4, 'exon', None],
[13, '-', 'exon', 2, 3, 'exon', None],
[11, '-', 'intron', 1, 4, 'intron', 'dashed'],
[6, '-', 'intron', 1, 2, 'intron', None],
[5, '-', 'exon', 0, 1, 'exon', 'dashed']]
cols = ['edge_id', 'strand', 'edge_type', 'v1', 'v2', 'color', 'line']
ctrl_edge_df = pd.DataFrame(data=data, columns=cols)
ctrl_edge_df = swan.create_dupe_index(ctrl_edge_df, 'edge_id')
ctrl_edge_df = swan.set_dupe_index(ctrl_edge_df, 'edge_id')
# loc_df
sg.pg.loc_df.drop(['dataset_1', 'dataset_2'], axis=1, inplace=True)
data = [[0, 'chr2', 100, False, True, False, 'TSS', 'node_outline', 2],
[1, 'chr2', 80, True, False, False, 'internal', 'node_outline', 2],
[2, 'chr2', 75, True, False, False, 'internal', None, None],
[3, 'chr2', 65, True, False, False, 'internal', None, None],
[4, 'chr2', 60, True, False, False, 'internal', 'node_outline', 2],
[5, 'chr2', 50, True, False, True, 'TES', 'node_outline', 2],
[6, 'chr2', 45, False, False, True, 'TES', None, None]]
cols = ['vertex_id', 'chrom', 'coord', 'internal', 'TSS', 'TES', \
'color', 'edgecolor', 'linewidth']
        ctrl_loc_df = pd.DataFrame(data=data, columns=cols)
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 6 12:13:22 2020
@author: <NAME>
ReScale the data
"""
# Import library
import pandas as pd
import numpy as np
import itertools
from sklearn.preprocessing import MinMaxScaler
from sklearn.mixture import GaussianMixture
def rescale (adata, gate=None, return_gates=False, failed_markers=None, method='all'):
'''
Parameters:
data: AnnData object
gate: DataFrame with first column as markers and second column as the gate values in log1p scale
failed_markers: list. list of markers that are not expressed at all in any cell. pass in as ['CD20', 'CD3D']
Returns:
        AnnData object with rescaled data
Example:
adata = rescale (adata, gate=manual_gate, failed_markers=['CD20', 'CD21'])
'''
def rescale_independent (adata, gate=None, return_gates=False, failed_markers=None):
print('Scaling Image '+ str(adata.obs['ImageId'].unique()))
# Copy of the raw data if it exisits
if adata.raw is not None:
adata.X = adata.raw.X
data = pd.DataFrame(adata.X, columns = adata.var.index, index= adata.obs.index)
# Merging the manual gates and non-working markers togeather if any
if gate is not None:
m_markers = list(gate.iloc[:,0])
manual_gate_markers = gate
if failed_markers != None:
manual_gate_markers = pd.DataFrame(data[failed_markers].quantile(0.9999999))
manual_gate_markers['markers'] = failed_markers
# move column to front
cols = manual_gate_markers.columns.tolist()
cols.insert(0, cols.pop(cols.index('markers')))
manual_gate_markers = manual_gate_markers.reindex(columns= cols)
manual_gate_markers.columns = ['marker', 'gate']
m_markers = failed_markers
if gate is not None and failed_markers != None:
m_markers = list(gate.iloc[:,0]) + list(manual_gate_markers.iloc[:,0])
gate.columns = ['marker', 'gate']
manual_gate_markers = pd.concat([gate, manual_gate_markers])
if gate is None and failed_markers == None:
m_markers = []
# Find markers to send to gmm modelling
if gate is not None or failed_markers is not None:
gmm_markers = list(np.setdiff1d(data.columns, m_markers))
else:
gmm_markers = list(data.columns)
# If manual gate is not provided scale the data
if len(gmm_markers) != 0:
gmm_data = data[gmm_markers]
# Clip off the 99th percentile
def clipping (x):
clip = x.clip(lower =np.percentile(x,1), upper=np.percentile(x,99)).tolist()
return clip
# Run the function
gmm_data = gmm_data.apply(clipping)
# Scaling the data
sum_data = gmm_data.sum(axis=1) # Calculate total count for each cell
n_count = gmm_data.div(sum_data, axis=0) # Divide genes by total count for every cell
med = np.median(list(itertools.chain(*gmm_data.values.tolist()))) # Calculate median count of the entire dataset
n_count = n_count*med # Multiply by scaling fator (median count of entire dataset)
n_log = np.log1p(n_count) # Log transform data
scaler = MinMaxScaler(feature_range=(0, 1))
s = scaler.fit_transform(n_log)
normalised_data = pd.DataFrame(s, columns = gmm_data.columns, index= gmm_data.index)
# Gaussian fit to identify the gate for each marker and scale based on the gate
# Empty data frame to hold the results
all_gmm_data = pd.DataFrame()
def gmm_gating (data, marker, return_gates):
# Print
print('Finding the optimal gate for ' + str(marker))
# Identify the marker to fit the model
m = data[marker].values
# Perform GMM
data_gm = m.reshape(-1, 1)
#gmm = GaussianMixture(n_components=2, means_init=[[0],[1]],covariance_type='tied')
gmm = GaussianMixture(n_components=2)
gmm.fit(data_gm)
gate = np.mean(gmm.means_)
# Find the closest value to the gate
absolute_val_array = np.abs(m - gate)
smallest_difference_index = absolute_val_array.argmin()
closest_element = m[smallest_difference_index]
# rescale the data based on the identified gate
marker_study = pd.DataFrame(m, index= data.index)
marker_study = marker_study.sort_values(0)
# Find the index of the gate
gate_index = marker_study.index[marker_study[0] == closest_element][0]
# Split into high and low groups
high = marker_study.loc[gate_index:,:]
low = marker_study.loc[:gate_index,:]
# Prepare for scaling the high and low dataframes
scaler_high = MinMaxScaler(feature_range=(0.5, 1))
scaler_low = MinMaxScaler(feature_range=(0, 0.5))
# Scale it
h = pd.DataFrame(scaler_high.fit_transform(high), index = high.index)
l = pd.DataFrame(scaler_low.fit_transform(low), index = low.index)
# Merge the high and low and resort it
scaled_data = pd.concat([l,h])
scaled_data = scaled_data.loc[~scaled_data.index.duplicated(keep='first')]
scaled_data = scaled_data.reindex(data.index)
#return scaled_data
if return_gates == True:
return gate
else:
return scaled_data
# Apply the function
r_gmm_gating = lambda x: gmm_gating(data=normalised_data, marker=x,return_gates=return_gates) # Create lamda function
all_gmm_data = list(map(r_gmm_gating, gmm_markers)) # Apply function
            all_gmm_data = pd.concat(all_gmm_data, axis=1, sort=False)
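            # Core of the gating idea used in gmm_gating above, in isolation (sketch
            # only; `marker_values` stands for one marker's 1-D expression vector):
            #   gm = GaussianMixture(n_components=2).fit(marker_values.reshape(-1, 1))
            #   gate = np.mean(gm.means_)   # values above the gate are rescaled into
            #                               # [0.5, 1], values below into [0, 0.5]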
from datetime import (
datetime,
timedelta,
)
from importlib import reload
import string
import sys
import numpy as np
import pytest
from pandas._libs.tslibs import iNaT
import pandas.util._test_decorators as td
from pandas import (
NA,
Categorical,
CategoricalDtype,
Index,
Interval,
NaT,
Series,
Timedelta,
Timestamp,
cut,
date_range,
)
import pandas._testing as tm
class TestAstypeAPI:
def test_arg_for_errors_in_astype(self):
# see GH#14878
ser = Series([1, 2, 3])
msg = (
r"Expected value of kwarg 'errors' to be one of \['raise', "
r"'ignore'\]\. Supplied value is 'False'"
)
with pytest.raises(ValueError, match=msg):
ser.astype(np.float64, errors=False)
ser.astype(np.int8, errors="raise")
@pytest.mark.parametrize("dtype_class", [dict, Series])
def test_astype_dict_like(self, dtype_class):
# see GH#7271
ser = Series(range(0, 10, 2), name="abc")
dt1 = dtype_class({"abc": str})
result = ser.astype(dt1)
expected = Series(["0", "2", "4", "6", "8"], name="abc")
tm.assert_series_equal(result, expected)
dt2 = dtype_class({"abc": "float64"})
result = ser.astype(dt2)
expected = Series([0.0, 2.0, 4.0, 6.0, 8.0], dtype="float64", name="abc")
tm.assert_series_equal(result, expected)
dt3 = dtype_class({"abc": str, "def": str})
msg = (
"Only the Series name can be used for the key in Series dtype "
r"mappings\."
)
with pytest.raises(KeyError, match=msg):
ser.astype(dt3)
dt4 = dtype_class({0: str})
with pytest.raises(KeyError, match=msg):
ser.astype(dt4)
# GH#16717
# if dtypes provided is empty, it should error
if dtype_class is Series:
dt5 = dtype_class({}, dtype=object)
else:
dt5 = dtype_class({})
with pytest.raises(KeyError, match=msg):
ser.astype(dt5)
class TestAstype:
@pytest.mark.parametrize("dtype", np.typecodes["All"])
def test_astype_empty_constructor_equality(self, dtype):
# see GH#15524
if dtype not in (
"S",
"V", # poor support (if any) currently
"M",
"m", # Generic timestamps raise a ValueError. Already tested.
):
init_empty = Series([], dtype=dtype)
with tm.assert_produces_warning(DeprecationWarning):
as_type_empty = Series([]).astype(dtype)
tm.assert_series_equal(init_empty, as_type_empty)
@pytest.mark.parametrize("dtype", [str, np.str_])
@pytest.mark.parametrize(
"series",
[
Series([string.digits * 10, tm.rands(63), tm.rands(64), tm.rands(1000)]),
Series([string.digits * 10, tm.rands(63), tm.rands(64), np.nan, 1.0]),
],
)
def test_astype_str_map(self, dtype, series):
# see GH#4405
result = series.astype(dtype)
expected = series.map(str)
tm.assert_series_equal(result, expected)
def test_astype_float_to_period(self):
result = Series([np.nan]).astype("period[D]")
expected = Series([NaT], dtype="period[D]")
tm.assert_series_equal(result, expected)
def test_astype_no_pandas_dtype(self):
# https://github.com/pandas-dev/pandas/pull/24866
ser = Series([1, 2], dtype="int64")
# Don't have PandasDtype in the public API, so we use `.array.dtype`,
# which is a PandasDtype.
result = ser.astype(ser.array.dtype)
tm.assert_series_equal(result, ser)
@pytest.mark.parametrize("dtype", [np.datetime64, np.timedelta64])
def test_astype_generic_timestamp_no_frequency(self, dtype, request):
# see GH#15524, GH#15987
data = [1]
s = Series(data)
if np.dtype(dtype).name not in ["timedelta64", "datetime64"]:
mark = pytest.mark.xfail(reason="GH#33890 Is assigned ns unit")
request.node.add_marker(mark)
msg = (
fr"The '{dtype.__name__}' dtype has no unit\. "
fr"Please pass in '{dtype.__name__}\[ns\]' instead."
)
with pytest.raises(ValueError, match=msg):
s.astype(dtype)
def test_astype_dt64_to_str(self):
# GH#10442 : testing astype(str) is correct for Series/DatetimeIndex
dti = date_range("2012-01-01", periods=3)
result = Series(dti).astype(str)
expected = Series(["2012-01-01", "2012-01-02", "2012-01-03"], dtype=object)
tm.assert_series_equal(result, expected)
def test_astype_dt64tz_to_str(self):
# GH#10442 : testing astype(str) is correct for Series/DatetimeIndex
dti_tz = date_range("2012-01-01", periods=3, tz="US/Eastern")
result = Series(dti_tz).astype(str)
expected = Series(
[
"2012-01-01 00:00:00-05:00",
"2012-01-02 00:00:00-05:00",
"2012-01-03 00:00:00-05:00",
],
dtype=object,
)
tm.assert_series_equal(result, expected)
def test_astype_datetime(self):
s = Series(iNaT, dtype="M8[ns]", index=range(5))
s = s.astype("O")
assert s.dtype == np.object_
s = Series([datetime(2001, 1, 2, 0, 0)])
s = s.astype("O")
assert s.dtype == np.object_
s = Series([datetime(2001, 1, 2, 0, 0) for i in range(3)])
s[1] = np.nan
assert s.dtype == "M8[ns]"
s = s.astype("O")
assert s.dtype == np.object_
def test_astype_datetime64tz(self):
s = Series(date_range("20130101", periods=3, tz="US/Eastern"))
# astype
result = s.astype(object)
expected = Series(s.astype(object), dtype=object)
tm.assert_series_equal(result, expected)
result = Series(s.values).dt.tz_localize("UTC").dt.tz_convert(s.dt.tz)
tm.assert_series_equal(result, s)
# astype - object, preserves on construction
result = Series(s.astype(object))
expected = s.astype(object)
tm.assert_series_equal(result, expected)
# astype - datetime64[ns, tz]
with tm.assert_produces_warning(FutureWarning):
# dt64->dt64tz astype deprecated
result = Series(s.values).astype("datetime64[ns, US/Eastern]")
tm.assert_series_equal(result, s)
with tm.assert_produces_warning(FutureWarning):
# dt64->dt64tz astype deprecated
result = Series(s.values).astype(s.dtype)
tm.assert_series_equal(result, s)
result = s.astype("datetime64[ns, CET]")
expected = Series(date_range("20130101 06:00:00", periods=3, tz="CET"))
tm.assert_series_equal(result, expected)
def test_astype_str_cast_dt64(self):
# see GH#9757
ts = Series([Timestamp("2010-01-04 00:00:00")])
s = ts.astype(str)
expected = Series(["2010-01-04"])
tm.assert_series_equal(s, expected)
ts = Series([Timestamp("2010-01-04 00:00:00", tz="US/Eastern")])
s = ts.astype(str)
expected = Series(["2010-01-04 00:00:00-05:00"])
tm.assert_series_equal(s, expected)
def test_astype_str_cast_td64(self):
# see GH#9757
td = Series([Timedelta(1, unit="d")])
ser = td.astype(str)
expected = Series(["1 days"])
tm.assert_series_equal(ser, expected)
def test_dt64_series_astype_object(self):
dt64ser = Series(date_range("20130101", periods=3))
result = dt64ser.astype(object)
assert isinstance(result.iloc[0], datetime)
assert result.dtype == np.object_
def test_td64_series_astype_object(self):
tdser = Series(["59 Days", "59 Days", "NaT"], dtype="timedelta64[ns]")
result = tdser.astype(object)
assert isinstance(result.iloc[0], timedelta)
assert result.dtype == np.object_
@pytest.mark.parametrize(
"data, dtype",
[
(["x", "y", "z"], "string"),
pytest.param(
["x", "y", "z"],
"arrow_string",
marks=td.skip_if_no("pyarrow", min_version="1.0.0"),
),
(["x", "y", "z"], "category"),
(3 * [Timestamp("2020-01-01", tz="UTC")], None),
(3 * [Interval(0, 1)], None),
],
)
@pytest.mark.parametrize("errors", ["raise", "ignore"])
def test_astype_ignores_errors_for_extension_dtypes(self, data, dtype, errors):
# https://github.com/pandas-dev/pandas/issues/35471
from pandas.core.arrays.string_arrow import ArrowStringDtype # noqa: F401
ser = Series(data, dtype=dtype)
if errors == "ignore":
expected = ser
result = ser.astype(float, errors="ignore")
tm.assert_series_equal(result, expected)
else:
msg = "(Cannot cast)|(could not convert)"
with pytest.raises((ValueError, TypeError), match=msg):
ser.astype(float, errors=errors)
@pytest.mark.parametrize("dtype", [np.float16, np.float32, np.float64])
def test_astype_from_float_to_str(self, dtype):
# https://github.com/pandas-dev/pandas/issues/36451
s = Series([0.1], dtype=dtype)
result = s.astype(str)
expected = Series(["0.1"])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"value, string_value",
[
(None, "None"),
(np.nan, "nan"),
(NA, "<NA>"),
],
)
def test_astype_to_str_preserves_na(self, value, string_value):
# https://github.com/pandas-dev/pandas/issues/36904
s = Series(["a", "b", value], dtype=object)
result = s.astype(str)
expected = Series(["a", "b", string_value], dtype=object)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("dtype", ["float32", "float64", "int64", "int32"])
def test_astype(self, dtype):
s = Series(np.random.randn(5), name="foo")
as_typed = s.astype(dtype)
assert as_typed.dtype == dtype
assert as_typed.name == s.name
@pytest.mark.parametrize("value", [np.nan, np.inf])
@pytest.mark.parametrize("dtype", [np.int32, np.int64])
def test_astype_cast_nan_inf_int(self, dtype, value):
# gh-14265: check NaN and inf raise error when converting to int
msg = "Cannot convert non-finite values \\(NA or inf\\) to integer"
s = Series([value])
with pytest.raises(ValueError, match=msg):
s.astype(dtype)
@pytest.mark.parametrize("dtype", [int, np.int8, np.int64])
def test_astype_cast_object_int_fail(self, dtype):
arr = Series(["car", "house", "tree", "1"])
msg = r"invalid literal for int\(\) with base 10: 'car'"
with pytest.raises(ValueError, match=msg):
arr.astype(dtype)
def test_astype_cast_object_int(self):
        arr = Series(["1", "2", "3", "4"], dtype=object)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""ML Walk-Through"""
# pylint: disable=R0916, W0104, unused-argument, W0621
from __future__ import (division, absolute_import, print_function,
unicode_literals)
import os
# Pandas and numpy for data manipulation
import numpy as np
import pandas as pd
# Seaborn for visualization
import seaborn as sns
# Matplotlib visualization
import matplotlib.pyplot as plt
# Internal ipython tool for setting figure size
from IPython.core.pylabtools import figsize
# Splitting data into training and testing
from sklearn.model_selection import train_test_split
# No warnings about setting value on copy of slice
pd.options.mode.chained_assignment = None
# Display up to 60 columns of a dataframe
pd.set_option('display.max_columns', 60)
# Set default font size
plt.rcParams['font.size'] = 24
# Seaborn Font Size
sns.set(font_scale=2)
# Get File Directory
WORK_DIR = os.path.dirname((os.path.realpath(__file__)))
# Read in data into a dataframe
data = pd.read_csv(WORK_DIR + "/data/Energy_and_Water_Data_Disclosure_for\
_Local_Law_84_2017__Data_for_Calendar_Year_2016_.csv")
# Display top of dataframe
print(data.head())
# See the column data types and non-missing values
data.info()
# Replace all occurrences of Not Available with numpy not a number
data = data.replace({'Not Available': np.nan})
# Iterate through the columns
for col in list(data.columns):
# Select columns that should be numeric
if ('ft²' in col or 'kBtu' in col or 'Metric Tons CO2e' in col
or 'kWh' in col or 'therms' in col or 'gal' in col
or 'Score' in col):
# Convert the data type to float
data[col] = data[col].astype(float)
# Statistics for each column
print(data.describe())
# Missing Values (From https://stackoverflow.com/questions/26266362)
# Function to calculate missing values by column
def missing_values_table(df):
# Total missing values
mis_val = df.isnull().sum()
# Percentage of missing values
mis_val_percent = 100 * df.isnull().sum() / len(df)
# Make a table with the results
mis_val_table = pd.concat([mis_val, mis_val_percent], axis=1)
# Rename the columns
mis_val_table_ren_columns = mis_val_table.rename(columns={
0: 'Missing Values',
1: '% of Total Values'
})
# Sort the table by percentage of missing descending
mis_val_table_ren_columns = mis_val_table_ren_columns[
mis_val_table_ren_columns.iloc[:, 1] != 0].sort_values(
'% of Total Values', ascending=False).round(1)
# Print some summary information
print("Your selected dataframe has " + str(df.shape[1]) + " columns.\n"
"There are " + str(mis_val_table_ren_columns.shape[0]) +
" columns that have missing values.")
# Return the dataframe with missing information
return mis_val_table_ren_columns
missing_values = missing_values_table(data)
# Get the columns with > 50% missing
missing_df = missing_values_table(data)
print(missing_df)
missing_columns = list(missing_df[missing_df['% of Total Values'] > 50].index)
print('We will remove %d columns.' % len(missing_columns))
# Drop the columns
data = data.drop(columns=list(missing_columns))
figsize(8, 8)
# Rename the score
data = data.rename(columns={'ENERGY STAR Score': 'score'})
# Histogram of the Energy Star Score
plt.style.use('fivethirtyeight')
plt.hist(data['score'].dropna(), bins=100, edgecolor='k')
plt.xlabel('Score')
plt.ylabel('Number of Buildings')
plt.title('Energy Star Score Distribution')
plt.show()
# Histogram Plot of Site EUI
figsize(8, 8)
plt.hist(data['Site EUI (kBtu/ft²)'].dropna(), bins=20, edgecolor='black')
plt.xlabel('Site EUI')
plt.ylabel('Count')
plt.title('Site EUI Distribution')
plt.show()
print(data['Site EUI (kBtu/ft²)'].describe())
print(data['Site EUI (kBtu/ft²)'].dropna().sort_values().tail(10))
# One building is clearly far above the rest.
data.loc[data['Site EUI (kBtu/ft²)'] == 869265, :]
# Calculate first and third quartile
first_quartile = data['Site EUI (kBtu/ft²)'].describe()['25%']
third_quartile = data['Site EUI (kBtu/ft²)'].describe()['75%']
# Interquartile range
iqr = third_quartile - first_quartile
# Remove outliers
data = data[(data['Site EUI (kBtu/ft²)'] > (first_quartile - 3 * iqr))
& (data['Site EUI (kBtu/ft²)'] < (third_quartile + 3 * iqr))]
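# The same extended-IQR rule, wrapped as a reusable helper (a sketch added for
# illustration; the walkthrough itself applies the rule inline above).
def remove_outliers_iqr(df, col, k=3):
    """Drop rows of df whose `col` lies outside [Q1 - k*IQR, Q3 + k*IQR]."""
    q1, q3 = df[col].quantile(0.25), df[col].quantile(0.75)
    iqr_ = q3 - q1
    return df[(df[col] > q1 - k * iqr_) & (df[col] < q3 + k * iqr_)]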
# Histogram Plot of Site EUI
figsize(8, 8)
plt.hist(data['Site EUI (kBtu/ft²)'].dropna(), bins=20, edgecolor='black')
plt.xlabel('Site EUI')
plt.ylabel('Count')
plt.title('Site EUI Distribution')
plt.show()
# Create a list of buildings with more than 100 measurements
types = data.dropna(subset=['score'])
types = types['Largest Property Use Type'].value_counts()
types = list(types[types.values > 100].index)
# Plot of distribution of scores for building categories
figsize(12, 10)
# Plot each building
for b_type in types:
# Select the building type
subset = data[data['Largest Property Use Type'] == b_type]
# Density plot of Energy Star scores
sns.kdeplot(subset['score'].dropna(), label=b_type, shade=False, alpha=0.8)
# label the plot
plt.xlabel('Energy Star Score', size=20)
plt.ylabel('Density', size=20)
plt.title('Density Plot of Energy Star Scores by Building Type', size=28)
plt.show()
# Create a list of boroughs with more than 100 observations
boroughs = data.dropna(subset=['score'])
boroughs = boroughs['Borough'].value_counts()
boroughs = list(boroughs[boroughs.values > 100].index)
# Plot of distribution of scores for boroughs
figsize(12, 10)
# Plot each borough distribution of scores
for borough in boroughs:
# Select the building type
subset = data[data['Borough'] == borough]
# Density plot of Energy Star scores
sns.kdeplot(subset['score'].dropna(), label=borough)
# label the plot
plt.xlabel('Energy Star Score', size=20)
plt.ylabel('Density', size=20)
plt.title('Density Plot of Energy Star Scores by Borough', size=28)
plt.show()
# Find all correlations and sort
correlations_data = data.corr()['score'].sort_values()
# Print the most negative correlations
print(correlations_data.head(15), '\n')
# Print the most positive correlations
print(correlations_data.tail(15))
# Select the numeric columns
numeric_subset = data.select_dtypes('number')
# Create columns with square root and log of numeric columns
for col in numeric_subset.columns:
# Skip the Energy Star Score column
if col == 'score':
        continue
else:
numeric_subset['sqrt_' + col] = np.sqrt(numeric_subset[col])
numeric_subset['log_' + col] = np.log(numeric_subset[col])
# Select the categorical columns
categorical_subset = data[['Borough', 'Largest Property Use Type']]
# One hot encode
categorical_subset = pd.get_dummies(categorical_subset)
# Join the two dataframes using concat
# Make sure to use axis = 1 to perform a column bind
features = pd.concat([numeric_subset, categorical_subset], axis=1)
# Drop buildings without an energy star score
features = features.dropna(subset=['score'])
# Find correlations with the score
correlations = features.corr()['score'].dropna().sort_values()
# Display most negative correlations
print(correlations.head(15))
# Display most positive correlations
print(correlations.tail(15))
figsize(12, 10)
# Extract the building types
features['Largest Property Use Type'] = data.dropna(
subset=['score'])['Largest Property Use Type']
# Limit to building types with more than 100 observations (from previous code)
features = features[features['Largest Property Use Type'].isin(types)]
# Use seaborn to plot a scatterplot of Score vs Site EUI
sns.lmplot('Site EUI (kBtu/ft²)',
'score',
hue='Largest Property Use Type',
data=features,
scatter_kws={
'alpha': 0.8,
's': 60
},
fit_reg=False,
size=12,
aspect=1.2)
# Plot labeling
plt.xlabel("Site EUI", size=28)
plt.ylabel('Energy Star Score', size=28)
plt.title('Energy Star Score vs Site EUI', size=36)
plt.show()
# Extract the columns to plot
plot_data = features[[
'score', 'Site EUI (kBtu/ft²)', 'Weather Normalized Source EUI (kBtu/ft²)',
'log_Total GHG Emissions (Metric Tons CO2e)'
]]
# Replace the inf with nan
plot_data = plot_data.replace({np.inf: np.nan, -np.inf: np.nan})
# Rename columns
plot_data = plot_data.rename(
columns={
'Site EUI (kBtu/ft²)': 'Site EUI',
'Weather Normalized Source EUI (kBtu/ft²)': 'Weather Norm EUI',
'log_Total GHG Emissions (Metric Tons CO2e)': 'log GHG Emissions'
})
# Drop na values
plot_data = plot_data.dropna()
# Function to calculate correlation coefficient between two columns
def corr_func(x, y, **kargs):
r = np.corrcoef(x, y)[0][1]
ax = plt.gca()
ax.annotate("r = {:.2f}".format(r),
xy=(.2, .8),
xycoords=ax.transAxes,
size=20)
# Create the pairgrid object
grid = sns.PairGrid(data=plot_data, size=3)
# Upper is a scatter plot
grid.map_upper(plt.scatter, color='red', alpha=0.6)
# Diagonal is a histogram
grid.map_diag(plt.hist, color='red', edgecolor='black')
# Bottom is correlation and density plot
grid.map_lower(corr_func)
grid.map_lower(sns.kdeplot, cmap=plt.cm.Reds)
# Title for entire plot
plt.suptitle('Pairs Plot of Energy Data', size=36, y=1.02)
plt.show()
# Copy the original data
features = data.copy()
# Select the numeric columns
numeric_subset = data.select_dtypes('number')
# Create columns with log of numeric columns
for col in numeric_subset.columns:
# Skip the Energy Star Score column
if col == 'score':
        continue
else:
numeric_subset['log_' + col] = np.log(numeric_subset[col])
# Select the categorical columns
categorical_subset = data[['Borough', 'Largest Property Use Type']]
# One hot encode
categorical_subset = pd.get_dummies(categorical_subset)
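# The block above is cut off in this copy. A minimal sketch of how it presumably
# continues, mirroring the earlier feature-construction block (column-bind the
# numeric and one-hot-encoded categorical subsets, then drop rows without a score):
features = pd.concat([numeric_subset, categorical_subset], axis=1)
features = features.dropna(subset=['score'])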
import sys
import datetime
import os.path
import copy
### Package imports
from PySide2 import QtCore, QtGui
from PySide2.QtWidgets import QFileDialog, QApplication, QMainWindow, QGridLayout, QGroupBox, QWidget, QPushButton, QLayout, QDialog, QLabel, QRadioButton, QVBoxLayout, QCheckBox
import numpy as np
import pyqtgraph as pg
import pandas as pd
from scipy.stats import chisquare, kstest
### SAFT imports
from dataStructures import HistogramFitStore as HFStore
from dataStructures import histogramFitParams
from utils import getRandomString, linePrint, txOutput
from quantal import nGaussians, fit_nGaussians, nGaussians_display, fit_nprGaussians, fit_PoissonGaussians_global, PoissonGaussians_display, nprGaussians_display, fit_nprGaussians_global, nprGaussians, poissonGaussians, cdf
def despace (s):
sp = " "
if sp in s:
return s.split(' ')[0]
else:
return s
class testData():
# if test data is loaded automatically, you can't load any more data
def __init__(self, *args, **kwargs):
self.open_file()
def open_file(self):
self.filename = "ExPeak_Data.xlsx"
#"None" reads all the sheets into a dictionary of data frames
self.histo_df = pd.read_excel(self.filename, None, index_col=0)
#print (self.file_dict)
class ROI_Controls(QtGui.QWidget):
#from Stack Overflow : https://stackoverflow.com/questions/56267781/how-to-make-double-arrow-button-in-pyqt
#and https://programming.vip/docs/pyqt5-note-7-multiple-class-sharing-signals.html
ctrl_signal = QtCore.Signal()
def __init__(self, parent, *args):
super(ROI_Controls, self).__init__(*args)
self.parent = parent
self.ROI_box = QGroupBox("ROI")
l = QGridLayout()
buttonLL = QtGui.QToolButton()
buttonLL.setIcon(buttonLL.style().standardIcon(QtGui.QStyle.SP_MediaSeekBackward))
buttonL = QtGui.QToolButton()
buttonL.setArrowType(QtCore.Qt.LeftArrow)
buttonR = QtGui.QToolButton()
buttonR.setArrowType(QtCore.Qt.RightArrow)
buttonRR = QtGui.QToolButton()
buttonRR.setIcon(buttonRR.style().standardIcon(QtGui.QStyle.SP_MediaSeekForward))
buttonList = [buttonLL, buttonL, buttonR, buttonRR]
bsize = (80, 20)
for b in buttonList:
b.setFixedSize(*bsize)
_clearFitsBtn = QPushButton('Clear recent fits')
buttonList.append(_clearFitsBtn)
_clearFitsBtn.setFixedWidth(150)
_storeAdvBtn = QPushButton('Next ROI, keep fits')
buttonList.append(_storeAdvBtn)
_storeAdvBtn.setFixedWidth(150)
_skipBtn = QPushButton('Next ROI, discard fits')
buttonList.append(_skipBtn)
_skipBtn.setFixedWidth(150)
posn = [(0,0,1,3), (0,3,1,3), (0,6,1,3), (0,9,1,3), (1,0,1,4), (1,4,1,4), (1,8,1,4)]
for counter, btn in enumerate(buttonList):
btn.pressed.connect(lambda val=counter: self.buttonPressed(val))
#self.ctrl_signal.connect(parent.ctrl_signal.emit)
l.addWidget(btn, *posn[counter])
self.ROI_label = QtGui.QLabel("-")
self.ROI_label.setFixedSize(250, 40)
self.ROI_label.setAlignment(QtCore.Qt.AlignLeft | QtCore.Qt.AlignTop)
l.addWidget(self.ROI_label, 2, 0, 1, -1)
self.ROI_box.setLayout(l)
def update_ROI_label(self, t):
self.ROI_label.setText (t)
#with pyside2 5.13 this label (and other GUI items) doesn't update
def buttonPressed(self, _b):
#print (_b)
self.parent.ROI_change_command (_b)
class HDisplay():
def __init__(self, *args, **kwargs):
self.glw = pg.GraphicsLayoutWidget()
self.hrc = (0, 0, 1, 1) #same as below
self.h = self.glw.addPlot(title="<empty>", *self.hrc)
self.h.setLabel('left', "N")
self.h.setLabel('bottom', "dF / F")
self.h.vb.setLimits(yMin=0)
self.h.addLegend(offset=(-10, 5))
self.stack = pg.GraphicsLayout()
def updateTitle(self, newTitle):
self.h.setTitle (newTitle)
def createSplitHistLayout(self, keys):
"""separated view with each Histogram stacked in separate plots"""
# adapted from SAFT
# Store the plot items in a list - can't seem to get them easily otherwise?
data = [] # empty
self.stackMembers = []
for _condition in keys:
memberName = _condition + " histogram"
stack_member = self.stack.addPlot(y=data, name=memberName)
stack_member.vb.setLimits(yMin=0)
stack_member.hideAxis('bottom')
stack_member.addLegend(offset=(-10, 5))
stack_member.setLabel('left', "N")
self.stackMembers.append(stack_member)
self.stack.nextRow()
#print (c, len(self.p1stackMembers))
#link y-axes - using final member of stack as anchor
for s in self.stackMembers:
if s != stack_member:
s.setXLink(stack_member)
s.setYLink(stack_member)
#add back bottom axis to the last graph
stack_member.showAxis("bottom")
stack_member.setLabel('bottom', "dF / F")
class histogramFitDialog(QDialog):
ctrl_signal = QtCore.Signal()
def __init__(self, *args, **kwargs):
super(histogramFitDialog, self).__init__(*args, **kwargs)
_now = datetime.datetime.now()
#can be set to "None" following a long fit routine to stop cycling
self.fitHistogramsOption = "Summed"
self.outputHeader = "{} Logfile\n".format(_now.strftime("%y%m%d-%H%M%S"))
self.hPlot = HDisplay()
self.autoSave = True # must match initial state of checkbox!
self.saveFits = True # must match initial state of checkbox!
self.filename = None
self.dataname = None
self.fitPColumns = ['ROI', 'Fit ID', 'N', 'Pr/mu', 'Events', 'Width', 'dF_Q', 'Test', 'Stat.', 'P_val', 'Type']
self.fitInfoHeader = linePrint(self.fitPColumns)
self.outputF = txOutput(self.outputHeader)
self.current_ROI = None
self.ROI_SD = 0 # the SD for the ROI (assumed similar over conditions)
self.maxFlag = "Auto max" # how to find histogram x-axis
self.fixW = False
self.makeDialog()
"""
def test(self, sender):
print (sender)
self.outputF.appendOutText ('ctrl_button was pressed {}'.format(sender))
"""
def makeDialog(self):
"""Create the controls for the dialog"""
# work through each ROI in turn - fit summed histogram and then convert to quanta from peaks list
self.setWindowTitle("Fit Histograms with Quantal Parameters")
#self.resize(1000, 800)
# panel for file commands and information
_fileOptions = QGroupBox("File")
_fileGrid = QGridLayout()
self.loadBtn = QPushButton('Load')
self.loadBtn.clicked.connect(self.openData)
self.loadBtn.setDisabled(True)
self.saveBtn = QPushButton('Save')
self.saveBtn.clicked.connect(self.save)
self.saveBtn.setDisabled(True)
self.doneBtn = QPushButton('Done')
self.doneBtn.clicked.connect(self.done)
self.doneBtn.setDisabled(True)
self.dataname_label = QtGui.QLabel("No data")
self.autoSaveSwitch = QCheckBox('Auto-save fit results')
self.autoSaveSwitch.setChecked(True)
self.autoSaveSwitch.stateChanged.connect(self.toggleAutoSave)
self.sHFCurvesSwitch = QCheckBox('Save Histogram Fitted Curves')
self.sHFCurvesSwitch.setChecked(True)
self.sHFCurvesSwitch.stateChanged.connect(self.toggleSaveFits)
_fileGrid.addWidget(self.loadBtn, 0, 0, 1, 2)
_fileGrid.addWidget(self.saveBtn, 0, 2, 1, 2)
_fileGrid.addWidget(self.doneBtn, 0, 4, 1, 2)
_fileGrid.addWidget(self.autoSaveSwitch, 2, 0, 1, 3)
_fileGrid.addWidget(self.dataname_label, 1, 0, 1, -1)
_fileGrid.addWidget(self.sHFCurvesSwitch, 2, 3, 1, 3)
_fileOptions.setLayout(_fileGrid)
# panel of display options for histograms
_histOptions = QGroupBox("Histogram options")
_histGrid = QGridLayout()
_NBin_label = QtGui.QLabel("No. of bins")
_NBin_label.setAlignment(QtCore.Qt.AlignRight | QtCore.Qt.AlignVCenter)
self.histo_NBin_Spin = pg.SpinBox(value=100, step=10, bounds=[10, 250], delay=0)
self.histo_NBin_Spin.setFixedSize(80, 25)
self.histo_NBin_Spin.valueChanged.connect(self.updateHistograms)
_histMax_label = QtGui.QLabel("Max dF/F")
_histMax_label.setAlignment(QtCore.Qt.AlignRight | QtCore.Qt.AlignVCenter)
self.histo_Max_Spin = pg.SpinBox(value=1, step=0.1, bounds=[0.1, 10], delay=0, int=False)
self.histo_Max_Spin.setFixedSize(80, 25)
self.histo_Max_Spin.valueChanged.connect(self.setManualMax)
# toggle show ROI histogram sum
_histsum_label = QtGui.QLabel("Display histograms")
_histsum_label.setAlignment(QtCore.Qt.AlignRight | QtCore.Qt.AlignVCenter)
self.sum_hist_option = pg.ComboBox()
self.sum_hist_option.setFixedSize(100,25)
self.sum_hist_option.addItems(['Summed', 'Separated'])
self.split_state = False #because summed is first in list and default
self.sum_hist_option.currentIndexChanged.connect(self.updateHistograms)
_histGrid.addWidget(_NBin_label, 2, 0)
_histGrid.addWidget(_histMax_label, 1, 0)
_histGrid.addWidget(_histsum_label, 0, 0)
_histGrid.addWidget(self.histo_NBin_Spin, 2, 1)
_histGrid.addWidget(self.histo_Max_Spin, 1, 1)
_histGrid.addWidget(self.sum_hist_option, 0, 1)
_histOptions.setLayout(_histGrid)
# panel of fit parameters and commands
_fittingPanel = QGroupBox("Fitting")
_fitGrid = QGridLayout()
#_globFit_label = QtGui.QLabel("find N, Pr; common dF (q), w")
_bruteNFitBtn = QPushButton('Find N by brute force summed')
_bruteNFitBtn.clicked.connect(self.bruteFindNFromSummed)
_hist_W_label = QtGui.QLabel("Gaussian widths (from SD)")
self.histo_W_Spin = pg.SpinBox(value=self.ROI_SD, step=0.005, delay=0, int=False)
self.histo_W_Spin.setFixedSize(80, 25)
_hist_nG_label = QtGui.QLabel("Gaussian components")
#_hist_nG_label.setAlignment(QtCore.Qt.AlignRight | QtCore.Qt.AlignVCenter)
self.histo_nG_Spin = pg.SpinBox(value=6, step=1, bounds=[1,20], delay=0, int=True)
self.histo_nG_Spin.setFixedSize(50, 25)
self.histo_nG_Spin.setAlignment(QtCore.Qt.AlignRight)
#print (self.histo_nG_Spin.alignment())
#self.histo_nG_Spin.valueChanged.connect(self.updateHistograms) ###SHOULD IT?
_hist_q_label = QtGui.QLabel("Quantal size (dF)")
#_hist_q_label.setAlignment(QtCore.Qt.AlignRight | QtCore.Qt.AlignVCenter)
self.histo_q_Spin = pg.SpinBox(value=.05, step=0.005, bounds=[0.005,1], delay=0, int=False)
self.histo_q_Spin.setFixedSize(80, 25)
self.histo_q_Spin.setAlignment(QtCore.Qt.AlignRight)
#self.histo_q_Spin.valueChanged.connect(self.updateHistograms)
self.fixWtoSDSwitch = QCheckBox('Fix W according to SD')
self.fixWtoSDSwitch.setChecked(False)
self.fixWtoSDSwitch.setDisabled(True)
self.fixWtoSDSwitch.stateChanged.connect(self.toggleFixW)
_separateBinomial_label = QtGui.QLabel("fixed dF (q), w")
self.separateBinomialBtn = QPushButton('Separate Binomial fits')
self.separateBinomialBtn.clicked.connect(self.separateBinomialFits)
self.separateBinomialBtn.setDisabled(True)
_sumFit_label = QtGui.QLabel("free dF (q), w, amplitudes")
_doFitBtn = QPushButton('Fit Summed')
_doFitBtn.clicked.connect(self.fitGaussians)
_globFit_label = QtGui.QLabel("find N, Pr; common dF (q), w")
_globFitBtn = QPushButton('Global Binomial fit')
_globFitBtn.clicked.connect(self.binomialFitGlobalGaussians)
_PoissonGlobalFit_label = QtGui.QLabel("find mu; common dF (q), w")
_PoissonGlobalFitBtn = QPushButton('Global Poisson fit')
_PoissonGlobalFitBtn.clicked.connect(self.poissonFitGlobalGaussians)
_fitInfoLabel = QtGui.QLabel("Fits for current ROI")
_fitInfoLabel.setFixedSize(120,20)
self.fitInfo = txOutput(self.fitInfoHeader)
self.fitInfo.size(480, 130)
_fitGrid.addWidget(_hist_nG_label, 1, 0)
_fitGrid.addWidget(self.histo_nG_Spin, 1, 1)
_fitGrid.addWidget(_hist_q_label, 2, 0)
_fitGrid.addWidget(self.histo_q_Spin, 2, 1)
_fitGrid.addWidget(_hist_W_label, 3, 0)
_fitGrid.addWidget(self.histo_W_Spin, 3, 1)
_fitGrid.addWidget(self.fixWtoSDSwitch, 4, 0, 1 ,2)
_fitGrid.addWidget(_bruteNFitBtn, 0, 0, 1, 3)
_fitGrid.addWidget(_doFitBtn, 1, 2, 1, 1)
_fitGrid.addWidget(_sumFit_label, 1, 3, 1, 1)
_fitGrid.addWidget(self.separateBinomialBtn, 2, 2, 1, 1)
_fitGrid.addWidget(_separateBinomial_label, 2, 3, 1, 1)
_fitGrid.addWidget(_globFitBtn, 3, 2, 1, 1)
_fitGrid.addWidget(_globFit_label, 3, 3, 1, 1)
_fitGrid.addWidget(_PoissonGlobalFitBtn, 4, 2, 1, 1)
_fitGrid.addWidget(_PoissonGlobalFit_label, 4, 3, 1, 1)
_fitGrid.addWidget(_fitInfoLabel, 0, 5, 1, 1)
_fitGrid.addWidget(self.fitInfo.frame, 1, 5, -1, 1)
_fittingPanel.setLayout(_fitGrid)
# histogram analysis layout
self.hlayout = QGridLayout()
_secondcolW = 450
# histogram view
self.histogramLayPos = (0, 0, 4, 1)
self.hlayout.addWidget(self.hPlot.glw, *self.histogramLayPos)
# ROI controls
self.RC = ROI_Controls(self) #need to send instance as parent
self.RC.ROI_box.setFixedSize(_secondcolW, 120)
self.hlayout.addWidget(self.RC.ROI_box, 1, 1, 1, 1)
# Display options for the histograms
_histOptions.setFixedSize(_secondcolW, 120)
self.hlayout.addWidget(_histOptions, 3, 1, 1, 1)
# File controls
_fileOptions.setFixedSize(_secondcolW, 120)
self.hlayout.addWidget(_fileOptions, 0, 1, 1, 1)
# Text output console
self.outputF.frame.setFixedSize(_secondcolW, 250)
self.hlayout.addWidget(self.outputF.frame, 2, 1, 1, 1)
# Fitting controls and display
_fittingPanel.setFixedHeight(200)
self.hlayout.addWidget(_fittingPanel, 4, 0, 1, -1)
self.setLayout(self.hlayout)
def setManualMax(self):
self.maxFlag = "Manual max"
self.updateHistograms()
def histogramParameters(self, verbose=False):
""" read bins from GUI and optionally read or calculate x (F) max"""
_nbins = int(self.histo_NBin_Spin.value())
_hsumOpt = self.sum_hist_option.currentText()
if self.maxFlag == "Manual max":
_max = self.histo_Max_Spin.value()
if verbose:
self.outputF.appendOutText ("N_bins {}, manual Max {}".format(_nbins, _max))
elif self.maxFlag == "Auto max":
_max = 0
_ROI = self.current_ROI
for _condition in self.peakResults.keys():
_peaks = self.peakResults[_condition][_ROI]
if _peaks.max() > _max * 1.2:
_max = _peaks.max() * 1.2
if verbose:
self.outputF.appendOutText ("N_bins {}, auto Max {}".format(_nbins, _max))
return _hsumOpt, _nbins, _max
def clearFits(self):
self.outputF.appendOutText ("Discarded fit results from {}".format(self.current_ROI), "red")
# clear current fits and info frame
self.currentROIFits = pd.DataFrame(columns=self.currentROIFits.columns)
self.fitInfo.reset(self.fitInfoHeader)
def skipROI(self):
#self.clearFits()
self.ROI_change_command(2)
self.outputF.appendOutText ("Advance to next ROI: {}".format(self.current_ROI), "magenta")
def toggleAutoSave(self):
if self.autoSaveSwitch.isChecked() == False:
self.autoSave = False
else:
self.autoSave = True
print ("AutoSave is {}.".format(self.autoSave))
def toggleSaveFits(self):
if self.sHFCurvesSwitch.isChecked() == False:
self.saveFits = False
else:
self.saveFits = True
print ("SaveFitsToggle is {}.".format(self.saveFits))
def toggleFixW(self):
if self.fixWtoSDSwitch.isChecked() == False:
self.fixW = False
self.histo_W_Spin.setDisabled(False)
else:
self.fixW = True
self.histo_W_Spin.setDisabled(True)
print ("FixWToggle is {}.".format(self.fixW))
def done(self, *arg):
#print ("done arg {}".format(arg)) ## recursion limit
try:
self.accept() # works if the dialog was called from elsewhere
except:
print ("Bye.")
self.hide() # works if the dialog was called standalone
def save(self, auto=False):
#maybe we just have a filename not a path
## override when autosaving
if auto:
_saveFilename = "HFtemp.xlsx"
# Save was requested by the user
else:
self.outputF.appendOutText ("Keeping {} fit results for {} --\n".format(len(self.currentROIFits.index),self.current_ROI), "Magenta")
self.fitResults = self.fitResults.append(copy.copy(self.currentROIFits), ignore_index=True)
usr = os.path.expanduser("~")
if self.filename:
                if os.path.split(self.filename)[0]:
_outfile = os.path.join(usr,"HFit_" + os.path.split(self.filename)[1])
else:
_outfile = "HFit_" + self.filename
elif self.dataname:
_outfile = os.path.join(usr,"HFit_" + self.dataname + ".xlsx")
else:
_outfile = usr
_saveFilename = QFileDialog.getSaveFileName(self,
"Save Results", _outfile)[0]
            if not _saveFilename:
print ("File dialog failed.")
return
#print ("sfn : {}".format(_saveFilename))
with pd.ExcelWriter(_saveFilename) as writer:
#print (self.fitResults.head(5))
self.fitResults.to_excel(writer, sheet_name="Fit Results", startrow=1)
#optionally all fitted curves are saved
if self.saveFits:
#repack as dict of dataframes
fits_ddf = {}
for k,v in self.Fits_data.items():
fits_ddf[k] = v.df
_fitsDF = pd.concat(fits_ddf, axis=1)
_fitsDF.columns.rename(["Fit ID", "Condition", "Coordinate"], inplace=True )
#print (_fitsDF.head(5))
_fitsDF.to_excel(writer, sheet_name="Histograms & Fitted Curves", startrow=1)
if auto:
print ("Autosaved to {}".format(_saveFilename))
else:
print ("Wrote {} to disk.".format(_outfile))
self.outputF.appendOutText ("Wrote fit results out to disk: {}".format(_outfile))
def storeAdvance(self):
"""storing data and moving forward one ROI"""
self.outputF.appendOutText ("Keeping {} fit results for {} --\n".format(len(self.currentROIFits.index),self.current_ROI), "Magenta")
self.fitResults = self.fitResults.append(copy.copy(self.currentROIFits), ignore_index=True)
if self.autoSave:
self.save(auto=True)
# empty the current fits dataframe
        self.currentROIFits = pd.DataFrame(columns=self.currentROIFits.columns)
import re
import sys
import numpy as np
import pytest
from pandas.compat import PYPY
from pandas import Categorical, Index, NaT, Series, date_range
import pandas._testing as tm
from pandas.api.types import is_scalar
class TestCategoricalAnalytics:
@pytest.mark.parametrize("aggregation", ["min", "max"])
def test_min_max_not_ordered_raises(self, aggregation):
# unordered cats have no min/max
cat = Categorical(["a", "b", "c", "d"], ordered=False)
msg = f"Categorical is not ordered for operation {aggregation}"
agg_func = getattr(cat, aggregation)
with pytest.raises(TypeError, match=msg):
agg_func()
def test_min_max_ordered(self):
cat = Categorical(["a", "b", "c", "d"], ordered=True)
_min = cat.min()
_max = cat.max()
assert _min == "a"
assert _max == "d"
cat = Categorical(
["a", "b", "c", "d"], categories=["d", "c", "b", "a"], ordered=True
)
_min = cat.min()
_max = cat.max()
assert _min == "d"
assert _max == "a"
@pytest.mark.parametrize(
"categories,expected",
[
(list("ABC"), np.NaN),
([1, 2, 3], np.NaN),
pytest.param(
Series(date_range("2020-01-01", periods=3), dtype="category"),
NaT,
marks=pytest.mark.xfail(
reason="https://github.com/pandas-dev/pandas/issues/29962"
),
),
],
)
@pytest.mark.parametrize("aggregation", ["min", "max"])
def test_min_max_ordered_empty(self, categories, expected, aggregation):
# GH 30227
cat = Categorical([], categories=categories, ordered=True)
agg_func = getattr(cat, aggregation)
result = agg_func()
assert result is expected
@pytest.mark.parametrize(
"values, categories",
[(["a", "b", "c", np.nan], list("cba")), ([1, 2, 3, np.nan], [3, 2, 1])],
)
@pytest.mark.parametrize("skipna", [True, False])
@pytest.mark.parametrize("function", ["min", "max"])
def test_min_max_with_nan(self, values, categories, function, skipna):
# GH 25303
cat = Categorical(values, categories=categories, ordered=True)
result = getattr(cat, function)(skipna=skipna)
if skipna is False:
assert result is np.nan
else:
expected = categories[0] if function == "min" else categories[2]
assert result == expected
@pytest.mark.parametrize("function", ["min", "max"])
@pytest.mark.parametrize("skipna", [True, False])
def test_min_max_only_nan(self, function, skipna):
# https://github.com/pandas-dev/pandas/issues/33450
cat = Categorical([np.nan], categories=[1, 2], ordered=True)
result = getattr(cat, function)(skipna=skipna)
assert result is np.nan
@pytest.mark.parametrize("method", ["min", "max"])
def test_deprecate_numeric_only_min_max(self, method):
# GH 25303
cat = Categorical(
[np.nan, 1, 2, np.nan], categories=[5, 4, 3, 2, 1], ordered=True
)
with tm.assert_produces_warning(expected_warning=FutureWarning):
getattr(cat, method)(numeric_only=True)
@pytest.mark.parametrize("method", ["min", "max"])
def test_numpy_min_max_raises(self, method):
cat = Categorical(["a", "b", "c", "b"], ordered=False)
msg = (
f"Categorical is not ordered for operation {method}\n"
"you can use .as_ordered() to change the Categorical to an ordered one"
)
method = getattr(np, method)
with pytest.raises(TypeError, match=re.escape(msg)):
method(cat)
@pytest.mark.parametrize("kwarg", ["axis", "out", "keepdims"])
@pytest.mark.parametrize("method", ["min", "max"])
def test_numpy_min_max_unsupported_kwargs_raises(self, method, kwarg):
cat = Categorical(["a", "b", "c", "b"], ordered=True)
msg = (
f"the '{kwarg}' parameter is not supported in the pandas implementation "
f"of {method}"
)
if kwarg == "axis":
msg = r"`axis` must be fewer than the number of dimensions \(1\)"
kwargs = {kwarg: 42}
method = getattr(np, method)
with pytest.raises(ValueError, match=msg):
method(cat, **kwargs)
@pytest.mark.parametrize("method, expected", [("min", "a"), ("max", "c")])
def test_numpy_min_max_axis_equals_none(self, method, expected):
cat = Categorical(["a", "b", "c", "b"], ordered=True)
method = getattr(np, method)
result = method(cat, axis=None)
assert result == expected
@pytest.mark.parametrize(
"values,categories,exp_mode",
[
([1, 1, 2, 4, 5, 5, 5], [5, 4, 3, 2, 1], [5]),
([1, 1, 1, 4, 5, 5, 5], [5, 4, 3, 2, 1], [5, 1]),
([1, 2, 3, 4, 5], [5, 4, 3, 2, 1], [5, 4, 3, 2, 1]),
([np.nan, np.nan, np.nan, 4, 5], [5, 4, 3, 2, 1], [5, 4]),
([np.nan, np.nan, np.nan, 4, 5, 4], [5, 4, 3, 2, 1], [4]),
([np.nan, np.nan, 4, 5, 4], [5, 4, 3, 2, 1], [4]),
],
)
def test_mode(self, values, categories, exp_mode):
s = Categorical(values, categories=categories, ordered=True)
res = s.mode()
exp = Categorical(exp_mode, categories=categories, ordered=True)
tm.assert_categorical_equal(res, exp)
def test_searchsorted(self, ordered):
# https://github.com/pandas-dev/pandas/issues/8420
# https://github.com/pandas-dev/pandas/issues/14522
cat = Categorical(
["cheese", "milk", "apple", "bread", "bread"],
categories=["cheese", "milk", "apple", "bread"],
ordered=ordered,
)
ser = Series(cat)
# Searching for single item argument, side='left' (default)
res_cat = cat.searchsorted("apple")
assert res_cat == 2
assert is_scalar(res_cat)
res_ser = ser.searchsorted("apple")
assert res_ser == 2
assert is_scalar(res_ser)
# Searching for single item array, side='left' (default)
res_cat = cat.searchsorted(["bread"])
res_ser = ser.searchsorted(["bread"])
exp = np.array([3], dtype=np.intp)
tm.assert_numpy_array_equal(res_cat, exp)
tm.assert_numpy_array_equal(res_ser, exp)
# Searching for several items array, side='right'
res_cat = cat.searchsorted(["apple", "bread"], side="right")
res_ser = ser.searchsorted(["apple", "bread"], side="right")
exp = np.array([3, 5], dtype=np.intp)
tm.assert_numpy_array_equal(res_cat, exp)
tm.assert_numpy_array_equal(res_ser, exp)
# Searching for a single value that is not from the Categorical
with pytest.raises(KeyError, match="cucumber"):
cat.searchsorted("cucumber")
with pytest.raises(KeyError, match="cucumber"):
ser.searchsorted("cucumber")
# Searching for multiple values one of each is not from the Categorical
with pytest.raises(KeyError, match="cucumber"):
cat.searchsorted(["bread", "cucumber"])
with pytest.raises(KeyError, match="cucumber"):
ser.searchsorted(["bread", "cucumber"])
def test_unique(self):
# categories are reordered based on value when ordered=False
cat = Categorical(["a", "b"])
exp = Index(["a", "b"])
res = cat.unique()
tm.assert_index_equal(res.categories, exp)
tm.assert_categorical_equal(res, cat)
cat = Categorical(["a", "b", "a", "a"], categories=["a", "b", "c"])
res = cat.unique()
tm.assert_index_equal(res.categories, exp)
tm.assert_categorical_equal(res, Categorical(exp))
cat = Categorical(["c", "a", "b", "a", "a"], categories=["a", "b", "c"])
exp = Index(["c", "a", "b"])
res = cat.unique()
tm.assert_index_equal(res.categories, exp)
exp_cat = Categorical(exp, categories=["c", "a", "b"])
tm.assert_categorical_equal(res, exp_cat)
# nan must be removed
cat = Categorical(["b", np.nan, "b", np.nan, "a"], categories=["a", "b", "c"])
res = cat.unique()
exp = Index(["b", "a"])
tm.assert_index_equal(res.categories, exp)
        exp_cat = Categorical(["b", np.nan, "a"], categories=["b", "a"])
import numbers
import warnings
import pandas as pd
from pandas.api.types import is_numeric_dtype
from scipy import stats
from sklearn.model_selection import train_test_split, StratifiedKFold
from sklearn import preprocessing
def is_value_numeric(value):
"""Return if the passed value is numeric"""
return isinstance(value, numbers.Number)
def is_value_categorical(value):
"""Return if a value is categorical"""
return type(value) == str
def round_dict_values(dictionary, decimals):
"""Round all the numeric values in the passed dictionary"""
for key, value in dictionary.items():
if is_value_numeric(value):
dictionary[key] = round(value, decimals)
return dictionary
def is_series_numeric(series):
"""Return whether the passed series is numeric (not categorical)"""
return is_numeric_dtype(series)
def is_series_categorical(series):
"""Return whether the passed series is categorical (not numeric)"""
return not is_numeric_dtype(series)
def get_numeric_column_names(data_frame):
"""Return the names of the numeric columns found in the passed data frame"""
numeric_columns = list()
for column in data_frame.columns:
if is_series_numeric(data_frame[column]):
numeric_columns.append(column)
return numeric_columns
def get_categorical_column_names(data_frame):
"""Return the names of the categorical columns found in the passed data frame"""
categorical_columns = list()
for column in data_frame.columns:
if is_series_categorical(data_frame[column]):
categorical_columns.append(column)
return categorical_columns
def read_data_sets(data_dir_path, data_set_names):
"""Read and return as data frames the data sets under the passed directory path that have the passed names"""
data_frames = list()
# parse the data sets
for data_set_name in data_set_names:
data_frame = pd.read_csv(data_dir_path + data_set_name + ".csv", na_values=["?"])
data_frame.name = data_set_name
data_frames.append(data_frame)
return data_frames
def pre_process_data_frame(data_frame, convert_categorical_to_numeric=False):
"""Pre-process the passed data frame"""
# replace the missing values
data_frame = replace_missing_values(data_frame)
# normalize numeric columns
data_frame = normalize_numeric_columns(data_frame)
# convert categorical attributes to numeric (with 1-hot encoding)
if convert_categorical_to_numeric:
data_frame = convert_categorical_columns_to_numeric(data_frame)
return data_frame
def replace_missing_values(data_frame):
"""Return the passed data frame with their missing values replaced with the mean of the attribute"""
# replace NaNs with column means
data_frame = data_frame.apply(lambda column: fill_nan(column))
return data_frame
def normalize_numeric_columns(data_frame):
""" Return the passed data frame with the numeric columns normalized with values between 0 and 1"""
for column in get_numeric_column_names(data_frame):
data_frame[column] = preprocessing.minmax_scale(data_frame[column])
return data_frame
def fill_nan(series):
"""Fill the NaN values in an series, with the mean (if numeric) or mode (if categorical), and return the result"""
# ignore irrelevant warnings about NaN presence
warnings.simplefilter("ignore")
# use mean for numeric attributes
if is_series_numeric(series):
return series.fillna(series.mean())
# use mode for categorical attributes
return series.fillna(stats.mode(series)[0][0])
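# A quick hedged illustration of fill_nan on toy data (hypothetical values, not
# part of this module; assumes numpy imported as np): a numeric series has its
# NaN replaced by the column mean, e.g.
#     fill_nan(pd.Series([1.0, np.nan, 3.0]))  ->  1.0, 2.0, 3.0
# while a categorical series would have missing entries replaced by its mode.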
def convert_categorical_columns_to_numeric(data_frame):
"""Return the passed data frame with categorical columns substituted by binary 1-hot encoding columns"""
# the class column should not be modified
class_column_name = get_class_column_name(data_frame)
categorical_column_names_without_class = get_categorical_column_names(data_frame)
categorical_column_names_without_class.remove(class_column_name)
# apply 1-hot encoding
    data_frame = pd.get_dummies(data_frame, columns=categorical_column_names_without_class)
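    # presumable return for the truncated function above (its siblings all return the frame)
    return data_frame


# Minimal usage sketch (hedged): "data/" and "example" are hypothetical names,
# not part of this module; this only exercises the helpers defined above with
# the default settings (missing-value replacement and min-max normalization).
if __name__ == "__main__":
    frames = read_data_sets("data/", ["example"])
    processed = [pre_process_data_frame(df) for df in frames]
    print(processed[0].head())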
import pandas as pd
def get_prev_user(file, ind):
prev_user = None
while True:
if ind != 0:
temp = file.iloc[ind - 1, :]
            if pd.isnull(temp['fullname']) or (pd.isnull(temp['Gerrit'])):
from itertools import product
import numpy as np
from numpy.linalg import lstsq
from numpy.testing import assert_allclose
import pandas as pd
import pytest
from linearmodels.iv.model import IV2SLS
from linearmodels.panel.data import PanelData
from linearmodels.panel.model import PanelOLS, PooledOLS
from linearmodels.panel.utility import AbsorbingEffectWarning
from linearmodels.shared.exceptions import MemoryWarning
from linearmodels.shared.hypotheses import WaldTestStatistic
from linearmodels.shared.utility import AttrDict
from linearmodels.tests.panel._utility import (
access_attributes,
assert_frame_similar,
assert_results_equal,
datatypes,
generate_data,
)
pytestmark = pytest.mark.filterwarnings(
"ignore::linearmodels.shared.exceptions.MissingValueWarning",
"ignore:the matrix subclass:PendingDeprecationWarning",
)
perc_missing = [0.0, 0.02, 0.20]
has_const = [True, False]
perms = list(product(perc_missing, datatypes, has_const))
ids = ["-".join(str(param) for param in perms) for perm in perms]
@pytest.fixture(params=perms, ids=ids)
def data(request):
missing, datatype, const = request.param
return generate_data(
missing, datatype, const=const, ntk=(91, 15, 5), other_effects=2
)
@pytest.fixture(params=["numpy", "pandas"])
def absorbed_data(request):
datatype = request.param
rng = np.random.RandomState(12345)
data = generate_data(0, datatype, ntk=(131, 4, 3), rng=rng)
x = data.x
if isinstance(data.x, np.ndarray):
absorbed = np.arange(x.shape[2])
absorbed = np.tile(absorbed, (1, x.shape[1], 1))
data.x = np.concatenate([data.x, absorbed])
elif isinstance(data.x, pd.DataFrame):
codes = data.x.index.codes
absorbed = np.array(codes[0]).astype(np.double)
data.x["x_absorbed"] = absorbed
return data
@pytest.fixture(params=perms, ids=ids)
def large_data(request):
missing, datatype, const = request.param
return generate_data(
missing, datatype, const=const, ntk=(51, 71, 5), other_effects=2
)
singleton_ids = [i for i, p in zip(ids, perms) if p[1] == "pandas" and not p[-1]]
singleton_perms = [p for p in perms if p[1] == "pandas" and not p[-1]]
@pytest.fixture(params=singleton_perms, ids=singleton_ids)
def singleton_data(request):
missing, datatype, const = request.param
return generate_data(
missing,
datatype,
const=const,
ntk=(91, 15, 5),
other_effects=2,
num_cats=[5 * 91, 15],
)
const_perms = list(product(perc_missing, datatypes))
const_ids = ["-".join(str(val) for val in perm) for perm in const_perms]
@pytest.fixture(params=const_perms, ids=const_ids)
def const_data(request):
missing, datatype = request.param
data = generate_data(missing, datatype, ntk=(91, 7, 1))
y = PanelData(data.y).dataframe
x = y.copy()
x.iloc[:, :] = 1
x.columns = ["Const"]
return AttrDict(y=y, x=x, w=PanelData(data.w).dataframe)
@pytest.fixture(params=[True, False])
def entity_eff(request):
return request.param
@pytest.fixture(params=[True, False])
def time_eff(request):
return request.param
lsdv_perms = [
p
for p in product([True, False], [True, False], [True, False], [0, 1, 2])
if sum(p[1:]) <= 2
]
lsdv_ids = []
for p in lsdv_perms:
str_id = "weighted" if p[0] else "unweighted"
str_id += "-entity_effects" if p[1] else ""
str_id += "-time_effects" if p[2] else ""
str_id += "-{0}_other_effects".format(p[3]) if p[3] else ""
lsdv_ids.append(str_id)
@pytest.fixture(params=lsdv_perms, ids=lsdv_ids)
def lsdv_config(request):
weights, entity_effects, time_effects, other_effects = request.param
return AttrDict(
weights=weights,
entity_effects=entity_effects,
time_effects=time_effects,
other_effects=other_effects,
)
def test_const_data_only(const_data):
y, x = const_data.y, const_data.x
mod = PanelOLS(y, x)
res = mod.fit(debiased=False)
res2 = IV2SLS(y, x, None, None).fit()
assert_allclose(res.params, res2.params)
def test_const_data_only_weights(const_data):
y, x = const_data.y, const_data.x
mod = PanelOLS(y, x, weights=const_data.w)
res = mod.fit(debiased=False)
res2 = IV2SLS(y, x, None, None, weights=const_data.w).fit()
assert_allclose(res.params, res2.params)
def test_const_data_entity(const_data):
y, x = const_data.y, const_data.x
mod = PanelOLS(y, x, entity_effects=True)
res = mod.fit(debiased=False)
x = mod.exog.dataframe
d = mod.dependent.dummies("entity", drop_first=True)
d.iloc[:, :] = d.values - x.values @ lstsq(x.values, d.values, rcond=None)[0]
xd = np.c_[x.values, d.values]
xd = pd.DataFrame(xd, index=x.index, columns=list(x.columns) + list(d.columns))
res2 = IV2SLS(mod.dependent.dataframe, xd, None, None).fit()
assert_allclose(res.params, res2.params.iloc[:1])
def test_const_data_time(const_data):
y, x = const_data.y, const_data.x
mod = PanelOLS(y, x, time_effects=True)
res = mod.fit(debiased=False)
x = mod.exog.dataframe
d = mod.dependent.dummies("time", drop_first=True)
d.iloc[:, :] = d.values - x.values @ lstsq(x.values, d.values, rcond=None)[0]
xd = np.c_[x.values, d.values]
xd = pd.DataFrame(xd, index=x.index, columns=list(x.columns) + list(d.columns))
res2 = IV2SLS(mod.dependent.dataframe, xd, None, None).fit()
assert_allclose(res.params, res2.params.iloc[:1])
@pytest.mark.parametrize("entity", [True, False])
def test_const_data_single_effect_weights(const_data, entity):
y, x = const_data.y, const_data.x
mod = PanelOLS(
y, x, entity_effects=entity, time_effects=not entity, weights=const_data.w
)
res = mod.fit(debiased=False)
y = mod.dependent.dataframe
w = mod.weights.dataframe
x = mod.exog.dataframe
dummy_type = "entity" if entity else "time"
d = mod.dependent.dummies(dummy_type, drop_first=True)
d_columns = list(d.columns)
root_w = np.sqrt(w.values)
z = np.ones_like(x)
wd = root_w * d.values
wz = root_w
d = d - z @ lstsq(wz, wd, rcond=None)[0]
xd = np.c_[x.values, d.values]
xd = pd.DataFrame(xd, index=x.index, columns=list(x.columns) + d_columns)
res2 = IV2SLS(y, xd, None, None, weights=w).fit()
assert_allclose(res.params, res2.params.iloc[:1])
def test_const_data_both(const_data):
y, x = const_data.y, const_data.x
mod = PanelOLS(y, x, entity_effects=True, time_effects=True)
res = mod.fit(debiased=False)
x = mod.exog.dataframe
d1 = mod.dependent.dummies("entity", drop_first=True)
d1.columns = ["d.entity.{0}".format(i) for i in d1]
d2 = mod.dependent.dummies("time", drop_first=True)
d2.columns = ["d.time.{0}".format(i) for i in d2]
d = np.c_[d1.values, d2.values]
d = pd.DataFrame(d, index=x.index, columns=list(d1.columns) + list(d2.columns))
d.iloc[:, :] = d.values - x.values @ lstsq(x.values, d.values, rcond=None)[0]
xd = np.c_[x.values, d.values]
xd = pd.DataFrame(xd, index=x.index, columns=list(x.columns) + list(d.columns))
res2 = IV2SLS(mod.dependent.dataframe, xd, None, None).fit()
assert_allclose(res.params, res2.params.iloc[:1])
def test_const_data_both_weights(const_data):
y, x = const_data.y, const_data.x
mod = PanelOLS(y, x, entity_effects=True, time_effects=True, weights=const_data.w)
res = mod.fit(debiased=False)
w = mod.weights.dataframe
x = mod.exog.dataframe
d1 = mod.dependent.dummies("entity", drop_first=True)
d1.columns = ["d.entity.{0}".format(i) for i in d1]
d2 = mod.dependent.dummies("time", drop_first=True)
d2.columns = ["d.time.{0}".format(i) for i in d2]
d = np.c_[d1.values, d2.values]
root_w = np.sqrt(w.values)
z = np.ones_like(x)
wd = root_w * d
wz = root_w
d = d - z @ lstsq(wz, wd, rcond=None)[0]
d = pd.DataFrame(d, index=x.index, columns=list(d1.columns) + list(d2.columns))
xd = np.c_[x.values, d.values]
xd = pd.DataFrame(xd, index=x.index, columns=list(x.columns) + list(d.columns))
res2 = IV2SLS(mod.dependent.dataframe, xd, None, None, weights=w).fit()
assert_allclose(res.params, res2.params.iloc[:1])
def test_panel_no_effects(data):
panel = PanelOLS(data.y, data.x)
assert panel._collect_effects().shape[1] == 0
res = panel.fit()
res2 = PooledOLS(data.y, data.x).fit()
assert_results_equal(res, res2)
def test_panel_no_effects_weighted(data):
res = PanelOLS(data.y, data.x, weights=data.w).fit()
res2 = PooledOLS(data.y, data.x, weights=data.w).fit()
assert_results_equal(res, res2)
def test_panel_entity_lsdv(data):
mod = PanelOLS(data.y, data.x, entity_effects=True)
res = mod.fit(auto_df=False, count_effects=False, debiased=False)
y = mod.dependent.dataframe
x = mod.exog.dataframe
if mod.has_constant:
d = mod.dependent.dummies("entity", drop_first=True)
z = np.ones_like(y)
d_demean = d.values - z @ lstsq(z, d.values, rcond=None)[0]
else:
d = mod.dependent.dummies("entity", drop_first=False)
d_demean = d.values
xd = np.c_[x.values, d_demean]
xd = pd.DataFrame(xd, index=x.index, columns=list(x.columns) + list(d.columns))
ols_mod = IV2SLS(y, xd, None, None)
res2 = ols_mod.fit(cov_type="unadjusted", debiased=False)
assert_results_equal(res, res2, test_fit=False)
assert_allclose(res.rsquared_inclusive, res2.rsquared)
res = mod.fit(cov_type="robust", auto_df=False, count_effects=False, debiased=False)
res2 = ols_mod.fit(cov_type="robust")
assert_results_equal(res, res2, test_fit=False)
clusters = data.vc1
ols_clusters = mod.reformat_clusters(data.vc1)
res = mod.fit(
cov_type="clustered",
clusters=clusters,
auto_df=False,
count_effects=False,
debiased=False,
)
res2 = ols_mod.fit(cov_type="clustered", clusters=ols_clusters.dataframe)
assert_results_equal(res, res2, test_fit=False)
clusters = data.vc2
ols_clusters = mod.reformat_clusters(data.vc2)
res = mod.fit(
cov_type="clustered",
clusters=clusters,
auto_df=False,
count_effects=False,
debiased=False,
)
res2 = ols_mod.fit(cov_type="clustered", clusters=ols_clusters.dataframe)
assert_results_equal(res, res2, test_fit=False)
res = mod.fit(
cov_type="clustered",
cluster_time=True,
auto_df=False,
count_effects=False,
debiased=False,
)
clusters = pd.DataFrame(
mod.dependent.time_ids, index=mod.dependent.index, columns=["var.clust"]
)
res2 = ols_mod.fit(cov_type="clustered", clusters=clusters)
assert_results_equal(res, res2, test_fit=False)
res = mod.fit(
cov_type="clustered",
cluster_entity=True,
auto_df=False,
count_effects=False,
debiased=False,
)
clusters = pd.DataFrame(
mod.dependent.entity_ids, index=mod.dependent.index, columns=["var.clust"]
)
res2 = ols_mod.fit(cov_type="clustered", clusters=clusters)
assert_results_equal(res, res2, test_fit=False)
def test_panel_entity_fwl(data):
mod = PanelOLS(data.y, data.x, entity_effects=True)
res = mod.fit(auto_df=False, count_effects=False, debiased=False)
y = mod.dependent.dataframe
x = mod.exog.dataframe
if mod.has_constant:
d = mod.dependent.dummies("entity", drop_first=True)
z = np.ones_like(y)
d_demean = d.values - z @ lstsq(z, d.values, rcond=None)[0]
else:
d = mod.dependent.dummies("entity", drop_first=False)
d_demean = d.values
x = x - d_demean @ lstsq(d_demean, x, rcond=None)[0]
y = y - d_demean @ lstsq(d_demean, y, rcond=None)[0]
ols_mod = IV2SLS(y, x, None, None)
res2 = ols_mod.fit(cov_type="unadjusted")
assert_results_equal(res, res2, test_df=False)
res = mod.fit(cov_type="robust", auto_df=False, count_effects=False, debiased=False)
res2 = ols_mod.fit(cov_type="robust")
assert_results_equal(res, res2, test_df=False)
def test_panel_time_lsdv(large_data):
mod = PanelOLS(large_data.y, large_data.x, time_effects=True)
res = mod.fit(auto_df=False, count_effects=False, debiased=False)
y = mod.dependent.dataframe
x = mod.exog.dataframe
d = mod.dependent.dummies("time", drop_first=mod.has_constant)
d_cols = list(d.columns)
d = d.values
if mod.has_constant:
z = np.ones_like(y)
d = d - z @ lstsq(z, d, rcond=None)[0]
xd = np.c_[x.values, d]
xd = pd.DataFrame(xd, index=x.index, columns=list(x.columns) + d_cols)
ols_mod = IV2SLS(y, xd, None, None)
res2 = ols_mod.fit(cov_type="unadjusted")
assert_results_equal(res, res2, test_fit=False)
assert_allclose(res.rsquared_inclusive, res2.rsquared)
res = mod.fit(cov_type="robust", auto_df=False, count_effects=False, debiased=False)
res2 = ols_mod.fit(cov_type="robust")
assert_results_equal(res, res2, test_fit=False)
clusters = large_data.vc1
ols_clusters = mod.reformat_clusters(clusters)
res = mod.fit(
cov_type="clustered",
clusters=clusters,
auto_df=False,
count_effects=False,
debiased=False,
)
res2 = ols_mod.fit(cov_type="clustered", clusters=ols_clusters.dataframe)
assert_results_equal(res, res2, test_fit=False)
clusters = large_data.vc2
ols_clusters = mod.reformat_clusters(clusters)
res = mod.fit(
cov_type="clustered",
clusters=clusters,
auto_df=False,
count_effects=False,
debiased=False,
)
res2 = ols_mod.fit(cov_type="clustered", clusters=ols_clusters.dataframe)
assert_results_equal(res, res2, test_fit=False)
res = mod.fit(
cov_type="clustered",
cluster_time=True,
auto_df=False,
count_effects=False,
debiased=False,
)
clusters = pd.DataFrame(
mod.dependent.time_ids, index=mod.dependent.index, columns=["var.clust"]
)
res2 = ols_mod.fit(cov_type="clustered", clusters=clusters)
assert_results_equal(res, res2, test_fit=False)
res = mod.fit(
cov_type="clustered",
cluster_entity=True,
auto_df=False,
count_effects=False,
debiased=False,
)
clusters = pd.DataFrame(
mod.dependent.entity_ids, index=mod.dependent.index, columns=["var.clust"]
)
res2 = ols_mod.fit(cov_type="clustered", clusters=clusters)
assert_results_equal(res, res2, test_fit=False)
def test_panel_time_fwl(data):
mod = PanelOLS(data.y, data.x, time_effects=True)
res = mod.fit(auto_df=False, count_effects=False, debiased=False)
y = mod.dependent.dataframe
x = mod.exog.dataframe
d = mod.dependent.dummies("time", drop_first=mod.has_constant)
d = d.values
if mod.has_constant:
z = np.ones_like(y)
d = d - z @ lstsq(z, d, rcond=None)[0]
x = x - d @ lstsq(d, x, rcond=None)[0]
y = y - d @ lstsq(d, y, rcond=None)[0]
ols_mod = IV2SLS(y, x, None, None)
res2 = ols_mod.fit(cov_type="unadjusted")
assert_results_equal(res, res2, test_df=False)
res = mod.fit(cov_type="robust", auto_df=False, count_effects=False, debiased=False)
res2 = ols_mod.fit(cov_type="robust")
assert_results_equal(res, res2, test_df=False)
def test_panel_both_lsdv(data):
mod = PanelOLS(data.y, data.x, entity_effects=True, time_effects=True)
res = mod.fit(auto_df=False, count_effects=False, debiased=False)
y = mod.dependent.dataframe
x = mod.exog.dataframe
d1 = mod.dependent.dummies("entity", drop_first=mod.has_constant)
d2 = mod.dependent.dummies("time", drop_first=True)
d = np.c_[d1.values, d2.values]
if mod.has_constant:
z = np.ones_like(y)
d = d - z @ lstsq(z, d, rcond=None)[0]
xd = np.c_[x.values, d]
xd = pd.DataFrame(
xd, index=x.index, columns=list(x.columns) + list(d1.columns) + list(d2.columns)
)
ols_mod = IV2SLS(y, xd, None, None)
res2 = ols_mod.fit(cov_type="unadjusted")
assert_results_equal(res, res2, test_fit=False)
assert_allclose(res.rsquared_inclusive, res2.rsquared)
res = mod.fit(cov_type="robust", auto_df=False, count_effects=False, debiased=False)
res2 = ols_mod.fit(cov_type="robust")
assert_results_equal(res, res2, test_fit=False)
clusters = data.vc1
ols_clusters = mod.reformat_clusters(clusters)
res = mod.fit(
cov_type="clustered",
clusters=clusters,
auto_df=False,
count_effects=False,
debiased=False,
)
res2 = ols_mod.fit(cov_type="clustered", clusters=ols_clusters.dataframe)
assert_results_equal(res, res2, test_fit=False)
clusters = data.vc2
ols_clusters = mod.reformat_clusters(clusters)
res = mod.fit(
cov_type="clustered",
clusters=clusters,
auto_df=False,
count_effects=False,
debiased=False,
)
res2 = ols_mod.fit(cov_type="clustered", clusters=ols_clusters.dataframe)
assert_results_equal(res, res2, test_fit=False)
res = mod.fit(
cov_type="clustered",
cluster_time=True,
auto_df=False,
count_effects=False,
debiased=False,
)
clusters = pd.DataFrame(
mod.dependent.time_ids, index=mod.dependent.index, columns=["var.clust"]
)
res2 = ols_mod.fit(cov_type="clustered", clusters=clusters)
assert_results_equal(res, res2, test_fit=False)
res = mod.fit(
cov_type="clustered",
cluster_entity=True,
auto_df=False,
count_effects=False,
debiased=False,
)
clusters = pd.DataFrame(
mod.dependent.entity_ids, index=mod.dependent.index, columns=["var.clust"]
)
res2 = ols_mod.fit(cov_type="clustered", clusters=clusters)
assert_results_equal(res, res2, test_fit=False)
def test_panel_both_fwl(data):
mod = PanelOLS(data.y, data.x, entity_effects=True, time_effects=True)
res = mod.fit(auto_df=False, count_effects=False, debiased=False)
y = mod.dependent.dataframe
x = mod.exog.dataframe
d1 = mod.dependent.dummies("entity", drop_first=mod.has_constant)
d2 = mod.dependent.dummies("time", drop_first=True)
d = np.c_[d1.values, d2.values]
if mod.has_constant:
z = np.ones_like(y)
d = d - z @ lstsq(z, d, rcond=None)[0]
x = x - d @ lstsq(d, x, rcond=None)[0]
y = y - d @ lstsq(d, y, rcond=None)[0]
ols_mod = IV2SLS(y, x, None, None)
res2 = ols_mod.fit(cov_type="unadjusted")
assert_results_equal(res, res2, test_df=False)
res = mod.fit(cov_type="robust", auto_df=False, count_effects=False, debiased=False)
res2 = ols_mod.fit(cov_type="robust")
assert_results_equal(res, res2, test_df=False)
def test_panel_entity_lsdv_weighted(data):
mod = PanelOLS(data.y, data.x, entity_effects=True, weights=data.w)
res = mod.fit(auto_df=False, count_effects=False, debiased=False)
y = mod.dependent.dataframe
x = mod.exog.dataframe
w = mod.weights.dataframe
d = mod.dependent.dummies("entity", drop_first=mod.has_constant)
d_cols = d.columns
d = d.values
if mod.has_constant:
z = np.ones_like(y)
root_w = np.sqrt(w.values)
wd = root_w * d
wz = root_w * z
d = d - z @ lstsq(wz, wd, rcond=None)[0]
xd = np.c_[x.values, d]
xd = pd.DataFrame(xd, index=x.index, columns=list(x.columns) + list(d_cols))
ols_mod = IV2SLS(y, xd, None, None, weights=w)
res2 = ols_mod.fit(cov_type="unadjusted")
assert_results_equal(res, res2, test_fit=False)
assert_allclose(res.rsquared_inclusive, res2.rsquared)
res = mod.fit(cov_type="robust", auto_df=False, count_effects=False, debiased=False)
res2 = ols_mod.fit(cov_type="robust")
assert_results_equal(res, res2, test_fit=False)
clusters = data.vc1
ols_clusters = mod.reformat_clusters(clusters)
res = mod.fit(
cov_type="clustered",
clusters=clusters,
auto_df=False,
count_effects=False,
debiased=False,
)
res2 = ols_mod.fit(cov_type="clustered", clusters=ols_clusters.dataframe)
assert_results_equal(res, res2, test_fit=False)
clusters = data.vc2
ols_clusters = mod.reformat_clusters(clusters)
res = mod.fit(
cov_type="clustered",
clusters=clusters,
auto_df=False,
count_effects=False,
debiased=False,
)
res2 = ols_mod.fit(cov_type="clustered", clusters=ols_clusters.dataframe)
assert_results_equal(res, res2, test_fit=False)
res = mod.fit(
cov_type="clustered",
cluster_time=True,
auto_df=False,
count_effects=False,
debiased=False,
)
clusters = pd.DataFrame(
mod.dependent.time_ids, index=mod.dependent.index, columns=["var.clust"]
)
res2 = ols_mod.fit(cov_type="clustered", clusters=clusters)
assert_results_equal(res, res2, test_fit=False)
res = mod.fit(
cov_type="clustered",
cluster_entity=True,
auto_df=False,
count_effects=False,
debiased=False,
)
clusters = pd.DataFrame(
mod.dependent.entity_ids, index=mod.dependent.index, columns=["var.clust"]
)
res2 = ols_mod.fit(cov_type="clustered", clusters=clusters)
assert_results_equal(res, res2, test_fit=False)
def test_panel_time_lsdv_weighted(large_data):
mod = PanelOLS(large_data.y, large_data.x, time_effects=True, weights=large_data.w)
res = mod.fit(auto_df=False, count_effects=False, debiased=False)
y = mod.dependent.dataframe
x = mod.exog.dataframe
w = mod.weights.dataframe
d = mod.dependent.dummies("time", drop_first=mod.has_constant)
d_cols = d.columns
d = d.values
if mod.has_constant:
z = np.ones_like(y)
root_w = np.sqrt(w.values)
wd = root_w * d
wz = root_w * z
d = d - z @ lstsq(wz, wd, rcond=None)[0]
xd = np.c_[x.values, d]
xd = pd.DataFrame(xd, index=x.index, columns=list(x.columns) + list(d_cols))
ols_mod = IV2SLS(y, xd, None, None, weights=w)
res2 = ols_mod.fit(cov_type="unadjusted")
assert_results_equal(res, res2, test_fit=False)
res = mod.fit(cov_type="robust", auto_df=False, count_effects=False, debiased=False)
res2 = ols_mod.fit(cov_type="robust")
assert_results_equal(res, res2, test_fit=False)
clusters = large_data.vc1
ols_clusters = mod.reformat_clusters(clusters)
res = mod.fit(
cov_type="clustered",
clusters=clusters,
auto_df=False,
count_effects=False,
debiased=False,
)
res2 = ols_mod.fit(cov_type="clustered", clusters=ols_clusters.dataframe)
assert_results_equal(res, res2, test_fit=False)
clusters = large_data.vc2
ols_clusters = mod.reformat_clusters(clusters)
res = mod.fit(
cov_type="clustered",
clusters=clusters,
auto_df=False,
count_effects=False,
debiased=False,
)
res2 = ols_mod.fit(cov_type="clustered", clusters=ols_clusters.dataframe)
assert_results_equal(res, res2, test_fit=False)
res = mod.fit(
cov_type="clustered",
cluster_time=True,
auto_df=False,
count_effects=False,
debiased=False,
)
clusters = pd.DataFrame(
mod.dependent.time_ids, index=mod.dependent.index, columns=["var.clust"]
)
res2 = ols_mod.fit(cov_type="clustered", clusters=clusters)
assert_results_equal(res, res2, test_fit=False)
res = mod.fit(
cov_type="clustered",
cluster_entity=True,
auto_df=False,
count_effects=False,
debiased=False,
)
clusters = pd.DataFrame(
mod.dependent.entity_ids, index=mod.dependent.index, columns=["var.clust"]
)
res2 = ols_mod.fit(cov_type="clustered", clusters=clusters)
assert_results_equal(res, res2, test_fit=False)
def test_panel_both_lsdv_weighted(data):
mod = PanelOLS(
data.y, data.x, entity_effects=True, time_effects=True, weights=data.w
)
res = mod.fit(auto_df=False, count_effects=False, debiased=False)
y = mod.dependent.dataframe
x = mod.exog.dataframe
w = mod.weights.dataframe
d1 = mod.dependent.dummies("entity", drop_first=mod.has_constant)
d2 = mod.dependent.dummies("time", drop_first=True)
d = np.c_[d1.values, d2.values]
if mod.has_constant:
z = np.ones_like(y)
root_w = np.sqrt(w.values)
wd = root_w * d
wz = root_w * z
d = d - z @ lstsq(wz, wd, rcond=None)[0]
xd = np.c_[x.values, d]
xd = pd.DataFrame(
xd, index=x.index, columns=list(x.columns) + list(d1.columns) + list(d2.columns)
)
ols_mod = IV2SLS(y, xd, None, None, weights=w)
res2 = ols_mod.fit(cov_type="unadjusted")
assert_results_equal(res, res2, test_fit=False)
assert_allclose(res.rsquared_inclusive, res2.rsquared)
res = mod.fit(cov_type="robust", auto_df=False, count_effects=False, debiased=False)
res2 = ols_mod.fit(cov_type="robust")
assert_results_equal(res, res2, test_fit=False)
clusters = data.vc1
ols_clusters = mod.reformat_clusters(clusters)
res = mod.fit(
cov_type="clustered",
clusters=clusters,
auto_df=False,
count_effects=False,
debiased=False,
)
res2 = ols_mod.fit(cov_type="clustered", clusters=ols_clusters.dataframe)
assert_results_equal(res, res2, test_fit=False)
clusters = data.vc2
ols_clusters = mod.reformat_clusters(clusters)
res = mod.fit(
cov_type="clustered",
clusters=clusters,
auto_df=False,
count_effects=False,
debiased=False,
)
res2 = ols_mod.fit(cov_type="clustered", clusters=ols_clusters.dataframe)
assert_results_equal(res, res2, test_fit=False)
res = mod.fit(
cov_type="clustered",
cluster_time=True,
auto_df=False,
count_effects=False,
debiased=False,
)
clusters = pd.DataFrame(
mod.dependent.time_ids, index=mod.dependent.index, columns=["var.clust"]
)
res2 = ols_mod.fit(cov_type="clustered", clusters=clusters)
assert_results_equal(res, res2, test_fit=False)
res = mod.fit(
cov_type="clustered",
cluster_entity=True,
auto_df=False,
count_effects=False,
debiased=False,
)
clusters = pd.DataFrame(
mod.dependent.entity_ids, index=mod.dependent.index, columns=["var.clust"]
)
res2 = ols_mod.fit(cov_type="clustered", clusters=clusters)
assert_results_equal(res, res2, test_fit=False)
def test_panel_entity_other_equivalence(data):
mod = PanelOLS(data.y, data.x, entity_effects=True)
res = mod.fit()
y = mod.dependent.dataframe
x = mod.exog.dataframe
cats = pd.DataFrame(mod.dependent.entity_ids, index=mod.dependent.index)
mod2 = PanelOLS(y, x, other_effects=cats)
res2 = mod2.fit()
assert_results_equal(res, res2)
assert "Model includes 1 other effect" in res2.summary.as_text()
def test_panel_time_other_equivalence(data):
mod = PanelOLS(data.y, data.x, time_effects=True)
res = mod.fit()
y = mod.dependent.dataframe
x = mod.exog.dataframe
cats = pd.DataFrame(mod.dependent.time_ids, index=mod.dependent.index)
mod2 = PanelOLS(y, x, other_effects=cats)
res2 = mod2.fit()
assert_results_equal(res, res2)
assert "Model includes 1 other effect" in res2.summary.as_text()
def test_panel_entity_time_other_equivalence(data):
mod = PanelOLS(data.y, data.x, entity_effects=True, time_effects=True)
res = mod.fit()
y = mod.dependent.dataframe
x = mod.exog.dataframe
c1 = mod.dependent.entity_ids
c2 = mod.dependent.time_ids
cats = np.c_[c1, c2]
cats = pd.DataFrame(cats, index=mod.dependent.index)
mod2 = PanelOLS(y, x, other_effects=cats)
res2 = mod2.fit()
assert_results_equal(res, res2)
assert "Model includes 2 other effects" in res2.summary.as_text()
def test_panel_other_lsdv(data):
mod = PanelOLS(data.y, data.x, other_effects=data.c)
assert "Num Other Effects: 2" in str(mod)
res = mod.fit(auto_df=False, count_effects=False, debiased=False)
y = mod.dependent.dataframe.copy()
x = mod.exog.dataframe.copy()
c = mod._other_effect_cats.dataframe.copy()
d = []
d_columns = []
for i, col in enumerate(c):
s = c[col].copy()
dummies = pd.get_dummies(
s.astype(np.int64), drop_first=(mod.has_constant or i > 0)
)
dummies.columns = [s.name + "_val_" + str(c) for c in dummies.columns]
d_columns.extend(list(dummies.columns))
d.append(dummies.values)
d = np.column_stack(d)
if mod.has_constant:
z = np.ones_like(y)
d = d - z @ lstsq(z, d, rcond=None)[0]
xd = np.c_[x.values, d]
xd = pd.DataFrame(xd, index=x.index, columns=list(x.columns) + list(d_columns))
ols_mod = IV2SLS(y, xd, None, None)
res2 = ols_mod.fit(cov_type="unadjusted")
assert_results_equal(res, res2, test_fit=False)
res3 = mod.fit(
cov_type="unadjusted", auto_df=False, count_effects=False, debiased=False
)
assert_results_equal(res, res3)
res = mod.fit(cov_type="robust", auto_df=False, count_effects=False, debiased=False)
res2 = ols_mod.fit(cov_type="robust")
assert_results_equal(res, res2, test_fit=False)
clusters = data.vc1
ols_clusters = mod.reformat_clusters(clusters)
res = mod.fit(
cov_type="clustered",
clusters=clusters,
auto_df=False,
count_effects=False,
debiased=False,
)
res2 = ols_mod.fit(cov_type="clustered", clusters=ols_clusters.dataframe)
assert_results_equal(res, res2, test_fit=False)
clusters = data.vc2
ols_clusters = mod.reformat_clusters(clusters)
res = mod.fit(
cov_type="clustered",
clusters=clusters,
auto_df=False,
count_effects=False,
debiased=False,
)
res2 = ols_mod.fit(cov_type="clustered", clusters=ols_clusters.dataframe)
assert_results_equal(res, res2, test_fit=False)
res = mod.fit(
cov_type="clustered",
cluster_time=True,
auto_df=False,
count_effects=False,
debiased=False,
)
clusters = pd.DataFrame(
mod.dependent.time_ids, index=mod.dependent.index, columns=["var.clust"]
)
res2 = ols_mod.fit(cov_type="clustered", clusters=clusters)
assert_results_equal(res, res2, test_fit=False)
res = mod.fit(
cov_type="clustered",
cluster_entity=True,
auto_df=False,
count_effects=False,
debiased=False,
)
clusters = pd.DataFrame(
mod.dependent.entity_ids, index=mod.dependent.index, columns=["var.clust"]
)
res2 = ols_mod.fit(cov_type="clustered", clusters=clusters)
assert_results_equal(res, res2, test_fit=False)
def test_panel_other_fwl(data):
mod = PanelOLS(data.y, data.x, other_effects=data.c)
res = mod.fit(auto_df=False, count_effects=False, debiased=False)
y = mod.dependent.dataframe
x = mod.exog.dataframe
c = mod._other_effect_cats.dataframe
d = []
d_columns = []
for i, col in enumerate(c):
s = c[col].copy()
dummies = pd.get_dummies(
s.astype(np.int64), drop_first=(mod.has_constant or i > 0)
)
dummies.columns = [s.name + "_val_" + str(c) for c in dummies.columns]
d_columns.extend(list(dummies.columns))
d.append(dummies.values)
d = np.column_stack(d)
if mod.has_constant:
z = np.ones_like(y)
d = d - z @ lstsq(z, d, rcond=None)[0]
x = x - d @ lstsq(d, x, rcond=None)[0]
y = y - d @ lstsq(d, y, rcond=None)[0]
ols_mod = IV2SLS(y, x, None, None)
res2 = ols_mod.fit(cov_type="unadjusted")
assert_results_equal(res, res2, test_df=False)
res = mod.fit(cov_type="robust", auto_df=False, count_effects=False, debiased=False)
res2 = ols_mod.fit(cov_type="robust")
assert_results_equal(res, res2, test_df=False)
def test_panel_other_incorrect_size(data):
mod = PanelOLS(data.y, data.x, entity_effects=True)
y = mod.dependent.dataframe
x = mod.exog.dataframe
cats = pd.DataFrame(mod.dependent.entity_ids, index=mod.dependent.index)
cats = PanelData(cats)
cats = cats.dataframe.iloc[: cats.dataframe.shape[0] // 2, :]
with pytest.raises(ValueError):
PanelOLS(y, x, other_effects=cats)
def test_results_access(data):
mod = PanelOLS(data.y, data.x, entity_effects=True)
res = mod.fit()
access_attributes(res)
mod = PanelOLS(data.y, data.x, other_effects=data.c)
res = mod.fit()
access_attributes(res)
mod = PanelOLS(data.y, data.x, time_effects=True, entity_effects=True)
res = mod.fit()
access_attributes(res)
mod = PanelOLS(data.y, data.x)
res = mod.fit()
access_attributes(res)
const = PanelData(data.y).copy()
const.dataframe.iloc[:, :] = 1
const.dataframe.columns = ["const"]
mod = PanelOLS(data.y, const)
res = mod.fit()
access_attributes(res)
def test_alt_rsquared(data):
mod = PanelOLS(data.y, data.x, entity_effects=True)
res = mod.fit(debiased=False)
assert_allclose(res.rsquared, res.rsquared_within)
def test_alt_rsquared_weighted(data):
mod = PanelOLS(data.y, data.x, entity_effects=True, weights=data.w)
res = mod.fit(debiased=False)
assert_allclose(res.rsquared, res.rsquared_within)
def test_too_many_effects(data):
with pytest.raises(ValueError):
PanelOLS(
data.y, data.x, entity_effects=True, time_effects=True, other_effects=data.c
)
def test_cov_equiv_cluster(data):
mod = PanelOLS(data.y, data.x, entity_effects=True)
res = mod.fit(cov_type="clustered", cluster_entity=True, debiased=False)
y = PanelData(data.y)
clusters = pd.DataFrame(y.entity_ids, index=y.index)
res2 = mod.fit(cov_type="clustered", clusters=clusters, debiased=False)
assert_results_equal(res, res2)
mod = PanelOLS(data.y, data.x, time_effects=True)
res = mod.fit(cov_type="clustered", cluster_time=True, debiased=False)
y = PanelData(data.y)
clusters = pd.DataFrame(y.time_ids, index=y.index)
res2 = mod.fit(cov_type="clustered", clusters=clusters, debiased=False)
assert_results_equal(res, res2)
res = mod.fit(cov_type="clustered", debiased=False)
res2 = mod.fit(cov_type="clustered", clusters=None, debiased=False)
assert_results_equal(res, res2)
@pytest.mark.smoke
def test_cluster_smoke(data):
mod = PanelOLS(data.y, data.x, entity_effects=True)
mod.fit(cov_type="clustered", cluster_time=True, debiased=False)
mod.fit(cov_type="clustered", cluster_entity=True, debiased=False)
c2 = PanelData(data.vc2)
c1 = PanelData(data.vc1)
mod.fit(cov_type="clustered", clusters=c2, debiased=False)
mod.fit(cov_type="clustered", cluster_entity=True, clusters=c1, debiased=False)
mod.fit(cov_type="clustered", cluster_time=True, clusters=c1, debiased=False)
with pytest.raises(ValueError):
mod.fit(cov_type="clustered", cluster_time=True, clusters=c2, debiased=False)
with pytest.raises(ValueError):
mod.fit(cov_type="clustered", cluster_entity=True, clusters=c2, debiased=False)
with pytest.raises(ValueError):
mod.fit(
cov_type="clustered",
cluster_entity=True,
cluster_time=True,
clusters=c1,
debiased=False,
)
with pytest.raises(ValueError):
clusters = c1.dataframe.iloc[: c1.dataframe.shape[0] // 2]
mod.fit(cov_type="clustered", clusters=clusters, debiased=False)
def test_f_pooled(data):
mod = PanelOLS(data.y, data.x, entity_effects=True)
res = mod.fit(debiased=False)
if mod.has_constant:
mod2 = PooledOLS(data.y, data.x)
else:
exog = mod.exog.dataframe.copy()
exog["Intercept"] = 1.0
mod2 = PooledOLS(mod.dependent.dataframe, exog)
res2 = mod2.fit(debiased=False)
eps = res.resids.values
eps2 = res2.resids.values
v1 = res.df_model - res2.df_model
v2 = res.df_resid
f_pool = (eps2.T @ eps2 - eps.T @ eps) / v1
f_pool /= (eps.T @ eps) / v2
f_pool = float(f_pool)
assert_allclose(res.f_pooled.stat, f_pool)
assert res.f_pooled.df == v1
assert res.f_pooled.df_denom == v2
mod = PanelOLS(data.y, data.x, time_effects=True)
res = mod.fit(debiased=False)
eps = res.resids.values
eps2 = res2.resids.values
v1 = res.df_model - res2.df_model
v2 = res.df_resid
f_pool = (eps2.T @ eps2 - eps.T @ eps) / v1
f_pool /= (eps.T @ eps) / v2
f_pool = float(f_pool)
assert_allclose(res.f_pooled.stat, f_pool)
assert res.f_pooled.df == v1
assert res.f_pooled.df_denom == v2
mod = PanelOLS(data.y, data.x, entity_effects=True, time_effects=True)
res = mod.fit(debiased=False)
eps = res.resids.values
eps2 = res2.resids.values
v1 = res.df_model - res2.df_model
v2 = res.df_resid
f_pool = (eps2.T @ eps2 - eps.T @ eps) / v1
f_pool /= (eps.T @ eps) / v2
f_pool = float(f_pool)
assert_allclose(res.f_pooled.stat, f_pool)
assert res.f_pooled.df == v1
assert res.f_pooled.df_denom == v2
def test_entity_other(data):
y = PanelData(data.y)
x = PanelData(data.x)
c = PanelData(data.c).copy()
missing = y.isnull | x.isnull | c.isnull
y.drop(missing)
x.drop(missing)
c.drop(missing)
c_entity = c.dataframe.copy()
c_entity.iloc[:, 1] = y.entity_ids.squeeze()
c_entity = c_entity.astype(np.int64)
mod = PanelOLS(y, x, other_effects=c_entity)
res = mod.fit(debiased=False)
c_only = PanelData(c.dataframe.iloc[:, [0]].astype(np.int64))
mod2 = PanelOLS(y, x, other_effects=c_only, entity_effects=True)
res2 = mod2.fit(debiased=False)
assert_results_equal(res, res2)
@pytest.mark.smoke
def test_other_weighted_smoke(data):
mod = PanelOLS(data.y, data.x, weights=data.w, other_effects=data.c)
mod.fit(debiased=False)
@pytest.mark.slow
def test_methods_equivalent(data, lsdv_config):
other_effects = None
if lsdv_config.other_effects == 1:
other_effects = PanelData(data.c).dataframe.iloc[:, [0]]
elif lsdv_config.other_effects == 2:
other_effects = data.c
weights = data.w if lsdv_config.weights else None
mod = PanelOLS(
data.y,
data.x,
weights=weights,
entity_effects=lsdv_config.entity_effects,
time_effects=lsdv_config.time_effects,
other_effects=other_effects,
)
res1 = mod.fit()
res2 = mod.fit(use_lsdv=True)
res3 = mod.fit(use_lsmr=True)
assert_results_equal(res1, res2)
assert_results_equal(res2, res3, strict=False)
def test_rsquared_inclusive_equivalence(data):
mod = PanelOLS(data.y, data.x)
res = mod.fit()
assert_allclose(res.rsquared, res.rsquared_inclusive)
mod = PanelOLS(data.y, data.x, weights=data.w)
res = mod.fit()
assert_allclose(res.rsquared, res.rsquared_inclusive)
def test_panel_effects_sanity(data):
mod = PanelOLS(data.y, data.x, entity_effects=True)
res = mod.fit(auto_df=False, count_effects=False)
fitted = mod.exog.values2d @ res.params.values[:, None]
expected = fitted
expected += res.resids.values[:, None]
expected += res.estimated_effects.values
assert_allclose(mod.dependent.values2d, expected)
mod = PanelOLS(data.y, data.x, entity_effects=True, time_effects=True)
res = mod.fit(auto_df=False, count_effects=False)
fitted = mod.exog.values2d @ res.params.values[:, None]
expected = fitted
expected += res.resids.values[:, None]
expected += res.estimated_effects.values
assert_allclose(mod.dependent.values2d, expected)
mod = PanelOLS(data.y, data.x, weights=data.w, entity_effects=True)
res = mod.fit(auto_df=False, count_effects=False)
fitted = mod.exog.values2d @ res.params.values[:, None]
expected = fitted
expected += res.resids.values[:, None]
expected += res.estimated_effects.values
assert_allclose(mod.dependent.values2d, expected)
mod = PanelOLS(
data.y, data.x, weights=data.w, entity_effects=True, time_effects=True
)
res = mod.fit(auto_df=False, count_effects=False)
fitted = mod.exog.values2d @ res.params.values[:, None]
expected = fitted
expected += res.resids.values[:, None]
expected += res.estimated_effects.values
assert_allclose(mod.dependent.values2d, expected)
def test_fitted_effects_residuals(data, entity_eff, time_eff):
mod = PanelOLS(data.y, data.x, entity_effects=entity_eff, time_effects=time_eff)
res = mod.fit()
expected = mod.exog.values2d @ res.params.values
expected = pd.DataFrame(expected, index=mod.exog.index, columns=["fitted_values"])
assert_allclose(res.fitted_values, expected)
assert_frame_similar(res.fitted_values, expected)
expected.iloc[:, 0] = res.resids
expected.columns = ["idiosyncratic"]
assert_allclose(res.idiosyncratic, expected)
assert_frame_similar(res.idiosyncratic, expected)
fitted_error = res.fitted_values + res.idiosyncratic.values
expected.iloc[:, 0] = mod.dependent.values2d - fitted_error
expected.columns = ["estimated_effects"]
assert_allclose(res.estimated_effects, expected, atol=1e-8)
assert_frame_similar(res.estimated_effects, expected)
@pytest.mark.parametrize("weighted", [True, False])
def test_low_memory(data, weighted):
if weighted:
mod = PanelOLS(
data.y, data.x, weights=data.w, entity_effects=True, time_effects=True
)
else:
mod = PanelOLS(data.y, data.x, entity_effects=True, time_effects=True)
res = mod.fit()
low_mem = mod.fit(low_memory=True)
assert_allclose(res.params, low_mem.params)
def test_low_memory_auto():
x = np.random.standard_normal((1000, 1000))
e = np.random.standard_normal((1000, 1000))
eff = np.arange(1000)[:, None]
y = x + e + eff + eff.T
y = y.ravel()
x = np.reshape(x, (1000000, 1))
mi = pd.MultiIndex.from_product([np.arange(1000), np.arange(1000)])
y = pd.Series(y, index=mi)
x = pd.DataFrame(x, index=mi)
mod = PanelOLS(y, x, entity_effects=True, time_effects=True)
with pytest.warns(MemoryWarning):
mod.fit()
@pytest.mark.filterwarnings("ignore::linearmodels.shared.exceptions.SingletonWarning")
def test_singleton_removal():
entities = []
for i in range(6):
entities.extend(["entity.{j}".format(j=j) for j in range(6 - i)])
nobs = len(entities)
times = np.arange(nobs) % 6
index = pd.MultiIndex.from_arrays((entities, times))
cols = ["x{0}".format(i) for i in range(3)]
x = pd.DataFrame(np.random.randn(nobs, 3), index=index, columns=cols)
y = pd.DataFrame(np.random.randn(nobs, 1), index=index)
mod = PanelOLS(y, x, singletons=False, entity_effects=True, time_effects=True)
res = mod.fit()
mod = PanelOLS(y, x, singletons=True, entity_effects=True, time_effects=True)
res_with = mod.fit()
assert_allclose(res.params, res_with.params)
@pytest.mark.filterwarnings("ignore::linearmodels.shared.exceptions.SingletonWarning")
def test_masked_singleton_removal():
nobs = 8
entities = ["A", "B", "C", "D"] * 2
times = [0, 1, 1, 1, 1, 2, 2, 2]
index = pd.MultiIndex.from_arrays((entities, times))
x = pd.DataFrame(np.random.randn(nobs, 1), index=index, columns=["x"])
y = pd.DataFrame(np.random.randn(nobs, 1), index=index)
mod = PanelOLS(y, x, singletons=False, entity_effects=True, time_effects=True)
res = mod.fit()
assert res.nobs == 6
def test_singleton_removal_other_effects(data):
mod_keep = PanelOLS(
data.y, data.x, weights=data.w, other_effects=data.c, singletons=True
)
res_keep = mod_keep.fit()
mod = PanelOLS(
data.y, data.x, weights=data.w, other_effects=data.c, singletons=False
)
res = mod.fit(cov_type="clustered", clusters=data.vc1)
assert res.nobs <= res_keep.nobs
@pytest.mark.slow
@pytest.mark.filterwarnings("ignore::linearmodels.shared.exceptions.SingletonWarning")
@pytest.mark.parametrize("other_effects", [1, 2])
def test_singleton_removal_mixed(singleton_data, other_effects):
if other_effects == 1:
other_effects = PanelData(singleton_data.c).dataframe.iloc[:, [0]]
elif other_effects == 2:
other_effects = singleton_data.c
mod = PanelOLS(singleton_data.y, singleton_data.x, other_effects=other_effects)
res_keep = mod.fit(use_lsmr=True)
mod = PanelOLS(
singleton_data.y,
singleton_data.x,
other_effects=other_effects,
singletons=False,
)
res = mod.fit(cov_type="clustered", clusters=singleton_data.vc2, use_lsmr=True)
assert_allclose(res_keep.params, res.params)
assert res.nobs <= res_keep.nobs
def test_repeated_measures_weight():
# Issue reported by email
rs = np.random.RandomState(0)
w = rs.chisquare(5, 300) / 5
idx1 = ["a"] * 100 + ["b"] * 100 + ["c"] * 100
idx2 = np.arange(300) % 25
mi = pd.MultiIndex.from_arrays([idx1, idx2])
df = pd.DataFrame(rs.standard_normal((300, 2)), index=mi, columns=["y", "x"])
w = pd.Series(w, index=mi, name="weight")
"""
This is the main script to be run from the directory root; it starts the Flask application, which one can
then connect to.
"""
# external packages
from astrodbkit2.astrodb import Database, REFERENCE_TABLES # used for pulling out database and querying
from astropy.coordinates import SkyCoord
from astropy.table import Table # tabulating
from bokeh.embed import json_item # bokeh embedding
from bokeh.layouts import row, column # bokeh displaying nicely
from bokeh.models import ColumnDataSource, Range1d, CustomJS,\
Select, Toggle, TapTool, OpenURL, HoverTool # bokeh models
from bokeh.plotting import figure, curdoc # bokeh plotting
from flask import Flask, render_template, request, redirect, url_for, jsonify # website functionality
from flask_cors import CORS # cross origin fix (aladin mostly)
from flask_wtf import FlaskForm # web forms
from markdown2 import markdown # using markdown formatting
import numpy as np # numerical python
import pandas as pd # running dataframes
from wtforms import StringField, SubmitField # web forms
from wtforms.validators import DataRequired, StopValidation # validating web forms
# internal packages
import argparse # system arguments
import os # operating system
from typing import Union, List # type hinting
from urllib.parse import quote # handling strings into url friendly form
# local packages
from simple_callbacks import JSCallbacks
# initialise
app_simple = Flask(__name__) # start flask app
app_simple.config['SECRET_KEY'] = os.urandom(32) # need to generate csrf token as basic security for Flask
CORS(app_simple) # makes CORS work (aladin notably)
def sysargs():
"""
These are the system arguments given after calling this python script
Returns
-------
_args
The different argument parameters, can be grabbed via their long names (e.g. _args.host)
"""
_args = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)
_args.add_argument('-i', '--host', default='127.0.0.1',
help='Local IP Address to host server, default 127.0.0.1')
_args.add_argument('-p', '--port', default=8000,
help='Local port number to host server through, default 8000', type=int)
_args.add_argument('-d', '--debug', help='Run Flask in debug mode?', default=False, action='store_true')
_args.add_argument('-f', '--file', default='SIMPLE.db',
help='Database file path relative to current directory, default SIMPLE.db')
_args = _args.parse_args()
return _args
class SimpleDB(Database): # this keeps pycharm happy about unresolved references
"""
Wrapper class for astrodbkit2.Database specific to SIMPLE
"""
Sources = None # initialise class attribute
Photometry = None
Parallaxes = None
class Inventory:
"""
For use in the solo result page where the inventory of an object is queried, grabs also the RA & Dec
"""
ra: float = 0
dec: float = 0
def __init__(self, resultdict: dict):
"""
Constructor method for Inventory
Parameters
----------
resultdict: dict
The dictionary of all the key: values in a given object inventory
"""
self.results: dict = resultdict # given inventory for a target
for key in self.results: # over every key in inventory
if args.debug:
print(key)
if key in REFERENCE_TABLES: # ignore the reference table ones
continue
lowkey: str = key.lower() # lower case of the key
mkdown_output: str = self.listconcat(key) # get in markdown the dataframe value for given key
setattr(self, lowkey, mkdown_output) # set the key attribute with the dataframe for given key
try:
srcs: pd.DataFrame = self.listconcat('Sources', rtnmk=False) # open the Sources result
self.ra, self.dec = srcs.ra[0], srcs.dec[0]
except (KeyError, AttributeError):
pass
return
def listconcat(self, key: str, rtnmk: bool = True) -> Union[pd.DataFrame, str]:
"""
Concatenates the list for a given key
Parameters
----------
key: str
The key corresponding to the inventory
rtnmk: bool
Switch for whether to return either a markdown string or a dataframe
"""
obj: List[dict] = self.results[key] # the value for the given key
df: pd.DataFrame = pd.concat([pd.DataFrame(objrow, index=[i]) # create dataframe from found dict
for i, objrow in enumerate(obj)], ignore_index=True) # every dict in the list
if rtnmk: # return markdown boolean
return markdown(df.to_html(index=False)) # wrap the dataframe into html then markdown
return df # otherwise return dataframe as is
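# Hedged sketch of what listconcat does to a single inventory entry (the records
# below are invented; real ones come from astrodbkit2's db.inventory). A list of
# dicts becomes one DataFrame, which is rendered to HTML and wrapped in markdown
# for the template. Defined for illustration only and never called by the app.
def _listconcat_sketch():
    records = [{'source': 'DemoTarget', 'ra': 123.4, 'dec': -5.6}]
    df = pd.concat([pd.DataFrame(row, index=[i]) for i, row in enumerate(records)],
                   ignore_index=True)
    return markdown(df.to_html(index=False)), (df.ra[0], df.dec[0])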
class CheckResultsLength(object):
"""
Validation class for use in the searchbar
"""
def __call__(self, form, field):
"""
Runs when class called
Parameters
----------
form
The form object
field
Current values in the form
"""
db = SimpleDB(db_file, connection_arguments={'check_same_thread': False}) # open database
results = db.search_object(field.data, fmt='astropy') # search by what is currently in searchbar
if not len(results): # if that search is empty
field.errors[:] = [] # clear existing errors
raise StopValidation(field.gettext('No results')) # stop validating and return error
class SearchForm(FlaskForm):
"""
Searchbar class
"""
search = StringField('', [DataRequired(), CheckResultsLength()], id='autocomplete') # searchbar
submit = SubmitField('Query') # clicker button to send request
def all_sources():
"""
Queries the full table to get all the sources
Returns
-------
allresults
Just the main IDs
fullresults
The full dataframe of all the sources
"""
db = SimpleDB(db_file, connection_arguments={'check_same_thread': False}) # open database
fullresults: pd.DataFrame = db.query(db.Sources).pandas()
allresults: list = fullresults['source'].tolist() # gets all the main IDs in the database
return allresults, fullresults
def find_colours(photodf: pd.DataFrame, allbands: np.ndarray):
"""
Find all the colours using available photometry
Parameters
----------
photodf: pd.DataFrame
The dataframe with all photometry in
allbands: np.ndarray
All the photometric bands
Returns
-------
photodf: pd.DataFrame
The dataframe with all photometry and colours in
"""
for i, band in enumerate(allbands): # loop over all bands TODO: sort by wavelength?
j = 1 # start count
while j < 20:
if i + j == len(allbands): # last band
break
nextband: str = allbands[i + j] # next band
j += 1
try:
photodf[f'{band}_{nextband}'] = photodf[band] - photodf[nextband] # colour
except KeyError:
continue
return photodf
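# Small hedged check of find_colours (the band names and magnitudes are invented;
# real ones come from the Photometry table). Defined for illustration only and
# never called by the app.
def _find_colours_example():
    bands = np.array(['J', 'H', 'K'])
    demo = pd.DataFrame({'J': [15.0], 'H': [14.2], 'K': [13.9]})
    return find_colours(demo, bands)  # adds J_H, J_K and H_K colour columns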
def parse_photometry(photodf: pd.DataFrame, allbands: np.ndarray, multisource: bool = False) -> dict:
"""
Parses the photometry dataframe handling multiple references for same magnitude
Parameters
----------
photodf: pd.DataFrame
The dataframe with all photometry in
allbands: np.ndarray
All the photometric bands
multisource: bool
Switch whether to iterate over initial dataframe with multiple sources
Returns
-------
newphoto: dict
Dictionary of effectively transposed photometry
"""
def one_source_iter(onephotodf: pd.DataFrame):
"""
Parses the photometry dataframe handling multiple references for same magnitude for one object
Parameters
----------
onephotodf: pd.DataFrame
The dataframe with all the photometry in it
Returns
-------
thisnewphot: dict
Dictionary of transposed photometry
arrsize: int
The number of rows in the dictionary
"""
refgrp = onephotodf.groupby('reference') # all references grouped
arrsize: int = len(refgrp) # the number of rows
thisnewphot = {band: [None, ] * arrsize for band in onephotodf.band.unique()} # initial dictionary
thisnewphot['ref'] = [None, ] * arrsize # references
for i, (ref, refval) in enumerate(refgrp): # over all references
for band, bandval in refval.groupby('band'): # over all bands
thisnewphot[band][i] = bandval.iloc[0].magnitude # given magnitude (0 index of length 1 dataframe)
thisnewphot['ref'][i] = ref # reference for these mags
return thisnewphot, arrsize
if not multisource:
newphoto = one_source_iter(photodf)[0]
else:
newphoto: dict = {band: [] for band in np.hstack([allbands, ['ref', 'target']])} # empty dict
for target, targetdf in photodf.groupby('source'):
specificphoto, grplen = one_source_iter(targetdf) # get the dictionary for this object photometry
targetname = [target, ] * grplen # list of the target name
for key in newphoto.keys(): # over all keys
key: str = key
if key == 'target':
continue
try:
newphoto[key].extend(specificphoto[key]) # extend the list for given key
except KeyError: # if that key wasn't present for the object
newphoto[key].extend([None, ] * grplen) # use None as filler
newphoto['target'].extend(targetname) # add target to table
newphotocp: dict = newphoto.copy()
for key in newphotocp:
key: str = key
if key in ('ref', 'target'): # other than these columns
continue
newkey: str = key.replace('.', '_')  # swap dot for underscore
if newkey == key:  # band name has no dot, nothing to rename
    continue
newphoto[newkey] = newphoto[key].copy()
del newphoto[key]
return newphoto
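# Hedged sketch of the shape parse_photometry produces for a single source (the
# rows below are invented). The result is keyed by band, with dots swapped for
# underscores, plus a 'ref' list with one entry per reference. Never called.
def _parse_photometry_example():
    demo = pd.DataFrame({'band': ['WISE.W1', 'WISE.W2', 'WISE.W1'],
                         'magnitude': [11.1, 10.9, 11.2],
                         'reference': ['RefA', 'RefA', 'RefB']})
    return parse_photometry(demo, demo.band.unique())  # {'WISE_W1': [...], 'WISE_W2': [...], 'ref': [...]}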
def all_photometry():
"""
Get all the photometric data from the database to be used in later CMD as background
Returns
-------
allphoto: pd.DataFrame
All the photometry in a dataframe
allbands: np.ndarray
The unique passbands to create dropdowns by
"""
db = SimpleDB(db_file, connection_arguments={'check_same_thread': False}) # open database
allphoto: pd.DataFrame = db.query(db.Photometry).pandas() # get all photometry
allbands: np.ndarray = allphoto.band.unique() # the unique bands
outphoto: dict = parse_photometry(allphoto, allbands, True) # transpose photometric table
allbands = np.array([band.replace('.', '_') for band in allbands])
allphoto = pd.DataFrame(outphoto)
'''
__author__=<NAME>
MIT License
Copyright (c) 2020 crewml
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
import pandas as pd
from category_encoders import TargetEncoder
import numpy as np
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import train_test_split
from sklearn import ensemble
from sklearn.metrics import classification_report, confusion_matrix
import xgboost as xgb
from sklearn.model_selection import cross_val_score
from sklearn.metrics import accuracy_score
import logging
from crewml.common import DATA_DIR
from sklearn.metrics import matthews_corrcoef
from sklearn.metrics import balanced_accuracy_score
import crewml.common as st
import pickle
from sklearn import preprocessing
from sklearn.model_selection import RandomizedSearchCV, GridSearchCV
import joblib
class PairingLogRegressor:
def __init__(self, feature_file,
pairing_month,
pairing_model_output_file,
paring_model_file):
'''
Parameters
----------
feature_file : TYPE
Name of input file to read pairing data
Returns
-------
None.
'''
self.logger = logging.getLogger(__name__)
self.feature_file = feature_file
self.pairing_df = None
self.target_df = None
self.X_train = None
self.X_test = None
self.y_train = None
self.y_test = None
self.pairing_month = pairing_month
self.pairing_model_output_file = pairing_model_output_file
self.paring_model_file = paring_model_file
def process(self):
'''
Prepares Pairing data for Logistic Regression
Returns
-------
None.
'''
self.pairing_df = pd.read_csv(
DATA_DIR+self.pairing_month+"/"+self.feature_file)
self.pairing_df.drop(self.pairing_df.filter(
regex="Unname"), axis=1, inplace=True)
self.clean_pairing()
pair_freq = self.select_pairings(400)
self.pairing_df = self.pairing_df.loc[self.pairing_df['PAIRING_ID']
.isin(pair_freq['index1'])]
# convert timedetal to seconds
self.pairing_df['AIR_TIME'] = pd.to_timedelta(
self.pairing_df['AIR_TIME']).dt.seconds
self.pairing_df['TOT_DUTY_TM'] = pd.to_timedelta(
self.pairing_df['TOT_DUTY_TM']).dt.seconds
self.pairing_df['TOT_PAIRING_UTC'] = pd.to_timedelta(
self.pairing_df['TOT_PAIRING_UTC']).dt.seconds
# CRS_ELAPSED_TIME is the same as AIR_TIME, so it can be deleted
# self.pairing_df['CRS_ELAPSED_TIME'] = self.pairing_df['CRS_ELAPSED_TIME'].astype(int)
# self.pairing_df['CRS_ELAPSED_TIME'] = self.pairing_df['CRS_ELAPSED_TIME'].multiply(60)
self.remove_duty_columns()
self.target_df = pd.DataFrame()
self.target_df['PAIRING_ID'] = self.pairing_df['PAIRING_ID']
self.encode_pairing_target()
# copy the selected features to save it to output file
self.selected_pairing_df = self.pairing_df.copy()
self.selected_pairing_df.to_csv(
DATA_DIR+self.pairing_month+"/"+self.pairing_model_output_file)
del self.pairing_df['PAIRING_ID']
def get_selected_pairings(self):
'''
Return the subset of pairings used to train the model. Only the flight IDs
returned here are used during model deployment to identify the
PAIRING_IDs for a new month
'''
return self.selected_pairing_df
def select_pairings(self, total):
'''
This function selects the top "total" pairings to be given to the model.
Instead of passing all the pairings, we select "total" pairings to train
and test the model. Many pairings contain only two flights, which is not
enough to train a pairing category. Until we can get more data by
combining multiple months or improving our pairing generation
algorithm to include more flights for a given pairing, this function
chooses the top "total" pairings that contain the most flights.
Returns
-------
pair_freq : pairing frequency
'''
pair_freq = self.pairing_df['PAIRING_ID'].value_counts(dropna=False)
pair_freq.index = pair_freq.index.map(int)
pair_freq = pair_freq[:total]
pair_freq = pair_freq.to_frame()
pair_freq['index1'] = pair_freq.index
return pair_freq
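# Worked micro-illustration of the selection rule above (invented PAIRING_IDs):
# for pd.Series([7, 7, 7, 3, 3, 9]), value_counts()[:2] keeps pairings 7 and 3,
# the two with the most flights, and their ids land in the 'index1' column that
# later filters pairing_df.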
def clean_pairing(self):
'''
Clean the data of NaN and excessively large values
Returns
-------
None.
'''
assert isinstance(
self.pairing_df, pd.DataFrame), "df needs to be a pd.DataFrame"
self.pairing_df.dropna(inplace=True)
'''
indices_to_keep = ~self.pairing_df.isin(
[np.nan, np.inf, -np.inf]).any(1)
self.pairing_df[indices_to_keep].astype(np.float64)
'''
def encode_pairing_target(self):
'''
Use label encoder to encode the target pairing Ids to start from
0, 1, 2, ... XGBoost requires target to start from 0 instead of
random PAIRING_IDs selected from select_pairings() function.
Returns
-------
None.
'''
le = preprocessing.LabelEncoder()
le.fit(self.target_df)
encoded = le.transform(self.target_df)
self.target_df = pd.DataFrame(encoded, columns=['PAIRING_ID'])
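# Minimal sketch of the label-encoding step above (the PAIRING_IDs are invented;
# it reuses the sklearn preprocessing import from this module). Original ids
# 401, 975, 975, 268 become 1, 2, 2, 0 -- consecutive labels starting at 0, as
# XGBoost expects. Defined for illustration only and never called.
def _encode_target_sketch():
    le = preprocessing.LabelEncoder()
    encoded = le.fit_transform([401, 975, 975, 268])
    return pd.DataFrame(encoded, columns=['PAIRING_ID'])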
from data import CITIES, BUSINESSES, USERS, REVIEWS, TIPS, CHECKINS
import pandas as pd
from pandas import Series, DataFrame
import numpy as np
import random
"""Deze functie wordt gebruikt om de ratings voor de utility matrix te berekenen"""
def get_rating(ratings,user_id,business_id):
if ratings.loc[(ratings['user_id'] == user_id) & (ratings['business_id'] == business_id)]['stars'].any() == False:
res = np.nan
else:
res = float(ratings.loc[(ratings['user_id'] == user_id) & (ratings['business_id'] == business_id),'stars'].values[0])
return res
"""Deze functie wordt gebruikt om een utility matrix te maken"""
def pivot_ratings(df):
""" takes a rating table as input and computes the utility matrix """
business_ids = df['business_id'].unique()
user_ids = df['user_id'].unique()
# create empty data frame
pivot_data = pd.DataFrame(np.nan, columns=user_ids, index=business_ids, dtype=float)
# use the function get_rating to fill the matrix
for x in pivot_data:
for y in pivot_data.index:
pivot_data[x][y] = get_rating(df,x,y)
return pivot_data
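# Hedged illustration of the utility-matrix layout produced above (the ids are
# made up): columns are user ids, rows are business ids, and unrated pairs stay
# NaN. Defined for illustration only and never called by the recommender.
def _pivot_ratings_example():
    ratings = pd.DataFrame({'user_id': ['u1', 'u1', 'u2'],
                            'business_id': ['b1', 'b2', 'b1'],
                            'stars': [4.0, 3.0, 5.0]})
    return pivot_ratings(ratings)  # 2x2 frame; the (b2, u2) cell stays NaN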
"""We hebben het verschil tussen cosine en euclid similarity getest"""
# def cosine_angle(matrix, id1, id2):
# """Compute euclid distance between two rows."""
# if id1 == id2:
# return 1
# # only take the features that have values for both id1 and id2
# selected_features = matrix.loc[id1].notna() & matrix.loc[id2].notna()
#
# # if no matching features, return NaN
# if not selected_features.any():
# return 0.0
#
# # get the features from the matrix
# features1 = matrix.loc[id1][selected_features]
# features2 = matrix.loc[id2][selected_features]
# top=0
# squared1=0
# squared2=0
#
# # compute the distances for the features
# distances = features1 * features2
# for x in distances:
# top = top + x
# for x in features1:
# squared1 = squared1 + (x*x)
# for x in features2:
# squared2 = squared2 + (x*x)
#
# bottom = np.sqrt(squared1) * np.sqrt(squared2)
# if bottom == 0:
# return 0.0
#
# res = top/bottom
# return res
# def create_similarity_matrix_cosine(matrix):
# """ creates the similarity matrix based on cosine similarity """
# similarity_matrix = pd.DataFrame(0, index=matrix.index, columns=matrix.index, dtype=float)
# for x in similarity_matrix:
# for y in similarity_matrix.index:
# similarity_matrix[x][y] = cosine_angle(matrix,x,y)
#
# return similarity_matrix
def mean(frame, group_index, avg_index):
return frame.groupby(group_index)[avg_index].mean()
def select_neighborhood(similarity_matrix, utility_matrix, target_user, target_business):
"""selects all items with similarity > 0"""
seen = []
a = {}
for i in utility_matrix.index:
if pd.isnull(utility_matrix[target_user][i]):
pass
else:
seen.append(i)
for x in similarity_matrix:
if similarity_matrix[target_business][x] > 0 and similarity_matrix[target_business][x] < 1 and x in seen:
a.update({x:similarity_matrix[target_business][x]})
res = pd.Series(a)
return res
def weighted_mean(neighborhood, utility_matrix, business_id):
top = 0
bottom = 0
res=0
test = []
if neighborhood.empty:
return 0.0
for x,y in neighborhood.iteritems():
top = top + (utility_matrix[business_id][x] * y)
bottom = bottom + y
if bottom == 0:
return 0.0
res = top/bottom
return res
def euclid_distance(matrix, id1, id2):
"""Compute euclid distance between two rows."""
# only take the features that have values for both id1 and id2
selected_features = matrix.loc[id1].notna() & matrix.loc[id2].notna()
# if no matching features, return NaN
if not selected_features.any():
return np.nan
# get the features from the matrix
features1 = matrix.loc[id1][selected_features]
features2 = matrix.loc[id2][selected_features]
# compute the distances for the features
distances = features1 - features2
squared = 0
# return the absolute sum
for x in distances:
squared = squared + x*x
res = np.sqrt(squared)
return res
def euclid_similarity(matrix, id1, id2):
"""Compute euclid similarity between two rows."""
# compute distance
distance = euclid_distance(matrix, id1, id2)
# if no distance could be computed (no shared features) return a similarity of 0
if distance is np.nan:
return 0
# else return similarity
return 1 / (1 + distance)
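# Worked micro-example (values invented): rows r1 and r2 share both features, the
# distance is sqrt((4 - 2)**2 + (3 - 3)**2) = 2, so the similarity is 1 / (1 + 2).
# Defined for illustration only and never called by the recommender.
def _euclid_similarity_example():
    m = pd.DataFrame({'f1': [4.0, 2.0], 'f2': [3.0, 3.0]}, index=['r1', 'r2'])
    return euclid_similarity(m, 'r1', 'r2')  # 1/3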
# TODO
def create_similarity_matrix_euclid(matrix):
similarity_matrix = pd.DataFrame(0, index=matrix.index, columns=matrix.index, dtype=float)
for x in similarity_matrix:
for y in similarity_matrix.index:
similarity_matrix[x][y] = euclid_similarity(matrix,x,y)
return similarity_matrix
def mean_center_rows(matrix):
matrix1 = pd.DataFrame(matrix)
new_matrix = pd.DataFrame(0, index=matrix.index, columns=matrix.columns, dtype=float)
"""Assemble network flow paths with weighted flows on the rail node and edge network of GB
Takes the OD matrix result generated by the rail_od_flows.py
"""
import sys
import os
import ast
import pandas as pd
import geopandas as gpd
from collections import defaultdict
import numpy as np
import configparser
from tqdm import tqdm
tqdm.pandas()
#############################################################################################
# setup file locations and data files
# The project contents are organised in a base folder that contains the following subfolders:
# - network: Folder containing GIS node and edge files of the railway network
# - outputs: The OD matrix result folder
#############################################################################################
CONFIG = configparser.ConfigParser()
CONFIG.read(os.path.join(os.path.dirname(__file__), 'script_config.ini'))
BASE_PATH = CONFIG['file_locations']['base_path']
NETWORK_PATH = os.path.join(BASE_PATH,'network')
FLOW_PATH = os.path.join(BASE_PATH,'outputs')
tqdm.pandas()
CRS = 'epsg:27700'
#####################################
# READ MAIN DATA
#####################################
def flow_paths_to_network_assets(paths_df,path_id_column,path_flow_id_column,gdf_file,gdf_file_id_column):
"""Add OD flow paths value of nodes/edges
Outputs ``gdf_edges`` - a shapefile with od flows of all
each node/edge of network.
Parameters
---------
paths_df
Pandas DataFrame of OD flow paths and their flow
path_id_column
String name of th enode/edgeee path id
gdf_edges
GeoDataFrame of network edge set
save_csv
Boolean condition to tell code to save created edge csv file
save_shapes
Boolean condition to tell code to save created edge shapefile
shape_output_path
Path where the output shapefile will be stored
csv_output_path
Path where the output csv file will be stored
"""
path_flow = defaultdict(float)
for row in paths_df.itertuples():
for item in getattr(row,path_id_column):
path_flow[item] += getattr(row,path_flow_id_column)
path_flow = pd.DataFrame(path_flow.items(),columns=[gdf_file_id_column,path_flow_id_column])
gdf_file = pd.merge(gdf_file, path_flow, how='left', on=[gdf_file_id_column])
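# Hedged sketch of the accumulate-and-merge pattern used above (made-up ids and
# plain DataFrames standing in for the GeoDataFrame edge layer; the snippet of
# flow_paths_to_network_assets shown here stops at the merge). Never called.
def _flow_accumulation_sketch():
    paths = pd.DataFrame({'edge_path': [['e1', 'e2'], ['e2']], 'tons': [10.0, 5.0]})
    edges = pd.DataFrame({'edge_id': ['e1', 'e2', 'e3']})
    totals = defaultdict(float)
    for row in paths.itertuples():
        for edge in row.edge_path:
            totals[edge] += row.tons
    flows = pd.DataFrame(totals.items(), columns=['edge_id', 'tons'])
    return pd.merge(edges, flows, how='left', on=['edge_id'])  # e1 -> 10, e2 -> 15, e3 -> NaN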
import duckdb
import pandas as pd
import numpy
import pytest
def check_category_equal(category):
df_in = pd.DataFrame({
'x': pd.Categorical(category, ordered=True)
import re
from datetime import datetime
from os import listdir
from os.path import basename, dirname, isfile, join
from flask import (Flask, json, jsonify, render_template, request, send_file,
send_from_directory)
from werkzeug.utils import secure_filename
import pandas as pd
from ClassifierManager import ClassifierManager
from Exceptions import InputError
from flask_cors import CORS
from sklearn.externals import joblib
from sklearn.metrics import (accuracy_score, classification_report,
confusion_matrix, f1_score,
precision_recall_fscore_support, precision_score,
recall_score)
# Set up variables
STORE_LOCATION = 'store'
app = Flask(__name__)
app.config['STORE_LOCATION'] = STORE_LOCATION
CORS(app)
classifierManager = ClassifierManager()
classifierManager.loadAll()
# Get list of models
@app.route('/models')
def get_models():
model_store = app.config['STORE_LOCATION']
models = [f for f in listdir(model_store) if (isfile(join(model_store, f)) and f != "empty.md")]
print(models)
results = []
for model in models:
result = {}
result['name'] = model
result['url'] = "http://" + request.host + "/models/" + model
results.append(result)
response = app.response_class(
response=json.dumps(results),
mimetype='application/json'
)
return response
# Upload model from client to server
@app.route('/models/upload', methods = ['POST'])
def upload_model():
model_name = request.form.get('modelName')
if not 'modelFile' in request.files.keys():
return "No model file provided", 404
model = load_model(request.files['modelFile'])
save_model(model, model_name)
return jsonify(modelName=model_name)
# Get model from server
@app.route('/models/<model_name>')
def get_stored_model(model_name):
model_file_path = join(app.config['STORE_LOCATION'], model_name)
if not isfile(model_file_path):
return "Model not found", 404
return send_from_directory(app.config['STORE_LOCATION'], model_name)
# Train model
@app.route('/models/train', methods = ['POST'])
def train_model():
if not request.files:
return "No files provided", 400
data = request.files['dataFile']
model_name = request.form.get('modelName')
if 'modelFile' in request.files.keys():
model = load_model(request.files['modelFile'])
else:
existing_model_name = request.form.get('existingModelName')
if not existing_model_name:
return "No model specified", 400
model_file_path = join(app.config['STORE_LOCATION'], existing_model_name)
if not isfile(model_file_path):
return "Model not found", 404
model = load_model(model_file_path)
train(data, model)
results = save_model(model, model_name)
return jsonify(results)
# Test model uploaded from client against input data uploaded from client
@app.route('/models/predict', methods =['POST'])
def predict_uploaded_model():
if 'modelFile' in request.files.keys():
model = load_model(request.files['modelFile'])
else:
return "No model file provided", 400
dataframe = read_files(request.files['dataFile'])
text = dataframe.iloc[:, 1]
x = model.pre_process(dataframe.iloc[:, 1])
y = dataframe.iloc[:, 0]
y_predictions = model.predict(x)
classifications = get_classifications(text, y, y_predictions)
return jsonify(
accuracy = accuracy_score(y, y_predictions),
classificationMatrix = classification_report_data(classification_report(y, y_predictions)),
classifications = classifications)
# Test model already on server against input data uploaded from client
@app.route('/models/<model_name>/predict', methods =['POST'])
def predict_model(model_name):
model_file_path = join(app.config['STORE_LOCATION'], model_name)
if not isfile(model_file_path):
return "Model not found", 404
model = load_model(model_file_path)
dataframe = read_files(request.files['dataFile'])
text = dataframe.iloc[:, 1]
x = model.pre_process(dataframe.iloc[:, 1])
y = dataframe.iloc[:, 0]
y_predictions = model.predict(x)
classifications = get_classifications(text, y, y_predictions)
return jsonify(
accuracy = accuracy_score(y, y_predictions),
classificationMatrix = classification_report_data(classification_report(y, y_predictions)),
classifications = classifications)
# Get classification predictions
def get_classifications(x, y, y_predictions):
classifications=[]
for data, label, prediction in zip(x, y, y_predictions):
result = {}
result['text'] = data
result['label'] = label
result['prediction'] = prediction
result['result'] = "Positive" if label == prediction else "Negative"
classifications.append(result)
return classifications
# Test model already on server against input data entered in form
@app.route('/models/<model_name>/predictOne', methods =['POST'])
def predict_model_one(model_name):
model_file_path = join(app.config['STORE_LOCATION'], model_name)
if not isfile(model_file_path):
return "Model not found", 404
model = load_model(model_file_path)
content = request.get_json(force=True)
text = content['text']
print(text)
dataframe = pd.DataFrame([text])
x = model.pre_process([text])
y = model.predict(x)
return jsonify(text = text, prediction = y[0])
# Get list of classifiers
@app.route('/classifiers')
def get_classifiers():
return jsonify([
{'id': classifier_name, 'name': classifier_name}
for classifier_name in classifierManager.classifiers.keys()])
# Get classifier
@app.route('/classifiers/<classifier_name>')
def get_classifier(classifier_name):
if not classifier_name in classifierManager.classifiers.keys():
return "Classifier not found", 404
return jsonify(name = classifier_name)
# Train classifier
@app.route('/classifiers/<classifier_name>/train', methods =['POST'])
def train_classifier(classifier_name):
if not classifier_name in classifierManager.classifiers.keys():
return "Classifier not found", 404
if not request.files or not request.files['dataFile']:
return "No data file provided", 400
model_name = request.form.get('modelName')
data = request.files['dataFile']
classifier = classifierManager.classifiers[classifier_name]
train(data, classifier)
results = save_model(classifier, model_name)
return jsonify(results)
# Read file method
def read_files(file_obj):
file_type = file_obj.filename[file_obj.filename.rfind('.'):]
dataset = None
if (file_type == '.json'):
dataset = pd.read_json(file_obj)
elif (file_type == '.csv'):
dataset = pd.read_csv(file_obj, encoding="ISO-8859-1")
# This file need to be send to the cluster via .addPyFile to handle the pickle problem
# This is outside the optimus folder on purpose because it cause problem importing optimus when using de udf.
# This can not import any optimus file unless it's imported via addPyFile
import datetime
import math
import os
import re
from ast import literal_eval
import fastnumbers
import pandas as pd
import pendulum
from dask import distributed
from dask.dataframe.core import DataFrame as DaskDataFrame
# This function returns True or False depending on whether a string can be converted to any datatype.
from optimus.helpers.constants import ProfilerDataTypes, CURRENCIES
def is_datetime_str(_value):
try:
pendulum.parse(_value, strict=False)
return True
except ValueError:
return False
def str_to_date_format(_value, date_format):
# Check this https://stackoverflow.com/questions/17134716/convert-dataframe-column-type-from-string-to-datetime-dd-mm-yyyy-format
try:
pendulum.from_format(_value, date_format)
return True
except ValueError:
return False
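# Quick hedged illustration (the values are invented): pendulum's from_format
# uses tokens such as DD, MM and YYYY, so '21-04-2020' matches 'DD-MM-YYYY'
# while 'April 2020' does not. Defined for illustration only and never called.
def _str_to_date_format_example():
    return (str_to_date_format("21-04-2020", "DD-MM-YYYY"),   # True
            str_to_date_format("April 2020", "DD-MM-YYYY"))   # False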
def str_to_null(_value):
_value = _value.lower()
if _value == "null":
return True
else:
return False
def is_null(_value):
return pd.isnull(_value)
import numpy as np
import pandas as pd
import statsmodels.api as sm
import statsmodels.formula.api as smf
def get_results(data, verbose = True):
print('Analyzing Individual Score')
md = smf.mixedlm("score ~ n_players + noise", data, groups=data["game"])
mdf = md.fit()
if verbose:
print(mdf.summary())
noises = []
game_ids = sorted(list(set(data['game'])))
for g in game_ids:
noises += [(data.loc[data['game'] == g,'noise']).iloc[0]]
game_df_max = []
comps = {}
reps = 10
for n in range(1,7):
comps[n] = {}
for noise in ['0','1','2','3']:
comps[n][noise] = 0
for i in range(reps):
comp = data.loc[(data['n_players'] == 1) & (data['noise'] == noise), 'score']
comps[n][noise] += np.max(np.random.choice(comp, size = n))
comps[n][noise] /= float(reps)
for g in set(data['game']):
sub = data.loc[data['game'] == g]
n = sub.iloc[0]['n_players']
noise = sub.iloc[0]['noise']
score = max(sub['score']) - comps[n][noise]
game_df_max += [[g, noise, n, score]]
game_df_max = pd.DataFrame(game_df_max)
game_df_max.columns = ['game','noise','n_players','max_score']
game_df_min = []
comps = {}
reps = 10
for n in range(1,7):
comps[n] = {}
for noise in ['0','1','2','3']:
comps[n][noise] = 0
for i in range(reps):
comp = data.loc[(data['n_players'] == 1) & (data['noise'] == noise), 'score']
comps[n][noise] += np.min(np.random.choice(comp, size = n))
comps[n][noise] /= float(reps)
for g in set(data['game']):
sub = data.loc[data['game'] == g]
n = sub.iloc[0]['n_players']
noise = sub.iloc[0]['noise']
score = min(sub['score']) - comps[n][noise]
game_df_min += [[g, noise, n, score]]
game_df_min = pd.DataFrame(game_df_min)
game_df_min.columns = ['game','noise','n_players','min_score']
print('Analyzing Max Score')
mod = smf.ols(formula='max_score ~ n_players + noise', data=game_df_max)
gres = mod.fit()
if verbose:
print(gres.summary())
rgres = gres.get_robustcov_results()
if verbose:
print(rgres.summary())
print('Analyzing Min Score')
mod = smf.ols(formula='min_score ~ n_players + noise', data=game_df_min)
gres = mod.fit()
if verbose:
print(gres.summary())
rgres = gres.get_robustcov_results()
if verbose:
print(rgres.summary())
print('Analyzing Mean Score')
game_df = data.groupby('game').mean()
game_df['noise'] = noises
mod = smf.ols(formula='score ~ n_players + noise', data=game_df)
gres = mod.fit()
if verbose:
print(gres.summary())
rgres = gres.get_robustcov_results()
if verbose:
print(rgres.summary())
return mdf, gres, rgres
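# Hedged sketch of the input layout get_results expects (column names taken from
# the code above, values invented): one row per player per game, 'noise' stored
# as a string category and 'game' identifying the mixed-model group. A real run
# also needs single-player games (n_players == 1) for the comparison baselines.
def _example_input_frame():
    return pd.DataFrame({
        'game': [1, 1, 2, 3],
        'score': [3.5, 4.0, 2.5, 2.0],
        'n_players': [2, 2, 2, 1],
        'noise': ['0', '0', '1', '1'],
    })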
def bootstrap_data(data, num_games):
boot_data = pd.DataFrame()
# Import packages
import os
from glob import glob
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import math
from scipy import mean, std
import scipy.stats as stats
from random import randint, random
import cv2
import matplotlib.pylab as plt
import matplotlib.patches as patches
import seaborn as sns
# Definitions for genetic algorithm and fitness function
# colors for fitness function - a_channel
os_norm_mean_target = 1.94
endo_norm_mean_target = 2.24
zone_norm_mean_target = 0.74
os_norm_std_target = 0.40
endo_norm_std_target = 0.50
zone_norm_std_target = 0.64
# resize ratio for computational speed
resize_ratio = 0.1
# genetic algorithm parameters
population_size = 200
retain_ratio = 0.2
mutation_rate = 0.05
random_selection_rate = 0.05
generations = 20
# Input data files are available in the "../input/" directory.
# Getting filenames from input directory
TRAIN_DATA = "../input/train"
type_1_files = glob(os.path.join(TRAIN_DATA, "Type_1", "*.jpg"))
type_1_ids = np.array([s[len(os.path.join(TRAIN_DATA, "Type_1")) + 1:-4] for s in type_1_files])
type_2_files = glob(os.path.join(TRAIN_DATA, "Type_2", "*.jpg"))
type_2_ids = np.array([s[len(os.path.join(TRAIN_DATA, "Type_2")) + 1:-4] for s in type_2_files])
type_3_files = glob(os.path.join(TRAIN_DATA, "Type_3", "*.jpg"))
type_3_ids = np.array([s[len(os.path.join(TRAIN_DATA, "Type_3")) + 1:-4] for s in type_3_files])
print(len(type_1_files), len(type_2_files), len(type_3_files))
print("Type 1", type_1_ids[:10])
print("Type 2", type_2_ids[:10])
print("Type 3", type_3_ids[:10])
TEST_DATA = "../input/test"
test_files = glob(os.path.join(TEST_DATA, "*.jpg"))
test_ids = np.array([s[len(TEST_DATA) + 1:-4] for s in test_files])
print(len(test_ids))
print(test_ids[:10])
ADDITIONAL_DATA = "../input/additional"
additional_type_1_files = glob(os.path.join(ADDITIONAL_DATA, "Type_1", "*.jpg"))
additional_type_1_ids = np.array(
[s[len(os.path.join(ADDITIONAL_DATA, "Type_1")) + 1:-4] for s in additional_type_1_files])
additional_type_2_files = glob(os.path.join(ADDITIONAL_DATA, "Type_2", "*.jpg"))
additional_type_2_ids = np.array(
[s[len(os.path.join(ADDITIONAL_DATA, "Type_2")) + 1:-4] for s in additional_type_2_files])
additional_type_3_files = glob(os.path.join(ADDITIONAL_DATA, "Type_3", "*.jpg"))
additional_type_3_ids = np.array(
[s[len(os.path.join(ADDITIONAL_DATA, "Type_3")) + 1:-4] for s in additional_type_3_files])
print(len(additional_type_1_files), len(additional_type_2_files), len(additional_type_3_files))
print("Type 1", additional_type_1_ids[:10])
print("Type 2", additional_type_2_ids[:10])
print("Type 3", additional_type_3_ids[:10])
def get_filename(image_id, image_type):
"""
Method to get image file path from its id and type
"""
if image_type == "Type_1" or image_type == "Type_2" or image_type == "Type_3":
data_path = os.path.join(TRAIN_DATA, image_type)
elif image_type == "Test":
data_path = TEST_DATA
elif image_type == "AType_1" or image_type == "AType_2" or image_type == "AType_3":
data_path = os.path.join(ADDITIONAL_DATA, image_type[1:])
else:
raise Exception("Image type '%s' is not recognized" % image_type)
ext = 'jpg'
return os.path.join(data_path, "{}.{}".format(image_id, ext))
def get_image_data(image_id, image_type, rsz_ratio=1):
"""
Method to get image data as np.array specifying image id and type
"""
fname = get_filename(image_id, image_type)
img = cv2.imread(fname)
if rsz_ratio != 1:
img = cv2.resize(img, dsize=(int(img.shape[1] * rsz_ratio), int(img.shape[0] * rsz_ratio)))
assert img is not None, "Failed to read image : %s, %s" % (image_id, image_type)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
return img
def lab_channels(img, display_image=False):
# Extracting the Lab color space into different variables
imgLab = cv2.cvtColor(img, cv2.COLOR_RGB2LAB)
L_channel = imgLab[:,:,0]
a_channel = imgLab[:,:,1]
b_channel = imgLab[:,:,2]
if display_image==True:
plt.figure(figsize=(8,8))
plt.subplot(221)
plt.title("Original image")
plt.imshow(img), plt.xticks([]), plt.yticks([])
plt.subplot(222)
plt.title("L channel")
plt.imshow(L_channel, cmap='gist_heat'), plt.xticks([]), plt.yticks([])
plt.subplot(223)
plt.title("a channel")
plt.imshow(a_channel, cmap='gist_heat'), plt.xticks([]), plt.yticks([])
plt.subplot(224)
plt.title("b channel")
plt.imshow(b_channel, cmap='gist_heat'), plt.xticks([]), plt.yticks([])
return L_channel, a_channel, b_channel
def is_ellipse_in_ellipse(o_ell, i_ell, display_image=False):
# A function to assess if the outer ellipse contains the inner ellipse
# It's an approximation because I am checking only 4 points
# Future consideration - maybe add more points (8, 16)
# finding the boundaries of the inner ellipse
i_ell_center = i_ell.center
i_ell_width = i_ell.width
i_ell_height = i_ell.height
i_angle = i_ell.angle
cos_angle_in = np.cos(np.radians(180.-i_angle))
sin_angle_in = np.sin(np.radians(180.-i_angle))
xct_in=np.zeros(4)
yct_in=np.zeros(4)
xct_in[0] = i_ell_width/2
yct_in[0] = 0
xct_in[1] = -i_ell_width/2
yct_in[1] = 0
xct_in[2] = 0
yct_in[2] = i_ell_height/2
xct_in[3] = 0
yct_in[3] = -i_ell_height/2
xc_in = (xct_in * cos_angle_in + yct_in * sin_angle_in )
yc_in = (yct_in * cos_angle_in - xct_in * sin_angle_in )
x_in = i_ell_center[0] + xc_in
y_in = i_ell_center[1] + yc_in
# Placing the coordinates in the outer ellipse
g_ellipse = o_ell
g_ell_center = g_ellipse.center
g_ell_width = g_ellipse.width
g_ell_height = g_ellipse.height
angle = g_ellipse.angle
cos_angle = np.cos(np.radians(180.-angle))
sin_angle = np.sin(np.radians(180.-angle))
xc = x_in - g_ell_center[0]
yc = y_in - g_ell_center[1]
xct = xc * cos_angle - yc * sin_angle
yct = xc * sin_angle + yc * cos_angle
rad_cc = (xct**2/(g_ell_width/2.)**2) + (yct**2/(g_ell_height/2.)**2)
# Assume all points are in ellipse
all_ellipse_in = True
for r in rad_cc:
if r > 1.:
# point not in ellipse
all_ellipse_in = False
if (display_image==True):
colors_array = []
for r in rad_cc:
if r <= 1.:
# point in ellipse
colors_array.append('red')
else:
# point not in ellipse
colors_array.append('black')
fig,ax = plt.subplots(1)
#ax.set_aspect('equal')
ax.add_patch(g_ellipse)
ax.add_patch(i_ell)
ax.scatter(x_in,y_in,c=colors_array,linewidths=0.3)
plt.show()
return all_ellipse_in
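# Small hedged check of the containment helper (the ellipse sizes are invented):
# a 2x2 ellipse centred inside a 10x10 ellipse with the same centre is reported
# as contained. Uses create_ellipse defined further below; never called here.
def _containment_example():
    outer = create_ellipse(50, 50, 10, 10, 0)
    inner = create_ellipse(50, 50, 2, 2, 0)
    return is_ellipse_in_ellipse(outer, inner)  # True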
def do_ellipses_intersect(i_ell, o_ell, display_image=False):
# A function to assess if the ellipses intersect
# It's an approximation because I am checking only 4 points of each ellipse
# Future consideration - maybe add more points (8, 16)
# finding the boundaries of the inner ellipse
i_ell_center = i_ell.center
i_ell_width = i_ell.width
i_ell_height = i_ell.height
i_angle = i_ell.angle
cos_angle_in = np.cos(np.radians(180.-i_angle))
sin_angle_in = np.sin(np.radians(180.-i_angle))
xct_in=np.zeros(4)
yct_in=np.zeros(4)
xct_in[0] = i_ell_width/2
yct_in[0] = 0
xct_in[1] = -i_ell_width/2
yct_in[1] = 0
xct_in[2] = 0
yct_in[2] = i_ell_height/2
xct_in[3] = 0
yct_in[3] = -i_ell_height/2
xc_in = (xct_in * cos_angle_in + yct_in * sin_angle_in )
yc_in = (yct_in * cos_angle_in - xct_in * sin_angle_in )
x_in = i_ell_center[0] + xc_in
y_in = i_ell_center[1] + yc_in
# Placing the coordinates in the outer ellipse
g_ellipse = o_ell
g_ell_center = g_ellipse.center
g_ell_width = g_ellipse.width
g_ell_height = g_ellipse.height
angle = g_ellipse.angle
cos_angle = np.cos(np.radians(180.-angle))
sin_angle = np.sin(np.radians(180.-angle))
xc = x_in - g_ell_center[0]
yc = y_in - g_ell_center[1]
xct = xc * cos_angle - yc * sin_angle
yct = xc * sin_angle + yc * cos_angle
rad_cc = (xct**2/(g_ell_width/2.)**2) + (yct**2/(g_ell_height/2.)**2)
# Assume no points are in ellipse
ellipses_intersect = False
for r in rad_cc:
if r <= 1.:
# point in ellipse
ellipses_intersect = True
if (display_image==True):
colors_array = []
for r in rad_cc:
if r <= 1.:
# point in ellipse
colors_array.append('red')
else:
# point not in ellipse
colors_array.append('black')
fig,ax = plt.subplots(1)
#ax.set_aspect('equal')
ax.add_patch(g_ellipse)
ax.add_patch(i_ell)
ax.scatter(x_in,y_in,c=colors_array,linewidths=0.3)
plt.show()
return ellipses_intersect
def is_point_in_ellipse(o_ell, x_in, y_in, display_image=False):
# Placing the coordinates in the outer ellipse
g_ellipse = o_ell
g_ell_center = g_ellipse.center
g_ell_width = g_ellipse.width
g_ell_height = g_ellipse.height
angle = g_ellipse.angle
cos_angle = np.cos(np.radians(180.-angle))
sin_angle = np.sin(np.radians(180.-angle))
xc = x_in - g_ell_center[0]
yc = y_in - g_ell_center[1]
xct = xc * cos_angle - yc * sin_angle
yct = xc * sin_angle + yc * cos_angle
rad_cc = (xct**2/(g_ell_width/2.)**2) + (yct**2/(g_ell_height/2.)**2)
# Assume the point is in the ellipse
point_in = True
if rad_cc > 1.:
# point not in ellipse
point_in = False
if (display_image==True):
colors_array = []
if rad_cc <= 1.:
# point in ellipse
colors_array.append('red')
else:
# point not in ellipse
colors_array.append('black')
fig,ax = plt.subplots(1)
#ax.set_aspect('equal')
ax.add_patch(g_ellipse)
ax.scatter(x_in,y_in,c=colors_array,linewidths=0.3)
plt.show()
return point_in
def crop_ellipse(img, ell, display_image=False):
# http://answers.opencv.org/question/25523/extract-an-ellipse-form-from-an-image-instead-of-drawing-it-inside/
# Crop ellipse works only for single channel images
# create a mask image of the same shape as input image, filled with 1s
mask = np.ones_like(img)
# create a zero filled ellipse
mask=cv2.ellipse(mask,
center=ell.center,
axes=(int(ell.width/2),int(ell.height/2)),
angle=ell.angle,
startAngle=0, endAngle=360, color=(0,0,0), thickness=-1)
# Creating a masked array containing only relevant pixels
cropped_ellipse = np.ma.masked_array(img, mask)
# create a mask image for the background, filled with 0s
background_mask = np.zeros_like(img)
# create a ones filled ellipse
background_mask=cv2.ellipse(background_mask,
center=ell.center,
axes=(int(ell.width/2),int(ell.height/2)),
angle=ell.angle,
startAngle=0, endAngle=360, color=(1,1,1), thickness=-1)
background = np.ma.masked_array(img, background_mask)
# Plotting the results
if (display_image):
outline = img.copy()
outline=cv2.ellipse(outline,
center=ell.center,
axes=(int(ell.width/2),int(ell.height/2)),
angle=ell.angle,
startAngle=0, endAngle=360, color=(90,90,90), thickness=4)
plt.figure(figsize=(6,6))
plt.subplot(121)
plt.imshow(cropped_ellipse, cmap='hot')
plt.subplot(122)
plt.imshow(outline, cmap='hot')
plt.show()
return cropped_ellipse, background
def create_ellipse(x, y, width, height, angle, edgecolor='black'):
return patches.Ellipse((x, y), width, height, angle=angle, fill=False, edgecolor=edgecolor, linewidth=2)
def ellipse_background(img, ell, ratio=1.3, display_image=False):
back_width = max(ell.width*ratio, ell.width + 4)
back_height = max(ell.height*ratio, ell.height + 4)
back_ell = create_ellipse(ell.center[0], ell.center[1], back_width, back_height, ell.angle)
front_ell = create_ellipse(ell.center[0], ell.center[1], ell.width, ell.height, ell.angle)
front_center = front_ell.center
front_axes = (int(front_ell.width/2),
int(front_ell.height/2))
front_angle = int(front_ell.angle)
img_ell = img.copy()
img_ell=cv2.ellipse(img_ell,
front_center,
front_axes,
front_angle,
startAngle=0, endAngle=360, color=(0,0,0), thickness=-1)
back_ell_image, _ = crop_ellipse(img_ell, back_ell)  # crop_ellipse returns (cropped, background)
result = np.ma.masked_equal(back_ell_image, 0)
if (display_image):
plt.imshow(result)
return result
def extract_ellipses(a_image, os, endo, zone, display_image=False):
# Cropping out the ellipses from the image
os_image, os_background = crop_ellipse(a_image, os, display_image=False)
endo_image, endo_background = crop_ellipse(os_background, endo, display_image)
zone_image, _ = crop_ellipse(endo_background, zone, display_image)
return os_image, endo_image, zone_image
def outline_individual_on_image(img, individual):
# Creating ellipses from chromosome and extracting the relevant images
os_ell = create_ellipse(individual[0], individual[1], individual[2], individual[3], individual[4], edgecolor='red')
endo_ell = create_ellipse(individual[5], individual[6], individual[7], individual[8], individual[9], edgecolor='red')
zone_ell = create_ellipse(individual[10], individual[11], individual[12], individual[13], individual[14], edgecolor='red')
outline = img.copy()
outline = cv2.ellipse(outline,
center=os_ell.center,
axes=(int(os_ell.width/2),int(os_ell.height/2)),
angle=os_ell.angle,
startAngle=0, endAngle=360, color=(0,0,0), thickness=2)
outline = cv2.ellipse(outline,
center=endo_ell.center,
axes=(int(endo_ell.width/2),int(endo_ell.height/2)),
angle=endo_ell.angle,
startAngle=0, endAngle=360, color=(255,255,255), thickness=2)
outline = cv2.ellipse(outline,
center=zone_ell.center,
axes=(int(zone_ell.width/2),int(zone_ell.height/2)),
angle=zone_ell.angle,
startAngle=0, endAngle=360, color=(0,255,0), thickness=2)
return outline
# Genetic algorithm from
# https://lethain.com/genetic-algorithms-cool-name-damn-simple/
def create_individual(img):
# Creating an individual
    # individual = [x, y, width, height, angle] * 3
    # Create a member of the population.
w = img.shape[1]
h = img.shape[0]
max_size = max(w, h)
min_size = int(max_size * 0.5)
ind = [0]*15
# Ellipse #1 center
ind[0] = randint(0, w)
ind[1] = randint(0, h)
# Width and height
ind[2] = randint(2, max_size)
ind[3] = randint(2, max_size)
# Angle
ind[4] = randint(0, 180)
# Ellipse #2 center
ind[5] = randint(0, w)
ind[6] = randint(0, h)
# Width and height
ind[7] = randint(int(max_size*0.25), max_size)
ind[8] = randint(int(max_size*0.25), max_size)
# Angle
ind[9] = randint(0, 180)
# Ellipse #3 center
ind[10] = randint(0, w)
ind[11] = randint(0, h)
# Width and height
ind[12] = randint(min_size, max_size)
ind[13] = randint(min_size, max_size)
# Angle
ind[14] = randint(0, 180)
return ind
def init_population(img, count):
# Creating a population
pop = []
for i in range(count):
pop.append(create_individual(img))
return pop
def evolve(img, pop, retain, random_select, mutate):
# Evolution
retain_length = int(len(pop)*retain)
parents = pop[:retain_length]
# randomly add other individuals to
# promote genetic diversity
for individual in pop[retain_length:]:
if random_select > random():
parents.append(individual)
# mutate some individuals
for individual in parents:
if mutate > random():
pos_to_mutate = randint(0, len(individual)-1)
individual[pos_to_mutate] = create_individual(img)[pos_to_mutate]
# crossover parents to create children
parents_length = len(parents)
desired_length = len(pop) - parents_length
children = []
while len(children) < desired_length:
male = randint(0, parents_length-1)
female = randint(0, parents_length-1)
if male != female:
male = parents[male]
female = parents[female]
exchange_point = randint(0, len(male)-1)
child = male[:exchange_point] + female[exchange_point:]
children.append(child)
parents.extend(children)
return parents
def grade_type1(a_img, pop):
# Sort population according to fitness function
# Return sorted population and fitness function grades
init_pop = [ (type1_fitness(a_img, x), x) for x in pop]
init_pop = sorted(init_pop)
sorted_pop = [x[-1] for x in init_pop]
grades = [x[0][0] for x in init_pop]
elements = [ x[0][1] for x in init_pop]
return sorted_pop, grades, elements
# GA Fitness Functions
def type1_fitness(a_channel, individual, display_image=False):
penalty = 25.0
# Zeroing all of the fitness function elements
fitness = 0.0
os_std = os_mean = endo_std = endo_mean = zone_std = zone_mean = 0.0
os_out_of_endo_penalty = os_out_of_zone_penalty = endo_out_of_zone_penalty = 0.0
endo_in_os_penalty = zone_in_os_penalty = zone_in_endo_penalty = zone_is_empty_penalty = 0.0
# Creating ellipses from chromosome and extracting the relevant images
os_ell = create_ellipse(individual[0], individual[1], individual[2], individual[3], individual[4], edgecolor='red')
endo_ell = create_ellipse(individual[5], individual[6], individual[7], individual[8], individual[9],
edgecolor='red')
zone_ell = create_ellipse(individual[10], individual[11], individual[12], individual[13], individual[14],
edgecolor='red')
os_image, endo_image, zone_image = extract_ellipses(a_channel, os_ell, endo_ell, zone_ell, display_image)
os_array = np.ma.MaskedArray.flatten(os_image)
endo_array = np.ma.MaskedArray.flatten(endo_image)
zone_array = np.ma.MaskedArray.flatten(zone_image)
    ### Checking the position of all ellipses relative to one another
    # OS inside ENDO?
    os_in_endo = bool(is_ellipse_in_ellipse(endo_ell, os_ell, display_image=False))
    # ENDO inside OS?
    endo_in_os = bool(is_ellipse_in_ellipse(os_ell, endo_ell, display_image=False))
    # OS inside ZONE?
    os_in_zone = bool(is_ellipse_in_ellipse(zone_ell, os_ell, display_image=False))
    # ENDO inside ZONE?
    endo_in_zone = bool(is_ellipse_in_ellipse(zone_ell, endo_ell, display_image=False))
    # ZONE inside OS?
    zone_in_os = bool(is_ellipse_in_ellipse(os_ell, zone_ell, display_image=False))
    # ZONE inside ENDO?
    zone_in_endo = bool(is_ellipse_in_ellipse(endo_ell, zone_ell, display_image=False))
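    # (os_norm_mean_target, os_norm_std_target and the matching ENDO/ZONE targets used
    #  below are module-level constants assumed to be defined elsewhere in this script;
    #  they give the "ideal" colour statistics each region is scored against.)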
## OS ellipse
# OS mean color
os_mean = abs(np.mean(os_array) - os_norm_mean_target)
os_std = abs(np.std(os_array) - os_norm_std_target)
    # Add penalty if OS is not inside ENDO
if (os_in_endo == False):
os_out_of_endo_penalty = penalty
    # Add penalty if OS is not inside ZONE
if (os_in_zone == False):
os_out_of_zone_penalty = penalty
## ENDO ellipse
# Checking that the ENDO ellipse is not empty
if ((endo_in_os == False) and (endo_image.mask.all() == False)):
# ENDO mean color
endo_mean = abs(np.mean(endo_array) - endo_norm_mean_target)
endo_std = abs(np.std(endo_array) - endo_norm_std_target)
# Else add a penalty for ENDO being fully inside OS
else:
endo_in_os_penalty = penalty
    # Checking if ENDO is inside ZONE
if (endo_in_zone == False):
endo_out_of_zone_penalty = penalty
## ZONE ellipse
    # Checking that the ZONE ellipse is not empty
if (zone_in_os == False) and (zone_in_endo == False) and (zone_image.mask.all() == False):
# ZONE mean color
zone_mean = abs(np.mean(zone_array) - zone_norm_mean_target)
zone_std = abs(np.std(zone_array) - zone_norm_std_target)
# Else add a penalty
else:
if (zone_in_os): zone_in_os_penalty = penalty
if (zone_in_endo): zone_in_endo_penalty = penalty
if (zone_in_os == False) and (zone_in_endo == False) and (
zone_image.mask.all() == True): zone_is_empty_penalty = penalty
fitness += os_std + endo_std + zone_std
fitness += os_mean + endo_mean + zone_mean
fitness += os_out_of_endo_penalty + os_out_of_zone_penalty + endo_out_of_zone_penalty
fitness += endo_in_os_penalty + zone_in_os_penalty + zone_in_endo_penalty + zone_is_empty_penalty
# Displaying the results
if (display_image):
        result_image = outline_individual_on_image(a_channel, individual)
plt.figure(figsize=(6, 6))
plt.title("Ellipse outline")
plt.imshow(result_image), plt.xticks([]), plt.yticks([])
return fitness, [os_mean, os_std, endo_mean, endo_std, zone_mean, zone_std,
os_out_of_endo_penalty, os_out_of_zone_penalty, endo_out_of_zone_penalty,
endo_in_os_penalty, zone_in_os_penalty, zone_in_endo_penalty, zone_is_empty_penalty]
# Type 1 Genetic Algorithm
def type1_GA(img,
pop_size=population_size,
r_ratio=retain_ratio,
mut_rate=mutation_rate,
rnd_rate=random_selection_rate,
gen=generations,
display_image=False):
# Getting a normalized image from the a channel in the Lab color space
_, a_image, _ = lab_channels(img, display_image=False)
a_image, _ = return_z_score(a_image)
# a_image, _ = return_z_score(img[1,:,:]) # Red colors from image - normalized
best_grade_history = []
average_grade_history = []
best_pop_history = []
best_elements_history = []
if (display_image):
print('Init population - Type 1 GA\n')
print('Population size: %s\nRetain ratio: %s\nMutation rate: %s\nRandom selection chance: %s\nGenerations: %s' %
(pop_size, r_ratio, mut_rate, rnd_rate, gen))
pop = init_population(a_image, pop_size)
pop, grades, elements = grade_type1(a_image, pop)
best_elements_history.append(elements[0])
best_grade_history.append(grades[0])
average_grade_history.append(mean(grades))
best_pop_history.append(pop[0])
counter = 1
best_grade_achieved = False
no_change_in_grade = 0
while best_grade_achieved == False:
pop = evolve(a_image, pop, retain=r_ratio, mutate=mut_rate, random_select=rnd_rate)
pop, grades, elements = grade_type1(a_image, pop)
best_elements_history.append(elements[0])
best_grade_history.append(grades[0])
average_grade_history.append(mean(grades))
best_pop_history.append(pop[0])
counter += 1
if best_grade_history[-1] == best_grade_history[-2]:
no_change_in_grade += 1
else: no_change_in_grade = 0
if gen > 0:
if counter == gen: best_grade_achieved = True
elif no_change_in_grade == abs(gen):
best_grade_achieved = True
if (display_image):
print('\nGeneration %i. There is no change in the best grade for %i generations.' % (counter, no_change_in_grade))
print('Best grade [-1] = %f, Best grade [-2] = %f' % (best_grade_history[-1], best_grade_history[-2]))
print(best_elements_history[-1])
return best_pop_history, best_grade_history, average_grade_history, best_elements_history
# Visualizing the algorithm evolution
def plot_fitness_function_scores(best_score, average_score):
plt.plot(best_score)
plt.plot(average_score)
plt.show()
return
def plot_elements(fitness, elements):
e = np.transpose(elements)
all_elements = range(e.shape[0])
for i in all_elements:
plt.plot(e[i], label="n={0}".format(i))
plt.legend(loc="upper right",
ncol=2, shadow=True, title="Legend", fancybox=True)
plt.show()
def sample_algorithm(gen_number=generations, type1_image_id='470'):
image = get_image_data(type1_image_id, 'Type_1', 0.1)
plt.imshow(image), plt.xticks([]), plt.yticks([])
pop1, gra1, avg1, ele1 = type1_GA(image, gen=gen_number, display_image=True)
aaa = outline_individual_on_image(image, pop1[-1])
plt.imshow(aaa), plt.xticks([]), plt.yticks([])
plt.show()
print(ele1)
plot_fitness_function_scores(gra1, avg1)
plot_elements(gra1, ele1)
# animation from matplotlib examples
# from matplotlib import animation, rc
# from IPython.display import HTML
"""
=================
An animated image
=================
This example demonstrates how to animate an image.
"""
import matplotlib.animation as animation
fig = plt.figure()
im = plt.imshow(image, animated=True)
def updatefig(i):
plt.title('Generation %i out of %i' % (i + 1, len(pop1)))
im.set_array(outline_individual_on_image(image, pop1[i]))
plt.draw()
return im,
ani = animation.FuncAnimation(fig, updatefig, np.arange(0, len(pop1)), interval=300, blit=True)
plt.show()
ani.save('Type1GA.mp4', fps=10, writer='ffmpeg', codec='mpeg4', dpi=100)
# HTML(ani.to_html5_video())
return
def analyze_individual_color(a_channel, individual, display_image=False):
os_std = os_mean = endo_std = endo_mean = zone_std = zone_mean = 0
# Creating ellipses from chromosome and extracting the relevant images
os_ell = create_ellipse(individual[0], individual[1], individual[2], individual[3], individual[4], edgecolor='red')
endo_ell = create_ellipse(individual[5], individual[6], individual[7], individual[8], individual[9],
edgecolor='red')
zone_ell = create_ellipse(individual[10], individual[11], individual[12], individual[13], individual[14],
edgecolor='red')
os_image, endo_image, zone_image = extract_ellipses(a_channel, os_ell, endo_ell, zone_ell, display_image)
os_array = np.ma.MaskedArray.flatten(os_image)
endo_array = np.ma.MaskedArray.flatten(endo_image)
zone_array = np.ma.MaskedArray.flatten(zone_image)
## OS ellipse
    # Homogeneity of OS
os_std = np.std(os_array)
# OS mean color, darker is better
os_mean = np.mean(os_array)
## ENDO ellipse
    # Homogeneity of ENDO
endo_std = np.std(endo_array)
# ENDO mean color, darker is better
endo_mean = np.mean(endo_array)
## ZONE ellipse
    # Homogeneity of ZONE
zone_std = np.std(zone_array)
# ZONE mean color, darker is better
zone_mean = np.mean(zone_array)
return os_mean, os_std, endo_mean, endo_std, zone_mean, zone_std
def return_z_score(img):
z_image = np.array(img)
img_mean = np.mean(z_image)
img_std = np.std(z_image)
# img_n = np.prod(z_image.shape)
z_image = (z_image - img_mean ) / img_std
# z_mean = np.mean(z_image)
# z_std = np.std(z_image)
zms_image = np.array(z_image ** 2)
return z_image, zms_image
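# Illustrative sanity check for return_z_score (not part of the original pipeline):
# the z-scored image should have ~zero mean and ~unit standard deviation, and the
# second output is the element-wise square of the first. The array is made-up data.
def _demo_return_z_score():
    demo = np.array([[1.0, 2.0], [3.0, 4.0]])
    z, zms = return_z_score(demo)
    assert abs(np.mean(z)) < 1e-9
    assert abs(np.std(z) - 1.0) < 1e-9
    assert np.allclose(zms, z ** 2)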
def analyze_type1_outlines_from_csv(input_csv_file, output_csv_file):
# Analyzing the images according to the outlines in the csv file
print('Input file: ', input_csv_file)
print('Output file: ', output_csv_file)
results_type1 = []
df = pd.read_csv(input_csv_file, index_col='image_id')
del df['Unnamed: 0']
for current_image, row in df.iterrows():
print('Analyzing image id %i' % current_image)
individual = row.tolist()
image = get_image_data(current_image, 'Type_1', 0.1)
_, a_image, _ = lab_channels(image, display_image=False)
#a_image, _, _ = lab_channels(image, display_image=False)
#a_image = image[:,:,2]
z, zms = return_z_score(a_image)
a_channel_analysis = analyze_individual_color(a_image, individual)
normalized_analysis = analyze_individual_color(z, individual)
norm_squared_analysis = analyze_individual_color(zms, individual)
results_type1.append([current_image] + list(a_channel_analysis)
+ list(normalized_analysis)
+ list(norm_squared_analysis))
# Writing to csv
df_results_type1_columns = ['image_id', 'color_os_mean', 'color_os_std', 'color_endo_mean',
'color_endo_std', 'color_zone_mean', 'color_zone_std', \
'norm_os_mean', 'norm_os_std', 'norm_endo_mean',
'norm_endo_std', 'norm_zone_mean', 'norm_zone_std', \
'norm_sqr_os_mean', 'norm_sqr_os_std', 'norm_sqr_endo_mean',
'norm_sqr_endo_std', 'norm_sqr_zone_mean', 'norm_sqr_zone_std']
df_results_type1 = pd.DataFrame(results_type1, columns=df_results_type1_columns)
df_results_type1.to_csv(output_csv_file)
print(df_results_type1.head())
return
def view_type1_outline_from_csv(input_csv_file, type1_image_id=48):
    # Viewing the perfect chromosome for an image previously outlined manually
df = pd.read_csv(input_csv_file, index_col='image_id')
del df['Unnamed: 0']
print(df.head())
image = get_image_data(type1_image_id, 'Type_1', 0.1)
individual = df.loc[type1_image_id,:]
print(individual)
# fitness with G channel, just for checking the fitness function 30.10.17
z_image, _ = return_z_score(image[:,:,1])
fitness, elements = type1_fitness(z_image, individual)
print('Fitness score is: ', fitness)
print(elements)
# Plotting the latest image
plt.subplot(121)
plt.title('Type 1 Image - %i' % type1_image_id)
plt.imshow(image)
image_outline = outline_individual_on_image(image, individual)
plt.subplot(122)
plt.title('Image with outline')
plt.imshow(image_outline)
# Show in full screen for convenience
mng = plt.get_current_fig_manager()
mng.full_screen_toggle()
plt.show()
return
def visualize_type1_outlines_color_analysis(input_csv_file):
# An easy way to view the manual outlines analysis
df = pd.read_csv(input_csv_file, index_col='image_id')
del df['Unnamed: 0']
print(df.head())
# Using seaborn for visualization
color_cols = pd.DataFrame(df, columns=['color_os_mean', 'color_endo_mean', 'color_zone_mean'])
norm_cols = | pd.DataFrame(df, columns=['norm_os_mean', 'norm_endo_mean', 'norm_zone_mean']) | pandas.DataFrame |
import argparse
import os
import sys
import numpy as np
import pandas
import plotly.express as px
import plotly.graph_objects as go
import statsmodels.api
from plotly.subplots import make_subplots
from sklearn.ensemble import RandomForestRegressor
def get_args():
# create the parser
parser = argparse.ArgumentParser(
description="Feature Graph Generator: A tool to explore feature quality by producing pretty graphs from the "
"intermediate csv file of calculated features.",
)
# adding parser arguments
parser.add_argument(
"-i",
action="store",
type=str,
dest="input",
help="the path to the input file",
)
parser.add_argument(
"-o",
action="store",
type=str,
dest="output",
help="optional flag to set the path to the output directory",
default=os.getcwd(),
)
parser.add_argument(
"-c",
action="append",
type=str,
dest="class_list",
help="optional flag for selecting only specific classes to be graphed, may enter multiple by calling this "
"flag multiple times",
default=None,
)
parser.add_argument(
"-s",
action="store_true",
dest="show",
help='optional flag to set the "show graphs" switch to True',
default=False,
)
parser.add_argument(
"-v",
action="store_true",
dest="verbose",
help='optional flag to set the "verbose" switch to True',
default=False,
)
parser.add_argument(
"--skip",
action="append",
dest="skip",
choices=["tval", "rf", "corr", "joy"],
help="optional flag to skip generating certain graphs. May be called multiple times",
default=[],
)
# executing the parse_args command
args = parser.parse_args()
# getting args
input_file = args.input
output_file = args.output
class_list = args.class_list
show = args.show
verbose = args.verbose
skip = args.skip
return input_file, output_file, class_list, show, verbose, skip
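# Example invocation (illustrative only -- the script name, file paths and class
# names below are placeholders, not part of this repository):
#   python feature_graphs.py -i features.csv -o ./graphs -c Portal -c Capsid -s --skip joy
# This would read features.csv, write the HTML plots to ./graphs, restrict the
# analysis to the two listed classes, show the figures interactively, and skip
# the joy plots.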
def cat_cont_correlation_ratio(categories, values):
"""
Simple function to determine the correlation ratio between a list
of categorical values and a list of continuous values.
Code provided by Julien.
"""
f_cat, _ = pandas.factorize(categories)
cat_num = np.max(f_cat) + 1
y_avg_array = np.zeros(cat_num)
n_array = np.zeros(cat_num)
for i in range(0, cat_num):
cat_measures = values[np.argwhere(f_cat == i).flatten()]
n_array[i] = len(cat_measures)
y_avg_array[i] = np.average(cat_measures)
y_total_avg = np.sum(np.multiply(y_avg_array, n_array)) / np.sum(n_array)
numerator = np.sum(
np.multiply(n_array, np.power(np.subtract(y_avg_array, y_total_avg), 2))
)
denominator = np.sum(np.power(np.subtract(values, y_total_avg), 2))
if numerator == 0:
eta = 0.0
else:
eta = np.sqrt(numerator / denominator)
return eta
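# Minimal illustration of cat_cont_correlation_ratio on synthetic data (not project
# data): when the continuous values are fully determined by the category the ratio
# should be close to 1, and close to 0 when they are unrelated. Names are made up.
def _demo_correlation_ratio():
    rng = np.random.default_rng(0)
    categories = np.array(['a'] * 50 + ['b'] * 50)
    separated = np.concatenate([rng.normal(0.0, 0.1, 50), rng.normal(5.0, 0.1, 50)])
    unrelated = rng.normal(0.0, 1.0, 100)
    print('separated groups eta:', cat_cont_correlation_ratio(categories, separated))
    print('unrelated values eta:', cat_cont_correlation_ratio(categories, unrelated))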
def correlation_bar_plots(df, outfile, class_list, show):
    # get a list of feature names: columns whose first value in the dataframe is numeric
features_list = [x for x in df.columns.to_list() if type(df[x][0]) == np.float64 or type(df[x][0]) == np.int64]
# sort it alphabetically ignoring case
features_list = sorted(features_list, key=str.lower)
# set up an empty array for correlation values. Rows = classes, Columns = features
correlation_array = np.zeros((len(class_list), len(features_list)))
for i in range(len(class_list)):
classification = class_list[i]
# figure out which rows are the correct classification
is_target_class = [
True if x == classification else False for x in df["classification"]
]
# loop through each feature from the list
for ii in range(len(features_list)):
feature = features_list[ii]
# run the correlation function between it and the classification column
correlation_array[i][ii] = round(
cat_cont_correlation_ratio(is_target_class, df[feature]), 5
)
correlation_plot = go.Figure(
data=go.Heatmap(z=correlation_array,
x=features_list,
y=class_list,
hoverongaps=False, colorscale='Plasma'),
)
correlation_plot.update_layout(
xaxis_title="Features",
yaxis_title="Phage Protein Class",
title_text=f"Correlation Ratios",
font=dict(size=12),
)
correlation_plot.update_xaxes(tickangle=45, tickfont=dict(size=12))
correlation_plot.write_html(
file=f"{outfile}/correlation heatplot.html",
include_plotlyjs=True,
)
if show:
correlation_plot.show()
return
def t_value_bar_plots(df, outfile, class_list, show):
    # get a list of feature names: columns whose first value in the dataframe is numeric
features_list = [x for x in df.columns.to_list() if type(df[x][0]) == np.float64 or type(df[x][0]) == np.int64]
# sort it alphabetically ignoring case
features_list = sorted(features_list, key=str.lower)
# set up an empty array for correlation values. Rows = classes, Columns = features
tval_array = np.zeros((len(class_list), len(features_list)))
for i in range(len(class_list)):
classification = class_list[i]
# figure out which rows are the correct classification
is_target_class = [
True if x == classification else False for x in df["classification"]
]
# loop through each column in the data frame and check if the first row value is a number of some kind
for ii in range(len(features_list)):
feature = features_list[ii]
if type(df[feature][0]) == np.float64 or type(df[feature][0]) == np.int64:
# if it is, get the t-value. Following code was provided by Julien.
predictor = statsmodels.api.add_constant(df[feature].to_numpy())
logistic_regression_model = statsmodels.api.Logit(
is_target_class, predictor
)
logistic_regression_fitted = logistic_regression_model.fit(disp=False)
t_value = round(logistic_regression_fitted.tvalues[1], 4)
tval_array[i][ii] = abs(t_value)
t_val_plot = go.Figure(
data=go.Heatmap(z=tval_array,
x=features_list,
y=class_list,
hoverongaps=False, colorscale='Plasma'),
)
t_val_plot.update_layout(
xaxis_title="Features",
yaxis_title="Phage Protein Class",
title_text=f"T-Values",
font=dict(size=12),
)
t_val_plot.update_xaxes(tickangle=45, tickfont=dict(size=12))
t_val_plot.write_html(
file=f"{outfile}/t-value heatplot.html",
include_plotlyjs=True,
)
if show:
t_val_plot.show()
return
def t_value_bar_plots_old(df, outfile, class_list, show):
for classification in class_list:
# set up empty dictionary
t_values = {}
# figure out which rows are the correct classification
is_target_class = [
True if x == classification else False for x in df["classification"]
]
# loop through each column in the data frame and check if the first row value is a number of some kind
for feature in df.columns.to_list():
if type(df[feature][0]) == np.float64 or type(df[feature][0]) == np.int64:
# if it is, get the t-value. Following code was provided by Julien.
predictor = statsmodels.api.add_constant(df[feature].to_numpy())
logistic_regression_model = statsmodels.api.Logit(
is_target_class, predictor
)
logistic_regression_fitted = logistic_regression_model.fit(disp=False)
t_value = round(logistic_regression_fitted.tvalues[1], 4)
t_values[feature] = abs(t_value)
# set x and y values for plotting
x_axis = list(t_values.keys())
y_axis = list(t_values.values())
# sort alphabetically using zip sort and then return data to list format
x_axis, y_axis = zip(*sorted(zip(x_axis, y_axis)))
x_axis, y_axis = list(x_axis), list(y_axis)
# set up t-value plot with layout options
t_val_plot = go.Figure(
[
go.Bar(
x=x_axis, y=y_axis, marker={"color": y_axis, "colorscale": "dense"}
)
]
)
t_val_plot.update_layout(
xaxis_title="Features",
yaxis_title="|t-value|",
title_text=f"t-values - {classification}",
font=dict(size=12),
)
t_val_plot.update_xaxes(tickangle=45, tickfont=dict(size=10))
t_val_plot.write_html(
file=f"{outfile}/t-value {classification}.html",
include_plotlyjs=True,
)
if show:
t_val_plot.show()
return
def feature_importance_bar_plot(df, outfile, class_list, show):
for classification in class_list:
# figure out which rows are the correct classification, convert into data frame
is_target_class = [
True if x == classification else False for x in df["classification"]
]
is_target_class = pandas.DataFrame(is_target_class, columns=["classification"])
# setup second data frame that drops all non-testable columns
df_rf = df
for feature in df_rf.columns.to_list():
if (
type(df_rf[feature][0]) != np.float64
and type(df_rf[feature][0]) != np.int64
):
df_rf = df_rf.drop(feature, axis=1)
# convert to numpy and flatten the is_target_class array to make RandomForestRegressor() happy
is_target_class_array = np.ravel(is_target_class.to_numpy(), order="C")
df_rf_array = df_rf.to_numpy()
# get feature importance for each column in the data frame
rf = RandomForestRegressor()
rf.fit(df_rf_array, is_target_class_array)
feature_importance = rf.feature_importances_
# set x and y values for plotting
x_axis = list(df_rf.columns)
y_axis = list(feature_importance)
# sort numerically using zip sort and then return data to list format
y_axis, x_axis = zip(*sorted(zip(y_axis, x_axis)))
x_axis, y_axis = list(x_axis), list(y_axis)
# set up correlation plot with layout options
rf_plot = go.Figure(
[
go.Bar(
x=x_axis, y=y_axis, marker={"color": y_axis, "colorscale": "Blugrn"}
)
]
)
rf_plot.update_layout(
xaxis_title="Features",
yaxis_title="RF Feature Importance Metric",
title_text=f"Random Forest Feature Importance - {classification}",
font=dict(size=12),
)
rf_plot.update_xaxes(tickangle=45, tickfont=dict(size=10))
rf_plot.write_html(
file=f"{outfile}/RF feature importance {classification}.html",
include_plotlyjs=True,
)
if show:
rf_plot.show()
return
def joy_plot(df, outfile, class_list, show):
for feature in df.columns.to_list():
if type(df[feature][0]) == np.float64 or type(df[feature][0]) == np.int64:
df_to_plot = df[["classification", feature]]
df_to_plot = df_to_plot[df["classification"].isin(class_list)]
joy_violin = px.violin(
df_to_plot,
x=feature,
color="classification",
violinmode="overlay",
points=False,
orientation="h",
color_discrete_sequence=px.colors.qualitative.Pastel1,
)
joy_violin.update_layout(height=400)
joy_violin.update_traces(width=0.9, points=False)
joy_violin.update_yaxes(range=[0, 1])
joy_violin.update_layout(title=f"Joy Plot of {feature}")
joy_violin.update_layout(xaxis_showgrid=False, xaxis_zeroline=False)
joy_violin.write_html(
file=f"{outfile}/joy plot {feature}.html",
include_plotlyjs="cdn",
)
if show:
joy_violin.show()
return
def main():
# call argparse function
infile, outfile, classes, show, verbose, skip = get_args()
# if output directory doesn't exist, create it
if not os.path.exists(outfile):
if verbose:
print("Creating new output directory")
os.makedirs(outfile)
# open dataframe from csv
if verbose:
print(f"Reading in data from {infile}")
df = | pandas.read_csv(infile, engine='python') | pandas.read_csv |
""" http://pandas.pydata.org/pandas-docs/version/0.18.0/10min.html
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# pd.test()
# Object Creation
s = pd.Series([1, 3, 5, np.NaN, 6, 8])
print(s)
dates = pd.date_range("2013-01-01", periods=6)
df = pd.DataFrame(np.random.randn(6, 4), index=dates, columns=list('ABCD'))
print(df)
df1 = pd.DataFrame({'A': 1.,
'B': pd.Timestamp('20130102'),
'C': pd.Series(1, index=list(range(4)), dtype='float32'),
'D': np.array([3] * 4, dtype='int32'),
'E': pd.Categorical(["test", "train", "test", "train"]),
'F': 'foo'})
# Selection
df.head()
df.tail(3)
df.index
df.columns
df.values
df.describe()
df.T
df.sort_index(axis=1, ascending=False)
df.sort_values(by='B')
df['A']
df[0:3]
df['20130102':'20130104']
df.loc[dates[0]]
df.loc[:, ['A', 'B']]
df.loc['20130102':'20130104', ['A', 'B']]
df.loc['20130102', ['A', 'B']]
df.loc[dates[0], 'A']
df.at[dates[0], 'A']
df.iloc[3]
df.iloc[3:5, 0:2]
df.iloc[[1, 2, 4], [0, 2]]
df.iloc[1:3, :]
df.iloc[:, 1:3]
df.iloc[1, 1]
df.iat[1, 1] # fast than above
df[df.A > 0]
df[df > 0]
df[df < 0] = -df
df2 = df.copy()
df2['E'] = ['one', 'one', 'two', 'three', 'four', 'three']
df2[df2['E'].isin(['two', 'four'])]
s1 = pd.Series([1, 2, 3, 4, 5, 6], index=pd.date_range('20130102', periods=6))
df2['F'] = s1
df2.at[dates[0], 'A'] = 0
df2.iat[0, 1] = 0
df2.loc[:, 'D'] = np.array([5] * len(df))
# Missing Data
df1 = df.reindex(index=dates[0:4], columns=list(df.columns) + ['E'])
df1.loc[dates[0]:dates[1], 'E'] = 1
df1.dropna(how='any')
df1.fillna(value=5)
pd.isnull(df1)
# Operations
df.mean()
df.mean(1)
s = pd.Series([1, 3, 5, np.nan, 6, 8], index=dates).shift(2)
df.sub(s, axis='index')
df.apply(np.cumsum)
df.apply(lambda x: x.max() - x.min())
pd.Series(np.random.randint(0, 7, size=10))
s.value_counts()
s = pd.Series(['A', 'B', 'C', 'Aaba', 'Baca', np.nan, 'CABA', 'dog', 'cat'])
s.str.lower()
df = pd.DataFrame(np.random.randn(10, 4))
pieces = [df[:3], df[3:7], df[7:]]
pd.concat(pieces)
left = pd.DataFrame({'key': ['foo', 'foo'], 'lval': [1, 2]})
right = | pd.DataFrame({'key': ['foo', 'foo'], 'rval': [4, 5]}) | pandas.DataFrame |
from pengle.transformer.categorical_features import TargetStatisticsEncoder, MonthEncoding, DayEncoding, TimeEncoding
from pengle.dataset.dataset import Dataset
import pandas as pd
import pytest
import mock
import numpy as np
from pandas.util.testing import assert_frame_equal
def test_target_statistics_encoder():
train_dataset = Dataset(data=pd.DataFrame([[1, 1],
[1, 1],
[1, 2],
[1, 2],
[2, 1]],
columns=['category1', 'category2']),
target=[0, 2, 3, 4, 5],
target_column='target')
test_dataset = Dataset(data=pd.DataFrame([[1, 1],
[1, 1],
[1, 2],
[1, 2],
[3, 1],
[2, 1]], columns=['category1', 'category2']),
target=[0, 2, 3, 4, 5],
target_column='target')
train, test = TargetStatisticsEncoder(groupby_keys=['category1', 'category2']).fit(
train_dataset, test_dataset).transform(save=False)
df_expected = pd.DataFrame([[2.25, 4, 2.916667, 1.707825, 2.5, 2.333333, 5, 6.333333, 2.516611, 2.0],
[2.25, 4, 2.916667, 1.707825, 2.5, 2.333333, 5, 6.333333, 2.516611, 2.0],
[2.25, 4, 2.916667, 1.707825, 2.5, 3.500000, 4, 0.500000, 0.707107, 3.5],
[2.25, 4, 2.916667, 1.707825, 2.5, 3.500000, 4, 0.500000, 0.707107, 3.5],
[5.00, 5, np.nan, np.nan, 5.0, 2.333333, 5, 6.333333, 2.516611, 2.0]],
columns=['target_enc_mean_category1', 'target_enc_max_category1',
'target_enc_var_category1', 'target_enc_std_category1',
'target_enc_median_category1', 'target_enc_mean_category2',
'target_enc_max_category2', 'target_enc_var_category2',
'target_enc_std_category2', 'target_enc_median_category2'])
assert_frame_equal(train, df_expected)
df_expected = pd.DataFrame([[2.25, 4.0, 2.916667, 1.707825, 2.5, 2.333333, 5, 6.333333, 2.516611, 2.0],
[2.25, 4.0, 2.916667, 1.707825, 2.5, 2.333333, 5, 6.333333, 2.516611, 2.0],
[2.25, 4.0, 2.916667, 1.707825, 2.5, 3.500000, 4, 0.500000, 0.707107, 3.5],
[2.25, 4.0, 2.916667, 1.707825, 2.5, 3.500000, 4, 0.500000, 0.707107, 3.5],
[np.nan, np.nan, np.nan, np.nan, np.nan, 2.333333, 5, 6.333333, 2.516611, 2.0],
[5.00, 5.0, np.nan, np.nan, 5.0, 2.333333, 5, 6.333333, 2.516611, 2.0]],
columns=['target_enc_mean_category1', 'target_enc_max_category1',
'target_enc_var_category1', 'target_enc_std_category1',
'target_enc_median_category1', 'target_enc_mean_category2',
'target_enc_max_category2', 'target_enc_var_category2',
'target_enc_std_category2', 'target_enc_median_category2'])
assert_frame_equal(test, df_expected)
def test_month_encoding():
train_dataset = Dataset(data=pd.DataFrame([['2017-01-01 01:24:00', '2017-11-01 12:24:00'],
['2017-02-02 02:24:00', '2017-11-01 12:24:00'],
['2017-03-03 03:24:00', '2017-11-01 12:24:00'],
['2017-04-04 04:24:00', '2017-11-01 12:24:00'],
['2017-05-05 05:24:00', '2017-11-01 12:24:00'],
['2017-06-06 06:24:00', '2017-11-01 12:24:00'],
['2017-07-07 07:24:00', '2017-11-01 12:24:00'],
['2017-08-08 08:24:00', '2017-11-01 12:24:00'],
['2017-09-09 09:24:00', '2017-11-01 12:24:00'],
['2017-10-10 10:24:00', '2017-11-01 12:24:00'],
['2017-11-11 11:24:00', '2017-11-01 12:24:00'],
['2017-12-12 12:24:00', '2017-11-01 12:24:00']],
columns=['date1', 'date2']), target=None)
test_dataset = Dataset(data=pd.DataFrame([['2017-01-01 01:24:00', '2017-11-01 12:24:00'],
['2017-02-02 02:24:00', '2017-11-01 12:24:00'],
['2017-03-03 03:24:00', '2017-11-01 12:24:00'],
['2017-04-04 04:24:00', '2017-11-01 12:24:00'],
['2017-05-05 05:24:00', '2017-11-01 12:24:00'],
['2017-06-06 06:24:00', '2017-11-01 12:24:00'],
['2017-07-07 07:24:00', '2017-11-01 12:24:00'],
['2017-08-08 08:24:00', '2017-11-01 12:24:00'],
['2017-09-09 09:24:00', '2017-11-01 12:24:00'],
['2017-10-10 10:24:00', '2017-11-01 12:24:00'],
['2017-11-11 11:24:00', '2017-11-01 12:24:00'],
['2017-12-12 12:24:00', '2017-11-01 12:24:00']],
columns=['date1', 'date2']), target=None)
    df_expected = pd.DataFrame([[8.660254e-01, 5.000000e-01, 1.0, -1.133108e-15],
                                [5.000000e-01, 8.660254e-01, 1.0, -1.133108e-15],
                                [6.123234e-17, 1.000000e+00, 1.0, -1.133108e-15],
                                [-5.000000e-01, 8.660254e-01, 1.0, -1.133108e-15],
                                [-8.660254e-01, 5.000000e-01, 1.0, -1.133108e-15],
                                [-1.000000e+00, 1.224647e-16, 1.0, -1.133108e-15],
                                [-8.660254e-01, -5.000000e-01, 1.0, -1.133108e-15],
                                [-5.000000e-01, -8.660254e-01, 1.0, -1.133108e-15],
                                [-1.836970e-16, -1.000000e+00, 1.0, -1.133108e-15],
                                [5.000000e-01, -8.660254e-01, 1.0, -1.133108e-15],
                                [8.660254e-01, -5.000000e-01, 1.0, -1.133108e-15],
                                [1.000000e+00, -2.449294e-16, 1.0, -1.133108e-15]],
columns=['date1_cos', 'date1_sin', 'date2_cos', 'date2_sin'])
train, test = MonthEncoding(columns=['date1', 'date2']).fit(train_dataset, test_dataset).transform(save=False)
    # allow for small floating-point rounding differences
assert (train - df_expected).sum().sum() < 0.001
assert (test - df_expected).sum().sum() < 0.001
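# Note: the date1 expectations in test_month_encoding follow the standard cyclical
# month encoding cos(2*pi*m/12), sin(2*pi*m/12); the helper below reproduces those
# numbers for illustration only and does not inspect MonthEncoding's implementation.
def _reference_cyclical_month_encoding():
    months = np.arange(1, 13)
    return pd.DataFrame({'month': months,
                         'cos': np.cos(2 * np.pi * months / 12),
                         'sin': np.sin(2 * np.pi * months / 12)})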
def test_day_encoding():
train_dataset = Dataset(data=pd.DataFrame([['2017-01-01 01:24:00', '2017-11-01 12:24:00'],
['2017-02-02 02:24:00', '2017-11-01 12:24:00'],
['2017-03-03 03:24:00', '2017-11-01 12:24:00'],
['2017-04-04 04:24:00', '2017-11-01 12:24:00'],
['2017-05-05 05:24:00', '2017-11-01 12:24:00'],
['2017-06-06 06:24:00', '2017-11-01 12:24:00'],
['2017-07-07 07:24:00', '2017-11-01 12:24:00'],
['2017-08-08 08:24:00', '2017-11-01 12:24:00'],
['2017-09-09 09:24:00', '2017-11-01 12:24:00'],
['2017-10-10 10:24:00', '2017-11-01 12:24:00'],
['2017-11-11 11:24:00', '2017-11-01 12:24:00'],
['2017-12-12 12:24:00', '2017-11-01 12:24:00'],
['2017-11-13 12:24:00', '2017-11-01 12:24:00'],
['2017-11-14 12:24:00', '2017-11-01 12:24:00'],
['2017-11-15 12:24:00', '2017-11-01 12:24:00'],
['2017-11-16 12:24:00', '2017-11-01 12:24:00'],
['2017-11-17 12:24:00', '2017-11-01 12:24:00'],
['2017-11-18 12:24:00', '2017-11-01 12:24:00'],
['2017-11-19 12:24:00', '2017-11-01 12:24:00'],
['2017-11-20 12:24:00', '2017-11-01 12:24:00'],
['2017-11-21 12:24:00', '2017-11-01 12:24:00'],
['2017-11-22 12:24:00', '2017-11-01 12:24:00'],
['2017-11-23 12:24:00', '2017-11-01 12:24:00'],
['2017-11-24 12:24:00', '2017-11-01 12:24:00'],
['2017-11-25 12:24:00', '2017-11-01 12:24:00'],
['2017-11-26 12:24:00', '2017-11-01 12:24:00'],
['2017-11-27 12:24:00', '2017-11-01 12:24:00'],
['2017-11-28 12:24:00', '2017-11-01 12:24:00'],
['2017-11-29 12:24:00', '2017-11-01 12:24:00'],
['2017-11-30 12:24:00', '2017-11-01 12:24:00']], columns=['date1', 'date2']), target=None)
test_dataset = Dataset(data=pd.DataFrame([['2017-11-01 12:24:00', '2017-11-01 12:24:00'],
['2017-11-02 12:24:00', '2017-11-01 12:24:00'],
['2017-11-03 12:24:00', '2017-11-01 12:24:00'],
['2017-11-04 12:24:00', '2017-11-01 12:24:00'],
['2017-11-05 12:24:00', '2017-11-01 12:24:00'],
['2017-11-06 12:24:00', '2017-11-01 12:24:00'],
['2017-11-07 12:24:00', '2017-11-01 12:24:00'],
['2017-11-08 12:24:00', '2017-11-01 12:24:00'],
['2017-11-09 12:24:00', '2017-11-01 12:24:00'],
['2017-11-10 12:24:00', '2017-11-01 12:24:00'],
['2017-11-11 12:24:00', '2017-11-01 12:24:00'],
['2017-11-12 12:24:00', '2017-11-01 12:24:00'],
['2017-11-13 12:24:00', '2017-11-01 12:24:00'],
['2017-11-14 12:24:00', '2017-11-01 12:24:00'],
['2017-11-15 12:24:00', '2017-11-01 12:24:00'],
['2017-11-16 12:24:00', '2017-11-01 12:24:00'],
['2017-11-17 12:24:00', '2017-11-01 12:24:00'],
['2017-11-18 12:24:00', '2017-11-01 12:24:00'],
['2017-11-19 12:24:00', '2017-11-01 12:24:00'],
['2017-11-20 12:24:00', '2017-11-01 12:24:00'],
['2017-11-21 12:24:00', '2017-11-01 12:24:00'],
['2017-11-22 12:24:00', '2017-11-01 12:24:00'],
['2017-11-23 12:24:00', '2017-11-01 12:24:00'],
['2017-11-24 12:24:00', '2017-11-01 12:24:00'],
['2017-11-25 12:24:00', '2017-11-01 12:24:00'],
['2017-11-26 12:24:00', '2017-11-01 12:24:00'],
['2017-11-27 12:24:00', '2017-11-01 12:24:00'],
['2017-11-28 12:24:00', '2017-11-01 12:24:00'],
['2017-11-29 12:24:00', '2017-11-01 12:24:00'],
['2017-11-30 12:24:00', '2017-11-01 12:24:00']], columns=['date1', 'date2']), target=None)
train, test = DayEncoding(columns=['date1', 'date2']).fit(train_dataset, test_dataset).transform(save=False)
df_expected = pd.DataFrame([[0.978148, 2.079117e-01, 1.0, -2.449294e-16],
[0.913545, 4.067366e-01, 1.0, -2.449294e-16],
[0.809017, 5.877853e-01, 1.0, -2.449294e-16],
[0.669131, 7.431448e-01, 1.0, -2.449294e-16],
[0.500000, 8.660254e-01, 1.0, -2.449294e-16],
[0.309017, 9.510565e-01, 1.0, -2.449294e-16],
[0.104528, 9.945219e-01, 1.0, -2.449294e-16],
[-0.104528, 9.945219e-01, 1.0, -2.449294e-16],
[-0.309017, 9.510565e-01, 1.0, -2.449294e-16],
[-0.500000, 8.660254e-01, 1.0, -2.449294e-16],
[-0.669131, 7.431448e-01, 1.0, -2.449294e-16],
[-0.809017, 5.877853e-01, 1.0, -2.449294e-16],
[-0.913545, 4.067366e-01, 1.0, -2.449294e-16],
[-0.978148, 2.079117e-01, 1.0, -2.449294e-16],
[-1.000000, 5.665539e-16, 1.0, -2.449294e-16],
[-0.978148, -2.079117e-01, 1.0, -2.449294e-16],
[-0.913545, -4.067366e-01, 1.0, -2.449294e-16],
[-0.809017, -5.877853e-01, 1.0, -2.449294e-16],
[-0.669131, -7.431448e-01, 1.0, -2.449294e-16],
[-0.500000, -8.660254e-01, 1.0, -2.449294e-16],
[-0.309017, -9.510565e-01, 1.0, -2.449294e-16],
[-0.104528, -9.945219e-01, 1.0, -2.449294e-16],
[0.104528, -9.945219e-01, 1.0, -2.449294e-16],
[0.309017, -9.510565e-01, 1.0, -2.449294e-16],
[0.500000, -8.660254e-01, 1.0, -2.449294e-16],
[0.669131, -7.431448e-01, 1.0, -2.449294e-16],
[0.809017, -5.877853e-01, 1.0, -2.449294e-16],
[0.913545, -4.067366e-01, 1.0, -2.449294e-16],
[0.978148, -2.079117e-01, 1.0, -2.449294e-16],
[1.000000, -1.133108e-15, 1.0, -2.449294e-16]],
columns=['date1_cos', 'date1_sin', 'date2_cos', 'date2_sin'])
assert_frame_equal(train, df_expected)
| assert_frame_equal(test, df_expected) | pandas.util.testing.assert_frame_equal |
"""
fit motor circle task with external data (not simulated)
"""
import sys, os
import numpy as np
import pandas as pd
import stan
import arviz as az
import nest_asyncio
nest_asyncio.apply()
from matplotlib import pyplot as plt
import seaborn as sns
sys.path.append('.')
from simulations.sim_bandit3arm_combined import bandit_combined_preprocess_func
from visualisation.hdi_compare import hdi, hdi_diff
def extract_ind_results(df,pars_ind,data_dict):
out_col_names = []
out_df = np.zeros([data_dict['N'],len(pars_ind)*2])
i=0
for ind_par in pars_ind:
pattern = r'\A'+ind_par+r'.\d+'
out_col_names.append(ind_par+'_mean')
out_col_names.append(ind_par+'_std')
mean_val=df.iloc[:,df.columns.str.contains(pattern)].mean(axis=0).to_frame()
std_val=df.iloc[:,df.columns.str.contains(pattern)].std(axis=0).to_frame()
out_df[:,2*i:2*(i+1)] = np.concatenate([mean_val.values,std_val.values],axis=1)
i+=1
out_df = pd.DataFrame(out_df,columns=out_col_names)
beh_col_names = ['total','avg_rt','std_rt']
total_np = 100+data_dict['rew'].sum(axis=1,keepdims=True)+data_dict['los'].sum(axis=1,keepdims=True)
avg_rt_np = data_dict['rt'].mean(axis=1,keepdims=True)
std_rt_np = data_dict['rt'].std(axis=1,keepdims=True)
beh_df = pd.DataFrame(np.concatenate([total_np,avg_rt_np,std_rt_np],axis=1),columns=beh_col_names)
out_df = beh_df.join(out_df)
return out_df
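# Illustrative note: extract_ind_results above assumes the Stan trace dataframe holds
# one column per subject for each individual-level parameter (e.g. 'A_ind.1',
# 'A_ind.2', ...); the column names here are made up for the example and are not the
# exact names every model produces.
def _demo_ind_param_column_matching():
    df = pd.DataFrame(columns=['A_ind.1', 'A_ind.2', 'mu_A', 'lp__'])
    pattern = r'\A' + 'A_ind' + r'.\d+'
    return list(df.columns[df.columns.str.contains(pattern)])  # ['A_ind.1', 'A_ind.2']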
def plot_violin_params_mean(model_name,param_ls, groups_comp):
"""plot violin of param means"""
df_all = pd.DataFrame()
palettes = {}
pcols = ["b", ".85"];
split_viol=len(groups_comp)==2
for g in range(0,len(groups_comp)):
csv_params = f'./data_output/'+model_name+'_mydata/mydata_fit_group_trace'+groups_comp[g]+'.csv'
df = pd.read_csv(csv_params)
df['group'] = groups_comp[g]
df_all = df_all.append(df)
palettes[groups_comp[g]] = pcols[g]
df_all_new = df_all.melt(id_vars='group',var_name='parameter')
n_param = len(param_ls)
fig, ax = plt.subplots(1,n_param,figsize=(15,5))
leg_box = (-1,-0.1)
pcols = ["b", ".85"];
sns.set_theme(style="whitegrid")
for n in range(n_param):
g = sns.violinplot(data=df_all_new[df_all_new['parameter']==param_ls[n]], x="parameter", y="value",hue='group', split=split_viol, linewidth=1,palette=palettes, ax=ax[n])
sns.despine(left=True)
g.set(ylabel=None)
ax[n].get_legend().remove()
ax[n].tick_params(axis='y', labelsize=8)
g.set(xlabel=None)
if split_viol:
plt.legend(loc='upper center', bbox_to_anchor=leg_box,
fancybox=True, shadow=True, ncol=2)
save_dir = './data_output/'+model_name+'_mydata/'
if not os.path.isdir(save_dir):
os.mkdir(save_dir)
save_name = 'violin_gr_plots_mean'+''.join(groups_comp)+'.png'
fig.savefig(save_dir+save_name,bbox_inches='tight',pad_inches=0)
def comp_hdi_mean_data(model_name,param_ls, groups_comp=None):
"""
compare hdi by drawing simulations (trace means)
"""
output_dir = './data_output/'+model_name+'_mydata/'
if groups_comp != ['']:
gr1_file = os.path.join(output_dir,'mydata_fit_group_trace'+groups_comp[0]+'.csv')
gr2_file = os.path.join(output_dir,'mydata_fit_group_trace'+groups_comp[1]+'.csv')
gr1_dict = pd.read_csv(gr1_file)
gr2_dict = pd.read_csv(gr2_file)
else:
gr_file = os.path.join(output_dir,'mydata_fit_group_trace.csv')
gr_dict = | pd.read_csv(gr_file) | pandas.read_csv |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 12 08:54:32 2021
OK so far:
swoosh h2o: 1994-2019 30S to 30N mean, 82 hpa
regressors:
QBO_CDAS = +5 months lag correlated with h2o: 0.508
Anom_nino3p4 = no lags corr with h2o: -0.167
LR:
no CV does R2 of 0.2857
Cross validate 5 kfolds: mean R2: 0.1786 std R2: 0.245
SVM:
CV 5 kfolds: mean R2: 0.418, mean adj_R2: 0.408,
std R2: 0.047, std adj_R2: 0.0485
need to plot residuals with best model.
@author: shlomi
"""
from strat_paths import work_chaim
ml_path = work_chaim / 'ML'
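# Rough sketch (illustrative, not part of the fitting pipeline) of how the "+5 months
# lag" correlation quoted in the module docstring could be checked; h2o_series and
# qbo_series are placeholder names for monthly pandas Series with a DatetimeIndex.
def lagged_correlation_sketch(h2o_series, qbo_series, lag=5):
    import pandas as pd
    df = pd.concat([h2o_series.rename('h2o'),
                    qbo_series.shift(lag).rename('qbo_lagged')], axis=1).dropna()
    return df['h2o'].corr(df['qbo_lagged'])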
def split_qbo_en_ln_neut_enso(qbo):
from make_regressors import load_all_regressors
ln = load_all_regressors()['LN'].dropna('time')
en = load_all_regressors()['EN'].dropna('time')
neut = load_all_regressors()['neutENSO'].dropna('time')
qbo_en = qbo.where(en>=0.5).fillna(0)
qbo_en.name = 'qbo_en'
qbo_ln = qbo.where(ln<=-0.5).fillna(0)
qbo_ln.name = 'qbo_ln'
qbo_neut = qbo.where(neut!=0).fillna(0)
qbo_neut.name = 'qbo_neut'
return qbo_en, qbo_ln, qbo_neut
# def CV_splitter_for_xarray_time_series(X_da, time_dim='time', grp='year'):
# groups = X_da.groupby('{}.{}'.format(time_dim, grp)).groups
# sorted_groups = [value for (key, value) in sorted(groups.items())]
# cv = [(sorted_groups[i] + sorted_groups[i+1], sorted_groups[i+2])
# for i in range(len(sorted_groups)-2)]
# return cv\
def ABS_SHAP(df_shap, df):
import numpy as np
import pandas as pd
import seaborn as sns
sns.set_theme(style='ticks', font_scale=1.5)
#import matplotlib as plt
# Make a copy of the input data
shap_v = pd.DataFrame(df_shap)
feature_list = df.columns
shap_v.columns = feature_list
df_v = df.copy().reset_index().drop('time', axis=1)
# Determine the correlation in order to plot with different colors
corr_list = list()
for i in feature_list:
b = np.corrcoef(shap_v[i], df_v[i])[1][0]
corr_list.append(b)
corr_df = pd.concat(
[pd.Series(feature_list), pd.Series(corr_list)], axis=1).fillna(0)
# Make a data frame. Column 1 is the feature, and Column 2 is the correlation coefficient
corr_df.columns = ['Predictor', 'Corr']
corr_df['Sign'] = np.where(corr_df['Corr'] > 0, 'red', 'blue')
# Plot it
shap_abs = np.abs(shap_v)
k = pd.DataFrame(shap_abs.mean()).reset_index()
k.columns = ['Predictor', 'SHAP_abs']
k2 = k.merge(corr_df, left_on='Predictor', right_on='Predictor', how='inner')
k2 = k2.sort_values(by='SHAP_abs', ascending=True)
colorlist = k2['Sign']
ax = k2.plot.barh(x='Predictor', y='SHAP_abs',
color=colorlist, figsize=(9, 3), legend=False)
ax.set_xlabel("SHAP Value (Red = Positive Impact)")
return
def plot_simplified_shap_tree_explainer(rf_model):
import shap
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
X = produce_X(lag={'qbo_cdas': 5}, syear='1994',
eyear='2019', add_co2=False)
y = produce_y(detrend='lowess',
lat_band_mean=[-15, 15], syear='1994', eyear='2019', standertize=True)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1)
rf_model.fit(X_train, y_train)
dfX = X_test.to_dataset('regressor').to_dataframe()
dfX = dfX.rename(
{'qbo_cdas': 'QBO', 'anom_nino3p4': 'ENSO'}, axis=1)
ex_rf = shap.Explainer(rf_model)
shap_values_rf = ex_rf.shap_values(dfX)
ABS_SHAP(shap_values_rf, dfX)
ax = plt.gca()
ax.set_xlabel(r'H$_{2}$O anomalies (STD) (Red is positive)')
return
def plot_Tree_explainer_shap(rf_model):
import shap
from sklearn.model_selection import train_test_split
X = produce_X(lag={'qbo_cdas': 5})
y = produce_y(detrend=None, lat_band_mean=[-15, 15])
X = X.sel(time=slice('1994', '2019'))
y = y.sel(time=slice('1994', '2019'))
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1)
rf_model.fit(X_train, y_train)
dfX = X_test.to_dataset('regressor').to_dataframe()
dfX = dfX.rename(
{'qbo_cdas': 'QBO', 'anom_nino3p4': 'ENSO', 'co2': r'CO$_2$'}, axis=1)
fi = dict(zip(dfX.columns, rf_model.feature_importances_ * 100))
print(fi)
ex_rf = shap.Explainer(rf_model)
shap_values_rf = ex_rf.shap_values(dfX)
shap.summary_plot(shap_values_rf, dfX, plot_size=1.1)
return
def plot_model_prediction_fig_3():
from matplotlib.ticker import (MultipleLocator, AutoMinorLocator)
from sklearn.linear_model import LinearRegression
import seaborn as sns
import matplotlib.pyplot as plt
X = produce_X()
X = add_enso2_and_enso_qbo_to_X(X)
y = produce_y(detrend=None, lat_band_mean=[-15, 15])
X_test = X.sel(time=slice('1994', '2019'))
y_test = y.sel(time=slice('1994', '2019'))
X_train = X.sel(time=slice('2005', '2019'))
y_train = y.sel(time=slice('2005', '2019'))
lr = LinearRegression()
rds = make_results_for_MLR(lr, X_train, y_train, X_test=X_test, y_test=y_test)
df = rds['predict'].to_dataframe()
df['y_true'] = y_test.to_dataframe()
df['resid'] = df['predict'] - df['y_true']
df = df.rename({'resid': 'Residuals', 'predict': 'MLR', 'y_true': 'SWOOSH'}, axis=1)
sns.set_theme(style='ticks', font_scale=1.5)
fig, ax = plt.subplots(2, 1, figsize=(18, 7))
df[['SWOOSH', 'MLR']].plot(ax=ax[0], color=['tab:purple', 'tab:red'])
df[['Residuals']].plot(ax=ax[1], color='k', legend=False)
[x.grid(True) for x in ax]
[x.set_xlabel('') for x in ax]
ax[0].set_ylabel(r'H$_{2}$O anomalies [std]')
ax[1].set_ylabel(r'H$_{2}$O residuals [std]')
[x.xaxis.set_minor_locator(AutoMinorLocator()) for x in ax]
[x.xaxis.grid(True, which='minor') for x in ax]
# legend = ax.legend(prop={'size': 13}, ncol=5, loc='upper left')
plot_forecast_busts_lines_datetime(ax[0], color='k')
fig.tight_layout()
# # get handles and labels of legend:
# hands, labes = ax.get_legend_handles_labels()
# colors = [x.get_color() for x in hands]
# # change the text labels to the colors of the lines:
# for i, text in enumerate(legend.get_texts()):
# text.set_color(colors[i])
return fig
def plot_beta_coeffs(rds, col_wrap=3, figsize=(13, 6), extent=[-170, 170, -57.5, 57.5], drop_co2=True):
import cartopy.crs as ccrs
import seaborn as sns
import matplotlib.ticker as mticker
from cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER
from palettable.scientific import diverging as divsci
from strato_figures import remove_regressors_and_set_title
predict_cmap = divsci.Vik_20.mpl_colormap
sns.set_theme(style='ticks', font_scale=1.5)
proj = ccrs.PlateCarree(central_longitude=0)
plt_kwargs = dict(add_colorbar=False,
col_wrap=col_wrap,
cmap=predict_cmap, center=0.0, extend='max', vmax=0.6,
levels=41, subplot_kws=dict(projection=proj),
transform=ccrs.PlateCarree(), figsize=figsize)
label = r'$\beta$ coefficients'
gl_list = []
if drop_co2:
rds = rds.drop_sel(regressor='co2')
plt_kwargs.update(extend=None, vmax=None, col_wrap=2)
fg = rds['params'].plot.contourf(col='regressor', **plt_kwargs)
cbar_kws = {'label': '', 'format': '%0.2f'}
cbar_ax = fg.fig.add_axes([0.1, 0.1, .8, .035]) # last num controls width
fg.add_colorbar(cax=cbar_ax, orientation="horizontal", **cbar_kws)
for ax in fg.axes.flatten():
ax.coastlines()
ax.set_extent(extent, crs=ccrs.PlateCarree())
gl = ax.gridlines(
crs=ccrs.PlateCarree(),
linewidth=1,
color='black',
alpha=0.5,
linestyle='--',
draw_labels=True)
gl.xlabels_top = False
gl.xlabel_style = {'size': 9}
gl.ylabel_style = {'size': 9}
gl.xlines = True
gl.xlocator = mticker.FixedLocator([-180, -120, -60, 0, 60, 120, 180])
gl.ylocator = mticker.FixedLocator([-45, -30, -15, 0, 15, 30, 45])
gl.xformatter = LONGITUDE_FORMATTER
gl.yformatter = LATITUDE_FORMATTER
gl_list.append(gl)
ax = remove_regressors_and_set_title(ax)
gl_list[0].ylabels_right = False
gl_list[1].ylabels_right = False
gl_list[1].ylabels_left = True
gl_list[2].ylabels_right = False
gl_list[3].ylabels_left = True
gl_list[3].ylabels_right = True
try:
gl_list[3].ylabels_right = False
except IndexError:
pass
fg.fig.tight_layout()
fg.fig.subplots_adjust(top=0.93,
bottom=0.2,
left=0.05,
right=0.979,
hspace=0.275,
wspace=0.044)
# fg = rds['params'].plot.contourf(col='regressor', **plt_kwargs)
# cbar_ax = fg.fig.add_axes([0.1, 0.1, .8, .025])
# fg.add_colorbar(cax=cbar_ax, orientation="horizontal", label='',
# format='%0.3f')
# # fg.fig.suptitle(label, fontsize=12, fontweight=750)
# [ax.coastlines() for ax in fg.axes.flatten()]
# [ax.gridlines(
# crs=ccrs.PlateCarree(),
# linewidth=1,
# color='black',
# alpha=0.5,
# linestyle='--',
# draw_labels=False) for ax in fg.axes.flatten()]
# fg.fig.subplots_adjust(bottom=0.2, top=0.9, left=0.05)
return fg
def plot_r2_map_predictor_sets_with_co2(path=work_chaim, cpt_source='randel',
save=True):
"""r2 map (lat-lon) for cdas-plags, enso, ch4"""
import xarray as xr
import cartopy.crs as ccrs
import matplotlib.pyplot as plt
import matplotlib.ticker as mticker
import seaborn as sns
from strato_figures import remove_regressors_and_set_title
from cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER
from palettable.colorbrewer import sequential as seqbr
# from palettable.scientific import diverging as divsci
# from palettable.colorbrewer import diverging as divbr
from strat_paths import savefig_path
error_cmap = seqbr.YlGnBu_9.mpl_colormap
sns.set_theme(style='ticks', font_scale=1.5)
titles =[r'(a) $\sum_{i=0}^{5}$CPT(t-$i$)',
r'(b) $\eta_1$ = QBO + ENSO + CO$_2$',
r'(c) $\eta_1$ + QBO $\times$ ENSO + ENSO$^2$',
r'(d) $\eta_1$ + T500 + BDC']
# rds1 = xr.open_dataset(
# path /
# 'MLR_H2O_latlon_cdas-plags_ch4_enso_2004-2019.nc')
# rds2 = xr.open_dataset(
# path /
# 'MLR_H2O_latlon_cdas-plags_ch4_enso_bdc_t500_2004-2019.nc')
# rds3 = xr.open_dataset(
# path /
# 'MLR_H2O_latlon_cdas-plags_ch4_enso_radio_cold_lags6_2004-2019.nc')
# rds4 = xr.open_dataset(
# path /
# 'MLR_H2O_latlon_cdas-plags_ch4_enso_poly_2_no_qbo^2_no_ch4_extra_2004-2019.nc')
rds1 = produce_rds_etas(eta=3, cpt_source=cpt_source)
rds2 = produce_rds_etas(eta=1)
rds3 = produce_rds_etas(eta=4)
rds4 = produce_rds_etas(eta=2)
rds = xr.concat([x['r2'] for x in [rds1, rds2, rds3, rds4]], 'eta')
rds['eta'] = range(1, 5)
rds = rds.sortby('eta')
# fig = plt.figure(figsize=(11, 5))
# ax = fig.add_subplot(1, 1, 1,
# projection=ccrs.PlateCarree(central_longitude=0))
# ax.coastlines()
proj = ccrs.PlateCarree(central_longitude=0)
fg = rds.plot.contourf(col='eta', add_colorbar=False, cmap=error_cmap,
vmin=0.0, extend=None, levels=41, col_wrap=2,
subplot_kws=dict(projection=proj),
transform=ccrs.PlateCarree(), figsize=(13, 6))
# lons = rds.lon.values[0:int(len(rds.lon.values) / 2)][::2]
# lons_mirror = abs(lons[::-1])
# lons = np.concatenate([lons, lons_mirror])
# lats = rds.lat.values[0:int(len(rds.lat.values) / 2)][::2]
# lats_mirror = abs(lats[::-1])
# lats = np.concatenate([lats, lats_mirror])
# ax.set_xticks(lons, crs=ccrs.PlateCarree())
# ax.set_yticks(lats, crs=ccrs.PlateCarree())
# lon_formatter = LongitudeFormatter(zero_direction_label=True)
# lat_formatter = LatitudeFormatter()
# ax.xaxis.set_major_formatter(lon_formatter)
# ax.yaxis.set_major_formatter(lat_formatter)
cbar_kws = {'label': '', 'format': '%0.2f', 'aspect': 20}
cbar_ax = fg.fig.add_axes([0.1, 0.1, .8, .025]) # last num controls width
fg.add_colorbar(cax=cbar_ax, orientation="horizontal", **cbar_kws)
gl_list = []
for i, ax in enumerate(fg.axes.flatten()):
ax.coastlines()
gl = ax.gridlines(
crs=ccrs.PlateCarree(),
linewidth=1,
color='black',
alpha=0.5,
linestyle='--',
draw_labels=True)
gl.xlabels_top = False
gl.xlabel_style = {'size': 9}
gl.ylabel_style = {'size': 9}
gl.xlines = True
gl.xlocator = mticker.FixedLocator([-180, -120, -60, 0, 60, 120, 180])
gl.ylocator = mticker.FixedLocator([-45, -30, -15, 0, 15, 30, 45])
gl.xformatter = LONGITUDE_FORMATTER
gl.yformatter = LATITUDE_FORMATTER
gl_list.append(gl)
if i == 0:
plt.rcParams['axes.titlepad'] = 16
else:
plt.rcParams['axes.titlepad'] = 6
ax.set_title(titles[i])
# ax = remove_regressors_and_set_title(ax)
# gl_list[0].ylabels_right = False
# gl_list[2].ylabels_left = False
# try:
# gl_list[3].ylabels_right = False
# except IndexError:
# pass
fg.fig.tight_layout()
fg.fig.subplots_adjust(top=0.92,
bottom=0.16,
left=0.065,
right=0.935,
hspace=0.0,
wspace=0.208)
print('Caption: ')
    print('The adjusted R^2 for the water vapor anomalies MLR analysis at the 82 hPa level with CH4, ENSO, and pressure-level lag-varied QBO as predictors. This MLR spans from 2004 to 2018.')
filename = 'MLR_H2O_r2_map_82_eta_with_co2.png'
if save:
plt.savefig(savefig_path / filename, bbox_inches='tight')
return fg
def produce_rds_etas(eta=1, cpt_source='randel'):
""" run produce_MLR_2D_for_figs_6_and_7 with regressors:
eta=1 : co2, anom_nino3p4, qbo_lagged
eta=2 : co2, anom_nino3p4, qbo_lagged, T500, BDC
eta=3 : co2, anom_nino3p4, qbo_lagged + 6XCPT_lagged
eta=4 : co2, anom_nino3p4, qbo_lagged, anom_nino3p4^2, qbo_laggedXanom_nino3p4
co2 is automatically added"""
pred = ['qbo_cdas', 'anom_nino3p4']
if eta == 1:
print('producing eta {} with {}'.format(eta, pred))
rds = produce_MLR_2D_for_figs_6_and_7(pred, add_enso2=False)
elif eta == 2:
pred = pred + ['era5_bdc70', 'era5_t500']
print('producing eta {} with {}'.format(eta, pred))
rds = produce_MLR_2D_for_figs_6_and_7(pred, add_enso2=False)
elif eta == 3:
if cpt_source == 'randel':
pred = ['radio_cold_no_qbo']
rds = produce_MLR_2D_for_figs_6_and_7(pred, add_enso2=False, reg_shift=['radio_cold_no_qbo', 6])
elif cpt_source == 'sean':
pred = ['cpt_ERA5']
rds = produce_MLR_2D_for_figs_6_and_7(pred, add_enso2=False, reg_shift=['cpt_ERA5', 6])
print('producing eta {} with {}'.format(eta, pred))
elif eta == 4:
print('producing eta {} with {} and enso^2'.format(eta, pred))
rds = produce_MLR_2D_for_figs_6_and_7(pred, add_enso2=True)
return rds
def produce_MLR_2D_for_figs_6_and_7(predictors=['qbo_cdas', 'anom_nino3p4'],
lag={'qbo_cdas': 5}, add_enso2=True,
reg_shift=None):
from sklearn.linear_model import LinearRegression
if [x for x in lag.keys()][0] not in predictors:
lag = None
X = produce_X(lag=lag, regressors=predictors, add_co2=True,
reg_shift=reg_shift, standertize=False)
if add_enso2:
X = add_enso2_and_enso_qbo_to_X(X)
X = X.sel(time=slice('2005', '2019'))
y = produce_y(detrend=None, lat_band_mean=None, plevel=82, deseason='std',
filename='swoosh_lonlatpress-20deg-5deg.nc', sw_var='combinedanomh2oq')
y = y.sel(lat=slice(-60, 60))
y = y.sel(time=X.time)
lr = LinearRegression()
rds = make_results_for_MLR(lr, X, y)
return rds
def make_results_for_MLR(lr, X_train, y_train, X_test=None, y_test=None):
import xarray as xr
from sklearn.metrics import r2_score
if len(y_train.dims) > 1:
# assume sample dim is time:
target_dims = [x for x in y_train.dims if x != 'time']
# infer reg_dim from X:
reg_dim = [x for x in X_train.dims if x != 'time'][0]
ys_train = y_train.stack(targets=target_dims)
# fit the model:
lr.fit(X_train, ys_train)
rds = xr.Dataset()
# produce beta:
rds['params'] = xr.DataArray(lr.coef_, dims=['targets', reg_dim])
# produce predict:
if X_test is not None:
rds['predict'] = xr.DataArray(lr.predict(X_test), dims=['time', 'targets'])
else:
rds['predict'] = xr.DataArray(lr.predict(X_train), dims=['time', 'targets'])
# produce R^2:
if y_test is not None:
ys_test = y_test.stack(targets=target_dims)
r2 = r2_score(ys_test, rds['predict'], multioutput='raw_values')
else:
r2 = r2_score(ys_train, rds['predict'], multioutput='raw_values')
rds['r2'] = xr.DataArray(r2, dims='targets')
# dims:
rds[reg_dim] = X_train[reg_dim]
rds['time'] = ys_train['time']
rds['targets'] = ys_train['targets']
# unstack:
rds = rds.unstack('targets')
rds['original'] = y_train
rds.attrs['sample_dim'] = 'time'
rds.attrs['feature_dim'] = 'regressor'
elif len(y_train.dims) == 1:
reg_dim = [x for x in X_train.dims if x != 'time'][0]
# fit the model:
lr.fit(X_train, y_train)
rds = xr.Dataset()
# produce beta:
rds['params'] = xr.DataArray(lr.coef_, dims=[reg_dim])
# produce predict:
if X_test is not None:
rds['predict'] = xr.DataArray(lr.predict(X_test), dims=['time'])
rds['time'] = y_test['time']
else:
rds['predict'] = xr.DataArray(lr.predict(X_train), dims=['time'])
rds['time'] = y_train['time']
# produce R^2:
if y_test is not None:
r2 = r2_score(y_test, rds['predict'])
else:
r2 = r2_score(y_train, rds['predict'])
rds['r2'] = xr.DataArray(r2)
# dims:
rds[reg_dim] = X_train[reg_dim]
rds['original'] = y_train
rds.attrs['sample_dim'] = 'time'
rds.attrs['feature_dim'] = 'regressor'
return rds
def plot_forecast_busts_lines_datetime(ax, color='r', style='--'):
import pandas as pd
dts = ['2010-11', '2011-04', '2015-09', '2016-01', '2016-09', '2017-01']
dts = [ | pd.to_datetime(x) | pandas.to_datetime |
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta
import os
import operator
import unittest
import numpy as np
from pandas.core.api import (Index, Series, TimeSeries, DataFrame, isnull)
import pandas.core.datetools as datetools
from pandas.util.testing import assert_series_equal
import pandas.util.testing as common
#-------------------------------------------------------------------------------
# Series test cases
class TestSeries(unittest.TestCase):
def setUp(self):
self.ts = common.makeTimeSeries()
self.series = common.makeStringSeries()
self.objSeries = common.makeObjectSeries()
self.empty = Series([], index=[])
def test_constructor(self):
# Recognize TimeSeries
self.assert_(isinstance(self.ts, TimeSeries))
# Pass in Series
derived = Series(self.ts)
self.assert_(isinstance(derived, TimeSeries))
self.assert_(common.equalContents(derived.index, self.ts.index))
# Ensure new index is not created
self.assertEquals(id(self.ts.index), id(derived.index))
# Pass in scalar
scalar = Series(0.5)
self.assert_(isinstance(scalar, float))
# Mixed type Series
mixed = Series(['hello', np.NaN], index=[0, 1])
self.assert_(mixed.dtype == np.object_)
self.assert_(mixed[1] is np.NaN)
self.assertRaises(Exception, Series, [0, 1, 2], index=None)
self.assert_(not isinstance(self.empty, TimeSeries))
self.assert_(not isinstance(Series({}), TimeSeries))
self.assertRaises(Exception, Series, np.random.randn(3, 3),
index=np.arange(3))
def test_constructor_corner(self):
df = common.makeTimeDataFrame()
objs = [df, df]
s = Series(objs, index=[0, 1])
self.assert_(isinstance(s, Series))
def test_fromDict(self):
data = {'a' : 0, 'b' : 1, 'c' : 2, 'd' : 3}
series = Series(data)
self.assert_(common.is_sorted(series.index))
data = {'a' : 0, 'b' : '1', 'c' : '2', 'd' : datetime.now()}
series = Series(data)
self.assert_(series.dtype == np.object_)
data = {'a' : 0, 'b' : '1', 'c' : '2', 'd' : '3'}
series = Series(data)
self.assert_(series.dtype == np.object_)
data = {'a' : '0', 'b' : '1'}
series = Series(data, dtype=float)
self.assert_(series.dtype == np.float64)
def test_setindex(self):
# wrong type
series = self.series.copy()
self.assertRaises(TypeError, series._set_index, None)
# wrong length
series = self.series.copy()
self.assertRaises(AssertionError, series._set_index,
np.arange(len(series) - 1))
# works
series = self.series.copy()
series.index = np.arange(len(series))
self.assert_(isinstance(series.index, Index))
def test_array_finalize(self):
pass
def test_fromValue(self):
nans = Series.fromValue(np.NaN, index=self.ts.index)
self.assert_(nans.dtype == np.float_)
strings = Series.fromValue('foo', index=self.ts.index)
self.assert_(strings.dtype == np.object_)
d = datetime.now()
dates = Series.fromValue(d, index=self.ts.index)
self.assert_(dates.dtype == np.object_)
def test_contains(self):
common.assert_contains_all(self.ts.index, self.ts)
def test_save_load(self):
self.series.save('tmp1')
self.ts.save('tmp3')
unp_series = Series.load('tmp1')
unp_ts = Series.load('tmp3')
os.remove('tmp1')
os.remove('tmp3')
assert_series_equal(unp_series, self.series)
assert_series_equal(unp_ts, self.ts)
def test_getitem_get(self):
idx1 = self.series.index[5]
idx2 = self.objSeries.index[5]
self.assertEqual(self.series[idx1], self.series.get(idx1))
self.assertEqual(self.objSeries[idx2], self.objSeries.get(idx2))
self.assertEqual(self.series[idx1], self.series[5])
self.assertEqual(self.objSeries[idx2], self.objSeries[5])
self.assert_(self.series.get(-1) is None)
self.assertEqual(self.series[5], self.series.get(self.series.index[5]))
# missing
d = self.ts.index[0] - datetools.bday
self.assertRaises(Exception, self.ts.__getitem__, d)
def test_fancy(self):
slice1 = self.series[[1,2,3]]
slice2 = self.objSeries[[1,2,3]]
self.assertEqual(self.series.index[2], slice1.index[1])
self.assertEqual(self.objSeries.index[2], slice2.index[1])
self.assertEqual(self.series[2], slice1[1])
self.assertEqual(self.objSeries[2], slice2[1])
def test_slice(self):
numSlice = self.series[10:20]
numSliceEnd = self.series[-10:]
objSlice = self.objSeries[10:20]
self.assert_(self.series.index[9] not in numSlice.index)
self.assert_(self.objSeries.index[9] not in objSlice.index)
self.assertEqual(len(numSlice), len(numSlice.index))
self.assertEqual(self.series[numSlice.index[0]],
numSlice[numSlice.index[0]])
self.assertEqual(numSlice.index[1], self.series.index[11])
self.assert_(common.equalContents(numSliceEnd,
np.array(self.series)[-10:]))
def test_setitem(self):
self.ts[self.ts.index[5]] = np.NaN
self.ts[[1,2,17]] = np.NaN
self.ts[6] = np.NaN
self.assert_(np.isnan(self.ts[6]))
self.assert_(np.isnan(self.ts[2]))
self.ts[np.isnan(self.ts)] = 5
self.assert_(not np.isnan(self.ts[2]))
# caught this bug when writing tests
series = Series(common.makeIntIndex(20).astype(float),
index=common.makeIntIndex(20))
series[::2] = 0
self.assert_((series[::2] == 0).all())
def test_setslice(self):
sl = self.ts[5:20]
self.assertEqual(len(sl), len(sl.index))
self.assertEqual(len(sl.index.indexMap), len(sl.index))
def test_repr(self):
str(self.ts)
str(self.series)
str(self.series.astype(int))
str(self.objSeries)
str(Series(common.randn(1000), index=np.arange(1000)))
# empty
str(self.empty)
# with NaNs
self.series[5:7] = np.NaN
str(self.series)
def test_toString(self):
from cStringIO import StringIO
self.ts.toString(buffer=StringIO())
def test_iter(self):
for i, val in enumerate(self.series):
self.assertEqual(val, self.series[i])
for i, val in enumerate(self.ts):
self.assertEqual(val, self.ts[i])
def test_keys(self):
self.assert_(self.ts.keys() is self.ts.index)
def test_values(self):
self.assert_(np.array_equal(self.ts, self.ts.values))
def test_iteritems(self):
for idx, val in self.series.iteritems():
self.assertEqual(val, self.series[idx])
for idx, val in self.ts.iteritems():
self.assertEqual(val, self.ts[idx])
def test_stats(self):
self.series[5:15] = np.NaN
s1 = np.array(self.series)
s1 = s1[~np.isnan(s1)]
self.assertEquals(np.min(s1), self.series.min())
self.assertEquals(np.max(s1), self.series.max())
self.assertEquals(np.sum(s1), self.series.sum())
self.assertEquals(np.mean(s1), self.series.mean())
self.assertEquals(np.std(s1, ddof=1), self.series.std())
self.assertEquals(np.var(s1, ddof=1), self.series.var())
try:
from scipy.stats import skew
common.assert_almost_equal(skew(s1, bias=False),
self.series.skew())
except ImportError:
pass
self.assert_(not np.isnan(np.sum(self.series)))
self.assert_(not np.isnan(np.mean(self.series)))
self.assert_(not np.isnan(np.std(self.series)))
self.assert_(not np.isnan(np.var(self.series)))
self.assert_(not np.isnan(np.min(self.series)))
self.assert_(not np.isnan(np.max(self.series)))
self.assert_(np.isnan(Series([1.], index=[1]).std()))
self.assert_(np.isnan(Series([1.], index=[1]).var()))
self.assert_(np.isnan(Series([1.], index=[1]).skew()))
def test_append(self):
appendedSeries = self.series.append(self.ts)
for idx, value in appendedSeries.iteritems():
if idx in self.series.index:
self.assertEqual(value, self.series[idx])
elif idx in self.ts.index:
self.assertEqual(value, self.ts[idx])
else:
self.fail("orphaned index!")
self.assertRaises(Exception, self.ts.append, self.ts)
def test_operators(self):
series = self.ts
other = self.ts[::2]
def _check_op(other, op):
cython_or_numpy = op(series, other)
python = series.combineFunc(other, op)
common.assert_almost_equal(cython_or_numpy, python)
def check(other):
_check_op(other, operator.add)
_check_op(other, operator.sub)
_check_op(other, operator.div)
_check_op(other, operator.mul)
_check_op(other, operator.pow)
_check_op(other, lambda x, y: operator.add(y, x))
_check_op(other, lambda x, y: operator.sub(y, x))
_check_op(other, lambda x, y: operator.div(y, x))
_check_op(other, lambda x, y: operator.mul(y, x))
_check_op(other, lambda x, y: operator.pow(y, x))
check(self.ts * 2)
check(self.ts[::2])
check(5)
def check_comparators(other):
_check_op(other, operator.gt)
_check_op(other, operator.ge)
_check_op(other, operator.eq)
_check_op(other, operator.lt)
_check_op(other, operator.le)
check_comparators(5)
check_comparators(self.ts + 1)
def test_operators_date(self):
result = self.objSeries + timedelta(1)
result = self.objSeries - timedelta(1)
def test_operators_corner(self):
series = self.ts
empty = Series([], index=Index([]))
result = series + empty
self.assert_(np.isnan(result).all())
result = empty + Series([], index=Index([]))
self.assert_(len(result) == 0)
deltas = Series([timedelta(1)] * 5, index=np.arange(5))
sub_deltas = deltas[::2]
deltas5 = deltas * 5
deltas = deltas + sub_deltas
# float + int
int_ts = self.ts.astype(int)[:-5]
added = self.ts + int_ts
expected = self.ts.values[:-5] + int_ts.values
self.assert_(np.array_equal(added[:-5], expected))
def test_operators_frame(self):
# rpow does not work with DataFrame
df = DataFrame({'A' : self.ts})
common.assert_almost_equal(self.ts + self.ts, (self.ts + df)['A'])
self.assertRaises(Exception, self.ts.__pow__, df)
def test_combineFirst(self):
series = Series(common.makeIntIndex(20).astype(float),
index=common.makeIntIndex(20))
series_copy = series * 2
series_copy[::2] = np.NaN
# nothing used from the input
combined = series.combineFirst(series_copy)
self.assert_(np.array_equal(combined, series))
# Holes filled from input
combined = series_copy.combineFirst(series)
self.assert_(np.isfinite(combined).all())
self.assert_(np.array_equal(combined[::2], series[::2]))
self.assert_(np.array_equal(combined[1::2], series_copy[1::2]))
# mixed types
index = common.makeStringIndex(20)
floats = Series(common.randn(20), index=index)
strings = Series(common.makeStringIndex(10), index=index[::2])
combined = strings.combineFirst(floats)
common.assert_dict_equal(strings, combined, compare_keys=False)
common.assert_dict_equal(floats[1::2], combined, compare_keys=False)
# corner case
s = Series([1., 2, 3], index=[0, 1, 2])
result = s.combineFirst(Series([], index=[]))
assert_series_equal(s, result)
def test_overloads(self):
methods = ['argsort', 'cumsum', 'cumprod']
for method in methods:
func = getattr(np, method)
self.assert_(np.array_equal(func(self.ts), func(np.array(self.ts))))
# with missing values
ts = self.ts.copy()
ts[::2] = np.NaN
result = func(ts)[1::2]
expected = func(np.array(ts.valid()))
self.assert_(np.array_equal(result, expected))
argsorted = self.ts.argsort()
self.assert_(argsorted.dtype == np.int_)
def test_median(self):
self.assertAlmostEqual(np.median(self.ts), self.ts.median())
ts = self.ts.copy()
ts[::2] = np.NaN
self.assertAlmostEqual(np.median(ts.valid()), ts.median())
def test_corr(self):
# full overlap
self.assertAlmostEqual(self.ts.corr(self.ts), 1)
# partial overlap
self.assertAlmostEqual(self.ts[:15].corr(self.ts[5:]), 1)
# No overlap
self.assert_(np.isnan(self.ts[::2].corr(self.ts[1::2])))
# additional checks?
def test_copy(self):
ts = self.ts.copy()
ts[::2] = np.NaN
# Did not modify original Series
self.assertFalse(np.isnan(self.ts[0]))
def test_count(self):
self.assertEqual(self.ts.count(), len(self.ts))
self.ts[::2] = np.NaN
self.assertEqual(self.ts.count(), np.isfinite(self.ts).sum())
def test_sort(self):
ts = self.ts.copy()
ts.sort()
self.assert_(np.array_equal(ts, self.ts.order()))
self.assert_(np.array_equal(ts.index, self.ts.order().index))
def test_order(self):
ts = self.ts.copy()
ts[:5] = np.NaN
vals = ts.values
result = ts.order()
self.assert_(np.isnan(result[-5:]).all())
self.assert_(np.array_equal(result[:-5], np.sort(vals[5:])))
result = ts.order(missingAtEnd=False)
self.assert_(np.isnan(result[:5]).all())
self.assert_(np.array_equal(result[5:], np.sort(vals[5:])))
# something object-type
ser = Series(['A', 'B'], [1, 2])
# no failure
ser.order()
def test_map(self):
result = self.ts.map(lambda x: x * 2)
self.assert_(np.array_equal(result, self.ts * 2))
def test_toCSV(self):
self.ts.toCSV('_foo')
os.remove('_foo')
def test_toDict(self):
self.assert_(np.array_equal(Series(self.ts.toDict()), self.ts))
def test_clip(self):
val = self.ts.median()
self.assertEqual(self.ts.clip_lower(val).min(), val)
self.assertEqual(self.ts.clip_upper(val).max(), val)
self.assertEqual(self.ts.clip(lower=val).min(), val)
self.assertEqual(self.ts.clip(upper=val).max(), val)
def test_valid(self):
ts = self.ts.copy()
ts[::2] = np.NaN
result = ts.valid()
self.assertEqual(len(result), ts.count())
common.assert_dict_equal(result, ts, compare_keys=False)
def test_shift(self):
shifted = self.ts.shift(1)
unshifted = shifted.shift(-1)
common.assert_dict_equal(unshifted.valid(), self.ts, compare_keys=False)
offset = datetools.bday
shifted = self.ts.shift(1, offset=offset)
unshifted = shifted.shift(-1, offset=offset)
assert_series_equal(unshifted, self.ts)
unshifted = self.ts.shift(0, offset=offset)
assert_series_equal(unshifted, self.ts)
shifted = self.ts.shift(1, timeRule='WEEKDAY')
unshifted = shifted.shift(-1, timeRule='WEEKDAY')
assert_series_equal(unshifted, self.ts)
import datetime
import re
from warnings import (
catch_warnings,
simplefilter,
)
import numpy as np
import pytest
from pandas._libs.tslibs import Timestamp
from pandas.compat import is_platform_windows
import pandas as pd
from pandas import (
DataFrame,
Index,
Series,
_testing as tm,
bdate_range,
read_hdf,
)
from pandas.tests.io.pytables.common import (
_maybe_remove,
ensure_clean_path,
ensure_clean_store,
)
from pandas.util import _test_decorators as td
_default_compressor = "blosc"
pytestmark = pytest.mark.single
def test_conv_read_write(setup_path):
with tm.ensure_clean() as path:
def roundtrip(key, obj, **kwargs):
obj.to_hdf(path, key, **kwargs)
return read_hdf(path, key)
o = tm.makeTimeSeries()
tm.assert_series_equal(o, roundtrip("series", o))
o = tm.makeStringSeries()
tm.assert_series_equal(o, roundtrip("string_series", o))
o = tm.makeDataFrame()
tm.assert_frame_equal(o, roundtrip("frame", o))
# table
df = DataFrame({"A": range(5), "B": range(5)})
df.to_hdf(path, "table", append=True)
result = read_hdf(path, "table", where=["index>2"])
tm.assert_frame_equal(df[df.index > 2], result)
def test_long_strings(setup_path):
# GH6166
df = DataFrame(
{"a": tm.rands_array(100, size=10)}, index=tm.rands_array(100, size=10)
)
with ensure_clean_store(setup_path) as store:
store.append("df", df, data_columns=["a"])
result = store.select("df")
tm.assert_frame_equal(df, result)
def test_api(setup_path):
# GH4584
# API issue when to_hdf doesn't accept append AND format args
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.iloc[:10].to_hdf(path, "df", append=True, format="table")
df.iloc[10:].to_hdf(path, "df", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "df"), df)
# append to False
df.iloc[:10].to_hdf(path, "df", append=False, format="table")
df.iloc[10:].to_hdf(path, "df", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "df"), df)
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.iloc[:10].to_hdf(path, "df", append=True)
df.iloc[10:].to_hdf(path, "df", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "df"), df)
# append to False
df.iloc[:10].to_hdf(path, "df", append=False, format="table")
df.iloc[10:].to_hdf(path, "df", append=True)
tm.assert_frame_equal(read_hdf(path, "df"), df)
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.to_hdf(path, "df", append=False, format="fixed")
tm.assert_frame_equal(read_hdf(path, "df"), df)
df.to_hdf(path, "df", append=False, format="f")
tm.assert_frame_equal(read_hdf(path, "df"), df)
df.to_hdf(path, "df", append=False)
tm.assert_frame_equal(read_hdf(path, "df"), df)
df.to_hdf(path, "df")
tm.assert_frame_equal(read_hdf(path, "df"), df)
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=True, format="table")
store.append("df", df.iloc[10:], append=True, format="table")
tm.assert_frame_equal(store.select("df"), df)
# append to False
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=False, format="table")
store.append("df", df.iloc[10:], append=True, format="table")
tm.assert_frame_equal(store.select("df"), df)
# formats
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=False, format="table")
store.append("df", df.iloc[10:], append=True, format="table")
tm.assert_frame_equal(store.select("df"), df)
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=False, format="table")
store.append("df", df.iloc[10:], append=True, format=None)
tm.assert_frame_equal(store.select("df"), df)
with ensure_clean_path(setup_path) as path:
# Invalid.
df = tm.makeDataFrame()
msg = "Can only append to Tables"
with pytest.raises(ValueError, match=msg):
df.to_hdf(path, "df", append=True, format="f")
with pytest.raises(ValueError, match=msg):
df.to_hdf(path, "df", append=True, format="fixed")
msg = r"invalid HDFStore format specified \[foo\]"
with pytest.raises(TypeError, match=msg):
df.to_hdf(path, "df", append=True, format="foo")
with pytest.raises(TypeError, match=msg):
df.to_hdf(path, "df", append=False, format="foo")
# File path doesn't exist
path = ""
msg = f"File {path} does not exist"
with pytest.raises(FileNotFoundError, match=msg):
read_hdf(path, "df")
def test_get(setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
left = store.get("a")
right = store["a"]
tm.assert_series_equal(left, right)
left = store.get("/a")
right = store["/a"]
tm.assert_series_equal(left, right)
with pytest.raises(KeyError, match="'No object named b in the file'"):
store.get("b")
def test_put_integer(setup_path):
# non-date, non-string index
df = DataFrame(np.random.randn(50, 100))
_check_roundtrip(df, tm.assert_frame_equal, setup_path)
def test_table_values_dtypes_roundtrip(setup_path):
with ensure_clean_store(setup_path) as store:
df1 = DataFrame({"a": [1, 2, 3]}, dtype="f8")
store.append("df_f8", df1)
tm.assert_series_equal(df1.dtypes, store["df_f8"].dtypes)
df2 = DataFrame({"a": [1, 2, 3]}, dtype="i8")
store.append("df_i8", df2)
tm.assert_series_equal(df2.dtypes, store["df_i8"].dtypes)
# incompatible dtype
msg = re.escape(
"invalid combination of [values_axes] on appending data "
"[name->values_block_0,cname->values_block_0,"
"dtype->float64,kind->float,shape->(1, 3)] vs "
"current table [name->values_block_0,"
"cname->values_block_0,dtype->int64,kind->integer,"
"shape->None]"
)
with pytest.raises(ValueError, match=msg):
store.append("df_i8", df1)
# check creation/storage/retrieval of float32 (a bit hacky to
# actually create them thought)
df1 = DataFrame(np.array([[1], [2], [3]], dtype="f4"), columns=["A"])
store.append("df_f4", df1)
tm.assert_series_equal(df1.dtypes, store["df_f4"].dtypes)
assert df1.dtypes[0] == "float32"
# check with mixed dtypes
df1 = DataFrame(
{
c: Series(np.random.randint(5), dtype=c)
for c in ["float32", "float64", "int32", "int64", "int16", "int8"]
}
)
df1["string"] = "foo"
df1["float322"] = 1.0
df1["float322"] = df1["float322"].astype("float32")
df1["bool"] = df1["float32"] > 0
df1["time1"] = Timestamp("20130101")
df1["time2"] = Timestamp("20130102")
store.append("df_mixed_dtypes1", df1)
result = store.select("df_mixed_dtypes1").dtypes.value_counts()
result.index = [str(i) for i in result.index]
expected = Series(
{
"float32": 2,
"float64": 1,
"int32": 1,
"bool": 1,
"int16": 1,
"int8": 1,
"int64": 1,
"object": 1,
"datetime64[ns]": 2,
}
)
result = result.sort_index()
expected = expected.sort_index()
tm.assert_series_equal(result, expected)
def test_series(setup_path):
s = tm.makeStringSeries()
_check_roundtrip(s, tm.assert_series_equal, path=setup_path)
ts = tm.makeTimeSeries()
_check_roundtrip(ts, tm.assert_series_equal, path=setup_path)
ts2 = Series(ts.index, Index(ts.index, dtype=object))
_check_roundtrip(ts2, tm.assert_series_equal, path=setup_path)
ts3 = Series(ts.values, Index(np.asarray(ts.index, dtype=object), dtype=object))
_check_roundtrip(
ts3, tm.assert_series_equal, path=setup_path, check_index_type=False
)
def test_float_index(setup_path):
# GH #454
index = np.random.randn(10)
s = Series(np.random.randn(10), index=index)
_check_roundtrip(s, tm.assert_series_equal, path=setup_path)
def test_tuple_index(setup_path):
# GH #492
col = np.arange(10)
idx = [(0.0, 1.0), (2.0, 3.0), (4.0, 5.0)]
data = np.random.randn(30).reshape((3, 10))
DF = DataFrame(data, index=idx, columns=col)
with catch_warnings(record=True):
simplefilter("ignore", pd.errors.PerformanceWarning)
_check_roundtrip(DF, tm.assert_frame_equal, path=setup_path)
@pytest.mark.filterwarnings("ignore::pandas.errors.PerformanceWarning")
def test_index_types(setup_path):
with catch_warnings(record=True):
values = np.random.randn(2)
func = lambda l, r: tm.assert_series_equal(l, r, check_index_type=True)
with catch_warnings(record=True):
ser = Series(values, [0, "y"])
_check_roundtrip(ser, func, path=setup_path)
with catch_warnings(record=True):
ser = Series(values, [datetime.datetime.today(), 0])
_check_roundtrip(ser, func, path=setup_path)
with catch_warnings(record=True):
ser = Series(values, ["y", 0])
_check_roundtrip(ser, func, path=setup_path)
with catch_warnings(record=True):
ser = Series(values, [datetime.date.today(), "a"])
_check_roundtrip(ser, func, path=setup_path)
with catch_warnings(record=True):
ser = Series(values, [0, "y"])
_check_roundtrip(ser, func, path=setup_path)
ser = Series(values, [datetime.datetime.today(), 0])
_check_roundtrip(ser, func, path=setup_path)
ser = Series(values, ["y", 0])
_check_roundtrip(ser, func, path=setup_path)
ser = Series(values, [datetime.date.today(), "a"])
_check_roundtrip(ser, func, path=setup_path)
ser = Series(values, [1.23, "b"])
_check_roundtrip(ser, func, path=setup_path)
ser = Series(values, [1, 1.53])
_check_roundtrip(ser, func, path=setup_path)
ser = Series(values, [1, 5])
_check_roundtrip(ser, func, path=setup_path)
ser = Series(
values, [datetime.datetime(2012, 1, 1), datetime.datetime(2012, 1, 2)]
)
_check_roundtrip(ser, func, path=setup_path)
def test_timeseries_preepoch(setup_path):
dr = bdate_range("1/1/1940", "1/1/1960")
ts = Series(np.random.randn(len(dr)), index=dr)
try:
_check_roundtrip(ts, tm.assert_series_equal, path=setup_path)
except OverflowError:
if is_platform_windows():
pytest.xfail("known failure on some windows platforms")
else:
raise
@pytest.mark.parametrize(
"compression", [False, pytest.param(True, marks=td.skip_if_windows)]
)
def test_frame(compression, setup_path):
df = tm.makeDataFrame()
# put in some random NAs
df.values[0, 0] = np.nan
df.values[5, 3] = np.nan
_check_roundtrip_table(
df, tm.assert_frame_equal, path=setup_path, compression=compression
)
_check_roundtrip(
df, tm.assert_frame_equal, path=setup_path, compression=compression
)
tdf = tm.makeTimeDataFrame()
_check_roundtrip(
tdf, tm.assert_frame_equal, path=setup_path, compression=compression
)
with ensure_clean_store(setup_path) as store:
# not consolidated
df["foo"] = np.random.randn(len(df))
store["df"] = df
recons = store["df"]
assert recons._mgr.is_consolidated()
# empty
_check_roundtrip(df[:0], tm.assert_frame_equal, path=setup_path)
def test_empty_series_frame(setup_path):
s0 = Series(dtype=object)
s1 = Series(name="myseries", dtype=object)
df0 = DataFrame()
df1 = DataFrame(index=["a", "b", "c"])
df2 = DataFrame(columns=["d", "e", "f"])
_check_roundtrip(s0, tm.assert_series_equal, path=setup_path)
_check_roundtrip(s1, tm.assert_series_equal, path=setup_path)
_check_roundtrip(df0, tm.assert_frame_equal, path=setup_path)
_check_roundtrip(df1, tm.assert_frame_equal, path=setup_path)
_check_roundtrip(df2, tm.assert_frame_equal, path=setup_path)
@pytest.mark.parametrize("dtype", [np.int64, np.float64, object, "m8[ns]", "M8[ns]"])
def test_empty_series(dtype, setup_path):
s = Series(dtype=dtype)
_check_roundtrip(s, tm.assert_series_equal, path=setup_path)
def test_can_serialize_dates(setup_path):
rng = [x.date() for x in bdate_range("1/1/2000", "1/30/2000")]
frame = DataFrame(np.random.randn(len(rng), 4), index=rng)
_check_roundtrip(frame, tm.assert_frame_equal, path=setup_path)
def test_store_hierarchical(setup_path, multiindex_dataframe_random_data):
frame = multiindex_dataframe_random_data
_check_roundtrip(frame, tm.assert_frame_equal, path=setup_path)
_check_roundtrip(frame.T, tm.assert_frame_equal, path=setup_path)
_check_roundtrip(frame["A"], tm.assert_series_equal, path=setup_path)
# check that the names are stored
with ensure_clean_store(setup_path) as store:
store["frame"] = frame
recons = store["frame"]
tm.assert_frame_equal(recons, frame)
@pytest.mark.parametrize(
"compression", [False, pytest.param(True, marks=td.skip_if_windows)]
)
def test_store_mixed(compression, setup_path):
def _make_one():
df = tm.makeDataFrame()
import os
import pandas as pd
import time
import numpy as np
start=time.time()
#folder_location
lookup_data='./SHU-descriptors/'
shell_file='./perovskite_shell_10A/'
dic = {
'H': 1,
'He': 2,
'Li': 3,
'Be': 4,
'B': 5,
'C': 6,
'N': 7,
'O': 8,
'F': 9,
'Ne': 10,
'Na': 11,
'Mg': 12,
'Al': 13,
'Si': 14,
'P': 15,
'S': 16,
'Cl': 17,
'Ar': 18,
'K': 19,
'Ca': 20,
'Sc': 21,
'Ti': 22,
'V': 23,
'Cr': 24,
'Mn': 25,
'Fe': 26,
'Co': 27,
'Ni': 28,
'Cu': 29,
'Zn': 30,
'Ga': 31,
'Ge': 32,
'As': 33,
'Se': 34,
'Br': 35,
'Kr': 36,
'Rb': 37,
'Sr': 38,
'Y': 39,
'Zr': 40,
'Nb': 41,
'Mo': 42,
'Tc': 43,
'Ru': 44,
'Rh': 45,
'Pd': 46,
'Ag': 47,
'Cd': 48,
'In': 49,
'Sn': 50,
'Sb': 51,
'Te': 52,
'I': 53,
'Xe': 54,
'Cs': 55,
'Ba': 56,
'La': 57,
'Ce': 58,
'Pr': 59,
'Nd': 60,
'Pm': 61,
'Sm': 62,
'Eu': 63,
'Gd': 64,
'Tb': 65,
'Dy': 66,
'Ho': 67,
'Er': 68,
'Tm': 69,
'Yb': 70,
'Lu': 71,
'Hf': 72,
'Ta': 73,
'W': 74,
'Re': 75,
'Os': 76,
'Ir': 77,
'Pt': 78,
'Au': 79,
'Hg': 80,
'Tl': 81,
'Pb': 82,
'Bi': 83,
'Po': 84,
'At': 85,
'Rn': 86,
'Fr': 87,
'Ra': 88,
'Ac': 89,
'Th': 90,
'Pa': 91,
'U': 92,
'Np': 93,
'Pu': 94
}
#obtain_file_order
file_order = sorted(os.listdir(shell_file))
#print(file_order)
desname_e = []
enum_tot = 0
for i in range(1,4): #change - ele_prop number
for j in range(1, 6): # change - shell number
for ele_prop in sorted(os.listdir(lookup_data)):
desname_e.append(ele_prop.strip().split('.')[0] + '_e_' + str(i)+ '_Shell_'+str(j))
enum_tot += 1
print(desname_e)
###################### E ###################
df_e_new = pd.DataFrame()
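# --- Hypothetical helper sketch (added for illustration; not part of the original script) ---
# Shows one obvious use of the symbol -> atomic-number table `dic` defined above,
# e.g. translating element symbols read from a shell file into atomic numbers.
def symbols_to_atomic_numbers(symbols):
    """Map element symbols to atomic numbers, e.g. ['Cs', 'Pb', 'I'] -> [55, 82, 53]."""
    return [dic[s] for s in symbols]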
'''
Purpose: This script generates the PCAscatterplot.csv file that is currently being loaded in the PCA tab of the viewer.
Run as follows:
python get_scatterplot3D_template.py [counts table filepath] [metadata filepath]
Note: No genes are used for subsetting and the variable of interest is set to 'flu'.
TODO:
This script should be wrapped into a POST call that takes the user input gene list and user input variable of interest for coloring to enable interactive re-generation of the PCA plot. Currently the PCA plot is static.
'''
import sys
import numpy as np
from sklearn.decomposition import PCA
import pandas as pd
def run_pca(data_file, metadata_file, variable_of_interest, genes_of_interest = None):
#default colors
color_list = ['#8dd3c7','#ffffb3','#bebada','#fb8072','#80b1d3','#fdb462','#b3de69','#fccde5','#d9d9d9','#bc80bd'] #http://colorbrewer2.org/?type=qualitative&scheme=Set3&n=10
df = pd.read_csv(data_file, index_col=0)
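# --- Hypothetical sketch (added for illustration; run_pca() above is truncated here) ---
# Illustrates the generic PCA step the docstring describes: project a counts table onto
# 3 components and attach a metadata column for coloring. The genes-x-samples orientation
# and the default 'flu' column name are assumptions, not details confirmed by this script.
def pca_coordinates(counts_df, metadata_df, variable_of_interest='flu', n_components=3):
    # transpose so rows are samples, columns are genes
    coords = PCA(n_components=n_components).fit_transform(counts_df.T.values.astype(float))
    out = pd.DataFrame(coords, index=counts_df.columns,
                       columns=['PC{}'.format(i + 1) for i in range(n_components)])
    # align the coloring variable by sample id (assumes metadata is indexed by sample)
    out[variable_of_interest] = metadata_df.reindex(out.index)[variable_of_interest]
    return out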
import pandas as pd
import numpy as np
from string import Formatter
DATABASE_HDF5_STRUCT = { 'root-tree':'/tre', # Path
'root-taxonomy':'/tax', # Path
'root-sequence':'/seq', # Path
'root-metadata':'/meta', # Path
'root-map':'/map', # Path
'root-stats':'/stat', # Path,
'stat-reps':'/stat/reps',
'stat-taxs':'/stat/taxs',
'tree-prior':'/tre/master/value', # Newick string UTF-8 encoded bytes. /value is explicitly refered in order to ease acess with simple .read()
'tree-parsed':'/tre/parsed/value', # Newick string UTF-8 encoded bytes. /value is explicitly refered in order to ease acess with simple .read()
'tree-object':'/tre/pickled/value', # Pickled bytes
'taxonomy-prior':'/tax/master',
'taxonomy-sheet':'/tax/parsed',
'sequence-representative':'/seq/reps',
'sequence-aligned':'/seq/algn',
'sequence-accession':'/seq/accs',
'metadata-db-summary':'/meta/summary', # pandas Series that contain all of the statistics and summary of the _local such as taxonomy summary, representative sequence summary.
'metadata-db-info':'/meta/info', # pandas Series that will contain all of the information about which structure elements are full and which are inactive
'metadata-db-stamp':'/meta/stamp', # pandas Series that contain all of the information about the _local creation such as author, creation time, etc.
'metadata-db-history':'/meta/history', # pandas Series that contain all of the information about the _local processing. Data must be sufficient to reconstruct _local to the prior state.
'map-interx-taxon':'/map/interxtax',
'map-interx-repseq':'/map/interxreps',
'map-rep2tid':'/map/reptid', # DataFrame of size len(# Valid RepSeqs) x (7 ranks + 1 TaxonID)
'map-repseq':'/map/repseq', # DataFrame of size len(# TaxonIDs) x (1 Selected RepSeqID + 1 All Related RepSeqIDs separated by `|`
'map-tree':'/map/tree'
}
def get_element_mode(element_key):
"""
Parameters
----------
element_key :
Returns
-------
"""
if element_key in ['tree-prior', 'tree-parsed', 'tree-object']:
return 1
else:
return 2
def get_element_type(element_key):
"""
Parameters
----------
element_key :
Returns
-------
"""
if get_element_mode(element_key) == 2:
if element_key in ['map-interx-taxon','map-interx-repseq','map-tree']:
return 'fixed'
else:
return 'table'
else:
return False
def get_element_index_type(element_key):
"""
Parameters
----------
element_key :
Returns
-------
"""
if element_key in ['taxonomy-prior','sequence-representative','sequence-aligned','sequence-accession','map-rep2tid', 'stat-reps']:
return 'map-interx-repseq'
elif element_key in ['taxonomy-sheet','map-repseq','stat-taxs']:
return 'map-interx-taxon'
else:
return None
def filter_interx_elements(element_key_list):
"""
Parameters
----------
element_key_list :
Returns
-------
"""
tmp_element_list = []
for element in element_key_list:
if element in ['taxonomy-prior','sequence-representative','sequence-aligned','sequence-accession','map-rep2tid','taxonomy-sheet','stat-reps','stat-reps']:
tmp_element_list.append(element)
return tmp_element_list
def filter_elements_by(startswith, exclude=[]):
"""
Parameters
----------
startswith :
exclude :
(Default value = [])
Returns
-------
"""
tmp_element_list = []
for key in DATABASE_HDF5_STRUCT.keys():
if key.startswith(startswith) and key not in exclude:
tmp_element_list.append(key)
return tmp_element_list
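# --- Hypothetical self-check sketch (added for illustration; not part of the original module) ---
# Exercises the lookup helpers above against DATABASE_HDF5_STRUCT; every key referenced
# here exists in the dict defined at the top of this module.
def _demo_struct_lookup():
    assert get_element_mode('tree-prior') == 1    # tree elements are raw/pickled bytes
    assert get_element_mode('map-tree') == 2      # everything else is stored as pandas objects
    assert get_element_type('map-tree') == 'fixed'
    assert get_element_index_type('map-repseq') == 'map-interx-taxon'
    assert filter_elements_by('stat-') == ['stat-reps', 'stat-taxs']
    assert filter_interx_elements(filter_elements_by('map-')) == ['map-rep2tid']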
def missing_to_none(target_pd_data):
"""
Parameters
----------
target_pd_data :
Returns
-------
"""
if isinstance(target_pd_data,pd.DataFrame):
return target_pd_data.applymap(lambda x: None if (x == '' or pd.isna(x)) else x)
else:
return target_pd_data.map(lambda x: None if (x == '' or pd.isna(x)) else x)
def explode_element_columns(db_summary,element_key):
"""
Parameters
----------
db_summary :
element_key :
Returns
-------
"""
tmp_element_col_label = "columns-{}".format(element_key)
if tmp_element_col_label in db_summary.index:
return db_summary.loc[tmp_element_col_label].split('|')
else:
return []
def to_mode(result_obj,mode='array',order=None):
"""
Parameters
----------
result_obj :
mode :
(Default value = 'array')
order :
(Default value = None)
Returns
-------
"""
if mode == 'array':
if isinstance(result_obj, (pd.DataFrame, pd.Series)):
if order is None:
if isinstance(result_obj, pd.DataFrame):
if result_obj.shape[1]>1:
return result_obj.apply(tuple,axis=1).values
else:
return result_obj.values
else:
return result_obj.values
else:
if result_obj.index.isin(order).all():
if isinstance(result_obj, pd.DataFrame):
if result_obj.shape[1]>1:
if result_obj.index.has_duplicates:
result_obj_unq = result_obj[~result_obj.index.duplicated()]
return result_obj_unq.loc[order].apply(tuple,axis=1).values
else:
return result_obj.reindex(index=order,fill_value=tuple([])).apply(tuple,axis=1).values
else:
if result_obj.index.has_duplicates:
result_obj_unq = result_obj[~result_obj.index.duplicated()]
return result_obj_unq.loc[order].values
else:
return result_obj.reindex(index=order,fill_value=pd.NA).values
else:
if result_obj.index.has_duplicates:
result_obj_unq = result_obj[~result_obj.index.duplicated()]
return result_obj_unq.loc[order].values
else:
return result_obj.reindex(index=order,fill_value=pd.NA).values
else:
raise ValueError('`order` index does not match `result_obj`.')
elif isinstance(result_obj, (list, tuple)):
return np.asarray(result_obj)
elif isinstance(result_obj, dict):
if order is None:
if len(list(result_obj.values())[0])>1:
return np.asarray(list(map(tuple,result_obj.values())))
else:
return np.asarray(list(result_obj.values()))
else:
tmp_result_list = []
if len(list(result_obj.values())[0])>1:
for oid in order:
tmp_result_list.append(tuple(result_obj[oid]))
else:
for oid in order:
tmp_result_list.append(result_obj[oid])
return np.asarray(tmp_result_list)
else:
if isinstance(result_obj,np.ndarray) or np.isscalar(result_obj):
return result_obj
else:
raise TypeError('Invalid data type was passed.')
elif mode == 'frame':
if isinstance(result_obj, (pd.DataFrame, pd.Series)):
if order is None:
return result_obj
else:
if result_obj.index.isin(order).all():
if result_obj.index.has_duplicates:
result_obj_unq = result_obj[~result_obj.index.duplicated()]
return result_obj_unq.loc[order]
else:
return result_obj.reindex(index=order,fill_value=np.asarray([]))
else:
raise ValueError('`order` index does not match `result_obj`.')
elif isinstance(result_obj, (list, tuple)):
return pd.Series(result_obj)
import pytest
import json
import pandas as pd
from piper.xl import WorkBook
from piper.factory import bad_quality_orders
from piper.factory import xl_test_data
from pathlib import Path
relative_folder = Path(__file__).parents[1] / 'temp/'
@pytest.fixture
def sample_orders_01():
return bad_quality_orders()
@pytest.fixture
def sample_orders_02():
return xl_test_data()
def test_workbook_add_sheet_auto(sample_orders_01):
file_name = relative_folder / 'WorkBook - auto sheet.xlsx'
df = pd.DataFrame(sample_orders_01)
wb = WorkBook(file_name, ts_prefix=False)
wb.add_sheet(df, sheet_name='**auto')
wb.close()
expected = 1
actual = wb.last_sheet_idx
assert expected == actual
def test_workbook_add_sheet_test_zoom(sample_orders_01):
file_name = relative_folder / 'WorkBook - zoom.xlsx'
df = pd.DataFrame(sample_orders_01)
wb = WorkBook(file_name, ts_prefix=False)
wb.add_sheet(df, sheet_name='**auto', zoom=130)
wb.close()
expected = 130
actual = wb.sheet_dict.get('sheet1')[6]
assert expected == actual
def test_workbook_add_sheet_test_tab_color(sample_orders_01):
file_name = relative_folder / 'WorkBook - tab color.xlsx'
df = pd.DataFrame(sample_orders_01)
wb = WorkBook(file_name, ts_prefix=False)
wb.add_sheet(df, sheet_name='**auto', tab_color='green')
wb.close()
expected = 'green'
actual = wb.sheet_dict.get('sheet1')[5]
assert expected == actual
def test_workbook_add_sheet_test_index(sample_orders_01):
file_name = relative_folder / 'WorkBook - with index.xlsx'
df = pd.DataFrame(sample_orders_01)
wb = WorkBook(file_name, ts_prefix=False)
wb.add_sheet(df, sheet_name='**auto', index=True)
wb.close()
expected = True
actual = wb.sheet_dict.get('sheet1')[3]
assert expected == actual
def test_workbook_add_sheet_test_invalid_theme(sample_orders_01):
file_name = relative_folder / 'WorkBook - sheet dictionary meta.xlsx'
df = pd.DataFrame(sample_orders_01)
wb = WorkBook(file_name, ts_prefix=False)
with pytest.raises(Exception):
assert wb.add_sheet(df, sheet_name='**auto', theme='invalid') is None
wb.close()
def test_workbook_add_sheet_test_sheet_dict(sample_orders_01):
df = pd.DataFrame((sample_orders_01))
file_name = relative_folder / 'WorkBook - sheet dictionary meta.xlsx'
wb = WorkBook(file_name, ts_prefix=False)
expected_ws_reference = wb.add_sheet(df, sheet_name='**auto')
wb.close()
expected_shape = (12, 9)
actual = wb.sheet_dict.get('sheet1')[0]
assert expected_shape == actual
expected_index_names = df.index.names
actual = wb.sheet_dict.get('sheet1')[1]
assert expected_index_names == actual
expected_header = True
actual = wb.sheet_dict.get('sheet1')[2]
assert expected_header == actual
expected_index = False
actual = wb.sheet_dict.get('sheet1')[3]
assert expected_index == actual
actual = wb.sheet_dict.get('sheet1')[4]
assert expected_ws_reference == actual
def test_workbook_add_sheet_sql(sample_orders_01):
df = pd.DataFrame((sample_orders_01))
file_name = relative_folder / 'WorkBook - add SQL sheet.xlsx'
wb = WorkBook(file_name, ts_prefix=False)
sql = ''' select * from eudta.f56474z1 '''
wb.add_sheet(df, sql=sql, sheet_name='**auto')
wb.close()
expected_shape = (1, 1)
actual = wb.sheet_dict.get('_sheet1')[0]
assert expected_shape == actual
def test_workbook_get_range_all(sample_orders_01):
df = pd.DataFrame(sample_orders_01)
file_name = relative_folder / 'WorkBook - get_range test.xlsx'
wb = WorkBook(file_name, ts_prefix=False)
wb.add_sheet(df, sheet_name='**auto')
wb.close()
expected_shape = '$A$1:$I$13'
actual = wb.get_range(sheet_name='sheet1')
assert expected_shape == actual
def test_workbook_get_range_single_column(sample_orders_01):
df = pd.DataFrame(sample_orders_01)
file_name = relative_folder / 'WorkBook - get_range test.xlsx'
wb = WorkBook(file_name, ts_prefix=False)
wb.add_sheet(df, sheet_name='**auto')
wb.close()
expected_shape = '$E$1:$E$13'
actual = wb.get_range(sheet_name='sheet1',
column_range='E')
assert expected_shape == actual
def test_workbook_get_range_from_to(sample_orders_01):
df = pd.DataFrame(sample_orders_01)
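# --- Hypothetical end-to-end sketch (added for illustration; not one of the original tests) ---
# Recaps the minimal WorkBook call pattern exercised above. Only arguments already used in
# these tests (ts_prefix, sheet_name, zoom, tab_color) are relied on; nothing else about the
# piper.xl API is assumed. Not collected by pytest, since the name does not start with 'test_'.
def demo_workbook_minimal(sample_orders_01):
    df = pd.DataFrame(sample_orders_01)
    file_name = relative_folder / 'WorkBook - minimal demo.xlsx'
    wb = WorkBook(file_name, ts_prefix=False)
    wb.add_sheet(df, sheet_name='**auto', zoom=120, tab_color='green')
    wb.close()
    assert wb.last_sheet_idx == 1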
from decimal import Decimal
import unittest, sys
import pandas as pd
import numpy as np
from datetime import datetime, timedelta
from unittest.mock import patch
from forex_predictor.data_extraction.process_raw_data import create_relevant_data_row, create_row, find_start_date_index, get_dataframe_from_dates, get_dates, get_market, get_max_input_minutes_missing, get_open_and_close_for_period, get_relevant_data, process_input_data, set_intervals, set_const_intervals, set_market, set_max_input_minutes_missing, set_target_interval, get_intervals, get_target_interval, apply_category_label_binary, load_market_csv
class Test_Process_Raw_Data(unittest.TestCase):
#Test helper methods
def test_convert_datestring_array_to_datetime(self):
datestrings = ['2020-01-01 00:00:00', '2020-01-02 00:00:00', '2020-01-01 03:00:00']
expected_datetimes = [datetime.strptime('2020-01-01 00:00:00', '%Y-%m-%d %H:%M:%S'), datetime.strptime('2020-01-02 00:00:00', '%Y-%m-%d %H:%M:%S'), datetime.strptime('2020-01-01 03:00:00', '%Y-%m-%d %H:%M:%S')]
self.assertEqual(expected_datetimes, convert_datestring_array_to_datetime(datestrings))
def test_create_expected_row(self):
input_row = [5,4,3,2,1]
expected_row = np.array([[1,2,3,4,1,2]])
actual_row = create_expected_row(input_row, [1,2])
self.assertTrue(np.array_equal(expected_row, actual_row))
#Test process_raw_data methods
def test_set_intervals(self):
intervals = [5, 5, 5]
set_intervals(intervals)
self.assertEqual(intervals, get_intervals())
def test_set_target_interval(self):
interval = timedelta(minutes=69)
set_target_interval(interval)
self.assertEqual(interval, get_target_interval())
def test_set_const_intervals(self):
expected_intervals = [3, 3, 3, 3, 3]
set_const_intervals(3, 5)
self.assertEqual(expected_intervals, get_intervals())
def test_set_max_input_minutes_missing(self):
minutes = 69
set_max_input_minutes_missing(minutes)
self.assertEqual(minutes, get_max_input_minutes_missing())
def test_set_market(self):
market = 'GBP/JPY'
set_market(market)
self.assertEqual(market, get_market())
def test_categorise_data(self):
self.assertEqual(1, apply_category_label_binary(1.2222, 1.2223))
self.assertEqual(0, apply_category_label_binary(1.2223, 1.2222))
@patch('forex_predictor.data_extraction.process_raw_data.pd')
def test_load_market_csv(self, mock_pd):
load_market_csv('EUR/GBP')
mock_pd.read_csv.assert_called_with('data/EUR_GBP.csv')
def test_get_dates(self):
intervals = [5, 5, 5]
set_intervals(intervals)
training_start = datetime.strptime('2020-01-01 00:00:00', '%Y-%m-%d %H:%M:%S')
validation_start = datetime.strptime('2020-01-01 01:00:00', '%Y-%m-%d %H:%M:%S')
test_start = datetime.strptime('2020-01-01 02:00:00', '%Y-%m-%d %H:%M:%S')
test_end = datetime.strptime('2020-01-01 03:00:00', '%Y-%m-%d %H:%M:%S')
actual_training_dates, actual_validation_dates, actual_test_dates = get_dates(training_start, validation_start, test_start, test_end)
expected_training_dates = convert_datestring_array_to_datetime(['2020-01-01 00:00:00', '2020-01-01 00:15:00', '2020-01-01 00:30:00', '2020-01-01 00:45:00'])
expected_validation_dates = convert_datestring_array_to_datetime(['2020-01-01 01:00:00', '2020-01-01 01:15:00', '2020-01-01 01:30:00', '2020-01-01 01:45:00'])
expected_test_dates = convert_datestring_array_to_datetime(['2020-01-01 02:00:00', '2020-01-01 02:15:00', '2020-01-01 02:30:00', '2020-01-01 02:45:00'])
self.assertEqual(expected_training_dates, actual_training_dates)
self.assertEqual(expected_validation_dates, actual_validation_dates)
self.assertEqual(expected_test_dates, actual_test_dates)
@patch('forex_predictor.data_extraction.process_raw_data.get_dataframe_from_dates')
def test_get_relevant_data(self, mock_method):
set_intervals([15,15,15,15])
set_target_interval(timedelta(minutes=60))
df = pd.read_csv('tests/resources/dataframe_data.csv')
target_date = datetime.strptime('2014-07-17 00:00:00', '%Y-%m-%d %H:%M:%S')
get_relevant_data(df, target_date)
start_date = datetime.strptime('2014-07-16 23:00:00', '%Y-%m-%d %H:%M:%S')
end_date = datetime.strptime('2014-07-17 01:00:00', '%Y-%m-%d %H:%M:%S')
mock_method.assert_called_with(start_date, end_date, df)
def test_get_dataframe_from_dates(self):
original_df = pd.read_csv('tests/resources/dataframe_data.csv')
start_date = datetime.strptime('2014-07-17 00:00:00', '%Y-%m-%d %H:%M:%S')
end_date = datetime.strptime('2014-07-17 00:05:00', '%Y-%m-%d %H:%M:%S')
actual_df = get_dataframe_from_dates(start_date, end_date, original_df)
expected_df = original_df.iloc[74:79, :]
self.assertTrue(expected_df.equals(actual_df))
def test_find_start_date_index(self):
target_date = datetime.strptime('2014-07-18 08:46:00', '%Y-%m-%d %H:%M:%S')
df = pd.read_csv('tests/resources/dataframe_data.csv')
actual_index = find_start_date_index(df, target_date)
expected_index = 1994
self.assertEqual(expected_index, actual_index)
def test_process_input_data(self):
set_intervals([5, 5, 5])
df = pd.read_csv('tests/resources/dataframe_data.csv').iloc[1998:2013, :]
test_data = {
'datetime': ['2014-07-18 08:49:00', '2014-07-18 08:54:00', '2014-07-18 08:59:00'],
'open': [0.79227, 0.79223, 0.79315],
'high': [0.79231, 0.79312, 0.79325],
'low': [0.79216, 0.79219, 0.79279],
'close': [0.79222, 0.79312, 0.79284]
}
expected_input_data = pd.DataFrame(data=test_data)
actual_input_data = process_input_data(df)
self.assertTrue(expected_input_data.equals(actual_input_data))
def test_process_input_data_error(self):
set_intervals([5, 5, 5, 60])
df = pd.read_csv('tests/resources/dataframe_data.csv').iloc[1998:2013, :]
expected_error_message = 'Insufficient data to process for this number of intervals'
try:
actual_input_data = process_input_data(df)
except:
exc_type, exc_value, exc_traceback = sys.exc_info()
self.assertEqual(expected_error_message, str(exc_value))
def test_create_row(self):
set_intervals([5,5,5])
test_data = {
'datetime': ['2014-07-18 08:49:00', '2014-07-18 08:54:00', '2014-07-18 08:59:00'],
'open': [0.79227, 0.79223, 0.79315],
'high': [0.79231, 0.79312, 0.79325],
'low': [0.79216, 0.79219, 0.79279],
'close': [0.79222, 0.79312, 0.79284]
}
input_values = pd.DataFrame(data=test_data)
# -*- coding: utf-8 -*-
from __future__ import print_function
import pytest
import operator
from collections import OrderedDict
from datetime import datetime
from itertools import chain
import warnings
import numpy as np
from pandas import (notna, DataFrame, Series, MultiIndex, date_range,
Timestamp, compat)
import pandas as pd
from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas.core.apply import frame_apply
from pandas.util.testing import (assert_series_equal,
assert_frame_equal)
import pandas.util.testing as tm
from pandas.conftest import _get_cython_table_params
from pandas.tests.frame.common import TestData
class TestDataFrameApply(TestData):
def test_apply(self):
with np.errstate(all='ignore'):
# ufunc
applied = self.frame.apply(np.sqrt)
tm.assert_series_equal(np.sqrt(self.frame['A']), applied['A'])
# aggregator
applied = self.frame.apply(np.mean)
assert applied['A'] == np.mean(self.frame['A'])
d = self.frame.index[0]
applied = self.frame.apply(np.mean, axis=1)
assert applied[d] == np.mean(self.frame.xs(d))
assert applied.index is self.frame.index # want this
# invalid axis
df = DataFrame(
[[1, 2, 3], [4, 5, 6], [7, 8, 9]], index=['a', 'a', 'c'])
pytest.raises(ValueError, df.apply, lambda x: x, 2)
# see gh-9573
df = DataFrame({'c0': ['A', 'A', 'B', 'B'],
'c1': ['C', 'C', 'D', 'D']})
df = df.apply(lambda ts: ts.astype('category'))
assert df.shape == (4, 2)
assert isinstance(df['c0'].dtype, CategoricalDtype)
assert isinstance(df['c1'].dtype, CategoricalDtype)
def test_apply_mixed_datetimelike(self):
# mixed datetimelike
# GH 7778
df = DataFrame({'A': date_range('20130101', periods=3),
'B': pd.to_timedelta(np.arange(3), unit='s')})
result = df.apply(lambda x: x, axis=1)
assert_frame_equal(result, df)
def test_apply_empty(self):
# empty
applied = self.empty.apply(np.sqrt)
assert applied.empty
applied = self.empty.apply(np.mean)
assert applied.empty
no_rows = self.frame[:0]
result = no_rows.apply(lambda x: x.mean())
expected = Series(np.nan, index=self.frame.columns)
assert_series_equal(result, expected)
no_cols = self.frame.loc[:, []]
result = no_cols.apply(lambda x: x.mean(), axis=1)
expected = Series(np.nan, index=self.frame.index)
assert_series_equal(result, expected)
# 2476
xp = DataFrame(index=['a'])
rs = xp.apply(lambda x: x['a'], axis=1)
assert_frame_equal(xp, rs)
def test_apply_with_reduce_empty(self):
# reduce with an empty DataFrame
x = []
result = self.empty.apply(x.append, axis=1, result_type='expand')
assert_frame_equal(result, self.empty)
result = self.empty.apply(x.append, axis=1, result_type='reduce')
assert_series_equal(result, Series(
[], index=pd.Index([], dtype=object)))
empty_with_cols = DataFrame(columns=['a', 'b', 'c'])
result = empty_with_cols.apply(x.append, axis=1, result_type='expand')
assert_frame_equal(result, empty_with_cols)
result = empty_with_cols.apply(x.append, axis=1, result_type='reduce')
assert_series_equal(result, Series(
[], index=pd.Index([], dtype=object)))
# Ensure that x.append hasn't been called
assert x == []
def test_apply_deprecate_reduce(self):
with warnings.catch_warnings(record=True):
x = []
self.empty.apply(x.append, axis=1, result_type='reduce')
def test_apply_standard_nonunique(self):
df = DataFrame(
[[1, 2, 3], [4, 5, 6], [7, 8, 9]], index=['a', 'a', 'c'])
rs = df.apply(lambda s: s[0], axis=1)
xp = Series([1, 4, 7], ['a', 'a', 'c'])
assert_series_equal(rs, xp)
rs = df.T.apply(lambda s: s[0], axis=0)
assert_series_equal(rs, xp)
def test_with_string_args(self):
for arg in ['sum', 'mean', 'min', 'max', 'std']:
result = self.frame.apply(arg)
expected = getattr(self.frame, arg)()
tm.assert_series_equal(result, expected)
result = self.frame.apply(arg, axis=1)
expected = getattr(self.frame, arg)(axis=1)
tm.assert_series_equal(result, expected)
def test_apply_broadcast_deprecated(self):
with tm.assert_produces_warning(FutureWarning):
self.frame.apply(np.mean, broadcast=True)
def test_apply_broadcast(self):
# scalars
result = self.frame.apply(np.mean, result_type='broadcast')
expected = DataFrame([self.frame.mean()], index=self.frame.index)
tm.assert_frame_equal(result, expected)
result = self.frame.apply(np.mean, axis=1, result_type='broadcast')
m = self.frame.mean(axis=1)
expected = DataFrame({c: m for c in self.frame.columns})
tm.assert_frame_equal(result, expected)
# lists
result = self.frame.apply(
lambda x: list(range(len(self.frame.columns))),
axis=1,
result_type='broadcast')
m = list(range(len(self.frame.columns)))
expected = DataFrame([m] * len(self.frame.index),
dtype='float64',
index=self.frame.index,
columns=self.frame.columns)
tm.assert_frame_equal(result, expected)
result = self.frame.apply(lambda x: list(range(len(self.frame.index))),
result_type='broadcast')
m = list(range(len(self.frame.index)))
expected = DataFrame({c: m for c in self.frame.columns},
dtype='float64',
index=self.frame.index)
tm.assert_frame_equal(result, expected)
# preserve columns
df = DataFrame(np.tile(np.arange(3), 6).reshape(6, -1) + 1,
columns=list('ABC'))
result = df.apply(lambda x: [1, 2, 3],
axis=1,
result_type='broadcast')
tm.assert_frame_equal(result, df)
df = DataFrame(np.tile(np.arange(3), 6).reshape(6, -1) + 1,
columns=list('ABC'))
result = df.apply(lambda x: Series([1, 2, 3], index=list('abc')),
axis=1,
result_type='broadcast')
expected = df.copy()
tm.assert_frame_equal(result, expected)
def test_apply_broadcast_error(self):
df = DataFrame(
np.tile(np.arange(3, dtype='int64'), 6).reshape(6, -1) + 1,
columns=['A', 'B', 'C'])
# > 1 ndim
with pytest.raises(ValueError):
df.apply(lambda x: np.array([1, 2]).reshape(-1, 2),
axis=1,
result_type='broadcast')
# cannot broadcast
with pytest.raises(ValueError):
df.apply(lambda x: [1, 2],
axis=1,
result_type='broadcast')
with pytest.raises(ValueError):
df.apply(lambda x: Series([1, 2]),
axis=1,
result_type='broadcast')
def test_apply_raw(self):
result0 = self.frame.apply(np.mean, raw=True)
result1 = self.frame.apply(np.mean, axis=1, raw=True)
expected0 = self.frame.apply(lambda x: x.values.mean())
expected1 = self.frame.apply(lambda x: x.values.mean(), axis=1)
assert_series_equal(result0, expected0)
assert_series_equal(result1, expected1)
# no reduction
result = self.frame.apply(lambda x: x * 2, raw=True)
expected = self.frame * 2
assert_frame_equal(result, expected)
def test_apply_axis1(self):
d = self.frame.index[0]
tapplied = self.frame.apply(np.mean, axis=1)
assert tapplied[d] == np.mean(self.frame.xs(d))
def test_apply_ignore_failures(self):
result = frame_apply(self.mixed_frame,
np.mean, 0,
ignore_failures=True).apply_standard()
expected = self.mixed_frame._get_numeric_data().apply(np.mean)
assert_series_equal(result, expected)
def test_apply_mixed_dtype_corner(self):
df = DataFrame({'A': ['foo'],
'B': [1.]})
result = df[:0].apply(np.mean, axis=1)
# the result here is actually kind of ambiguous, should it be a Series
# or a DataFrame?
expected = Series(np.nan, index=pd.Index([], dtype='int64'))
assert_series_equal(result, expected)
df = DataFrame({'A': ['foo'],
'B': [1.]})
result = df.apply(lambda x: x['A'], axis=1)
expected = Series(['foo'], index=[0])
assert_series_equal(result, expected)
result = df.apply(lambda x: x['B'], axis=1)
expected = Series([1.], index=[0])
assert_series_equal(result, expected)
def test_apply_empty_infer_type(self):
no_cols = DataFrame(index=['a', 'b', 'c'])
no_index = DataFrame(columns=['a', 'b', 'c'])
def _check(df, f):
with warnings.catch_warnings(record=True):
test_res = f(np.array([], dtype='f8'))
is_reduction = not isinstance(test_res, np.ndarray)
def _checkit(axis=0, raw=False):
res = df.apply(f, axis=axis, raw=raw)
if is_reduction:
agg_axis = df._get_agg_axis(axis)
assert isinstance(res, Series)
assert res.index is agg_axis
else:
assert isinstance(res, DataFrame)
_checkit()
_checkit(axis=1)
_checkit(raw=True)
_checkit(axis=0, raw=True)
with np.errstate(all='ignore'):
_check(no_cols, lambda x: x)
_check(no_cols, lambda x: x.mean())
_check(no_index, lambda x: x)
_check(no_index, lambda x: x.mean())
result = no_cols.apply(lambda x: x.mean(), result_type='broadcast')
assert isinstance(result, DataFrame)
def test_apply_with_args_kwds(self):
def add_some(x, howmuch=0):
return x + howmuch
def agg_and_add(x, howmuch=0):
return x.mean() + howmuch
def subtract_and_divide(x, sub, divide=1):
return (x - sub) / divide
result = self.frame.apply(add_some, howmuch=2)
exp = self.frame.apply(lambda x: x + 2)
assert_frame_equal(result, exp)
result = self.frame.apply(agg_and_add, howmuch=2)
exp = self.frame.apply(lambda x: x.mean() + 2)
assert_series_equal(result, exp)
res = self.frame.apply(subtract_and_divide, args=(2,), divide=2)
exp = self.frame.apply(lambda x: (x - 2.) / 2.)
assert_frame_equal(res, exp)
def test_apply_yield_list(self):
result = self.frame.apply(list)
assert_frame_equal(result, self.frame)
def test_apply_reduce_Series(self):
self.frame.loc[::2, 'A'] = np.nan
expected = self.frame.mean(1)
result = self.frame.apply(np.mean, axis=1)
assert_series_equal(result, expected)
def test_apply_differently_indexed(self):
df = DataFrame(np.random.randn(20, 10))
result0 = df.apply(Series.describe, axis=0)
expected0 = DataFrame(dict((i, v.describe())
for i, v in compat.iteritems(df)),
columns=df.columns)
assert_frame_equal(result0, expected0)
result1 = df.apply(Series.describe, axis=1)
expected1 = DataFrame(dict((i, v.describe())
for i, v in compat.iteritems(df.T)),
columns=df.index).T
assert_frame_equal(result1, expected1)
def test_apply_modify_traceback(self):
data = DataFrame({'A': ['foo', 'foo', 'foo', 'foo',
'bar', 'bar', 'bar', 'bar',
'foo', 'foo', 'foo'],
'B': ['one', 'one', 'one', 'two',
'one', 'one', 'one', 'two',
'two', 'two', 'one'],
'C': ['dull', 'dull', 'shiny', 'dull',
'dull', 'shiny', 'shiny', 'dull',
'shiny', 'shiny', 'shiny'],
'D': np.random.randn(11),
'E': np.random.randn(11),
'F': np.random.randn(11)})
data.loc[4, 'C'] = np.nan
def transform(row):
if row['C'].startswith('shin') and row['A'] == 'foo':
row['D'] = 7
return row
def transform2(row):
if (notna(row['C']) and row['C'].startswith('shin') and
row['A'] == 'foo'):
row['D'] = 7
return row
try:
data.apply(transform, axis=1)
except AttributeError as e:
assert len(e.args) == 2
assert e.args[1] == 'occurred at index 4'
assert e.args[0] == "'float' object has no attribute 'startswith'"
def test_apply_bug(self):
# GH 6125
positions = pd.DataFrame([[1, 'ABC0', 50], [1, 'YUM0', 20],
[1, 'DEF0', 20], [2, 'ABC1', 50],
[2, 'YUM1', 20], [2, 'DEF1', 20]],
columns=['a', 'market', 'position'])
def f(r):
return r['market']
expected = positions.apply(f, axis=1)
positions = DataFrame([[datetime(2013, 1, 1), 'ABC0', 50],
[datetime(2013, 1, 2), 'YUM0', 20],
[datetime(2013, 1, 3), 'DEF0', 20],
[datetime(2013, 1, 4), 'ABC1', 50],
[datetime(2013, 1, 5), 'YUM1', 20],
[datetime(2013, 1, 6), 'DEF1', 20]],
columns=['a', 'market', 'position'])
result = positions.apply(f, axis=1)
assert_series_equal(result, expected)
def test_apply_convert_objects(self):
data = DataFrame({'A': ['foo', 'foo', 'foo', 'foo',
'bar', 'bar', 'bar', 'bar',
'foo', 'foo', 'foo'],
'B': ['one', 'one', 'one', 'two',
'one', 'one', 'one', 'two',
'two', 'two', 'one'],
'C': ['dull', 'dull', 'shiny', 'dull',
'dull', 'shiny', 'shiny', 'dull',
'shiny', 'shiny', 'shiny'],
'D': np.random.randn(11),
'E': np.random.randn(11),
'F': np.random.randn(11)})
result = data.apply(lambda x: x, axis=1)
assert_frame_equal(result._convert(datetime=True), data)
def test_apply_attach_name(self):
result = self.frame.apply(lambda x: x.name)
expected = Series(self.frame.columns, index=self.frame.columns)
assert_series_equal(result, expected)
result = self.frame.apply(lambda x: x.name, axis=1)
expected = Series(self.frame.index, index=self.frame.index)
assert_series_equal(result, expected)
# non-reductions
result = self.frame.apply(lambda x: np.repeat(x.name, len(x)))
expected = DataFrame(np.tile(self.frame.columns,
(len(self.frame.index), 1)),
index=self.frame.index,
columns=self.frame.columns)
assert_frame_equal(result, expected)
result = self.frame.apply(lambda x: np.repeat(x.name, len(x)),
axis=1)
expected = Series(np.repeat(t[0], len(self.frame.columns))
for t in self.frame.itertuples())
expected.index = self.frame.index
assert_series_equal(result, expected)
def test_apply_multi_index(self):
index = MultiIndex.from_arrays([['a', 'a', 'b'], ['c', 'd', 'd']])
s = DataFrame([[1, 2], [3, 4], [5, 6]],
index=index,
columns=['col1', 'col2'])
result = s.apply(
lambda x: Series({'min': min(x), 'max': max(x)}), 1)
expected = DataFrame([[1, 2], [3, 4], [5, 6]],
index=index,
columns=['min', 'max'])
assert_frame_equal(result, expected, check_like=True)
def test_apply_dict(self):
# GH 8735
A = DataFrame([['foo', 'bar'], ['spam', 'eggs']])
A_dicts = Series([dict([(0, 'foo'), (1, 'spam')]),
dict([(0, 'bar'), (1, 'eggs')])])
B = DataFrame([[0, 1], [2, 3]])
B_dicts = Series([dict([(0, 0), (1, 2)]), dict([(0, 1), (1, 3)])])
fn = lambda x: x.to_dict()
for df, dicts in [(A, A_dicts), (B, B_dicts)]:
reduce_true = df.apply(fn, result_type='reduce')
reduce_false = df.apply(fn, result_type='expand')
reduce_none = df.apply(fn)
assert_series_equal(reduce_true, dicts)
assert_frame_equal(reduce_false, df)
assert_series_equal(reduce_none, dicts)
def test_applymap(self):
applied = self.frame.applymap(lambda x: x * 2)
tm.assert_frame_equal(applied, self.frame * 2)
self.frame.applymap(type)
# gh-465: function returning tuples
result = self.frame.applymap(lambda x: (x, x))
assert isinstance(result['A'][0], tuple)
# gh-2909: object conversion to float in constructor?
df = DataFrame(data=[1, 'a'])
result = df.applymap(lambda x: x)
assert result.dtypes[0] == object
df = DataFrame(data=[1., 'a'])
result = df.applymap(lambda x: x)
assert result.dtypes[0] == object
# see gh-2786
df = DataFrame(np.random.random((3, 4)))
df2 = df.copy()
cols = ['a', 'a', 'a', 'a']
df.columns = cols
expected = df2.applymap(str)
expected.columns = cols
result = df.applymap(str)
tm.assert_frame_equal(result, expected)
# datetime/timedelta
df['datetime'] = Timestamp('20130101')
df['timedelta'] = pd.Timedelta('1 min')
result = df.applymap(str)
for f in ['datetime', 'timedelta']:
assert result.loc[0, f] == str(df.loc[0, f])
# see gh-8222
empty_frames = [pd.DataFrame(),
pd.DataFrame(columns=list('ABC')),
pd.DataFrame(index=list('ABC')),
pd.DataFrame({'A': [], 'B': [], 'C': []})]
for frame in empty_frames:
for func in [round, lambda x: x]:
result = frame.applymap(func)
tm.assert_frame_equal(result, frame)
def test_applymap_box_timestamps(self):
# #2689, #2627
ser = pd.Series(date_range('1/1/2000', periods=10))
def func(x):
return (x.hour, x.day, x.month)
# it works!
pd.DataFrame(ser).applymap(func)
def test_applymap_box(self):
# ufunc will not be boxed. Same test cases as the test_map_box
df = pd.DataFrame({'a': [pd.Timestamp('2011-01-01'),
pd.Timestamp('2011-01-02')],
'b': [pd.Timestamp('2011-01-01', tz='US/Eastern'),
pd.Timestamp('2011-01-02', tz='US/Eastern')],
'c': [pd.Timedelta('1 days'),
pd.Timedelta('2 days')],
'd': [pd.Period('2011-01-01', freq='M'),
pd.Period('2011-01-02', freq='M')]})
res = df.applymap(lambda x: '{0}'.format(x.__class__.__name__))
exp = pd.DataFrame({'a': ['Timestamp', 'Timestamp'],
'b': ['Timestamp', 'Timestamp'],
'c': ['Timedelta', 'Timedelta'],
'd': ['Period', 'Period']})
tm.assert_frame_equal(res, exp)
def test_frame_apply_dont_convert_datetime64(self):
from pandas.tseries.offsets import BDay
df = DataFrame({'x1': [datetime(1996, 1, 1)]})
df = df.applymap(lambda x: x + BDay())
df = df.applymap(lambda x: x + BDay())
assert df.x1.dtype == 'M8[ns]'
def test_apply_non_numpy_dtype(self):
# See gh-12244
df = DataFrame({'dt': pd.date_range(
"2015-01-01", periods=3, tz='Europe/Brussels')})
result = df.apply(lambda x: x)
assert_frame_equal(result, df)
result = df.apply(lambda x: x + pd.Timedelta('1day'))
expected = DataFrame({'dt': pd.date_range(
"2015-01-02", periods=3, tz='Europe/Brussels')})
assert_frame_equal(result, expected)
df = DataFrame({'dt': ['a', 'b', 'c', 'a']}, dtype='category')
result = df.apply(lambda x: x)
assert_frame_equal(result, df)
def test_apply_dup_names_multi_agg(self):
# GH 21063
df = pd.DataFrame([[0, 1], [2, 3]], columns=['a', 'a'])
expected = pd.DataFrame([[0, 1]], columns=['a', 'a'], index=['min'])
result = df.agg(['min'])
tm.assert_frame_equal(result, expected)
class TestInferOutputShape(object):
# the user has supplied an opaque UDF where
# they are transforming the input that requires
# us to infer the output
def test_infer_row_shape(self):
# gh-17437
# if row shape is changing, infer it
df = pd.DataFrame(np.random.rand(10, 2))
result = df.apply(np.fft.fft, axis=0)
assert result.shape == (10, 2)
result = df.apply(np.fft.rfft, axis=0)
assert result.shape == (6, 2)
def test_with_dictlike_columns(self):
# gh 17602
df = DataFrame([[1, 2], [1, 2]], columns=['a', 'b'])
result = df.apply(lambda x: {'s': x['a'] + x['b']},
axis=1)
expected = Series([{'s': 3} for t in df.itertuples()])
assert_series_equal(result, expected)
df['tm'] = [pd.Timestamp('2017-05-01 00:00:00'),
pd.Timestamp('2017-05-02 00:00:00')]
result = df.apply(lambda x: {'s': x['a'] + x['b']},
axis=1)
assert_series_equal(result, expected)
# compose a series
result = (df['a'] + df['b']).apply(lambda x: {'s': x})
expected = Series([{'s': 3}, {'s': 3}])
assert_series_equal(result, expected)
# gh-18775
df = DataFrame()
df["author"] = ["X", "Y", "Z"]
df["publisher"] = ["BBC", "NBC", "N24"]
df["date"] = pd.to_datetime(['17-10-2010 07:15:30',
'13-05-2011 08:20:35',
'15-01-2013 09:09:09'])
result = df.apply(lambda x: {}, axis=1)
expected = Series([{}, {}, {}])
assert_series_equal(result, expected)
def test_with_dictlike_columns_with_infer(self):
# gh 17602
df = DataFrame([[1, 2], [1, 2]], columns=['a', 'b'])
result = df.apply(lambda x: {'s': x['a'] + x['b']},
axis=1, result_type='expand')
expected = DataFrame({'s': [3, 3]})
assert_frame_equal(result, expected)
df['tm'] = [pd.Timestamp('2017-05-01 00:00:00'),
pd.Timestamp('2017-05-02 00:00:00')]
result = df.apply(lambda x: {'s': x['a'] + x['b']},
axis=1, result_type='expand')
assert_frame_equal(result, expected)
def test_with_listlike_columns(self):
# gh-17348
df = DataFrame({'a': Series(np.random.randn(4)),
'b': ['a', 'list', 'of', 'words'],
'ts': date_range('2016-10-01', periods=4, freq='H')})
result = df[['a', 'b']].apply(tuple, axis=1)
expected = Series([t[1:] for t in df[['a', 'b']].itertuples()])
assert_series_equal(result, expected)
result = df[['a', 'ts']].apply(tuple, axis=1)
expected = Series([t[1:] for t in df[['a', 'ts']].itertuples()])
assert_series_equal(result, expected)
# gh-18919
df = DataFrame({'x': Series([['a', 'b'], ['q']]),
'y': Series([['z'], ['q', 't']])})
df.index = MultiIndex.from_tuples([('i0', 'j0'), ('i1', 'j1')])
result = df.apply(
lambda row: [el for el in row['x'] if el in row['y']],
axis=1)
expected = Series([[], ['q']], index=df.index)
assert_series_equal(result, expected)
def test_infer_output_shape_columns(self):
# gh-18573
df = DataFrame({'number': [1., 2.],
'string': ['foo', 'bar'],
'datetime': [pd.Timestamp('2017-11-29 03:30:00'),
pd.Timestamp('2017-11-29 03:45:00')]})
result = df.apply(lambda row: (row.number, row.string), axis=1)
expected = Series([(t.number, t.string) for t in df.itertuples()])
assert_series_equal(result, expected)
def test_infer_output_shape_listlike_columns(self):
# gh-16353
df = DataFrame(np.random.randn(6, 3), columns=['A', 'B', 'C'])
result = df.apply(lambda x: [1, 2, 3], axis=1)
expected = Series([[1, 2, 3] for t in df.itertuples()])
assert_series_equal(result, expected)
result = df.apply(lambda x: [1, 2], axis=1)
expected = Series([[1, 2] for t in df.itertuples()])
assert_series_equal(result, expected)
# gh-17970
df = DataFrame({"a": [1, 2, 3]}, index=list('abc'))
result = df.apply(lambda row: np.ones(1), axis=1)
expected = Series([np.ones(1) for t in df.itertuples()],
index=df.index)
assert_series_equal(result, expected)
result = df.apply(lambda row: np.ones(2), axis=1)
expected = Series([np.ones(2) for t in df.itertuples()],
index=df.index)
assert_series_equal(result, expected)
# gh-17892
df = pd.DataFrame({'a': [pd.Timestamp('2010-02-01'),
pd.Timestamp('2010-02-04'),
pd.Timestamp('2010-02-05'),
| pd.Timestamp('2010-02-06') | pandas.Timestamp |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import duckdb
import pandas as pd
import numpy
import datetime
import sys
def run_parallel_queries(main_table, left_join_table, expected_df, iteration_count = 5):
for i in range(0, iteration_count):
output_df = None
sql = """
select
main_table.*
,t1.*
,t2.*
from main_table
left join left_join_table t1
on main_table.join_column = t1.join_column
left join left_join_table t2
on main_table.join_column = t2.join_column
"""
try:
duckdb_conn = duckdb.connect()
duckdb_conn.execute("PRAGMA threads=4")
duckdb_conn.register('main_table', main_table)
duckdb_conn.register('left_join_table', left_join_table)
output_df = duckdb_conn.execute(sql).fetchdf()
pd.testing.assert_frame_equal(expected_df, output_df)
print(output_df)
except Exception as err:
print(err)
finally:
duckdb_conn.close()
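# Illustrative sketch (not part of the original tests): the minimal register-and-query
# pattern that run_parallel_queries() is built on. The helper name and the sample data
# below are hypothetical.
def _example_register_and_query():
    example_df = pd.DataFrame({"join_column": [1, 2], "other_column": ["a", "b"]})
    conn = duckdb.connect()
    try:
        # expose the pandas DataFrame to DuckDB under a table-like name
        conn.register('example_table', example_df)
        # run SQL against it and fetch the result back as a pandas DataFrame
        return conn.execute("select * from example_table order by join_column").fetchdf()
    finally:
        conn.close()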
class TestParallelPandasScan(object):
def test_parallel_numeric_scan(self, duckdb_cursor):
main_table = pd.DataFrame([{"join_column": 3}])
left_join_table = pd.DataFrame([{"join_column": 3,"other_column": 4}])
run_parallel_queries(main_table, left_join_table, left_join_table)
def test_parallel_ascii_text(self, duckdb_cursor):
main_table = pd.DataFrame([{"join_column":"text"}])
left_join_table = pd.DataFrame([{"join_column":"text","other_column":"more text"}])
run_parallel_queries(main_table, left_join_table, left_join_table)
def test_parallel_unicode_text(self, duckdb_cursor):
main_table = pd.DataFrame([{"join_column":u"mühleisen"}])
left_join_table = pd.DataFrame([{"join_column": u"mühleisen","other_column":u"höhöhö"}])
run_parallel_queries(main_table, left_join_table, left_join_table)
def test_parallel_complex_unicode_text(self, duckdb_cursor):
if sys.version_info.major < 3:
return
main_table = pd.DataFrame([{"join_column":u"鴨"}])
left_join_table = pd.DataFrame([{"join_column": u"鴨","other_column":u"數據庫"}])
run_parallel_queries(main_table, left_join_table, left_join_table)
def test_parallel_emojis(self, duckdb_cursor):
if sys.version_info.major < 3:
return
main_table = pd.DataFrame([{"join_column":u"🤦🏼♂️ L🤦🏼♂️R 🤦🏼♂️"}])
left_join_table = pd.DataFrame([{"join_column": u"🤦🏼♂️ L🤦🏼♂️R 🤦🏼♂️","other_column":u"🦆🍞🦆"}])
run_parallel_queries(main_table, left_join_table, left_join_table)
def test_parallel_numeric_object(self, duckdb_cursor):
main_table = pd.DataFrame({ 'join_column': pd.Series([3], dtype="Int8") })
left_join_table = pd.DataFrame({ 'join_column': pd.Series([3], dtype="Int8"), 'other_column': pd.Series([4], dtype="Int8") })
expected_df = pd.DataFrame({ "join_column": numpy.array([3], dtype=numpy.int8), "other_column": numpy.array([4], dtype=numpy.int8)})
run_parallel_queries(main_table, left_join_table, expected_df)
def test_parallel_timestamp(self, duckdb_cursor):
main_table = pd.DataFrame({ 'join_column': [pd.Timestamp('20180310T11:17:54Z')] })
left_join_table = pd.DataFrame({ 'join_column': [pd.Timestamp('20180310T11:17:54Z')], 'other_column': [ | pd.Timestamp('20190310T11:17:54Z') | pandas.Timestamp |
#!/usr/bin/env python
# coding: utf-8
# usage:
# python gen_csv_denoised_pad_train_val.py 200015779
import sys
import pandas as pd
import numpy as np
try:
val_label = sys.argv[1]
except:
print("specify book name for validation")
sys.exit(1)
df_train = pd.read_csv('./input/train_characters.csv', header=None)
df_train.columns = ['Unicode', 'filepath']
uniq_char = df_train.Unicode.unique()
train_df_list = []
val_df_list = []
for i, cur_char in enumerate(uniq_char):
cur_df = df_train[df_train.Unicode == cur_char]
tmp_train = cur_df.drop(cur_df.index[cur_df.filepath.str.contains(val_label)])
tmp_val = cur_df[cur_df.filepath.str.contains(val_label)]
if len(tmp_val) == 0:
        # If the validation book has no samples of this character, randomly sample up to 20 images from train for validation
val_count = int(len(tmp_train) * 0.10)
if val_count > 20:
cur_val = tmp_train.sample(20)
tmp_train = tmp_train.drop(cur_val.index)
else:
# characters that occur 20 times or less are also copied to validation
cur_val = cur_df
else:
cur_val = tmp_val
if len(tmp_train) == 0:
        # If the character appears only in the specified validation book (nothing left for train),
        # randomly sample up to 20 images from validation for train
train_count = int(len(tmp_val) * 0.10)
if train_count > 20:
cur_train = tmp_val.sample(20)
cur_val = tmp_val.drop(cur_train.index)
else:
# characters that occur 20 times or less are also copied to train
cur_train = cur_df
else:
cur_train = tmp_train
train_df_list.append(cur_train)
val_df_list.append(cur_val)
if i % 100 == 0:
print(".", end='')
sys.stdout.flush()
print("preprocess done!")
train_df = pd.concat(train_df_list)
val_df = pd.concat(val_df_list)
print("postprocess for train data for class contains less than 100 images...")
# Oversample characters that appear less than 100 times more than 100 times
counter = train_df.Unicode.value_counts()
code_and_count = {}
for elem in train_df.Unicode.unique():
if counter[elem] < 100:
code_and_count[elem] = counter[elem]
add_train_df_list = []
for elem, count in code_and_count.items():
multi_count = int(100 / count)
for i in range(multi_count):
add_train_df_list.append(train_df[train_df.Unicode == elem])
add_train_df = | pd.concat(add_train_df_list) | pandas.concat |
import numpy as np
import pandas as pd
from dcase_util.data import DecisionEncoder
class ManyHotEncoder:
""""
Adapted after DecisionEncoder.find_contiguous_regions method in
https://github.com/DCASE-REPO/dcase_util/blob/master/dcase_util/data/decisions.py
Encode labels into numpy arrays where 1 correspond to presence of the class and 0 absence.
Multiple 1 can appear on the same line, it is for multi label problem.
Args:
labels: list, the classes which will be encoded
n_frames: int, (Default value = None) only useful for strong labels. The number of frames of a segment.
Attributes:
labels: list, the classes which will be encoded
n_frames: int, only useful for strong labels. The number of frames of a segment.
"""
def __init__(self, labels, n_frames=None):
if type(labels) in [np.ndarray, np.array]:
labels = labels.tolist()
self.labels = labels
self.n_frames = n_frames
def encode_weak(self, labels):
""" Encode a list of weak labels into a numpy array
Args:
labels: list, list of labels to encode (to a vector of 0 and 1)
Returns:
numpy.array
A vector containing 1 for each label, and 0 everywhere else
"""
# useful for tensor empty labels
if type(labels) is str:
if labels == "empty":
y = np.zeros(len(self.labels)) - 1
return y
if type(labels) is pd.DataFrame:
if labels.empty:
labels = []
elif "event_label" in labels.columns:
labels = labels["event_label"]
y = np.zeros(len(self.labels))
for label in labels:
if not pd.isna(label):
i = self.labels.index(label)
y[i] = 1
return y
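    # Illustrative example (not part of the original code): with
    # labels = ["Speech", "Dog", "Cat"], encode_weak(["Dog", "Cat"]) returns
    # array([0., 1., 1.]) and encode_weak("empty") returns array([-1., -1., -1.]).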
def encode_strong_df(self, label_df):
"""Encode a list (or pandas Dataframe or Serie) of strong labels, they correspond to a given filename
Args:
label_df: pandas DataFrame or Series, contains filename, onset (in frames) and offset (in frames)
If only filename (no onset offset) is specified, it will return the event on all the frames
onset and offset should be in frames
Returns:
numpy.array
Encoded labels, 1 where the label is present, 0 otherwise
"""
assert self.n_frames is not None, "n_frames need to be specified when using strong encoder"
if type(label_df) is str:
if label_df == 'empty':
y = np.zeros((self.n_frames, len(self.labels))) - 1
return y
y = np.zeros((self.n_frames, len(self.labels)))
if type(label_df) is pd.DataFrame:
if {"onset", "offset", "event_label"}.issubset(label_df.columns):
for _, row in label_df.iterrows():
if not pd.isna(row["event_label"]):
i = self.labels.index(row["event_label"])
onset = int(row["onset"])
offset = int(row["offset"])
y[onset:offset, i] = 1 # means offset not included (hypothesis of overlapping frames, so ok)
elif type(label_df) in [pd.Series, list, np.ndarray]: # list of list or list of strings
if type(label_df) is pd.Series:
if {"onset", "offset", "event_label"}.issubset(label_df.index): # means only one value
if not | pd.isna(label_df["event_label"]) | pandas.isna |
# import Asclepius dependencies
from pandas.core.frame import DataFrame
from asclepius.instelling import GGZ, ZKH
from asclepius.medewerker import Medewerker
from asclepius.portaaldriver import PortaalDriver
from asclepius.testen import TestFuncties, Verklaren
# import other dependencies
from typing import Union
from pandas import ExcelWriter
class ReleaseTesten:
def __init__(self, gebruiker: Medewerker, losse_bestanden: bool = False):
        # Initialise
self.gebruiker = gebruiker
self.portaaldriver = PortaalDriver(self.gebruiker)
self.testfuncties = TestFuncties()
self.verklaren = Verklaren()
self.losse_bestanden = losse_bestanden
return None
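    # Example usage (hypothetical names, added for illustration):
    #   ReleaseTesten(Medewerker(...), losse_bestanden=True).test_da(ggz_instelling, zkh_instelling)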
def test_da(self, *instellingen: Union[GGZ, ZKH]):
        # Download the Excel files
mislukt_download = []
for instelling in instellingen:
try:
self.portaaldriver.webscraper_da(instelling)
except:
mislukt_download.append(instelling.klant_code)
        # Test the DA
mislukt_da = []
for instelling in instellingen:
try:
                # Count check
self.testfuncties.aantallencheck(instelling, False)
self.testfuncties.aantallencheck(instelling, True)
                # Find standard differences
self.verklaren.standaardverschillen_da(instelling, False)
self.verklaren.standaardverschillen_da(instelling, True)
except:
mislukt_da.append(instelling.klant_code)
if self.losse_bestanden:
for instelling in instellingen:
if instelling.klant_code not in set(mislukt_download + mislukt_da):
with ExcelWriter(f'Bevindingen DA {instelling.klant_code}.xlsx') as writer:
instelling.bevindingen_da.to_excel(writer, sheet_name=f'{instelling.klant_code}')
instelling.bevindingen_da_test.to_excel(writer, sheet_name=f'{instelling.klant_code} test')
else: pass
else:
with | ExcelWriter(f'Bevindingen DA.xlsx') | pandas.ExcelWriter |
import os
import pandas as pd
import numpy as np
import random
from human_ISH_config import *
import h5py
import time
from shutil import copyfile
import operator
import matplotlib.pyplot as plt
import math
import json
random.seed(1)
def get_stats(images_info_df):
"""
Uses the images_info_df and calculates some stats.
    :param images_info_df: pandas dataframe that has the information of all images
:return: a dictionary containing stats.
"""
stats_dict = {'image_count':None, 'donor_count':None, 'female_donor_count':None, 'male_donor_count':None,
'unique_genes_count': None, 'unique_entrez_id_count' : None}
image_id_list = images_info_df['image_id']
gene_symbol_list = images_info_df['gene_symbol']
entrez_id_list = images_info_df['entrez_id']
experiment_id_list = images_info_df['experiment_id']
specimen_id_list = images_info_df['specimen_id']
donor_id_list = images_info_df['donor_id']
donor_sex_list = images_info_df['donor_sex']
female_donors = images_info_df[images_info_df['donor_sex'] == 'F']
male_donors = images_info_df[images_info_df['donor_sex'] == 'M']
# -----------
# How many donors does this study have? How many are female and how many are male?
donors_count = len(set(images_info_df['donor_id']))
print ("Total number of donors: {}".format(donors_count))
female_donors_count = len(set(female_donors['donor_id']))
print("Number of female donors: {}".format(female_donors_count))
male_donors_count = len(set(male_donors['donor_id']))
print("Number of male donors: {}".format(male_donors_count))
if female_donors_count + male_donors_count != donors_count:
print ("something is not right about the number of female and male donors ...")
# -----------
# How many unique genes does this study include?
gene_count = len(set(gene_symbol_list))
print ("Number of unique genes: {}".format(gene_count))
entrez_id_count = len(set(entrez_id_list))
print("Number of unique entrez IDs: {}".format(entrez_id_count))
if entrez_id_count != gene_count:
print ("something is not right. The number of unique genes should be equal to the number of unique entrez IDs")
# -----------
# How many genes have been tested from each donor.
# How many images do we have from each donor.
group_by_donor = images_info_df.groupby('donor_id')
unique_gene_count_per_donor_list = []
unique_image_count_per_donor_list = []
for key, item in group_by_donor:
this_group_genes = group_by_donor.get_group(key)['gene_symbol']
this_group_images = group_by_donor.get_group(key)['image_id']
unique_gene_count_per_donor_list.append(len(set(this_group_genes)))
unique_image_count_per_donor_list.append(len(set(this_group_images)))
print("Minimum number of unique genes from a donor: {}".format(min(unique_gene_count_per_donor_list)))
print("Maximum number of unique genes from a donor: {}".format(max(unique_gene_count_per_donor_list)))
print("Average number of unique genes from a donor: {}".format(np.mean(unique_gene_count_per_donor_list)))
print("Minimum number of images from a donor: {}".format(min(unique_image_count_per_donor_list)))
print("Maximum number of images from a donor: {}".format(max(unique_image_count_per_donor_list)))
print("Average number of images from a donor: {}".format(np.mean(unique_image_count_per_donor_list)))
# -----------
# How many images do we have from each gene.
# How many donors do we have from each gene.
group_by_gene = images_info_df.groupby('gene_symbol')
unique_donor_count_per_gene_list = []
unique_image_count_per_gene_list = []
for key, item in group_by_gene:
this_group_donors = group_by_gene.get_group(key)['donor_id']
this_group_images = group_by_gene.get_group(key)['image_id']
unique_donor_count_per_gene_list.append(len(set(this_group_donors)))
unique_image_count_per_gene_list.append(len(set(this_group_images)))
print("Minimum number of unique donors from a gene: {}".format(min(unique_donor_count_per_gene_list)))
print("Maximum number of unique donors from a gene: {}".format(max(unique_donor_count_per_gene_list)))
print("Average number of unique donors from a gene: {}".format(np.mean(unique_donor_count_per_gene_list)))
print("Minimum number of images from a gene: {}".format(min(unique_image_count_per_gene_list)))
print("Maximum number of images from a gene: {}".format(max(unique_image_count_per_gene_list)))
print("Average number of images from a gene: {}".format(np.mean(unique_image_count_per_gene_list)))
gene_on_all_donors_count = 0
gene_on_only_one_donor_count = 0
for item in unique_donor_count_per_gene_list:
if item == donors_count:
gene_on_all_donors_count +=1
if item == 1:
gene_on_only_one_donor_count += 1
print ("There are {} genes that have been sampled from all the {} donors.".format(gene_on_all_donors_count, donors_count))
print ("There are {} genes that have been sampled from only 1 donor.".format(gene_on_only_one_donor_count))
# -----------
stats_dict['image_count'] = len(image_id_list)
stats_dict['donor_count'] = donors_count
stats_dict['female_donor_count'] = female_donors_count
stats_dict['male_donor_count'] = male_donors_count
stats_dict['unique_genes_count'] = gene_count
stats_dict['unique_entrez_id_count'] = entrez_id_count
# -------------------
# I want to group by donor, in each donor, see on average, how many images there are per gene
# and then average over all the donors
group_by_donor = images_info_df.groupby('donor_id')
    avg_num_of_images_per_gene_list = []
for key, item in group_by_donor:
# for each donor
this_group_genes = list(group_by_donor.get_group(key)['gene_symbol']) # get a list of its genes (has duplicates)
# for each unique genes, see how many times it appears in the list (== how many images we have of it in this donor)
this_group_genes_count_list = [[x,this_group_genes.count(x)] for x in set(this_group_genes)]
sum = 0
for item in this_group_genes_count_list:
sum += item[1]
# in this donor, on average, we have 'avg' number of images per each gene.
avg = sum / len(this_group_genes_count_list)
# append it to the list
        avg_num_of_images_per_gene_list.append(avg)
    avg_num_of_images_per_gene_in_each_donor_over_all = np.mean(avg_num_of_images_per_gene_list)
print ("Average number of images per each gene in each donor, Over all donors: ",avg_num_of_images_per_gene_in_each_donor_over_all)
return stats_dict
def define_sets_with_no_shared_genes(images_info_df):
"""
    We want to create training, validation, and test sets.
    The condition is that the sets should not have any genes in common.
    If the INCLUDE_SZ_DATA flag is set to False, we make sure there are no schizophrenia-associated genes in the training set.
    If the TRAIN_ON_ALL flag is set to True, all genes are used for training and there is no validation or test set.
    :param images_info_df: pandas dataframe that has the information of all images
    :return: 4 pandas dataframes: training, validation, test, and training+validation (the last three are None when TRAIN_ON_ALL is True)
"""
unique_genes = list(np.unique(images_info_df['gene_symbol']))
total_unique_gene_count = len(unique_genes)
print(total_unique_gene_count)
if TRAIN_ON_ALL == False:
test_genes_count = int((TEST_SPLIT / 100.0) * total_unique_gene_count)
validation_gene_count = int((VALIDATION_SPLIT / 100.0) * total_unique_gene_count)
test_genes = random.sample(unique_genes, test_genes_count)
remaining_genes = [x for x in unique_genes if x not in test_genes]
validation_genes = random.sample(remaining_genes, validation_gene_count)
training_genes = [x for x in remaining_genes if x not in validation_genes]
# ------- filter SZ genes if necessary -------
if INCLUDE_SZ_DATA == False:
path_to_SZ_info = os.path.join(DATA_DIR, "schizophrenia", "human_ISH_info.csv")
sz_info_df = pd.read_csv(path_to_SZ_info)
sz_unique_genes = list(set(list(sz_info_df['gene_symbol'])))
print(
"There are {} genes in the training set. {} schizophrenia-associated genes will be removed"
.format(len(training_genes), len(sz_unique_genes)))
training_genes = [x for x in training_genes if x not in sz_unique_genes]
print ("Number of remaining genes: {}".format(len(training_genes)))
# --------------------------------------------
training_df = images_info_df[images_info_df['gene_symbol'].isin(training_genes)]
validation_df = images_info_df[images_info_df['gene_symbol'].isin(validation_genes)]
test_df = images_info_df[images_info_df['gene_symbol'].isin(test_genes)]
training_df = training_df.sort_values(by=['image_id'])
validation_df = validation_df.sort_values(by=['image_id'])
test_df = test_df.sort_values(by=['image_id'])
train_val_df = pd.concat([training_df, validation_df], ignore_index=True)
train_val_df = train_val_df.sort_values(by=['image_id'])
sets_path = os.path.join(DATA_DIR, STUDY, "sets_"+str(PATCH_COUNT_PER_IMAGE)+"_patches_"+str(SEGMENTATION_TRAINING_SAMPLES)+"_seg")
if (not os.path.exists(sets_path)):
os.mkdir(sets_path)
if INCLUDE_SZ_DATA == True:
training_df.to_csv(os.path.join(sets_path, "training.csv"), index=None)
train_val_df.to_csv(os.path.join(sets_path, "training_validation.csv"), index=None)
else:
training_df.to_csv(os.path.join(sets_path, "no_sz_training.csv"), index=None)
train_val_df.to_csv(os.path.join(sets_path, "no_sz_training_validation.csv"), index=None)
validation_df.to_csv(os.path.join(sets_path, "validation.csv"), index=None)
test_df.to_csv(os.path.join(sets_path, "test.csv"), index=None)
else:
print ("-" * 50)
print ('TRAINING ON ALL')
print("-" * 50)
training_genes = [x for x in unique_genes]
# ------- filter SZ genes if necessary -------
if INCLUDE_SZ_DATA == False:
path_to_SZ_info = os.path.join(DATA_DIR, "schizophrenia", "human_ISH_info.csv")
sz_info_df = pd.read_csv(path_to_SZ_info)
sz_unique_genes = list(set(list(sz_info_df['gene_symbol'])))
print(
"There are {} genes in the training set. {} schizophrenia-associated genes will be removed"
.format(len(training_genes), len(sz_unique_genes)))
training_genes = [x for x in training_genes if x not in sz_unique_genes]
print("Number of remaining genes: {}".format(len(training_genes)))
# --------------------------------------------
training_df = images_info_df[images_info_df['gene_symbol'].isin(training_genes)]
training_df = training_df.sort_values(by=['image_id'])
validation_df = None
test_df = None
train_val_df = None
sets_path = os.path.join(DATA_DIR, STUDY, "sets_" + str(PATCH_COUNT_PER_IMAGE) + "_patches_" + str(
SEGMENTATION_TRAINING_SAMPLES) + "_seg")
if (not os.path.exists(sets_path)):
os.mkdir(sets_path)
if INCLUDE_SZ_DATA == True:
training_df.to_csv(os.path.join(sets_path, "all_training.csv"), index=None)
else:
training_df.to_csv(os.path.join(sets_path, "no_sz_all_training.csv"), index=None)
return training_df, validation_df, test_df, train_val_df
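# Minimal sketch (illustrative only, not used by the pipeline) of the gene-disjoint
# split idea implemented above: hold out a random subset of genes and keep every
# remaining gene for training, so that no gene appears in more than one set.
def _example_gene_disjoint_split(unique_genes, test_fraction=0.2):
    unique_genes = list(unique_genes)
    test_genes = set(random.sample(unique_genes, int(len(unique_genes) * test_fraction)))
    train_genes = [g for g in unique_genes if g not in test_genes]
    return train_genes, sorted(test_genes)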
def define_sets_with_no_shared_donors(images_info_df):
"""
    We want to create training, validation, and test sets.
The condition is that the sets should not have any donors in common.
    :param images_info_df: pandas dataframe that has the information of all images
:return: 3 pandas dataframes: training, validation, test
"""
gene_count_threshold_for_test_set = 60 #70
gene_count_threshold_for_validation_set = 85 #90
group_by_donor = images_info_df.groupby('donor_id')
test_set_donor_list = []
validation_set_donor_list = []
training_set_donor_list = []
for key, item in group_by_donor:
this_group_genes = group_by_donor.get_group(key)['gene_symbol']
if len(set(this_group_genes)) < gene_count_threshold_for_test_set:
test_set_donor_list.append(key)
elif len(set(this_group_genes)) < gene_count_threshold_for_validation_set:
validation_set_donor_list.append(key)
else:
training_set_donor_list.append(key)
print ("\n---- test set ----")
#print (test_set_info_list)
test_set_image_count = 0
test_set_gene_list = []
test_set_donor_count = len(test_set_donor_list)
for item in test_set_donor_list:
this_group_images = group_by_donor.get_group(item)['image_id']
this_group_genes = group_by_donor.get_group(item)['gene_symbol']
test_set_image_count += len(set(this_group_images))
test_set_gene_list.extend(set(this_group_genes))
print ("number of donors in test set: ",test_set_donor_count)
print ("test set image count" , test_set_image_count)
print("test set unique gene count", len(set(test_set_gene_list)))
print("\n---- validation set ----")
#print(validation_set_info_list)
validation_set_image_count = 0
validation_set_gene_list= []
validation_set_donor_count = len(validation_set_donor_list)
for item in validation_set_donor_list:
this_group_images = group_by_donor.get_group(item)['image_id']
this_group_genes = group_by_donor.get_group(item)['gene_symbol']
validation_set_image_count += len(set(this_group_images))
validation_set_gene_list.extend(set(this_group_genes))
print("number of donors in validation set: ",validation_set_donor_count)
print("validation set image count", validation_set_image_count)
print("validation set unique gene count", len(set(validation_set_gene_list)))
print("\n---- training set ----")
#print(training_set_info_list)
training_set_image_count = 0
training_set_gene_list = []
training_set_donor_count = len(training_set_donor_list)
for item in training_set_donor_list:
this_group_images = group_by_donor.get_group(item)['image_id']
this_group_genes = group_by_donor.get_group(item)['gene_symbol']
training_set_image_count += len(set(this_group_images))
training_set_gene_list.extend(set(this_group_genes))
print("number of donors in training set: ",training_set_donor_count)
print("training set image count", training_set_image_count)
print("training set unique gene count", len(set(training_set_gene_list)))
print ("\n")
#----------
training_df = images_info_df[images_info_df['donor_id'].isin(training_set_donor_list)]
validation_df = images_info_df[images_info_df['donor_id'].isin(validation_set_donor_list)]
test_df = images_info_df[images_info_df['donor_id'].isin(test_set_donor_list)]
training_df = training_df.sort_values(by=['image_id'])
validation_df = validation_df.sort_values(by=['image_id'])
test_df = test_df.sort_values(by=['image_id'])
sets_path = os.path.join(DATA_DIR, STUDY, "sets_" + str(PATCH_COUNT_PER_IMAGE) + "_patches_"+str(SEGMENTATION_TRAINING_SAMPLES)+"_seg")
if (not os.path.exists(sets_path)):
os.mkdir(sets_path)
training_df.to_csv(os.path.join(sets_path, "training.csv"), index=None)
validation_df.to_csv(os.path.join(sets_path, "validation.csv"), index=None)
test_df.to_csv(os.path.join(sets_path, "test.csv"), index=None)
return training_df, validation_df, test_df
def compare_set_genes_list(training_df, validation_df, test_df):
"""
Compare the 3 sets to see how many genes they have in common.
:param training_df: pandas dataframe containing the training data
:param validation_df: pandas dataframe containing the validation data
:param test_df: pandas dataframe containing the test data
:return: 4 lists. Each list has the shared genes between different sets:
genes shared between train and validation
genes shared between train and test
genes shared between validation and test
genes shared between all 3 sets
"""
train_set_genes = set(training_df['gene_symbol'])
validation_set_genes = set(validation_df['gene_symbol'])
test_set_genes = set(test_df['gene_symbol'])
train_validation_shared_genes_list = list(set(train_set_genes) & set(validation_set_genes))
train_test_shared_genes_list = list(set(train_set_genes) & set(test_set_genes))
validation_test_shared_genes_list = list(set(test_set_genes) & set(validation_set_genes))
all_shared_genes_list = list(set(train_set_genes) & set(validation_set_genes) & set(test_set_genes))
print("Number of shared genes between train and validation: ", len(train_validation_shared_genes_list))
print("Number of shared genes between train and test: ", len(train_test_shared_genes_list))
print("Number of shared genes between validation and test: ", len(validation_test_shared_genes_list))
print("Number of shared genes between all 3 sets: ", len(all_shared_genes_list))
print ("\n")
return train_validation_shared_genes_list, train_test_shared_genes_list, validation_test_shared_genes_list, all_shared_genes_list
def create_new_sets_by_removing_shared_genes(images_info_df, training_df, validation_df, test_df, train_validation_shared_genes_list,
train_test_shared_genes_list, validation_test_shared_genes_list, all_shared_genes_list):
"""
This function gets the set dataframes and the list of genes that are shared between them.
    It then modifies the validation set so that it shares no genes with the test set,
    and modifies the training set so that it shares no genes with either the new validation set or the test set.
    :param images_info_df: pandas dataframe that has the information of all images
:param training_df: pandas dataframe containing the training data
:param validation_df: pandas dataframe containing the validation data
:param test_df: pandas dataframe containing the test data
:param train_validation_shared_genes_list: list of genes shared between train and validation
:param train_test_shared_genes_list: list of genes shared between train and test
:param validation_test_shared_genes_list: list of genes shared between validation and test
:param all_shared_genes_list: list of genes shared between all 3 sets
:return: 3 dataframes: training, validation, test
"""
print ("Modifying the sets...")
# -----------
#print ("---- Handling validation")
validation_set_genes = set(validation_df['gene_symbol'])
genes_not_shared_between_val_test = set(validation_set_genes) - set(validation_test_shared_genes_list)
new_validation_df = validation_df[validation_df['gene_symbol'].isin(genes_not_shared_between_val_test)]
new_validation_images = set(new_validation_df['image_id'])
new_validation_genes = set(new_validation_df['gene_symbol'])
#print ("new_validation_set_image_count: ", len(new_validation_images))
#print("new_validation_set_gene_count: ",len(new_validation_genes))
#print ("\n")
# ----------
#print ("---- Handling training")
training_set_genes = set(training_df['gene_symbol'])
genes_not_shared_between_train_validation_test = set(training_set_genes) - set(train_test_shared_genes_list) - set(new_validation_genes)
new_training_df = training_df[training_df['gene_symbol'].isin(genes_not_shared_between_train_validation_test)]
new_training_genes = set(new_training_df['gene_symbol'])
new_training_images = set(new_training_df['image_id'])
#print("new_training_set_image_count: ", len(new_training_images))
#print("new_training_set_gene_count: ", len(new_training_genes))
#print("\n")
return new_training_df, new_validation_df, test_df
def get_stats_on_sets(stats_dict, training_df, validation_df, test_df):
"""
Calculates some stats on the sets.
    :param stats_dict: dictionary with the overall dataset stats, as returned by get_stats
:param training_df: pandas dataframe containing the training data
:param validation_df: pandas dataframe containing the validation data
:param test_df: pandas dataframe containing the test data
:return: None
"""
original_image_count = stats_dict['image_count']
original_gene_count = stats_dict['unique_genes_count']
# ----- training info ------
training_genes_count = len(set(training_df['gene_symbol']))
training_images_count = len(set(training_df['image_id']))
training_donor_count = len(set(training_df['donor_id']))
print ("\n---- Train ----")
print ("image count: ", training_images_count)
print ("gene count: ", training_genes_count)
print ("donor count: ", training_donor_count)
# ----- validation info -----
if validation_df is not None:
validation_images_count = len(set(validation_df['image_id']))
validation_genes_count = len(set(validation_df['gene_symbol']))
validation_donor_count = len(set(validation_df['donor_id']))
else:
validation_images_count = 0
validation_genes_count = 0
validation_donor_count = 0
print("\n---- Validation ----")
print("image count: ", validation_images_count)
print("gene count: ", validation_genes_count)
print("donor count: ", validation_donor_count)
# ----- test info ------
if test_df is not None:
test_images_count = len(set(test_df['image_id']))
test_genes_count = len(set(test_df['gene_symbol']))
test_donor_count = len(set(test_df['donor_id']))
else:
test_images_count = 0
test_genes_count = 0
test_donor_count = 0
print("\n---- Test ----")
print("image count: ", test_images_count)
print("gene count: ", test_genes_count)
print("donor count: ", test_donor_count)
current_image_count = training_images_count + validation_images_count + test_images_count
current_gene_count = training_genes_count + validation_genes_count + test_genes_count
print ("original image count: ", original_image_count)
print ("original gene count: ", original_gene_count)
print("\n")
print("current image count: ", current_image_count)
print("current gene count: ", current_gene_count)
print("\n")
print (original_image_count - current_image_count , " images thrown away")
print (original_gene_count - current_gene_count, " genes thrown away")
print ("\n")
print ("Train image percentage: ", (training_images_count/current_image_count)*100)
print("Validation image percentage: ", (validation_images_count / current_image_count) * 100)
print("Test image percentage: ", (test_images_count / current_image_count) * 100)
def donor_info(my_set):
group_by_donor = my_set.groupby('donor_id')
unique_gene_count_per_donor_list = []
unique_image_count_per_donor_list = []
for key, item in group_by_donor:
this_group_genes = group_by_donor.get_group(key)['gene_symbol']
this_group_images = group_by_donor.get_group(key)['image_id']
unique_gene_count_per_donor_list.append((key,len(set(this_group_genes))))
unique_image_count_per_donor_list.append((key,len(set(this_group_images))))
print("\ngene count per donor: ")
print (unique_gene_count_per_donor_list)
print ("\nimage count per donor: ")
print (unique_image_count_per_donor_list)
def make_triplet_csv_no_segmentation(df, out_file):
"""
Use this function to create input suited for the triplet-reid training scripts
"""
if df is not None:
temp_df = df.assign(image=lambda df: df.image_id.apply(lambda row: "{}.jpg".format(row)))[['gene_symbol', 'image']]
new_image_info= []
total_number_of_circles = NUMBER_OF_CIRCLES_IN_HEIGHT * NUMBER_OF_CIRCLES_IN_WIDTH
for patch_index in range(1, total_number_of_circles+1):
patch_image_list = [(id.split(".")[0]+"_"+str(patch_index)+".jpg",gene) for id, gene in zip(temp_df['image'],temp_df['gene_symbol'])]
new_image_info += patch_image_list
new_df = pd.DataFrame(columns=['gene_symbol','image'])
new_df['image'] = [item[0] for item in new_image_info]
new_df['gene_symbol'] = [item[1] for item in new_image_info]
new_df = new_df.sort_values(by=['image'])
return (new_df.to_csv(out_file, index=False, header=False))
else:
return None
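# Illustrative output of the function above (the image id and gene symbol are
# hypothetical): each CSV row pairs a gene symbol with one patch file name, e.g.
#   GRIN2B,100998376_1.jpg
#   GRIN2B,100998376_2.jpg
# so every patch of an image inherits that image's gene label.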
def make_triplet_csv_with_segmentation(df, out_file):
print ("_"*50)
print ("OUT FILE is: ", out_file)
if df is not None:
csv_file_name = "less_than_" + str(PATCH_COUNT_PER_IMAGE) + ".csv"
not_enough_patches_df = pd.read_csv(os.path.join(DATA_DIR, STUDY, "segmentation_data","trained_on_"+str(SEGMENTATION_TRAINING_SAMPLES) ,"outlier_images", csv_file_name))
not_enough_patches_dict = dict(zip(not_enough_patches_df["image_id"], not_enough_patches_df["count"]))
temp_df = df.assign(image=lambda df: df.image_id.apply(lambda row: "{}.jpg".format(row)))[['gene_symbol', 'image']]
new_image_info = []
for id, gene in zip(temp_df['image'],temp_df['gene_symbol']):
id_temp = int(id.split(".")[0])
if id_temp in not_enough_patches_dict:
count = not_enough_patches_dict[id_temp]
for patch_index in range(0, count):
patch_image_list = [(id.split(".")[0] + "_" + str(patch_index) + ".jpg", gene)]
new_image_info += patch_image_list
else:
for patch_index in range(0, PATCH_COUNT_PER_IMAGE):
patch_image_list = [(id.split(".")[0] + "_" + str(patch_index) + ".jpg", gene)]
new_image_info += patch_image_list
new_df = pd.DataFrame(columns=['gene_symbol', 'image'])
new_df['image'] = [item[0] for item in new_image_info]
new_df['gene_symbol'] = [item[1] for item in new_image_info]
new_df = new_df.sort_values(by=['image'])
return (new_df.to_csv(out_file, index=False, header=False))
else:
return None
def make_custom_triplet_csv(study, df, output_dir, output_name, patch_count_per_image = 50):
csv_file_name = "less_than_" + str(patch_count_per_image) + ".csv"
not_enough_patches_df = pd.read_csv(
os.path.join(DATA_DIR, study, "segmentation_data", "trained_on_" + str(SEGMENTATION_TRAINING_SAMPLES),
"outlier_images", csv_file_name))
not_enough_patches_dict = dict(zip(not_enough_patches_df["image_id"], not_enough_patches_df["count"]))
temp_df = df.assign(image=lambda df: df.image_id.apply(lambda row: "{}.jpg".format(row)))[['gene_symbol', 'image']]
new_image_info = []
for id, gene in zip(temp_df['image'], temp_df['gene_symbol']):
id_temp = int(id.split(".")[0])
if id_temp in not_enough_patches_dict:
count = not_enough_patches_dict[id_temp]
for patch_index in range(0, count):
patch_image_list = [(id.split(".")[0] + "_" + str(patch_index) + ".jpg", gene)]
new_image_info += patch_image_list
else:
            for patch_index in range(0, patch_count_per_image):
patch_image_list = [(id.split(".")[0] + "_" + str(patch_index) + ".jpg", gene)]
new_image_info += patch_image_list
new_df = pd.DataFrame(columns=['gene_symbol', 'image'])
new_df['image'] = [item[0] for item in new_image_info]
new_df['gene_symbol'] = [item[1] for item in new_image_info]
new_df = new_df.sort_values(by=['image'])
out_file = os.path.join(output_dir, output_name)
new_df.to_csv(out_file, index=False, header=False)
def make_triplet_csvs(dfs):
sets_path = os.path.join(DATA_DIR, STUDY, "sets_" + str(PATCH_COUNT_PER_IMAGE) + "_patches_"+str(SEGMENTATION_TRAINING_SAMPLES)+"_seg")
out_base = sets_path + "/triplet"
if PATCH_TYPE=="segmentation":
if TRAIN_ON_ALL == True:
if INCLUDE_SZ_DATA == True:
# training on everything, and keep sz genes. Validation and test dfs will be None.
return tuple((make_triplet_csv_with_segmentation(df, "{}_{}.csv".format(out_base, ext)) and "{}_{}.csv".format(
out_base, ext))
for df, ext in zip(dfs, ("all_training", "validation", "test", "training_validation")))
else: # training on everything, but exclude sz genes. Validation and test dfs will be None.
return tuple(
(make_triplet_csv_with_segmentation(df, "{}_{}.csv".format(out_base, ext)) and "{}_{}.csv".format(
out_base, ext))
for df, ext in zip(dfs, ("no_sz_all_training", "validation", "test", "training_validation")))
else: # Not training on everything. So we have validation and test dfs as well.
if INCLUDE_SZ_DATA == True:
return tuple(
(make_triplet_csv_with_segmentation(df, "{}_{}.csv".format(out_base, ext)) and "{}_{}.csv".format(
out_base, ext))
for df, ext in zip(dfs, ("training", "validation", "test", "no_sz_training_validation")))
else: # not training on everything, So we have validation and test dfs. but exclude sz genes from training.
return tuple(
(make_triplet_csv_with_segmentation(df, "{}_{}.csv".format(out_base, ext)) and "{}_{}.csv".format(
out_base, ext))
for df, ext in zip(dfs, ("no_sz_training", "validation", "test", "no_sz_training_validation")))
else: # no segmentation
if INCLUDE_SZ_DATA == True:
return tuple((make_triplet_csv_no_segmentation(df, "{}_{}.csv".format(out_base,ext)) and "{}_{}.csv".format(out_base, ext))
for df, ext in zip(dfs, ("training", "validation", "test", "training_validation")))
else:
return tuple((make_triplet_csv_no_segmentation(df,
"{}_{}.csv".format(out_base, ext)) and "{}_{}.csv".format(
out_base, ext))
for df, ext in zip(dfs, ("no_sz_training", "validation", "test", "no_sz_training_validation")))
def convert_h5_to_csv(experiment_root =None):
if experiment_root == None:
experiment_root = EXPERIMENT_ROOT
exp_root_contents = os.listdir(experiment_root)
for item in exp_root_contents:
if item.endswith(".h5"):
embedding_csv_name = item.split(".")[0] + ".csv"
set_csv_file_name = embedding_csv_name.replace("_embeddings", "")
print ("set csv file name is: ", set_csv_file_name)
print ("item is: ", item)
sets_path = os.path.join(DATA_DIR, STUDY, "sets_" + str(PATCH_COUNT_PER_IMAGE) + "_patches_" + str(
SEGMENTATION_TRAINING_SAMPLES) + "_seg")
# ----
if "autism" in item:
sets_path = os.path.join(DATA_DIR, "autism", "sets_" + str(PATCH_COUNT_PER_IMAGE) + "_patches_" + str(
SEGMENTATION_TRAINING_SAMPLES) + "_seg")
elif "schizophrenia" in item:
sets_path = os.path.join(DATA_DIR, "schizophrenia", "sets_" + str(PATCH_COUNT_PER_IMAGE) + "_patches_" + str(
SEGMENTATION_TRAINING_SAMPLES) + "_seg")
# ----
set_csv_file = os.path.join(sets_path, set_csv_file_name)
df = pd.read_csv(set_csv_file, names=['gene', 'image_id'])
f = h5py.File(os.path.join(experiment_root, item), 'r')['emb']
df['image_id']= df.apply(lambda x: x['image_id'].split('.')[0], axis = 1)
pd.DataFrame(np.array(f), index=df.image_id).to_csv(os.path.join(experiment_root, embedding_csv_name))
def save_embedding_info_into_file(filename):
if (not os.path.exists(EMBEDDING_DEST)):
os.mkdir(EMBEDDING_DEST)
if (not os.path.exists(os.path.join(EMBEDDING_DEST, filename))):
os.mkdir(os.path.join(EMBEDDING_DEST, filename))
embed_info_dir = os.path.join(EMBEDDING_DEST, filename)
exp_root = os.path.join(DATA_DIR, STUDY, "experiment_files", "experiment_" + filename)
exp_root_contents = os.listdir(exp_root)
for item in exp_root_contents:
if item.endswith(".csv"):
copyfile(os.path.join(exp_root, item), os.path.join(embed_info_dir, item))
elif item.endswith(".json"):
copyfile(os.path.join(exp_root, item), os.path.join(embed_info_dir, item))
elif item.endswith(".log"):
copyfile(os.path.join(exp_root, item), os.path.join(embed_info_dir, item))
elif item.startswith("events."):
copyfile(os.path.join(exp_root, item), os.path.join(embed_info_dir, item))
return filename
def merge_embeddings_to_donor_level(filename):
"""
    We have an embedding for every patch in the dataset, and each donor may have more than one image associated with it.
    This function takes all the patches that correspond to a donor and averages their embedding vectors to generate a final embedding for that donor.
"""
embed_file_contents = os.listdir(os.path.join(EMBEDDING_DEST, filename))
for item in embed_file_contents:
if item.endswith("embeddings.csv"):
# if item.endswith("_gene_level.csv") or item.endswith("_image_level.csv"):
# pass
# else:
embeddings_file = pd.read_csv(os.path.join(EMBEDDING_DEST, filename, item))
if "autism" in item:
image_root = os.path.join(DATA_DIR, "autism", "segmentation_data",
"trained_on_" + str(SEGMENTATION_TRAINING_SAMPLES), "results",
"final_patches_" + str(PATCH_COUNT_PER_IMAGE))
elif "schizophrenia" in item:
image_root = os.path.join(DATA_DIR, "schizophrenia", "segmentation_data",
"trained_on_" + str(SEGMENTATION_TRAINING_SAMPLES), "results",
"final_patches_" + str(PATCH_COUNT_PER_IMAGE))
else:
image_root = IMAGE_ROOT
patches_info = pd.read_csv(os.path.join(image_root, "valid_patches_info.csv"))
embeddings_file = embeddings_file.rename(columns={'image_id': 'patch_id'})
            # perform left merge on the two dataframes to add donor_id to the embeddings csv
merged_df = embeddings_file.merge(patches_info[["patch_id", "donor_id"]], how="left", on="patch_id")
# reorder the dataframe columns
merged_columns = list(merged_df)
merged_columns = [merged_columns[0]] + [merged_columns[-1]] + merged_columns[1:-1]
merged_df = merged_df[merged_columns]
# drop the patch_id column
merged_df = merged_df.drop(columns=["patch_id"])
            # group by donor_id and average over the embedding values
grouped_df = merged_df.groupby(['donor_id']).mean()
print(grouped_df.head())
print("the number of donors is: {}".format(len(grouped_df)))
            # then save this file as donor-level embeddings in the same folder
item_name = item.split(".")[0]
save_to_path = os.path.join(EMBEDDING_DEST, filename, item_name + "_donor_level.csv")
grouped_df.to_csv(save_to_path)
def merge_embeddings_to_gene_level(filename):
"""
    We have an embedding for every patch in the dataset, and each gene may have more than one image associated with it.
    This function takes all the patches that correspond to a gene and averages their embedding vectors to generate a final embedding for that gene.
"""
embed_file_contents = os.listdir(os.path.join(EMBEDDING_DEST, filename))
for item in embed_file_contents:
if item.endswith("embeddings.csv"):
#if item.endswith("_gene_level.csv") or item.endswith("_image_level.csv"):
#pass
#else:
embeddings_file = pd.read_csv(os.path.join(EMBEDDING_DEST, filename, item))
if "autism" in item:
image_root = os.path.join(DATA_DIR, "autism", "segmentation_data",
"trained_on_" + str(SEGMENTATION_TRAINING_SAMPLES), "results",
"final_patches_" + str(PATCH_COUNT_PER_IMAGE))
elif "schizophrenia" in item:
image_root = os.path.join(DATA_DIR, "schizophrenia", "segmentation_data",
"trained_on_" + str(SEGMENTATION_TRAINING_SAMPLES), "results",
"final_patches_" + str(PATCH_COUNT_PER_IMAGE))
else:
image_root = IMAGE_ROOT
patches_info = pd.read_csv(os.path.join(image_root, "valid_patches_info.csv"))
embeddings_file = embeddings_file.rename(columns={'image_id': 'patch_id'})
# perform left merge on the two dataframes to add gene_symbol to the embeddings.csv
merged_df = embeddings_file.merge(patches_info[["patch_id", "gene_symbol"]], how = "left" , on = "patch_id")
# reorder the dataframe columns
merged_columns = list(merged_df)
merged_columns = [merged_columns[0]] + [merged_columns [-1]] + merged_columns[1:-1]
merged_df = merged_df[merged_columns]
# drop the patch_id column
merged_df = merged_df.drop(columns=["patch_id"])
# group by gene_symbol and average over the embedding values
grouped_df = merged_df.groupby(['gene_symbol']).mean()
print (grouped_df.head())
print ("the number of genes is: {}".format(len(grouped_df)))
            # then save this file as gene-level embeddings in the same folder
item_name = item.split(".")[0]
save_to_path = os.path.join(EMBEDDING_DEST, filename, item_name+"_gene_level.csv")
grouped_df.to_csv(save_to_path)
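# Minimal sketch (hypothetical toy data, not part of the pipeline) of the
# left-merge + groupby-mean pattern used above to collapse patch-level embeddings
# into one averaged vector per gene.
def _example_average_patch_embeddings():
    patch_embeddings = pd.DataFrame({"patch_id": ["1_0", "1_1", "2_0"],
                                     "dim_0": [0.1, 0.3, 0.5],
                                     "dim_1": [1.0, 3.0, 5.0]})
    patch_info = pd.DataFrame({"patch_id": ["1_0", "1_1", "2_0"],
                               "gene_symbol": ["GENE_A", "GENE_A", "GENE_B"]})
    merged = patch_embeddings.merge(patch_info[["patch_id", "gene_symbol"]], how="left", on="patch_id")
    # one row per gene; the embedding columns are averaged over that gene's patches
    return merged.drop(columns=["patch_id"]).groupby("gene_symbol").mean()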
def merge_embeddings_to_image_level(filename):
"""
We have an embedding for every patch in the dataset.
This function will take all the patches that correspond to an image, and average over the values of the
embedding vector to generate a final embedding for that image.
"""
embed_file_contents = os.listdir(os.path.join(EMBEDDING_DEST, filename))
for item in embed_file_contents:
if item.endswith("embeddings.csv"):
#if item.endswith("_gene_level.csv") or item.endswith("_image_level.csv"):
#pass
#else:
print ("staaaaaaart: ", item)
embeddings_file = pd.read_csv(os.path.join(EMBEDDING_DEST, filename, item))
if "autism" in item:
image_root = os.path.join(DATA_DIR, "autism", "segmentation_data",
"trained_on_" + str(SEGMENTATION_TRAINING_SAMPLES), "results",
"final_patches_" + str(PATCH_COUNT_PER_IMAGE))
elif "schizophrenia" in item:
image_root = os.path.join(DATA_DIR, "schizophrenia", "segmentation_data",
"trained_on_" + str(SEGMENTATION_TRAINING_SAMPLES), "results",
"final_patches_" + str(PATCH_COUNT_PER_IMAGE))
else:
image_root = IMAGE_ROOT
patches_info = pd.read_csv(os.path.join(image_root, "valid_patches_info.csv"))
print (embeddings_file.head())
print ("---")
print (patches_info.head())
im_id_list = patches_info['image_id']
im_id_ex = im_id_list[10]
print (im_id_ex)
print (type(im_id_ex))
if filename == "random":
embeddings_file = embeddings_file.rename(columns={'id': 'patch_id'})
else:
embeddings_file = embeddings_file.rename(columns={'image_id': 'patch_id'})
p_id_list =embeddings_file['patch_id']
p_id_ex = p_id_list[10]
print (p_id_ex)
print (type(p_id_ex))
print ("~~~~~")
            # perform left merge on the two dataframes to add image_id to the embeddings csv
merged_df = embeddings_file.merge(patches_info[["patch_id", "image_id"]], how="left", on="patch_id")
print ("_---")
print (merged_df.head())
# reorder the dataframe columns
merged_columns = list(merged_df)
merged_columns = [merged_columns[0]] + [merged_columns[-1]] + merged_columns[1:-1]
merged_df = merged_df[merged_columns]
print (merged_df.head())
print ("///")
im_id_list = merged_df['image_id']
im_id_ex = im_id_list[10]
print (im_id_ex)
print (type(im_id_ex))
# drop the patch_id column
merged_df = merged_df.drop(columns=["patch_id"])
merged_df = merged_df.astype({'image_id': 'int'})
print ("_____")
print (merged_df.head())
            # group by image_id and average over the embedding values
grouped_df = merged_df.groupby(['image_id']).mean()
print ("[[[[")
print(grouped_df.head())
print("the number of images is: {}".format(len(grouped_df)))
            # then save this file as image-level embeddings in the same folder
item_name = item.split(".")[0]
save_to_path = os.path.join(EMBEDDING_DEST, filename, item_name + "_image_level.csv")
grouped_df.to_csv(save_to_path)
def filter_out_common_genes(df_file_name,threshold = 3):
sets_path = os.path.join(DATA_DIR, STUDY, "sets_" + str(PATCH_COUNT_PER_IMAGE) + "_patches_"+str(SEGMENTATION_TRAINING_SAMPLES)+"_seg")
df = pd.read_csv(os.path.join(sets_path, df_file_name))
print(len(df))
genes = df.iloc[:, 0]
unique_gene_count_dict = {}
genes_unique, counts = np.unique(genes, return_counts=True)
for i in range(len(genes_unique)):
unique_gene_count_dict[genes_unique[i]] = counts[i]
sorted_dict = sorted(unique_gene_count_dict.items(), key=operator.itemgetter(1))
print (sorted_dict)
most_common = []
for i in range(threshold):
most_common.append(sorted_dict[-1-i][0])
# ----------
new_df = df[~df.iloc[:,0].isin(most_common)]
print(len(new_df))
genes = new_df.iloc[:, 0]
unique_gene_count_dict = {}
genes_unique, counts = np.unique(genes, return_counts=True)
for i in range(len(genes_unique)):
unique_gene_count_dict[genes_unique[i]] = counts[i]
sorted_dict = sorted(unique_gene_count_dict.items(), key=operator.itemgetter(1))
print(sorted_dict)
new_df_file_name = df_file_name.split(".")[0] + "_filtered.csv"
new_df.to_csv(os.path.join(sets_path, new_df_file_name), index=None)
def filter_out_genes_out_of_mean_and_std(df_file_name):
sets_path = os.path.join(DATA_DIR, STUDY, "sets_" + str(PATCH_COUNT_PER_IMAGE) + "_patches_"+str(SEGMENTATION_TRAINING_SAMPLES)+"_seg")
in_range = []
df = pd.read_csv(os.path.join(sets_path, df_file_name))
print(len(df))
genes = df.iloc[:, 0]
unique_gene_count_dict = {}
genes_unique, counts = np.unique(genes, return_counts=True)
for i in range(len(genes_unique)):
unique_gene_count_dict[genes_unique[i]] = counts[i]
sorted_dict = sorted(unique_gene_count_dict.items(), key=operator.itemgetter(1))
print (sorted_dict)
ordered_genes = [item[0] for item in sorted_dict]
ordered_unique_gene_count = [item[1] for item in sorted_dict]
avg =np.mean(ordered_unique_gene_count)
sd = np.std(ordered_unique_gene_count)
max_lim = int(avg) + int(sd)
min_lim = int(avg) - int(sd)
print ("avg is: ", avg)
print ("sd is: ", sd)
print ("max lim is: ", max_lim)
print ("min lim is: ",min_lim)
num_of_out_of_range_genes = 0
num_of_out_of_range_images = 0
for item in sorted_dict:
if item[1]> min_lim and item[1] < max_lim:
in_range.append(item[0])
else:
num_of_out_of_range_genes +=1
num_of_out_of_range_images += item[1]
print ("num of out of range genes: ", num_of_out_of_range_genes)
print ("num of out of range images: ", num_of_out_of_range_images)
# ----------
new_df = df[df.iloc[:, 0].isin(in_range)]
print(len(new_df))
genes = new_df.iloc[:, 0]
unique_gene_count_dict = {}
genes_unique, counts = np.unique(genes, return_counts=True)
for i in range(len(genes_unique)):
unique_gene_count_dict[genes_unique[i]] = counts[i]
sorted_dict = sorted(unique_gene_count_dict.items(), key=operator.itemgetter(1))
print(sorted_dict)
new_df_file_name = df_file_name.split(".")[0] + "_in_range.csv"
new_df.to_csv(os.path.join(sets_path, new_df_file_name), index=None)
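# Worked example (hypothetical numbers) for the filter above: if the per-gene
# image counts have mean 60 and standard deviation 25, then max_lim = 85 and
# min_lim = 35, so only genes with between 36 and 84 images (exclusive bounds) are kept.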
def draw_hist(df_file_name):
sets_path = os.path.join(DATA_DIR, STUDY, "sets_" + str(PATCH_COUNT_PER_IMAGE) + "_patches_"+str(SEGMENTATION_TRAINING_SAMPLES)+"_seg")
df = pd.read_csv(os.path.join(sets_path, df_file_name))
print(len(df))
genes = df.iloc[:, 0]
unique_gene_count_dict = {}
genes_unique, counts = np.unique(genes, return_counts=True)
for i in range(len(genes_unique)):
unique_gene_count_dict[genes_unique[i]] = counts[i]
sorted_dict = sorted(unique_gene_count_dict.items(), key=operator.itemgetter(1))
ordered_genes = [item[0] for item in sorted_dict]
ordered_unique_gene_count = [item[1] for item in sorted_dict]
print(ordered_genes)
print(ordered_unique_gene_count)
print(np.mean(ordered_unique_gene_count))
print(np.std(ordered_unique_gene_count))
    plt.hist(ordered_unique_gene_count, density=False, bins=100)
plt.ylabel('unique gene count')
plt.show()
def images_wiht_no_valid_patches():
path_to_outliers = os.path.join(DATA_DIR,STUDY,"segmentation_data","trained_on_"+str(SEGMENTATION_TRAINING_SAMPLES),"outlier_images")
less_than_thresh_df = pd.read_csv(os.path.join(path_to_outliers, "less_than_" + str(PATCH_COUNT_PER_IMAGE) + ".csv"))
no_valid_patch_list = list(less_than_thresh_df[less_than_thresh_df["count"] == 0]["image_id"])
no_valid_patch_list = [str(item) for item in no_valid_patch_list]
return no_valid_patch_list
def make_sets():
images_info_df = pd.read_csv(os.path.join(DATA_DIR, STUDY, "human_ISH_info.csv"))
no_valid_patch_list = images_wiht_no_valid_patches()
images_info_df = images_info_df[~images_info_df["image_id"].isin(no_valid_patch_list)]
stats_dict = get_stats(images_info_df)
# if TRAIN_ON_ALL is set to True, then validation_df, test_df, train_val_df will all be None
training_df, validation_df, test_df, train_val_df = define_sets_with_no_shared_genes(images_info_df)
get_stats_on_sets(stats_dict, training_df, validation_df, test_df)
make_triplet_csvs((training_df, validation_df, test_df, train_val_df))
filter_out_common_genes("triplet_training.csv")
filter_out_genes_out_of_mean_and_std("triplet_training.csv")
"""
training_df, validation_df, test_df = define_sets_with_no_shared_donors(images_info_df)
train_validation_shared_genes_list, train_test_shared_genes_list, validation_test_shared_genes_list, all_shared_genes_list = \
compare_set_genes_list(training_df, validation_df, test_df)
new_training_df, new_validation_df, test_df = create_new_sets_by_removing_shared_genes(images_info_df, training_df, validation_df, test_df,
train_validation_shared_genes_list, train_test_shared_genes_list,
validation_test_shared_genes_list, all_shared_genes_list)
get_stats_on_sets(stats_dict, new_training_df, new_validation_df, test_df)
"""
def generate_random_embeddings(embeddings_length):
"""
    this function generates random embeddings for the images. The result will be a csv file that has the embedding vector of every image.
:param embeddings_length: the length of the embedding vector which also determines the number of columns in the final csv file.
:return: None
"""
#set_name_list = ["training.csv", "training_validation.csv", "validation.csv"]
#set_name_list = ["all_training.csv", "no_sz_all_training.csv"]
set_name_list = ["test.csv"]
for set_name in set_name_list:
print ("set: ", set_name)
path_to_info_csv = os.path.join(DATA_DIR,STUDY, "sets_50_patches_40_seg/" + set_name)
#path_to_info_csv = os.path.join(IMAGE_ROOT,info_csv_file)
info_csv = pd.read_csv(path_to_info_csv)
columns = list(info_csv)
id_column = info_csv[columns[0]]
n_images = len(info_csv)
cols = np.arange(0, embeddings_length)
cols = list(map(str, cols))
cols = ['id'] + cols
random_embed_file = pd.DataFrame(columns=cols)
random_embed_file['id'] = id_column
for i in range(embeddings_length):
sample = np.random.uniform(size=(n_images,))
random_embed_file[str(i)] = sample
path_to_random = os.path.join(EMBEDDING_DEST, "random")
if (not os.path.exists(path_to_random)):
os.mkdir(path_to_random)
random_embed_file.to_csv(os.path.join(path_to_random, "random_" + set_name.split(".")[0] +"_embeddings_image_level.csv"),index=None)
print ("finished generating random embeddings...")
def generate_random_embeddings_for_disease_dataset(embeddings_length, study=None):
"""
    this function generates random embeddings for the images of a certain dataset. The result will be a csv file that has the embedding vector of every image.
:param study: the specific study (=disease) dataset. Could be schizophrenia or autism.
:param embeddings_length: the length of the embedding vector which also determines the number of columns in the final csv file.
:return: None
"""
    if study is None:
study = STUDY
path_to_info_csv = os.path.join(DATA_DIR, study, "human_ISH_info.csv")
info_csv = pd.read_csv(path_to_info_csv,)
columns = list(info_csv)
id_column = info_csv[columns[0]]
n_images = len(info_csv)
cols = np.arange(0, embeddings_length)
cols = list(map(str, cols))
cols = ['id'] + cols
random_embed_file = pd.DataFrame(columns=cols)
random_embed_file['id'] = id_column
for i in range(embeddings_length):
sample = np.random.uniform(size=(n_images,))
random_embed_file[str(i)] = sample
path_to_random = os.path.join(DATA_DIR, study, "segmentation_embeddings", "random")
if (not os.path.exists(path_to_random)):
os.mkdir(path_to_random)
random_embed_file.to_csv(os.path.join(path_to_random, "random_embeddings_image_level.csv"),index=None)
print ("finished generating random embeddings...")
def get_embeddings_from_pre_trained_model_for_each_set(model_name="resnet50"):
mode_folder_name = model_name + "_" + str(PATCH_COUNT_PER_IMAGE) + "_patches"
path_to_pre_trained_embeddings = os.path.join(EMBEDDING_DEST, mode_folder_name, model_name+"_embeddings_image_level.csv")
pre_trained_embeddings = | pd.read_csv(path_to_pre_trained_embeddings) | pandas.read_csv |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# ......................................................
# ___
# ____ _ _______/ | ____ ____
# / __ \ | /| / / ___/ /| | / __ \/ __ \
# / /_/ / |/ |/ / / / ___ |/ /_/ / /_/ /
# / .___/|__/|__/_/ /_/ |_/ .___/ .___/
# /_/ /_/ /_/
#
# Author: <NAME>
# Scope: Dash application to interactively analyze
#        power consumptions recorded by
# https://github.com/andros21/mcce.git
# querying from local or remote database
# ......................................................
import calendar
import datetime
import os
import re
import sqlite3
import dash
from dash import dcc
from dash import html
from dash import dash_table as dtt
import numpy as np
import pandas as pd
from dash.dependencies import Input, Output
from flask import Flask
# Create dynamic application
# --------------------------------
server = Flask(__name__)
app = dash.Dash(__name__, server)
app.title = "ePwr Stats"
# Default vars
# -------------
dyear = int((datetime.datetime.now() - datetime.timedelta(days=1)).year)
dmonth = 1
dday = 1
def serve_layout():
"""
Return application layout, so that at every
page refresh the app will refresh the input files
"""
# Load markdown header and footer
# ----------------------------------------------
with open("assets/head.md", "r") as fs:
header = fs.read()
with open("assets/foot.md", "r") as fs:
footer = fs.read()
footer = footer.replace("YEAR", str(datetime.datetime.now().year))
# Define daily stats table, and search for
# different year DBs
# ----------------------------------------------
global dailyStats
yearDBs = [
re.search(r"20[2-9][0-9]", os.path.basename(db)).group()
for db in os.listdir(os.path.dirname(f"{os.environ['DATABASE_URL']}.{dyear}"))
]
# Build app layout and return it
# ----------------------------------------------
return html.Div(
className="main-div",
children=[
dcc.Markdown(header, className="header-div"),
html.Div(
[
dcc.Slider(
id="year-slider",
className="month-slider",
min=int(min(yearDBs)),
max=int(max(yearDBs)),
value=dyear,
marks={int(year): year for year in yearDBs},
step=None,
),
dcc.Slider(id="month-slider", className="month-slider"),
html.Div(id="serialize-df", style={"display": "none"}),
dcc.Graph(id="hist-slider", className="hist-slider"),
html.Div(
className="table-slider",
children=[
html.Div(
className="plot-hover",
children=[dcc.Graph(id="plot-hover")],
),
html.Div(
className="mtable-slider",
children=[
dtt.DataTable(
id="mtable-slider",
columns=[
{"name": " ", "id": "1"},
{"name": "Powerh [kWh]", "id": "2"},
],
style_cell={"textAlign": "center"},
style_header={
"backgroundColor": "#F3F3F3",
"fontWeight": "bold",
},
style_cell_conditional=[
{
"if": {"column_id": "1"},
"fontWeight": "bold",
}
],
),
dcc.Markdown(
id="mark-slider", className="mark-slider"
),
],
),
],
),
html.Div(
className="table-slider",
children=[
html.Div(
className="mmark-slider",
children=[
dcc.Markdown(
id="mark1-slider", className="mark-slider"
)
],
),
html.Div(
className="pie-slider",
children=[dcc.Graph(id="pie-slider")],
),
],
),
]
),
dcc.Markdown(footer, className="footer-div"),
],
)
# Create app layout
# -----------------------
app.layout = serve_layout
# Callbacks functions
# -------------------------------------
@app.callback(
[
Output("month-slider", "min"),
Output("month-slider", "max"),
Output("month-slider", "value"),
Output("month-slider", "marks"),
Output("month-slider", "step"),
Output("serialize-df", "children"),
],
Input("year-slider", "value"),
)
def update_month(year):
"""
    Update the month slider as a function of the
    selected database year.
    If the year is None, fall back to the default year.
"""
    db_uri = f"{os.environ['DATABASE_URL']}.{year if year is not None else dyear}"
engine = sqlite3.connect(db_uri)
dailyStats = pd.read_sql(
"select * from avg", engine, index_col="index", parse_dates=["index"]
)
engine.close()
return (
dailyStats.index.month.min(),
dailyStats.index.month.max(),
dailyStats.index.month.min(),
{
month: datetime.date(1900, month, 1).strftime("%b")
for month in range(
dailyStats.index.month.min(), dailyStats.index.month.max() + 1
)
},
None,
dailyStats.to_json(),
)
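# The hidden "serialize-df" div is how the queried DataFrame is shared between
# callbacks: update_month stores it as JSON, downstream callbacks rebuild it.
# A minimal sketch of that round trip (hypothetical df, outside any callback):
#
#     ser = df.to_json()            # what update_month returns into the div
#     df_again = pd.read_json(ser)  # what update_hist does with Input("serialize-df")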
@app.callback(
Output("hist-slider", "figure"),
[
Input("year-slider", "value"),
Input("month-slider", "value"),
Input("serialize-df", "children"),
],
)
def update_hist(year, month, ser):
"""
    Update the histogram plot as a function of the
    selected year in year-slider and the
    selected month in month-slider.
    If the year is None, fall back to the default year;
    if the month is None, fall back to January (the default month).
"""
year = year if year is not None else dyear
month = month if month is not None else dmonth
dailyStats = | pd.read_json(ser) | pandas.read_json |
# coding:utf-8
import datetime as dt
import numpy as np
import pandas as pd
import sys
from .deprecations import deprecated_kwargs
from . import utils
from copy import deepcopy
try:
    from collections.abc import Iterable
except ImportError:  # Python 2 fallback
    from collections import Iterable
from openpyxl import load_workbook
from openpyxl.cell.cell import get_column_letter
from openpyxl.xml.functions import fromstring, QName
from openpyxl.utils import cell
PY2 = sys.version_info[0] == 2
# Python 2
if PY2:
# noinspection PyUnresolvedReferences
from container import Container
# noinspection PyUnresolvedReferences
from series import Series
# noinspection PyUnresolvedReferences
from styler import Styler, ColorScaleConditionalFormatRule
# Python 3
else:
from StyleFrame.container import Container
from StyleFrame.styler import Styler, ColorScaleConditionalFormatRule
from StyleFrame.series import Series
try:
pd_timestamp = pd.Timestamp
except AttributeError:
pd_timestamp = pd.tslib.Timestamp
str_type = basestring if PY2 else str
unicode_type = unicode if PY2 else str
class StyleFrame(object):
"""
    A wrapper class around a pandas DataFrame.
    Stores Container objects that hold both the values and the Styler objects
    that will be applied when writing to Excel.
"""
P_FACTOR = 1.3
A_FACTOR = 13
def __init__(self, obj, styler_obj=None):
from_another_styleframe = False
from_pandas_dataframe = False
if styler_obj and not isinstance(styler_obj, Styler):
raise TypeError('styler_obj must be {}, got {} instead.'.format(Styler.__name__, type(styler_obj).__name__))
if isinstance(obj, pd.DataFrame):
from_pandas_dataframe = True
if obj.empty:
self.data_df = deepcopy(obj)
else:
self.data_df = obj.applymap(lambda x: Container(x, deepcopy(styler_obj)) if not isinstance(x, Container) else x)
elif isinstance(obj, pd.Series):
self.data_df = obj.apply(lambda x: Container(x, deepcopy(styler_obj)) if not isinstance(x, Container) else x)
elif isinstance(obj, (dict, list)):
self.data_df = | pd.DataFrame(obj) | pandas.DataFrame |
#! /usr/bin/env python
import pandas as pd
import numpy as np
import argparse
from collections import defaultdict
from progressbar import ProgressBar
import networkx as nx
import os
import sys
def load_mesh_graph(infile):
disease2code = defaultdict(list)
code2name = {}
codes = []
for i in open(infile).readlines():
name, code = i.rstrip().split(';')
if code.startswith('C'):
disease2code[name.lower()].append(code)
code2name[code] = name.lower()
codes.append(code)
edges = []
for i in set(codes):
if len(i) > 4:
a, b = i[:-4], i
edges.append((a,b))
g = nx.DiGraph()
g.add_edges_from(edges)
return(g, disease2code, code2name)
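# A sketch of the expected input and output (line format inferred from the
# parsing above, e.g. an assumed line "Alzheimer Disease;C10.228.140.380"):
#
#     g, disease2code, code2name = load_mesh_graph('mtrees2018.bin')
#     disease2code['alzheimer disease']   # -> ['C10.228.140.380', ...]
#     list(g.successors('C10.228.140'))   # child codes one level down the tree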
if __name__ == '__main__':
description = 'Expand associations in MeSH tree'
parser = argparse.ArgumentParser(description=description)
parser.add_argument('-m', required=True, dest='mesh',
action='store',help='MeSH File')
parser.add_argument('-i', required=True, dest='infile',action='store',
help='association table')
parser.add_argument('-o', dest='outfile', action='store',
help='outfile',
default= 'expanded.csv')
parser.add_argument('-d', dest = 'direct_evidence', action = 'store',
help = 'DirectEvidence label, default = therapeutic',
default = 'therapeutic')
if len(sys.argv) <= 1:
parser.print_help()
sys.exit(1)
else:
args = parser.parse_args()
meshfile = args.mesh
infile = args.infile
outfile = args.outfile
direct_evidence = args.direct_evidence
## open mesh tree
#meshfile = '/home/italodovalle/flavonoids/data/databases/mesh/mtrees2018.bin'
g, disease2code, code2name = load_mesh_graph(meshfile)
### manual curation of mesh to disease codes
curation = {'MESH:D056486':['C06.552.195','C25.100.562','C25.723.260'],
'MESH:C535575':['C04.557.470.200.400','C04.557.470.700.400'],
'MESH:C538231':['C04.557.470.200.025'],
'MESH:C537171':['C08.381.742'],
'MESH:D024821':['C18.452.394.968.500.570','C18.452.625'],
'MESH:D000860':['C23.888.852.079'],
'MESH:C536572':['C15.378.071.085','C15.378.190.196','C15.378.190',
'C15.378.071.141.560','C15.378.190.625.460'],
'MESH:C537948':['C10.574.500.550','C16.320.400.600','C16.320.565.398.641.509',
'C18.452.584.687.509','C18.452.648.398.641.509'],
'MESH:C563161':['C12.777.419.331','C13.351.968.419.331','C14.907.489.631','C12.777.419.570',
'C13.351.968.419.570'],
'MESH:C562839':['C04.588.894.797.520','C08.381.540','C08.785.520',
'C04.557.470.035.510','C04.557.470.660.510'],
                'MESH:C538339':['C04.557.470.200','C04.588.443.665.710.650',
                                'C07.550.350.650','C07.550.745.650',
                                'C09.647.710.650','C09.775.350.650','C09.775.549.650'],
'MESH:C564616':['C10.228.140.252.190.530','C10.228.140.252.700.700',
'C10.228.854.787.875','C10.574.500.825.700',
'C10.597.350.090.500.530','C16.320.400.780.875'],
'MESH:C536914':['C04.588.322.894','C04.588.443.915',
'C19.344.894','C19.874.788',
'C04.557.465.625.650.240','C04.557.470.200.025.370',
'C04.557.580.625.650.240']
}
ctd = pd.read_csv(infile,index_col = 0)
print ('%d Chemical-Disease Associations'%ctd.shape[0])
ctd['disease'] = [i.lower() for i in ctd['disease']]
## format cols in association file
### ['chemical', 'disease', 'DirectEvidence']
ctd = ctd[~ctd.DirectEvidence.isnull()]
ctd = ctd[ctd.DirectEvidence == direct_evidence]
## all possible chemical disease associations
table = defaultdict(dict)
c = 0
for i in set(ctd.chemical):
for j in set(disease2code.keys()):
table[c]['chemical'] = i
table[c]['disease'] = j
table[c]['DirectEvidence'] = float('nan')
c = c + 1
table = pd.DataFrame.from_dict(table, orient='index')
merged = pd.merge(ctd, table,on = ['chemical', 'disease'],how='outer')
merged['DirectEvidence'] = float('nan')
merged.loc[(~merged['DirectEvidence_x'].isnull()), 'DirectEvidence'] = direct_evidence
merged = merged[['chemical', 'disease', 'DirectEvidence']]
print (merged[~merged.DirectEvidence.isnull()].shape[0], 'interactions DirectEvidence')
print (len(set(merged[~merged.DirectEvidence.isnull()].chemical)), 'chemicals DirectEvidence')
print (len(set(merged[~merged.DirectEvidence.isnull()].disease)), 'diseases DirectEvidence')
print ('\nCreating chemical2code matrix\n')
chemical2code = defaultdict(dict)
pbar = ProgressBar()
for i in pbar(merged.index):
disease = merged.disease.loc[i]
if disease in disease2code.keys():
chemical = merged.chemical.loc[i]
## all possible codes for the disease
codes = disease2code[disease]
for code in codes:
if merged.DirectEvidence.loc[i] == direct_evidence:
chemical2code[chemical][code] = 1
else:
chemical2code[chemical][code] = 0
chemical2code = | pd.DataFrame.from_dict(chemical2code) | pandas.DataFrame.from_dict |
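    # Layout of the resulting matrix (a sketch): pd.DataFrame.from_dict with the
    # default orient='columns' puts the chemicals on the columns and the MeSH
    # codes on the index, with 1 marking a DirectEvidence pair and 0 any other
    # candidate pair, e.g. (hypothetical chemical name):
    #
    #     chemical2code.loc['C04.557.470.200', 'quercetin']   # -> 1 or 0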
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 30 10:30:28 2018
@author: suvod
"""
from __future__ import division
import sys
sys.path.append("..")
from api import git_access,api_access
from git_log import git2repo,buggy_commit
import json
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import math
import os
import re
import networkx as nx
import platform
from os.path import dirname as up
class git2data(object):
def __init__(self,access_token,repo_owner,source_type,git_url,api_base_url,repo_name):
self.repo_name = repo_name
if platform.system() == 'Darwin' or platform.system() == 'Linux':
self.data_path = up(os.getcwd()) + '/data/'
else:
self.data_path = up(os.getcwd()) + '\\data\\'
if not os.path.exists(self.data_path):
os.makedirs(self.data_path)
self.git_client = api_access.git_api_access(access_token,repo_owner,source_type,git_url,api_base_url,repo_name)
self.git_repo = git2repo.git2repo(git_url,repo_name)
print('giturl:',git_url)
self.repo = self.git_repo.clone_repo()
def get_api_data(self):
self.git_issues = self.git_client.get_issues(url_type = 'issues',url_details = '')
self.git_releases = self.git_client.get_releases(url_type = 'releases',url_details = '')
self.git_issue_events = self.git_client.get_events(url_type = 'issues',url_details = 'events')
self.git_issue_comments = self.git_client.get_comments(url_type = 'issues',url_details = 'comments')
self.user_map = self.git_client.get_users()
def get_commit_data(self):
#print("Inside get_commit_data in git2data")
self.git_commits = self.git_repo.get_commits()
def get_committed_files(self):
#print("Inside get_commit_data in git2data")
self.git_committed_files = self.git_repo.get_committed_files()
return self.git_committed_files
def create_link(self):
issue_df = | pd.DataFrame(self.git_issues, columns = ['Issue_number','user_logon','author_type','Desc','title','lables']) | pandas.DataFrame |
import pandas as pd
import numpy as np
import requests as rq
import pandas_datareader.data as web
from io import StringIO
# download market benchmark data
# indices: ^dji, ^spx
class MarketData:
def __init__(self, datafile='', index='^spx'):
self.datafile = datafile
self.index = index
self._date_fmt = '{:%Y%m%d}'
# convert dates to required format
def _dates(self, date):
return self._date_fmt.format(date)
    # start_date, end_date in yyyymmdd format
def _get_market_index(self, start_date, end_date):
url = "https://stooq.com/q/d/l/?s={}&d1={}&d2={}&i=d"
url = url.format(self.index, start_date, end_date)
# get content
content = rq.get(url=url, verify=False).content
df = pd.read_csv(StringIO(content.decode('utf8')))
df.rename(columns={'Open': 'open', 'High': 'high', 'Low': 'low', 'Close': 'close', 'Volume': 'volume'},
inplace=True)
# check if empty - e.g. update existing over weekend
try:
df['Date'] = pd.to_datetime(df['Date'])
df.set_index('Date', inplace=True, drop=True)
        except Exception:
            print('Warning: Market index data is empty!')
return df
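    # Sketch of the frame returned by _get_market_index (column names follow
    # the rename above; values come from the Stooq CSV response):
    #
    #     index:   'Date' (DatetimeIndex)
    #     columns: ['open', 'high', 'low', 'close', 'volume']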
    # returns a panel of historical prices via pandas-datareader (Tiingo API)
def _get_historical_prices(self, tickers, start_date, end_date):
        # Tiingo returns a MultiIndex DataFrame, so we need to convert it to a
        # panelframe consistent with other routines
pf = web.DataReader(tickers, 'tiingo', start_date, end_date)
pf = pf.to_panel()
pf = pf.swapaxes(1, 2)
### STOOQ section - working, but stooq has strict daily limits
# # need to append ".US" to every symbol to read from stooq
# # start and end dates are not implemented for stooq
# st = StooqDailyReader(
# symbols=[i+'.US' for i in tickers],
# start=start_date, end=end_date,
# retry_count=3, pause=0.001, session=None, chunksize=25)
# pf = st.read()
# pf = pf.astype(np.float32)
# # change tickers back to Robinhood style
# pf.minor_axis = [i[:-3] for i in pf.minor_axis]
# st.close()
return pf
# return all stocks and index in one panel
def download_save_market_data(self, tickers, start_date, end_date,
update_existing=False):
start_date_str = self._date_fmt.format(start_date)
end_date_str = self._date_fmt.format(end_date)
print("Downloading market data for {}-{}".format(
start_date_str, end_date_str))
# add market index
pf = self._get_historical_prices(
tickers,
start_date.date(),
end_date.date())
market_data = self._get_market_index(
start_date_str, end_date_str)
pf.loc[:, :, 'market'] = market_data
if update_existing:
new_dict = {}
pf_old = pd.read_hdf(self.datafile, 'market')
pf_new = | pd.concat([pf_old, pf], axis=1) | pandas.concat |
# -*- coding: utf-8 -*-
import re
import warnings
from datetime import timedelta
from itertools import product
import pytest
import numpy as np
import pandas as pd
from pandas import (CategoricalIndex, DataFrame, Index, MultiIndex,
compat, date_range, period_range)
from pandas.compat import PY3, long, lrange, lzip, range, u, PYPY
from pandas.errors import PerformanceWarning, UnsortedIndexError
from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas.core.indexes.base import InvalidIndexError
from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike
from pandas._libs.tslib import Timestamp
import pandas.util.testing as tm
from pandas.util.testing import assert_almost_equal, assert_copy
from .common import Base
class TestMultiIndex(Base):
_holder = MultiIndex
_compat_props = ['shape', 'ndim', 'size']
def setup_method(self, method):
major_axis = Index(['foo', 'bar', 'baz', 'qux'])
minor_axis = Index(['one', 'two'])
major_labels = np.array([0, 0, 1, 2, 3, 3])
minor_labels = np.array([0, 1, 0, 1, 0, 1])
self.index_names = ['first', 'second']
self.indices = dict(index=MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels
], names=self.index_names,
verify_integrity=False))
self.setup_indices()
def create_index(self):
return self.index
def test_can_hold_identifiers(self):
idx = self.create_index()
key = idx[0]
assert idx._can_hold_identifiers_and_holds_name(key) is True
def test_boolean_context_compat2(self):
# boolean context compat
# GH7897
i1 = MultiIndex.from_tuples([('A', 1), ('A', 2)])
i2 = MultiIndex.from_tuples([('A', 1), ('A', 3)])
common = i1.intersection(i2)
def f():
if common:
pass
tm.assert_raises_regex(ValueError, 'The truth value of a', f)
def test_labels_dtypes(self):
# GH 8456
i = MultiIndex.from_tuples([('A', 1), ('A', 2)])
assert i.labels[0].dtype == 'int8'
assert i.labels[1].dtype == 'int8'
i = MultiIndex.from_product([['a'], range(40)])
assert i.labels[1].dtype == 'int8'
i = MultiIndex.from_product([['a'], range(400)])
assert i.labels[1].dtype == 'int16'
i = MultiIndex.from_product([['a'], range(40000)])
assert i.labels[1].dtype == 'int32'
i = pd.MultiIndex.from_product([['a'], range(1000)])
assert (i.labels[0] >= 0).all()
assert (i.labels[1] >= 0).all()
def test_where(self):
i = MultiIndex.from_tuples([('A', 1), ('A', 2)])
def f():
i.where(True)
pytest.raises(NotImplementedError, f)
def test_where_array_like(self):
i = MultiIndex.from_tuples([('A', 1), ('A', 2)])
klasses = [list, tuple, np.array, pd.Series]
cond = [False, True]
for klass in klasses:
def f():
return i.where(klass(cond))
pytest.raises(NotImplementedError, f)
def test_repeat(self):
reps = 2
numbers = [1, 2, 3]
names = np.array(['foo', 'bar'])
m = MultiIndex.from_product([
numbers, names], names=names)
expected = MultiIndex.from_product([
numbers, names.repeat(reps)], names=names)
tm.assert_index_equal(m.repeat(reps), expected)
with tm.assert_produces_warning(FutureWarning):
result = m.repeat(n=reps)
tm.assert_index_equal(result, expected)
def test_numpy_repeat(self):
reps = 2
numbers = [1, 2, 3]
names = np.array(['foo', 'bar'])
m = MultiIndex.from_product([
numbers, names], names=names)
expected = MultiIndex.from_product([
numbers, names.repeat(reps)], names=names)
tm.assert_index_equal(np.repeat(m, reps), expected)
msg = "the 'axis' parameter is not supported"
tm.assert_raises_regex(
ValueError, msg, np.repeat, m, reps, axis=1)
def test_set_name_methods(self):
# so long as these are synonyms, we don't need to test set_names
assert self.index.rename == self.index.set_names
new_names = [name + "SUFFIX" for name in self.index_names]
ind = self.index.set_names(new_names)
assert self.index.names == self.index_names
assert ind.names == new_names
with tm.assert_raises_regex(ValueError, "^Length"):
ind.set_names(new_names + new_names)
new_names2 = [name + "SUFFIX2" for name in new_names]
res = ind.set_names(new_names2, inplace=True)
assert res is None
assert ind.names == new_names2
# set names for specific level (# GH7792)
ind = self.index.set_names(new_names[0], level=0)
assert self.index.names == self.index_names
assert ind.names == [new_names[0], self.index_names[1]]
res = ind.set_names(new_names2[0], level=0, inplace=True)
assert res is None
assert ind.names == [new_names2[0], self.index_names[1]]
# set names for multiple levels
ind = self.index.set_names(new_names, level=[0, 1])
assert self.index.names == self.index_names
assert ind.names == new_names
res = ind.set_names(new_names2, level=[0, 1], inplace=True)
assert res is None
assert ind.names == new_names2
@pytest.mark.parametrize('inplace', [True, False])
def test_set_names_with_nlevel_1(self, inplace):
# GH 21149
# Ensure that .set_names for MultiIndex with
# nlevels == 1 does not raise any errors
expected = pd.MultiIndex(levels=[[0, 1]],
labels=[[0, 1]],
names=['first'])
m = pd.MultiIndex.from_product([[0, 1]])
result = m.set_names('first', level=0, inplace=inplace)
if inplace:
result = m
tm.assert_index_equal(result, expected)
def test_set_levels_labels_directly(self):
# setting levels/labels directly raises AttributeError
levels = self.index.levels
new_levels = [[lev + 'a' for lev in level] for level in levels]
labels = self.index.labels
major_labels, minor_labels = labels
major_labels = [(x + 1) % 3 for x in major_labels]
minor_labels = [(x + 1) % 1 for x in minor_labels]
new_labels = [major_labels, minor_labels]
with pytest.raises(AttributeError):
self.index.levels = new_levels
with pytest.raises(AttributeError):
self.index.labels = new_labels
def test_set_levels(self):
# side note - you probably wouldn't want to use levels and labels
# directly like this - but it is possible.
levels = self.index.levels
new_levels = [[lev + 'a' for lev in level] for level in levels]
def assert_matching(actual, expected, check_dtype=False):
# avoid specifying internal representation
# as much as possible
assert len(actual) == len(expected)
for act, exp in zip(actual, expected):
act = np.asarray(act)
exp = np.asarray(exp)
tm.assert_numpy_array_equal(act, exp, check_dtype=check_dtype)
# level changing [w/o mutation]
ind2 = self.index.set_levels(new_levels)
assert_matching(ind2.levels, new_levels)
assert_matching(self.index.levels, levels)
# level changing [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels, inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, new_levels)
# level changing specific level [w/o mutation]
ind2 = self.index.set_levels(new_levels[0], level=0)
assert_matching(ind2.levels, [new_levels[0], levels[1]])
assert_matching(self.index.levels, levels)
ind2 = self.index.set_levels(new_levels[1], level=1)
assert_matching(ind2.levels, [levels[0], new_levels[1]])
assert_matching(self.index.levels, levels)
# level changing multiple levels [w/o mutation]
ind2 = self.index.set_levels(new_levels, level=[0, 1])
assert_matching(ind2.levels, new_levels)
assert_matching(self.index.levels, levels)
# level changing specific level [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels[0], level=0, inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, [new_levels[0], levels[1]])
assert_matching(self.index.levels, levels)
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels[1], level=1, inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, [levels[0], new_levels[1]])
assert_matching(self.index.levels, levels)
# level changing multiple levels [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels, level=[0, 1],
inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, new_levels)
assert_matching(self.index.levels, levels)
# illegal level changing should not change levels
# GH 13754
original_index = self.index.copy()
for inplace in [True, False]:
with tm.assert_raises_regex(ValueError, "^On"):
self.index.set_levels(['c'], level=0, inplace=inplace)
assert_matching(self.index.levels, original_index.levels,
check_dtype=True)
with tm.assert_raises_regex(ValueError, "^On"):
self.index.set_labels([0, 1, 2, 3, 4, 5], level=0,
inplace=inplace)
assert_matching(self.index.labels, original_index.labels,
check_dtype=True)
with tm.assert_raises_regex(TypeError, "^Levels"):
self.index.set_levels('c', level=0, inplace=inplace)
assert_matching(self.index.levels, original_index.levels,
check_dtype=True)
with tm.assert_raises_regex(TypeError, "^Labels"):
self.index.set_labels(1, level=0, inplace=inplace)
assert_matching(self.index.labels, original_index.labels,
check_dtype=True)
def test_set_labels(self):
# side note - you probably wouldn't want to use levels and labels
# directly like this - but it is possible.
labels = self.index.labels
major_labels, minor_labels = labels
major_labels = [(x + 1) % 3 for x in major_labels]
minor_labels = [(x + 1) % 1 for x in minor_labels]
new_labels = [major_labels, minor_labels]
def assert_matching(actual, expected):
# avoid specifying internal representation
# as much as possible
assert len(actual) == len(expected)
for act, exp in zip(actual, expected):
act = np.asarray(act)
exp = np.asarray(exp, dtype=np.int8)
tm.assert_numpy_array_equal(act, exp)
# label changing [w/o mutation]
ind2 = self.index.set_labels(new_labels)
assert_matching(ind2.labels, new_labels)
assert_matching(self.index.labels, labels)
# label changing [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels, inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, new_labels)
# label changing specific level [w/o mutation]
ind2 = self.index.set_labels(new_labels[0], level=0)
assert_matching(ind2.labels, [new_labels[0], labels[1]])
assert_matching(self.index.labels, labels)
ind2 = self.index.set_labels(new_labels[1], level=1)
assert_matching(ind2.labels, [labels[0], new_labels[1]])
assert_matching(self.index.labels, labels)
# label changing multiple levels [w/o mutation]
ind2 = self.index.set_labels(new_labels, level=[0, 1])
assert_matching(ind2.labels, new_labels)
assert_matching(self.index.labels, labels)
# label changing specific level [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels[0], level=0, inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, [new_labels[0], labels[1]])
assert_matching(self.index.labels, labels)
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels[1], level=1, inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, [labels[0], new_labels[1]])
assert_matching(self.index.labels, labels)
# label changing multiple levels [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels, level=[0, 1],
inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, new_labels)
assert_matching(self.index.labels, labels)
# label changing for levels of different magnitude of categories
ind = pd.MultiIndex.from_tuples([(0, i) for i in range(130)])
new_labels = range(129, -1, -1)
expected = pd.MultiIndex.from_tuples(
[(0, i) for i in new_labels])
# [w/o mutation]
result = ind.set_labels(labels=new_labels, level=1)
assert result.equals(expected)
# [w/ mutation]
result = ind.copy()
result.set_labels(labels=new_labels, level=1, inplace=True)
assert result.equals(expected)
def test_set_levels_labels_names_bad_input(self):
levels, labels = self.index.levels, self.index.labels
names = self.index.names
with tm.assert_raises_regex(ValueError, 'Length of levels'):
self.index.set_levels([levels[0]])
with tm.assert_raises_regex(ValueError, 'Length of labels'):
self.index.set_labels([labels[0]])
with tm.assert_raises_regex(ValueError, 'Length of names'):
self.index.set_names([names[0]])
# shouldn't scalar data error, instead should demand list-like
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
self.index.set_levels(levels[0])
# shouldn't scalar data error, instead should demand list-like
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
self.index.set_labels(labels[0])
# shouldn't scalar data error, instead should demand list-like
with tm.assert_raises_regex(TypeError, 'list-like'):
self.index.set_names(names[0])
# should have equal lengths
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
self.index.set_levels(levels[0], level=[0, 1])
with tm.assert_raises_regex(TypeError, 'list-like'):
self.index.set_levels(levels, level=0)
# should have equal lengths
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
self.index.set_labels(labels[0], level=[0, 1])
with tm.assert_raises_regex(TypeError, 'list-like'):
self.index.set_labels(labels, level=0)
# should have equal lengths
with tm.assert_raises_regex(ValueError, 'Length of names'):
self.index.set_names(names[0], level=[0, 1])
with tm.assert_raises_regex(TypeError, 'string'):
self.index.set_names(names, level=0)
def test_set_levels_categorical(self):
# GH13854
index = MultiIndex.from_arrays([list("xyzx"), [0, 1, 2, 3]])
for ordered in [False, True]:
cidx = CategoricalIndex(list("bac"), ordered=ordered)
result = index.set_levels(cidx, 0)
expected = MultiIndex(levels=[cidx, [0, 1, 2, 3]],
labels=index.labels)
tm.assert_index_equal(result, expected)
result_lvl = result.get_level_values(0)
expected_lvl = CategoricalIndex(list("bacb"),
categories=cidx.categories,
ordered=cidx.ordered)
tm.assert_index_equal(result_lvl, expected_lvl)
def test_metadata_immutable(self):
levels, labels = self.index.levels, self.index.labels
# shouldn't be able to set at either the top level or base level
mutable_regex = re.compile('does not support mutable operations')
with tm.assert_raises_regex(TypeError, mutable_regex):
levels[0] = levels[0]
with tm.assert_raises_regex(TypeError, mutable_regex):
levels[0][0] = levels[0][0]
# ditto for labels
with tm.assert_raises_regex(TypeError, mutable_regex):
labels[0] = labels[0]
with tm.assert_raises_regex(TypeError, mutable_regex):
labels[0][0] = labels[0][0]
# and for names
names = self.index.names
with tm.assert_raises_regex(TypeError, mutable_regex):
names[0] = names[0]
def test_inplace_mutation_resets_values(self):
levels = [['a', 'b', 'c'], [4]]
levels2 = [[1, 2, 3], ['a']]
labels = [[0, 1, 0, 2, 2, 0], [0, 0, 0, 0, 0, 0]]
mi1 = MultiIndex(levels=levels, labels=labels)
mi2 = MultiIndex(levels=levels2, labels=labels)
vals = mi1.values.copy()
vals2 = mi2.values.copy()
assert mi1._tuples is not None
# Make sure level setting works
new_vals = mi1.set_levels(levels2).values
tm.assert_almost_equal(vals2, new_vals)
# Non-inplace doesn't kill _tuples [implementation detail]
tm.assert_almost_equal(mi1._tuples, vals)
# ...and values is still same too
tm.assert_almost_equal(mi1.values, vals)
# Inplace should kill _tuples
mi1.set_levels(levels2, inplace=True)
tm.assert_almost_equal(mi1.values, vals2)
# Make sure label setting works too
labels2 = [[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0]]
exp_values = np.empty((6,), dtype=object)
exp_values[:] = [(long(1), 'a')] * 6
# Must be 1d array of tuples
assert exp_values.shape == (6,)
new_values = mi2.set_labels(labels2).values
# Not inplace shouldn't change
tm.assert_almost_equal(mi2._tuples, vals2)
# Should have correct values
tm.assert_almost_equal(exp_values, new_values)
# ...and again setting inplace should kill _tuples, etc
mi2.set_labels(labels2, inplace=True)
tm.assert_almost_equal(mi2.values, new_values)
def test_copy_in_constructor(self):
levels = np.array(["a", "b", "c"])
labels = np.array([1, 1, 2, 0, 0, 1, 1])
val = labels[0]
mi = MultiIndex(levels=[levels, levels], labels=[labels, labels],
copy=True)
assert mi.labels[0][0] == val
labels[0] = 15
assert mi.labels[0][0] == val
val = levels[0]
levels[0] = "PANDA"
assert mi.levels[0][0] == val
def test_set_value_keeps_names(self):
# motivating example from #3742
lev1 = ['hans', 'hans', 'hans', 'grethe', 'grethe', 'grethe']
lev2 = ['1', '2', '3'] * 2
idx = pd.MultiIndex.from_arrays([lev1, lev2], names=['Name', 'Number'])
df = pd.DataFrame(
np.random.randn(6, 4),
columns=['one', 'two', 'three', 'four'],
index=idx)
df = df.sort_index()
assert df._is_copy is None
assert df.index.names == ('Name', 'Number')
df.at[('grethe', '4'), 'one'] = 99.34
assert df._is_copy is None
assert df.index.names == ('Name', 'Number')
def test_copy_names(self):
# Check that adding a "names" parameter to the copy is honored
# GH14302
multi_idx = pd.Index([(1, 2), (3, 4)], names=['MyName1', 'MyName2'])
multi_idx1 = multi_idx.copy()
assert multi_idx.equals(multi_idx1)
assert multi_idx.names == ['MyName1', 'MyName2']
assert multi_idx1.names == ['MyName1', 'MyName2']
multi_idx2 = multi_idx.copy(names=['NewName1', 'NewName2'])
assert multi_idx.equals(multi_idx2)
assert multi_idx.names == ['MyName1', 'MyName2']
assert multi_idx2.names == ['NewName1', 'NewName2']
multi_idx3 = multi_idx.copy(name=['NewName1', 'NewName2'])
assert multi_idx.equals(multi_idx3)
assert multi_idx.names == ['MyName1', 'MyName2']
assert multi_idx3.names == ['NewName1', 'NewName2']
def test_names(self):
# names are assigned in setup
names = self.index_names
level_names = [level.name for level in self.index.levels]
assert names == level_names
# setting bad names on existing
index = self.index
tm.assert_raises_regex(ValueError, "^Length of names",
setattr, index, "names",
list(index.names) + ["third"])
tm.assert_raises_regex(ValueError, "^Length of names",
setattr, index, "names", [])
# initializing with bad names (should always be equivalent)
major_axis, minor_axis = self.index.levels
major_labels, minor_labels = self.index.labels
tm.assert_raises_regex(ValueError, "^Length of names", MultiIndex,
levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels],
names=['first'])
tm.assert_raises_regex(ValueError, "^Length of names", MultiIndex,
levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels],
names=['first', 'second', 'third'])
# names are assigned
index.names = ["a", "b"]
ind_names = list(index.names)
level_names = [level.name for level in index.levels]
assert ind_names == level_names
def test_astype(self):
expected = self.index.copy()
actual = self.index.astype('O')
assert_copy(actual.levels, expected.levels)
assert_copy(actual.labels, expected.labels)
self.check_level_names(actual, expected.names)
with tm.assert_raises_regex(TypeError, "^Setting.*dtype.*object"):
self.index.astype(np.dtype(int))
@pytest.mark.parametrize('ordered', [True, False])
def test_astype_category(self, ordered):
# GH 18630
msg = '> 1 ndim Categorical are not supported at this time'
with tm.assert_raises_regex(NotImplementedError, msg):
self.index.astype(CategoricalDtype(ordered=ordered))
if ordered is False:
# dtype='category' defaults to ordered=False, so only test once
with tm.assert_raises_regex(NotImplementedError, msg):
self.index.astype('category')
def test_constructor_single_level(self):
result = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux']],
labels=[[0, 1, 2, 3]], names=['first'])
assert isinstance(result, MultiIndex)
expected = Index(['foo', 'bar', 'baz', 'qux'], name='first')
tm.assert_index_equal(result.levels[0], expected)
assert result.names == ['first']
def test_constructor_no_levels(self):
tm.assert_raises_regex(ValueError, "non-zero number "
"of levels/labels",
MultiIndex, levels=[], labels=[])
both_re = re.compile('Must pass both levels and labels')
with tm.assert_raises_regex(TypeError, both_re):
MultiIndex(levels=[])
with tm.assert_raises_regex(TypeError, both_re):
MultiIndex(labels=[])
def test_constructor_mismatched_label_levels(self):
labels = [np.array([1]), np.array([2]), np.array([3])]
levels = ["a"]
tm.assert_raises_regex(ValueError, "Length of levels and labels "
"must be the same", MultiIndex,
levels=levels, labels=labels)
length_error = re.compile('>= length of level')
label_error = re.compile(r'Unequal label lengths: \[4, 2\]')
# important to check that it's looking at the right thing.
with tm.assert_raises_regex(ValueError, length_error):
MultiIndex(levels=[['a'], ['b']],
labels=[[0, 1, 2, 3], [0, 3, 4, 1]])
with tm.assert_raises_regex(ValueError, label_error):
MultiIndex(levels=[['a'], ['b']], labels=[[0, 0, 0, 0], [0, 0]])
# external API
with tm.assert_raises_regex(ValueError, length_error):
self.index.copy().set_levels([['a'], ['b']])
with tm.assert_raises_regex(ValueError, label_error):
self.index.copy().set_labels([[0, 0, 0, 0], [0, 0]])
def test_constructor_nonhashable_names(self):
# GH 20527
levels = [[1, 2], [u'one', u'two']]
labels = [[0, 0, 1, 1], [0, 1, 0, 1]]
names = ((['foo'], ['bar']))
message = "MultiIndex.name must be a hashable type"
tm.assert_raises_regex(TypeError, message,
MultiIndex, levels=levels,
labels=labels, names=names)
# With .rename()
mi = MultiIndex(levels=[[1, 2], [u'one', u'two']],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]],
names=('foo', 'bar'))
renamed = [['foor'], ['barr']]
tm.assert_raises_regex(TypeError, message, mi.rename, names=renamed)
# With .set_names()
tm.assert_raises_regex(TypeError, message, mi.set_names, names=renamed)
@pytest.mark.parametrize('names', [['a', 'b', 'a'], ['1', '1', '2'],
['1', 'a', '1']])
def test_duplicate_level_names(self, names):
# GH18872
pytest.raises(ValueError, pd.MultiIndex.from_product,
[[0, 1]] * 3, names=names)
# With .rename()
mi = pd.MultiIndex.from_product([[0, 1]] * 3)
tm.assert_raises_regex(ValueError, "Duplicated level name:",
mi.rename, names)
# With .rename(., level=)
mi.rename(names[0], level=1, inplace=True)
tm.assert_raises_regex(ValueError, "Duplicated level name:",
mi.rename, names[:2], level=[0, 2])
def assert_multiindex_copied(self, copy, original):
# Levels should be (at least, shallow copied)
tm.assert_copy(copy.levels, original.levels)
tm.assert_almost_equal(copy.labels, original.labels)
        # Labels don't matter which way they're copied
tm.assert_almost_equal(copy.labels, original.labels)
assert copy.labels is not original.labels
        # Names don't matter which way they're copied
assert copy.names == original.names
assert copy.names is not original.names
# Sort order should be copied
assert copy.sortorder == original.sortorder
def test_copy(self):
i_copy = self.index.copy()
self.assert_multiindex_copied(i_copy, self.index)
def test_shallow_copy(self):
i_copy = self.index._shallow_copy()
self.assert_multiindex_copied(i_copy, self.index)
def test_view(self):
i_view = self.index.view()
self.assert_multiindex_copied(i_view, self.index)
def check_level_names(self, index, names):
assert [level.name for level in index.levels] == list(names)
def test_changing_names(self):
# names should be applied to levels
level_names = [level.name for level in self.index.levels]
self.check_level_names(self.index, self.index.names)
view = self.index.view()
copy = self.index.copy()
shallow_copy = self.index._shallow_copy()
# changing names should change level names on object
new_names = [name + "a" for name in self.index.names]
self.index.names = new_names
self.check_level_names(self.index, new_names)
# but not on copies
self.check_level_names(view, level_names)
self.check_level_names(copy, level_names)
self.check_level_names(shallow_copy, level_names)
# and copies shouldn't change original
shallow_copy.names = [name + "c" for name in shallow_copy.names]
self.check_level_names(self.index, new_names)
def test_get_level_number_integer(self):
self.index.names = [1, 0]
assert self.index._get_level_number(1) == 0
assert self.index._get_level_number(0) == 1
pytest.raises(IndexError, self.index._get_level_number, 2)
tm.assert_raises_regex(KeyError, 'Level fourth not found',
self.index._get_level_number, 'fourth')
def test_from_arrays(self):
arrays = []
for lev, lab in zip(self.index.levels, self.index.labels):
arrays.append(np.asarray(lev).take(lab))
# list of arrays as input
result = MultiIndex.from_arrays(arrays, names=self.index.names)
tm.assert_index_equal(result, self.index)
# infer correctly
result = MultiIndex.from_arrays([[pd.NaT, Timestamp('20130101')],
['a', 'b']])
assert result.levels[0].equals(Index([Timestamp('20130101')]))
assert result.levels[1].equals(Index(['a', 'b']))
def test_from_arrays_iterator(self):
# GH 18434
arrays = []
for lev, lab in zip(self.index.levels, self.index.labels):
arrays.append(np.asarray(lev).take(lab))
# iterator as input
result = MultiIndex.from_arrays(iter(arrays), names=self.index.names)
tm.assert_index_equal(result, self.index)
# invalid iterator input
with tm.assert_raises_regex(
TypeError, "Input must be a list / sequence of array-likes."):
MultiIndex.from_arrays(0)
def test_from_arrays_index_series_datetimetz(self):
idx1 = pd.date_range('2015-01-01 10:00', freq='D', periods=3,
tz='US/Eastern')
idx2 = pd.date_range('2015-01-01 10:00', freq='H', periods=3,
tz='Asia/Tokyo')
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_series_timedelta(self):
idx1 = pd.timedelta_range('1 days', freq='D', periods=3)
idx2 = pd.timedelta_range('2 hours', freq='H', periods=3)
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_series_period(self):
idx1 = pd.period_range('2011-01-01', freq='D', periods=3)
idx2 = pd.period_range('2015-01-01', freq='H', periods=3)
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_datetimelike_mixed(self):
idx1 = pd.date_range('2015-01-01 10:00', freq='D', periods=3,
tz='US/Eastern')
idx2 = pd.date_range('2015-01-01 10:00', freq='H', periods=3)
idx3 = pd.timedelta_range('1 days', freq='D', periods=3)
idx4 = pd.period_range('2011-01-01', freq='D', periods=3)
result = pd.MultiIndex.from_arrays([idx1, idx2, idx3, idx4])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
tm.assert_index_equal(result.get_level_values(2), idx3)
tm.assert_index_equal(result.get_level_values(3), idx4)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1),
pd.Series(idx2),
pd.Series(idx3),
pd.Series(idx4)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result2.get_level_values(2), idx3)
tm.assert_index_equal(result2.get_level_values(3), idx4)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_series_categorical(self):
# GH13743
idx1 = pd.CategoricalIndex(list("abcaab"), categories=list("bac"),
ordered=False)
idx2 = pd.CategoricalIndex(list("abcaab"), categories=list("bac"),
ordered=True)
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
result3 = pd.MultiIndex.from_arrays([idx1.values, idx2.values])
tm.assert_index_equal(result3.get_level_values(0), idx1)
tm.assert_index_equal(result3.get_level_values(1), idx2)
def test_from_arrays_empty(self):
# 0 levels
with tm.assert_raises_regex(
ValueError, "Must pass non-zero number of levels/labels"):
MultiIndex.from_arrays(arrays=[])
# 1 level
result = MultiIndex.from_arrays(arrays=[[]], names=['A'])
assert isinstance(result, MultiIndex)
expected = Index([], name='A')
tm.assert_index_equal(result.levels[0], expected)
# N levels
for N in [2, 3]:
arrays = [[]] * N
names = list('ABC')[:N]
result = MultiIndex.from_arrays(arrays=arrays, names=names)
expected = MultiIndex(levels=[[]] * N, labels=[[]] * N,
names=names)
tm.assert_index_equal(result, expected)
def test_from_arrays_invalid_input(self):
invalid_inputs = [1, [1], [1, 2], [[1], 2],
'a', ['a'], ['a', 'b'], [['a'], 'b']]
for i in invalid_inputs:
pytest.raises(TypeError, MultiIndex.from_arrays, arrays=i)
def test_from_arrays_different_lengths(self):
# see gh-13599
idx1 = [1, 2, 3]
idx2 = ['a', 'b']
tm.assert_raises_regex(ValueError, '^all arrays must '
'be same length$',
MultiIndex.from_arrays, [idx1, idx2])
idx1 = []
idx2 = ['a', 'b']
tm.assert_raises_regex(ValueError, '^all arrays must '
'be same length$',
MultiIndex.from_arrays, [idx1, idx2])
idx1 = [1, 2, 3]
idx2 = []
tm.assert_raises_regex(ValueError, '^all arrays must '
'be same length$',
MultiIndex.from_arrays, [idx1, idx2])
def test_from_product(self):
first = ['foo', 'bar', 'buz']
second = ['a', 'b', 'c']
names = ['first', 'second']
result = MultiIndex.from_product([first, second], names=names)
tuples = [('foo', 'a'), ('foo', 'b'), ('foo', 'c'), ('bar', 'a'),
('bar', 'b'), ('bar', 'c'), ('buz', 'a'), ('buz', 'b'),
('buz', 'c')]
expected = MultiIndex.from_tuples(tuples, names=names)
tm.assert_index_equal(result, expected)
def test_from_product_iterator(self):
# GH 18434
first = ['foo', 'bar', 'buz']
second = ['a', 'b', 'c']
names = ['first', 'second']
tuples = [('foo', 'a'), ('foo', 'b'), ('foo', 'c'), ('bar', 'a'),
('bar', 'b'), ('bar', 'c'), ('buz', 'a'), ('buz', 'b'),
('buz', 'c')]
expected = MultiIndex.from_tuples(tuples, names=names)
# iterator as input
result = MultiIndex.from_product(iter([first, second]), names=names)
tm.assert_index_equal(result, expected)
# Invalid non-iterable input
with tm.assert_raises_regex(
TypeError, "Input must be a list / sequence of iterables."):
MultiIndex.from_product(0)
def test_from_product_empty(self):
# 0 levels
with tm.assert_raises_regex(
ValueError, "Must pass non-zero number of levels/labels"):
MultiIndex.from_product([])
# 1 level
result = MultiIndex.from_product([[]], names=['A'])
expected = pd.Index([], name='A')
tm.assert_index_equal(result.levels[0], expected)
# 2 levels
l1 = [[], ['foo', 'bar', 'baz'], []]
l2 = [[], [], ['a', 'b', 'c']]
names = ['A', 'B']
for first, second in zip(l1, l2):
result = MultiIndex.from_product([first, second], names=names)
expected = MultiIndex(levels=[first, second],
labels=[[], []], names=names)
tm.assert_index_equal(result, expected)
# GH12258
names = ['A', 'B', 'C']
for N in range(4):
lvl2 = lrange(N)
result = MultiIndex.from_product([[], lvl2, []], names=names)
expected = MultiIndex(levels=[[], lvl2, []],
labels=[[], [], []], names=names)
tm.assert_index_equal(result, expected)
def test_from_product_invalid_input(self):
invalid_inputs = [1, [1], [1, 2], [[1], 2],
'a', ['a'], ['a', 'b'], [['a'], 'b']]
for i in invalid_inputs:
pytest.raises(TypeError, MultiIndex.from_product, iterables=i)
def test_from_product_datetimeindex(self):
dt_index = date_range('2000-01-01', periods=2)
mi = pd.MultiIndex.from_product([[1, 2], dt_index])
etalon = construct_1d_object_array_from_listlike([(1, pd.Timestamp(
'2000-01-01')), (1, pd.Timestamp('2000-01-02')), (2, pd.Timestamp(
'2000-01-01')), (2, pd.Timestamp('2000-01-02'))])
tm.assert_numpy_array_equal(mi.values, etalon)
def test_from_product_index_series_categorical(self):
# GH13743
first = ['foo', 'bar']
for ordered in [False, True]:
idx = pd.CategoricalIndex(list("abcaab"), categories=list("bac"),
ordered=ordered)
expected = pd.CategoricalIndex(list("abcaab") + list("abcaab"),
categories=list("bac"),
ordered=ordered)
for arr in [idx, pd.Series(idx), idx.values]:
result = pd.MultiIndex.from_product([first, arr])
tm.assert_index_equal(result.get_level_values(1), expected)
def test_values_boxed(self):
tuples = [(1, pd.Timestamp('2000-01-01')), (2, pd.NaT),
(3, pd.Timestamp('2000-01-03')),
(1, pd.Timestamp('2000-01-04')),
(2, pd.Timestamp('2000-01-02')),
(3, pd.Timestamp('2000-01-03'))]
result = pd.MultiIndex.from_tuples(tuples)
expected = construct_1d_object_array_from_listlike(tuples)
tm.assert_numpy_array_equal(result.values, expected)
# Check that code branches for boxed values produce identical results
tm.assert_numpy_array_equal(result.values[:4], result[:4].values)
def test_values_multiindex_datetimeindex(self):
# Test to ensure we hit the boxing / nobox part of MI.values
ints = np.arange(10 ** 18, 10 ** 18 + 5)
naive = pd.DatetimeIndex(ints)
aware = pd.DatetimeIndex(ints, tz='US/Central')
idx = pd.MultiIndex.from_arrays([naive, aware])
result = idx.values
outer = pd.DatetimeIndex([x[0] for x in result])
tm.assert_index_equal(outer, naive)
inner = pd.DatetimeIndex([x[1] for x in result])
tm.assert_index_equal(inner, aware)
# n_lev > n_lab
result = idx[:2].values
outer = pd.DatetimeIndex([x[0] for x in result])
tm.assert_index_equal(outer, naive[:2])
inner = pd.DatetimeIndex([x[1] for x in result])
tm.assert_index_equal(inner, aware[:2])
def test_values_multiindex_periodindex(self):
# Test to ensure we hit the boxing / nobox part of MI.values
ints = np.arange(2007, 2012)
pidx = pd.PeriodIndex(ints, freq='D')
idx = pd.MultiIndex.from_arrays([ints, pidx])
result = idx.values
outer = pd.Int64Index([x[0] for x in result])
tm.assert_index_equal(outer, pd.Int64Index(ints))
inner = pd.PeriodIndex([x[1] for x in result])
tm.assert_index_equal(inner, pidx)
# n_lev > n_lab
result = idx[:2].values
outer = pd.Int64Index([x[0] for x in result])
tm.assert_index_equal(outer, pd.Int64Index(ints[:2]))
inner = pd.PeriodIndex([x[1] for x in result])
tm.assert_index_equal(inner, pidx[:2])
def test_append(self):
result = self.index[:3].append(self.index[3:])
assert result.equals(self.index)
foos = [self.index[:1], self.index[1:3], self.index[3:]]
result = foos[0].append(foos[1:])
assert result.equals(self.index)
# empty
result = self.index.append([])
assert result.equals(self.index)
def test_append_mixed_dtypes(self):
# GH 13660
dti = date_range('2011-01-01', freq='M', periods=3, )
dti_tz = date_range('2011-01-01', freq='M', periods=3, tz='US/Eastern')
pi = period_range('2011-01', freq='M', periods=3)
mi = MultiIndex.from_arrays([[1, 2, 3],
[1.1, np.nan, 3.3],
['a', 'b', 'c'],
dti, dti_tz, pi])
assert mi.nlevels == 6
res = mi.append(mi)
exp = MultiIndex.from_arrays([[1, 2, 3, 1, 2, 3],
[1.1, np.nan, 3.3, 1.1, np.nan, 3.3],
['a', 'b', 'c', 'a', 'b', 'c'],
dti.append(dti),
dti_tz.append(dti_tz),
pi.append(pi)])
tm.assert_index_equal(res, exp)
other = MultiIndex.from_arrays([['x', 'y', 'z'], ['x', 'y', 'z'],
['x', 'y', 'z'], ['x', 'y', 'z'],
['x', 'y', 'z'], ['x', 'y', 'z']])
res = mi.append(other)
exp = MultiIndex.from_arrays([[1, 2, 3, 'x', 'y', 'z'],
[1.1, np.nan, 3.3, 'x', 'y', 'z'],
['a', 'b', 'c', 'x', 'y', 'z'],
dti.append(pd.Index(['x', 'y', 'z'])),
dti_tz.append(pd.Index(['x', 'y', 'z'])),
pi.append(pd.Index(['x', 'y', 'z']))])
tm.assert_index_equal(res, exp)
def test_get_level_values(self):
result = self.index.get_level_values(0)
expected = Index(['foo', 'foo', 'bar', 'baz', 'qux', 'qux'],
name='first')
tm.assert_index_equal(result, expected)
assert result.name == 'first'
result = self.index.get_level_values('first')
expected = self.index.get_level_values(0)
tm.assert_index_equal(result, expected)
# GH 10460
index = MultiIndex(
levels=[CategoricalIndex(['A', 'B']),
CategoricalIndex([1, 2, 3])],
labels=[np.array([0, 0, 0, 1, 1, 1]),
np.array([0, 1, 2, 0, 1, 2])])
exp = CategoricalIndex(['A', 'A', 'A', 'B', 'B', 'B'])
tm.assert_index_equal(index.get_level_values(0), exp)
exp = CategoricalIndex([1, 2, 3, 1, 2, 3])
tm.assert_index_equal(index.get_level_values(1), exp)
def test_get_level_values_int_with_na(self):
# GH 17924
arrays = [['a', 'b', 'b'], [1, np.nan, 2]]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(1)
expected = Index([1, np.nan, 2])
tm.assert_index_equal(result, expected)
arrays = [['a', 'b', 'b'], [np.nan, np.nan, 2]]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(1)
expected = Index([np.nan, np.nan, 2])
tm.assert_index_equal(result, expected)
def test_get_level_values_na(self):
arrays = [[np.nan, np.nan, np.nan], ['a', np.nan, 1]]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(0)
expected = pd.Index([np.nan, np.nan, np.nan])
tm.assert_index_equal(result, expected)
result = index.get_level_values(1)
expected = pd.Index(['a', np.nan, 1])
tm.assert_index_equal(result, expected)
arrays = [['a', 'b', 'b'], pd.DatetimeIndex([0, 1, pd.NaT])]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(1)
expected = pd.DatetimeIndex([0, 1, pd.NaT])
tm.assert_index_equal(result, expected)
arrays = [[], []]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(0)
expected = pd.Index([], dtype=object)
tm.assert_index_equal(result, expected)
def test_get_level_values_all_na(self):
# GH 17924 when level entirely consists of nan
arrays = [[np.nan, np.nan, np.nan], ['a', np.nan, 1]]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(0)
expected = pd.Index([np.nan, np.nan, np.nan], dtype=np.float64)
tm.assert_index_equal(result, expected)
result = index.get_level_values(1)
expected = pd.Index(['a', np.nan, 1], dtype=object)
tm.assert_index_equal(result, expected)
def test_reorder_levels(self):
# this blows up
tm.assert_raises_regex(IndexError, '^Too many levels',
self.index.reorder_levels, [2, 1, 0])
def test_nlevels(self):
assert self.index.nlevels == 2
def test_iter(self):
result = list(self.index)
expected = [('foo', 'one'), ('foo', 'two'), ('bar', 'one'),
('baz', 'two'), ('qux', 'one'), ('qux', 'two')]
assert result == expected
def test_legacy_pickle(self):
if PY3:
pytest.skip("testing for legacy pickles not "
"support on py3")
path = tm.get_data_path('multiindex_v1.pickle')
obj = pd.read_pickle(path)
obj2 = MultiIndex.from_tuples(obj.values)
assert obj.equals(obj2)
res = obj.get_indexer(obj)
exp = np.arange(len(obj), dtype=np.intp)
assert_almost_equal(res, exp)
res = obj.get_indexer(obj2[::-1])
exp = obj.get_indexer(obj[::-1])
exp2 = obj2.get_indexer(obj2[::-1])
assert_almost_equal(res, exp)
assert_almost_equal(exp, exp2)
def test_legacy_v2_unpickle(self):
# 0.7.3 -> 0.8.0 format manage
path = tm.get_data_path('mindex_073.pickle')
obj = pd.read_pickle(path)
obj2 = MultiIndex.from_tuples(obj.values)
assert obj.equals(obj2)
res = obj.get_indexer(obj)
exp = np.arange(len(obj), dtype=np.intp)
assert_almost_equal(res, exp)
res = obj.get_indexer(obj2[::-1])
exp = obj.get_indexer(obj[::-1])
exp2 = obj2.get_indexer(obj2[::-1])
assert_almost_equal(res, exp)
assert_almost_equal(exp, exp2)
def test_roundtrip_pickle_with_tz(self):
# GH 8367
# round-trip of timezone
index = MultiIndex.from_product(
[[1, 2], ['a', 'b'], date_range('20130101', periods=3,
tz='US/Eastern')
], names=['one', 'two', 'three'])
unpickled = tm.round_trip_pickle(index)
assert index.equal_levels(unpickled)
def test_from_tuples_index_values(self):
result = MultiIndex.from_tuples(self.index)
assert (result.values == self.index.values).all()
def test_contains(self):
assert ('foo', 'two') in self.index
assert ('bar', 'two') not in self.index
assert None not in self.index
def test_contains_top_level(self):
midx = MultiIndex.from_product([['A', 'B'], [1, 2]])
assert 'A' in midx
assert 'A' not in midx._engine
def test_contains_with_nat(self):
# MI with a NaT
mi = MultiIndex(levels=[['C'],
pd.date_range('2012-01-01', periods=5)],
labels=[[0, 0, 0, 0, 0, 0], [-1, 0, 1, 2, 3, 4]],
names=[None, 'B'])
assert ('C', pd.Timestamp('2012-01-01')) in mi
for val in mi.values:
assert val in mi
def test_is_all_dates(self):
assert not self.index.is_all_dates
def test_is_numeric(self):
# MultiIndex is never numeric
assert not self.index.is_numeric()
def test_getitem(self):
# scalar
assert self.index[2] == ('bar', 'one')
# slice
result = self.index[2:5]
expected = self.index[[2, 3, 4]]
assert result.equals(expected)
# boolean
result = self.index[[True, False, True, False, True, True]]
result2 = self.index[np.array([True, False, True, False, True, True])]
expected = self.index[[0, 2, 4, 5]]
assert result.equals(expected)
assert result2.equals(expected)
def test_getitem_group_select(self):
sorted_idx, _ = self.index.sortlevel(0)
assert sorted_idx.get_loc('baz') == slice(3, 4)
assert sorted_idx.get_loc('foo') == slice(0, 2)
def test_get_loc(self):
assert self.index.get_loc(('foo', 'two')) == 1
assert self.index.get_loc(('baz', 'two')) == 3
pytest.raises(KeyError, self.index.get_loc, ('bar', 'two'))
pytest.raises(KeyError, self.index.get_loc, 'quux')
pytest.raises(NotImplementedError, self.index.get_loc, 'foo',
method='nearest')
# 3 levels
index = MultiIndex(levels=[Index(lrange(4)), Index(lrange(4)), Index(
lrange(4))], labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array(
[0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 0, 0, 1, 0])])
pytest.raises(KeyError, index.get_loc, (1, 1))
assert index.get_loc((2, 0)) == slice(3, 5)
def test_get_loc_duplicates(self):
index = Index([2, 2, 2, 2])
result = index.get_loc(2)
expected = slice(0, 4)
assert result == expected
# pytest.raises(Exception, index.get_loc, 2)
index = Index(['c', 'a', 'a', 'b', 'b'])
rs = index.get_loc('c')
xp = 0
assert rs == xp
def test_get_value_duplicates(self):
index = MultiIndex(levels=[['D', 'B', 'C'],
[0, 26, 27, 37, 57, 67, 75, 82]],
labels=[[0, 0, 0, 1, 2, 2, 2, 2, 2, 2],
[1, 3, 4, 6, 0, 2, 2, 3, 5, 7]],
names=['tag', 'day'])
assert index.get_loc('D') == slice(0, 3)
with pytest.raises(KeyError):
index._engine.get_value(np.array([]), 'D')
def test_get_loc_level(self):
index = MultiIndex(levels=[Index(lrange(4)), Index(lrange(4)), Index(
lrange(4))], labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array(
[0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 0, 0, 1, 0])])
loc, new_index = index.get_loc_level((0, 1))
expected = slice(1, 2)
exp_index = index[expected].droplevel(0).droplevel(0)
assert loc == expected
assert new_index.equals(exp_index)
loc, new_index = index.get_loc_level((0, 1, 0))
expected = 1
assert loc == expected
assert new_index is None
pytest.raises(KeyError, index.get_loc_level, (2, 2))
index = MultiIndex(levels=[[2000], lrange(4)], labels=[np.array(
[0, 0, 0, 0]), np.array([0, 1, 2, 3])])
result, new_index = index.get_loc_level((2000, slice(None, None)))
expected = slice(None, None)
assert result == expected
assert new_index.equals(index.droplevel(0))
@pytest.mark.parametrize('level', [0, 1])
@pytest.mark.parametrize('null_val', [np.nan, pd.NaT, None])
def test_get_loc_nan(self, level, null_val):
# GH 18485 : NaN in MultiIndex
levels = [['a', 'b'], ['c', 'd']]
key = ['b', 'd']
levels[level] = np.array([0, null_val], dtype=type(null_val))
key[level] = null_val
idx = MultiIndex.from_product(levels)
assert idx.get_loc(tuple(key)) == 3
def test_get_loc_missing_nan(self):
# GH 8569
idx = MultiIndex.from_arrays([[1.0, 2.0], [3.0, 4.0]])
assert isinstance(idx.get_loc(1), slice)
pytest.raises(KeyError, idx.get_loc, 3)
pytest.raises(KeyError, idx.get_loc, np.nan)
pytest.raises(KeyError, idx.get_loc, [np.nan])
@pytest.mark.parametrize('dtype1', [int, float, bool, str])
@pytest.mark.parametrize('dtype2', [int, float, bool, str])
def test_get_loc_multiple_dtypes(self, dtype1, dtype2):
# GH 18520
levels = [np.array([0, 1]).astype(dtype1),
np.array([0, 1]).astype(dtype2)]
idx = pd.MultiIndex.from_product(levels)
assert idx.get_loc(idx[2]) == 2
@pytest.mark.parametrize('level', [0, 1])
@pytest.mark.parametrize('dtypes', [[int, float], [float, int]])
def test_get_loc_implicit_cast(self, level, dtypes):
# GH 18818, GH 15994 : as flat index, cast int to float and vice-versa
levels = [['a', 'b'], ['c', 'd']]
key = ['b', 'd']
lev_dtype, key_dtype = dtypes
levels[level] = np.array([0, 1], dtype=lev_dtype)
key[level] = key_dtype(1)
idx = MultiIndex.from_product(levels)
assert idx.get_loc(tuple(key)) == 3
def test_get_loc_cast_bool(self):
# GH 19086 : int is casted to bool, but not vice-versa
levels = [[False, True], np.arange(2, dtype='int64')]
idx = MultiIndex.from_product(levels)
assert idx.get_loc((0, 1)) == 1
assert idx.get_loc((1, 0)) == 2
pytest.raises(KeyError, idx.get_loc, (False, True))
pytest.raises(KeyError, idx.get_loc, (True, False))
def test_slice_locs(self):
df = tm.makeTimeDataFrame()
stacked = df.stack()
idx = stacked.index
slob = slice(*idx.slice_locs(df.index[5], df.index[15]))
sliced = stacked[slob]
expected = df[5:16].stack()
| tm.assert_almost_equal(sliced.values, expected.values) | pandas.util.testing.assert_almost_equal |
# Copyright IBM All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# DataManager
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
import pandas as pd
from typing import List, Dict, Tuple, Optional
# Typing aliases
Inputs = Dict[str, pd.DataFrame]
Outputs = Dict[str, pd.DataFrame]
class DataManager(object):
"""A DataManager is a container of original scenario and intermediate data.
It typically contains the input and output dictionaries with DataFrames that came from
or will be inserted into a DO scenario.
In addition it will hold any intermediate data.
It holds methods that operate on and convert the data.
When used in combination with an optimization engine, it should not contain the
docplex code that creates or interacts with the docplex Model. (That is the task of the OptimizationEngine.)
One of the reasons to separate the DataManager from the OptimizationEngine is to re-use the DataManager,
e.g. for output visualization notebooks.
A typical DataManager:
* Prepares the input DataFrames (like selecting and renaming columns and indexing) and assigns them to a direct attribute.
* Contains a set of methods that create intermediate data ('pre-processing'). Intermediate data will also be assigned as a direct member property.
"""
def __init__(self, inputs: Optional[Inputs] = None, outputs: Optional[Outputs] = None):
self.inputs = inputs
self.outputs = outputs
return
def prepare_data_frames(self):
if (self.inputs is not None) and (len(self.inputs) > 0):
self.prepare_input_data_frames()
if (self.outputs is not None) and (len(self.outputs) > 0):
self.prepare_output_data_frames()
def prepare_input_data_frames(self):
"""Placeholder to process input data frames, in particular to set the index and
to assign dataframes to a direct property of the DataManager.
Make sure to test if table-name exists in input dict so we can re-use this class in e.g.
DashEnterprise apps where not the whole scenario is loaded.
Example::
if 'MyTable' in self.inputs:
self.my_table = self.inputs['MyTable'].set_index('Id', verify_integrity=True)
"""
pass
def prepare_output_data_frames(self):
"""Placeholder to process output data frames.
Processes the default 'kpis' table.
"""
if 'kpis' in self.outputs and self.outputs['kpis'].shape[0] > 0:
"""Note: for some reason an imported scenario uses 'Name' and 'Value' as column names!"""
df = self.outputs['kpis']
df.columns= df.columns.str.upper()
self.kpis = (df
.set_index(['NAME'], verify_integrity = True)
)
def print_hello(self):
"""FOR TESTING: Print some hello string.
Prints some message. To test reloading of the package from a notebook.
Usage::
(In notebook cell #1)
from dse_do_utils import DataManager
dm = DataManager()
(In cell #2)
dm.print_hello()
Change the test of the string. Upload the module to WSL.
If testing autoreload, rerun the second cell only. Verify it prints the updated string.
If testing imp.reload, rerun the notebook from the start.
"""
print("Hello world #1")
# def pp_parameters(self):
# """
# Deprecated
# Returns:
#
# """
# return self.prep_parameters()
def prep_parameters(self) -> pd.DataFrame:
"""Pre-process the Parameter(s) input table.
Assumes the inputs contains a table named `Parameter` or `Parameters` with key `param` and column `value`.
Otherwise, creates a blank DataFrame instance.
"""
if 'Parameter' in self.inputs.keys():
params = self.inputs['Parameter'].set_index(['param'], verify_integrity=True)
elif 'Parameters' in self.inputs.keys():
params = self.inputs['Parameters'].set_index(['param'], verify_integrity=True)
else:
params = pd.DataFrame(columns=['param', 'value']).set_index('param')
# self.params = params
return params
@staticmethod
def get_parameter_value(params, param_name: str, param_type: Optional[str] = None, default_value=None,
value_format: str = '%Y-%m-%d %H:%M:%S'):
"""
Get value of parameter from the parameter table (DataFrame).
Note that if the input table has a mix of data types in the value column, Pandas can change the data type of a
parameter depending on what other values are used in other rows.
This requires the explicit conversion to the expected data type.
Args:
params (indexed DataFrame with parameters): Index = 'param', value in 'value' column.
param_name (str): Name of parameter.
param_type (str): Type of parameter. Valid param_type values are int, float, str, bool, datetime.
default_value: Value if param_name not in index.
value_format (str): Format for datetime conversion.
Returns:
"""
from datetime import datetime
# assert 'param' in params.index #Not absolutely necessary, as long as single index
assert 'value' in params.columns
if param_name in params.index:
raw_param = params.loc[param_name].value
if param_type == 'int':
# Unfortunately, Pandas may sometimes convert a 0 to a FALSE, etc.
if str(raw_param).lower() in ['false', 'f', 'no', 'n', '0', '0.0']:
param = 0
elif str(raw_param).lower() in ['true', 't', 'yes', 'y', '1', '1.0']:
param = 1
else:
param = int(
float(raw_param)) # by first doing the float, a value of '1.0' will be converted correctly
elif param_type == 'float':
# Unfortunately, Pandas may sometimes convert a 0 to a FALSE, etc.
if str(raw_param).lower() in ['false', 'f', 'no', 'n', '0', '0.0']:
param = 0
elif str(raw_param).lower() in ['true', 't', 'yes', 'y', '1', '1.0']:
param = 1
else:
param = float(raw_param)
elif param_type == 'str':
param = str(raw_param)
elif param_type == 'bool':
# Note that the function `bool()` does not do what you expect!
# Note that the type of the raw_param could be a Python bool, string, or Numpy Bool
# (see http://joergdietrich.github.io/python-numpy-bool-types.html)
# param = (str(raw_param) == 'True')
param = (str(raw_param).lower() in ['true', 'yes', 'y', 't', '1', '1.0'])
elif param_type == 'datetime':
param = datetime.strptime(raw_param, value_format)
else:
param = raw_param
else:
print('Warning: {} not in Parameters'.format(param_name))
# If datetime, the default value can be a string
import six # For Python 2 and 3 compatibility of testing string instance
if param_type == 'datetime' and isinstance(default_value, six.string_types):
param = datetime.strptime(default_value, value_format)
else:
param = default_value
return param
@staticmethod
def df_crossjoin_si(df1: pd.DataFrame, df2: pd.DataFrame, **kwargs) -> pd.DataFrame:
"""
Make a cross join (cartesian product) between two dataframes by using a constant temporary key.
Assumes both input dataframes have a single index column.
Returns a dataframe with a MultiIndex that is the cartesian product of the indices of the input dataframes.
See: https://github.com/pydata/pandas/issues/5401
See https://mkonrad.net/2016/04/16/cross-join--cartesian-product-between-pandas-dataframes.html
Args:
df1 (DataFrame): dataframe 1
df2 (DataFrame): dataframe 2
kwargs keyword arguments that will be passed to pd.merge()
Returns:
(DataFrame) cross join of df1 and df2
"""
# The copy() allows the original df1 to select a sub-set of columns of another DF without a Pandas warning
df1 = df1.copy()
df2 = df2.copy()
df1['_tmpkey'] = 1
df2['_tmpkey'] = 1
res = | pd.merge(df1, df2, on='_tmpkey', **kwargs) | pandas.merge |
"""
@author : <NAME>(<EMAIL>)
"""
#%% Imports
import pandas as pd
import numpy as np
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import Pipeline
from collections import defaultdict
from helpers import pairwiseDistCorr,nn_reg,nn_arch,reconstructionError
from matplotlib import cm
from sklearn.neural_network import MLPClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.random_projection import SparseRandomProjection, GaussianRandomProjection
from itertools import product
out = './RP/'
cmap = cm.get_cmap('Spectral')
np.random.seed(0)
digits = pd.read_hdf('./BASE/datasets.hdf','digits')
digitsX = digits.drop('Class',1).copy().values
digitsY = digits['Class'].copy().values
biodeg = pd.read_hdf('./BASE/datasets.hdf','biodeg')
biodegX = biodeg.drop('Class',1).copy().values
biodegY = biodeg['Class'].copy().values
biodegX = StandardScaler().fit_transform(biodegX)
digitsX= StandardScaler().fit_transform(digitsX)
clusters = [2,5,10,15,20,25,30,35,40]
dims = [2,5,10,15,20,25,30,35,40,45,50,55,60]
dimsb = [2,5,7,10,15,20,25,30,35]
#raise
#%% data for 1
tmp = defaultdict(dict)
for i,dim in product(range(10),dimsb):
rp = SparseRandomProjection(random_state=i, n_components=dim)
tmp[dim][i] = pairwiseDistCorr(rp.fit_transform(biodegX), biodegX)
tmp =pd.DataFrame(tmp).T
tmp.to_csv(out+'biodeg scree1.csv')
tmp = defaultdict(dict)
for i,dim in product(range(10),dims):
rp = SparseRandomProjection(random_state=i, n_components=dim)
tmp[dim][i] = pairwiseDistCorr(rp.fit_transform(digitsX), digitsX)
tmp =pd.DataFrame(tmp).T
tmp.to_csv(out+'digits scree1.csv')
tmp = defaultdict(dict)
for i,dim in product(range(10),dimsb):
rp = SparseRandomProjection(random_state=i, n_components=dim)
rp.fit(biodegX)
tmp[dim][i] = reconstructionError(rp, biodegX)
tmp =pd.DataFrame(tmp).T
tmp.to_csv(out+'biodeg scree2.csv')
tmp = defaultdict(dict)
for i,dim in product(range(10),dims):
rp = SparseRandomProjection(random_state=i, n_components=dim)
rp.fit(digitsX)
tmp[dim][i] = reconstructionError(rp, digitsX)
tmp = | pd.DataFrame(tmp) | pandas.DataFrame |
"""
This file contains python3.6+ syntax!
Feel free to import and use whatever new package you deem necessary.
"""
import dataclasses
import json
import logging
import pandas as pd
from pandas import HDFStore
from typing import Optional, List, Any, Iterable, Tuple, Set
from apicheck.db import ProxyLogs, get_engine
from apicheck.exceptions import APICheckException
from .config import RunningConfig
logger = logging.getLogger("apicheck")
def json_to_columns(df, column):
return df[column].apply(json.loads).apply(pd.Series)
def run(running_config: RunningConfig):
target = HDFStore(running_config.fout)
df = pd.read_sql_table(
"proxy_logs",
"sqlite:///mydatabase.sqlite3",
index_col='id'
)
request = json_to_columns(df, 'request')
request["session"] = df["proxy_session_id"]
response = json_to_columns(df, 'response')
response["session"] = df["proxy_session_id"]
request_headers = request['headers'].apply(pd.Series)
response_headers = response['headers'].apply(pd.Series)
request = request.drop("headers", 1)
request_headers_norm = pd.melt(
request_headers.reset_index(), id_vars=["id"], var_name="header"
)
request_headers_norm = request_headers_norm.dropna()
request_headers_norm["type"] = "request"
response = response.drop("headers", 1)
response_headers_norm = pd.melt(
response_headers.reset_index(), id_vars=["id"], var_name="header"
)
response_headers_norm = response_headers_norm.dropna()
response_headers_norm["type"] = "response"
headers_norm = | pd.concat([request_headers_norm, response_headers_norm]) | pandas.concat |
"""
Copyright 2019 Samsung SDS
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
import pandas as pd
import json
from sklearn.neural_network import MLPClassifier
from sklearn.metrics import accuracy_score
from sklearn.metrics import f1_score
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from brightics.common.repr import BrtcReprBuilder
from brightics.common.repr import strip_margin
from brightics.common.repr import pandasDF2MD
from brightics.common.repr import dict2MD
from brightics.function.utils import _model_dict
from brightics.common.groupby import _function_by_group
from brightics.common.utils import check_required_parameters
from brightics.common.validation import raise_runtime_error
from brightics.common.validation import raise_error
import sklearn.utils as sklearn_utils
from brightics.common.utils import get_default_from_parameters_if_required
from brightics.common.validation import validate
from brightics.common.validation import greater_than, require_param
from brightics.common.validation import greater_than_or_equal_to
from brightics.common.classify_input_type import check_col_type
def mlp_classification_train(table, group_by=None, **params):
check_required_parameters(_mlp_classification_train, params, ['table'])
params = get_default_from_parameters_if_required(params, _mlp_classification_train)
if (params['batch_size_auto']):
param_validation_check = [greater_than(params, 0.0, 'learning_rate_init'),
greater_than(params, 0.0, 'tol')]
else:
if not params['batch_size'] or not isinstance(params['batch_size'], int):
param_validation_check = [require_param('batch_size')]
validate(*param_validation_check)
param_validation_check = [greater_than(params, 0, 'batch_size'),
greater_than(params, 0.0, 'learning_rate_init'),
greater_than(params, 0.0, 'tol')]
validate(*param_validation_check)
if group_by is not None:
grouped_model = _function_by_group(_mlp_classification_train, table, group_by=group_by, **params)
return grouped_model
else:
return _mlp_classification_train(table, **params)
def _mlp_classification_train(table, feature_cols, label_col, hidden_layer_sizes=(100,), activation='relu', solver='adam', alpha=0.0001, batch_size_auto=True, batch_size='auto', learning_rate='constant', learning_rate_init=0.001, max_iter=200, random_state=None, tol=0.0001):
feature_names, features = check_col_type(table, feature_cols)
label = table[label_col]
if(sklearn_utils.multiclass.type_of_target(label) == 'continuous'):
raise_error('0718', 'label_col')
mlp_model = MLPClassifier(hidden_layer_sizes=hidden_layer_sizes, activation=activation, solver=solver, alpha=alpha, batch_size=batch_size, learning_rate=learning_rate, learning_rate_init=learning_rate_init, max_iter=max_iter, shuffle=True, random_state=random_state, tol=tol)
mlp_model.fit(features, label)
predict = mlp_model.predict(features)
_accuracy_score = accuracy_score(label, predict)
_f1_score = f1_score(label, predict, average='micro')
_precision_score = precision_score(label, predict, average='micro')
_recall_score = recall_score(label, predict, average='micro')
# summary = pd.DataFrame({'features': feature_names})
# coef_trans = np.transpose(coefficients)
# summary = pd.concat((summary, pd.DataFrame(coef_trans, columns=classes)), axis=1)
result_table = pd.DataFrame.from_items([
['Metric', ['Accuracy Score', 'F1 Score', 'Precision Score', 'Recall Score']],
['Score', [_accuracy_score, _f1_score, _precision_score, _recall_score]]
])
label_name = {
'hidden_layer_sizes': 'Hidden Layer Sizes',
'activation': 'Activation Function',
'solver': 'Solver',
'alpha': 'Alpha',
'batch_size': 'Batch Size',
'learning_rate': 'Learning Rate',
'learning_rate_init': 'Learning Rate Initial',
'max_iter': 'Max Iteration',
'random_state': 'Seed',
'tol': 'Tolerance'}
get_param = mlp_model.get_params()
param_table = pd.DataFrame.from_items([
['Parameter', list(label_name.values())],
['Value', [get_param[x] for x in list(label_name.keys())]]
])
rb = BrtcReprBuilder()
rb.addMD(strip_margin("""
| ### MLP Classification Result
| {result}
| ### Parameters
| {list_parameters}
""".format(result=pandasDF2MD(result_table), list_parameters=pandasDF2MD(param_table)
)))
model = _model_dict('mlp_classification_model')
model['features'] = feature_cols
model['label'] = label_col
model['intercepts'] = mlp_model.intercepts_
model['coefficients'] = mlp_model.coefs_
model['class'] = mlp_model.classes_
model['loss'] = mlp_model.loss_
model['accuracy_score'] = _accuracy_score
model['f1_score'] = _f1_score
model['precision_score'] = _precision_score
model['recall_score'] = _recall_score
model['activation'] = activation
model['solver'] = solver
model['alpha'] = alpha
model['batch_size'] = batch_size
model['learning_rate'] = learning_rate
model['learning_rate_init'] = learning_rate_init
model['max_iter'] = max_iter
model['random_state'] = random_state
model['tol'] = tol
model['mlp_model'] = mlp_model
model['_repr_brtc_'] = rb.get()
# model['summary'] = summary
return {'model' : model}
def mlp_classification_predict(table, model, **params):
check_required_parameters(_mlp_classification_predict, params, ['table', 'model'])
if '_grouped_data' in model:
return _function_by_group(_mlp_classification_predict, table, model, **params)
else:
return _mlp_classification_predict(table, model, **params)
def _mlp_classification_predict(table, model, prediction_col='prediction', prob_prefix='probability',
output_log_prob=False, log_prob_prefix='log_probability', thresholds=None,
suffix='index'):
feature_cols = model['features']
feature_names, features = check_col_type(table, feature_cols)
mlp_model = model['mlp_model']
classes = mlp_model.classes_
len_classes = len(classes)
is_binary = len_classes == 2
if thresholds is None:
thresholds = np.array([1 / len_classes for _ in classes])
elif isinstance(thresholds, list):
if len(thresholds) == 1 and is_binary and 0 < thresholds[0] < 1:
thresholds = np.array([thresholds[0], 1 - thresholds[0]])
else:
thresholds = np.array(thresholds)
len_thresholds = len(thresholds)
if len_classes > 0 and len_thresholds > 0 and len_classes != len_thresholds:
# FN-0613='%s' must have length equal to the number of classes.
raise_error('0613', ['thresholds'])
prob = mlp_model.predict_proba(features)
prediction = classes[np.argmax(prob / thresholds, axis=1)]
out_table = table.copy()
out_table[prediction_col] = prediction
if suffix == 'index':
suffixes = [i for i, _ in enumerate(classes)]
else:
suffixes = classes
prob_cols = ['{probability_col}_{suffix}'.format(probability_col=prob_prefix, suffix=suffix) for suffix in suffixes]
prob_df = pd.DataFrame(data=prob, columns=prob_cols)
if output_log_prob:
log_prob = mlp_model.predict_log_proba(features)
logprob_cols = ['{log_probability_col}_{suffix}'.format(log_probability_col=log_prob_prefix, suffix=suffix) for suffix in suffixes]
logprob_df = pd.DataFrame(data=log_prob, columns=logprob_cols)
out_table = | pd.concat([out_table, prob_df, logprob_df], axis=1) | pandas.concat |
import os
from pathlib import Path
import pandas as pd
from autofe.feature_engineering.gbdt_feature import LightGBMFeatureTransformer
from autogluon.tabular import TabularPredictor
from pytorch_widedeep.utils import LabelEncoder
from sklearn.feature_selection import SelectFromModel
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
SEED = 42
if __name__ == '__main__':
ROOTDIR = Path('/home/robin/jianzh/autotabular/examples/automlbechmark')
PROCESSED_DATA_DIR = ROOTDIR / 'data/processed_data/adult/'
RESULTS_DIR = ROOTDIR / 'results/adult/autogluon'
if not RESULTS_DIR.is_dir():
os.makedirs(RESULTS_DIR)
adult_data = | pd.read_csv(PROCESSED_DATA_DIR / 'adult_autogluon.csv') | pandas.read_csv |
from datetime import datetime, timedelta
import numpy as np
import pytest
from pandas._libs.tslibs import period as libperiod
import pandas as pd
from pandas import DatetimeIndex, Period, PeriodIndex, Series, notna, period_range
import pandas._testing as tm
class TestGetItem:
def test_ellipsis(self):
# GH#21282
idx = period_range("2011-01-01", "2011-01-31", freq="D", name="idx")
result = idx[...]
assert result.equals(idx)
assert result is not idx
def test_getitem(self):
idx1 = pd.period_range("2011-01-01", "2011-01-31", freq="D", name="idx")
for idx in [idx1]:
result = idx[0]
assert result == pd.Period("2011-01-01", freq="D")
result = idx[-1]
assert result == pd.Period("2011-01-31", freq="D")
result = idx[0:5]
expected = pd.period_range("2011-01-01", "2011-01-05", freq="D", name="idx")
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
assert result.freq == "D"
result = idx[0:10:2]
expected = pd.PeriodIndex(
["2011-01-01", "2011-01-03", "2011-01-05", "2011-01-07", "2011-01-09"],
freq="D",
name="idx",
)
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
assert result.freq == "D"
result = idx[-20:-5:3]
expected = pd.PeriodIndex(
["2011-01-12", "2011-01-15", "2011-01-18", "2011-01-21", "2011-01-24"],
freq="D",
name="idx",
)
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
assert result.freq == "D"
result = idx[4::-1]
expected = PeriodIndex(
["2011-01-05", "2011-01-04", "2011-01-03", "2011-01-02", "2011-01-01"],
freq="D",
name="idx",
)
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
assert result.freq == "D"
def test_getitem_index(self):
idx = period_range("2007-01", periods=10, freq="M", name="x")
result = idx[[1, 3, 5]]
exp = pd.PeriodIndex(["2007-02", "2007-04", "2007-06"], freq="M", name="x")
tm.assert_index_equal(result, exp)
result = idx[[True, True, False, False, False, True, True, False, False, False]]
exp = pd.PeriodIndex(
["2007-01", "2007-02", "2007-06", "2007-07"], freq="M", name="x"
)
tm.assert_index_equal(result, exp)
def test_getitem_partial(self):
rng = period_range("2007-01", periods=50, freq="M")
ts = Series(np.random.randn(len(rng)), rng)
with pytest.raises(KeyError, match=r"^'2006'$"):
ts["2006"]
result = ts["2008"]
assert (result.index.year == 2008).all()
result = ts["2008":"2009"]
assert len(result) == 24
result = ts["2008-1":"2009-12"]
assert len(result) == 24
result = ts["2008Q1":"2009Q4"]
assert len(result) == 24
result = ts[:"2009"]
assert len(result) == 36
result = ts["2009":]
assert len(result) == 50 - 24
exp = result
result = ts[24:]
tm.assert_series_equal(exp, result)
ts = ts[10:].append(ts[10:])
msg = "left slice bound for non-unique label: '2008'"
with pytest.raises(KeyError, match=msg):
ts[slice("2008", "2009")]
def test_getitem_datetime(self):
rng = period_range(start="2012-01-01", periods=10, freq="W-MON")
ts = Series(range(len(rng)), index=rng)
dt1 = datetime(2011, 10, 2)
dt4 = datetime(2012, 4, 20)
rs = ts[dt1:dt4]
tm.assert_series_equal(rs, ts)
def test_getitem_nat(self):
idx = pd.PeriodIndex(["2011-01", "NaT", "2011-02"], freq="M")
assert idx[0] == pd.Period("2011-01", freq="M")
assert idx[1] is pd.NaT
s = pd.Series([0, 1, 2], index=idx)
assert s[pd.NaT] == 1
s = pd.Series(idx, index=idx)
assert s[pd.Period("2011-01", freq="M")] == pd.Period("2011-01", freq="M")
assert s[pd.NaT] is pd.NaT
def test_getitem_list_periods(self):
# GH 7710
rng = period_range(start="2012-01-01", periods=10, freq="D")
ts = Series(range(len(rng)), index=rng)
exp = ts.iloc[[1]]
tm.assert_series_equal(ts[[Period("2012-01-02", freq="D")]], exp)
def test_getitem_seconds(self):
# GH#6716
didx = pd.date_range(start="2013/01/01 09:00:00", freq="S", periods=4000)
pidx = period_range(start="2013/01/01 09:00:00", freq="S", periods=4000)
for idx in [didx, pidx]:
# getitem against index should raise ValueError
values = [
"2014",
"2013/02",
"2013/01/02",
"2013/02/01 9H",
"2013/02/01 09:00",
]
for v in values:
# GH7116
# these show deprecations as we are trying
# to slice with non-integer indexers
# with pytest.raises(IndexError):
# idx[v]
continue
s = Series(np.random.rand(len(idx)), index=idx)
tm.assert_series_equal(s["2013/01/01 10:00"], s[3600:3660])
tm.assert_series_equal(s["2013/01/01 9H"], s[:3600])
for d in ["2013/01/01", "2013/01", "2013"]:
tm.assert_series_equal(s[d], s)
def test_getitem_day(self):
# GH#6716
# Confirm DatetimeIndex and PeriodIndex works identically
didx = pd.date_range(start="2013/01/01", freq="D", periods=400)
pidx = period_range(start="2013/01/01", freq="D", periods=400)
for idx in [didx, pidx]:
# getitem against index should raise ValueError
values = [
"2014",
"2013/02",
"2013/01/02",
"2013/02/01 9H",
"2013/02/01 09:00",
]
for v in values:
# GH7116
# these show deprecations as we are trying
# to slice with non-integer indexers
# with pytest.raises(IndexError):
# idx[v]
continue
s = Series(np.random.rand(len(idx)), index=idx)
tm.assert_series_equal(s["2013/01"], s[0:31])
tm.assert_series_equal(s["2013/02"], s[31:59])
tm.assert_series_equal(s["2014"], s[365:])
invalid = ["2013/02/01 9H", "2013/02/01 09:00"]
for v in invalid:
with pytest.raises(KeyError, match=v):
s[v]
class TestWhere:
@pytest.mark.parametrize("klass", [list, tuple, np.array, Series])
def test_where(self, klass):
i = period_range("20130101", periods=5, freq="D")
cond = [True] * len(i)
expected = i
result = i.where(klass(cond))
tm.assert_index_equal(result, expected)
cond = [False] + [True] * (len(i) - 1)
expected = PeriodIndex([pd.NaT] + i[1:].tolist(), freq="D")
result = i.where(klass(cond))
tm.assert_index_equal(result, expected)
def test_where_other(self):
i = period_range("20130101", periods=5, freq="D")
for arr in [np.nan, pd.NaT]:
result = i.where(notna(i), other=np.nan)
expected = i
tm.assert_index_equal(result, expected)
i2 = i.copy()
i2 = pd.PeriodIndex([pd.NaT, pd.NaT] + i[2:].tolist(), freq="D")
result = i.where(notna(i2), i2)
tm.assert_index_equal(result, i2)
i2 = i.copy()
i2 = pd.PeriodIndex([pd.NaT, pd.NaT] + i[2:].tolist(), freq="D")
result = i.where(notna(i2), i2.values)
tm.assert_index_equal(result, i2)
def test_where_invalid_dtypes(self):
pi = period_range("20130101", periods=5, freq="D")
i2 = pi.copy()
i2 = pd.PeriodIndex([pd.NaT, pd.NaT] + pi[2:].tolist(), freq="D")
with pytest.raises(TypeError, match="Where requires matching dtype"):
pi.where(notna(i2), i2.asi8)
with pytest.raises(TypeError, match="Where requires matching dtype"):
pi.where(notna(i2), i2.asi8.view("timedelta64[ns]"))
with pytest.raises(TypeError, match="Where requires matching dtype"):
pi.where(notna(i2), i2.to_timestamp("S"))
class TestTake:
def test_take(self):
# GH#10295
idx1 = pd.period_range("2011-01-01", "2011-01-31", freq="D", name="idx")
for idx in [idx1]:
result = idx.take([0])
assert result == pd.Period("2011-01-01", freq="D")
result = idx.take([5])
assert result == pd.Period("2011-01-06", freq="D")
result = idx.take([0, 1, 2])
expected = pd.period_range("2011-01-01", "2011-01-03", freq="D", name="idx")
tm.assert_index_equal(result, expected)
assert result.freq == "D"
assert result.freq == expected.freq
result = idx.take([0, 2, 4])
expected = pd.PeriodIndex(
["2011-01-01", "2011-01-03", "2011-01-05"], freq="D", name="idx"
)
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
assert result.freq == "D"
result = idx.take([7, 4, 1])
expected = pd.PeriodIndex(
["2011-01-08", "2011-01-05", "2011-01-02"], freq="D", name="idx"
)
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
assert result.freq == "D"
result = idx.take([3, 2, 5])
expected = PeriodIndex(
["2011-01-04", "2011-01-03", "2011-01-06"], freq="D", name="idx"
)
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
assert result.freq == "D"
result = idx.take([-3, 2, 5])
expected = PeriodIndex(
["2011-01-29", "2011-01-03", "2011-01-06"], freq="D", name="idx"
)
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
assert result.freq == "D"
def test_take_misc(self):
index = period_range(start="1/1/10", end="12/31/12", freq="D", name="idx")
expected = PeriodIndex(
[
datetime(2010, 1, 6),
datetime(2010, 1, 7),
datetime(2010, 1, 9),
datetime(2010, 1, 13),
],
freq="D",
name="idx",
)
taken1 = index.take([5, 6, 8, 12])
taken2 = index[[5, 6, 8, 12]]
for taken in [taken1, taken2]:
tm.assert_index_equal(taken, expected)
assert isinstance(taken, PeriodIndex)
assert taken.freq == index.freq
assert taken.name == expected.name
def test_take_fill_value(self):
# GH#12631
idx = pd.PeriodIndex(
["2011-01-01", "2011-02-01", "2011-03-01"], name="xxx", freq="D"
)
result = idx.take(np.array([1, 0, -1]))
expected = pd.PeriodIndex(
["2011-02-01", "2011-01-01", "2011-03-01"], name="xxx", freq="D"
)
tm.assert_index_equal(result, expected)
# fill_value
result = idx.take(np.array([1, 0, -1]), fill_value=True)
expected = pd.PeriodIndex(
["2011-02-01", "2011-01-01", "NaT"], name="xxx", freq="D"
)
tm.assert_index_equal(result, expected)
# allow_fill=False
result = idx.take(np.array([1, 0, -1]), allow_fill=False, fill_value=True)
expected = pd.PeriodIndex(
["2011-02-01", "2011-01-01", "2011-03-01"], name="xxx", freq="D"
)
tm.assert_index_equal(result, expected)
msg = (
"When allow_fill=True and fill_value is not None, "
"all indices must be >= -1"
)
with pytest.raises(ValueError, match=msg):
idx.take(np.array([1, 0, -2]), fill_value=True)
with pytest.raises(ValueError, match=msg):
idx.take(np.array([1, 0, -5]), fill_value=True)
msg = "index -5 is out of bounds for( axis 0 with)? size 3"
with pytest.raises(IndexError, match=msg):
idx.take(np.array([1, -5]))
class TestIndexing:
def test_get_loc_msg(self):
idx = period_range("2000-1-1", freq="A", periods=10)
bad_period = Period("2012", "A")
with pytest.raises(KeyError, match=r"^Period\('2012', 'A-DEC'\)$"):
idx.get_loc(bad_period)
try:
idx.get_loc(bad_period)
except KeyError as inst:
assert inst.args[0] == bad_period
def test_get_loc_nat(self):
didx = DatetimeIndex(["2011-01-01", "NaT", "2011-01-03"])
pidx = PeriodIndex(["2011-01-01", "NaT", "2011-01-03"], freq="M")
# check DatetimeIndex compat
for idx in [didx, pidx]:
assert idx.get_loc(pd.NaT) == 1
assert idx.get_loc(None) == 1
assert idx.get_loc(float("nan")) == 1
assert idx.get_loc(np.nan) == 1
def test_get_loc(self):
# GH 17717
p0 = pd.Period("2017-09-01")
p1 = | pd.Period("2017-09-02") | pandas.Period |
#!/usr/bin/env python
import pandas as pd
#Full outer join on the 3 tables produced based on the 3 XML Orphanet files.
filename1 = "orphanet_xml1_parsed"
filename4 = "orphanet_xml4_parsed"
filename6 = "orphanet_xml6_parsed"
f1_df = pd.read_csv(filename1, sep="\t", index_col=False, dtype=str, na_filter = False, encoding="latin")
f4_df = pd.read_csv(filename4, sep="\t", index_col=False, dtype=str, na_filter = False, encoding="latin")
f6_df = pd.read_csv(filename6, sep="\t", index_col=False, dtype=str, na_filter = False, encoding="latin")
df_merge1 = | pd.merge(f1_df, f6_df, on='orphanet_id', how='outer') | pandas.merge |
import json
import os
from pathlib import Path
import pandas as pd
dirname = os.path.dirname(__file__)
def load_parsed_dyno(traj_path):
"""Loads the parsed trajectory from the parser
Args:
traj_path ([type]): Path to the parsed traj
Returns:
[type]: trajectory as pd.DataFrame, number of observations as int
"""
with open(traj_path) as f:
data = json.load(f)
time_ser = | pd.DataFrame(data) | pandas.DataFrame |
import pandas as pd
import datetime
import dateutil.parser
import Utils
#
# given a synthea object, covert it to it's equivalent omop objects
#
class SyntheaToOmop6:
#
# Check the model matches
#
def __init__(self, model_schema, utils):
self.model_schema = model_schema
self.utils = utils
#
# synthea patients to omop
#
def patientsToOmop(self, df, personmap, person_id, location_id):
#df = df.sort_values('Id') sort to get better match to original synthea to omop conversion for comparison
df['persontmp'] = df.index + person_id # copy index into a temp column. If accessed directly corrupts dataframe
df['locationtmp'] = df.index + location_id # copy index into a temp column. If accessed directly corrupts dataframe
person = pd.DataFrame(columns=self.model_schema['person'].keys())
person['person_id'] = df['persontmp']
person['gender_concept_id'] = df['GENDER'].apply(self.utils.getGenderConceptCode)
person['year_of_birth'] = df['BIRTHDATE'].apply(self.utils.getYearFromSyntheaDate)
person['month_of_birth'] = df['BIRTHDATE'].apply(self.utils.getMonthFromSyntheaDate)
person['day_of_birth'] = df['BIRTHDATE'].apply(self.utils.getDayFromSyntheaDate)
person['race_concept_id'] = df['RACE'].apply(self.utils.getRaceConceptCode)
person['ethnicity_concept_id'] = df['ETHNICITY'].apply(self.utils.getEthnicityConceptCode)
person['birth_datetime'] = df['BIRTHDATE'].apply(self.utils.getDefaultTimestamp)
person['death_datetime'] = df['DEATHDATE'].apply(self.utils.getDefaultTimestamp)
person['location_id'] = df['locationtmp']
person['gender_source_value'] = df['GENDER']
person['person_source_value'] = df['Id']
person['gender_source_concept_id'] = '0'
person['race_source_value'] = df['RACE']
person['race_source_concept_id'] = '0'
person['ethnicity_source_value'] = df['ETHNICITY']
person['ethnicity_source_concept_id'] = '0'
personappend = pd.DataFrame(columns=["person_id","synthea_patient_id"])
personappend["person_id"] = person['person_id']
personappend["synthea_patient_id"] = df['Id']
personmap = personmap.append(personappend)
person = person[person['gender_concept_id'] != 0] # filter out person's with missing or unknown gender
location = pd.DataFrame(columns=self.model_schema['location'].keys())
location['location_id'] = df['locationtmp']
location['address_1'] = df['ADDRESS']
location['city'] = df['CITY']
location['state'] = df['STATE']
location['zip'] = df['ZIP']
location['county'] = df['COUNTY']
location['location_source_value'] = df['Id']
location['latitude'] = df['LAT']
location['longitude'] = df['LON']
# create empty death dataframe
death = pd.DataFrame()
return (person, location, death, personmap, person_id + len(person), location_id + len(location))
def conditionsToOmop(self, df, srctostdvm, condition_occurrence_id, drug_exposure_id, observation_id, personmap, visitmap):
df['conditiontmp'] = df.index + condition_occurrence_id # copy index into a temp column.
df['drugexposuretmp'] = df.index + drug_exposure_id # copy index into a temp column.
df['observationtmp'] = df.index + observation_id # copy index into a temp column.
df = pd.merge(df, personmap, left_on='PATIENT', right_on='synthea_patient_id', how='left')
df = pd.merge(df, visitmap, left_on='ENCOUNTER', right_on='synthea_encounter_id', how='left')
condition_occurrence = pd.DataFrame(columns=self.model_schema['condition_occurrence'].keys())
condition_occurrence['condition_occurrence_id'] = df['conditiontmp']
condition_occurrence['person_id'] = df['person_id']
srctostdvm_filtered = srctostdvm[(srctostdvm["target_domain_id"]=='Condition') & (srctostdvm["target_vocabulary_id"]=='SNOMED') & (srctostdvm["target_standard_concept"]=='S') & (srctostdvm["target_invalid_reason"].isnull())]
concept_df = pd.merge(df['CODE'],srctostdvm_filtered[['source_code','target_concept_id']], left_on='CODE', right_on='source_code', how='left')
condition_occurrence['condition_concept_id'] = concept_df['target_concept_id'].fillna('0').astype(int)
condition_occurrence['condition_start_date'] = df['START']
condition_occurrence['condition_start_datetime'] = df['START'].apply(self.utils.getDefaultTimestamp)
condition_occurrence['condition_end_date'] = df['STOP']
condition_occurrence['condition_end_datetime'] = df['STOP'].apply(self.utils.getDefaultTimestamp)
condition_occurrence['condition_type_concept_id'] = '32020'
condition_occurrence['stop_reason'] = '0'
condition_occurrence['visit_occurrence_id'] = df['visit_occurrence_id']
condition_occurrence['visit_detail_id'] = '0'
condition_occurrence['condition_source_value'] = df['CODE']
condition_occurrence['condition_source_concept_id'] = df['CODE']
drug_exposure = pd.DataFrame(columns=self.model_schema['drug_exposure'].keys())
drug_exposure['drug_exposure_id'] = df['drugexposuretmp']
drug_exposure['person_id'] = df['person_id']
srctostdvm_filtered = srctostdvm[(srctostdvm["target_domain_id"]=='Drug') & (srctostdvm["target_vocabulary_id"]=='RxNorm') & (srctostdvm["target_standard_concept"]=='S') & (srctostdvm["target_invalid_reason"].isnull())]
concept_df = | pd.merge(df['CODE'],srctostdvm_filtered[['source_code','target_concept_id']], left_on='CODE', right_on='source_code', how='left') | pandas.merge |
#!/usr/bin/env python
# coding: utf-8
# # 1 Compiling notebook 2 outputs
# In[1]:
import configparser
import glob
import json
import math
import numpy as np
import pandas as pd
import re
from utils.misc.regex_block import MutationFinder, TmVar, CustomWBregex, normalize_mutations
with open("data/model_output/processed/temp_paper_mut_count.json", "w") as outfile:
json.dump(paper_mut_count, outfile)
print('All', ner_count, 'NER data rows were ignored. Only', regex_count, 'regex data rows were used.')
# saving things
data = pd.DataFrame(data[:], columns=['WBPaper ID', 'Method', 'Genes', '*Gene-Variant combo ', 'Mutations', 'Normalized Mutations', 'Sentence'])
data.to_csv("data/model_output/processed/snippets_2.csv", index=False, encoding='utf-8')
# # 3 Normalizing common gene name to its WormBase ID
# And getting the gene and mutation frequency in a paper.
# In[14]:
data = pd.read_csv("data/model_output/processed/snippets_2.csv")
data = data.to_numpy() # 'WBPaper ID', 'Method', 'Genes', '*Gene-Variant combo ', 'Mutations', 'Normalized Mutations', 'Sentence'
with open("data/model_output/processed/temp_paper_wbgene_count.json", "w") as outfile:
json.dump(paper_wbgene_count, outfile)
# Checking if any detected gene was NOT in the WB gene dictionary
# In[18]:
data = np.array(data)
data[len(data[:,2]) != len(data[:,3])]
# above cell takes a while to complete, so saving the data temporarily
data = pd.DataFrame(data[:], columns=['WBPaper ID', 'Method', 'Genes', 'WBGenes', '*Gene-Variant combo ', 'Mutations', 'Normalized Mutations', 'Sentence'])
data.to_csv("data/model_output/processed/snippets_3.csv", index=False, encoding='utf-8')
data = None
# # 5 Validation
# Finding the gene and mutation matches using the transcripts in c_elegans.PRJNA13758.WS281.protein.fa
# Get the file here - ftp://ftp.ebi.ac.uk/pub/databases/wormbase/releases/WS281/species/c_elegans/PRJNA13758/c_elegans.PRJNA13758.WS281.protein.fa.gz
data = pd.read_csv("data/model_output/processed/snippets_3.csv")
data = data.to_numpy() # 'WBPaper ID', 'Method', 'Genes', 'WBGenes', '*Gene-Variant combo ', 'Mutations', 'Normalized Mutations', 'Sentence'
proteinfa = Path('data/gsoc/proteinfa/c_elegans.PRJNA13758.WS281.protein.fa').read_text().split('>')[1:]
wb_gene_and_prot = dict() # {wbgene: [transcript, protein]}
for row in proteinfa:
wbgene = re.findall("WBGene[0-9]+", row)[0]
protein = "".join(re.findall("\n.*", row)).replace('\n', '')
transcript = row.split(' ')[0]
if wbgene not in wb_gene_and_prot.keys():
wb_gene_and_prot[wbgene] = []
wb_gene_and_prot[wbgene].append([transcript, protein])
len(wb_gene_and_prot)
# #### Create a pair of gene and mutation only when BOTH are present in same sentence.
# In[24]:
paper_raw_info_compiled = []
# 'WBPaper ID', 'Method', 'Genes', 'WBGenes', '*Gene-Variant combo ', 'Mutations', 'Normalized Mutations', 'Sentence'
for row in data:
ppr_id = row[0]
norm_muts = row[-2]
wbgenes = row[3]
sentence = row[-1]
gene_var = row[4]
# filtering out nan values
if type(norm_muts) != float and type(wbgenes) != float:
norm_muts = norm_muts[1:-1].split("', '")
wbgenes = wbgenes[1:-1].split("', '")
for m in norm_muts:
for g in wbgenes:
if len(m) and len(g):
paper_raw_info_compiled.append([ppr_id, g, m, sentence, gene_var])
# In[25]:
matches = []
final_sheet = [] # ppr_id, gene, transcript
for info_from_ppr in paper_raw_info_compiled:
ppr_id = info_from_ppr[0]
gene = info_from_ppr[1]
mut = info_from_ppr[2]
sent = info_from_ppr[3]
gene_var = info_from_ppr[4]
if not len(mut):
continue
if gene not in wb_gene_and_prot.keys():
continue
for row in wb_gene_and_prot[gene]:
transcript, protein_string = row
wt_res = mut[0]
pos = int(''.join(n for n in mut if n.isdigit()))
mut_res = mut[-1]
try:
if protein_string[pos-1] == wt_res:
matches.append([ppr_id, gene, mut, gene_var, transcript, sent])
except IndexError:
pass
for r in matches:
p = r[0]
p, wbg, mut, gene_var, transcript, sent = r
# Adding gene common names column, again
# Current code doesn't keep any link between the WB gene name and the common name
g_common_name = all_wb_genes[wbg]
g_common_name = ', '.join(g_common_name)
final_sheet.append([p, wbg, g_common_name, mut, gene_var, transcript, sent])
# In[26]:
len(final_sheet)
# #### Getting metadata on genes and mutations, and adding warnings column
# In[27]:
with open("data/model_output/processed/temp_paper_wbgene_count.json", "r") as f:
paper_wbgene_count = json.loads(f.read())
with open("data/model_output/processed/temp_paper_mut_count.json", "r") as f:
paper_mut_count = json.loads(f.read())
# In[28]:
final_sheet = np.array(final_sheet)
updated_sheet = []
for i, row in enumerate(final_sheet):
warnings = []
paper_id = row[0]
wbgene = row[1]
mut = row[3]
sentence = row[-1]
for ppr_mut, count in paper_mut_count[paper_id].items():
if mut == ppr_mut and count == 1:
warnings.append(f'{mut} mentioned only once in entire paper')
break
rows_with_same_mut = final_sheet[np.logical_and(final_sheet[:, 0] == paper_id, final_sheet[:,3] == mut)]
same_mut_all_genes = list(set(rows_with_same_mut[:, 1]))
# If the same variant is found in two different genes in the same paper - WARN!
# It is more likely to belong to the gene it is most frequently encountered
if len(same_mut_all_genes) > 1:
temp_warn_store = f'{mut} was paired with other genes too:'
for ppr_gene, count in paper_wbgene_count[paper_id].items():
if ppr_gene in same_mut_all_genes:
temp_warn_store += (f' {ppr_gene} (seen {count} times),')
warnings.append(temp_warn_store)
cut_mut = re.sub("([A-Z])([0-9]+)([A-Za-z]+)", r'\1\2', mut)
remaining_mut = mut.replace(cut_mut, "")
same_cut_muts = [i for i,m in enumerate(final_sheet[:,3]) if (m[:len(cut_mut)] == cut_mut and m[len(cut_mut):] != remaining_mut)]
if same_cut_muts:
temp_warn_store = f'{mut} similar to:'
for temp_i in same_cut_muts:
temp_warn_store += (f' {final_sheet[:,3][temp_i]} (line {temp_i}),')
warnings.append(temp_warn_store)
all_muts_in_sentence = data[np.logical_and(data[:, 0] == paper_id, data[:,-1] == sentence)][:,-2]
all_muts_in_sentence = all_muts_in_sentence[0][1:-1].split("', '")
all_matched_muts_in_sentence = final_sheet[np.logical_and(final_sheet[:, 0] == paper_id, final_sheet[:,-1] == sentence)][:,3]
all_matched_muts_in_sentence = list(set(all_matched_muts_in_sentence))
unmatched_muts_in_sentence = [m for m in all_muts_in_sentence if m not in all_matched_muts_in_sentence]
if len(unmatched_muts_in_sentence) >= 2:
temp_warn_store = f'Sentence has multiple mutations which did not match:'
for m in unmatched_muts_in_sentence:
temp_warn_store += (f' {m},')
warnings.append(temp_warn_store)
all_genes_with_this_mut = final_sheet[np.logical_and(final_sheet[:, 0] == paper_id, final_sheet[:, 3] == mut)][:, 1]
all_genes_with_this_mut = list(set(all_genes_with_this_mut))
if len(all_genes_with_this_mut) > 3:
temp_warn_store = f'{mut} was matched with {len(all_genes_with_this_mut)} genes:'
for g in all_genes_with_this_mut:
temp_warn_store += (f' {g},')
warnings.append(temp_warn_store)
if warnings:
warnings = " || ".join(warnings)
else:
warnings = ""
updated_sheet.append(np.insert(row, -1, warnings).tolist())
# In[29]:
# saving things
updated_sheet = pd.DataFrame(updated_sheet[:], columns=['WBPaper ID', 'WBGene', 'Gene', 'Mutation', 'Gene-Var combo', 'Transcript', 'Warnings', 'Sentence'])
updated_sheet.to_csv("data/model_output/processed/snippets_4.csv", index=False, encoding='utf-8')
updated_sheet = None
# # 6 Additional details
# ### 6.1 Strains
# In[30]:
data = | pd.read_csv("data/model_output/processed/snippets_4.csv") | pandas.read_csv |
#!/usr/bin/env python
# coding: utf-8
import numpy as np
import pandas as pd
import os
import _pickle as pickle
from datetime import datetime
from scipy.ndimage import gaussian_filter1d
import sklearn
from sklearn import svm
from sklearn.metrics import confusion_matrix
def run_decoding_analyses():
_config_filename = 'config.pckl'
with open(_config_filename, 'rb') as _file:
_config = pickle.load(_file)
import_path = _config['basic']['data_path']
export_path = _config['basic']['result_path']
os.makedirs(import_path,exist_ok=True)
os.makedirs(export_path,exist_ok=True)
_dir_pckl = os.listdir(import_path)
_sort_str = '.pckl'
_dir_pckl = [x for x in _dir_pckl if x[-len(_sort_str):]==_sort_str]
df_files = pd.DataFrame(_dir_pckl,columns=['filename'])
df_files['cate_name'] = df_files['filename'].apply(lambda x:x.split('.')[0])
df_files['seed'] = df_files['cate_name'].apply(lambda x:int(x.split('_')[-1]))
_file_list = df_files['filename'].unique()
for _i_file,_filename in enumerate(_file_list):
print('>> Decoding dataset ',_i_file+1,' out of ',len(_file_list))
run_decoding_analysis_on_single_round(_filename,import_path,export_path)
def run_decoding_analysis_on_single_round(_filename,import_path,export_path):
start_time = datetime.now()
## load data
_key_set = ['MSO_L','MSO_R']
_load_file = import_path+_filename
print('> loading simulation data:',_load_file)
with open(_load_file, 'rb') as _file:
_loaded_list = pickle.load(_file)
dict_spike,_elapsed,_key_set,start_position_set_R,ITD_set,fs,seed,df_ITD = _loaded_list
## generating spike count dataframe
print('> computing spike counts')
# import spike train
_df_spike_set = []
for _key in _key_set:
_spike_set = list(dict_spike[_key].values())
_firing_rate_matrix = []
_df_spikes = | pd.DataFrame([_spike_set]) | pandas.DataFrame |
# -*- coding: utf-8 -*-
# Copyright StateOfTheArt.quant.
#
# * Commercial Usage: please contact <EMAIL>
# * Non-Commercial Usage:
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pdb
import torch
import pandas as pd
import featurizer.functions.time_series_functions as tsf
def macd(tensor, fastperiod=12, slowperiod=26, signalperiod=9):
#DIF = tsf.ema(tensor, fastperiod) - tsf.ema(tensor, slowperiod)
#DEA = tsf.ema(DIF, signalperiod)
#MACD = (DIF - DEA) * 1 # Here is 1 rather than trodational 2
import talib
tensor_np = tensor.cpu().detach().numpy()
tensor_df = pd.DataFrame(tensor_np)
DIF = tensor_df.apply(lambda x: talib.MACD(x, fastperiod=fastperiod, slowperiod=slowperiod, signalperiod=signalperiod)[0])
DEA = tensor_df.apply(lambda x: talib.MACD(x, fastperiod=fastperiod, slowperiod=slowperiod, signalperiod=signalperiod)[1])
MACD = tensor_df.apply(lambda x: talib.MACD(x, fastperiod=fastperiod, slowperiod=slowperiod, signalperiod=signalperiod)[2])
DIF_ts = torch.tensor(DIF.values, dtype=tensor.dtype, device=tensor.device)
DEA_ts = torch.tensor(DEA.values, dtype=tensor.dtype, device=tensor.device)
MACD_ts = torch.tensor(MACD.values, dtype=tensor.dtype, device=tensor.device)
#
return DIF_ts, DEA_ts, MACD_ts
return DIF, DEA, MACD
def rsi(tensor, timeperiod):
import talib
tensor_np = tensor.cpu().detach().numpy()
tensor_df = | pd.DataFrame(tensor_np) | pandas.DataFrame |
import re
import numpy as np
import pandas as pd
from sklearn import preprocessing
from scipy.sparse import csr_matrix
from sklearn.cluster import DBSCAN
import datetime
import time
states = ["INITIAL","login","View_Items","home","logout","View_Items_quantity","Add_to_Cart","shoppingcart",
"remove","deferorder","purchasecart","inventory","sellinventory","clearcart","cancelorder","$"]
#Data imports
PATH = "../data/raw/"
sessions_file = (PATH+'sessions.dat')
def session_request_dict(sessions_file):
s_r_dict = {}
# Dict of sessions
with open(sessions_file) as fn:
sessions_raw = fn.readlines()
for session in sessions_raw:
key = re.search('([^.]+)', session).group()
value = re.findall('\"(.*?)\"', session)
s_r_dict[key] = value
return s_r_dict
def transition_matrix(sessions, states):
markovchains = []
for key, value in sessions.items():
# labelEncoding
le = preprocessing.LabelEncoder()
le.fit(value)
le.transform(value)
# factorize
factorize = pd.factorize(value)[0]
# matrix
n = 1 + max(factorize) # number of states
m = [[0] * n for _ in range(n)]
for (i, j) in zip(factorize, factorize[1:]):
m[i][j] += 1
# now convert to probabilities:
for row in m:
s = sum(row)
if s > 0:
row[:] = [f / s for f in row]
# unique array in the right order
value = np.array(value)
_, idx = np.unique(value, return_index=True)
df = pd.DataFrame(data=m, index=value[np.sort(idx)],
columns=value[np.sort(idx)])
df_1 = pd.DataFrame(index=states, columns=states, dtype='float64')
df_1.update(df, join='left')
merge = pd.concat([ | pd.concat([df_1, df], axis=1, sort=False) | pandas.concat |
import nn_closed_loop.example as ex
import numpy as np
from tabulate import tabulate
import pandas as pd
import datetime
import os
import glob
import matplotlib as mpl
import matplotlib.pyplot as plt
import argparse
import nn_closed_loop.dynamics as dynamics
import nn_closed_loop.analyzers as analyzers
import nn_closed_loop.constraints as constraints
import nn_closed_loop.sampling_based.randUP as randUP
import nn_closed_loop.sampling_based.kernelUP as kernelUP
import nn_closed_loop.sampling_based.GoTube as GoTube
from nn_closed_loop.utils.nn import load_controller
results_dir = "{}/results/logs/".format(
os.path.dirname(os.path.abspath(__file__))
)
os.makedirs(results_dir, exist_ok=True)
expts = [
# {
# 'partitioner': 'None',
# 'propagator': 'SeparableCROWN',
# 'sampling_based': False,
# },
# {
# 'partitioner': 'None',
# 'propagator': 'SeparableSGIBP',
# 'sampling_based': False,
# },
#
#
#
#
#
{
'partitioner': 'None',
'propagator': 'CROWN',
'sampling_based': False,
'boundaries': 'lp'
},
{
'partitioner': 'Uniform',
'num_partitions': "[4, 4]",
'propagator': 'CROWN',
'sampling_based': False,
'boundaries': 'lp'
},
{
'partitioner': 'None',
'propagator': 'CROWN',
'sampling_based': False,
'boundaries': 'polytope'
},
{
'partitioner': 'Uniform',
'num_partitions': "[4, 4]",
'propagator': 'CROWN',
'sampling_based': False,
'boundaries': 'polytope'
},
# {
# 'partitioner': 'None',
# 'propagator': 'SDP',
# 'cvxpy_solver': 'SCS',
# 'sampling_based': False,
# },
# {
# 'partitioner': 'Uniform',
# 'num_partitions': "[4, 4]",
# 'propagator': 'SDP',
# 'cvxpy_solver': 'SCS',
# 'sampling_based': False,
# },
# {
# 'partitioner': 'randUP',
# 'propagator': 'randUP',
# 'randUP': True,
# 'nb_samples': 1000,
# 'epsilon': 0.02,
# },
{
'partitioner': 'randUP_M100',
'propagator': 'randUP_M100',
'sampling_based': True,
'boundaries': 'hull',
'randUP': True,
'kernelUP': False,
'GoTube': False,
'nb_samples': 100,
'epsilon': 0.0,
},
{
'partitioner': 'randUP_M200',
'propagator': 'randUP_M200',
'sampling_based': True,
'boundaries': 'hull',
'randUP': True,
'kernelUP': False,
'GoTube': False,
'nb_samples': 200,
'epsilon': 0.0,
},
{
'partitioner': 'randUP_M300',
'propagator': 'randUP_M300',
'sampling_based': True,
'boundaries': 'hull',
'randUP': True,
'kernelUP': False,
'GoTube': False,
'nb_samples': 300,
'epsilon': 0.0,
},
{
'partitioner': 'randUP_M500',
'propagator': 'randUP_M500',
'sampling_based': True,
'boundaries': 'hull',
'randUP': True,
'kernelUP': False,
'GoTube': False,
'nb_samples': 500,
'epsilon': 0.0,
},
{
'partitioner': 'randUP_M1k',
'propagator': 'randUP_M1k',
'sampling_based': True,
'boundaries': 'hull',
'randUP': True,
'kernelUP': False,
'GoTube': False,
'nb_samples': 1000,
'epsilon': 0.0,
},
{
'partitioner': 'randUP_M2k',
'propagator': 'randUP_M2k',
'sampling_based': True,
'boundaries': 'hull',
'randUP': True,
'kernelUP': False,
'GoTube': False,
'nb_samples': 2000,
'epsilon': 0.0,
},
{
'partitioner': 'randUP_M3k',
'propagator': 'randUP_M3k',
'sampling_based': True,
'boundaries': 'hull',
'randUP': True,
'kernelUP': False,
'GoTube': False,
'nb_samples': 3000,
'epsilon': 0.0,
},
{
'partitioner': 'randUP_M5k',
'propagator': 'randUP_M5k',
'sampling_based': True,
'boundaries': 'hull',
'randUP': True,
'kernelUP': False,
'GoTube': False,
'nb_samples': 5000,
'epsilon': 0.0,
},
{
'partitioner': 'randUP_M10k',
'propagator': 'randUP_M10k',
'sampling_based': True,
'boundaries': 'hull',
'randUP': True,
'kernelUP': False,
'GoTube': False,
'nb_samples': 10000,
'epsilon': 0.0,
},
#
#
#
#
#
{
'partitioner': 'kernelUP_M100',
'propagator': 'kernelUP_M100',
'sampling_based': True,
'boundaries': 'kernel',
'randUP': False,
'kernelUP': True,
'GoTube': False,
'nb_samples': 100,
'Lambda': 0.1,
'sigma': 0.05,
},
{
'partitioner': 'kernelUP_M200',
'propagator': 'kernelUP_M200',
'sampling_based': True,
'boundaries': 'kernel',
'randUP': False,
'kernelUP': True,
'GoTube': False,
'nb_samples': 200,
'Lambda': 0.1,
'sigma': 0.05,
},
{
'partitioner': 'kernelUP_M300',
'propagator': 'kernelUP_M300',
'sampling_based': True,
'boundaries': 'kernel',
'randUP': False,
'kernelUP': True,
'GoTube': False,
'nb_samples': 300,
'Lambda': 0.1,
'sigma': 0.05,
},
{
'partitioner': 'kernelUP_M500',
'propagator': 'kernelUP_M500',
'sampling_based': True,
'boundaries': 'kernel',
'randUP': False,
'kernelUP': True,
'GoTube': False,
'nb_samples': 500,
'Lambda': 0.1,
'sigma': 0.02,
},
{
'partitioner': 'kernelUP_M1k',
'propagator': 'kernelUP_M1k',
'sampling_based': True,
'boundaries': 'kernel',
'randUP': False,
'kernelUP': True,
'GoTube': False,
'nb_samples': 1000,
'Lambda': 0.1,
'sigma': 0.05,
},
#
#
#
#
#
{
'partitioner': 'GoTube_M100',
'propagator': 'GoTube_M100',
'sampling_based': True,
'boundaries': 'ball',
'randUP': False,
'kernelUP': False,
'GoTube': True,
'nb_samples': 100,
'epsilon': 0.0,
},
{
'partitioner': 'GoTube_M200',
'propagator': 'GoTube_M200',
'sampling_based': True,
'boundaries': 'ball',
'randUP': False,
'kernelUP': False,
'GoTube': True,
'nb_samples': 200,
'epsilon': 0.0,
},
{
'partitioner': 'GoTube_M300',
'propagator': 'GoTube_M300',
'sampling_based': True,
'boundaries': 'ball',
'randUP': False,
'kernelUP': False,
'GoTube': True,
'nb_samples': 300,
'epsilon': 0.0,
},
{
'partitioner': 'GoTube_M500',
'propagator': 'GoTube_M500',
'sampling_based': True,
'boundaries': 'ball',
'randUP': False,
'kernelUP': False,
'GoTube': True,
'nb_samples': 500,
'epsilon': 0.0,
},
{
'partitioner': 'GoTube_M1k',
'propagator': 'GoTube_M1k',
'sampling_based': True,
'boundaries': 'ball',
'randUP': False,
'kernelUP': False,
'GoTube': True,
'nb_samples': 1000,
'epsilon': 0.0,
},
{
'partitioner': 'GoTube_M2k',
'propagator': 'GoTube_M2k',
'sampling_based': True,
'boundaries': 'ball',
'randUP': False,
'kernelUP': False,
'GoTube': True,
'nb_samples': 2000,
'epsilon': 0.0,
},
{
'partitioner': 'GoTube_M3k',
'propagator': 'GoTube_M3k',
'sampling_based': True,
'boundaries': 'ball',
'randUP': False,
'kernelUP': False,
'GoTube': True,
'nb_samples': 3000,
'epsilon': 0.0,
},
{
'partitioner': 'GoTube_M5k',
'propagator': 'GoTube_M5k',
'sampling_based': True,
'boundaries': 'ball',
'randUP': False,
'kernelUP': False,
'GoTube': True,
'nb_samples': 5000,
'epsilon': 0.0,
},
{
'partitioner': 'GoTube_M10k',
'propagator': 'GoTube_M10k',
'sampling_based': True,
'boundaries': 'ball',
'randUP': False,
'kernelUP': False,
'GoTube': True,
'nb_samples': 10000,
'epsilon': 0.0,
},
]
class Experiment:
def __init__(self):
self.info = {
('CROWN', 'Uniform'): {
'name': 'Reach-LP-Partition',
'color': 'tab:green',
'ls': '-',
},
('CROWN', 'None'): {
'name': 'Reach-LP',
'color': 'tab:green',
'ls': '--',
},
('SDP', 'Uniform'): {
'name': 'Reach-SDP-Partition',
'color': 'tab:red',
'ls': '-',
},
('SDP', 'None'): {
'name': 'Reach-SDP~\cite{hu2020reach}',
'color': 'tab:red',
'ls': '--',
},
('SeparableCROWN', 'None'): {
'name': 'CL-CROWN',
},
('SeparableSGIBP', 'None'): {
'name': 'CL-SG-IBP~\cite{xiang2020reachable}',
},
('randUP', 'randUP'): {
'name': 'RandUP',
'color': 'k',
'ls': '-',
},
('randUP_M1k', 'randUP_M1k'): {
'name': 'RandUP_M1k',
'color': 'k',
'ls': '-',
},
('randUP_M10k', 'randUP_M10k'): {
'name': 'RandUP_M10k',
'color': 'k',
'ls': '-',
},
('kernelUP_M100', 'kernelUP_M100'): {
'name': 'kernelUP_M100',
'color': 'tab:blue',
'ls': '-',
},
('kernelUP_M300', 'kernelUP_M300'): {
'name': 'kernelUP_M300',
'color': 'tab:blue',
'ls': '-',
},
('kernelUP_M1k', 'kernelUP_M1k'): {
'name': 'kernelUP_M1k',
'color': 'tab:blue',
'ls': '-',
},
('GoTube_M1k', 'GoTube_M1k'): {
'name': 'GoTube_M1k',
'color': 'tab:red',
'ls': '-',
},
('GoTube_M3k', 'GoTube_M3k'): {
'name': 'GoTube_M3k',
'color': 'tab:red',
'ls': '-',
},
('GoTube_M10k', 'GoTube_M10k'): {
'name': 'GoTube_M10k',
'color': 'tab:red',
'ls': '-',
},
}
class NNVerifExperiment(Experiment):
def __init__(self, system="double_integrator",
controller_model="double_integrator",
filename=""):
if filename == "":
self.filename = results_dir + 'exp_{dt}.pkl'
else:
self.filename = filename
self.system = system
self.controller_model = controller_model
Experiment.__init__(self)
def run(self, expts):
dt = datetime.datetime.now().strftime('%Y_%m_%d__%H_%M_%S')
parser = ex.setup_parser()
args = parser.parse_args()
args.save_plot = False
args.show_plot = False
args.make_animation = False
args.show_animation = False
args.init_state_range = "[[2.5, 3.0], [-0.1, 0.1]]"
args.state_feedback = True
args.system = self.system
args.controller_model = self.controller_model
args.t_max = 4 # 9
args.estimate_runtime = True
args.num_calls = 2#100
df = pd.DataFrame()
for expt in expts:
for key, value in expt.items():
setattr(args, key, value)
if args.sampling_based:
args.boundaries = "lp"
else:
if expt['boundaries']=='lp':
args.boundaries = 'lp'
elif expt['boundaries']=='polytope':
args.boundaries = 'polytope'
else:
raise NotImplementedError("Unimplemented boundary type.")
stats, info = ex.main(args)
nb_samples = 0
eps_pad = 0.
Lambda = 0.
sigma = 0.
if args.sampling_based:
nb_samples = args.nb_samples
if args.randUP or args.GoTube:
eps_pad = args.epsilon
elif args.kernelUP:
Lambda = args.Lambda
sigma = args.sigma
for i, runtime in enumerate(stats['runtimes']):
                # DataFrame.append is deprecated in recent pandas; build the row and concat instead
                row = {
                    **expt,
                    'run': i,
                    'runtime': runtime,
                    'output_constraint': stats['output_constraints'][i],
                    'area_final_step_error': stats['area_final_step_errors'][i],
                    'area_avg_error': stats['area_avg_errors'][i],
                    'area_all_errors': stats['area_all_errors'][i],
                    'haus_final_step_error': stats['haus_final_step_errors'][i],
                    'haus_avg_error': stats['haus_avg_errors'][i],
                    'haus_all_errors': stats['haus_all_errors'][i],
                    'B_all_conserv': stats['B_all_conserv'][i],
                    'B_vec_conserv': stats['B_vec_conserv'][i],
                    'nb_samples': nb_samples,
                    'eps_pad': eps_pad,
                    'Lambda': Lambda,
                    'sigma': sigma,
                }
                df = pd.concat([df, pd.DataFrame([row])], ignore_index=True)
df.to_pickle(self.filename.format(dt=dt))
def grab_latest_groups(self):
# Grab latest file as pandas dataframe
list_of_files = glob.glob(self.filename.format(dt='*'))
latest_filename = max(list_of_files, key=os.path.getctime)
df = | pd.read_pickle(latest_filename) | pandas.read_pickle |
"""Compile 1/f-exponents across sessions for further computation in R."""
import os
import pandas as pd
import numpy as np
df = pd.read_csv('../csv/sessions.csv')
fmin = 1
fmax = 10
rsquare_threshold = 0.95
exp_folder = '../results/exponents/'
dfs = []
nr_segments = []
for subject in df.subject_id:
df_file_name = '%s/%s_exponents.csv' % (exp_folder, subject)
df_exp = pd.read_csv(df_file_name)
nr_segments.append(len(df_exp))
df_file_name = '%s/%s_rsquare.csv' % (exp_folder, subject)
df_r = pd.read_csv(df_file_name)
# exclude all segments with a model fit worse than r_square threshold
df_exp = df_exp.mask(df_r < rsquare_threshold)
df_exp = df_exp.mean()
df_exp['subject_id'] = subject
dfs.append(df_exp)
print('number of analyzed segments')
print('mean: %.2f, std: %.2f' % (np.mean(nr_segments), np.std(nr_segments)))
df_all = | pd.concat(dfs, axis=1, sort=False) | pandas.concat |
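# Illustrative sketch (hypothetical values): segments whose model fit falls below the r-square
# threshold are masked to NaN and therefore excluded from the per-subject mean above.
def _example_mask_poor_fits():
    df_exp = pd.DataFrame({'Cz': [1.1, 1.4, 1.2]})
    df_r = pd.DataFrame({'Cz': [0.97, 0.90, 0.99]})
    return df_exp.mask(df_r < 0.95).mean()  # averages only the two well-fit segments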
# To add a new cell, type ''
# To add a new markdown cell, type ' [markdown]'
import sys
import os
from os import path
import numpy as np
import pandas as pd
sys.path.append('../../src/')
from .modules.sys_functions.find_files_in_folder import find_files, find_folders, search_dirs
from .modules.classes import Data_Manipulator
elems_keys = "<KEY>"
nodes_keys = "<KEY>"
nodes_filename = "node.txt"
elems_filename = "elem.txt"
log_filename = "run_log.csv"
feb_filename = "myo_hex_coarse_6_epi_60_endo_-60.feb"
# runs_dir = "D:\\Igor\\Research_USF\\University of South Florida\\Mao, Wenbin - Igor\\Febio-Models\\Active-Models\\PAQ\\Hex8-Hex20\\runs\\Hex20"
runs_dir = "./work_bgfs/i/igornobrega/FEBio/PAQ-GammaStudy/myo_hex_coarse_6_epi_60_endo_60_7/runs"
df_dtypes = {
'x': 'float32',
'y': 'float32',
'z': 'float32',
'ux': 'float32',
'uy': 'float32',
'uz': 'float32',
'node': 'category',
'timestep': 'float32',
'run_ref': 'uint8',
'param_val': 'float32',
'sx': 'float32',
'sy': 'float32',
'sz': 'float32',
'sxy': 'float32',
'sxz': 'float32',
'syz': 'float32',
'elem': 'category'
}
# LOAD_FROM_PICKLES = True
READ_PLOT_FILES = True
log_filepath = path.join(runs_dir, log_filename)
run_dirs = find_folders(runs_dir)
runs = []
runs_nums = []
for (dp, dd, dn) in run_dirs:
if dn != "pickles":
_files = []
feb_files = find_files(dp, ("fileFormat", "feb"))
txt_files = find_files(dp, ("fileFormat", "txt"))
_files.extend(feb_files)
_files.extend(txt_files)
runs.append(_files)
runs_nums.append(int(dn.split("-")[-1]))
log_df = pd.read_csv(log_filepath)
# # Create DataManipulators
def mem_usage(pandas_obj):
if isinstance(pandas_obj,pd.DataFrame):
usage_b = pandas_obj.memory_usage(deep=True).sum()
else: # we assume if not a df it's a series
usage_b = pandas_obj.memory_usage(deep=True)
usage_mb = usage_b / 1024 ** 2 # convert bytes to megabytes
return usage_mb
# Since we have the same geometry, we can use one DataManipulator as a reference
# to save memory usage, we will be reading data from nodes/elements and saving into a pickle file
pickles_dir = path.join(runs_dir, "pickles")
tpm_pickles_dir = path.join(pickles_dir, "tpm")
feb_file_ref = runs[0][0][0] # Using same file as ref since geometry is the same <-- MUST CHANGE IF WE USE DIFFERENT GEOMETRY
new_dm = Data_Manipulator(feb_file_ref) # Using same file as ref since geometry is the same <-- MUST CHANGE IF WE USE DIFFERENT GEOMETRY
m_use = 0
f_ctn = 0
# if LOAD_FROM_PICKLES == True:
# dms = []
if READ_PLOT_FILES:
if not path.exists(tpm_pickles_dir):
os.makedirs(tpm_pickles_dir)
# dms = []
for i, run in enumerate(runs):
feb_file = nodes_file = elems_file = None
for (fp, ff, fn) in run:
if ff == feb_filename:
feb_file = fp
elif ff == nodes_filename:
nodes_file = fp
elif ff == elems_filename:
elems_file = fp
if feb_file != None:
# print(feb_file)
# new_dm = Data_Manipulator(feb_file)
run_num = runs_nums[i]
param_val = log_df.loc[log_df["run#"] == run_num]["param"].values[0]
if nodes_file != None and elems_file != None:
pickle_filename = "data-run-{v}.pickle".format(v = run_num)
new_dm.read_plot_file([nodes_file, elems_file], [nodes_keys, elems_keys], ["node", "elem"], "", run_num, param_val, df_dtypes)
m_use += mem_usage(new_dm.data)
f_ctn += 1
new_dm.data.to_pickle(path.join(tpm_pickles_dir, pickle_filename))
# dms.append(new_dm)
if m_use != 0:
print("Total memory usage:", m_use)
print("Average memory usage per df:", m_use / f_ctn)
def calculate_nodes_data(dm, nodes_colums, elems_colums, elem_data_labels, accepted_nodes=None, dtypes={}):
# add additional labels
nodes_colums.extend(["node","timestep","run_ref","param_val"])
elems_colums.extend(["elem","timestep","run_ref","param_val"])
# drop not numbers
nodes_df = dm.data[nodes_colums].dropna()
elems_df = dm.data[elems_colums].dropna()
# vectorize elems_df
elem_vec = elems_df.to_dict('records')
# set elems dict
elem_data_dict = dict()
for elem in elem_vec:
elem_data_dict[(elem["elem"],elem["timestep"],elem["run_ref"])] = elem
# vectorize nodes_df
nodes_vec = nodes_df.to_dict('records')
new_nodes_vec = []
# loop through nodes and add nodal data based on elemen value
for node in nodes_vec:
# get node number
node_num = node["node"]
if accepted_nodes != None:
if node_num not in accepted_nodes:
continue
# get node refs
time_step = node["timestep"]
run_ref = node["run_ref"]
# get elems that are connected to given node
elems_c_node = dm.node_dict[int(node_num)]
# get elem_data
elem_data = np.zeros((1,len(elem_data_labels)))
for elem_num in elems_c_node:
elem_vals = elem_data_dict[(elem_num, time_step, run_ref)]
elem_data += np.array([elem_vals[v] for v in elem_data_labels])
elem_data = elem_data / len(elems_c_node)
# add nodal data
for i, el_label in enumerate(elem_data_labels):
node[el_label] = elem_data[0][i]
new_nodes_vec.append(node)
new_df = pd.DataFrame.from_records(new_nodes_vec)
for column_key in dtypes:
if column_key in new_df.columns:
new_df.loc[:,column_key] = new_df[column_key].astype(dtypes[column_key])
return new_df
nodes_colums = [v for v in nodes_keys.split(";")]
elems_colums = [v for v in elems_keys.split(";")]
elem_data_labels = [v for v in elems_keys.split(";")]
pickles_paths = search_dirs(tpm_pickles_dir, ".pickle")
df_ref = pd.read_pickle(pickles_paths[0])
feb_file_ref = runs[0][0][0] # Using same file as ref since geometry is the same <-- MUST CHANGE IF WE USE DIFFERENT GEOMETRY
dm_ref = Data_Manipulator(feb_file_ref)
dm_ref.set_data(pickle_path=pickles_paths[0])
endo_nodes = set(dm_ref.face_dicts[dm_ref.set_order["Endocardio"]].keys())
pickles_dir = path.join(runs_dir, "pickles")
endo_nodes_data_pickles_dir = path.join(pickles_dir, "endo-nodes-data")
if not path.exists(endo_nodes_data_pickles_dir):
os.makedirs(endo_nodes_data_pickles_dir)
for pp in pickles_paths:
# here I am getting just the endocardio node data based on ONE reference FEBIO file (same geometry)
dm_ref.set_data(pickle_path=pp)
df = calculate_nodes_data(dm_ref,nodes_colums, elems_colums, elem_data_labels, endo_nodes, dtypes=df_dtypes)
new_file_name = path.join(endo_nodes_data_pickles_dir, "endo-{v}".format(v=path.basename(pp)))
df.to_pickle(new_file_name)
def combine_pickles(pickles_paths, df=None, started=False, _max=2):
if len(pickles_paths) <= _max:
print([path.basename(pp) for pp in pickles_paths])
if started:
df_list = [df]
df_list.extend([pd.read_pickle(pp) for pp in pickles_paths])
else:
df_list = [ | pd.read_pickle(pp) | pandas.read_pickle |
# -*- coding: utf-8 -*-
"""
author: zengbin93
email: <EMAIL>
create_dt: 2021/11/4 17:39
describe: A-share strong-stock sensor
"""
import os
import os.path
import traceback
import inspect
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from datetime import timedelta, datetime
from collections import Counter
from tqdm import tqdm
from typing import Callable
from czsc.objects import Event
from czsc.utils import io
from czsc.data.ts_cache import TsDataCache, Freq
from czsc.sensors.utils import get_index_beta, generate_signals, max_draw_down, turn_over_rate
from czsc.utils import WordWriter
plt.style.use('ggplot')
plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False
def selected_filter_by_index(dc: TsDataCache, dfg: pd.DataFrame, index_code=None):
"""使用指数成分过滤
:param dc: 数据缓存对象
:param dfg: 单个交易日的强势股选股结果
:param index_code: 指数代码
:return: 过滤后的选股结果
"""
if not index_code or dfg.empty:
return dfg
assert dfg['trade_date'].nunique() == 1
trade_date = dfg['trade_date'].max()
index_members = dc.index_weight(index_code, trade_date)
ts_codes = list(index_members['con_code'].unique())
return dfg[dfg.ts_code.isin(ts_codes)]
def selected_filter_by_concepts(dc, dfg, top_n=20, min_n=3, method='v1'):
"""使用板块效应过滤
:param dc: 数据缓存对象
:param dfg: 单个交易日的强势股选股结果
:param top_n: 选取前 n 个密集概念
:param min_n: 单股票至少要有 n 个概念在 top_n 中
:param method: 打分计算方法
v1 直接取板块中的强势股数量作为分数
v2 板块内强势股数 / 板块内股数
:return: 过滤后的选股结果
"""
if dfg.empty or not top_n or not min_n:
return dfg, []
ths_members = dc.get_all_ths_members(exchange="A", type_="N")
ths_members = ths_members[~ths_members['概念名称'].isin([
'MSCI概念', '沪股通', '深股通', '融资融券', '上证180成份股', '央企国资改革',
'标普道琼斯A股', '中证500成份股', '上证380成份股', '沪深300样本股',
])]
ths_concepts = ths_members[ths_members.code.isin(dfg.ts_code)]
if method == 'v1':
key_concepts = [k for k, v in Counter(ths_concepts['概念名称'].to_list()).most_common(top_n)]
elif method == 'v2':
all_count = Counter(ths_members['概念名称'].to_list())
sel_count = Counter(ths_concepts['概念名称'].to_list())
df_scores = pd.DataFrame([{"concept": k, 'score': sel_count[k] / all_count[k]}
for k in sel_count.keys()])
key_concepts = df_scores.sort_values('score', ascending=False).head(top_n)['concept'].to_list()
else:
raise ValueError(f"method value error")
sel = ths_concepts[ths_concepts['概念名称'].isin(key_concepts)]
ts_codes = [k for k, v in Counter(sel.code).most_common() if v >= min_n]
dfg = dfg[dfg.ts_code.isin(ts_codes)]
dfg.loc[:, '概念板块'] = dfg.ts_code.apply(lambda x: ths_concepts[ths_concepts.code == x]['概念名称'].to_list())
dfg.loc[:, '概念数量'] = dfg['概念板块'].apply(len)
return dfg, key_concepts
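def _example_concept_score_v2():
    """Illustrative sketch (made-up counts) of the v2 score described above: strong stocks
    in a concept divided by all stocks in that concept."""
    all_count = Counter({'AI': 50, 'EV': 20})
    sel_count = Counter({'AI': 5, 'EV': 4})
    df_scores = pd.DataFrame([{"concept": k, "score": sel_count[k] / all_count[k]} for k in sel_count])
    return df_scores.sort_values('score', ascending=False)  # EV (0.2) ranks above AI (0.1)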
def selected_filter_by_market_value(dfg, min_total_mv=None):
"""使用总市值过滤
:param dfg: 单个交易日的强势股选股结果
:param min_total_mv: 最小总市值,单位为万元,1e6万元 = 100亿
:return: 过滤后的选股结果
"""
if dfg.empty or not min_total_mv:
return dfg
return dfg[dfg['total_mv'] >= min_total_mv]
def selected_filter_by_rps(dfg, n=21, v_range=(0.2, 0.8), max_count=-1):
"""使用b20b过滤,b20b 表示前20个交易日的涨跌幅
:param dfg: 单个交易日的强势股选股结果
:param n: RPS的计算区间
:param v_range: RPS值按从大到小排序后的可选区间
默认为 0.2 ~ 0.8,表示可选区间为排序位置在 20% ~ 80% 区间的股票
:param max_count: 最多保留结果数量
:return: 过滤后的选股结果
"""
if dfg.empty or (not max_count) or len(dfg) < max_count:
return dfg
rps_col = f"b{n}b"
# dfg = dfg.sort_values(rps_col, ascending=True)
# dfg = dfg.reset_index(drop=True)
# dfg = dfg.iloc[int(len(dfg) * v_range[0]): int(len(dfg) * v_range[1])]
# return dfg.tail(max_count)
split = v_range[1]
dfg = dfg.sort_values(rps_col, ascending=True)
head_i = int((len(dfg) - max_count) * split) + 1
tail_i = len(dfg) - int((len(dfg) - max_count) * (1 - split))
return dfg.iloc[head_i: tail_i]
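def _example_rps_window():
    """Illustrative sketch of the head_i/tail_i arithmetic above with assumed sizes:
    100 rows, max_count=20 and split=0.8 keep the 19 rows ranked just below the top 20%."""
    n, max_count, split = 100, 20, 0.8
    head_i = int((n - max_count) * split) + 1
    tail_i = n - int((n - max_count) * (1 - split))
    return head_i, tail_i  # (65, 84) -> iloc[65:84]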
def create_next_positions(dc: TsDataCache, dfg: pd.DataFrame):
"""构建某天选股结果对应的下一交易日持仓明细
:param dc: 数据缓存对象
:param dfg: 单个交易日的强势股选股结果
:return: 下一交易日持仓明细
"""
if dfg.empty:
return dfg
trade_cal = dc.trade_cal()
trade_cal = trade_cal[trade_cal.is_open == 1]
trade_dates = trade_cal.cal_date.to_list()
trade_date = dfg['trade_date'].iloc[0]
hold = dfg.copy()
hold['成分日期'] = trade_dates[trade_dates.index(trade_date.strftime("%Y%m%d")) + 1]
hold['持仓权重'] = 0.98 / len(dfg)
hold.rename({'ts_code': "证券代码", "close": "交易价格"}, inplace=True, axis=1)
hold = hold[['证券代码', '持仓权重', '交易价格', '成分日期']]
hold['成分日期'] = pd.to_datetime(hold['成分日期']).apply(lambda x: x.strftime("%Y/%m/%d"))
return hold
def plot_alpha_v1(beta_name, df_alpha, file_png) -> None:
"""用三个并列线图来绘制 alpha 信息
:param beta_name: 基准指数名称
:param df_alpha: 包含 ['trade_date', 'beta', 'selector']
trade_date beta selector
0 2018-01-02 88.4782 93.471190
1 2018-01-03 45.8368 41.008785
2 2018-01-04 -0.4383 -132.660895
3 2018-01-05 45.0786 120.726060
4 2018-01-08 -0.6757 -17.231665
    :param file_png: output image file name
:return: None
"""
plt.close()
fig, axes = plt.subplots(nrows=3, ncols=1, figsize=(9, 5*3))
df_alpha['beta_curve'] = df_alpha['beta'].cumsum()
df_alpha['selector_curve'] = df_alpha['selector'].cumsum()
df_alpha['alpha_curve'] = df_alpha['selector_curve'] - df_alpha['beta_curve']
df_alpha.rename({'trade_date': 'date', 'beta_curve': f"beta_curve:{beta_name}"}, inplace=True, axis=1)
for i, col in enumerate(['alpha_curve', 'selector_curve', f"beta_curve:{beta_name}"], 0):
ax = axes[i]
sns.lineplot(x='date', y=col, data=df_alpha, ax=ax)
ax.text(x=df_alpha['date'].iloc[0], y=int(df_alpha[col].mean()),
s=f"{col}:{int(df_alpha[col].iloc[-1])}", fontsize=12)
ax.set_title(f"{col}", loc='center')
ax.set_xlabel("")
plt.savefig(file_png, bbox_inches='tight', dpi=100)
plt.close()
def plot_alpha_v2(beta_name, df_alpha, file_png) -> None:
"""用线图来绘制 alpha 信息
:param beta_name: 基准指数名称
:param df_alpha: 包含 ['trade_date', 'beta', 'selector']
trade_date beta selector
0 2018-01-02 88.4782 93.471190
1 2018-01-03 45.8368 41.008785
2 2018-01-04 -0.4383 -132.660895
3 2018-01-05 45.0786 120.726060
4 2018-01-08 -0.6757 -17.231665
    :param file_png: output image file name
:return: None
"""
df_alpha['beta_curve'] = df_alpha['beta'].cumsum()
df_alpha['selector_curve'] = df_alpha['selector'].cumsum()
df_alpha['alpha_curve'] = df_alpha['selector_curve'] - df_alpha['beta_curve']
df_alpha.rename({'trade_date': 'date', 'beta_curve': f"beta_curve:{beta_name}"}, inplace=True, axis=1)
plt.close()
plt.figure(figsize=(9, 5))
sns.lineplot(x='date', y='alpha_curve', data=df_alpha)
sns.lineplot(x='date', y='selector_curve', data=df_alpha)
sns.lineplot(x='date', y=f"beta_curve:{beta_name}", data=df_alpha)
plt.legend(labels=['超额', '选股', f"基准{beta_name}"])
plt.savefig(file_png, bbox_inches='tight', dpi=100)
def plot_alpha_v3(beta_name, df_alpha, file_png) -> None:
"""用类似MACD图来绘制 alpha 信息
:param beta_name: 基准指数名称
:param df_alpha: 包含 ['trade_date', 'beta', 'selector']
trade_date beta selector
0 2018-01-02 88.4782 93.471190
1 2018-01-03 45.8368 41.008785
2 2018-01-04 -0.4383 -132.660895
3 2018-01-05 45.0786 120.726060
4 2018-01-08 -0.6757 -17.231665
    :param file_png: output image file name
:return: None
"""
df_alpha['beta_curve'] = df_alpha['beta'].cumsum()
df_alpha['selector_curve'] = df_alpha['selector'].cumsum()
df_alpha['alpha'] = df_alpha['selector'] - df_alpha['beta']
df_alpha['alpha_curve'] = df_alpha['selector_curve'] - df_alpha['beta_curve']
df_alpha.rename({'trade_date': 'date', 'beta_curve': f"beta_curve:{beta_name}"}, inplace=True, axis=1)
plt.close()
plt.figure(figsize=(9, 5))
x = df_alpha['date']
plt.bar(x, height=df_alpha['alpha'], width=0.01, color='blue', label='alpha')
plt.plot(x, df_alpha['alpha_curve'], label='alpha_curve')
plt.plot(x, df_alpha['selector_curve'], label='selector_curve')
plt.plot(x, df_alpha[f"beta_curve:{beta_name}"], label=f"beta_curve:{beta_name}")
plt.legend()
plt.savefig(file_png, bbox_inches='tight', dpi=100)
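def _example_alpha_curves():
    """Illustrative sketch using the sample returns from the docstrings above: the plotted
    curves are cumulative sums and alpha is the selector curve minus the benchmark curve."""
    df_alpha = pd.DataFrame({'trade_date': pd.date_range('2018-01-02', periods=3),
                             'beta': [88.4782, 45.8368, -0.4383],
                             'selector': [93.471190, 41.008785, -132.660895]})
    df_alpha['beta_curve'] = df_alpha['beta'].cumsum()
    df_alpha['selector_curve'] = df_alpha['selector'].cumsum()
    df_alpha['alpha_curve'] = df_alpha['selector_curve'] - df_alpha['beta_curve']
    return df_alpha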
class StocksDaySensor:
"""以日线为基础周期的强势股票感应器
输入:市场个股全部行情、概念板块成分信息
输出:强势个股列表以及概念板块分布
"""
def __init__(self,
experiment_path: str,
sdt: str,
edt: str,
dc: TsDataCache,
strategy: Callable,
signals_n: int = 0,
):
self.name = self.__class__.__name__
self.version = "V20220404"
self.experiment_path = experiment_path
self.results_path = os.path.join(experiment_path, f"{strategy()[1]().name}_{sdt}_{edt}")
self.signals_path = os.path.join(experiment_path, 'signals')
os.makedirs(self.experiment_path, exist_ok=True)
os.makedirs(self.results_path, exist_ok=True)
os.makedirs(self.signals_path, exist_ok=True)
self.sdt = sdt
self.edt = edt
self.verbose = os.environ.get('verbose', False)
self.strategy = strategy
self.signals_n = signals_n
self.get_signals, self.get_event = strategy()
self.event: Event = self.get_event()
self.base_freq = Freq.D.value
self.freqs = [Freq.W.value, Freq.M.value]
self.file_docx = os.path.join(self.results_path, f'{self.event.name}_{sdt}_{edt}.docx')
writer = WordWriter(self.file_docx)
if not os.path.exists(self.file_docx):
writer.add_title("股票选股强度验证")
writer.add_page_break()
writer.add_heading(f"{datetime.now().strftime('%Y-%m-%d %H:%M')} {self.event.name}", level=1)
writer.add_heading("参数配置", level=2)
writer.add_paragraph(f"测试方法描述:{self.event.name}")
writer.add_paragraph(f"测试起止日期:{sdt} ~ {edt}")
writer.add_paragraph(f"信号计算函数:\n{inspect.getsource(self.get_signals)}")
writer.add_paragraph(f"事件具体描述:\n{inspect.getsource(self.get_event)}")
writer.save()
with open(os.path.join(self.results_path, f"{strategy.__name__}.txt"), mode='w') as f:
f.write(inspect.getsource(strategy))
self.writer = writer
self.dc = dc
self.betas = ['000905.SH', '000300.SH', '399006.SZ']
get_index_beta(dc, sdt, edt, freq='D', indices=self.betas,
file_xlsx=os.path.join(self.results_path, 'betas.xlsx'))
file_dfm = os.path.join(self.results_path, f'df_event_matched_{sdt}_{edt}.pkl')
file_dfb = os.path.join(self.experiment_path, f'df_all_bars_{sdt}_{edt}.pkl')
if os.path.exists(file_dfm):
self.dfm = io.read_pkl(file_dfm)
self.dfb = io.read_pkl(file_dfb)
else:
self.dfm, self.dfb = self.get_stock_strong_days()
io.save_pkl(self.dfm, file_dfm)
io.save_pkl(self.dfb, file_dfb)
self.nb_cols = [x for x in self.dfb.columns if x[0] == 'n' and x[-1] == 'b']
def get_share_strong_days(self, ts_code: str, name: str):
"""获取单个标的全部强势信号日期"""
dc = self.dc
event = self.event
sdt = self.sdt
edt = self.edt
file_signals = os.path.join(self.signals_path, f"{ts_code}.pkl")
if os.path.exists(file_signals):
signals, n_bars = io.read_pkl(file_signals)
if self.verbose:
print(f"get_share_strong_days: load signals from {file_signals}")
else:
start_date = pd.to_datetime(self.sdt) - timedelta(days=3000)
bars = dc.pro_bar(ts_code=ts_code, start_date=start_date, end_date=edt, freq='D', asset="E", raw_bar=True)
n_bars = dc.pro_bar(ts_code=ts_code, start_date=sdt, end_date=edt, freq='D', asset="E", raw_bar=False)
signals = generate_signals(bars, sdt, self.base_freq, self.freqs, self.get_signals,
signals_n=self.signals_n)
io.save_pkl([signals, n_bars], file_signals)
nb_dicts = {row['trade_date'].strftime("%Y%m%d"): row for row in n_bars.to_dict("records")}
event_matched = []
for s in signals:
m, f = event.is_match(s)
if m:
nb_info = nb_dicts.get(s['dt'].strftime("%Y%m%d"), None)
r = {'name': name, 'event_match': True, 'factor_match': f}
if nb_info:
r.update(nb_info)
event_matched.append(r)
dfs = pd.DataFrame(event_matched)
if event_matched:
df_ = dc.daily_basic(ts_code, sdt, dc.edt)
df_['trade_date'] = pd.to_datetime(df_['trade_date'])
dfs = dfs.merge(df_[['trade_date', 'total_mv']], on='trade_date', how='left')
dfs = dfs[pd.to_datetime(sdt) <= dfs['trade_date']]
dfs = dfs[dfs['trade_date'] <= pd.to_datetime(edt)]
print(f"{ts_code} - {name}: {len(dfs)}")
return dfs, n_bars
def get_stock_strong_days(self):
"""获取全部股票的强势日期"""
stocks = self.dc.stock_basic()
all_matched = []
all_bars = []
for row in tqdm(stocks.to_dict('records'), desc="get_stock_strong_days"):
ts_code = row['ts_code']
name = row['name']
try:
dfs, n_bars = self.get_share_strong_days(ts_code, name)
all_matched.append(dfs)
all_bars.append(n_bars)
except:
print(f"get_share_strong_days error: {ts_code}, {name}")
traceback.print_exc()
dfm = | pd.concat(all_matched, ignore_index=True) | pandas.concat |
"""cleaners.py contains functions used to refine the contents of data sets.
All functions work on a pandas dataframe. At times, the order in which functions are called is
important, as some options may cause the insertion of NaNs, which may be undesired for future use.
Processing time may be reduced by reducing the data set to a specific time frame first.
"""
from datetime import datetime, timedelta
from time import mktime
import numpy
import pandas as pd
def duplicates_remove(dataframe: pd.DataFrame,
data_replacement: str = 'none',
removal_time_frame: str = 'day',
fault_placement: str = 'start') -> pd.DataFrame:
"""Removes duplicates for data frame.
The cleaning method can be specified using the different parameters.
Data replacement describes how an error should be handled. None will remove all
duplicate timestamps without refilling. One of the following data replacements methods
must be used:
- first : removes duplicates except for the first occurrence.
- last : removes duplicates except for the last occurrence.
- average: not implemented yet
- max: not implemented yet
    - remove: Removes the data according to the user's specifications
    - none : no duplicate is kept.
    It may be of interest to remove more data than the actual faulty data point: a whole day
    (by date), the whole data set, or some hours. One of the following removal_time_frames must
    be chosen:
    - day: 24 hours of data will be removed
    - hour: 1 hour of data will be removed
    - all: all data will be removed
    The fault placement determines the position of the faulty data point: in the middle, at the end or at the
    start of the removed data. One of the following fault placements are possible:
    - start: fault is placed at the beginning of the data that is removed (eg. 1 hour after the fault is removed)
    - middle: fault is placed in the middle of the data that is removed (eg. 30 min before and after the fault is removed)
    - end: fault is placed at the end of the data that is removed (eg. 1 hour before the fault is removed)
Args:
dataframe (pd.DataFrame): Dataframe with data to be checked for duplicates
data_replacement (str, 'none'): Describes the way data shall be removed. Acceptable values
are first, last, average, max, remove, none.
removal_time_frame (str: 'day'): Describes the time frame that is removed. Acceptable values
are day, hour, all.
fault_placement (str, 'start'): Describes where the error is placed.
Returns:
dataframe (pd.DataFrame): Dataframe without duplicates.
"""
    # index.duplicated marks all occurrences as True except the one indicated by the keep argument;
    # to remove all duplicates the resulting boolean array needs to be inverted
if data_replacement == 'first':
dataframe = dataframe[~dataframe.index.duplicated(keep='first')]
elif data_replacement == 'last':
dataframe = dataframe[~dataframe.index.duplicated(keep='last')]
elif data_replacement == 'none':
dataframe = dataframe[~dataframe.index.duplicated(keep=False)]
elif data_replacement == 'average':
print("not yet implemented (average)")
elif data_replacement == 'max':
print("not yet implemented (max)")
elif data_replacement == 'remove':
# here all data point that are duplicates are marked true
index = dataframe.index[dataframe.duplicated(keep=False)]
for timestamp in index:
# slicing returns a dataframe from start to end
# to remove a slice from a dataframe has to be done inverted.
# 0 to start index and end index to end of dataframe need to be kept
if removal_time_frame == 'day':
index_date_start, index_date_end = find_time_range_method_day(timestamp,
fault_placement)
dataframe = slice_by_index(dataframe,
timestamp_start=index_date_start,
timestamp_end=index_date_end)
elif removal_time_frame == 'hour':
index_date_start, index_date_end = find_time_range_method_hour(timestamp,
fault_placement)
dataframe = slice_by_index(dataframe,
timestamp_start=index_date_start,
timestamp_end=index_date_end)
elif removal_time_frame == 'all':
                dataframe = pd.DataFrame()
return dataframe
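def _example_duplicates_remove():
    """Illustrative usage sketch (hypothetical data): two rows share the same unix timestamp
    and only the first occurrence is kept."""
    df = pd.DataFrame({'load': [1.0, 2.0, 3.0]},
                      index=[1600000000, 1600000000, 1600000600])
    return duplicates_remove(df, data_replacement='first')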
def handle_non_numericals(dataframe: pd.DataFrame) -> pd.DataFrame:
"""Replaces all non numerical values like strings in a dataframe with nan.
Args:
dataframe (pd.DataFrame): Dataframe with data to be checked for non numerical values.
Returns:
dataframe (pd.DataFrame): Dataframe with non numerical values changed to nans.
"""
columns = list(dataframe)
# to numeric can only handle scalars, lists, tuples, 1 - d arrays, or Series
for column in columns:
dataframe[column] = pd.to_numeric(dataframe[column], errors='coerce')
return dataframe
def handle_nans(dataframe: pd.DataFrame, data_replacement: str = 'none',
removal_time_frame: str = 'day', fault_placement: str = 'start') -> pd.DataFrame:
"""Removes NaNs from a dataframe.
The cleaning method can be specified using the different parameters.
Data replacement describes how an error should be handled. None will remove all
duplicate timestamps without refilling. One of the following data replacements methods
must be used:
- drop: NANs are dropped.
- zero: Fills all NANs with 0 (value).
    - first: Uses the previous non-NaN occurrence.
    - last: Uses the next non-NaN occurrence.
- none: Nothing is changed.
- average: not implemented yet
- max: not implemented yet
    - remove: Removes the data according to the user's specifications.
    It may be of interest to remove more data than the actual faulty data point: a whole day
    (by date), the whole data set, or some hours. One of the following removal_time_frames must
be chosen:
- day: 24 hours of data will be removed.
- hour: 1 hour of data will be removed.
- all: All data will be removed.
    The fault placement determines the position of the data point: in the middle, at the end or at the
start of the data. One of the following fault placements are possible:
- start: Fault is placed at the beginning of the data that is removed (eg. 1 hour after the fault is removed).
- middle: Fault is placed in the middle of the data that is removed (eg. 30 min before and after the fault is removed).
- end: Fault is placed at the end of the data that is removed (eg. 1 hour before the fault is removed).
Args:
dataframe (pd.DataFrame): Dataframe with data to be fixed.
data_replacement (str, 'none'): Describes the way data shall be removed.
removal_time_frame (str, 'day'): Describes the time frame that is removed.
fault_placement (str, 'start'): Describes where the error is placed.
Returns:
dataframe (pd.DataFrame): Dataframe with NaNs removed.
"""
if data_replacement == 'drop':
dataframe = dataframe.dropna()
elif data_replacement == 'zero':
dataframe = dataframe.fillna(0)
elif data_replacement == 'first':
        dataframe = dataframe.ffill()
    elif data_replacement == 'last':
        dataframe = dataframe.bfill()
elif data_replacement == 'average':
print("not yet implemented (average)")
elif data_replacement == 'max':
print("not yet implemented (max)")
elif data_replacement == 'remove':
# gets index for wrong signs
index = dataframe.index[dataframe.isnull().any(axis=1)]
# iterates through index
for timestamp in index:
# converts index to date time
index_date_start = timestamp
index_date_end = timestamp
if removal_time_frame == 'day':
index_date_start, index_date_end = find_time_range_method_day(timestamp,
fault_placement)
dataframe = slice_by_index(dataframe,
timestamp_start=index_date_start,
timestamp_end=index_date_end)
elif removal_time_frame == 'hour':
index_date_start, index_date_end = find_time_range_method_hour(timestamp,
fault_placement)
dataframe = slice_by_index(dataframe,
timestamp_start=index_date_start,
timestamp_end=index_date_end)
elif removal_time_frame == 'all':
dataframe = pd.DataFrame()
return dataframe
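def _example_handle_nans():
    """Illustrative usage sketch (hypothetical data): the missing reading is forward-filled
    from the previous valid value."""
    df = pd.DataFrame({'load': [1.0, numpy.nan, 3.0]},
                      index=[1600000000, 1600000600, 1600001200])
    return handle_nans(df, data_replacement='first')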
def remove_negative_values(dataframe: pd.DataFrame, data_replacement: str = 'none',
removal_time_frame: str = 'day', fault_placement: str = 'start',
coloumn_index: int = 0) -> pd.DataFrame:
"""Handles the occurrence of negative values in a dataframe, which may be assumed
to be faulty data points. The cleaning method can be specified using the different
parameters
Data replacement describes how an error should be handled. None will remove all
duplicate timestamps without refilling. One of the following data replacements methods
must be used:
- drop: NANs are dropped.
- zero: Fills all NANs with 0 (value).
- nan: Fills negative values with NaN's.
- none: Nothing is changed.
    - remove: Removes the data according to the user's specifications.
    It may be of interest to remove more data than the actual faulty data point: a whole day
    (by date), the whole data set, or some hours. One of the following removal_time_frames must
    be chosen:
    - day: 24 hours of data will be removed.
    - hour: 1 hour of data will be removed.
    - all: All data will be removed.
    The fault placement determines the position of the data point: in the middle, at the end or at the
    start of the data. One of the following fault placements are possible:
    - start: Fault is placed at the beginning of the data that is removed (eg. 1 hour after the fault is removed)
    - middle: Fault is placed in the middle of the data that is removed (eg. 30 minutes before and after the fault is removed)
    - end: Fault is placed at the end of the data that is removed (eg. 1 hour before the fault is removed)
Args:
dataframe (pd.dataframe): Dataframe with data to be fixed
data_replacement (str, 'none'): Describes the way data shall be removed. Acceptable values
are drop, zero, nan, none, remove.
removal_time_frame (str, 'day'): Describes the time frame that is removed. Acceptable values
are day, hour, all.
fault_placement (str, 'start'): Describes where the error is placed. Acceptable values are
start, middle, end.
coloumn_index(int): index of column that should be cleaned from negative values
Returns:
dataframe (pd.dataframe): Dataframe with cleaned data.
"""
    nu_negative_loads_occurrences = numpy.sum((dataframe.iloc[:, coloumn_index] < 0).values.ravel())
if nu_negative_loads_occurrences > 0:
if data_replacement == 'zero':
dataframe[dataframe.iloc[:, coloumn_index] < 0] = 0
elif data_replacement == 'nan':
dataframe[dataframe.iloc[:, coloumn_index] < 0] = numpy.nan
elif data_replacement == 'drop':
dataframe.drop(dataframe[(dataframe.iloc[:, coloumn_index] < 0)].index, inplace=True)
elif data_replacement == 'remove':
# gets index for wrong signs
            # select the faulty (negative) values, not the positive ones
            index = dataframe[(dataframe.iloc[:, coloumn_index] < 0)].index
# iterates through index
for timestamp in index:
                # converts index to date time
index_date_start = timestamp
index_date_end = timestamp
if removal_time_frame == 'day':
index_date_start, index_date_end = find_time_range_method_day(timestamp,
fault_placement)
dataframe = slice_by_index(dataframe,
timestamp_start=index_date_start,
timestamp_end=index_date_end)
elif removal_time_frame == 'hour':
index_date_start, index_date_end = find_time_range_method_hour(timestamp,
fault_placement)
dataframe = slice_by_index(dataframe,
timestamp_start=index_date_start,
timestamp_end=index_date_end)
elif removal_time_frame == 'all':
dataframe = pd.DataFrame()
return dataframe
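def _example_remove_negative_values():
    """Illustrative usage sketch (hypothetical data): the negative load reading is treated
    as faulty and replaced by NaN."""
    df = pd.DataFrame({'load': [1.0, -2.0, 3.0]},
                      index=[1600000000, 1600000600, 1600001200])
    return remove_negative_values(df, data_replacement='nan')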
def remove_positive_values(dataframe: pd.DataFrame, data_replacement: str = 'none',
removal_time_frame: str = 'day', fault_placement: str = 'start',
column_index: int = 0) -> pd.DataFrame:
"""Handles the occurrence of positive values in a dataframe, which may be assumed
to be faulty data points. The cleaning method can be specified using the different
parameters.
Data replacement describes how an error should be handled.
One of the following data replacements methods must be used:
- drop: NANs are dropped.
- zero: Fills all NANs with 0 (value).
    - nan: Fills positive values with NaN's.
    - none: Nothing is changed.
    - remove: Removes the data according to the user's specifications.
    It may be of interest to remove more data than the actual faulty data point: a whole day
    (by date), the whole data set, or some hours. One of the following removal_time_frames must
    be chosen:
    - day: 24 hours of data will be removed.
    - hour: 1 hour of data will be removed.
    - all: All data will be removed.
    The fault placement determines the position of the data point: in the middle, at the end or at the
    start of the data. One of the following fault placements are possible:
    - start: Fault is placed at the beginning of the data that is removed (eg. 1 hour after the fault is removed)
    - middle: Fault is placed in the middle of the data that is removed (eg. 30 minutes before and after the fault is removed)
    - end: Fault is placed at the end of the data that is removed (eg. 1 hour before the fault is removed)
Args:
dataframe (pd.dataframe): Dataframe with data to be fixed.
data_replacement (str, 'none'): Describes the way data shall be removed. Acceptable values
are drop, zero, nan, none, remove.
removal_time_frame (str, 'day'): Describes the time frame that is removed. Acceptable values
are day, hour, all.
fault_placement (str, 'start'): Describes where the error is placed. Acceptable values are
start, middle, end.
column_index(int): index of column that should be cleaned from positive values
Returns:
dataframe (pd.dataframe): Dataframe with cleaned data.
"""
if data_replacement == 'zero':
dataframe[dataframe.iloc[:, column_index] > 0] = 0
elif data_replacement == 'nan':
dataframe[dataframe.iloc[:, column_index] > 0] = numpy.nan
elif data_replacement == 'drop':
dataframe.drop(dataframe[(dataframe.iloc[:, column_index] > 0)].index, inplace=True)
elif data_replacement == 'remove':
# gets index for wrong signs
index = dataframe[(dataframe.iloc[:, column_index] > 0)].index
# iterates through index
for timestamp in index:
# converts index to date time
index_date_start = timestamp
index_date_end = timestamp
if removal_time_frame == 'day':
index_date_start, index_date_end = find_time_range_method_day(timestamp,
fault_placement)
dataframe = slice_by_index(dataframe,
timestamp_start=index_date_start,
timestamp_end=index_date_end)
elif removal_time_frame == 'hour':
index_date_start, index_date_end = find_time_range_method_hour(timestamp,
fault_placement)
dataframe = slice_by_index(dataframe,
timestamp_start=index_date_start,
timestamp_end=index_date_end)
elif removal_time_frame == 'all':
dataframe = pd.DataFrame()
return dataframe
def find_time_range_method_hour(timestamp: int, fault_placement: str = "end") -> tuple:
""""The method calculates the start and end time for a data removal.
The time frame considered is here 1h.
Todo: User should be able to choose amount of hours to be removed
Note: the start and end time returned is where the time frame starts and ends. If that used to
removed data with pd.loc function it needs to be called twice (start of data frame to
start_time and end_time to end of data frame)
The timerange determine the position of the data point in the middle, at the end or at the
start of the data. One of the following fault placements are possible:
- start: The fault is placed at the beginning of the data that is removed (eg. 1 hour after the fault is removed).
- middle: The fault is placed in the middle of the data that is removed (eg. 30 minutes before and after the fault is removed).
- end: The fault is placed at the end of the data that is removed (eg. 1 hour before the fault is removed).
Args:
timestamp (int): timestamp in unixtime around which data needs to be removed
fault_placement (str, 'end'): Describes where the error is placed. Acceptable values are
start, middle, end.
Returns:
start_time (datetime): Timestamp in datetime format ("%Y-%m-%d %H:%M") for the start time of
data removal.
end_time (datetime): Timestamp in datetime format ("%Y-%m-%d %H:%M") for the end time of
data removal.
"""
date = datetime.fromtimestamp(timestamp)
if fault_placement == "end":
index_date_end = date.strftime('%Y-%m-%d %H:%M')
index_date_start = date - timedelta(hours=1)
index_date_start = index_date_start.strftime('%Y-%m-%d %H:%M')
elif fault_placement == "start":
index_date_start = date.strftime('%Y-%m-%d %H:%M')
index_date_end = date + timedelta(hours=1)
index_date_end = index_date_end.strftime('%Y-%m-%d %H:%M')
elif fault_placement == "middle":
index_date_start = date - timedelta(hours=1 / 2)
index_date_start = index_date_start.strftime('%Y-%m-%d %H:%M')
index_date_end = date + timedelta(hours=1 / 2)
index_date_end = index_date_end.strftime('%Y-%m-%d %H:%M')
else:
        index_date_start = date.strftime('%Y-%m-%d %H:%M')
        index_date_end = date.strftime('%Y-%m-%d %H:%M')
index_date_start = datetime.strptime(index_date_start, "%Y-%m-%d %H:%M")
index_date_end = datetime.strptime(index_date_end, "%Y-%m-%d %H:%M")
return int(mktime(index_date_start.timetuple())), int(mktime(index_date_end.timetuple()))
def find_time_range_method_day(timestamp: tuple, fault_placement: str = 'start'):
""""The method calculates the start and end time for a data removal.
The time frame considered is here 1 day.
Note: the start and end time returned is where the time frame starts and ends. If that used to
removed data with pd.loc function it needs to be called twice (start of data frame to
start_time and end_time to end of data frame)
Todo: User should be able to choose amount of days to be removed
The timerange determine the position of the data point in the middle, at the end or at the
start of the data. One of the following fault placements are possible:
- start: The fault is placed at the beginning of the data that is removed (eg. 1 hour after the fault is removed).
- middle: The fault is placed in the middle of the data that is removed (eg. 30 minutes before and after the fault is removed).
- end: The fault is placed at the end of the data that is removed (eg. 1 hour before the fault is removed).
Args:
timestamp (int): Timestamp in unix time around which data needs to be removed.
fault_placement (str, 'start'): Describes where the error is placed.
Returns:
start_time (datetime): Timestamp in datetime format ("%Y-%m-%d %H:%M") for the start time of
data removal.
end_time (datetime): Timestamp in datetime format ("%Y-%m-%d %H:%M") for the end time of
data removal.
"""
date = datetime.fromtimestamp(timestamp)
if fault_placement == "start":
# removes 24 before the timestamp
index_date_start = date.strftime('%Y-%m-%d %H:%M')
index_date_end = date + timedelta(days=1)
index_date_end = index_date_end.strftime('%Y-%m-%d %H:%M')
elif fault_placement == "end":
# removes 24 before the timestamp
index_date_end = date.strftime('%Y-%m-%d %H:%M')
index_date_start = date - timedelta(days=1)
index_date_start = index_date_start.strftime('%Y-%m-%d %H:%M')
elif fault_placement == "middle":
# removes 12h before/after the timestamp
index_date_start = date - timedelta(hours=12)
index_date_start = index_date_start.strftime('%Y-%m-%d %H:%M')
index_date_end = date + timedelta(hours=12)
index_date_end = index_date_end.strftime('%Y-%m-%d %H:%M')
else:
        index_date_start = date.strftime('%Y-%m-%d %H:%M')
        index_date_end = date.strftime('%Y-%m-%d %H:%M')
index_date_start = datetime.strptime(index_date_start, "%Y-%m-%d %H:%M")
index_date_end = datetime.strptime(index_date_end, "%Y-%m-%d %H:%M")
return int(mktime(index_date_start.timetuple())), int(mktime(index_date_end.timetuple()))
def find_time_range_method_calendarday(timestamp: tuple):
""""The method calculates the start and end time for a data removal.
The time frame considered is here 1 day.
Note: the start and end time returned is where the time frame starts and ends. If that used to
removed data with pd.loc function it needs to be called twice (start of data frame to
start_time and end_time to end of data frame)
Todo: User should be able to choose amount of days to be removed
The timerange determine the position of the data point in the middle, at the end or at the
start of the data. One of the following fault placements are possible:
- start: The fault is placed at the beginning of the data that is removed (eg. 1 hour after the fault is removed).
- middle: The fault is placed in the middle of the data that is removed (eg. 30 minutes before and after the fault is removed).
- end: The fault is placed at the end of the data that is removed (eg. 1 hour before the fault is removed).
Args:
timestamp (int): Timestamp in unix time around which data needs to be removed.
Returns:
start_time (datetime): Timestamp in datetime format ("%Y-%m-%d %H:%M") for the start time of
data removal.
end_time (datetime): Timestamp in datetime format ("%Y-%m-%d %H:%M") for the end time of
data removal.
"""
date = datetime.fromtimestamp(timestamp)
    # start at midnight of the calendar day, end at midnight of the following day
    index_date_start = date.strftime('%Y-%m-%d 00:00')
    index_date_end = date + timedelta(days=1)
    index_date_end = index_date_end.strftime('%Y-%m-%d 00:00')
    index_date_start = datetime.strptime(index_date_start, "%Y-%m-%d %H:%M")
index_date_end = datetime.strptime(index_date_end, "%Y-%m-%d %H:%M")
return int(mktime(index_date_start.timetuple())), int(mktime(index_date_end.timetuple()))
def slice_by_index(dataframe: pd.DataFrame, timestamp_start: int = None,
timestamp_end: int = None) -> pd.DataFrame:
"""cuts out the data in between the timestamps given and returns the data to both sides of the
time range given. If one start is not provided, it is assumed to be the start of the data frame.
If end is not provided its assumed to be the end of the data frame
Args:
dataframe (pd.DataFrame): Data frame to be sliced
timestamp_start (int): index of first data point (inclusive, unix timestamp) .
timestamp_end (int): index of last data point (inclusive, unix time stamp)
Returns:
dataframe (pd.DataFrame): sliced pd DataFrame.
"""
if timestamp_start is None:
timestamp_start = dataframe.first_valid_index()
if timestamp_end is None:
timestamp_end = dataframe.last_valid_index()
dataframe = dataframe[(dataframe.index < timestamp_start) | (dataframe.index > timestamp_end)]
return dataframe
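def _example_slice_by_index():
    """Illustrative usage sketch (hypothetical data): the sample inside the given range is cut
    out and the data on both sides of it is returned."""
    df = pd.DataFrame({'load': [1.0, 2.0, 3.0]},
                      index=[1600000000, 1600001800, 1600003600])
    return slice_by_index(df, timestamp_start=1600001000, timestamp_end=1600002000)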
def time_filter_data(dataframe: pd.DataFrame, timestamp_start: int = None,
timestamp_end: int = None) -> pd.DataFrame:
"""reduce a dataframe based on the provided times start and end timestamp. It is assumed that
the provided time stamp are not necessarily in the data, an approximation is used to slice as
accurately as possible. If start is not provided, it is assumed to be the
start of the data frame. If end is not provided its assumed to be the end of the data frame.
Note: the index will be sorted in order to enable slicing
Args:
dataframe (pd.DataFrame): Data frame to be sliced
timestamp_start (int): index of first data point (inclusive, unix timestamp) .
timestamp_end (int): index of last data point (inclusive, unix time stamp)
Returns:
dataframe (pd.DataFrame): sliced pd DataFrame.
"""
dataframe = dataframe.sort_index()
if timestamp_start is None:
print("start index was not provided")
timestamp_start = dataframe.first_valid_index()
if timestamp_end is None:
print("end index is not provided")
timestamp_end = dataframe.last_valid_index()
reduced_dataframe = dataframe[(dataframe.index > timestamp_start) & (dataframe.index < timestamp_end)]
return reduced_dataframe
def resample(dataframe: pd.DataFrame, resampling_step: int = None, resampling_unit: str = 't',
resampling_strategy_upsampling: str = 'first') -> pd.DataFrame:
"""Resample data to desired spacing.
    If the raw resolution is finer, values are averaged (mean). If the raw resolution is coarser, the previous value
    can be chosen (ffill) or the next value (last/bfill) can be used.
    In special circumstances it may be useful to fill the missing data with NaN (nan).
The following describes the parameters in more detail:
- resampling_step: This is the desired time step of final dataframe.
- resampling_unit: The unit of desired time step. Possible units are:
- h hour +/- 1.0e15 years [1.0e15 BC, 1.0e15 AD]
- t minute +/- 1.7e13 years [1.7e13 BC, 1.7e13 AD]
- s second +/- 2.9e12 years [ 2.9e9 BC, 2.9e9 AD]
One of the following upsampling strategies are possible
- first: The value before the newly inserted value is chosen (ffill).
- last: The next value after the newly inserted value is chosen.
Args:
dataframe (pd.DataFrame): The dataframe to be resampled.
resampling_step (int, 8): This is the desired time step of final dataframe.
resampling_unit (str, 't'): unit of desired time step
resampling_strategy_upsampling (str, 'first', nan): Define how the upsampling is conducted.
Returns:
dataframe_tmp (pd.DataFrame): The resampled dataframe.
"""
if resampling_step is not None and len(dataframe) > 1:
delta_time_tmp = pd.to_timedelta(resampling_step, resampling_unit)
# force index and delta time to have the same unit
delta_time_tmp = pd.to_timedelta(delta_time_tmp, 's')
        if dataframe.index.dtype in ['timedelta64[s]', 'timedelta64[m]', 'timedelta64[h]']:
delta_time_tmp_raw = pd.to_timedelta((dataframe.index[1] - dataframe.index[0]), 's')
else:
delta_time_tmp_raw = pd.to_timedelta(int(dataframe.index[1] - dataframe.index[0]), 's')
if delta_time_tmp == delta_time_tmp_raw:
print("Raw data sample rate is at desired rate", delta_time_tmp_raw)
return dataframe
# Temporarily make datetime the index to allow for resampling
dataframe['ts'] = pd.to_datetime(dataframe.index, unit='s')
dataframe['original index'] = dataframe.index
dataframe = dataframe.set_index('ts')
# Raw data has finer resolution than requested - down sample by averaging
if delta_time_tmp_raw < delta_time_tmp:
dataframe_tmp = dataframe.resample(delta_time_tmp).mean()
# Raw data has coarser resolution than requested - up sample by infilling
elif delta_time_tmp_raw > delta_time_tmp:
if resampling_strategy_upsampling == 'first':
dataframe_tmp = dataframe.resample(delta_time_tmp).ffill()
elif resampling_strategy_upsampling == 'last':
dataframe_tmp = dataframe.resample(delta_time_tmp).bfill()
elif resampling_strategy_upsampling == 'nan':
dataframe_tmp = dataframe.resample(delta_time_tmp).asfreq()
dataframe_tmp['original index'] = dataframe_tmp.index.astype(numpy.int64) // 10 ** 9
dataframe_tmp.reset_index(drop=True, inplace=True)
dataframe_tmp['ts'] = dataframe_tmp['original index']
dataframe_tmp = dataframe_tmp.set_index('ts')
dataframe_tmp = dataframe_tmp.drop('original index', axis='columns')
else:
print("Unable to resample (step is invalid or to little data)")
dataframe_tmp = dataframe
return dataframe_tmp
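def _example_downsample_mean():
    """Illustrative sketch (hypothetical data) of the down-sampling branch above: 10-minute
    readings are averaged into 30-minute bins, as resample(df, 30, 't') would do."""
    ts = pd.to_datetime([1600000000 + 600 * i for i in range(6)], unit='s')
    df = pd.DataFrame({'load': [1.0, 2.0, 3.0, 4.0, 5.0, 6.0]}, index=ts)
    return df.resample('30min').mean()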
def data_refill(dataframe: pd.DataFrame, days: int = 7, attempts: int = 7, threshold: int = 5,
forward_fill: bool = True, backward_fill: bool = True):
""" refills data with future data or data from the past.
Since the data may be influenced by weather or season. It may be useful to adjust
the days to jump and the attempts used. If jumping for a week for 7 attempts to find
a suitable block my change the season in which the data was recorded. Attempt * days
gives a reasonable estimate on how far away a sample can be from the original block
Note: in the data frame provide missing data needs to replaced by NaN.
Functions for cleaning used prior to this function should be set up accordingly
Note: this function assumes a full index (use force_full_index beforehand)
Args:
dataframe(dataframe): data frame containing NaN values
days(int): Number of days that should be jumped for each try
attempts(int): number of tries to find a matching dataset
threshold(int): number of samples that are considered to be a block
forward_fill(bool): use data from the future
backward_fill(bool): use data from the past
Returns:
dataframe: a refilled dataframe that may still contain NaN due to impossible refills.
"""
# check which rows contain NaNs
if dataframe.isnull().any().any():
print("NaN's detected proceed with data refill")
check_for_nan = dataframe.isnull().any(axis=1)
# find all indexes for nans and numbers
blocks = (check_for_nan != check_for_nan.shift()).cumsum()
# Filter to create a series with (index_start, index_end, count) for NaN blocks
indices = blocks.groupby(blocks).apply(lambda x: (x.index[0],
x.index[-1],
(x.index[-1] - x.index[0])
if check_for_nan[x.index[0]]
else numpy.NaN))
# drop all nan blocks
indices = indices.dropna()
indices = pd.DataFrame(indices.tolist(),
columns=['out-1', 'out-2', 'out-3'],
index=indices.index)
# remove Rows that don't match the threshold
# timestep between samples in unit of timestamps
        delta_time_tmp = dataframe.index[1] - dataframe.index[0]
# threshold describes the number of samples that make a block of missing data
# (needs to consider the spacing between data)
threshold_sec = threshold * delta_time_tmp
indices = indices[indices['out-3'] >= threshold_sec]
# list with indices that need replacement
indices_list = indices.values.tolist()
print("found ", len(indices_list), "blocks of NaN's")
# find suitable replacement data
for block in indices_list:
benchmark = block[2] / delta_time_tmp
attempt = 0
# check for future data to refill
if forward_fill:
for iter in range(1, attempts):
timediff = pd.Timedelta(days, unit='D')
start = int(mktime((datetime.fromtimestamp(block[0]) + (timediff*iter)).timetuple()))
end = int(mktime((datetime.fromtimestamp(block[1]) + (timediff*iter)).timetuple()))
replacement_block = time_filter_data(dataframe, start, end)
if not replacement_block.empty:
num_of_nan = replacement_block.isnull().sum().sum()
# check if they are better suited (less NaN)
if benchmark > num_of_nan:
benchmark = num_of_nan
attempt = iter
if backward_fill:
for iter in range(1, attempts):
timediff = | pd.Timedelta(days, unit='D') | pandas.Timedelta |
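# A small sketch, for illustration only, of how the shift/cumsum trick used in
# data_refill above labels consecutive runs of NaN and non-NaN rows; the sample
# values below are assumptions made up purely for the demo.
def _nan_block_demo():
    import numpy as np
    import pandas as pd
    s = pd.Series([1.0, np.nan, np.nan, 2.0, np.nan])
    is_nan = s.isnull()
    block_ids = (is_nan != is_nan.shift()).cumsum()
    # block_ids -> 1, 2, 2, 3, 4: each run with the same NaN-status shares one label
    return block_ids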
import pandas as pd
import numpy as np
import math
from statistics import mean
from sklearn.decomposition import PCA
from sklearn.metrics import r2_score, mean_absolute_error, mean_squared_error, mean_squared_log_error
import os
root_path = os.path.dirname(os.path.abspath('__file__'))
# root_path = os.path.abspath(os.path.join(root_path,os.path.pardir))
from metrics_ import PPTS,mean_absolute_percentage_error
def read_two_stage(station,decomposer,predict_pattern,wavelet_level="db10-2"):
if decomposer=="dwt":
model_path = root_path+"\\"+station+"_"+decomposer+"\\projects\\esvr\\"+wavelet_level+"\\"+predict_pattern+"\\"
else:
model_path = root_path+"\\"+station+"_"+decomposer+"\\projects\\esvr\\"+predict_pattern+"\\"
predictions = pd.DataFrame()
time_cost=[]
for j in range(1,11):
model_name = station+"_"+decomposer+"_esvr_"+predict_pattern+"_seed"+str(j)+".csv"
data = pd.read_csv(model_path+model_name)
if j==1:
records = data['test_y'][0:120]
test_pred=data['test_pred'][0:120]
time_cost.append(data['time_cost'][0])
test_pred=test_pred.reset_index(drop=True)
predictions = pd.concat([predictions,test_pred],axis=1)
predictions = predictions.mean(axis=1)
records = records.values.flatten()
predictions = predictions.values.flatten()
r2=r2_score(y_true=records,y_pred=predictions)
nrmse=math.sqrt(mean_squared_error(y_true=records,y_pred=predictions))/(sum(records)/len(predictions))
mae=mean_absolute_error(y_true=records,y_pred=predictions)
mape=mean_absolute_percentage_error(y_true=records,y_pred=predictions)
ppts=PPTS(y_true=records,y_pred=predictions,gamma=5)
time_cost=mean(time_cost)
return records,predictions,r2,nrmse,mae,mape,ppts,time_cost
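# A hedged sketch of the seed-averaging pattern used above: per-seed predictions are
# concatenated column-wise and averaged before the metrics are computed; the numbers
# here are illustrative assumptions, not real model output.
def _seed_average_demo():
    import math
    import pandas as pd
    from sklearn.metrics import mean_squared_error
    preds = pd.concat([pd.Series([1.0, 2.0, 3.0]), pd.Series([1.2, 1.8, 3.1])], axis=1)
    mean_pred = preds.mean(axis=1)
    records = pd.Series([1.1, 2.0, 3.0])
    nrmse = math.sqrt(mean_squared_error(records, mean_pred)) / records.mean()
    return mean_pred, nrmse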
def read_two_stage_traindev_test(station,decomposer,predict_pattern,wavelet_level="db10-2"):
if decomposer=="dwt":
model_path = root_path+"\\"+station+"_"+decomposer+"\\projects\\esvr\\"+wavelet_level+"\\"+predict_pattern+"\\"
else:
model_path = root_path+"\\"+station+"_"+decomposer+"\\projects\\esvr\\"+predict_pattern+"\\"
test_predss = pd.DataFrame()
dev_predss = pd.DataFrame()
time_cost=[]
for j in range(1,11):
model_name = station+"_"+decomposer+"_esvr_"+predict_pattern+"_seed"+str(j)+".csv"
data = pd.read_csv(model_path+model_name)
if j==1:
test_y = data['test_y'][0:120]
dev_y = data['dev_y'][0:120]
dev_pred=data['dev_pred'][0:120]
test_pred=data['test_pred'][0:120]
time_cost.append(data['time_cost'][0])
dev_pred=dev_pred.reset_index(drop=True)
test_pred=test_pred.reset_index(drop=True)
test_predss = pd.concat([test_predss,test_pred],axis=1)
dev_predss = pd.concat([dev_predss,dev_pred],axis=1)
test_predss = test_predss.mean(axis=1)
dev_predss = dev_predss.mean(axis=1)
test_y = test_y.values.flatten()
dev_y = dev_y.values.flatten()
test_predss = test_predss.values.flatten()
dev_predss = dev_predss.values.flatten()
test_nse=r2_score(y_true=test_y,y_pred=test_predss)
test_nrmse=math.sqrt(mean_squared_error(y_true=test_y,y_pred=test_predss))/(sum(test_y)/len(test_predss))
test_mae=mean_absolute_error(y_true=test_y,y_pred=test_predss)
test_mape=mean_absolute_percentage_error(y_true=test_y,y_pred=test_predss)
test_ppts=PPTS(y_true=test_y,y_pred=test_predss,gamma=5)
dev_nse=r2_score(y_true=dev_y,y_pred=dev_predss)
dev_nrmse=math.sqrt(mean_squared_error(y_true=dev_y,y_pred=dev_predss))/(sum(dev_y)/len(dev_predss))
dev_mae=mean_absolute_error(y_true=dev_y,y_pred=dev_predss)
dev_mape=mean_absolute_percentage_error(y_true=dev_y,y_pred=dev_predss)
dev_ppts=PPTS(y_true=dev_y,y_pred=dev_predss,gamma=5)
metrics_dict={
"dev_nse":dev_nse,
"dev_nrmse":dev_nrmse,
"dev_mae":dev_mae,
"dev_mape":dev_mape,
"dev_ppts":dev_ppts,
"test_nse":test_nse,
"test_nrmse":test_nrmse,
"test_mae":test_mae,
"test_mape":test_mape,
"test_ppts":test_ppts,
"time_cost":time_cost,
}
time_cost=mean(time_cost)
return dev_y,dev_predss,test_y,test_predss,metrics_dict
def read_two_stage_max(station,decomposer,predict_pattern,wavelet_level="db10-2"):
if decomposer=="dwt":
model_path = root_path+"\\"+station+"_"+decomposer+"\\projects\\esvr\\"+wavelet_level+"\\"+predict_pattern+"\\"
else:
model_path = root_path+"\\"+station+"_"+decomposer+"\\projects\\esvr\\"+predict_pattern+"\\"
predictions = pd.DataFrame()
time_cost=[]
r2list=[]
for j in range(1,11):
model_name = station+"_"+decomposer+"_esvr_"+predict_pattern+"_seed"+str(j)+".csv"
data = pd.read_csv(model_path+model_name)
r2list.append(data['test_r2'][0])
print("one-month NSE LIST:{}".format(r2list))
max_id = r2list.index(max(r2list))
print("one-month max id:{}".format(max_id))
model_name = station+"_"+decomposer+"_esvr_"+predict_pattern+"_seed"+str(max_id+1)+".csv"
data = pd.read_csv(model_path+model_name)
records = data['test_y'][0:120]
test_pred=data['test_pred'][0:120]
records = records.values.flatten()
predictions = test_pred.values.flatten()
r2=data['test_r2'][0]
nrmse=data['test_nrmse'][0]
mae=data['test_mae'][0]
mape=data['test_mape'][0]
ppts=data['test_ppts'][0]
time_cost=data['time_cost'][0]
return records,predictions,r2,nrmse,mae,mape,ppts,time_cost
def read_pure_esvr(station):
model_path = root_path+"\\"+station+"\\projects\\esvr\\"
predictions = pd.DataFrame()
time_cost=[]
for j in range(1,11):
model_name = station+"_esvr_seed"+str(j)+".csv"
data = pd.read_csv(model_path+model_name)
if j==1:
records = data['test_y'][0:120]
test_pred=data['test_pred'][0:120]
time_cost.append(data['time_cost'][0])
test_pred=test_pred.reset_index(drop=True)
predictions = pd.concat([predictions,test_pred],axis=1)
predictions = predictions.mean(axis=1)
records = records.values.flatten()
predictions = predictions.values.flatten()
r2=r2_score(y_true=records,y_pred=predictions)
nrmse=math.sqrt(mean_squared_error(y_true=records,y_pred=predictions))/(sum(records)/len(records))
mae=mean_absolute_error(y_true=records,y_pred=predictions)
mape=mean_absolute_percentage_error(y_true=records,y_pred=predictions)
ppts=PPTS(y_true=records,y_pred=predictions,gamma=5)
time_cost=mean(time_cost)
return records,predictions,r2,nrmse,mae,mape,ppts,time_cost
def read_pca_metrics(station,decomposer,start_component,stop_component,wavelet_level="db10-2"):
if decomposer=="dwt":
model_path = root_path+"\\"+station+"_"+decomposer+"\\data\\"+wavelet_level+"\\one_step_1_month_forecast\\"
else:
model_path = root_path+"\\"+station+"_"+decomposer+"\\data\\one_step_1_month_forecast\\"
train = pd.read_csv(model_path+"minmax_unsample_train.csv")
dev = pd.read_csv(model_path+"minmax_unsample_dev.csv")
test = pd.read_csv(model_path+"minmax_unsample_test.csv")
norm_id=pd.read_csv(model_path+"norm_unsample_id.csv")
sMax = (norm_id['series_max']).values
sMin = (norm_id['series_min']).values
# Concat the training, development and testing samples
samples = pd.concat([train,dev,test],axis=0)
samples = samples.reset_index(drop=True)
# Renormalized the entire samples
samples = np.multiply(samples + 1,sMax - sMin) / 2 + sMin
y = samples['Y']
X = samples.drop('Y',axis=1)
pca = PCA(n_components='mle')
pca.fit(X)
n_components_pca_mle = pca.n_components_
print("n_components_pca_mle:{}".format(n_components_pca_mle))
mle = X.shape[1]-n_components_pca_mle
nrmse=[]
r2=[]
mae=[]
mape=[]
ppts=[]
for i in range(start_component,stop_component+1):
if decomposer=="dwt":
model_path = root_path+"\\"+station+"_"+decomposer+"\\projects\\esvr\\"+wavelet_level+"\\one_step_1_month_forecast_with_pca_"+str(i)+"\\"
else:
model_path = root_path+"\\"+station+"_"+decomposer+"\\projects\\esvr\\one_step_1_month_forecast_with_pca_"+str(i)+"\\"
# averaging the trained svr with different seed
test_pred_df = pd.DataFrame()
for j in range(1,11):
model_name = station+"_"+decomposer+"_esvr_one_step_1_month_forecast_with_pca_"+str(i)+"_seed"+str(j)+".csv"
data = pd.read_csv(model_path+model_name)
test_y = data['test_y'][0:120]
test_pred=data['test_pred'][0:120]
test_pred_df = pd.concat([test_pred_df,test_pred],axis=1)
test_pred = test_pred_df.mean(axis=1)
test_y = test_y.values
test_pred = test_pred.values
print(type(test_y))
print(type(test_pred))
r2.append(r2_score(y_true=test_y,y_pred=test_pred))
nrmse.append(math.sqrt(mean_squared_error(y_true=test_y,y_pred=test_pred))/(sum(test_y)/len(test_y)))
mae.append(mean_absolute_error(y_true=test_y,y_pred=test_pred))
mape.append(mean_absolute_percentage_error(y_true=test_y,y_pred=test_pred))
ppts.append(PPTS(y_true=test_y,y_pred=test_pred,gamma=5))
pc0_records,pc0_predictions,pc0_r2,pc0_nrmse,pc0_mae,pc0_mape,pc0_ppts,pc0_time_cost=read_two_stage(station=station,decomposer=decomposer,predict_pattern="one_step_1_month_forecast",)
r2.append(pc0_r2)
nrmse.append(pc0_nrmse)
mae.append(pc0_mae)
mape.append(pc0_mape)
ppts.append(pc0_ppts)
r2.reverse()
nrmse.reverse()
mae.reverse()
mape.reverse()
ppts.reverse()
return mle,r2,nrmse,mae,mape,ppts
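# Minimal sketch of the PCA step above, using random data purely for illustration:
# n_components='mle' lets scikit-learn estimate how many components to retain.
def _pca_mle_demo():
    import numpy as np
    from sklearn.decomposition import PCA
    rng = np.random.RandomState(0)
    X_demo = rng.rand(100, 10)   # assumed shape: 100 samples, 10 features
    pca = PCA(n_components='mle')
    pca.fit(X_demo)
    return pca.n_components_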
def read_long_leading_time(station,decomposer,mode='new',wavelet_level="db10-2"):
records=[]
predictions=[]
nrmse=[]
r2=[]
mae=[]
mape=[]
ppts=[]
if decomposer=="dwt":
model_path = root_path+"\\"+station+"_"+decomposer+"\\projects\\esvr\\"+wavelet_level+"\\"
else:
model_path = root_path+"\\"+station+"_"+decomposer+"\\projects\\esvr\\"
m1_records,m1_predictions,m1_r2,m1_nrmse,m1_mae,m1_mape,m1_ppts,m1_time_cost=read_two_stage(station=station,decomposer=decomposer,predict_pattern="one_step_1_month_forecast",)
records.append(m1_records)
predictions.append(m1_predictions)
r2.append(m1_r2)
nrmse.append(m1_nrmse)
mae.append(m1_mae)
mape.append(m1_mape)
ppts.append(m1_ppts)
# averaging the trained svr with different seed
test_pred_df = pd.DataFrame()
leading_times=[3,5,7,9]
for leading_time in leading_times:
print("Reading mode:{}".format(mode))
if mode==None:
file_path = model_path+"one_step_"+str(leading_time)+"_month_forecast//"
else:
file_path = model_path+"one_step_"+str(leading_time)+"_month_forecast_"+mode+"//"
for j in range(1,11):
if mode == None:
model_name = station+"_"+decomposer+"_esvr_one_step_"+str(leading_time)+"_month_forecast_seed"+str(j)+".csv"
else:
model_name = station+"_"+decomposer+"_esvr_one_step_"+str(leading_time)+"_month_forecast_"+mode+"_seed"+str(j)+".csv"
data = | pd.read_csv(file_path+model_name) | pandas.read_csv |
import pdb
import pickle
import pandas as pd
import os
import numpy as np
import sys
sys.path.insert(1,"../")
sys.path.insert(1,"../../")
sys.path.insert(1,"../../../")
from config_u import base
project_base_path = base
current_path = "scripts/cpmg/automated_metabolite_quantification/"
sys.path.insert(1, os.path.join(project_base_path, current_path))
from data_utils import split_to_kfold, spectrum2ppm, spectrum_peak_unit_quantification
''' Custom data generator functions for fold generation with no patient and sample overlap'''
# Option #1.A: only valid PC and fully quantified samples (train, vald and test)
def cpmg_generator_1A(k, fold_dct, statistics, spectra, ppm_spectra, quant, class_labels):
cur_iter = 0
while cur_iter < k:
test_fold_idx = fold_dct[str(cur_iter)]
test_fold = {}
test_fold["spectra"] = spectra[test_fold_idx,:]
test_fold["quant"] = quant[test_fold_idx,:]
test_fold["ppm_spectra"] = ppm_spectra[test_fold_idx,:]
test_fold["class_labels"] = class_labels[test_fold_idx,:]
test_fold["stats"] = statistics.iloc[test_fold_idx,:].reset_index(drop=True)
vald_fold_idx = fold_dct[str((cur_iter+1) % k)]
vald_fold = {}
vald_fold["spectra"] = spectra[vald_fold_idx,:]
vald_fold["quant"] = quant[vald_fold_idx,:]
vald_fold["ppm_spectra"] = ppm_spectra[vald_fold_idx,:]
vald_fold["class_labels"] = class_labels[vald_fold_idx,:]
vald_fold["stats"] = statistics.iloc[vald_fold_idx,:].reset_index(drop=True)
train_fold_indices = list(range(k))
train_fold_indices.remove(cur_iter)
train_fold_indices.remove((cur_iter+1) % k)
train_fold_idx = [] + fold_dct[str(train_fold_indices[0])] + fold_dct[str(train_fold_indices[1])] + fold_dct[str(train_fold_indices[2])]
train_fold = {}
train_fold["spectra"] = spectra[train_fold_idx,:]
train_fold["quant"] = quant[train_fold_idx,:]
train_fold["ppm_spectra"] = ppm_spectra[train_fold_idx,:]
train_fold["class_labels"] = class_labels[train_fold_idx,:]
train_fold["stats"] = statistics.iloc[train_fold_idx,:].reset_index(drop=True)
all_data = {}
all_data["spectra"] = spectra
all_data["quant"] = quant
all_data["ppm_spectra"] = ppm_spectra
all_data["class_labels"] = class_labels
all_data["stats"] = statistics
yield (train_fold_idx, vald_fold_idx, test_fold_idx, train_fold, vald_fold, test_fold, all_data)
cur_iter += 1
# Option #1.B: only valid PC and fully quantified samples (train and test)
def cpmg_generator_1B(k, fold_dct, statistics, spectra, ppm_spectra, quant, class_labels):
cur_iter = 0
while cur_iter < k:
test_fold_idx = fold_dct[str(cur_iter)]
test_fold = {}
test_fold["spectra"] = spectra[test_fold_idx,:]
test_fold["quant"] = quant[test_fold_idx,:]
test_fold["ppm_spectra"] = ppm_spectra[test_fold_idx,:]
test_fold["class_labels"] = class_labels[test_fold_idx,:]
test_fold["stats"] = statistics.iloc[test_fold_idx,:].reset_index(drop=True)
train_fold_indices = list(range(k))
train_fold_indices.remove(cur_iter)
train_fold_idx = [] + fold_dct[str(train_fold_indices[0])] + fold_dct[str(train_fold_indices[1])] + fold_dct[str(train_fold_indices[2])] + fold_dct[str(train_fold_indices[3])]
train_fold = {}
train_fold["spectra"] = spectra[train_fold_idx,:]
train_fold["quant"] = quant[train_fold_idx,:]
train_fold["ppm_spectra"] = ppm_spectra[train_fold_idx,:]
train_fold["class_labels"] = class_labels[train_fold_idx,:]
train_fold["stats"] = statistics.iloc[train_fold_idx,:].reset_index(drop=True)
all_data = {}
all_data["spectra"] = spectra
all_data["quant"] = quant
all_data["ppm_spectra"] = ppm_spectra
all_data["class_labels"] = class_labels
all_data["stats"] = statistics
yield (train_fold_idx, test_fold_idx, train_fold, test_fold, all_data)
cur_iter += 1
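# A hedged usage sketch for the k-fold generators defined in this module; the fold
# dictionary construction and array shapes are assumptions made only to show the
# calling pattern, not the project's real fold-assignment logic.
def _generator_usage_demo(statistics, spectra, ppm_spectra, quant, class_labels):
    k = 5
    fold_dct = {str(i): list(range(i, spectra.shape[0], k)) for i in range(k)}
    folds = cpmg_generator_1A(k, fold_dct, statistics, spectra, ppm_spectra, quant, class_labels)
    for train_idx, vald_idx, test_idx, train_fold, vald_fold, test_fold, all_data in folds:
        pass  # train on train_fold, tune on vald_fold, evaluate on test_fold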
# Option #2.A: valid PC and fully quantified samples form test folds
# but invalid samples are injected to the training dataset by hand (train, vald and test)
def cpmg_generator_2A(k, fold_dct, valid_statistics, valid_spectra, valid_ppm_spectra, valid_quant, valid_class_labels,\
invalid_statistics, invalid_spectra, invalid_ppm_spectra, invalid_quant):
cur_iter = 0
while cur_iter < k:
test_fold_idx = fold_dct[str(cur_iter)]
test_fold = {}
test_fold["spectra"] = spectra[test_fold_idx,:]
test_fold["quant"] = quant[test_fold_idx,:]
test_fold["ppm_spectra"] = ppm_spectra[test_fold_idx,:]
test_fold["class_labels"] = class_labels[test_fold_idx,:]
test_fold["stats"] = statistics.iloc[test_fold_idx,:].reset_index(drop=True)
vald_fold_idx = fold_dct[str((cur_iter+1) % k)]
vald_fold = {}
vald_fold["spectra"] = spectra[vald_fold_idx,:]
vald_fold["quant"] = quant[vald_fold_idx,:]
vald_fold["ppm_spectra"] = ppm_spectra[vald_fold_idx,:]
vald_fold["class_labels"] = class_labels[vald_fold_idx,:]
vald_fold["stats"] = statistics.iloc[vald_fold_idx,:].reset_index(drop=True)
invalid_sample_cnt = invalid_spectra.shape[0]
train_fold_indices = list(range(k))
train_fold_indices.remove(cur_iter)
train_fold_indices.remove((cur_iter+1) % k)
train_fold_idx = [] + fold_dct[str(train_fold_indices[0])] + fold_dct[str(train_fold_indices[1])] + fold_dct[str(train_fold_indices[2])]
train_fold = {}
train_fold["spectra"] = np.concat((spectra[train_fold_idx,:], invalid_spectra[:,:]), axis=0)
train_fold["quant"] = np.concat((quant[train_fold_idx,:], invalid_quant[:,:]), axis=0)
train_fold["ppm_spectra"] = np.concat((ppm_spectra[train_fold_idx,:], invalid_ppm_spectra[:,:]), axis=0)
train_fold["class_labels"] = np.concat((class_labels[train_fold_idx,:], np.array([-1]*invalid_sample_cnt).reshape((-1,1))), axis=1)
train_fold["stats"] = pd.concat([statistics.iloc[train_fold_idx,:], invalid_statistics]).reset_index(drop=True)
all_data = {}
all_data["spectra"] = spectra
all_data["quant"] = quant
all_data["ppm_spectra"] = ppm_spectra
all_data["class_labels"] = class_labels
all_data["stats"] = statistics
yield (train_fold_idx, vald_fold_idx, test_fold_idx, train_fold, vald_fold, test_fold, all_data)
cur_iter += 1
# Option #2.B: valid PC and fully quantified samples form test folds
# but invalid samples are injected to the training dataset by hand (train, vald and test)
def cpmg_generator_2B(k, fold_dct, valid_statistics, valid_spectra, valid_ppm_spectra, valid_quant, valid_class_labels,\
invalid_statistics, invalid_spectra, invalid_ppm_spectra, invalid_quant):
cur_iter = 0
while cur_iter < k:
test_fold_idx = fold_dct[str(cur_iter)]
test_fold = {}
test_fold["spectra"] = spectra[test_fold_idx,:]
test_fold["quant"] = quant[test_fold_idx,:]
test_fold["ppm_spectra"] = ppm_spectra[test_fold_idx,:]
test_fold["class_labels"] = class_labels[test_fold_idx,:]
test_fold["stats"] = statistics.iloc[test_fold_idx,:].reset_index(drop=True)
invalid_sample_cnt = invalid_spectra.shape[0]
train_fold_indices = list(range(k))
train_fold_indices.remove(cur_iter)
train_fold_idx = [] + fold_dct[str(train_fold_indices[0])] + fold_dct[str(train_fold_indices[1])] + fold_dct[str(train_fold_indices[2])] + fold_dct[str(train_fold_indices[3])]
train_fold = {}
train_fold["spectra"] = np.concat((spectra[train_fold_idx,:], invalid_spectra[:,:]), axis=0)
train_fold["quant"] = np.concat((quant[train_fold_idx,:], invalid_quant[:,:]), axis=0)
train_fold["ppm_spectra"] = np.concat((ppm_spectra[train_fold_idx,:], invalid_ppm_spectra[:,:]), axis=0)
train_fold["class_labels"] = np.concat((class_labels[train_fold_idx,:], np.array([-1]*invalid_sample_cnt).reshape((-1,1))), axis=1)
train_fold["stats"] = | pd.concat([statistics.iloc[train_fold_idx,:], invalid_statistics]) | pandas.concat |
# This code extracts the features from the raw joined dataset (data.csv)
# and save it in the LibSVM format.
# Usage: python construct_features.py
import pandas as pd
import numpy as np
from sklearn.datasets import dump_svmlight_file
df = pd.read_csv("data.csv", low_memory=False)
# NPU
NPU = df.NPU.copy()
NPU[NPU == ' '] = np.nan
NPU = pd.get_dummies(NPU, prefix="NPU")
# SiteZip
SiteZip = df.SiteZip.copy()
SiteZip = SiteZip.str.replace(',','')
SiteZip = SiteZip.str.replace(r'\.00','')
SiteZip = SiteZip.replace('0',np.nan)
SiteZip = pd.get_dummies(SiteZip, prefix="SiteZip")
# Submarket1
Submarket1 = df.Submarket1.copy()
Submarket1 = | pd.get_dummies(Submarket1, prefix="Submarket1") | pandas.get_dummies |
import os
import click
import requests
import shutil
import pandas as pd
COMPLEX = 'ComplexParticipantsPubMedIdentifiers_human.txt'
PE = 'NCBI2Reactome_PE_All_Levels.txt'
REACTIONS = 'NCBI2ReactomeReactions.txt'
CHEMICALS = 'ChEBI2Reactome_PE_Reactions.txt'
PREFIX = 'ncbigene'
def download(url:str, download_dir:str='') -> str:
_, filename = url.rsplit('/', 1)
path = os.path.join(download_dir, filename)
with requests.get(url, stream=True) as r:
with open(path, 'wb+') as f:
for chunk in r.iter_content(chunk_size=128):
f.write(chunk)
return path
def download_files(download_dir, force_download=False):
filenames = [COMPLEX, PE, REACTIONS, CHEMICALS]
for filename in filenames:
if os.path.exists(os.path.join(download_dir, filename)) and not force_download:
continue
url = 'https://reactome.org/download/current/' + filename
path = download(url, download_dir)
print('Downloaded {}'.format(path))
@click.command()
@click.option('--download-dir', '-d', default='backend/data')
@click.option('--force-download', '-f', is_flag=True)
@click.option('--output', '-o', default='backend/data/id_mapping.csv')
def main(download_dir, force_download, output):
"""
Builds up a CSV correlating labels from the Reactome SBGN's with identifiers
Use: `python scripts/build_id_mapping_csv.py`
"""
download_files(download_dir, force_download)
path = os.path.join(download_dir, PE)
print('Parsing {}'.format(path))
df = pd.read_csv(path, header=None, dtype=str, sep='\t')
df['name'] = df[2].apply(lambda x: x.split('[', 1)[0].strip())
df['id'] = df[0].apply(lambda x: '{}:{}'.format(PREFIX, x))
df['species'] = df[7]
df_genes = df[df['species'] == 'Homo sapiens'][['id', 'name']]
path = os.path.join(download_dir, REACTIONS)
print('Parsing {}'.format(path))
df = pd.read_csv(path, header=None, dtype=str, sep='\t')
df['name'] = df[3]
df['id'] = df[1]
df['species'] = df[5]
df_reactions = df[df['species'] == 'Homo sapiens'][['id', 'name']]
path = os.path.join(download_dir, COMPLEX)
print('Parsing {}'.format(path))
df = | pd.read_csv(path, dtype=str, sep='\t') | pandas.read_csv |
import unittest
from .. import simulate_endToEnd
from Bio.Seq import MutableSeq
from Bio import SeqIO
from Bio.Alphabet import generic_dna
import pandas as pd
import numpy as np
import mock
import os
class TestSimulateNormal(unittest.TestCase):
def setUp(self):
self.genome = {"chr1": MutableSeq("NNNNAGAGCTACGATGCTACGATGNNNNN", generic_dna),
"chr2": MutableSeq("NNNNNNAGAGCTACNNNGATGCGATGNN", generic_dna)}
def test_remove_Ns(self):
genome_out = {}
(genome_out['chr1'], offset) = simulate_endToEnd.remove_trailing_N_characters(self.genome['chr1'])
(genome_out['chr2'], offset) = simulate_endToEnd.remove_trailing_N_characters(self.genome['chr2'])
self.assertEqual(genome_out, {"chr1": MutableSeq("AGAGCTACGATGCTACGATG", generic_dna),
"chr2": MutableSeq("AGAGCTACNNNGATGCGATG", generic_dna)})
def test_subtract_beds(self):
lists = [['chr2', 6, 7, 'insertion', 'AAA', 2],['chr1', 6, 15, 'inversion', '-', 0]]
first_bed = pd.DataFrame(lists)
first_bed.columns = ['chrom', 'start', 'end', 'name', 'alt', 'uid']
lists = [['chr2', 6, 7, 'insertion', 'AAA', 2]]
second_bed = pd.DataFrame(lists)
second_bed.columns = ['chrom', 'start', 'end', 'name', 'alt', 'uid']
new_bed = simulate_endToEnd.subtract_beds(first_bed, second_bed)
# Have to reset the index, or otherwise the indices will be unequal
new_bed = new_bed.reset_index(drop=True)
lists = [['chr1', 6, 15, 'inversion', '-', 0]]
expected_df = pd.DataFrame(lists)
expected_df.columns = ['chrom', 'start', 'end', 'name', 'alt', 'uid']
self.assertTrue(expected_df.equals(new_bed))
def test_genome_offset(self):
genome_out = {}
genome_offset = {}
for chrom in self.genome:
(genome_out[chrom], genome_offset[chrom]) = simulate_endToEnd.remove_trailing_N_characters(
self.genome[chrom])
self.assertEqual(genome_offset['chr1'], 4)
self.assertEqual(genome_offset['chr2'], 6)
def test_bed_reoffset(self):
genome_out = {}
genome_offset = {}
for chrom in self.genome:
(genome_out[chrom], genome_offset[chrom]) = simulate_endToEnd.remove_trailing_N_characters(
self.genome[chrom])
lists = [['chr2', 6, 7, 'insertion', 'AAA', 2],['chr1', 6, 15, 'inversion', '-', 0]]
first_bed = pd.DataFrame(lists)
first_bed.columns = ['chrom', 'start', 'end', 'name', 'alt', 'uid']
corrected_bed = simulate_endToEnd.offset_bed(first_bed, genome_offset)
lists = [['chr2', 12, 13, 'insertion', 'AAA', 2],['chr1', 10, 19, 'inversion', '-', 0]]
expected_df = | pd.DataFrame(lists) | pandas.DataFrame |
import traceback
import math
import pandas as pd
import numpy as np
import math
from sympy.solvers import solve
from sympy import Symbol
from .WeatherDelay import WeatherDelay as WD
from .CostModule import CostModule
class FoundationCost(CostModule):
"""
**FoundationCost.py**
- Created by <NAME> and <NAME> on Apr. 3, 2018
- Refactored by <NAME> and <NAME> on June 3, 2019
\nCalculates the costs of constructing foundations for land-based wind projects *(items in brackets are not yet implemented)*:
* Get number of turbines
* Get duration of construction
* Get daily hours of operation* # todo: add to process diagram
* Get season of construction* # todo: add to process diagram
* [Get region]
* Get rotor diameter
* Get hub height
* Get turbine rating
* Get buoyant foundation design flag
* [Get seismic zone]
* Get tower technology type
* Get hourly weather data
* [Get specific seasonal delays]
* [Get long-term, site-specific climate data]
* Get price data
* Get labor rates
* Get material prices for steel and concrete
* [Use region to determine weather data]
\n\nGiven below is the set of calculations carried out in this module:
* Calculate the foundation loads using the rotor diameter, hub height, and turbine rating
* Determine the foundation size based on the foundation loads, buoyant foundation design flag, and type of tower technology
* Estimate the amount of material needed for foundation construction based on foundation size and number of turbines
* Estimate the amount of time required to construct foundation based on foundation size, hours of operation, duration of construction, and number of turbines
* Estimate the additional amount of time for weather delays (currently only assessing wind delays) based on hourly weather data, construction time, hours of operation, and season of construction
* Estimate the amount of labor required for foundation construction based on foundation size, construction time, and weather delay
* Calculate number of workers by crew type
* Calculate man hours by crew type
* Estimate the amount of equipment needed for foundation construction based on foundation size, construction time, and weather delay
* Calculate number of equipment by equip type
* Calculate equipment hours by equip type
- Calculate the total foundation cost based on amount of equipment, amount of labor, amount of material, and price data.
**Keys in the input dictionary are the following:**
depth
(int) depth of foundation [in m]
component_data
(pd.DataFrame) data frame with wind turbine component data
num_turbines
(int) total number of turbines in wind farm
duration_construction
(int) estimated construction time in months
num_delays
(int) Number of delay events #TODO: Check with Alicia the definition is appropriate
avg_hours_per_delay
(float) Average hours per delay event #TODO: Check with Alicia the definition is appropriate
std_dev_hours_per_delay
(float) Standard deviation from average hours per delay event #TODO: Check with Alicia the definition is appropriate
delay_speed_m_per_s
(float) wind speed above which weather delays kick in
start_delay_hours
(int)
mission_time_hours
(int)
gust_wind_speed_m_per_s
(float)
wind_height_of_interest_m
(int)
wind_shear_exponent
(float)
season_construct
list of seasons (like ['spring', 'summer']) for the construction.
time_construct
list of time windows for constructions. Use ['normal'] for a
0800 to 1800 schedule 10 hour schedule. Use ['long'] for an
overnight 1800 to 2359, 0000 to 0759 overnight schedule. Use
['normal', 'long'] for a 24-hour schedule.
operational_hrs_per_day
(float)
material_price
(pd.DataFrame) dataframe containing foundation cost related material prices
rsmeans
(pd.DataFrame) TODO: Formal definition for rsmeans?
**Keys in the output dictionary are the following:**
F_dead_kN_per_turbine
(float) foundation dead load [in kN]
F_horiz_kN_per_turbine
(float) total lateral load [kN]
M_tot_kN_m_per_turbine
(float) Moment [kN.m]
Radius_o_m
(float) foundation radius based on overturning moment [in m]
Radius_g_m
(float) foundation radius based on gapping [in m]
Radius_b_m
(float) foundation radius based on bearing pressure [in m]
Radius_m
(float) largest foundation radius based on all three foundation design criteria: moment, gapping, bearing [in m]
foundation_volume_concrete_m3_per_turbine
(float) volume of a round, raft foundation [in m^3]
steel_mass_short_ton
(float) short tons of reinforcing steel
material_needs_per_turbine
(pd.DataFrame) table containing material needs info for -> Steel - rebar, Concrete 5000 psi, Excavated dirt, Backfill.
operation_data
(pd.DataFrame) TODO: What's the best one line definition for this?
**TODO: Weather delay set of outputs -> ask Alicia for formal definitions of these keys.**
total_foundation_cost
(pd.DataFrame) summary of foundation costs (in USD) broken down into 4 main categories:
1. Equipment Rental
2. Labor
3. Materials
4. Mobilization
"""
def __init__(self, input_dict, output_dict, project_name):
"""
Parameters
----------
input_dict : dict
The input dictionary with key value pairs described in the
class documentation
output_dict : dict
The output dictionary with key value pairs as found on the
output documentation.
"""
self.input_dict = input_dict
self.output_dict = output_dict
self.project_name = project_name
#Constants used in FoundationCost class. Making this data private (hidden from outside of this class): #TODO: Change private variables to protected.
self._kg_per_tonne = 1000
self._cubicm_per_cubicft = 0.0283168
self._steel_density = 9490 # kg / m^3
self._cubicyd_per_cubicm = 1.30795
self._ton_per_tonne = 0.907185
def calculate_foundation_load(self, foundation_load_input_data, foundation_load_output_data):
"""
Function to calculate foundation load.
Parameters
-------
Int Section height m
Surface area sq (in m^2)
Coeff drag (installed)
Lever arm m (in m)
Multplier drag rotor
Multiplier tower drag
Mass tonne
Returns
-------
Dead load [in N] -> F_dead_kN_per_turbine
Lateral load [in N] -> F_horiz_kN_per_turbine
Moment [N.m] -> M_tot_kN_m_per_turbine
Foundation radius based on overturning moment [in m] -> Radius_o_m
Foundation radius based on slipping [in m] -> Radius_s_m
Foundation radius based on gapping [in m] -> Radius_g_m
Foundation radius based on bearing pressure [in m] -> Radius_b_m
Largest foundation radius based on all three foundation design criteria (moment, gapping, bearing [in m]) -> Radius_m
"""
# set exposure constants
a = 9.5
z_g = 274.32
# get section height
z = foundation_load_input_data['Section height m']
# get cross-sectional area
a_f = foundation_load_input_data['Surface area sq m']
# get coefficient of drag
c_d = foundation_load_input_data['Coeff drag (installed)']
# get lever arm
l = foundation_load_input_data['Lever arm m']
# get multipliers for tower and rotor
multiplier_rotor = foundation_load_input_data['Multplier drag rotor']
multiplier_tower = foundation_load_input_data['Multiplier tower drag']
# calculate wind pressure
k_z = 2.01 * (z / z_g) ** (2 / a) # exposure factor
k_d = 0.95 # wind directionality factor
k_zt = 1 # topographic factor
v = foundation_load_input_data['gust_velocity_m_per_s']
wind_pressure = 0.613 * k_z * k_zt * k_d * v ** 2
# calculate wind loads on each tower component
g = 0.85 # gust factor
c_f = 0.6 # coefficient of force
f_t = (wind_pressure * g * c_f * a_f) * multiplier_tower
# calculate drag rotor
rho = 1.225 # air density in kg/m^3
f_r = (0.5 * rho * c_d * a_f * v ** 2) * multiplier_rotor
f = (f_t + f_r)
# calculate dead load in N
g = 9.8 # m / s ^ 2
f_dead = sum(foundation_load_input_data['Mass tonne']) * g * self._kg_per_tonne / 1.15 # scaling factor to adjust dead load for uplift
# calculate moment from each component at base of tower
m_overturn = f * l
# get total lateral load (N) and moment (N * m)
f_lat = f.sum() # todo: add f_lat (drag force) to output csv
m_overturn = m_overturn.sum()
# compare to moment from rated thrust
rated_thrust = foundation_load_input_data['rated_thrust_N']
m_thrust = rated_thrust * max(l)
m_tot = max(m_thrust, m_overturn)
# compare lateral load to rated thrust
f_horiz = max(f_lat, rated_thrust)
# calculate foundation radius based on overturning moment
vol_fraction_fill = 0.55
vol_fraction_concrete = 1 - vol_fraction_fill
safety_overturn = 1.5
unit_weight_fill = 17.3e3 # in N / m^3
unit_weight_concrete = 23.6e3 # in N / m^3
bearing_pressure = foundation_load_input_data['bearing_pressure_n_m2']
p = [(np.pi * foundation_load_input_data['depth'] * (vol_fraction_fill * unit_weight_fill + vol_fraction_concrete * unit_weight_concrete)), 0, f_dead, - (safety_overturn * (m_tot + f_horiz * foundation_load_input_data['depth']))]
r_overturn = np.roots(p)
r_overturn = np.real(r_overturn[np.isreal(r_overturn)])[0]
# calculate foundation radius based on slipping
safety_slipping = 1.5
friction_angle_soil = 25
tangent_slip_angle = math.tan((friction_angle_soil * math.pi) / 180)
slipping_force_with_sf = (safety_slipping * f_lat)
# first check if slipping is already satisfied by dead weight
if slipping_force_with_sf < (f_dead * tangent_slip_angle):
r_slipping = 0
else:
# Calculate foundation radius based on slipping:
r_slipping = (((slipping_force_with_sf / tangent_slip_angle) - f_dead) /
((vol_fraction_fill * unit_weight_fill + vol_fraction_concrete * unit_weight_concrete) * math.pi * foundation_load_input_data['depth'])) ** 0.5
r_test_gapping = max(r_overturn, r_slipping)
# calculate foundation radius based on gapping
# check if gapping constrain is already satisfied - r / 3 < e
foundation_vol = np.pi * r_test_gapping ** 2 * foundation_load_input_data['depth']
v_1 = (foundation_vol * (
vol_fraction_fill * unit_weight_fill + vol_fraction_concrete * unit_weight_concrete) + f_dead)
e = m_tot / v_1
if (r_test_gapping / 3) < e:
r_gapping = 0
else:
r_g = Symbol('r_g', real=True, positive=True)
foundation_vol = np.pi * r_g ** 2 * foundation_load_input_data['depth']
v_1 = (foundation_vol * (vol_fraction_fill * unit_weight_fill + vol_fraction_concrete * unit_weight_concrete) + f_dead)
e = m_tot / v_1
r_gapping = solve(e * 3 - r_g, r_g)
if len(r_gapping) > 0:
r_gapping = max(r_gapping)
else:
r_gapping = 0
r_test_bearing = max(r_test_gapping, r_gapping)
# calculate foundation radius based on bearing pressure
r_b = Symbol('r_b', real=True, positive=True)
foundation_vol = np.pi * r_test_bearing ** 2 * foundation_load_input_data['depth']
v_1 = (foundation_vol * (vol_fraction_fill * unit_weight_fill + vol_fraction_concrete * unit_weight_concrete) + f_dead)
e = m_tot / v_1
a_eff = v_1 / bearing_pressure
r_bearing = solve(2 * (r_b ** 2 - e * (r_b ** 2 - e ** 2) ** 0.5) - a_eff, r_b)
if len(r_bearing) > 0:
r_bearing = max(r_bearing)
else:
r_bearing = 0
# pick the largest foundation radius based on all 4 foundation design criteria: moment, gapping, bearing, slipping
r_choosen = max(r_bearing, r_overturn, r_slipping, r_gapping)
foundation_load_output_data['F_dead_kN_per_turbine'] = f_dead / 1e3
foundation_load_output_data['F_horiz_kN_per_turbine'] = f_lat / 1e3
foundation_load_output_data['M_tot_kN_m_per_turbine'] = m_tot / 1e3
foundation_load_output_data['Radius_o_m'] = r_overturn
foundation_load_output_data['Radius_s_m'] = r_slipping
foundation_load_output_data['Radius_g_m'] = r_gapping
foundation_load_output_data['Radius_b_m'] = r_bearing
foundation_load_output_data['Radius_m'] = r_choosen
return foundation_load_output_data
def determine_foundation_size(self, foundation_size_input_data, foundation_size_output_data):
"""
Function to calculate the volume of a round, raft foundation. Assumes foundation made of concrete with 1 m thickness.
Parameters
-------
Largest foundation radius based on all three foundation design criteria: moment, gapping, bearing [in m] -> Radius_m [in m]
depth of foundation [in m] -> depth
Returns
-------
Foundation volume [in m^3] -> foundation_volume_concrete_m3_per_turbine
"""
r = float(foundation_size_output_data['Radius_m'])
foundation_size_output_data['excavated_volume_m3'] = np.pi * (r + 0.5) ** 2 * foundation_size_input_data['depth']
foundation_size_output_data['foundation_volume_concrete_m3_per_turbine'] = np.pi * r ** 2 * foundation_size_input_data['depth'] * 0.45 # only compute the portion of the foundation that is composed of concrete (45% concrete; other portion is backfill); TODO: Add to sphinx -> (volume excavated = pi*(r_pick + .5m)^2 this assumes vertical sides which does not reflect reality as OSHA requires benched sides over 3’)
return foundation_size_output_data
def estimate_material_needs_per_turbine(self, material_needs_per_turbine_input_data, material_needs_per_turbine_output_data):
"""
Function to estimate amount of material based on foundation size and number of turbines.
Parameters
-------
Foundation concrete volume [in m^3] -> foundation_volume_concrete_m3_per_turbine
Returns
-------
(Returns pd.DataFrame) material_needs_per_turbine
"""
steel_mass_short_ton_per_turbine = (material_needs_per_turbine_output_data['foundation_volume_concrete_m3_per_turbine'] * 0.012 * self._steel_density / self._kg_per_tonne)
concrete_volume_cubic_yards_per_turbine = material_needs_per_turbine_output_data['foundation_volume_concrete_m3_per_turbine'] * 0.985 * self._cubicyd_per_cubicm
#Assign values to output dictionary:
material_needs_per_turbine_output_data['material_needs_per_turbine'] = pd.DataFrame([['Steel - rebar', steel_mass_short_ton_per_turbine, 'ton (short)'],
['Concrete 5000 psi', concrete_volume_cubic_yards_per_turbine, 'cubic yards'],
['Excavated dirt', material_needs_per_turbine_output_data['excavated_volume_m3'] * self._cubicyd_per_cubicm, 'cubic_yards'],
['Backfill', material_needs_per_turbine_output_data['excavated_volume_m3'] * self._cubicyd_per_cubicm, 'cubic_yards']],
columns=['Material type ID', 'Quantity of material', 'Units'])
material_needs_per_turbine_output_data['steel_mass_short_ton_per_turbine'] = steel_mass_short_ton_per_turbine
return material_needs_per_turbine_output_data['material_needs_per_turbine']
def estimate_construction_time(self, construction_time_input_data, construction_time_output_data):
"""
Function to estimate construction time on a per-turbine basis. Its task is to return a pd.DataFrame (operation_data). TODO: What's a better definition of this function?
Parameters
-------
duration_construction
pd.DataFrame
rsmeans
pd.DataFrame
material_needs_per_turbine
Returns
-------
(pd.DataFrame) operation_data
"""
foundation_construction_time = construction_time_input_data['construct_duration'] * 1 / 3
#throughput_operations = construction_time_input_data['throughput_operations']
throughput_operations = construction_time_input_data['rsmeans']
material_needs_per_turbine = construction_time_output_data['material_needs_per_turbine']
quantity_materials_entire_farm = material_needs_per_turbine['Quantity of material'] * construction_time_input_data['num_turbines']
#Calculations for estimate construction time will be on entire wind farm basis:
construction_time_output_data['material_needs_entire_farm'] = material_needs_per_turbine.copy()
material_needs_entire_farm = construction_time_output_data['material_needs_entire_farm']
material_needs_entire_farm['Quantity of material'] = quantity_materials_entire_farm
operation_data = throughput_operations.where(throughput_operations['Module'] == 'Foundations').dropna(thresh=4)
#operation data for entire wind farm:
operation_data = pd.merge(material_needs_entire_farm, operation_data, on=['Material type ID'], how='outer')
operation_data['Number of days'] = operation_data['Quantity of material'] / operation_data['Daily output']
operation_data['Number of crews'] = np.ceil((operation_data['Number of days'] / 30) / foundation_construction_time)
alpha = operation_data[operation_data['Type of cost'] == 'Labor']
operation_data_id_days_crews_workers = alpha[['Operation ID', 'Number of days', 'Number of crews', 'Number of workers']]
# if more than one crew needed to complete within construction duration then assume that all construction happens
# within that window and use that timeframe for weather delays; if not, use the number of days calculated
operation_data['time_construct_bool'] = operation_data['Number of days'] > foundation_construction_time * 30
boolean_dictionary = {True: foundation_construction_time * 30, False: np.NAN}
operation_data['time_construct_bool'] = operation_data['time_construct_bool'].map(boolean_dictionary)
operation_data['Time construct days'] = operation_data[['time_construct_bool', 'Number of days']].min(axis=1)
num_days = operation_data['Time construct days'].max()
construction_time_output_data['operation_data_id_days_crews_workers'] = operation_data_id_days_crews_workers
construction_time_output_data['operation_data_entire_farm'] = operation_data
# pull out management data #TODO: Add this cost to Labor cost next
crew_cost = self.input_dict['crew_cost']
crew = self.input_dict['crew'][self.input_dict['crew']['Crew type ID'].str.contains('M0')]
management_crew = pd.merge(crew_cost, crew, on=['Labor type ID'])
management_crew = management_crew.assign(per_diem_total=management_crew['Per diem USD per day'] * management_crew['Number of workers'] * num_days)
management_crew = management_crew.assign(hourly_costs_total=management_crew['Hourly rate USD per hour'] * self.input_dict['hour_day'][self.input_dict['time_construct']] * num_days)
management_crew = management_crew.assign(total_crew_cost_before_wind_delay=management_crew['per_diem_total'] + management_crew['hourly_costs_total'])
self.output_dict['management_crew'] = management_crew
self.output_dict['managament_crew_cost_before_wind_delay'] = management_crew['total_crew_cost_before_wind_delay'].sum()
return construction_time_output_data['operation_data_entire_farm']
def calculate_weather_delay(self, weather_delay_input_data, weather_delay_output_data):
"""
Function to calculate wind delay for foundations.
Keys in weather_delay_input_data
--------------------------------
weather_window
duration_construction
start_delay
critical_wind_speed
operational_hrs_per_day
height_interest
wind_shear_exponent
"""
# construct WeatherDelay module
WD(weather_delay_input_data, weather_delay_output_data)
# compute weather delay
wind_delay = | pd.DataFrame(weather_delay_output_data['wind_delays']) | pandas.DataFrame |
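# A short numeric sketch of the velocity-pressure relation used in
# calculate_foundation_load above; the 90 m section height and 52.5 m/s gust speed
# are illustrative assumptions, not values taken from the model's inputs.
def _wind_pressure_demo():
    z_g, a = 274.32, 9.5                      # gradient height (m) and power-law exponent
    z = 90.0                                  # assumed section height in m
    k_z = 2.01 * (z / z_g) ** (2 / a)         # exposure factor
    k_d, k_zt, v = 0.95, 1.0, 52.5            # directionality, topographic factor, gust speed (m/s)
    wind_pressure = 0.613 * k_z * k_zt * k_d * v ** 2   # velocity pressure in N/m^2
    return wind_pressure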
"""Helper functions for random forest classification and regression
Author <NAME>"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn import preprocessing
from sklearn.ensemble import RandomForestRegressor
#from . import ml_sets as sets
from result_analysis import ml_analysis as ml_an
from result_analysis import photoz_analysis as pz_an
#to standardize scaling
from sklearn.preprocessing import RobustScaler
#clean up memory
import gc
import math
def build_matrices(df, features,label, drop_nans = True):
"""This routines returns the feature matrix X to use for the classification
and the label vector y based on the input DataFrame. The label column must
be df.label and the features must be valid column names of the DataFrame
Input:
df (DataFrame)
features (list) list of label names to be considered
Output:
X (Numpy Array, 2D) feature matrix
y (Numpy Array, 1D) label vector
"""
if drop_nans:
df.dropna(axis=0,how='any',subset=features,inplace=True)
X = np.array(df[features])
y = np.array(df[label])
return X,y
def build_matrix(df, features,drop_nans = False):
"""This routines returns the feature matrix X to use for the classification.
The features must be valid column names of the DataFrame.
Input:
df (DataFrame)
features (list) list of label names to be considered
Output:
X (Numpy Array, 2D) feature matrix
"""
if drop_nans:
df.dropna(axis=0,how='any',subset=features,inplace=True)
X = np.array(df[features])
return X
def rf_class_grid_search(df_train,df_pred, features, label, param_grid, rand_state, scores, name):
"""This routine calculates the random forest classification on a grid of
hyper-parameters for the random forest method to test the best
hyper-parameters. The analysis results of the test will be written out and
saved.
Parameters:
df_train, df_pred : pandas dataframe
The dataframes containing the features and the label for the
classification (training and test/prediction sets).
features : list of strings
List of features
label : string
The label for the classification
param_grid : dictionary-like structure
Parameter grid of input parameters for the grid search
rand_state : integer
Setting the random state variables to ensure reproducibility
scores : list of strings
Setting the score by which the grid search should be evaluated
name : strings
Setting the name of the output file for the grid search which
contains all information about the grid
"""
X_train, y_train = build_matrices(df_train, features,label=label)
X_test, y_test = build_matrices(df_pred, features,label=label)
print ("Trainingset: ", X_train.shape)
print(pd.Series(y_train).value_counts())
print("Testset:", X_test.shape)
print(pd.Series(y_test).value_counts())
for score in scores:
print(("# Tuning hyper-parameters for %s" % score))
print()
clf = GridSearchCV(RandomForestClassifier(random_state=rand_state),
param_grid, cv=5, scoring='%s' % score, n_jobs = 15, return_train_score=True)
clf.fit(X_train, y_train)
print("Detailed classification report:")
print("")
print("The model is trained on the training set.")
print("The scores are computed on the test set.")
print("")
y_true, y_pred = y_test, clf.predict(X_test)
y_true = y_true.astype('str')
y_pred = y_pred.astype('str')
print((classification_report(y_true, y_pred)))
print()
print("Best parameters set found on training set:")
print()
print((clf.best_params_))
print()
print("Grid scores on training set:")
print()
means = clf.cv_results_['mean_test_score']
stds = clf.cv_results_['std_test_score']
for mean, std, params in zip(means, stds, clf.cv_results_['params']):
print(("%0.3f (+/-%0.03f) for %r"
% (mean, std * 2, params)))
print()
df = | pd.DataFrame(clf.cv_results_) | pandas.DataFrame |
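# A hedged sketch of how the grid search above might be called; the feature names,
# label column and parameter grid are hypothetical and chosen only to illustrate
# the expected inputs, not the project's actual configuration.
def _grid_search_usage_demo(df_train, df_pred):
    features = ["mag_g", "mag_r", "mag_i"]   # hypothetical feature columns
    label = "class"                          # hypothetical label column
    param_grid = {"n_estimators": [100, 300], "max_depth": [10, None]}
    rf_class_grid_search(df_train, df_pred, features, label, param_grid,
                         rand_state=42, scores=["f1_weighted"], name="rf_grid_demo")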
import random
import pandas as pd
import numpy as np
from faker import Faker
mx= | pd.read_csv('mx.csv') | pandas.read_csv |
import numpy as np
import pandas as pd
from scipy.stats import norm
###############################################################################
#Non-Standard Imports
###############################################################################
try:
from . import curvefitting as cf
from .models import settings_handler as sh
except:
import curvefitting as cf
from models import settings_handler as sh
###############################################################################
#AIC Calculation
###############################################################################
def calculate_aic(data, models, params, priors={}):
'''Calculates the AIC of each model by computing the posterior and then applying the
AIC formula. Returns a DataFrame of the AIC values.
Parameters
----------
data : dict
Curve-fitting data.
models : dict
The models used for curve-fitting.
params : pandas.DataFrame
The parameters with which to integrate the models with.
priors : dict, optional
Priors if any. The default is {}.
Returns
-------
table : pandas.DataFrame
A DataFrame with AIC values.
'''
aic = {}
#Standardize format
try:
params1 = | pd.DataFrame(params) | pandas.DataFrame |
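# A small sketch of the AIC formula this module applies after computing the
# posterior, shown with a Gaussian likelihood on made-up residuals; the residuals
# and parameter count are illustrative assumptions only.
def _aic_demo():
    import numpy as np
    residuals = np.array([0.1, -0.2, 0.05, 0.15])
    n, k = len(residuals), 3                              # sample size, number of fitted parameters
    sse = float(np.sum(residuals ** 2))
    log_likelihood = -0.5 * n * (np.log(2 * np.pi * sse / n) + 1)   # Gaussian MLE log-likelihood
    return 2 * k - 2 * log_likelihood                     # AIC = 2k - 2 ln(L)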
import streamlit as st
import pandas as pd
###################################
from st_aggrid import AgGrid
from st_aggrid.grid_options_builder import GridOptionsBuilder
from st_aggrid.shared import JsCode
###################################
from functionforDownloadButtons import download_button
###################################
def _max_width_():
max_width_str = f"max-width: 1800px;"
st.markdown(
f"""
<style>
.reportview-container .main .block-container{{
{max_width_str}
}}
</style>
""",
unsafe_allow_html=True,
)
st.set_page_config(page_icon="✂️", page_title="CSV Wrangler")
# st.image("https://emojipedia-us.s3.dualstack.us-west-1.amazonaws.com/thumbs/240/apple/285/balloon_1f388.png", width=100)
st.image(
"https://emojipedia-us.s3.dualstack.us-west-1.amazonaws.com/thumbs/240/apple/285/scissors_2702-fe0f.png",
width=100,
)
st.title("CSV Wrangler")
# st.caption(
# "PRD : TBC | Streamlit Ag-Grid from <NAME>: https://pypi.org/project/streamlit-aggrid/"
# )
# ModelType = st.radio(
# "Choose your model",
# ["Flair", "DistilBERT (Default)"],
# help="At present, you can choose between 2 models (Flair or DistilBERT) to embed your text. More to come!",
# )
# with st.expander("ToDo's", expanded=False):
# st.markdown(
# """
# - Add pandas.json_normalize() - https://streamlit.slack.com/archives/D02CQ5Z5GHG/p1633102204005500
# - **Remove 200 MB limit and test with larger CSVs**. Currently, the content is embedded in base64 format, so we may end up with a large HTML file for the browser to render
# - **Add an encoding selector** (to cater for a wider array of encoding types)
# - **Expand accepted file types** (currently only .csv can be imported. Could expand to .xlsx, .txt & more)
# - Add the ability to convert to pivot → filter → export wrangled output (Pablo is due to change AgGrid to allow export of pivoted/grouped data)
# """
# )
#
# st.text("")
c29, c30, c31 = st.columns([1, 6, 1])
with c30:
uploaded_file = st.file_uploader(
"",
key="1",
help="To activate 'wide mode', go to the hamburger menu > Settings > turn on 'wide mode'",
)
if uploaded_file is not None:
file_container = st.expander("Check your uploaded .csv")
shows = pd.read_csv(uploaded_file)
uploaded_file.seek(0)
file_container.write(shows)
else:
st.info(
f"""
👆 Upload a .csv file first. Sample to try: [biostats.csv](https://people.sc.fsu.edu/~jburkardt/data/csv/biostats.csv)
"""
)
st.stop()
from st_aggrid import GridUpdateMode, DataReturnMode
gb = GridOptionsBuilder.from_dataframe(shows)
# enables pivoting on all columns, however i'd need to change ag grid to allow export of pivoted/grouped data, however it select/filters groups
gb.configure_default_column(enablePivot=True, enableValue=True, enableRowGroup=True)
gb.configure_selection(selection_mode="multiple", use_checkbox=True)
gb.configure_side_bar() # side_bar is clearly a typo :) should be sidebar
gridOptions = gb.build()
st.success(
f"""
💡 Tip! Hold the shift key when selecting rows to select multiple rows at once!
"""
)
response = AgGrid(
shows,
gridOptions=gridOptions,
enable_enterprise_modules=True,
update_mode=GridUpdateMode.MODEL_CHANGED,
data_return_mode=DataReturnMode.FILTERED_AND_SORTED,
fit_columns_on_grid_load=False,
)
df = | pd.DataFrame(response["selected_rows"]) | pandas.DataFrame |
import pandas as pd
import sys
import glob
import os
import re
import numpy as np
import logging
logging.basicConfig(stream=sys.stdout,
level=logging.INFO,
format='[%(asctime)s] %(message)s',
datefmt='%Y/%m/%d %H:%M:%S')
#inside pathx (MD)
def time_freq_filter(filex,complexName,per):
pathx = os.getcwd()
file = os.path.basename(filex)
fName = complexName
bondtype = file.split(".csv")[0].split("_merged_")[1]
first = pd.read_csv(filex)
os.chdir(pathx)
if not os.path.exists(f'{complexName}/04_time_freq_filter'):
os.makedirs(f'{complexName}/04_time_freq_filter', exist_ok=True)
pathxx=f'{pathx}/{complexName}/04_time_freq_filter'
os.chdir(pathxx)
pathy=pathxx+"/"+str(per)+"_freq_filtered"
if not os.path.exists(str(per)+"_freq_filtered"):
os.makedirs(str(per)+"_freq_filtered", exist_ok=True)
os.chdir(pathy)
if first.empty:
pathz = pathy + "/" + str(per) + "_freq"
if not os.path.exists(str(per) + "_freq"):
os.makedirs(str(per) + "_freq")
os.chdir(pathz)
morefirstxy = pd.DataFrame(columns=["donor_acceptor","NumSpp","total","percentage"])
morefirstxy.to_csv (pathz+"/"+fName+"_"+bondtype+"_"+str(per)+"_freq.csv", index=None)
os.chdir("..")
if not os.path.exists(str(per)+"_freq_perres"):
os.makedirs(str(per)+"_freq_perres")
pathq=pathy+"/"+str(per)+"_freq_perres"
os.chdir(pathq)
first_perres=pd.DataFrame(columns=['itype', 'donor_chain', 'acceptor_chain', 'donor_resnm', 'acceptor_resnm',
'donor_resid','acceptor_resid', 'donor_atom', 'acceptor_atom','chain_type',
"prot_or_dna",'specificity',"time"])
first_perres.to_csv (pathq+"/"+fName+"_"+bondtype+"_"+str(per)+"_freq_perres.csv", index=None)
else:
#fIRST
logging.info('Finding percentages: {}'.format(fName))
firstx = []
for adx in first.donor_acceptor.unique():
bbx = first[first["donor_acceptor"] == adx]
firstx.append([adx,
bbx.time.unique().size/first.time.unique().size*100])
firstxy = pd.DataFrame(firstx)
firstxy.columns = ["donor_acceptor","percentage"]
logging.info('Writing to file percentage: {}'.format(fName))
morefirstxy = firstxy[firstxy.percentage > float(per)]
if len(morefirstxy.donor_acceptor) == 0:
pathz = pathy + "/" + str(per) + "_freq"
if not os.path.exists(str(per) + "_freq"):
os.makedirs(str(per) + "_freq")
os.chdir(pathz)
morefirstxy = pd.DataFrame(columns=firstxy.columns)
morefirstxy.to_csv (pathz+"/"+fName+"_"+bondtype+"_"+str(per)+"_freq.csv", index=None)
os.chdir("..")
if not os.path.exists(str(per) + "_freq_perres"):
os.makedirs(str(per) + "_freq_perres")
pathq = pathy + "/" + str(per) + "_freq_perres"
os.chdir(pathq)
first_perres= pd.DataFrame(columns=first.columns)
first_perres.to_csv(pathq + "/" + fName + "_" + bondtype + "_" + str(per) + "_freq_perres.csv", index=None)
else:
pathz = pathy + "/" + str(per) + "_freq"
if not os.path.exists(str(per) + "_freq"):
os.makedirs(str(per) + "_freq")
os.chdir(pathz)
morefirstxy.to_csv (pathz+"/"+fName+"_"+bondtype+"_"+str(per)+"_freq.csv", index=None)
logging.info('Writing to file list: {}'.format(fName))
first_perres = pd.DataFrame()
for da in morefirstxy.donor_acceptor.unique():
df = first[first.donor_acceptor == da]
first_perres=first_perres.append(df)
first_perres.sort_values(by="time",inplace=True)
first_perres.reset_index(drop=True)
os.chdir("..")
if not os.path.exists(str(per)+"_freq_perres"):
os.makedirs(str(per)+"_freq_perres")
pathq=pathy+"/"+str(per)+"_freq_perres"
os.chdir(pathq)
first_perres.to_csv (pathq+"/"+fName+"_"+bondtype+"_"+str(per)+"_freq_perres.csv", index=None)
def make_freq_folders(pathy,per):
"""
Creates folders to write and read common and complex-specific bonds within the 05_compare_complex folder
:param pathy: path to 05_compare_complex
:param per: time percentage
"""
import os
os.chdir(pathy)
pathz=pathy+"/"+str(per)+"_freq_filtered"
if not os.path.exists(str(per)+"_freq_filtered"):
os.makedirs(str(per)+"_freq_filtered",exist_ok=True)
for fold in ["_freq","_freq_perres"]:
os.chdir(pathz)
#to add freq
pathq=pathz+"/"+str(per)+fold
if not os.path.exists(str(per)+fold):
os.makedirs(str(per)+fold,exist_ok=True)
os.chdir(pathq)
pathq_common=pathq+"/common"
if not os.path.exists("common"):
os.makedirs("common",exist_ok=True)
os.chdir(pathq)
pathq_spp=pathq+"/complex_specific"
if not os.path.exists("complex_specific"):
os.makedirs("complex_specific",exist_ok=True)
def get_paths(pathy,per,fold,com_spp):
import os
os.chdir(pathy)
PathToWrite = pathy + "/" + per + "_" + "freq_filtered/" + per + fold + "/" + com_spp
return PathToWrite
def compare_bonds(complexName,per):
pathx = os.getcwd()
fName = complexName[0]
sName = complexName[1]
file_lists_freq_fName = glob.glob(f'{pathx}/{fName}/04_time_freq_filter/{str(per)}_freq_filtered/{str(per)}_freq/*csv')
file_lists_freq_sName = glob.glob(f'{pathx}/{sName}/04_time_freq_filter/{str(per)}_freq_filtered/{str(per)}_freq/*csv')
file_lists_freq = file_lists_freq_fName + file_lists_freq_sName
ToCompare = {}
for filex in file_lists_freq:
file = os.path.basename(filex)
if fName in filex:
Name = fName
else:
Name = sName
bondtype = file.split(f'{Name}_')[1].split("_")[0]
if bondtype == "ring":
bondtype = "ring_stacking"
first = pd.read_csv(filex)
if bondtype in ToCompare.keys():
ToCompare[bondtype].update({Name: first})
else:
ToCompare.update({bondtype: {Name: first}})
for bondtype in ToCompare.keys():
os.chdir(pathx)
pathy = f'{pathx}/{fName}/05_compare_complex'
if not os.path.exists(f'{pathx}/{fName}/05_compare_complex'):
os.makedirs(f'{pathx}/{fName}/05_compare_complex',exist_ok=True)
os.chdir(pathy)
pathz = f'{pathx}/{sName}/05_compare_complex'
if not os.path.exists(f'{pathx}/{sName}/05_compare_complex'):
os.makedirs(f'{pathx}/{sName}/05_compare_complex',exist_ok=True)
os.chdir(pathz)
make_freq_folders(pathy, per)
fold="_freq"
morefirstxy = ToCompare[bondtype][fName]
fold="_freq_perres"
patha=f'{pathx}/{fName}/04_time_freq_filter/{str(per)}_freq_filtered/{str(per)}{fold}'
first = pd.read_csv(patha+"/"+fName+"_"+bondtype+"_"+str(per)+fold+".csv")
#SECOND
make_freq_folders(pathz, per)
fold="_freq"
moresecxy = ToCompare[bondtype][sName]
logging.info("sName : {}".format(sName))
fold="_freq_perres"
patha=f'{pathx}/{sName}/04_time_freq_filter/{str(per)}_freq_filtered/{str(per)}{fold}'
sec = pd.read_csv(patha+"/"+sName+"_"+bondtype+"_"+str(per)+fold+".csv")
#find bonds specific to first one
logging.info("Specific to {}".format(fName))
i = 0
spp_first= pd.DataFrame(columns=morefirstxy.columns)
common_first= pd.DataFrame(columns=morefirstxy.columns)
for item in morefirstxy.donor_acceptor:
item_swapped = item.split(":")[1]+":"+item.split(":")[0]
if item in moresecxy.donor_acceptor.unique():
common_first = common_first.append(pd.DataFrame(morefirstxy.iloc[i,:]).transpose())
elif item_swapped in moresecxy.donor_acceptor.unique():
common_first = common_first.append(pd.DataFrame(morefirstxy.iloc[i,:]).transpose())
else:
spp_first = spp_first.append(pd.DataFrame(morefirstxy.iloc[i,:]).transpose())
i = i+1
        spp_first = spp_first.sort_values(by="donor_acceptor", ascending=False)
spp_first.reset_index(drop=True,inplace=True)
fold="_freq"
com_spp="complex_specific"
pathq_spp=get_paths(pathy,str(per),fold,com_spp)
spp_first.to_csv (pathq_spp+"/"+fName+"_"+bondtype+"_compared_spec.csv", index=False)
        common_first = common_first.sort_values(by="donor_acceptor", ascending=False)
common_first.reset_index(drop=True,inplace=True)
com_spp="common"
pathq_common=get_paths(pathy,str(per),fold,com_spp)
common_first.to_csv (pathq_common+"/"+fName+"_"+bondtype+"_compared_common.csv", index=False)
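        # Illustrative example (hypothetical residue labels): a pair recorded as
        # "LYS123:GLU45" in the first complex counts as common if the second complex lists
        # it either identically or swapped as "GLU45:LYS123"; only pairs absent in both
        # orientations are written to the complex-specific table.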
#find bonds specific to second one
logging.info("Specific to {}".format(sName))
i = 0
spp_sec= pd.DataFrame(columns=moresecxy.columns)
common_sec= pd.DataFrame(columns=moresecxy.columns)
for item in moresecxy.donor_acceptor:
item_swapped = item.split(":")[1] + ":" + item.split(":")[0]
if item in morefirstxy.donor_acceptor.unique():
common_sec = common_sec.append(pd.DataFrame(moresecxy.iloc[i,:]).transpose())
elif item_swapped in morefirstxy.donor_acceptor.unique():
common_sec = common_sec.append(pd.DataFrame(moresecxy.iloc[i,:]).transpose())
else:
spp_sec = spp_sec.append( | pd.DataFrame(moresecxy.iloc[i,:]) | pandas.DataFrame |
# To add a new cell, type '# %%'
# To add a new markdown cell, type '# %% [markdown]'
# %%
from IPython import get_ipython
# %%
import MetaTrader5 as mt5
import pandas as pd
#get_ipython().run_line_magic('matplotlib', 'qt')
# %%
# Copying data to pandas data frame
n_days = 365
n_hours = 24
n_mins = 60
aq_window = n_days * n_hours * n_mins
plot_window = 72
# %%
# Initializing MT5 connection
mt5.initialize()
print(mt5.terminal_info())
print(mt5.version())
stockdata = pd.DataFrame()
rates = mt5.copy_rates_from_pos("EURUSD", mt5.TIMEFRAME_H1,0,100)
#rates = np.flip(rates,0)
rates.shape
# %%
data_frame = pd.DataFrame(rates,columns=['time','open','high','low','close','nn','nn1','nn2']).drop(['nn','nn1','nn2'],axis=1)
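# Illustrative alternative conversion (hypothetical column name 'time_dt'): MT5 reports the
# 'time' field as Unix epoch seconds, so timestamps can also be recovered with
#   data_frame['time_dt'] = pd.to_datetime(data_frame['time'], unit='s')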
# %%
data_frame['date'] = | pd.Timestamp.to_pydatetime(data_frame['time']) | pandas.Timestamp.to_pydatetime |
import numpy as np
import pytest
from pandas.compat import lrange
import pandas as pd
from pandas import Series, Timestamp
from pandas.util.testing import assert_series_equal
@pytest.mark.parametrize("val,expected", [
(2**63 - 1, 3),
(2**63, 4),
])
def test_loc_uint64(val, expected):
# see gh-19399
s = Series({2**63 - 1: 3, 2**63: 4})
assert s.loc[val] == expected
def test_loc_getitem(test_data):
inds = test_data.series.index[[3, 4, 7]]
assert_series_equal(
test_data.series.loc[inds],
test_data.series.reindex(inds))
assert_series_equal(test_data.series.iloc[5::2], test_data.series[5::2])
# slice with indices
d1, d2 = test_data.ts.index[[5, 15]]
result = test_data.ts.loc[d1:d2]
expected = test_data.ts.truncate(d1, d2)
assert_series_equal(result, expected)
# boolean
mask = test_data.series > test_data.series.median()
assert_series_equal(test_data.series.loc[mask], test_data.series[mask])
# ask for index value
assert test_data.ts.loc[d1] == test_data.ts[d1]
assert test_data.ts.loc[d2] == test_data.ts[d2]
def test_loc_getitem_not_monotonic(test_data):
d1, d2 = test_data.ts.index[[5, 15]]
ts2 = test_data.ts[::2][[1, 2, 0]]
msg = r"Timestamp\('2000-01-10 00:00:00'\)"
with pytest.raises(KeyError, match=msg):
ts2.loc[d1:d2]
with pytest.raises(KeyError, match=msg):
ts2.loc[d1:d2] = 0
def test_loc_getitem_setitem_integer_slice_keyerrors():
s = Series(np.random.randn(10), index=lrange(0, 20, 2))
# this is OK
cp = s.copy()
cp.iloc[4:10] = 0
assert (cp.iloc[4:10] == 0).all()
# so is this
cp = s.copy()
cp.iloc[3:11] = 0
assert (cp.iloc[3:11] == 0).values.all()
result = s.iloc[2:6]
result2 = s.loc[3:11]
expected = s.reindex([4, 6, 8, 10])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
# non-monotonic, raise KeyError
s2 = s.iloc[lrange(5) + lrange(5, 10)[::-1]]
with pytest.raises(KeyError, match=r"^3L?$"):
s2.loc[3:11]
with pytest.raises(KeyError, match=r"^3L?$"):
s2.loc[3:11] = 0
def test_loc_getitem_iterator(test_data):
idx = iter(test_data.series.index[:10])
result = test_data.series.loc[idx]
assert_series_equal(result, test_data.series[:10])
def test_loc_setitem_boolean(test_data):
mask = test_data.series > test_data.series.median()
result = test_data.series.copy()
result.loc[mask] = 0
expected = test_data.series
expected[mask] = 0
assert_series_equal(result, expected)
def test_loc_setitem_corner(test_data):
inds = list(test_data.series.index[[5, 8, 12]])
test_data.series.loc[inds] = 5
msg = r"\['foo'\] not in index"
with pytest.raises(KeyError, match=msg):
test_data.series.loc[inds + ['foo']] = 5
def test_basic_setitem_with_labels(test_data):
indices = test_data.ts.index[[5, 10, 15]]
cp = test_data.ts.copy()
exp = test_data.ts.copy()
cp[indices] = 0
exp.loc[indices] = 0
assert_series_equal(cp, exp)
cp = test_data.ts.copy()
exp = test_data.ts.copy()
cp[indices[0]:indices[2]] = 0
exp.loc[indices[0]:indices[2]] = 0
assert_series_equal(cp, exp)
# integer indexes, be careful
s = Series(np.random.randn(10), index= | lrange(0, 20, 2) | pandas.compat.lrange |
import pandas as pd
from typing import List, Tuple, Text
import glob
import os
from bsuite.logging import csv_logging
from bsuite import sweep
def load_one_result_set(results_dir: Text) -> pd.DataFrame:
"""Returns a pandas DataFrame of bsuite results stored in results_dir."""
data = []
for file_path in glob.glob(os.path.join(results_dir, '*.csv')):
_, name = os.path.split(file_path)
# Rough and ready error-checking for only bsuite csv files.
if not name.startswith(csv_logging.BSUITE_PREFIX):
print('Warning - we recommend you use a fresh folder for bsuite results.')
continue
# Then we will assume that the file is actually a bsuite file
df = pd.read_csv(file_path)
file_bsuite_id = name.strip('.csv').split(csv_logging.INITIAL_SEPARATOR)[1]
bsuite_id = file_bsuite_id.replace(csv_logging.SAFE_SEPARATOR,
sweep.SEPARATOR)
df['bsuite_id'] = bsuite_id
df['results_dir'] = results_dir
data.append(df)
df = | pd.concat(data, sort=False) | pandas.concat |
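# Illustrative summary: each CSV name encodes the bsuite_id with a filesystem-safe separator;
# the loop above recovers the canonical id (something like 'catch/0'), tags every row with it
# and with results_dir, and pd.concat then stacks all experiments into one DataFrame for
# downstream summary analysis.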
import unittest
import pandas as pd
import logging
import cmapPy.pandasGEXpress.setup_GCToo_logger as setup_GCToo_logger
import cmapPy.pandasGEXpress.GCToo as GCToo
logger = logging.getLogger(setup_GCToo_logger.LOGGER_NAME)
class TestGctoo(unittest.TestCase):
def test_init(self):
# Create test data
meth_df = pd.DataFrame([[1, 2, 3], [4, 5, 6]],
index=["A", "B"], columns=["a", "b", "c"])
cov_df = pd.DataFrame([[1, 2, 3], [4, 5, 6]],
index=["A", "B"], columns=["a", "b", "c"])
row_metadata_df = pd.DataFrame([["rhd_A", "rhd_B"], ["rhd_C", "rhd_D"]],
index=["A", "B"], columns=["rhd1", "rhd2"])
col_metadata_df = pd.DataFrame(["chd_a", "chd_b", "chd_c"],
index=["a", "b", "c"], columns=["chd1"])
# happy path, no multi-index
my_gctoo1 = GCToo.GCToo(meth_df=meth_df, cov_df=cov_df, row_metadata_df=row_metadata_df,
col_metadata_df=col_metadata_df)
self.assertTrue(my_gctoo1.multi_index_meth_df == None,
'Expected no multi-index DataFrame but found {}'.format(my_gctoo1.multi_index_meth_df))
# happy path, with multi-index
my_gctoo2 = GCToo.GCToo(meth_df=meth_df, cov_df=cov_df, row_metadata_df=row_metadata_df,
col_metadata_df=col_metadata_df, make_multiindex = True)
# doesn't pass due to visibility of pd.core?
# self.assertTrue(isinstance(my_gctoo2.multi_index_meth_df.index, pd.core.index.MultiIndex),
# "Expected a multi_index DataFrame but instead found {}". format(my_gctoo2.multi_index_meth_df))
#happy path, no metadata provided
my_gctoo3 = GCToo.GCToo(meth_df, cov_df)
self.assertIsNotNone(my_gctoo3.row_metadata_df)
self.assertIsNotNone(my_gctoo3.col_metadata_df)
def test__setattr__(self):
# case 1: not init yet, should just run __init__
# Create test data
meth_df = pd.DataFrame([[1, 2, 3], [4, 5, 6]],
index=["A", "B"], columns=["a", "b", "c"])
cov_df = pd.DataFrame([[1, 2, 3], [4, 5, 6]],
index=["A", "B"], columns=["a", "b", "c"])
row_metadata_df = pd.DataFrame([["rhd_A", "rhd_B"], ["rhd_C", "rhd_D"]],
index=["A", "B"], columns=["rhd1", "rhd2"])
col_metadata_df = pd.DataFrame(["chd_a", "chd_b", "chd_c"],
index=["a", "b", "c"], columns=["chd1"])
## happy path, no multi-index
my_gctoo1 = GCToo.GCToo(meth_df=meth_df, cov_df=cov_df, row_metadata_df=row_metadata_df,col_metadata_df=col_metadata_df)
## reset row_metadata_df: happy case
new_rid_order = ["B", "A"]
new_row_meta1 = my_gctoo1.row_metadata_df.copy().reindex(new_rid_order)
# shouldn't have any problems re-setting row_meta
my_gctoo1.row_metadata_df = new_row_meta1
pd.util.testing.assert_frame_equal(my_gctoo1.row_metadata_df, row_metadata_df)
## reset row_metadata_df: to not a DF
new_row_meta2 = "this is my new row metadata"
with self.assertRaises(Exception) as context:
my_gctoo1.row_metadata_df = new_row_meta2
self.assertTrue("expected Pandas DataFrame, got something else" in str(context.exception))
## reset row_metadata_df: non-matching index values
new_row_meta3 = my_gctoo1.row_metadata_df.copy()
new_row_meta3.index = ["thing1", "thing2"]
with self.assertRaises(Exception) as context:
my_gctoo1.row_metadata_df = new_row_meta3
self.assertTrue("The rids are inconsistent between data_df and row_metadata_df" in str(context.exception))
## reset row_metadata_df: not unique index values
new_row_meta4 = my_gctoo1.row_metadata_df.copy()
new_row_meta4.index = ["A", "A"]
with self.assertRaises(Exception) as context:
my_gctoo1.row_metadata_df = new_row_meta4
self.assertTrue("Index values must be unique" in str(context.exception))
my_gctoo2 = GCToo.GCToo(meth_df=meth_df, cov_df=cov_df, row_metadata_df=row_metadata_df,
col_metadata_df=col_metadata_df)
## reset col_metadata_df: happy case
new_cid_order = ["c", "a", "b"]
new_col_meta1 = my_gctoo2.col_metadata_df.copy().reindex(new_cid_order)
# shouldn't have any problems
my_gctoo2.col_metadata_df = new_col_meta1
pd.util.testing.assert_frame_equal(my_gctoo2.col_metadata_df, col_metadata_df)
## reset col_metadata_df: to not a DF
new_col_meta2 = "this is my new col metadata"
with self.assertRaises(Exception) as context:
my_gctoo2.col_metadata_df = new_col_meta2
self.assertTrue("expected Pandas DataFrame, got something else" in str(context.exception))
## reset col_metadata_df: non-matching index values
new_col_meta3 = my_gctoo2.col_metadata_df.copy()
new_col_meta3.index = ["thing1", "thing2", "thing3"]
with self.assertRaises(Exception) as context:
my_gctoo2.col_metadata_df = new_col_meta3
self.assertTrue("The cids are inconsistent between data_df and col_metadata_df" in str(context.exception))
## reset col_metadata_df: not unique index values
new_col_meta4 = my_gctoo2.col_metadata_df.copy()
new_col_meta4.index = ["a", "b", "a"]
with self.assertRaises(Exception) as context:
my_gctoo2.col_metadata_df = new_col_meta4
self.assertTrue("Index values must be unique" in str(context.exception))
my_gctoo3 = GCToo.GCToo(meth_df=meth_df, cov_df=cov_df, row_metadata_df=row_metadata_df,
col_metadata_df=col_metadata_df)
## reset data_df: happy case
new_data_df1_tmp_m = my_gctoo3.meth_df.copy().reindex(new_rid_order)
new_data_df1_tmp_c = my_gctoo3.cov_df.copy().reindex(new_rid_order)
new_data_df1_m = new_data_df1_tmp_m.reindex(columns=new_cid_order)
new_data_df1_c = new_data_df1_tmp_c.reindex(columns=new_cid_order)
# shouldn't have problems
my_gctoo3.meth_df = new_data_df1_m
my_gctoo3.cov_df = new_data_df1_c
# resetting data_df means rearranging the row and col meta dfs
| pd.util.testing.assert_frame_equal(my_gctoo3.meth_df, new_data_df1_m) | pandas.util.testing.assert_frame_equal |
from datetime import timedelta
import numpy as np
import pytest
from pandas._libs import iNaT
import pandas as pd
from pandas import (
Categorical,
DataFrame,
Index,
IntervalIndex,
NaT,
Series,
Timestamp,
date_range,
isna,
)
import pandas._testing as tm
class TestSeriesMissingData:
def test_categorical_nan_equality(self):
cat = Series(Categorical(["a", "b", "c", np.nan]))
exp = Series([True, True, True, False])
res = cat == cat
tm.assert_series_equal(res, exp)
def test_categorical_nan_handling(self):
# NaNs are represented as -1 in labels
s = Series(Categorical(["a", "b", np.nan, "a"]))
tm.assert_index_equal(s.cat.categories, Index(["a", "b"]))
tm.assert_numpy_array_equal(
s.values.codes, np.array([0, 1, -1, 0], dtype=np.int8)
)
def test_fillna_nat(self):
series = Series([0, 1, 2, iNaT], dtype="M8[ns]")
filled = series.fillna(method="pad")
filled2 = series.fillna(value=series.values[2])
expected = series.copy()
expected.values[3] = expected.values[2]
tm.assert_series_equal(filled, expected)
tm.assert_series_equal(filled2, expected)
df = DataFrame({"A": series})
filled = df.fillna(method="pad")
filled2 = df.fillna(value=series.values[2])
expected = DataFrame({"A": expected})
tm.assert_frame_equal(filled, expected)
tm.assert_frame_equal(filled2, expected)
series = Series([iNaT, 0, 1, 2], dtype="M8[ns]")
filled = series.fillna(method="bfill")
filled2 = series.fillna(value=series[1])
expected = series.copy()
expected[0] = expected[1]
tm.assert_series_equal(filled, expected)
tm.assert_series_equal(filled2, expected)
df = DataFrame({"A": series})
filled = df.fillna(method="bfill")
filled2 = df.fillna(value=series[1])
expected = DataFrame({"A": expected})
tm.assert_frame_equal(filled, expected)
tm.assert_frame_equal(filled2, expected)
def test_isna_for_inf(self):
s = Series(["a", np.inf, np.nan, pd.NA, 1.0])
with pd.option_context("mode.use_inf_as_na", True):
r = s.isna()
dr = s.dropna()
e = Series([False, True, True, True, False])
de = Series(["a", 1.0], index=[0, 4])
tm.assert_series_equal(r, e)
tm.assert_series_equal(dr, de)
def test_isnull_for_inf_deprecated(self):
# gh-17115
s = Series(["a", np.inf, np.nan, 1.0])
with pd.option_context("mode.use_inf_as_null", True):
r = s.isna()
dr = s.dropna()
e = Series([False, True, True, False])
de = Series(["a", 1.0], index=[0, 3])
tm.assert_series_equal(r, e)
tm.assert_series_equal(dr, de)
def test_timedelta64_nan(self):
td = Series([timedelta(days=i) for i in range(10)])
# nan ops on timedeltas
td1 = td.copy()
td1[0] = np.nan
assert isna(td1[0])
assert td1[0].value == iNaT
td1[0] = td[0]
assert not isna(td1[0])
# GH#16674 iNaT is treated as an integer when given by the user
td1[1] = iNaT
assert not isna(td1[1])
assert td1.dtype == np.object_
assert td1[1] == iNaT
td1[1] = td[1]
assert not isna(td1[1])
td1[2] = NaT
assert isna(td1[2])
assert td1[2].value == iNaT
td1[2] = td[2]
assert not isna(td1[2])
# FIXME: don't leave commented-out
# boolean setting
# this doesn't work, not sure numpy even supports it
# result = td[(td>np.timedelta64(timedelta(days=3))) &
# td<np.timedelta64(timedelta(days=7)))] = np.nan
# assert isna(result).sum() == 7
# NumPy limitation =(
# def test_logical_range_select(self):
# np.random.seed(12345)
# selector = -0.5 <= datetime_series <= 0.5
# expected = (datetime_series >= -0.5) & (datetime_series <= 0.5)
# tm.assert_series_equal(selector, expected)
def test_dropna_empty(self):
s = Series([], dtype=object)
assert len(s.dropna()) == 0
return_value = s.dropna(inplace=True)
assert return_value is None
assert len(s) == 0
# invalid axis
msg = "No axis named 1 for object type Series"
with pytest.raises(ValueError, match=msg):
s.dropna(axis=1)
def test_datetime64_tz_dropna(self):
# DatetimeBlock
s = Series(
[
Timestamp("2011-01-01 10:00"),
pd.NaT,
Timestamp("2011-01-03 10:00"),
pd.NaT,
]
)
result = s.dropna()
expected = Series(
[Timestamp("2011-01-01 10:00"), Timestamp("2011-01-03 10:00")], index=[0, 2]
)
tm.assert_series_equal(result, expected)
# DatetimeBlockTZ
idx = pd.DatetimeIndex(
["2011-01-01 10:00", pd.NaT, "2011-01-03 10:00", pd.NaT], tz="Asia/Tokyo"
)
s = Series(idx)
assert s.dtype == "datetime64[ns, Asia/Tokyo]"
result = s.dropna()
expected = Series(
[
Timestamp("2011-01-01 10:00", tz="Asia/Tokyo"),
Timestamp("2011-01-03 10:00", tz="Asia/Tokyo"),
],
index=[0, 2],
)
assert result.dtype == "datetime64[ns, Asia/Tokyo]"
tm.assert_series_equal(result, expected)
def test_dropna_no_nan(self):
for s in [Series([1, 2, 3], name="x"), Series([False, True, False], name="x")]:
result = s.dropna()
tm.assert_series_equal(result, s)
assert result is not s
s2 = s.copy()
return_value = s2.dropna(inplace=True)
assert return_value is None
tm.assert_series_equal(s2, s)
def test_dropna_intervals(self):
s = Series(
[np.nan, 1, 2, 3],
| IntervalIndex.from_arrays([np.nan, 0, 1, 2], [np.nan, 1, 2, 3]) | pandas.IntervalIndex.from_arrays |
'''
author : <NAME>
roll no: 108118083
domain : Signal Processing and ML
subdomain : Machine Learning
'''
import numpy as np
import pandas as pd
import os
class NeuralNetwork():
def __init__(self, input_layer_size, hidden_layer_size, output_layer_size, directory=None):
'''
input_layer_size : no. of units in the input layer
hidden_layer_size : no.of units in hidden layer
ouput_layer_size : no. of units in ouput layer
directory : directory in which to save trained models weights
'''
self.input_layer_size = input_layer_size
self.hidden_layer_size = hidden_layer_size
self.output_layer_size = output_layer_size
self.directory = directory
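    # Illustrative usage sketch (hypothetical layer sizes and directory):
    #   nn = NeuralNetwork(input_layer_size=784, hidden_layer_size=64,
    #                      output_layer_size=10, directory='saved_models')
    #   nn.train(X_train, Y_train, learning_rate=0.1, epochs=100)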
def train(self, X, Y, learning_rate, epochs, initial_iteration=0, lambda_=1, weights_directory=None):
'''
X : training data
Y : training labels
learning rate : Learning rate for Gradient Descent
epochs : number of times the model sees the whole dataset
initial_iteration : iteration to start from
lambda : regularisation parameter
file_name : file name where weigths are saved, if loading weights
'''
print('shape of training dataset',X.shape)
print('input layer size =',self.input_layer_size)
print('hidden layer size =',self.hidden_layer_size)
print('output layer size =',self.output_layer_size)
if initial_iteration==0:
#Initializing random weights
Theta1 = self.randInitializeWeights(self.input_layer_size, self.hidden_layer_size)
Theta2 = self.randInitializeWeights(self.hidden_layer_size, self.output_layer_size)
loss_list = []
else:
#If continuing the training, then load the weights from weights_directory
theta1_df = | pd.read_csv(f'{weights_directory}/Theta1_{initial_iteration}.csv') | pandas.read_csv |
"""
Utils
"""
import os
import datetime as dt
import csv
import bisect
import pickle as pk
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import constants
from constants import DATA_ATTRIBUTES
def is_float(target):
try:
_ = float(target)
except ValueError:
return False
else:
return True
def is_int(target):
"""
"""
try:
a = float(target)
b = int(a)
except ValueError:
return False
else:
return a == b
def read_csv_file(filename, columns=None, with_header=True):
"""
Read a csv file
Parameters:
-----------
filename : str
The path of the file to be read
columns : list, type of element should be 'int' or 'str'. Column name is case insensitive.
The indices of columns or names of columns to be returned.
Default is None, which means to return all columns
with_header : boolean
Indicate if the file has header or not. Default is 'True'.
"""
data_type = filename.split(os.sep)[-1]
is_gps = False
if data_type.startswith('gps'):
is_gps = True
with open(filename, 'r') as f:
if with_header:
header = f.readline()
header = header.replace('"', '').strip()
col_names = header.split(',')
col_names = [name.lower() for name in col_names]
# print(col_names)
# TODO: check if it is empty file
result = []
num_columns = None # the number of columns in the file
selected_cols = [] # the index of 'needed' columns
for line in f:
line = line.replace('"', '').strip()
elements = line.split(',')
if not num_columns:
num_columns = len(elements)
if columns:
if type(columns[0]) is int:
selected_cols = columns
elif type(columns[0]) is str:
selected_cols = [col_names.index(col.lower()) for col in columns]
else:
# return all columns if parameter 'columns' is not given
# use num_columns instead of len(col_names), since the length of headers might be larger, e.g. raw_obd
selected_cols = [i for i in range(num_columns)]
elif num_columns != len(elements): # means the last line is incomplete
break
# ignore 'network' obtained gps
if is_gps and elements[-1] == 'network':
continue
cur_row = []
for col_index in selected_cols:
value = elements[col_index]
# need to convert from string to the right type
if '.' in value:
value = float(value)
elif is_int(value):
value = int(value)
cur_row.append(value)
result.append(cur_row)
    return np.array(result) # to keep consistent with pandas.read_csv
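# Illustrative usage sketch (hypothetical file and column names):
#   rows = read_csv_file('trip_01/gps.txt', columns=['timestamp', 'latitude', 'longitude'])
#   rows = read_csv_file('trip_01/gps.txt', columns=[0, 1, 2])  # selecting by index also works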
def read_raw_obd(filename, columns=None):
"""
TODO: handle cases when the last line is incomplete
"""
if not columns:
columns = ['timestamp', 'Speed']
return pd.read_csv(filename, sep=",", usecols=columns, error_bad_lines=False, engine="python", skipfooter=1).values
def read_gps(filename, columns=None, ignore_network=True):
"""
Read gps data from given file.
Parameters
----------
ignore_network : boolean, default=True
Ignore the lines obtained via network
"""
return read_csv_file(filename, columns=columns, with_header=True)
# the last line might be INCOMPLETE, so that we use engine="python" here and raise warning message
# instead of throwing error
# return pd.read_csv(filename, usecols=columns, error_bad_lines=False, engine="python").values
def read_raw_acc(filename, columns=None):
"""
Read acc data.
"""
# the last line might be incomplete.
print(filename)
# data = pd.read_csv(filename, sep=",", usecols=columns, error_bad_lines=False, engine="python")
# TODO: columns
# skip the header, since header in some files are not perfectly matching the content
# e.g. forester_weida/35823905098470/VehSenseData2018_03_20_19_56_59/raw_acc.txt
# but this should not happen for data collected afterward
data = pd.read_csv(filename, sep=",", usecols=[1, 3, 4, 5], skiprows=[0], header=None, error_bad_lines=False, engine="python", skipfooter=1)
# time_stamps = data[['sys_time']].values
# raw_acc = data[['raw_x_acc', 'raw_y_acc', 'raw_z_acc']].values
return data.values # time_stamps, raw_acc
def read_raw_gyro(filename, columns=None):
"""
Read the raw gyroscope file.
"""
# columns = ["sys_time", "raw_x_gyro", "raw_y_gyro", "raw_z_gyro"]
data = pd.read_csv(filename, sep=",", usecols=[1, 3, 4, 5], skiprows=[0], header=None, error_bad_lines=False, engine="python")
return data.values
def read_calibration_para(filename):
"""
"""
return | pd.read_csv(filename, sep=',', header=None) | pandas.read_csv |
from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
import collections as cl
import pandas as pd
from .crop import Crop
import json
from .util import *
class Private():
def __init__(self, df, name, key, land_fraction):
self.T = len(df)
self.starting_year = df.index.year[0]
self.number_years = df.index.year[-1]-df.index.year[0]
self.key = key
self.name = name
self.leap = leap(np.arange(min(df.index.year), max(df.index.year) + 2))
year_list = np.arange(min(df.index.year), max(df.index.year) + 2)
self.days_in_month = days_in_month(year_list, self.leap)
self.dowy_eom = dowy_eom(year_list, self.leap)
self.non_leap_year = first_non_leap_year(self.dowy_eom)
self.turnback_use = True
self.has_pesticide = False
self.has_pmp = False
for k,v in json.load(open('cord/private/%s_properties.json' % key)).items():
setattr(self,k,v)
self.contract_fractions = {}
for x in self.district_list:
self.contract_fractions[x] = land_fraction
#intialize crop acreages and et demands for crops
self.irrdemand = {}
for x in self.district_list:
self.irrdemand[x] = Crop(self.zone[x])
#initialize dictionary to hold different delivery types
self.deliveries = {}
self.contract_list_all = ['tableA', 'cvpdelta', 'exchange', 'cvc', 'friant1', 'friant2','kaweah', 'tule', 'kern', 'kings']
self.non_contract_delivery_list = ['inleiu','leiupumping','recharged','exchanged_SW', 'recover_banked']
for district in self.district_list:
self.deliveries[district] = {}
for x in self.contract_list_all:
#normal contract deliveries
self.deliveries[district][x] = np.zeros(self.number_years)
#uncontrolled deliveries from contract
self.deliveries[district][x + '_flood'] = np.zeros(self.number_years)
self.deliveries[district][x + '_flood_irrigation'] = np.zeros(self.number_years)
for x in self.non_contract_delivery_list:
        #deliveries from a groundwater bank (recorded by banking partner receiving recovery water)
self.deliveries[district][x] = np.zeros(self.number_years)
self.deliveries['exchanged_GW'] = np.zeros(self.number_years)
self.deliveries['undelivered_trades'] = np.zeros(self.number_years)
#set dictionaries to keep track of different 'color' water for each contract
self.current_balance = {}#contract water currently available in surface water storage
self.paper_balance = {}#balance (positive) or negative of paper trades made from groundwater banks
self.turnback_pool = {}#water purchased from intra-contract marketes (June 1st)
self.projected_supply = {}#projected annual allocation to each contract
self.carryover = {}#water 'carried over' in surface water storage from previous year's contract
self.recharge_carryover = {}#amount of water that the district wants to request contract deliveries for recharge
self.delivery_carryover = {}#amount of water to deliver immediately becuase of surface storage spillage
self.contract_carryover_list = {}#maximum carryover storage on contract
#initialize values for all contracts in dictionaries
for z in self.district_list:
self.current_balance[z] = {}
self.turnback_pool[z] = {}
self.projected_supply[z] = {}
self.carryover[z] = {}
self.recharge_carryover[z] = {}
self.delivery_carryover[z] = {}
self.contract_carryover_list[z] = {}
for y in self.contract_list_all:
self.current_balance[z][y] = 0.0
self.paper_balance[y] = 0.0
self.turnback_pool[z][y] = 0.0
self.projected_supply[z][y] = 0.0
self.carryover[z][y] = 0.0
self.recharge_carryover[z][y] = 0.0
self.delivery_carryover[z][y] = 0.0
self.contract_carryover_list[z][y] = 0.0
#initialize dictionaries to 'store' daily state variables (for export to csv)
self.daily_supplies = {}
supply_list = ['paper', 'carryover', 'allocation', 'delivery', 'leiu_accepted', 'banked', 'pumping', 'leiu_delivered', 'recharge_delivery', 'recharge_uncontrolled', 'banked_storage', 'annual_demand', 'contract_available', 'carryover_available', 'use_recharge', 'use_recovery', 'numdays', 'recharge_cap', 'recovery_cap']
for x in supply_list:
self.daily_supplies[x] = np.zeros(self.T)
#initialize dictionaries to 'store' annual change in state variables (for export to csv)
self.annual_supplies = {}
supply_list = ['delivery', 'leiu_accepted', 'leiu_delivered', 'banked_accepted', 'recharge_uncontrolled', 'recharge_delivery', 'banked_storage', 'acreage']
for x in supply_list:
self.annual_supplies[x] = np.zeros(self.number_years)
# hold all output
self.daily_supplies_full = {}
self.demand_days = {}
self.demand_days['current'] = {}
self.demand_days['lookahead'] = {}
# delivery_list = ['tableA', 'cvpdelta', 'exchange', 'cvc', 'friant1', 'friant2','kaweah', 'tule', 'kern']
for x in self.contract_list_all:
self.daily_supplies_full[x + '_delivery'] = np.zeros(self.T)
self.daily_supplies_full[x + '_flood'] = np.zeros(self.T)
self.daily_supplies_full[x + '_projected'] = np.zeros(self.T)
self.daily_supplies_full[x + '_paper'] = np.zeros(self.T)
self.daily_supplies_full[x + '_carryover'] = np.zeros(self.T)
self.daily_supplies_full[x + '_turnback'] = np.zeros(self.T)
self.demand_days['current'][x] = 0.0
self.demand_days['lookahead'][x] = 0.0
for x in self.non_contract_delivery_list:
self.daily_supplies_full[x] = np.zeros(self.T)
# ['recover_banked', 'inleiu', 'leiupumping', 'recharged', 'exchanged_GW', 'exchanged_SW', 'undelivered_trades']
#Initialize demands
self.annualdemand = {}
self.dailydemand = {}
self.dailydemand_start = {}
self.per_acre = {}
self.total_acreage = 0.0
self.unmet_et = {}
for x in self.district_list:
self.per_acre[x] = {}
self.unmet_et[x] = np.zeros(self.number_years)
self.unmet_et[x + "_total"] = np.zeros(self.number_years)
self.dailydemand_start[x] = 0.0
self.dailydemand[x] = 0.0
for v in self.crop_list:
self.per_acre[x][v] = {}
self.per_acre[x][v]['mature'] = 0.0
self.per_acre[x][v]['immature'] = 0.0
#recovery and pumping variables
#self.recovery_fraction = 0.5
self.annual_pumping = 0.0
self.use_recharge = 0.0
self.use_recovery = 0.0
self.extra_leiu_recovery = 0.0
self.max_recovery = 0.0
self.max_leiu_exchange = 0.0
self.total_banked_storage = 0.0
self.direct_recovery_delivery = {}
for x in self.district_list:
self.direct_recovery_delivery[x] = 0.0
#for in-district recharge & counters (for keeping track of how long a basin has been continuously 'wet'
self.recharge_rate = self.in_district_direct_recharge*cfs_tafd
self.thismonthuse = 0
self.monthusecounter = 0
self.monthemptycounter = 0
self.current_recharge_storage = 0.0
def object_equals(self, other):
##This function compares two instances of an object, returns True if all attributes are identical.
equality = {}
if (self.__dict__.keys() != other.__dict__.keys()):
return ('Different Attributes')
else:
differences = 0
for i in self.__dict__.keys():
if type(self.__getattribute__(i)) is dict:
equality[i] = True
for j in self.__getattribute__(i).keys():
if (type(self.__getattribute__(i)[j] == other.__getattribute__(i)[j]) is bool):
if ((self.__getattribute__(i)[j] == other.__getattribute__(i)[j]) == False):
equality[i] = False
differences += 1
else:
if ((self.__getattribute__(i)[j] == other.__getattribute__(i)[j]).all() == False):
equality[i] = False
differences += 1
else:
if (type(self.__getattribute__(i) == other.__getattribute__(i)) is bool):
equality[i] = (self.__getattribute__(i) == other.__getattribute__(i))
if equality[i] == False:
differences += 1
else:
equality[i] = (self.__getattribute__(i) == other.__getattribute__(i)).all()
if equality[i] == False:
differences += 1
return (differences == 0)
#####################################################################################################################
##################################DEMAND CALCULATION#################################################################
#####################################################################################################################
def permanent_crop_growth(self, wateryear):
self.total_acreage = 0.0
for x in self.district_list:
irrigation = 0.0
missing_demand = self.unmet_et[x][wateryear-1]
for y in self.contract_list_all:
irrigation += self.deliveries[x][y][wateryear-1]
irrigation -= self.deliveries[x]['recharged'][wateryear-1]
percent_filled = {}
for v in self.crop_list:
percent_filled[v] = np.zeros(25)
for ageloop in range(24, -1, -1):
for v in self.crop_list:
if ageloop < self.crop_maturity[v]:
total_et = self.per_acre[x][v]['immature']*(self.acreage[x][v][ageloop])
else:
total_et = self.per_acre[x][v]['mature']*(self.acreage[x][v][ageloop])
if total_et > 0.0:
percent_filled[v][ageloop] = 1.0 - max(min(missing_demand/total_et, 1.0), 0.0)
else:
percent_filled[v][ageloop] = 0.0
missing_demand -= total_et*(1.0 - percent_filled[v][ageloop])
for v in self.crop_list:
for ageloop in range(24, 0, -1):
self.acreage[x][v][ageloop] = self.acreage[x][v][ageloop-1]*percent_filled[v][ageloop-1]
self.total_acreage += self.acreage[x][v][ageloop]
if self.has_pesticide:
self.acreage[x][v][0] = self.initial_planting[x][v][wateryear]
else:
self.acreage[x][v][0] = self.initial_planting[x][v]
self.total_acreage += self.acreage[x][v][0]
for x in self.district_list:
self.per_acre[x] = {}
for v in self.crop_list:
self.per_acre[x][v] = {}
self.per_acre[x][v]['mature'] = 0.0
self.per_acre[x][v]['immature'] = 0.0
self.find_baseline_demands()
def find_baseline_demands(self):
self.monthlydemand = {}
self.agedemand = {}
wyt_list = ['W', 'AN', 'BN', 'D', 'C']
for district in self.district_list:
self.monthlydemand[district] = {}
self.agedemand[district] = {}
district_et = self.irrdemand[district]
for wyt in wyt_list:
self.monthlydemand[district][wyt] = np.zeros(12)
self.agedemand[district][wyt] = np.zeros(25)
for i,v in enumerate(self.crop_list):
annualET = 0.0
annualETimm = 0.0
for monthloop in range(0,12):
annualET += max(district_et.etM[v][wyt][monthloop] - district_et.etM['precip'][wyt][monthloop],0.0)/(12.0)
annualETimm += max(district_et.etM[v + '_immature'][wyt][monthloop] - district_et.etM['precip'][wyt][monthloop],0.0)/(12.0)
for ageloop in range(0,25):
if ageloop < self.crop_maturity[v]:
self.agedemand[district][wyt][ageloop] += self.acreage[district][v][ageloop]*annualET*self.seepage[district]
else:
self.agedemand[district][wyt][ageloop] += self.acreage[district][v][ageloop]*annualETimm
for monthloop in range(0,12):
self.monthlydemand[district][wyt][monthloop] += self.urban_profile[district][monthloop]*self.MDD[district]/self.days_in_month[self.non_leap_year][monthloop]
for i,v in enumerate(self.crop_list):
age_length = len(self.acreage[district][v])
acres_mature = np.sum(self.acreage[district][v][self.crop_maturity[v]:age_length])
acres_immature = np.sum(self.acreage[district][v][0:self.crop_maturity[v]])
district_et = self.irrdemand[district]
self.monthlydemand[district][wyt][monthloop] += max(district_et.etM[v][wyt][monthloop] - district_et.etM['precip'][wyt][monthloop],0.0)*acres_mature/(12.0*self.days_in_month[self.non_leap_year][monthloop])
self.monthlydemand[district][wyt][monthloop] += max(district_et.etM[v + '_immature'][wyt][monthloop] - district_et.etM['precip'][wyt][monthloop],0.0)*acres_immature/(12.0*self.days_in_month[self.non_leap_year][monthloop])
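  # Worked example (illustrative numbers only): for a month where a mature crop's ET is 0.6 and
  # effective precipitation is 0.1 (same depth units), 1200 mature acres add
  # max(0.6 - 0.1, 0)*1200/(12.0*30) = 1.67 to that month's daily demand entry.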
def calc_demand(self, wateryear, year, da, m, m1, wyt):
#from the monthlydemand dictionary (calculated at the beginning of each wateryear based on ag acreage and urban demands), calculate the daily demand and the remaining annual demand
monthday = self.days_in_month[year][m-1]
for x in self.district_list:
self.dailydemand[x] = self.monthlydemand[x][wyt][m-1]*(monthday-da)/monthday + self.monthlydemand[x][wyt][m1-1]*da/monthday
if self.dailydemand[x] < 0.0:
self.dailydemand[x] = 0.0
for x in self.district_list:
district_et = self.irrdemand[x]
for v in self.crop_list:
self.per_acre[x][v]['mature'] += (max(district_et.etM[v][wyt][m-1] - district_et.etM['precip'][wyt][m-1],0.0)*((monthday-da)/monthday) + max(district_et.etM[v][wyt][m1-1] - district_et.etM['precip'][wyt][m1-1],0.0)*(da/monthday))/(12.0*self.days_in_month[self.non_leap_year][m-1])
self.per_acre[x][v]['immature'] += (max(district_et.etM[v + '_immature'][wyt][m-1] - district_et.etM['precip'][wyt][m-1],0.0)*((monthday-da)/monthday) + max(district_et.etM[v][wyt][m1-1] - district_et.etM['precip'][wyt][m1-1],0.0)*(da/monthday))/(12.0*self.days_in_month[self.non_leap_year][m-1])
      #calculate that day's 'starting' demand (because demand is filled at multiple times, and if we only want to fill a certain fraction of that demand (based on projections of supply & demand for the rest of the year), we want to base that fraction on that day's total demand, not the demand left after other deliveries are made)
self.dailydemand_start[x] = self.monthlydemand[x][wyt][m-1]*(monthday-da)/monthday + self.monthlydemand[x][wyt][m1-1]*da/monthday
if self.dailydemand_start[x] < 0.0:
self.dailydemand_start[x] = 0.0
#pro-rate this month's demand based on the day of the month when calculating remaining annual demand
self.annualdemand[x] = max(self.monthlydemand[x][wyt][m-1]*(monthday-da), 0.0)
if m > 9:
for monthloop in range(m, 12):
self.annualdemand[x] += max(self.monthlydemand[x][wyt][monthloop]*self.days_in_month[year][monthloop],0.0)
for monthloop in range(0,9):
self.annualdemand[x] += max(self.monthlydemand[x][wyt][monthloop]*self.days_in_month[year+1][monthloop], 0.0)
else:
for monthloop in range(m, 9):
self.annualdemand[x] += max(self.monthlydemand[x][wyt][monthloop]*self.days_in_month[year][monthloop], 0.0)
def get_urban_demand(self, t, m, da, wateryear, year, sri, dowy, total_delta_pumping):
#this function finds demands for the 'branch pumping' urban nodes - Socal, South Bay, & Central Coast
#demand is equal to pumping of the main california aqueduct and into the branches that services these areas
    #cal aqueduct urban demand comes from pumping data, calc separately
for district in self.district_list:
self.dailydemand[district] = self.pumping[district][t]/1000.0
self.dailydemand_start[district] = self.pumping[district][t]/1000.0
self.ytd_pumping[district][wateryear] += self.dailydemand[district]
sri_estimate = (sri*self.delivery_percent_coefficient[district][dowy][0] + self.delivery_percent_coefficient[district][dowy][1])*total_delta_pumping
self.annualdemand[district] = max(0.0, (self.annual_pumping[district][wateryear]*dowy + sri_estimate*(364.0 - dowy))/364.0 - self.ytd_pumping[district][wateryear])
##Keep track of ytd pumping to Cal Aqueduct Branches
if m == 10 and da == 1:
self.monthlydemand = {}
for district in self.district_list:
self.monthlydemand[district] = {}
for wyt in ['W', 'AN', 'BN', 'D', 'C']:
self.monthlydemand[district][wyt] = np.zeros(12)
start_of_month = 0
cross_counter_y = 0
###Divide aqueduct branch pumping into 'monthly demands'
for monthloop in range(0,12):
monthcounter = monthloop + 9
if monthcounter > 11:
monthcounter -= 12
cross_counter_y = 1
start_next_month = self.dowy_eom[year+cross_counter_y][monthcounter] + 1
for wyt in ['W', 'AN', 'BN', 'D', 'C']:
for district in self.district_list:
self.monthlydemand[district][wyt][monthcounter] = np.mean(self.pumping[district][(t + start_of_month):(t + start_next_month)])/1000.0
start_of_month = start_next_month
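  # Note: the sri_estimate above is a linear fit, (sri*slope + intercept)*total_delta_pumping,
  # and annualdemand blends it with the recorded annual pumping in proportion to the day of the
  # water year (weights (364-dowy)/364 vs dowy/364), so the index-based forecast dominates early
  # in the year and the recorded total dominates late in the year.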
def find_unmet_et(self, district_name, wateryear, dowy):
self.unmet_et[district_name][wateryear] += self.dailydemand[district_name]
self.unmet_et[district_name + "_total"][wateryear] += self.dailydemand_start[district_name]
def find_pre_flood_demand(self, wyt, year):
#calculates an estimate for water use in the Oct-Dec period (for use in recharge_carryover calculations), happens Oct 1
self.pre_flood_demand = self.monthlydemand[wyt][9]*self.days_in_month[year][9] + self.monthlydemand[wyt][10]*self.days_in_month[year][10] + self.monthlydemand[wyt][11]*self.days_in_month[year][11]
#####################################################################################################################
#####################################################################################################################
#####################################################################################################################
##################################PROJECT CONTRACTS#################################################################
#####################################################################################################################
def update_balance(self, t, wateryear, water_available, projected_allocation, current_water, key, tot_carryover, balance_type, district_name, project_contract, rights):
###This function takes input from the contract class (water_available, projected_allocation, tot_carryover) to determine how much of their allocation remains
##water_available is the current contract storage in the reservoir *plus* all deliveries from the given year. The deliveries, paper trades, and turnback pool accounts for each district
##are used to get a running tally of the surface water that is currently available to them. (tot_carryover is subtracted from the current balance - districts only get access to *their*
##carryover storage - which is added to their individual current balance (self.carryover[key])
##projected_allocation is the water that is forecasted to be available on each contract through the end of the water year *plus* water that has already been delivered on that contract
##individual deliveries are then subtracted from this total to determine the individual district's projected contract allocation remaining in that year
if balance_type == 'contract':
#district_storage - district's water that is currently available (in storage at reservoir)
#(water_available - tot_carryover)*self.project_contract[key] - individual district share of the existing (in storage) contract balance, this includes contract water that has already been delivered to all contractors
#self.deliveries[key][wateryear] - individual district deliveries (how much of 'their' contract has already been delivered)
#self.carryover[key] - individual district share of contract carryover
      #paper_balance[key] - keeps track of 'paper' groundwater trades (negative means they have accepted GW deliveries in exchange for trading some of their water stored in reservoir, positive means they sent their banked GW to another district in exchange for SW storage)
#turnback_pool[key] - how much water was bought/sold on the turnback pool(negative is sold, positive is bought)
district_storage = (water_available-tot_carryover)*project_contract[key]*self.private_fraction[district_name][wateryear] - self.deliveries[district_name][key][wateryear] + self.carryover[district_name][key] + self.turnback_pool[district_name][key]
#annual allocation - remaining (undelivered) district share of expected total contract allocation
#same as above, but projected_allocation*self.project_contract[key] - individual share of expected total contract allocation, this includes contract water that has already been delivered to all contractors
annual_allocation = projected_allocation*project_contract[key]*self.private_fraction[district_name][wateryear] - self.deliveries[district_name][key][wateryear] + self.carryover[district_name][key] + self.turnback_pool[district_name][key]
storage_balance = current_water*project_contract[key]*self.private_fraction[district_name][wateryear] + max(self.carryover[district_name][key] + self.turnback_pool[district_name][key] - self.deliveries[district_name][key][wateryear], 0.0)
elif balance_type == 'right':
#same as above, but for contracts that are expressed as 'rights' instead of allocations
district_storage = (water_available-tot_carryover)*rights[key]['capacity']*self.private_fraction[district_name][wateryear] - self.deliveries[district_name][key][wateryear] + self.carryover[district_name][key] + self.turnback_pool[district_name][key]
annual_allocation = projected_allocation*rights[key]['capacity']*self.private_fraction[district_name][wateryear] - self.deliveries[district_name][key][wateryear] + self.carryover[district_name][key] + self.turnback_pool[district_name][key]
storage_balance = current_water*rights[key]['capacity']*self.private_fraction[district_name][wateryear] + max(self.carryover[district_name][key] + self.turnback_pool[district_name][key] - self.deliveries[district_name][key][wateryear], 0.0)
self.current_balance[district_name][key] = storage_balance
self.projected_supply[district_name][key] = annual_allocation
if balance_type == 'contract':
contract_amount = project_contract[key]
else:
contract_amount = rights[key]['capacity']
if key == 'tableA' and self.key == "XXX":
#print(wateryear, end = " ")
#print(t, end = " ")
#print(self.key, end = " ")
#print("%.2f" % projected_allocation, end = " ")
#print("%.2f" % self.deliveries[district_name][key][wateryear], end = " ")
#print("%.2f" % self.deliveries[district_name]['recharged'][wateryear], end = " ")
#print("%.2f" % self.deliveries[district_name]['recover_banked'][wateryear], end = " ")
#print("%.2f" % self.deliveries['exchanged_GW'][wateryear], end = " ")
#print("%.2f" % self.projected_supply[district_name][key], end = " ")
#print("%.2f" % self.annualdemand[district_name], end = " ")
#print("%.2f" % self.recharge_carryover[district_name][key], end = " ")
print("%.2f" % self.use_recovery)
return max(self.carryover[district_name][key] - self.deliveries[district_name][key][wateryear], 0.0)
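  # Worked example (illustrative numbers only): with projected_allocation = 1000,
  # project_contract[key] = 0.02, a private fraction of 0.5, 5 units already delivered,
  # 2 units of carryover and no turnback purchases, the remaining annual_allocation is
  # 1000*0.02*0.5 - 5 + 2 + 0 = 7, in the same units as the allocation inputs.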
def apply_paper_balance(self, key, wyt, wateryear):
ytd_deliveries = {}
remaining_projected = {}
remaining_paper = self.paper_balance[key]
ind_paper_balance = {}
for x in self.district_list:
ytd_deliveries[x] = self.deliveries[x][key][wateryear] - self.deliveries[x]['recharged'][wateryear]
ind_paper_balance[x] = 0.0
if self.projected_supply[x][key] < 0.0:
ind_paper_balance[x] = min(self.projected_supply[x][key]*-1.0, remaining_paper)
remaining_paper = max(self.projected_supply[x][key] + remaining_paper, 0.0)
remaining_projected[x] = self.projected_supply[x][key] + ind_paper_balance[x]
self.age_death = 25
if remaining_paper > 0.0:
for ageloop in range(0,25):
for x in self.district_list:
if self.agedemand[x][wyt][ageloop] < ytd_deliveries[x]:
ytd_deliveries[x] -= self.agedemand[x][wyt][ageloop]
elif self.agedemand[x][wyt][ageloop] < (ytd_deliveries[x] + remaining_projected[x]) :
remaining_projected[x] -= (self.agedemand[x][wyt][ageloop] - ytd_deliveries[x])
ytd_deliveries[x] = 0.0
elif self.agedemand[x][wyt][ageloop] < (ytd_deliveries[x] + remaining_projected[x] + remaining_paper):
ind_paper_balance[x] += (self.agedemand[x][wyt][ageloop] - ytd_deliveries[x] - remaining_projected[x])
remaining_paper -= (self.agedemand[x][wyt][ageloop] - ytd_deliveries[x] - remaining_projected[x])
ytd_deliveries[x] = 0.0
remaining_projected[x] = 0.0
elif remaining_paper > 0.0:
ind_paper_balance[x] += remaining_paper
remaining_paper = 0.0
self.age_death = ageloop
if remaining_paper > 0.0:
for x in self.district_list:
ind_paper_balance[x] += remaining_paper/len(self.district_list)
for x in self.district_list:
self.projected_supply[x][key] += ind_paper_balance[x]
self.current_balance[x][key] += (max(ind_paper_balance[x] + self.carryover[x][key] + self.turnback_pool[x][key] - self.deliveries[x][key][wateryear], 0.0) - max(self.carryover[x][key] + self.turnback_pool[x][key] - self.deliveries[x][key][wateryear], 0.0))
self.current_balance[x][key] = min(self.projected_supply[x][key],self.current_balance[x][key])
total_carryover = 0.0
for x in self.district_list:
total_carryover += max(self.projected_supply[x][key] - self.annualdemand[x], 0.0)
return total_carryover
def apply_paper_balance_urban(self, key, wyt, wateryear):
#ytd_deliveries = {}
#remaining_projected = {}
#remaining_paper = self.paper_balance[key]
#ind_paper_balance = {}
#total_remaining = 0.0
#num_districts = 0.0
total_used = 0.0
if self.paper_balance[key] > 0.0:
for xx in self.district_list:
if self.projected_supply[xx][key] < 0.0:
total_used_int = min(self.paper_balance[key] - total_used, self.projected_supply[xx][key]*(-1.0))
self.projected_supply[xx][key] += total_used_int
total_used += total_used_int
if self.paper_balance[key] > total_used:
for xx in self.district_list:
self.projected_supply[xx][key] += (self.paper_balance[key] - total_used)/len(self.district_list)
else:
for xx in self.district_list:
if self.projected_supply[xx][key] > 0.0:
total_used_int = min(total_used - self.paper_balance[key], self.projected_supply[xx][key])
self.projected_supply[xx][key] -= total_used_int
total_used += total_used_int
if self.paper_balance[key]*(-1.0) < total_used:
for xx in self.district_list:
self.projected_supply[xx][key] += (self.paper_balance[key] + total_used)/len(self.district_list)
total_carryover = 0.0
for x in self.district_list:
total_carryover += max(self.projected_supply[x][key] - self.annualdemand[x], 0.0)
#for x in self.district_list:
#ytd_deliveries[x] = self.deliveries[x][key][wateryear] - self.deliveries[x]['recharged'][wateryear]
#ind_paper_balance[x] = 0.0
#if self.projected_supply[x][key] < 0.0:
#ind_paper_balance[x] = min(self.projected_supply[x][key]*-1.0, remaining_paper)
#remaining_paper = max(self.projected_supply[x][key] + remaining_paper, 0.0)
#remaining_projected[x] = self.projected_supply[x][key] + ind_paper_balance[x]
#total_remaining += remaining_projected[x]
#num_districts += 1.0
#target_projected = (total_remaining + remaining_paper)/num_districts
#if remaining_paper > 0.0:
#for x in self.district_list:
#extra_balance = min(max(target_projected- remaining_projected[x], 0.0), remaining_paper)
#ind_paper_balance[x] += extra_balance
#remaining_paper -= extra_balance
#for x in self.district_list:
#self.projected_supply[x][key] += ind_paper_balance[x]
#self.current_balance[x][key] += (max(ind_paper_balance[x] + self.carryover[x][key] + self.turnback_pool[x][key] - self.deliveries[x][key][wateryear], 0.0) - max(self.carryover[x][key] + self.turnback_pool[x][key] - self.deliveries[x][key][wateryear], 0.0))
#self.current_balance[x][key] = min(self.projected_supply[x][key],self.current_balance[x][key])
#total_carryover = 0.0
#for x in self.district_list:
#total_carryover += max(self.projected_supply[x][key] - self.annualdemand[x], 0.0)
return total_carryover
def calc_carryover(self, existing_balance, wateryear, balance_type, key, district_name, project_contract, rights):
#at the end of each wateryear, we tally up the full allocation to the contract, how much was used (and moved around in other balances - carryover, 'paper balance' and turnback_pools) to figure out how much each district can 'carryover' to the next year
if balance_type == 'contract':
annual_allocation = existing_balance*project_contract[key]*self.private_fraction[district_name][wateryear] - self.deliveries[district_name][key][wateryear] + self.carryover[district_name][key] + self.turnback_pool[district_name][key]
max_carryover = self.contract_carryover_list[district_name][key]
elif balance_type == 'right':
annual_allocation = existing_balance*rights[key]['capacity']*self.private_fraction[district_name][wateryear] - self.deliveries[district_name][key][wateryear] + self.carryover[district_name][key] + self.turnback_pool[district_name][key]
max_carryover = self.contract_carryover_list[district_name][key]
self.carryover[district_name][key] = annual_allocation
self.turnback_pool[district_name][key] = 0.0
#####################################################################################################################
#####################################################################################################################
#####################################################################################################################
##################################RECHARGE/RECOVERY TRIGGERS#########################################################
#####################################################################################################################
def open_recovery(self, t, dowy, wyt, wateryear, use_delivery_tolerance, additional_carryover):
#this function determines if a district wants to recover banked water
#based on their demands and existing supplies
if use_delivery_tolerance:
risk_index = int(np.floor(self.banking_risk_level*len(self.delivery_risk)))
at_risk_years = self.delivery_risk < self.total_banked_storage*-1.0
new_values = self.delivery_risk[at_risk_years]
new_years = self.delivery_risk_rate[at_risk_years]
if (len(new_values)-1) < risk_index:
delivery_tolerance = 0.0
else:
rates = np.zeros(len(new_values))
for x in range(0, len(new_values)):
rates[x] = (new_values[x] + self.total_banked_storage)/new_years[x]
sorted_rates = np.sort(rates)
delivery_tolerance = sorted_rates[risk_index]*-1.0
else:
delivery_tolerance = 0.0
self.target_annual_demand = 999999.9*np.ones(self.number_years)
#for district in self.district_list:
#self.target_annual_demand += self.annual_pumping[district][wateryear]
total_balance = 0.0
total_deliveries = 0.0
total_needs = 0.0
for district in self.district_list:
for contract in self.contract_list:
total_balance += self.projected_supply[district][contract]
total_deliveries += self.deliveries[district][contract][wateryear]
total_deliveries += self.deliveries[district]['recover_banked'][wateryear]
total_needs += self.annualdemand[district]*self.seepage[district]
#total_recovery = (366-dowy)*self.max_recovery + self.max_leiu_exchange
total_recovery = (366-dowy)*self.max_recovery + self.max_leiu_exchange
total_needs += additional_carryover
self.daily_supplies['recovery_cap'][t] = total_recovery
if (total_balance + total_recovery) < total_needs and (total_balance + total_deliveries) < (self.target_annual_demand[wateryear] - delivery_tolerance):
self.use_recovery = 1.0
else:
self.use_recovery = 0.0
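  # Worked example (illustrative numbers only): on day dowy = 100 with max_recovery = 0.3 per day
  # and max_leiu_exchange = 20, total_recovery = (366-100)*0.3 + 20 = 99.8; use_recovery is set
  # to 1.0 only if projected balances plus that 99.8 still fall short of the remaining annual
  # demand (plus additional_carryover) and deliveries remain below the annual demand target.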
def open_recharge(self,t,m,da,wateryear,year,numdays_fillup, numdays_fillup2, key, wyt, reachable_turnouts, additional_carryover, contract_allocation):
#for a given contract owned by the district (key), how much recharge can they expect to be able to use
#before the reservoir associated w/ that contract fills to the point where it needs to begin spilling water
    #(numdays_fillup) - i.e., how much surface water storage can we keep before we start losing it
#self.recharge_carryover is the district variable that shows how much 'excess' allocation there is on a particular
#contract - i.e., how much of the allocation will not be able to be recharged before the reservoir spills
total_recharge = 0.0
total_recharge2 = 0.0
carryover_storage_proj = 0.0
spill_release_carryover = 0.0
service_area_adjust = {}
for x in self.district_list:
is_reachable = 0
for turnout in reachable_turnouts:
for y in self.turnout_list[x]:
if y == turnout:
is_reachable = 1
break
if is_reachable == 1:
break
if is_reachable == 1:
service_area_adjust[x] = 1.0
else:
service_area_adjust[x] = 0.0
adjusted_sw_sa = service_area_adjust
#for x in self.district_list:
#spill_release_carryover += max(self.projected_supply[x][key] - self.annualdemand[x]*adjusted_sw_sa[x], 0.0)
###Find projected recharge available to district
#if spill_release_carryover > 0.0:
#total_recharge_capacity = (self.max_direct_recharge[0] + self.max_leiu_recharge[m])*(self.days_in_month[year][m]-da)
##calculate both direct & in leiu recharge available to the district through the end of this water year
#if m < 8:
#for future_month in range(m+1,9):
#total_recharge_capacity += self.max_direct_recharge[future_month - m]*self.days_in_month[year][future_month] + self.max_leiu_recharge[future_month]*self.days_in_month[year+1][future_month]
#elif m > 8:
#for future_month in range(m+1,12):
#total_recharge_capacity += self.max_direct_recharge[future_month - m]*self.days_in_month[year][future_month] + self.max_leiu_recharge[future_month]*self.days_in_month[year+1][future_month]
#for future_month in range(0,9):
#total_recharge_capacity += self.max_direct_recharge[future_month - m]*self.days_in_month[year+1][future_month] + self.max_leiu_recharge[future_month]*self.days_in_month[year+1][future_month]
#else:
#total_recharge_capacity = 0.0
#spill_release_carryover -= total_recharge_capacity
#spill_release_carryover -= additional_carryover
#spill_release_carryover = max(spill_release_carryover, 0.0)
if numdays_fillup < 365.0:
##how many days remain before the reservoir fills?
days_left = numdays_fillup
days_left2 = numdays_fillup2
#tabulate how much water can be recharged between now & reservoir fillup (current month)
this_month_recharge = (self.max_direct_recharge[0] + self.max_leiu_recharge[0])*min(self.days_in_month[year][m] - da,days_left)
this_month_recharge2 = (self.max_direct_recharge[0] + self.max_leiu_recharge[0])*min(self.days_in_month[year][m] - da,days_left2)
total_recharge += this_month_recharge
total_recharge2 += this_month_recharge2
#days before fillup remaining after current month
days_left -= (self.days_in_month[year][m] - da)
days_left2 -= (self.days_in_month[year][m] - da)
###if days_left remains positive (i.e., reservoir fills after the end of the current month)
###loop through future months to determine how much water can be recharged before reservoir fills
monthcounter = 0
monthcounter_loop = 0
next_year_counter = 0
while (monthcounter + monthcounter_loop) < 11 and days_left > 0.0:
monthcounter += 1
if (monthcounter + m) > 11:
monthcounter -= 12
monthcounter_loop = 12
next_year_counter = 1
# continue to tabulate how much water can be recharged between now & reservoir fillup (future months)
this_month_recharge = (self.max_direct_recharge[monthcounter+monthcounter_loop] + self.max_leiu_recharge[monthcounter+monthcounter_loop])*min(self.days_in_month[year+next_year_counter][m+monthcounter],days_left)
total_recharge += this_month_recharge
days_left -= self.days_in_month[year+next_year_counter][m+monthcounter]
monthcounter = 0
monthcounter_loop = 0
next_year_counter = 0
while (monthcounter + monthcounter_loop) < 11 and days_left2 > 0.0:
monthcounter += 1
if (monthcounter + m) > 11:
monthcounter -= 12
monthcounter_loop = 12
next_year_counter = 1
# continue to tabulate how much water can be recharged between now & reservoir fillup (future months)
this_month_recharge2 = (self.max_direct_recharge[monthcounter+monthcounter_loop] + self.max_leiu_recharge[monthcounter+monthcounter_loop])*min(self.days_in_month[year+next_year_counter][m+monthcounter],days_left2)
total_recharge2 += this_month_recharge2
days_left2 -= self.days_in_month[year+next_year_counter][m+monthcounter]
###Uses the projected supply calculation to determine when to recharge water. There are a number of conditions under which a
###district will recharge water. Projected allocations are compared to total demand, recharge capacity, and the probability of
###surface water storage spilling carryover water. If any of these conditions triggers recharge, the district will release water
##for recharge
spill_release_carryover = 0.0
for xx in self.district_list:
for y in self.contract_list:
spill_release_carryover += max(self.projected_supply[xx][y] - self.annualdemand[xx]*service_area_adjust[xx] - self.carryover_rights[xx][y], 0.0)
spill_release_carryover -= (total_recharge2 + self.demand_days['lookahead'][key])
spill_release_carryover = max(spill_release_carryover, 0.0)
carryover_storage_proj = 0.0
for xx in self.district_list:
for y in self.contract_list:
carryover_storage_proj += max(self.carryover[xx][y] - self.deliveries[xx][y][wateryear] - self.carryover_rights[xx][y], 0.0)
carryover_storage_proj -= (total_recharge + self.demand_days['current'][key])
carryover_storage_proj = max(carryover_storage_proj, 0.0)
#carryover_release_proj = min(carryover_storage_proj, max(total_recharge_available - total_recharge_capacity,0.0))
#carryover_release_current = max(self.carryover[key] - self.deliveries[key][wateryear] - total_recharge_carryover, 0.0)
#if contract_carryover > 0.0:
#spill_release_carryover = max(self.carryover[key] - self.deliveries[key][wateryear] - total_recharge, 0.0)
#else:
#spill_release_carryover = max(self.projected_supply[key] - self.annualdemand*adjusted_sw_sa - total_recharge*service_area_adjust - self.contract_carryover_list[key]*adjusted_sw_sa, 0.0)
district_fracs = {}
if spill_release_carryover > carryover_storage_proj:
total_available_for_recharge = 0.0
for xx in self.district_list:
for y in self.contract_list:
total_available_for_recharge += max(self.projected_supply[xx][y], 0.0)
total_available_for_recharge -= self.annualdemand[xx]
if total_available_for_recharge > 0.0:
for xx in self.district_list:
district_fracs[xx] = max(self.projected_supply[xx][key] - self.annualdemand[xx], 0.0)/total_available_for_recharge
self.recharge_carryover[xx][key] = max(spill_release_carryover, 0.0)*district_fracs[xx]
else:
for xx in self.district_list:
self.recharge_carryover[xx][key] = 0.0
else:
total_available_for_recharge = 0.0
for xx in self.district_list:
for y in self.contract_list:
total_available_for_recharge += max(self.carryover[xx][y] - self.deliveries[xx][y][wateryear], 0.0)
if total_available_for_recharge > 0.0:
for xx in self.district_list:
district_fracs[xx] = max(self.carryover[xx][key] - self.deliveries[xx][key][wateryear], 0.0)/total_available_for_recharge
self.recharge_carryover[xx][key] = max(carryover_storage_proj, 0.0)*district_fracs[xx]
else:
for xx in self.district_list:
self.recharge_carryover[xx][key] = 0.0
if contract_allocation == 0:
total_secondary_contract = 0.0
for xx in self.district_list:
total_secondary_contract += self.projected_supply[xx][key]
if total_secondary_contract > 0.0:
for xx in self.district_list:
self.recharge_carryover[xx][key] = max(self.recharge_carryover[xx][key], (total_secondary_contract - total_recharge - self.demand_days['current'][key])*(self.projected_supply[xx][key]/total_secondary_contract), 0.0)
else:
for x in self.district_list:
self.recharge_carryover[x][key] = 0.0
self.daily_supplies['numdays'][t] = numdays_fillup
self.daily_supplies['recharge_cap'][t] = total_recharge
def get_urban_recovery_target(self, pumping, project_contract, wateryear, dowy, year, wyt, demand_days, t, district, start_month):
max_pumping_shortfall = 0.0
pumping_shortfall = 0.0
monthcounter = start_month
daycounter = 0
tot_days = 0
if demand_days > 365.0:
max_pumping_shortfall = 999.9
else:
while tot_days < demand_days:
pumping_shortfall += np.sum(self.pumping[district][(t-dowy+tot_days):(t-dowy+tot_days+min(demand_days - tot_days, 30))]/1000.0) - pumping['swp']['gains'][monthcounter]*project_contract*self.private_fraction[district][wateryear]
tot_days += 30
monthcounter += 1
if monthcounter == 12:
monthcounter = 0
max_pumping_shortfall = max(pumping_shortfall, max_pumping_shortfall)
return max(max_pumping_shortfall, 0.0)
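# --- illustrative sketch (not part of the model) ---
# The shortfall loop above walks forward in ~30-day blocks, comparing pumping needs against
# projected monthly gains. A standalone version with made-up numbers (daily_pumping and
# monthly_gains are hypothetical stand-ins for self.pumping and pumping['swp']['gains']):
import numpy as np
daily_pumping = np.full(365, 2.0)   # af/day, assumed
monthly_gains = np.full(12, 45.0)   # af/month, assumed
demand_days, tot_days, month = 90, 0, 0
shortfall, worst = 0.0, 0.0
while tot_days < demand_days:
    block = daily_pumping[tot_days:tot_days + min(demand_days - tot_days, 30)]
    shortfall += np.sum(block) - monthly_gains[month]
    tot_days += 30
    month = (month + 1) % 12
    worst = max(worst, shortfall)
print(max(worst, 0.0))   # worst pumping shortfall over the horizon, floored at zero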
def set_turnback_pool(self, key, year, additional_carryover):
##This function creates the 'turnback pool' (note: only for SWP contracts now, can be used for others)
##finding contractors with 'extra' contract water that they would like to sell, and contractors who would
##like to purchase that water.
self.turnback_sales = 0.0
self.turnback_purchases = 0.0
total_recharge_ability = 0.0
total_projected_supply = 0.0
for y in self.contract_list:
for xx in self.district_list:
total_projected_supply += self.projected_supply[xx][y]
for month_count in range(0, 4):
# total recharge Jun,Jul,Aug,Sep
total_recharge_ability += self.max_direct_recharge[month_count]*self.days_in_month[year][month_count + 5]
contract_fraction = 0.0
if total_projected_supply > 0.0:
for xx in self.district_list:
contract_fraction += max(min(self.projected_supply[xx][key]/total_projected_supply, 1.0), 0.0)
else:
contract_fraction = 0.0
#districts sell water if their projected contracts are greater than their remaining annual demand, plus their remaining recharge capacity in this water year, plus their recharge capacity in the next water year (through January)
self.turnback_sales = {}
self.turnback_purchases = {}
for x in self.district_list:
self.turnback_sales[x] = 0.0
self.turnback_purchases[x] = 0.0
self.turnback_sales[x] += max(self.projected_supply[x][key] - (self.annualdemand[x] + total_recharge_ability + additional_carryover)*contract_fraction, 0.0)
self.turnback_purchases[x] += max(self.annualdemand[x]*contract_fraction - self.projected_supply[x][key] - self.max_recovery*122*contract_fraction, 0.0)
return self.turnback_sales, self.turnback_purchases
def make_turnback_purchases(self, turnback_sellers, turnback_buyers, key):
#once we know how much water is in the 'selling' pool and the 'buying' pool, we can determine the total turnback pool - min(buying,selling), then
#determine what % of each request is filled (i.e., if the selling pool is only 1/2 of the buying pool, then buyers only get 1/2 of their request, or vice versa)
if min(turnback_sellers, turnback_buyers) > 0.0:
sellers_frac = -1*min(turnback_sellers, turnback_buyers)/turnback_sellers
buyers_frac = min(turnback_sellers, turnback_buyers)/turnback_buyers
for x in self.district_list:
if self.turnback_sales[x] > 0.0:
self.turnback_pool[x][key] = max(self.turnback_sales[x], 0.0)*sellers_frac
elif self.turnback_purchases[x] > 0.0:
self.turnback_pool[x][key] = max(self.turnback_purchases[x], 0.0)*buyers_frac
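# --- worked example (illustrative only) of the pro-rating above ---
# If districts collectively offer 100 units for sale but buyers only request 40, the pool is
# min(100, 40) = 40: each seller fills 40% of its offer (stored as a negative adjustment) and
# each buyer receives 100% of its request.
turnback_sellers, turnback_buyers = 100.0, 40.0
pool = min(turnback_sellers, turnback_buyers)        # 40.0
sellers_frac = -1 * pool / turnback_sellers          # -0.4 -> sellers release 40% of their offers
buyers_frac = pool / turnback_buyers                 # 1.0  -> buyers get their full requests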
#####################################################################################################################
#####################################################################################################################
#####################################################################################################################
##################################DETERMINE DELIVERIES ON CANAL######################################################
#####################################################################################################################
def find_node_demand(self,contract_list, search_type, district_name):
#this function is used to calculate the current demand at each 'district' node
access_mult = self.seepage[district_name]#this accounts for water seepage & the total district area that can be reached by SW canals (seepage is >= 1.0; surface_water_sa <= 1.0)
total_projected_allocation = 0.0
for y in contract_list:
total_projected_allocation += max(self.projected_supply[district_name][y.name], 0.0)#projected allocation
#percentage of demand filled in the day is equal to the total projected allocation as a percent of annual demand
#(i.e., if allocations are projected to be 1/2 of annual demand, then they try to fill 50% of daily irrigation demands with surface water)
total_demand_met = 1.0
#self.dailydemand_start is the initial daily district demand (self.dailydemand is updated as deliveries are made) - we try to fill the total_demand_met fraction of dailydemand_start, or what remains of demand in self.dailydemand, whichever is smaller
if search_type == 'flood':
if self.annualdemand[district_name] > 0.0 and total_projected_allocation > self.annualdemand[district_name]:
demand_constraint = (1.0 - min(total_projected_allocation/self.annualdemand[district_name], 1.0))*max(min(self.dailydemand_start[district_name]*access_mult*total_demand_met, self.dailydemand[district_name]*access_mult),0.0)
else:
demand_constraint = max(min(self.dailydemand_start[district_name]*access_mult*total_demand_met, self.dailydemand[district_name]*access_mult),0.0)
else:
demand_constraint = max(min(self.dailydemand_start[district_name]*access_mult*total_demand_met, self.dailydemand[district_name]*access_mult),0.0)
#if we want to include recharge demands in the demand calculations, add available recharge space
return demand_constraint
def set_request_to_district(self, demand, search_type, contract_list, bank_space, dowy, district_name):
#this function is used to determine if a district node 'wants' to make a request
#under the different usage types (flood, delivery, banking, or recovery) under a given contract
#(contract_list)
self.projected_supply[district_name]['tot'] = 0.0
for y in self.current_balance[district_name]:
self.projected_supply[district_name]['tot'] += self.projected_supply[district_name][y]
#for flood deliveries, a district requests water as long as it does not already hold
#excess contract water that it cannot recharge (i.e. it won't purchase
#flood water if it can't use all of its contract water)
if search_type == "flood":
return demand
#for y in contract_list:
#tot_recharge += self.delivery_carryover[y.name]
#if tot_recharge <= 0.0:
#return demand
#else:
#return 0.0
#for normal irrigation deliveries, a district requests water if they have enough water currently
#in surface water storage under the given contract
if search_type == "delivery":
total_current_balance = 0.0
total_projected_supply = 0.0
for y in contract_list:
total_current_balance += max(self.current_balance[district_name][y.name], 0.0)
total_projected_supply += max(self.projected_supply[district_name][y.name], 0.0)
if self.seasonal_connection[district_name] == 1:
if self.must_fill == 1:
return min(total_current_balance, total_projected_supply)
else:
return min(total_current_balance, total_projected_supply)
else:
return 0.0
else:
return 0.0
def set_request_constraints(self, demand, search_type, contract_list, bank_space, bank_capacity, dowy, wateryear):
#for banking, a district requests water if they have enough contract water currently in surface water storage and they have 'excess' water for banking (calculated in self.open_recharge)
if search_type == "banking":
total_carryover_recharge = 0.0
total_current_balance = 0.0
for x in self.district_list:
for y in contract_list:
total_carryover_recharge += max(self.recharge_carryover[x][y.name], 0.0)
total_current_balance += max(self.current_balance[x][y.name], 0.0)
return min(total_carryover_recharge, total_current_balance, max(bank_capacity - bank_space, 0.0))
#for recovery, a district requests recovery water from a bank if they have contracts under the current contract being searched (i.e., so they aren't requesting water that will be sent to another district that can't make 'paper' trades with them) and if they have their 'recovery threshold' triggered (self.use_recovery, calculated in self.open_recovery)
if search_type == "recovery":
member_trades = 0
for member_contracts in self.contract_list:
for exchange_contracts in contract_list:
if member_contracts == exchange_contracts.name:
member_trades = 1
if member_trades == 1:
if self.use_recovery == 1.0:
for x in self.district_list:
total_request = min(max(self.dailydemand[x]*self.seepage[x], 0.0), max(bank_space, 0.0))
else:
total_request = 0.0
else:
total_request = 0.0
return total_request
if search_type == "flood":
return demand
return total_request
def set_demand_priority(self, priority_list, contract_list, demand, delivery, demand_constraint, search_type, contract_canal):
#this function takes the calculated demand at each district node and classifies those demands by 'priority' - the priority classes and rules change for each delivery type
demand_dict = {}
#for flood deliveries, the priority structure is based on if you have a contract with the reservoir that is being spilled, if you have a turnout on a canal that is a 'priority canal' for the spilling reservoir, and then finally if you are not on a priority canal for spilling
if search_type == 'flood':
contractor_toggle = 0
priority_toggle = 0
for yy in priority_list:#canals that have 'priority' from the given reservoir
if yy.name == contract_canal:#current canal
priority_toggle = 1
if priority_toggle == 1:
for y in contract_list:#contracts that are being spilled (b/c they are held at the spilling reservoir)
for yx in self.contract_list:
if y.name == yx:
contractor_toggle = 1
if contractor_toggle == 1:
demand_dict['contractor'] = max(min(demand,delivery), 0.0)
demand_dict['alternate'] = min(delivery - max(min(demand,delivery),0.0),demand_constraint-demand_dict['contractor'])
demand_dict['turnout'] = 0.0
demand_dict['excess'] = 0.0
else:
demand_dict['contractor'] = 0.0
demand_dict['alternate'] = 0.0
demand_dict['turnout'] = max(min(demand,delivery), 0.0)
demand_dict['excess'] = 0.0
else:
demand_dict['contractor'] = 0.0
demand_dict['alternate'] = 0.0
demand_dict['turnout'] = 0.0
demand_dict['excess'] = max(min(demand,delivery), 0.0)
#irrigation deliveries have only one type of priority (the contract that is currently being delivered)
elif search_type == 'delivery':
demand_dict[contract_canal] = max(min(demand,delivery), 0.0)
#in-leiu banks have demands that are either priority (capacity that the district has direct ownership over) or secondary (excess capacity that isn't being used by the owner)
elif search_type == 'banking':
priority_toggle = 0
for yy in priority_list:#canals that have 'priority' from the given reservoir
if yy.name == contract_canal:#current canal
priority_toggle = 1
if priority_toggle == 1:
demand_dict['priority'] = max(min(demand,delivery), 0.0)
demand_dict['secondary'] = min(delivery - max(min(demand,delivery),0.0),demand_constraint-demand_dict['priority'])
else:
demand_dict['priority'] = 0.0
demand_dict['secondary'] = max(min(delivery, demand_constraint), 0.0)
#recovery is the same priority structure as banking, but we use different names (initial & supplemental) to keep things straight
elif search_type == 'recovery':
demand_dict['initial'] = max(min(demand,delivery), 0.0)
demand_dict['supplemental'] = min(delivery - max(min(demand,delivery), 0.0), demand_constraint - demand_dict['initial'])
return demand_dict
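# --- illustrative check of the flood-priority rules above (numbers invented) ---
# A contractor on a priority canal takes 'contractor' water first; whatever delivery remains,
# up to its demand_constraint, is classed 'alternate'; non-contractors fall to 'turnout'/'excess'.
demand, delivery, demand_constraint = 30.0, 50.0, 45.0
contractor = max(min(demand, delivery), 0.0)                            # 30.0
alternate = min(delivery - contractor, demand_constraint - contractor)  # 15.0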
def find_leiu_priority_space(self, demand_constraint, num_members, member_name, toggle_recharge, search_type):
#this function finds how much 'priority' space in the recharge/recovery capacity is owned by a member (member_name) in a given in-leiu bank (i.e. this function is attached to the district that owns the bank - and the banking member is represented by 'member_name' input variable)
if search_type == "recovery":
priority_space = max(min(self.leiu_recovery*self.leiu_ownership[member_name] - self.recovery_use[member_name], demand_constraint), 0.0)
available_banked = self.inleiubanked[member_name]
return min(priority_space, available_banked)
else:
initial_capacity = self.dailydemand_start*self.surface_water_sa*self.seepage
if toggle_recharge == 1:
initial_capacity += self.in_district_storage
priority_space = max(min((self.leiu_ownership[member_name]*initial_capacity - self.bank_deliveries[member_name]), demand_constraint)/num_members, 0.0)
return priority_space
def set_deliveries(self, priorities,type_fractions,type_list,toggle_district_recharge,member_name, wateryear):
#this function takes the deliveries, separated by priority, and updates the district's daily demand and/or recharge storage
final_deliveries = 0.0
for zz in type_list:
#deliveries at this priority level
total_deliveries = priorities[zz]*type_fractions[zz]
#running total of all deliveries at this node
final_deliveries += total_deliveries
#deliveries first go to direct irrigation, if demand remains
total_direct_deliveries = min(total_deliveries/self.seepage, self.dailydemand*self.surface_water_sa)
#if deliveries are for recharge, send remaining deliveries to recharge
if toggle_district_recharge == 1:
total_recharge_deliveries = max(total_deliveries/self.seepage - self.dailydemand*self.surface_water_sa, 0.0)
else:
total_recharge_deliveries = 0.0
#adjust demand/recharge space
self.dailydemand -= total_direct_deliveries
self.current_recharge_storage += total_recharge_deliveries
return final_deliveries
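# --- minimal standalone sketch of the delivery split above (values are hypothetical) ---
# Deliveries at each priority level are scaled by type_fractions; the direct-irrigation share
# is capped by remaining daily demand and any surplus is routed to recharge when enabled.
priorities = {'priority': 12.0, 'secondary': 6.0}
type_fractions = {'priority': 1.0, 'secondary': 0.5}
seepage, dailydemand, surface_water_sa = 1.1, 10.0, 1.0
final_deliveries = 0.0
for zz in ('priority', 'secondary'):
    total = priorities[zz] * type_fractions[zz]
    final_deliveries += total
    direct = min(total / seepage, dailydemand * surface_water_sa)
    recharge = max(total / seepage - dailydemand * surface_water_sa, 0.0)   # surplus to recharge
    dailydemand -= direct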
#####################################################################################################################
#####################################################################################################################
#####################################################################################################################
##################################ADJUST ACCOUNTS AFTER DELIVERY#####################################################
#####################################################################################################################
def give_paper_trade(self, trade_amount, contract_list, wateryear, district):
#this function accepts a delivery of recovered groundwater, and makes a 'paper'
#trade, giving up a surface water contract allocation (contract_list) to the district
#that owned the groundwater that was recovered
if self.seepage[district] > 0.0:
total_alloc = 0.0
for y in contract_list:
total_alloc += self.projected_supply[district][y.name]
actual_delivery = max(min(trade_amount, total_alloc, self.dailydemand[district]*self.seepage[district]), 0.0)
self.dailydemand[district] -= actual_delivery/self.seepage[district]
if total_alloc > 0.0:
for y in contract_list:
self.paper_balance[y.name] -= actual_delivery*self.projected_supply[district][y.name]/total_alloc
self.deliveries[district]['exchanged_SW'][wateryear] += actual_delivery
return actual_delivery
def give_paper_exchange(self, trade_amount, contract_list, trade_frac, wateryear, district_name):
#this function accepts a delivery of recovered groundwater, and makes a 'paper'
#trade, giving up a surface water contract allocation (contract_list) to the district
#that owned the groundwater that was recovered
contract_counter = 0
for y in contract_list:
self.paper_balance[y.name] -= trade_amount*trade_frac[contract_counter]
contract_counter += 1
self.deliveries[district_name]['exchanged_SW'][wateryear] += trade_amount
def get_paper_trade(self, trade_amount, contract_list, wateryear):
#this function takes a 'paper' credit on a contract and allocates it to a district
#the paper credit is in exchange for delivering recovered groundwater to another party (district)
total_alloc = 0.0
contract_frac = 0.0
for y in contract_list:
for x in self.district_list:
total_alloc += self.projected_supply[x][y.name]
if total_alloc > 0.0:
for y in contract_list:
for x in self.district_list:
self.paper_balance[y.name] += trade_amount*self.projected_supply[x][y.name]/total_alloc
else:
contract_frac = 1.0
for y in contract_list:
self.paper_balance[y.name] += trade_amount*contract_frac
contract_frac = 0.0
self.deliveries['exchanged_GW'][wateryear] += trade_amount
def get_paper_exchange(self, trade_amount, contract_list, trade_frac, wateryear):
#this function takes a 'paper' credit on a contract and allocates it to a district
#the paper credit is in exchange for delivering recovered groundwater to another party (district)
total_alloc = 0.0
contract_frac = 0.0
contract_counter = 0
for y in contract_list:
self.paper_balance[y] += trade_amount*trade_frac[contract_counter]
contract_counter += 1
self.deliveries['exchanged_GW'][wateryear] += trade_amount
def record_direct_delivery(self, delivery, wateryear, district):
actual_delivery = min(delivery, self.dailydemand[district]*self.seepage[district])
self.deliveries[district]['recover_banked'][wateryear] += actual_delivery
self.dailydemand[district] -= actual_delivery/(self.seepage[district])
self.direct_recovery_delivery[district] = 0.0
return actual_delivery
def direct_delivery_bank(self, delivery, wateryear, district):
#this function takes a delivery of recovered groundwater and applies it to irrigation demand in a district
#the recovered groundwater is delivered to the district that originally owned the water, so no 'paper' trade is needed
actual_delivery = min(delivery, self.dailydemand[district]*self.seepage[district] - self.direct_recovery_delivery[district])
self.direct_recovery_delivery[district] += actual_delivery
#self.deliveries[district]['recover_banked'][wateryear] += actual_delivery
#self.dailydemand[district] -= actual_delivery/self.seepage[district]*self.surface_water_sa[district]
return actual_delivery
def adjust_accounts(self, direct_deliveries, recharge_deliveries, contract_list, search_type, wateryear):
#this function accepts water under a specific condition (flood, irrigation delivery, banking), and
#adjusts the proper accounting balances
total_recharge_balance = 0.0
total_current_balance = 0.0
delivery_by_contract = {}
for y in contract_list:
for x in self.district_list:
total_current_balance += max(self.current_balance[x][y.name], 0.0)
total_recharge_balance += max(self.recharge_carryover[x][y.name], 0.0)
delivery_by_contract[y.name] = 0.0
flood_counter = 0
for y in contract_list:
#find the percentage of total deliveries that come from each contract
contract_deliveries = 0.0
if search_type == 'flood':
if flood_counter == 0:
contract_deliveries = (direct_deliveries + recharge_deliveries)
flood_counter = 1
else:
contract_deliveries = 0.0
elif total_current_balance > 0.0:
for x in self.district_list:
if search_type == 'delivery':
contract_deliveries += (direct_deliveries + recharge_deliveries)*max(self.projected_supply[x][y.name], 0.0)/total_current_balance
elif search_type == 'banking':
contract_deliveries += (direct_deliveries + recharge_deliveries)*max(self.recharge_carryover[x][y.name], 0.0)/total_current_balance
elif search_type == 'recovery':
contract_deliveries += (direct_deliveries + recharge_deliveries)*max(self.current_balance[x][y.name], 0.0)/total_current_balance
else:
contract_deliveries = 0.0
delivery_by_contract[y.name] = contract_deliveries
#flood deliveries do not count against a district's contract allocation, so the deliveries are recorded as 'flood'
if search_type == "flood":
for x in self.district_list:
if contract_deliveries > 0.0:
self.deliveries[x][y.name + '_flood'][wateryear] += recharge_deliveries
self.deliveries[x][y.name + '_flood_irrigation'][wateryear] += direct_deliveries
else:
#irrigation/banking deliveries are recorded under the contract name so they are included in the
#contract balance calculations
#update the individual district accounts
if search_type == 'banking':
for x in self.district_list:
if total_recharge_balance > 0.0:
self.deliveries[x][y.name][wateryear] += (direct_deliveries + recharge_deliveries)*max(self.recharge_carryover[x][y.name], 0.0)/total_recharge_balance
self.current_balance[x][y.name] -= (direct_deliveries + recharge_deliveries)*max(self.recharge_carryover[x][y.name], 0.0)/total_recharge_balance
self.deliveries[x]['recharged'][wateryear] += (direct_deliveries + recharge_deliveries)*max(self.recharge_carryover[x][y.name], 0.0)/total_recharge_balance
self.recharge_carryover[x][y.name] -= min((direct_deliveries + recharge_deliveries)*max(self.recharge_carryover[x][y.name], 0.0)/total_recharge_balance, self.recharge_carryover[x][y.name])
else:
for x in self.district_list:
if total_current_balance > 0.0:
self.deliveries[x][y.name][wateryear] += (direct_deliveries + recharge_deliveries)*max(self.current_balance[x][y.name], 0.0)/total_current_balance
self.current_balance[x][y.name] -= (direct_deliveries + recharge_deliveries)*max(self.current_balance[x][y.name], 0.0)/total_current_balance
return delivery_by_contract
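# --- hypothetical apportionment example for adjust_accounts (contract keys are made up) ---
# A 10 af banking delivery is split across contracts in proportion to each contract's share of
# the districts' total recharge_carryover, mirroring the banking branch above.
recharge_carryover = {'tableA': 6.0, 'cvc': 2.0}
total_carryover = sum(recharge_carryover.values())                               # 8.0
split = {k: 10.0 * v / total_carryover for k, v in recharge_carryover.items()}   # {'tableA': 7.5, 'cvc': 2.5}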
def adjust_account_district(self, actual_deliveries, contract_list, search_type, wateryear, district_name):
total_current_balance = 0.0
delivery_by_contract = {}
for y in contract_list:
if search_type == 'flood':
total_current_balance += 1.0
elif search_type == 'delivery':
total_current_balance += max(self.projected_supply[district_name][y.name], 0.0)
elif search_type == 'banking':
total_current_balance += max(self.recharge_carryover[district_name][y.name], 0.0)
elif search_type == 'recovery':
total_current_balance += max(self.current_balance[district_name][y.name], 0.0)
delivery_by_contract[y.name] = 0.0
flood_counter = 0
for y in contract_list:
#find the percentage of total deliveries that come from each contract
if total_current_balance > 0.0:
if search_type == 'flood':
if flood_counter == 0:
contract_deliveries = actual_deliveries
flood_counter = 1
else:
contract_deliveries = 0.0
elif search_type == 'delivery':
contract_deliveries = actual_deliveries*max(self.projected_supply[district_name][y.name], 0.0)/total_current_balance
elif search_type == 'banking':
contract_deliveries = actual_deliveries*max(self.recharge_carryover[district_name][y.name], 0.0)/total_current_balance
elif search_type == 'recovery':
contract_deliveries = actual_deliveries*max(self.current_balance[district_name][y.name], 0.0)/total_current_balance
else:
contract_deliveries = 0.0
delivery_by_contract[y.name] = contract_deliveries
#flood deliveries do not count against a district's contract allocation, so the deliveries are recorded as 'flood'
if search_type == "flood":
self.deliveries[district_name][y.name + '_flood'][wateryear] += contract_deliveries
else:
#irrigation/banking deliveries are recorded under the contract name so they are included in the
#contract balance calculations
#update the individual district accounts
self.deliveries[district_name][y.name][wateryear] += contract_deliveries
self.current_balance[district_name][y.name] -= contract_deliveries
if search_type == 'banking':
#if deliveries are for banking, update banking accounts
self.deliveries[district_name]['recharged'][wateryear] += contract_deliveries
self.recharge_carryover[district_name][y.name] -= min(contract_deliveries, self.recharge_carryover[district_name][y.name])
self.dailydemand[district_name] -= min(actual_deliveries/self.seepage[district_name],self.dailydemand[district_name])
return delivery_by_contract
def adjust_bank_accounts(self, member_name, deliveries, wateryear):
#when deliveries are made for banking, keep track of the member's individual accounts
self.bank_deliveries[member_name] += deliveries#keeps track of how much of the capacity is being used in the current timestep
self.deliveries['inleiu'][wateryear] += deliveries#if deliveries being made 'inleiu', then count as inleiu deliveries
self.inleiubanked[member_name] += deliveries * self.inleiuhaircut#this is the running account of the member's banking storage
def adjust_recovery(self, deliveries, member_name, wateryear):
#if recovery deliveries are made, adjust the banking accounts and account for the recovery capacity use
self.inleiubanked[member_name] -= deliveries#this is the running account of the member's banking storage
self.deliveries['leiupumping'][wateryear] += deliveries
self.recovery_use[member_name] += deliveries#keeps track of how much of the capacity is being used in the current timestep
def adjust_exchange(self, deliveries, member_name, wateryear):
#if recovery deliveries are made, adjust the banking accounts and account for the recovery capacity use
self.inleiubanked[member_name] -= deliveries#this is the running account of the member's banking storage
self.deliveries['leiupumping'][wateryear] += deliveries
def absorb_storage(self):
#water delivered to a bank as 'storage' (on the surface) is 'absorbed', clearing up storage space for the next timestep
#also triggers self.thismonthuse, which keeps track of how many consecutive months a recharge bank is used (and the effect on the recharge rate of the spreading pool)
if self.in_leiu_banking:
if self.current_recharge_storage > 0.0:
self.thismonthuse = 1
absorb_fraction = min(self.in_district_direct_recharge*cfs_tafd/self.current_recharge_storage,1.0)
for x in self.participant_list:
self.current_recharge_storage -= self.current_recharge_storage*absorb_fraction
else:
self.thismonthuse = 1
if self.current_recharge_storage > 0.0:
absorb_fraction = min(self.recharge_rate/self.current_recharge_storage,1.0)
self.current_recharge_storage -= self.current_recharge_storage*absorb_fraction
self.current_recharge_storage = max(self.current_recharge_storage, 0.0)
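# --- illustrative arithmetic for the absorption step above (made-up numbers) ---
# With 100 af sitting in the spreading pool and a 30 af/day recharge rate, 30% of the pool is
# absorbed today; the remainder carries over and keeps self.thismonthuse toggled on.
current_recharge_storage, recharge_rate = 100.0, 30.0
absorb_fraction = min(recharge_rate / current_recharge_storage, 1.0)      # 0.3
current_recharge_storage -= current_recharge_storage * absorb_fraction    # 70.0 af remain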
#####################################################################################################################
#####################################################################################################################
#####################################################################################################################
##################################RECORD STATE VARIABLES###############################################################
#####################################################################################################################
def reset_recharge_recovery(self):
self.max_direct_recharge = np.zeros(12)
self.max_leiu_recharge = np.zeros(12)
self.total_banked_storage = 0.0
self.max_leiu_exchange = 0.0
def accounting_full(self, t, wateryear):
# keep track of all contract amounts
for x in self.contract_list_all:
self.daily_supplies_full[x + '_delivery'][t] = self.deliveries[x][wateryear]
self.daily_supplies_full[x + '_flood'][t] = self.deliveries[x + '_flood'][wateryear]
self.daily_supplies_full[x + '_projected'][t] = self.projected_supply[x]
self.daily_supplies_full[x + '_paper'][t] = self.paper_balance[x]
self.daily_supplies_full[x + '_carryover'][t] = self.carryover[x]
self.daily_supplies_full[x + '_turnback'][t] = self.turnback_pool[x]
for x in self.non_contract_delivery_list:
self.daily_supplies_full[x][t] = self.deliveries[x][wateryear]
def accounting(self,t, da, m, wateryear,key):
#takes delivery/allocation values and builds timeseries that show what water was used for (recharge, banking, irrigation etc...)
#delivery/allocation data are set cumulatively - so that values will 'stack' in an area plot.
#Allocations are positive (stack above the x-axis in a plot)
total_projected_supply = 0.0
total_carryover = 0.0
total_recharge = 0.0
self.daily_supplies['annual_demand'][t] = 0.0
for district_name in self.district_list:
total_projected_supply += self.projected_supply[district_name][key]
total_carryover += max(self.carryover[district_name][key] - self.deliveries[district_name][key][wateryear], 0.0)
total_recharge += max(self.recharge_carryover[district_name][key], 0.0)
self.daily_supplies['annual_demand'][t] += self.annualdemand[district_name]
self.daily_supplies['contract_available'][t] += total_projected_supply
self.daily_supplies['carryover_available'][t] += total_carryover
if total_recharge > 0.0:
self.daily_supplies['use_recharge'][t] = 1.0
self.daily_supplies['use_recovery'][t] = self.use_recovery
self.daily_supplies['paper'][t] += total_projected_supply
self.daily_supplies['carryover'][t] += max(total_projected_supply - self.paper_balance[key], 0.0)
self.daily_supplies['allocation'][t] += max(total_projected_supply - total_carryover - self.paper_balance[key], 0.0)
#while deliveries are negative (stack below the x-axis in a plot) - the stacking adjustments come in self.accounting_banking_activity()
if m == 9 and da == 30:
total_deliveries = 0.0
for district_name in self.district_list:
self.annual_supplies['delivery'][wateryear] += self.deliveries[district_name][key][wateryear]
self.annual_supplies['recharge_uncontrolled'][wateryear] += self.deliveries[district_name][key + '_flood'][wateryear]
total_deliveries += self.deliveries[district_name][key][wateryear]
self.deliveries['undelivered_trades'][wateryear] += max(self.paper_balance[key] - total_deliveries, 0.0)
def accounting_banking_activity(self, t, da, m, wateryear):
#this is an adjustment for 'delivery' (the delivery values are negative, so adding 'recharged' and 'exchanged_GW' is removing them from the count for 'deliveries' - we only want deliveries for irrigation, not for recharge
#exchanged_GW is GW that has been pumped out of a bank and 'delivered' to another district. the district gets credit in the reservoir, and deliveries of SW from that reservoir are recorded as 'deliveries' - but we don't want to count that here
#exchanged_SW is GW that has been pumped out of a bank, not owned by the district, and delivered to that district (i.e., the other side of the exchanged_GW in a GW exchange). This should technically count as an irrigation delivery from a contract
#(we want to record that as delivery here) but it doesn't get recorded that way upon delivery, so we add it back here when we are recording accounts (i.e. exchanged_GW and exchanged_SW are counters to help us square the records from GW exchanges)
for x in self.district_list:
self.daily_supplies['delivery'][t] -= self.deliveries[x]['recharged'][wateryear]
self.daily_supplies['recharge_delivery'][t] += self.deliveries[x]['recharged'][wateryear]
self.daily_supplies['banked'][t] += self.deliveries[x]['recover_banked'][wateryear]
self.daily_supplies['delivery'][t] += self.deliveries[x]['exchanged_SW'][wateryear]
self.daily_supplies['leiu_delivered'][t] += self.deliveries[x]['leiupumping'][wateryear]
self.daily_supplies['leiu_accepted'][t] += self.deliveries[x]['inleiu'][wateryear]
self.daily_supplies['delivery'][t] -= self.deliveries['exchanged_GW'][wateryear]
self.daily_supplies['banked'][t] += self.deliveries['exchanged_GW'][wateryear]
self.daily_supplies['banked_storage'][t] += self.total_banked_storage
if m == 9 and da == 30:
for x in self.district_list:
self.annual_supplies['delivery'][wateryear] += self.deliveries[x]['exchanged_SW'][wateryear]
self.annual_supplies['delivery'][wateryear] -= self.deliveries[x]['recharged'][wateryear]
self.annual_supplies['banked_accepted'][wateryear] += self.deliveries[x]['recover_banked'][wateryear]
self.annual_supplies['leiu_accepted'][wateryear] += self.deliveries[x]['inleiu'][wateryear]
self.annual_supplies['leiu_delivered'][wateryear] += self.deliveries[x]['leiupumping'][wateryear]
self.annual_supplies['recharge_delivery'][wateryear] += self.deliveries[x]['recharged'][wateryear]
self.annual_supplies['delivery'][wateryear] -= self.deliveries['exchanged_GW'][wateryear]
recharged_recovery = 0.0
if self.annual_supplies['delivery'][wateryear] < 0.0:
recharged_recovery = self.annual_supplies['delivery'][wateryear]
self.annual_supplies['delivery'][wateryear] = 0.0
self.annual_supplies['banked_accepted'][wateryear] += self.deliveries['exchanged_GW'][wateryear] - self.deliveries['undelivered_trades'][wateryear]
self.annual_supplies['banked_storage'][wateryear] = self.total_banked_storage
self.annual_supplies['acreage'][wateryear] = self.total_acreage
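# --- worked example (invented numbers) of the exchanged_GW / exchanged_SW bookkeeping above ---
# Suppose 5 af of banked groundwater is pumped and exchanged. The receiving district logs
# +5 af of 'exchanged_SW' (added back into 'delivery' because it irrigates crops), while the
# supplying district logs +5 af of 'exchanged_GW' (moved out of 'delivery' and into 'banked'
# so the surface-water credit it receives is not double counted as an irrigation delivery).
delivery_total, banked_total = 20.0, 0.0
exchanged_SW = exchanged_GW = 5.0
delivery_total += exchanged_SW - exchanged_GW   # 20.0: a symmetric trade leaves net delivery unchanged
banked_total += exchanged_GW                    # 5.0 credited as recovered/banked water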
def accounting_leiubank(self,t, m, da, wateryear):
#takes banked storage (in in-leiu banks) and builds timeseries of member accounts
stacked_amount = 0.0
self.recharge_rate_series[t] = self.recharge_rate
for x in self.participant_list:
self.bank_timeseries[x][t] = self.inleiubanked[x] + stacked_amount
stacked_amount += self.inleiubanked[x]
if m == 9 and da == 30:
for x in self.participant_list:
sum_total = 0.0
for year_counter in range(0, wateryear):
sum_total += self.annual_timeseries[x][year_counter]
self.annual_timeseries[x][wateryear] = self.inleiubanked[x] - sum_total
def accounting_as_df(self, index):
#write district accounts and deliveries into a data frame
df = pd.DataFrame()
for n in self.daily_supplies:
df['%s_%s' % (self.key,n)] = pd.Series(self.daily_supplies[n], index = index)
return df
def accounting_as_df_full(self, index):
#write district accounts and deliveries into a data frame
df = pd.DataFrame()
for n in self.daily_supplies_full:
df['%s_%s' % (self.key,n)] = pd.Series(self.daily_supplies_full[n], index = index)
return df
def annual_results_as_df(self):
#write annual district deliveries into a data frame
df = pd.DataFrame()
for n in self.annual_supplies:
df['%s_%s' % (self.key,n)] = pd.Series(self.annual_supplies[n])
return df
def bank_as_df(self, index):
#write leiubanking accounts (plus bank recharge rates) into a dataframe
df = pd.DataFrame()
for n in self.participant_list:
df['%s_%s_leiu' % (self.key,n)] = pd.Series(self.bank_timeseries[n], index = index)
df['%s_rate' % self.key] = pd.Series(self.recharge_rate_series, index = index)
return df
def annual_bank_as_df(self):
#write annual banking changes into a data frame
df = | pd.DataFrame() | pandas.DataFrame |
# import packages
import pandas as pd
import numpy as np
import datetime as dt
import matplotlib.pyplot as plt
from statsmodels.tsa.api import VAR
from pandas import Series
from math import sqrt
from sklearn.metrics import mean_squared_error
# import data
df = pd.read_csv('XGB_seb/Data/data_daily.csv')
del df['Unnamed: 0']
df = df.fillna(0)
df.index = pd.to_datetime(df['date'])
dflevel = df.copy()
features = [
'price',
'volatility',
'volume_price',
'volume_number',
'positive_comment',
'neutral_comment',
'negative_comment'
]
for feat in features:
df[feat] = df[feat].diff()
df = df.dropna()
pred_day = 2901 # Predict for this day, for the next H-1 days. Note indexing of days starts from 0.
H = 30 # Forecast horizon, in days.
train_val_size = int(365 * 1) # Size of train+validation set
# stationary index to datetime
# stationary.index = pd.DatetimeIndex(stationary.index).date
df.index = | pd.to_datetime(df['date']) | pandas.to_datetime |
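# --- possible continuation (illustrative only; lag order, window and columns are assumptions) ---
# The differenced snippet above stops before modelling; a minimal VAR fit/forecast on the last
# train_val_size rows, followed by undoing the first difference from a known starting level,
# could look like this (stand-in random data is used so the sketch runs on its own):
import numpy as np
import pandas as pd
from statsmodels.tsa.api import VAR
toy = pd.DataFrame(np.random.randn(400, 2), columns=['price', 'volume_price'])
train = toy.iloc[-365:]
fitted = VAR(train).fit(7)                                           # fixed lag order of 7, assumed
forecast = fitted.forecast(train.values[-fitted.k_ar:], steps=30)    # 30-day horizon, as H above
price_path = 100.0 + np.cumsum(forecast[:, 0])                       # invert the diff from level 100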
import os
from matplotlib import pyplot as plt
from pandas import DataFrame
import pandas as pd
from sklearn.feature_extraction import DictVectorizer
from sklearn.preprocessing import OneHotEncoder
import category_encoders as ce
import numpy as np
from app import db
from app.base.db_models.ModelEncodedColumns import ModelEncodedColumns
from app.base.db_models.ModelFeatures import ModelFeatures
from mylib.db_helper.AttributesHelper import add_encoded_column_values
from mylib.utiles.CVSReader import get_only_file_name, get_file_path
class AdjustDataFrame:
def __init__(self, name):
self.name = name
def encode_data_frame1(data: DataFrame):
columns_name = data.columns
encoded_data = data
data_types = data.dtypes
for i in range(len(data_types)):
if data_types[i] != np.int64:
col_name = columns_name[i]
oe_style = OneHotEncoder()
oe_results = oe_style.fit_transform(data[[col_name]])
pd.DataFrame(oe_results.toarray(), columns=oe_style.categories_).head()
# encoded_data = encoded_data.join(pd.DataFrame(oe_results.toarray(), columns=oe_style.categories_))
encoded_data = encoded_data.merge(pd.DataFrame(oe_results.toarray()), how='left', left_index=True,
right_index=True)
return encoded_data
def encode_data_frame(model_id, data: DataFrame, column_type):
try:
if column_type != 'F':
return encode_labels_data_frame(model_id, data)
else:
return encode_features_data_frame(model_id, data)
except Exception as e:
print('Ohh -encode_data_frame...Something went wrong.')
print(e)
return 0
def encode_features_data_frame(model_id, data: DataFrame, column_type='F'):
columns_name = data.columns
encoded_data = []
data_types = data.dtypes
for i in range(len(data_types)):
if data_types[i] != np.int64 and data_types[i] != np.float:
col_name = columns_name[i]
dummies = | pd.get_dummies(data[[col_name]]) | pandas.get_dummies |
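# --- toy sketch of the per-column encoding pattern above (frame and columns are assumptions) ---
# Non-numeric columns are expanded with pd.get_dummies and the indicator columns are joined
# back onto the frame in place of the original categorical column.
import pandas as pd
raw = pd.DataFrame({'age': [30, 40, 25], 'city': ['NY', 'LA', 'NY']})
encoded = raw.copy()
for col in raw.columns:
    if raw[col].dtype == object:
        dummies = pd.get_dummies(raw[[col]])
        encoded = pd.concat([encoded.drop(columns=[col]), dummies], axis=1)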
import pandas as pd
from baloo import Index, MultiIndex, Series, DataFrame
from .indexes.test_base import assert_index_equal
from .indexes.test_multi import assert_multiindex_equal
from .test_frame import assert_dataframe_equal
from .test_series import assert_series_equal
class TestPandasConversions(object):
def test_from_pandas_index(self, index_i64):
pandas_index = pd.Index([0, 1, 2, 3, 4])
actual = Index.from_pandas(pandas_index)
expected = index_i64
assert_index_equal(actual, expected)
def test_from_pandas_multiindex(self):
pandas_index = pd.MultiIndex.from_product([[0, 1], [2., 3.]])
actual = MultiIndex.from_pandas(pandas_index)
expected = MultiIndex([[0, 0, 1, 1], [2., 3., 2., 3.]])
assert_multiindex_equal(actual, expected)
def test_from_pandas_series(self, data_i64, series_i64):
pandas_series = pd.Series(data_i64)
actual = Series.from_pandas(pandas_series)
expected = series_i64
assert_series_equal(actual, expected)
def test_from_pandas_df(self, data_f32, df1):
pandas_df = pd.DataFrame({'a': [0, 1, 2, 3, 4], 'b': data_f32}, pd.Index([2, 3, 4, 5, 6]))
actual = DataFrame.from_pandas(pandas_df)
expected = df1
assert_dataframe_equal(actual, expected)
def test_to_pandas_index(self, index_i64):
actual = index_i64.to_pandas()
expected = | pd.Index([0, 1, 2, 3, 4]) | pandas.Index |
# -*- coding: utf-8 -*-
"""
Created on Thu Jul 9 23:51:08 2020
@author: Pavan
"""
import pandas as pd
pd.set_option('mode.chained_assignment', None)
import numpy as np
import math
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.ticker as mtick
mpl.rcParams['font.family'] = 'serif'
import scipy.stats as stats
import itertools
from datetime import datetime, date
import os
import yfinance as yf
from functools import partial
from american_option_pricing import american_option
"""
#######################################################################################
Import Data
#######################################################################################
"""
data = pd.read_excel('data_v3.xlsx', index_col=None)
current_date = date(2020,7,24)
expiry_date = date(2020,8,7)
days_to_expiry = np.busday_count( current_date, expiry_date)-1
max_quantity_per_leg = 5
min_e_pnl = 0
min_p_profit = 30
max_cost = 750
max_loss = 750
mode = "rule_based" #"all_combinations"/"rule_based" - Always keep rule based
save_results = False
Strategies = ["Bear Call Spread","Bull Call Spread", \
"Bull Put Spread", "Bear Put Spread",\
"Bull Put Ladder", "Bear Call Ladder",\
"Long Straddle", "Long Strangle", \
"Long Strap", "Long Strip",\
# "Short Straddle", "Short Strangle", \
"Long Call Butterfly", "Long Put Butterfly",\
"Short Call Butterfly", "Short Put Butterfly",\
"Long Iron Butterfly", "Short Iron Butterfly",\
"Long Call Condor", "Long Put Condor", \
"Short Call Condor", "Short Put Condor", \
"Long Iron Condor", "Short Iron Condor", \
"Long Box"\
]
Strategies = []
"""
#######################################################################################
Get Risk Free Date
#######################################################################################
"""
#rf_eod_data = yf.download("^IRX", start="1993-01-01", end="2019-11-15")
rf_eod_data = yf.download("^IRX", start="2020-07-01", end= current_date.strftime("%Y-%m-%d"))
for col in rf_eod_data.columns:
rf_eod_data[col] = | pd.to_numeric(rf_eod_data[col],errors='coerce') | pandas.to_numeric |
from ensembler.datasets.helpers import sample_dataset
import pandas as pd
import uuid
import os
class TestSampleDataFrame:
def test_sample_single_class(self):
df = pd.DataFrame([{
"sample": uuid.uuid4(),
"background": 0.1,
"1": 0.9
}, {
"sample": uuid.uuid4(),
"background": 0.1,
"1": 0.9
}])
result = sample_dataset(df)
assert len(result) == 2
assert result.iloc[0]["sample"] == df.iloc[0]["sample"]
assert result.iloc[1]["sample"] == df.iloc[1]["sample"]
def test_sample_single_class_one_sample(self):
df = pd.DataFrame([{
"sample": uuid.uuid4(),
"background": 1,
"1": 0
}, {
"sample": uuid.uuid4(),
"background": 0.1,
"1": 1
}])
result = sample_dataset(df)
assert len(result) == 1
assert result.iloc[0]["sample"] == df.iloc[1]["sample"]
def test_sample_single_class_many_samples(self):
df = | pd.DataFrame() | pandas.DataFrame |
import pytest
import pandas as pd
from numpy.testing import assert_equal
from pandas.testing import assert_series_equal, assert_index_equal
from siuba.dply.forcats import fct_recode, fct_collapse
@pytest.fixture
def series1():
yield pd.Series(["a", "b", "c", "d"])
@pytest.fixture
def cat1():
yield pd.Categorical(["a", "b", "c", "d"])
# Need to ensure all functions...
# - 1. can take a series or array
# - 2. handle a symbolic or call
# - 3. handle names with spaces
# just a little shorter to write...
def factor(values, categories, ordered = False):
return pd.Categorical(values, categories, ordered)
def assert_cat_equal(a, b):
assert isinstance(a, pd.Categorical)
assert isinstance(b, pd.Categorical)
| assert_index_equal(a.categories, b.categories) | pandas.testing.assert_index_equal |
# reference: https://www.kaggle.com/c/nyc-taxi-trip-duration
# reference: http://www.faqs.org/faqs/ai-faq/neural-nets/part1/preamble.html
# reference: https://www.kaggle.com/mathijs/weather-data-in-new-york-city-2016
# reference: https://www.kaggle.com/oscarleo/new-york-city-taxi-with-osrm/data
# feature analysis: https://www.kaggle.com/headsortails/nyc-taxi-eda-update-the-fast-the-curious
import os
from taxinet import TaxiNet, TaxiCombinerNet
import torch
import pandas as pd
import numpy as np
from datetime import datetime
from math import sin, cos, sqrt, atan2, radians
from torch.autograd import Variable
from random import random
from sklearn.decomposition import PCA
from sklearn.cluster import MiniBatchKMeans
import xgbhelpers as h
import xgboost as xgb
# TODO args feature
RUN_FEATURE_EXTRACTION = False
MAX_DISTANCE = 100 * 10**3 # 100 km
MAX_DURATION = 24 * 60 * 60 # 24 hours
ENSEMBLE_COUNT = 2
# ===============================
# Date extraction
# ===============================
if (RUN_FEATURE_EXTRACTION):
# read data
test = pd.read_csv('./data/test.csv')
train = pd.read_csv('./data/train.csv')
# label data
test['set'] = 'test'
train['set'] = 'train'
# instantiate the loss column in the test set so that schemas match
test['trip_duration'] = np.NaN
# union `join='outer'` the train and test data so that encoding can be done holistically
# and reset the index to be monotically increasing
combined = | pd.concat([test, train], join='outer') | pandas.concat |
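# --- small usage sketch of the union-then-split pattern above (toy frames; columns assumed) ---
# After shared feature engineering on the combined frame, the original sets are recovered from
# the 'set' label and the placeholder trip_duration is dropped from the test side.
import numpy as np
import pandas as pd
test_toy = pd.DataFrame({'id': [1], 'set': ['test'], 'trip_duration': [np.nan]})
train_toy = pd.DataFrame({'id': [2], 'set': ['train'], 'trip_duration': [600.0]})
combined_toy = pd.concat([test_toy, train_toy], join='outer').reset_index(drop=True)
train_back = combined_toy[combined_toy['set'] == 'train'].drop(columns=['set'])
test_back = combined_toy[combined_toy['set'] == 'test'].drop(columns=['set', 'trip_duration'])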
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Oct 15 14:46:22 2021
@author: emmastanley
"""
import numpy as np
import seaborn as sns
import pandas as pd
import matplotlib.pyplot as plt
#%%
#Plot all TCAV results regardless of significance
layers = ['mixed0', 'mixed2', 'mixed4', 'mixed6', 'mixed8', 'mixed10']
sns.set(style="whitegrid")
# classes = ['class0', 'class1', 'class2', 'class3', 'class4']
classes = ['class4']
for classname in classes:
data = pd.read_csv('/Users/emmastanley/Documents/BME/Research/DR/TCAV/Results/Exp4/model 14 test/exp4MODEL14_'+ classname+ '_model13full_correct_eyepacs.csv', index_col=0)
concepts = ['hemmorhage_cropped', 'microaneursym_cropped', 'hardexudate_cropped', 'tortuous_cropped', 'softexudate_cropped']
# concepts = ['hemmorhage_full', 'microaneursym_full', 'hardexudate_full', 'tortuous_full', 'softexudate_full']
for concept in concepts :
df = data.loc[data['concept'] == concept]
df['TCAV scores'] = df['TCAV scores'].str.split(',') #convert each list of tcav scores into an actual python list object
for index, row in df.iterrows():
lst = row['TCAV scores']
new_lst = [s.strip('[]') for s in lst]
df.at[index, 'TCAV scores'] = new_lst
df_full = df.explode('TCAV scores', ignore_index=True) #expand each list to its own column
df_full['TCAV scores']=pd.to_numeric(df_full['TCAV scores']) #convert all tcav values to numeric
sns.boxplot(x='layer', y='TCAV scores', data=df_full).set_title(classname+' '+concept) #plot!!!! finally
plt.show()
sns.stripplot(x='layer', y='TCAV scores', data=df_full).set_title(classname+' '+concept)
plt.show()
#%%
#plot only layers with significant p-values
layers = ['mixed0', 'mixed2', 'mixed4', 'mixed6', 'mixed8', 'mixed10']
sns.set(style="whitegrid")
# classes = ['class0', 'class1', 'class2', 'class3', 'class4']
classes = ['class4']
concepts = ['hemmorhage_cropped', 'microaneursym_cropped', 'hardexudate_cropped', 'tortuous_cropped', 'softexudate_cropped']
for classname in classes:
data = pd.read_csv('/Users/emmastanley/Documents/BME/Research/DR/TCAV/Results/Exp4/model test/exp4MODELTEST_'+ classname+ '_model13full_correct_eyepacs.csv', index_col=0)
#drop insignificant rows
data = data.drop(data[data['p val'] > 0.05].index)
concepts = ['hemmorhage_cropped', 'microaneursym_cropped', 'hardexudate_cropped', 'tortuous_cropped', 'softexudate_cropped']
# concepts = ['hemmorhage_full', 'microaneursym_full', 'hardexudate_full', 'tortuous_full', 'softexudate_full']
for concept in concepts :
df = data.loc[data['concept'] == concept]
df['TCAV scores'] = df['TCAV scores'].str.split(',') #convert each list of tcav scores into an actual python list object
for index, row in df.iterrows():
lst = row['TCAV scores']
new_lst = [s.strip('[]') for s in lst]
df.at[index, 'TCAV scores'] = new_lst
df_full = df.explode('TCAV scores', ignore_index=True) #expand each list to its own column
df_full['TCAV scores']= | pd.to_numeric(df_full['TCAV scores']) | pandas.to_numeric |
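# --- synthetic reproduction of the list-column cleanup used in both loops above ---
# A string of bracketed scores is split on commas, stripped of '[]', exploded to one row per
# score, and cast to float so seaborn can aggregate it (toy data; column names match above).
import pandas as pd
toy = pd.DataFrame({'layer': ['mixed0'], 'TCAV scores': ['[0.61, 0.72, 0.68]']})
toy['TCAV scores'] = toy['TCAV scores'].str.split(',')
toy['TCAV scores'] = toy['TCAV scores'].apply(lambda lst: [s.strip('[] ') for s in lst])
toy = toy.explode('TCAV scores', ignore_index=True)
toy['TCAV scores'] = pd.to_numeric(toy['TCAV scores'])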