prompt (string, lengths 19 – 1.03M) | completion (string, lengths 4 – 2.12k) | api (string, lengths 8 – 90) |
---|---|---|
#!/usr/bin/env python3
import pandas as pd
import numpy as np
import itertools
import sys
path_metadata = str(sys.argv[1])
path_snpmat_standard = str(sys.argv[2])
path_snpmat_nogaps = str(sys.argv[3])
path_alnlengths = str(sys.argv[4])
path_output = str(sys.argv[5])
# read the metadata table and collect the strain names into list_strains
metadata = pd.read_csv(path_metadata, sep = '\t')
list_strains = list(metadata.strain)
# use itertools to make all unique combinations of list_strains with itself, convert to dataframe and give column names
combos = pd.DataFrame(list(itertools.combinations(list_strains,2)), columns = ['strain1', 'strain2'])
# match metadata values to the combinations in the first column of combos (strain1)
metadata.columns = ['carrier1', 'strain1', 'timepoint1']
df = pd.merge(combos, metadata, on = 'strain1')
# Change metadata name for matching, and repeat above but for strain2
metadata.columns = ['carrier2', 'strain2', 'timepoint2']
df = pd.merge(df, metadata, on = 'strain2')
# Define three conditions
conditions = [
(df['carrier1'] == df['carrier2']) & (df['timepoint1'] == df['timepoint2']),
(df['carrier1'] == df['carrier2']) & (df['timepoint1'] != df['timepoint2']),
df['carrier1'] != df['carrier2']]
# Define values per condition
choices = [
'same_carrier_same_timepoint',
'same_carrier_different_timepoint',
'different_carrier']
# Assign comparison types using numpy and previously defined conditions and choices
df['comparison'] = np.select(conditions, choices, default=np.nan)
# Read the files with molten SNPs (standard and no gaps) and alignment lengths
snpmat_standard = | pd.read_csv(path_snpmat_standard, sep = '\t', names = ['strain1','strain2','SNPs_not_corrected']) | pandas.read_csv |
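# A hedged sketch (not part of the original script) of how the remaining inputs
# declared above -- path_snpmat_nogaps and path_alnlengths -- could be read with the
# same pattern; the column names used here are assumptions:
#   snpmat_nogaps = pd.read_csv(path_snpmat_nogaps, sep='\t',
#                               names=['strain1', 'strain2', 'SNPs_no_gaps'])
#   alnlengths = pd.read_csv(path_alnlengths, sep='\t',
#                            names=['strain1', 'strain2', 'alignment_length'])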
# flowbysector.py (flowsa)
# !/usr/bin/env python3
# coding=utf-8
"""
Produces a FlowBySector data frame based on a method file for the given class
To run code, specify the "Run/Debug Configurations" Parameters to the
"flowsa/data/flowbysectormethods" yaml file name
you want to use.
Example: "Parameters: --m Water_national_2015_m1"
Files necessary to run FBS:
a. a method yaml in "flowsa/data/flowbysectormethods"
b. crosswalk(s) for the main dataset you are allocating and any datasets
used to allocate to sectors
c. a .py file in "flowsa/" for the main dataset you are allocating if
you need functions to clean up the FBA
before allocating to FBS
"""
import argparse
import yaml
import pandas as pd
from esupy.processed_data_mgmt import write_df_to_file
import flowsa
from flowsa.common import log, vLog, flowbysectormethodpath, flow_by_sector_fields, \
fips_number_key, flow_by_activity_fields, load_source_catalog, \
flowbysectoractivitysetspath, flow_by_sector_fields_w_activity, \
paths, fba_activity_fields, rename_log_file, \
fbs_activity_fields, fba_fill_na_dict, fbs_fill_na_dict, fbs_default_grouping_fields, \
fbs_grouping_fields_w_activities, logoutputpath
from flowsa.metadata import set_fb_meta, write_metadata
from flowsa.fbs_allocation import direct_allocation_method, function_allocation_method, \
dataset_allocation_method
from flowsa.sectormapping import add_sectors_to_flowbyactivity, map_fbs_flows, \
get_sector_list
from flowsa.flowbyfunctions import agg_by_geoscale, sector_aggregation, \
aggregator, subset_df_by_geoscale, sector_disaggregation, dynamically_import_fxn
from flowsa.dataclean import clean_df, harmonize_FBS_columns, reset_fbs_dq_scores
from flowsa.validation import allocate_dropped_sector_data,\
compare_activity_to_sector_flowamounts, \
compare_fba_geo_subset_and_fbs_output_totals, compare_geographic_totals,\
replace_naics_w_naics_from_another_year, calculate_flowamount_diff_between_dfs
def parse_args():
"""
Make year and source script parameters
:return: dictionary, 'method'
"""
ap = argparse.ArgumentParser()
ap.add_argument("-m", "--method",
required=True, help="Method for flow by sector file. "
"A valid method config file must exist with this name.")
args = vars(ap.parse_args())
return args
def load_method(method_name):
"""
Loads a flowbysector method from a YAML
:param method_name: str, FBS method name (ex. 'Water_national_m1_2015')
:return: dictionary, items in the FBS method yaml
"""
sfile = flowbysectormethodpath + method_name + '.yaml'
try:
with open(sfile, 'r') as f:
method = yaml.safe_load(f)
except IOError:
        log.error("FlowBySector method file not found.")
        raise
return method
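# For orientation, a minimal method YAML consistent with the keys accessed in this
# module might look roughly like the sketch below (illustrative only; the source
# name "USGS_NWIS_WU" and the specific values are assumptions, not taken from this file):
#   target_sector_level: NAICS_6
#   target_sector_source: NAICS_2012_Code
#   target_geoscale: national
#   source_names:
#     "USGS_NWIS_WU":
#       data_format: 'FBA'
#       class: Water
#       year: 2015
#       geoscale_to_use: national
#       activity_sets:
#         activity_set_1:
#           names: [<activity name>]
#           allocation_method: direct
#           allocation_from_scale: national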
def load_source_dataframe(k, v):
"""
Load the source dataframe. Data can be a FlowbyActivity or
FlowBySector parquet stored in flowsa, or a FlowBySector
formatted dataframe from another package.
:param k: str, The datasource name
:param v: dictionary, The datasource parameters
:return: df of identified parquet
"""
if v['data_format'] == 'FBA':
# if yaml specifies a geoscale to load, use parameter to filter dataframe
if 'source_fba_load_scale' in v:
geo_level = v['source_fba_load_scale']
else:
geo_level = None
vLog.info("Retrieving flowbyactivity for datasource %s in year %s", k, str(v['year']))
flows_df = flowsa.getFlowByActivity(datasource=k, year=v['year'], flowclass=v['class'],
geographic_level=geo_level)
elif v['data_format'] == 'FBS':
vLog.info("Retrieving flowbysector for datasource %s", k)
flows_df = flowsa.getFlowBySector(k)
elif v['data_format'] == 'FBS_outside_flowsa':
vLog.info("Retrieving flowbysector for datasource %s", k)
flows_df = dynamically_import_fxn(k, v["FBS_datapull_fxn"])(v)
else:
        vLog.error("Data format not specified in method file for datasource %s", k)
        flows_df = None
return flows_df
def main(**kwargs):
"""
Creates a flowbysector dataset
:param kwargs: dictionary of arguments, only argument is "method_name", the name of method
corresponding to flowbysector method yaml name
:return: parquet, FBS save to local folder
"""
if len(kwargs) == 0:
kwargs = parse_args()
method_name = kwargs['method']
# assign arguments
vLog.info("Initiating flowbysector creation for %s", method_name)
# call on method
method = load_method(method_name)
# create dictionary of data and allocation datasets
fb = method['source_names']
# Create empty list for storing fbs files
fbs_list = []
for k, v in fb.items():
# pull fba data for allocation
flows = load_source_dataframe(k, v)
if v['data_format'] == 'FBA':
# ensure correct datatypes and that all fields exist
flows = clean_df(flows, flow_by_activity_fields,
fba_fill_na_dict, drop_description=False)
# map flows to federal flow list or material flow list
flows_mapped, mapping_files = map_fbs_flows(flows, k, v, keep_fba_columns=True)
# clean up fba, if specified in yaml
if "clean_fba_df_fxn" in v:
vLog.info("Cleaning up %s FlowByActivity", k)
flows_mapped = dynamically_import_fxn(k, v["clean_fba_df_fxn"])(flows_mapped)
# if activity_sets are specified in a file, call them here
if 'activity_set_file' in v:
aset_names = pd.read_csv(flowbysectoractivitysetspath +
v['activity_set_file'], dtype=str)
else:
aset_names = None
# create dictionary of allocation datasets for different activities
activities = v['activity_sets']
# subset activity data and allocate to sector
for aset, attr in activities.items():
# subset by named activities
if 'activity_set_file' in v:
names = aset_names[aset_names['activity_set'] == aset]['name']
else:
names = attr['names']
vLog.info("Preparing to handle %s in %s", aset, k)
# subset fba data by activity
flows_subset =\
flows_mapped[(flows_mapped[fba_activity_fields[0]].isin(names)) |
(flows_mapped[fba_activity_fields[1]].isin(names)
)].reset_index(drop=True)
# if activities are sector-like, check sectors are valid
if load_source_catalog()[k]['sector-like_activities']:
flows_subset2 =\
replace_naics_w_naics_from_another_year(flows_subset,
method['target_sector_source'])
# check impact on df FlowAmounts
vLog.info('Calculate FlowAmount difference caused by '
'replacing NAICS Codes with %s, saving difference in Validation log',
method['target_sector_source'],)
calculate_flowamount_diff_between_dfs(flows_subset, flows_subset2)
else:
flows_subset2 = flows_subset.copy()
# extract relevant geoscale data or aggregate existing data
flows_subset_geo = subset_df_by_geoscale(flows_subset2, v['geoscale_to_use'],
attr['allocation_from_scale'])
# if loading data subnational geoscale, check for data loss
if attr['allocation_from_scale'] != 'national':
compare_geographic_totals(flows_subset_geo, flows_mapped, k,
attr, aset, names)
# Add sectors to df activity, depending on level of specified sector aggregation
log.info("Adding sectors to %s", k)
flows_subset_wsec =\
add_sectors_to_flowbyactivity(flows_subset_geo,
sectorsourcename=method['target_sector_source'],
allocationmethod=attr['allocation_method'])
# clean up fba with sectors, if specified in yaml
if "clean_fba_w_sec_df_fxn" in v:
vLog.info("Cleaning up %s FlowByActivity with sectors", k)
flows_subset_wsec = \
dynamically_import_fxn(k, v["clean_fba_w_sec_df_fxn"])(flows_subset_wsec,
attr=attr,
method=method)
# rename SourceName to MetaSources and drop columns
flows_mapped_wsec = flows_subset_wsec.\
rename(columns={'SourceName': 'MetaSources'}).\
drop(columns=['FlowName', 'Compartment'])
# if allocation method is "direct", then no need to create alloc ratios,
# else need to use allocation
# dataframe to create sector allocation ratios
if attr['allocation_method'] == 'direct':
fbs = direct_allocation_method(flows_mapped_wsec, k, names, method)
# if allocation method for an activity set requires a specific
# function due to the complicated nature
# of the allocation, call on function here
elif attr['allocation_method'] == 'allocation_function':
fbs = function_allocation_method(flows_mapped_wsec, k, names, attr, fbs_list)
else:
fbs =\
dataset_allocation_method(flows_mapped_wsec, attr,
names, method, k, v, aset,
method_name, aset_names)
# drop rows where flowamount = 0 (although this includes dropping suppressed data)
fbs = fbs[fbs['FlowAmount'] != 0].reset_index(drop=True)
# define grouping columns dependent on sectors being activity-like or not
if load_source_catalog()[k]['sector-like_activities'] is False:
groupingcols = fbs_grouping_fields_w_activities
groupingdict = flow_by_sector_fields_w_activity
else:
groupingcols = fbs_default_grouping_fields
groupingdict = flow_by_sector_fields
# clean df
fbs = clean_df(fbs, groupingdict, fbs_fill_na_dict)
# aggregate df geographically, if necessary
log.info("Aggregating flowbysector to %s level", method['target_geoscale'])
# determine from scale
if fips_number_key[v['geoscale_to_use']] <\
fips_number_key[attr['allocation_from_scale']]:
from_scale = v['geoscale_to_use']
else:
from_scale = attr['allocation_from_scale']
fbs_geo_agg = agg_by_geoscale(fbs, from_scale,
method['target_geoscale'], groupingcols)
# aggregate data to every sector level
log.info("Aggregating flowbysector to all sector levels")
fbs_sec_agg = sector_aggregation(fbs_geo_agg, groupingcols)
# add missing naics5/6 when only one naics5/6 associated with a naics4
fbs_agg = sector_disaggregation(fbs_sec_agg)
# check if any sector information is lost before reaching
# the target sector length, if so,
# allocate values equally to disaggregated sectors
vLog.info('Searching for and allocating FlowAmounts for any parent '
'NAICS that were dropped in the subset to '
'%s child NAICS', method['target_sector_level'])
fbs_agg_2 = allocate_dropped_sector_data(fbs_agg, method['target_sector_level'])
# compare flowbysector with flowbyactivity
compare_activity_to_sector_flowamounts(
flows_mapped_wsec, fbs_agg_2, aset, k, method)
# return sector level specified in method yaml
# load the crosswalk linking sector lengths
sector_list = get_sector_list(method['target_sector_level'])
# subset df, necessary because not all of the sectors are
# NAICS and can get duplicate rows
fbs_1 = fbs_agg_2.loc[(fbs_agg_2[fbs_activity_fields[0]].isin(sector_list)) &
(fbs_agg_2[fbs_activity_fields[1]].isin(sector_list))].\
reset_index(drop=True)
fbs_2 = fbs_agg_2.loc[(fbs_agg_2[fbs_activity_fields[0]].isin(sector_list)) &
(fbs_agg_2[fbs_activity_fields[1]].isnull())].\
reset_index(drop=True)
fbs_3 = fbs_agg_2.loc[(fbs_agg_2[fbs_activity_fields[0]].isnull()) &
(fbs_agg_2[fbs_activity_fields[1]].isin(sector_list))].\
reset_index(drop=True)
fbs_sector_subset = | pd.concat([fbs_1, fbs_2, fbs_3]) | pandas.concat |
import os
# Optionally enforce CPU-only execution
#os.environ['CUDA_VISIBLE_DEVICES'] = '-1'  # uncomment to force CPU usage; leave commented to allow GPU usage
# Seed the Random-Number-Generator in a bid to get 'Reproducible Results'
import tensorflow as tf
from random import seed as py_seed, sample
from numpy.random import seed as np_seed
py_seed(1)  # seed Python's built-in RNG (used by random.sample below)
np_seed(1)  # seed NumPy's RNG
tf.compat.v1.set_random_seed(3)
# load required modules
import pandas as pd
import numpy as np
import math, time
from datetime import datetime, timedelta
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import QuantileTransformer, MinMaxScaler, Normalizer
from sklearn.metrics import mean_squared_error, explained_variance_score, mean_absolute_error, r2_score
import matplotlib.pyplot as plt
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
from sklearn.feature_selection import SelectKBest, f_regression
from sklearn.ensemble import ExtraTreesRegressor, GradientBoostingRegressor
from sklearn.multioutput import MultiOutputRegressor
from sklearn.linear_model import LinearRegression, LogisticRegression, SGDRegressor, BayesianRidge, ARDRegression
from sklearn.svm import SVR
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.neighbors import KNeighborsRegressor
from sklearn.neural_network import MLPRegressor
# Import classes from my custom package
from custom_classes.Starter_Module_01 import Starter
# Global settings for PANDAS frame display
pd.set_option('html.table_schema', True)
pd.set_option('max_columns', 800)
pd.set_option('max_rows', 70000)
def args_parse_cmd():
parser = ArgumentParser(description='START-HELP: Program for forecasting/predicting breakup or schism in social networks', epilog='END-HELP: End of assistance/help section',
formatter_class=ArgumentDefaultsHelpFormatter, conflict_handler='resolve')
parser.add_argument('-rp', '--root_path', nargs='+', default='datasets/', type=str, help='Generic root path for application/program')
parser.add_argument('-rm', '--run_mode', nargs='+', default='single', type=str, choices=['single', 'all'], help='Run model per specified dataset OR cumulatively for all intrinsic datasets')
args = parser.parse_args()
return args
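# Example command line (the script filename is a placeholder):
#   python <this_script>.py --root_path datasets/ --run_mode single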
def process_reprd_idx(myCls, root_path, fname):
df1 = myCls.load_data(root_path, fname, sep=',', header=0, index_col=0, mode='READ')
exp_reprd_idx = pd.DataFrame()
for i in range(len(df1)):
tmp_start_date = df1.iloc[i,1]
start_date = datetime.strptime(tmp_start_date, '%d-%m-%Y')
tmp_end_date = df1.iloc[i,2]
end_date = datetime.strptime(tmp_end_date, '%d-%m-%Y')
while (start_date <= end_date):
exp_reprd_idx = exp_reprd_idx.append([[str(start_date), df1.values[i,3]]], ignore_index=True)
start_date = start_date + timedelta(days=1)
exp_reprd_idx.to_csv(root_path+fname[:-4]+'_EXPAND.csv', sep=',', header=False, index=False)
def cumm_2_reg_conv(myCls, root_path, fname):
#df1 = myCls.load_data(root_path, fname, sep='\s', header=0, index_col=None, mode='READ')
df1 = myCls.load_data(root_path, fname, sep=',', header=0, index_col=None, mode='READ')
infect_recover_death = pd.DataFrame()
for i in range(len(df1)):
if (i == 0):
temp_infect = df1.values[i,2]
temp_recover = df1.values[i,5]
temp_death = df1.values[i,6]
else:
temp_infect = df1.values[i,2] - df1.values[i-1,2]
temp_recover = df1.values[i,5] - df1.values[i-1,5]
temp_death = df1.values[i,6] - df1.values[i-1,6]
infect_recover_death = infect_recover_death.append([[temp_infect, temp_recover, temp_death]], ignore_index=True)
infect_recover_death.columns = ['Infected Positive (I)', 'Recovered (R)', 'Deaths (D)']
infect_recover_death.to_csv(root_path+fname[:-4]+'_infect_recover_death.csv', sep=',', header=True, index=False)
def wkend_holiday(myCls, root_path, fname):
df1 = myCls.load_data(root_path, fname, sep=',', header=0, index_col=None, mode='READ')
holidays = pd.DataFrame()
for i in range(len(df1)):
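        # Assumes row 0 of the time series falls on a Saturday, so indices with
        # i % 7 == 0 are Saturdays and (i-1) % 7 == 0 are Sundays.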
if (i % 7 == 0):
holidays = holidays.append([[1]], ignore_index=True) # Saturdays
elif ((i-1) % 7 == 0):
holidays = holidays.append([[1]], ignore_index=True) # Sundays
else:
holidays = holidays.append([[0]], ignore_index=True)
holidays.columns = ['Holiday']
holidays.to_csv(root_path+fname[:-4]+'_holiday.csv', sep=',', header=True, index=False)
def avg_morbility_ontario(myCls, root_path, fname):
df1 = myCls.load_data(root_path, '2020_CA_Region_Mobility_Report.csv', sep=',', header=0, index_col=None, mode='READ')
provinces = ['Alberta', 'British Columbia', 'Manitoba', 'New Brunswick', 'Newfoundland and Labrador', 'Nova Scotia', 'Ontario', 'Prince Edward Island', 'Quebec', 'Saskatchewan']
for province in provinces:
df2 = df1.query('sub_region_1 == @province')
df3 = myCls.load_data(root_path, fname, sep=',', header=0, index_col=None, mode='READ')
morbility_data = pd.DataFrame()
for i in range(len(df3)):
search_date = df3.values[i,0]
res = df2.query('date == @search_date') # ALWAYS CHANGE 'DATE' TO SHORT-FORMAT ON .csv FILE
divisor = len(res)
if (divisor > 0):
col1 = res['retail_and_recreation_percent_change_from_baseline'].sum(axis=0, skipna=True)
col2 = res['grocery_and_pharmacy_percent_change_from_baseline'].sum(axis=0, skipna=True)
col3 = res['parks_percent_change_from_baseline'].sum(axis=0, skipna=True)
col4 = res['transit_stations_percent_change_from_baseline'].sum(axis=0, skipna=True)
col5 = res['workplaces_percent_change_from_baseline'].sum(axis=0, skipna=True)
col6 = res['residential_percent_change_from_baseline'].sum(axis=0, skipna=True)
morbility_data = morbility_data.append([[search_date, round(col1/divisor, 3), round(col2/divisor, 3), round(col3/divisor, 3), round(col4/divisor, 3), round(col5/divisor, 3), round(col6/divisor, 3)]], ignore_index=True)
morbility_data.columns = ['date', 'retail_and_recreation_change', 'grocery_and_pharmacy_change', 'parks_change', 'transit_stations_change', 'workplaces_change', 'residential_change']
morbility_data.to_csv(root_path+province+'_morbility_data.csv', sep=',', header=True, index=False)
def case_age_gender_computer(myCls, root_path, fname):
### Determine Charset encoding of .csv file (USE ORIGINAL FILE AS DOWNLOADED) ###
import chardet
local_file = root_path + 'COVID-19_Case_Details_Age-Group_Canada.csv'
with open(local_file, 'rb') as rawdata:
result = chardet.detect(rawdata.read(100000))
print('The charset encoding is: ', result)
### Determine Charset encoding of .csv file (USE ORIGINAL FILE AS DOWNLOADED) ###
df1 = myCls.load_data(root_path, 'COVID-19_Case_Details_Age-Group_Canada.csv', sep=',', header=0, index_col=0, mode='READ')
provinces = ['Alberta', 'British Columbia', 'Manitoba', 'New Brunswick', 'NL', 'Nova Scotia', 'Ontario', 'PEI', 'Quebec', 'Saskatchewan']
for prvnce in provinces:
df2 = df1.query('province == @prvnce')
df3 = myCls.load_data(root_path, fname, sep=',', header=0, index_col=None, mode='READ')
age_gender_data = pd.DataFrame()
for i in range(len(df3)):
search_date = df3.values[i,0]
res_df = df2.query('date_reported == @search_date') # ALWAYS CHANGE 'DATE' TO SHORT-FORMAT ON .csv FILE
male_0_34 = 0
male_35_69 = 0
male_70_above = 0
female_0_34 = 0
female_35_69 = 0
female_70_above = 0
generic_bias = 0
if (len(res_df) > 0):
for j in range(len(res_df)):
if (((res_df.values[j,3] == '<20') and (res_df.values[j,4] == 'Male')) or ((res_df.values[j,3] == '20-29') and (res_df.values[j,4] == 'Male')) or ((res_df.values[j,3] == '30-39') and (res_df.values[j,4] == 'Male'))):
male_0_34 = male_0_34 + 1
elif (((res_df.values[j,3] == '40-49') and (res_df.values[j,4] == 'Male')) or ((res_df.values[j,3] == '50-59') and (res_df.values[j,4] == 'Male')) or ((res_df.values[j,3] == '60-69') and (res_df.values[j,4] == 'Male'))):
male_35_69 = male_35_69 + 1
elif (((res_df.values[j,3] == '70-79') and (res_df.values[j,4] == 'Male')) or ((res_df.values[j,3] == '80+') and (res_df.values[j,4] == 'Male')) or ((res_df.values[j,3] == 'Not Reported') and (res_df.values[j,4] == 'Male'))):
male_70_above = male_70_above + 1
elif (((res_df.values[j,3] == '<20') and (res_df.values[j,4] == 'Female')) or ((res_df.values[j,3] == '20-29') and (res_df.values[j,4] == 'Female')) or ((res_df.values[j,3] == '30-39') and (res_df.values[j,4] == 'Female'))):
female_0_34 = female_0_34 + 1
elif (((res_df.values[j,3] == '40-49') and (res_df.values[j,4] == 'Female')) or ((res_df.values[j,3] == '50-59') and (res_df.values[j,4] == 'Female')) or ((res_df.values[j,3] == '60-69') and (res_df.values[j,4] == 'Female'))):
female_35_69 = female_35_69 + 1
elif (((res_df.values[j,3] == '70-79') and (res_df.values[j,4] == 'Female')) or ((res_df.values[j,3] == '80+') and (res_df.values[j,4] == 'Female')) or ((res_df.values[j,3] == 'Not Reported') and (res_df.values[j,4] == 'Female'))):
female_70_above = female_70_above + 1
elif (res_df.values[j,4] == 'Not Reported'):
generic_bias = generic_bias + 1
# 'generic_bias' resolution & file update
bias = generic_bias / 6
if ((generic_bias % 6) == 1):
age_gender_data = age_gender_data.append([[search_date, male_0_34+math.floor(bias), male_35_69+math.floor(bias), male_70_above+math.ceil(bias), female_0_34+math.floor(bias), female_35_69+math.floor(bias), female_70_above+math.floor(bias)]], ignore_index=True)
elif ((generic_bias % 6) == 2):
age_gender_data = age_gender_data.append([[search_date, male_0_34+math.floor(bias), male_35_69+math.floor(bias), male_70_above+math.ceil(bias), female_0_34+math.floor(bias), female_35_69+math.floor(bias), female_70_above+math.ceil(bias)]], ignore_index=True)
elif ((generic_bias % 6) == 3):
age_gender_data = age_gender_data.append([[search_date, male_0_34+math.floor(bias), male_35_69+math.ceil(bias), male_70_above+math.ceil(bias), female_0_34+math.floor(bias), female_35_69+math.floor(bias), female_70_above+math.ceil(bias)]], ignore_index=True)
elif ((generic_bias % 6) == 4):
age_gender_data = age_gender_data.append([[search_date, male_0_34+math.floor(bias), male_35_69+math.ceil(bias), male_70_above+math.ceil(bias), female_0_34+math.floor(bias), female_35_69+math.ceil(bias), female_70_above+math.ceil(bias)]], ignore_index=True)
elif ((generic_bias % 6) == 5):
age_gender_data = age_gender_data.append([[search_date, male_0_34+math.ceil(bias), male_35_69+math.ceil(bias), male_70_above+math.ceil(bias), female_0_34+math.floor(bias), female_35_69+math.ceil(bias), female_70_above+math.ceil(bias)]], ignore_index=True)
elif ((generic_bias % 6) == 0):
age_gender_data = age_gender_data.append([[search_date, male_0_34+math.ceil(bias), male_35_69+math.ceil(bias), male_70_above+math.ceil(bias), female_0_34+math.ceil(bias), female_35_69+math.ceil(bias), female_70_above+math.ceil(bias)]], ignore_index=True)
else:
age_gender_data = age_gender_data.append([[search_date, male_0_34, male_35_69, male_70_above, female_0_34, female_35_69, female_70_above]], ignore_index=True)
age_gender_data.columns = ['date', 'COV_M_0_34', 'COV_M_35_69', 'COV_M_70_above', 'COV_F_0_34', 'COV_F_35_69', 'COV_F_70_above']
age_gender_data.to_csv(root_path+prvnce+'_age_gender_data.csv', sep=',', header=True, index=False)
def feat_extractor(dframe, label='train'):
TotPopln = dframe.loc[:,'0 - 34 (M)'] + dframe.loc[:,'35 - 69 (M)'] + dframe.loc[:,'70 - Above (M)'] + dframe.loc[:,'0 - 34 (F)'] + dframe.loc[:,'35 - 69 (F)'] + dframe.loc[:,'70 - Above (F)']
dframe['mobility_index'] = dframe.loc[:,'retail_and_recreation_change'] + dframe.loc[:,'grocery_and_pharmacy_change'] + dframe.loc[:,'parks_change'] + dframe.loc[:,'transit_stations_change'] + dframe.loc[:,'workplaces_change'] + dframe.loc[:,'residential_change']
dframe['mobility_totpop'] = dframe.loc[:,'mobility_index'].div(TotPopln, axis=0)
dframe['infect_mobility'] = dframe.loc[:,'Virus Reprod Index'].mul(dframe['mobility_index'], axis=0)
dframe['wave_climate'] = dframe.loc[:,'Wave'].div(dframe['Climate'], axis=0)
dframe['infect_totpop_ratio'] = dframe.loc[:,'Virus Reprod Index'].div(TotPopln, axis=0)
dframe['infect_travel_ratio'] = dframe.loc[:,'Virus Reprod Index'].div(dframe['Return Travellers'], axis=0)
dframe['infect_M_0_34'] = dframe.loc[:,'Virus Reprod Index'].div(dframe['0 - 34 (M)'], axis=0)
dframe['infect_M_35_69'] = dframe.loc[:,'Virus Reprod Index'].div(dframe['35 - 69 (M)'], axis=0)
dframe['infect_M_70_above'] = dframe.loc[:,'Virus Reprod Index'].div(dframe['70 - Above (M)'], axis=0)
dframe['infect_F_0_34'] = dframe.loc[:,'Virus Reprod Index'].div(dframe['0 - 34 (F)'], axis=0)
dframe['infect_F_35_69'] = dframe.loc[:,'Virus Reprod Index'].div(dframe['35 - 69 (F)'], axis=0)
dframe['infect_F_70_above'] = dframe.loc[:,'Virus Reprod Index'].div(dframe['70 - Above (F)'], axis=0)
dframe['immunity_ratio'] = dframe.loc[:,'Cumm Vaccine'].div(TotPopln, axis=0)
dframe['travel_totpop'] = dframe.loc[:,'Return Travellers'].div(TotPopln, axis=0)
dframe['travel_land'] = dframe.loc[:,'Return Travellers'].div(dframe['Dry Land'], axis=0)
dframe['pop_density'] = TotPopln.div(dframe['Dry Land'], axis=0)
dframe['chc_density'] = dframe.loc[:,'CHCentres'].div(dframe['Dry Land'], axis=0)
print("Shape of 'supplemented' ", label, " data: ", dframe.shape)
def ppe_pred_dset(myCls, root_path, fname):
df1 = myCls.load_data(root_path, fname, sep=',', header=0, index_col=None, mode='READ')
ppe_pred = pd.DataFrame()
for i in range(len(df1)):
# ppe_pred
CHCentres = 153
HealthPersons = 7253 + 8996 + 519 + 34094 + 160137 + 3446 # MedLabTech(7253) + Parameds(8996) + PhysicianAssts(519) + Physicians(34094) + Nurses(160137) + RespiratoryTherapists(3446)
Cap_HealthPersons = 0.85
hosptlztn = df1.values[i,2]
hosptlztn_ratio = hosptlztn/CHCentres
if (hosptlztn_ratio >= 1): # There is at least '1' COVID19 hospitalization-case per CHCentre
ppe_qty = Cap_HealthPersons * HealthPersons * 1
else:
ppe_qty = Cap_HealthPersons * HealthPersons * hosptlztn_ratio
ppe_pred = ppe_pred.append([[ppe_qty]], ignore_index=True)
y_data = pd.concat([pd.DataFrame(df1.values[:,0]), pd.DataFrame(df1.values[:,1]), pd.DataFrame(df1.values[:,2]), ppe_pred, pd.DataFrame(df1.values[:,3]), pd.DataFrame(df1.values[:,4])], axis='columns')
y_data.columns = ['Reported Date', 'Infected Positive (I)', 'Hospitalized (incls. ICU)', 'PPE Qty', 'Recovered (R)', 'Deaths (D)']
y_data.to_csv(root_path+'covidtesting_ontario_hospitalized.csv', sep=',', header=True, index=False)
def covid19_prevalence(myCls, root_path, fname):
df1 = myCls.load_data(root_path, fname, sep=',', header=0, index_col=None, mode='READ')
# Plot performance of the Network-Model fitting on the dataset
plt.figure(figsize=(32,15)) # Using "plt.figure()" to tweak the resultant graph plot
plt.subplot(1, 2, 1) # subplot(rows, cols, active_ROW-COL_in_subplotSpace)
plt.grid()
plt.plot(df1.loc[:,'COV_M_0_34'], 'k-', marker='o', label='0 - 34 (Male Age Group)')
plt.plot(df1.loc[:,'COV_M_35_69'], 'b--', marker='*', label='35 - 69 (Male Age Group)')
plt.plot(df1.loc[:,'COV_M_70_above'], 'g:', marker='D', label='70 - Above (Male Age Group)')
plt.xlabel('Days (Jan. 25, 2020 - Jan. 20, 2021)')
plt.ylabel('Number of Infection/Positive Cases')
plt.legend(loc='best')
plt.title('SARS-CoV-2 Infection Prevalence in Ontario (Male Age Group)')
plt.subplot(1, 2, 2) # subplot(rows, cols, active_ROW-COL_in_subplotSpace)
plt.grid()
plt.plot(df1.loc[:,'COV_F_0_34'], 'k-', marker='o', label='0 - 34 (Female Age Group)')
plt.plot(df1.loc[:,'COV_F_35_69'], 'b--', marker='*', label='35 - 69 (Female Age Group)')
plt.plot(df1.loc[:,'COV_F_70_above'], 'g:', marker='D', label='70 - Above (Female Age Group)')
plt.xlabel('Days (Jan. 25, 2020 - Jan. 20, 2021)')
plt.ylabel('Number of Infection/Positive Cases')
plt.legend(loc='best')
plt.title('SARS-CoV-2 Infection Prevalence in Ontario (Female Age Group)')
plt.savefig(root_path + 'plots_and_data/COVID-19_Prevalence_Males_and_Females.png')
plt.show()
def ppe_pred(data):
data = list(data)
ppe_pred = list()
for i in range(len(data)):
# ppe_pred
CHCentres = 153
HealthPersons = 7253 + 8996 + 519 + 34094 + 160137 + 3446 # MedLabTech(7253) + Parameds(8996) + PhysicianAssts(519) + Physicians(34094) + Nurses(160137) + RespiratoryTherapists(3446)
Cap_HealthPersons = 0.85
hosptlztn = data[i]
hosptlztn_ratio = hosptlztn/CHCentres
if (hosptlztn_ratio >= 1): # There is at least '1' COVID19 hospitalization-case per CHCentre
ppe_qty = Cap_HealthPersons * HealthPersons * 1
else:
ppe_qty = Cap_HealthPersons * HealthPersons * hosptlztn_ratio
ppe_pred.append(np.rint(ppe_qty).astype(np.int32))
return ppe_pred
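# In effect, both PPE helpers implement:
#   PPE_qty = 0.85 * total_health_personnel * min(hospitalized / CHCentres, 1)
# i.e. PPE demand scales linearly with the hospitalization-to-health-centre ratio
# and saturates once every community health centre has at least one case
# (this list version additionally rounds the result to an integer).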
def percent_comp(data):
data = list(data)
cap = max(data)
new_list = list()
for i in range(len(data)):
percent = (data[i]/cap) * 100
percent = np.rint(percent).astype(np.int32)
new_list.append(str(percent)+'%')
return new_list
#################################################################### Program Flow ####################################################################
def main_prog_flow(myCls, args):
# ".iloc[]" returns a Pandas DATAFRAME
# ".values[]" returns a NUMPY Array wrt dataframes
#cumm_2_reg_conv(myCls, args.root_path, 'covidtesting_ontario.csv')
#wkend_holiday(myCls, args.root_path, 'covidtesting_ontario.csv')
#avg_morbility_ontario(myCls, args.root_path, 'covidtesting_ontario.csv')
#case_age_gender_computer(myCls, args.root_path, 'covidtesting_ontario.csv')
# MISSING DATA: 'SimpleImputer' or 'IterativeImputer'
# STANDARDIZATION (Column-wise): Scale each/individual features (columns) to have values within a fixed range, usually [0,1]
# NON-LINEAR TRANSFORMATION (Column-wise): 'QuantileTransformer()' transforms features (columns) containing 'skewed/congested' or 'highly-spread' data into a standard normal/probability distribution
# NORMALIZATION (Row-wise): Scale each/individual samples (rows) to usually a unit-norm (square of all columns in the row == 1)
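    # Illustrative sketch of those preprocessing steps using the classes imported
    # above (not executed here; the array name X is a placeholder):
    #   X_imputed = SimpleImputer(strategy='most_frequent').fit_transform(X)
    #   X_scaled  = MinMaxScaler().fit_transform(X_imputed)                          # column-wise [0, 1]
    #   X_normal  = QuantileTransformer(output_distribution='normal').fit_transform(X_imputed)
    #   X_unit    = Normalizer(norm='l2').fit_transform(X_imputed)                   # row-wise unit norm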
### Generic hyperparameters
model = "ML" # TL/MTL | ML
train_frac = 0.85
test_frac = 1 - train_frac
### Load and Aggregate datasets
if (not os.path.isfile(args.root_path+'covidtesting_train_data.csv')) or (not os.path.isfile(args.root_path+'covidtesting_test_data.csv')):
train_data = pd.DataFrame()
test_data = pd.DataFrame()
cols_aft_drop = ['Infected Positive (I)', 'Hospitalized (incls. ICU)', 'Recovered (R)', 'Deaths (D)', 'Cumm Recovered', 'Cumm Deaths', 'Region', 'Wave', 'Cumm Vaccine', 'Virus Reprod Index', 'Lockdown', 'Travel Restrict', 'Province FaceCover', 'Holiday', 'Climate', 'retail_and_recreation_change', 'grocery_and_pharmacy_change', 'parks_change', 'transit_stations_change', 'workplaces_change', 'residential_change', 'Return Travellers', 'Employ Rate', 'Unemploy Rate', 'Labor Popln', 'Dry Land', 'CHCentres', '0 - 34 (M)', '35 - 69 (M)', '70 - Above (M)', '0 - 34 (F)', '35 - 69 (F)', '70 - Above (F)']
dset_suffix = ['alberta', 'british_columbia', 'manitoba', 'new_brunswick', 'ontario', 'quebec', 'saskatchewan']
for suffix in dset_suffix:
df = myCls.load_data(args.root_path, 'covidtesting_'+suffix+'.csv', sep=',', header=0, index_col=0, mode='READ')
df.drop(['Labor Popln (x1000)', 'Males', 'Avg Age (M)', 'Females', 'Avg Age (F)', 'Life Expectancy', 'COV_M_0_34', 'COV_M_35_69', 'COV_M_70_above', 'COV_F_0_34', 'COV_F_35_69', 'COV_F_70_above'], axis='columns', inplace=True)
# MISSING DATA: 'SimpleImputer' or 'IterativeImputer'
missing_data_imputer = SimpleImputer(missing_values=np.nan, strategy='most_frequent')
imputed_data = missing_data_imputer.fit_transform(df.values[:,:])
### 'ontario'-VALIDATION SAMPLE ###
if (suffix == 'ontario'):
test_val = np.rint(test_frac * len(imputed_data)).astype(np.int32)
ontario_dset_idx = [a for a in range(len(imputed_data))]
unique_subset = sample(ontario_dset_idx, test_val) # Selects a random/shuffled subset without replacement (UNIQUE SUBSET)
temp = pd.DataFrame(imputed_data[unique_subset,:])
test_data = test_data.append(temp, ignore_index=True)
test_data.columns = cols_aft_drop
test_data.to_csv(args.root_path+'covidtesting_test_data.csv', sep=',', header=True, index=False)
imputed_data = pd.DataFrame(imputed_data)
imputed_data.drop(unique_subset, axis='index', inplace=True)
### 'ontario'-VALIDATION SAMPLE ###
train_data = train_data.append(pd.DataFrame(imputed_data), ignore_index=True)
train_data.columns = cols_aft_drop
train_data.to_csv(args.root_path+'covidtesting_train_data.csv', sep=',', header=True, index=False)
else:
train_data = myCls.load_data(args.root_path, 'covidtesting_train_data.csv', sep=',', header=0, index_col=None, mode='READ')
test_data = myCls.load_data(args.root_path, 'covidtesting_test_data.csv', sep=',', header=0, index_col=None, mode='READ')
train_data = train_data.sample(frac=1, random_state=42, axis='index') # Shuffle 'train' dataset
test_data = test_data.sample(frac=1, random_state=42, axis='index') # Shuffle 'test' dataset
print("Shape of 'original' train data: ", train_data.shape)
print("Shape of 'original' test data: ", test_data.shape)
### Feature Extraction: Computation of additional/supplementary features
feat_extractor(train_data, label='train')
feat_extractor(test_data, label='test')
### Data Visualizations
print(train_data.head(n=5)) # First 5 rows of dataset
print(train_data.info(verbose=True)) # Basic information related to dataset
#print(train_data.tail(n=10)) # Last 10 rows of dataset
#print(train_data.describe()) # Describe basic statistical information wrt dataset
# Show Histogram-plot of dataset (bin == buckets/classes)
disp_cols = ['Infected Positive (I)', 'Hospitalized (incls. ICU)', 'Recovered (R)', 'Deaths (D)', 'Cumm Recovered', 'Cumm Deaths', 'Region', 'Wave', 'Cumm Vaccine', 'Virus Reprod Index', 'Lockdown', 'Travel Restrict', 'Province FaceCover', 'Holiday', 'Climate', 'retail_and_recreation_change', 'grocery_and_pharmacy_change', 'parks_change', 'transit_stations_change', 'workplaces_change', 'residential_change', 'Return Travellers', 'Employ Rate', 'Unemploy Rate', 'Labor Popln']
data_hist = train_data.hist(column=disp_cols, xlabelsize=20, ylabelsize=20, figsize=(22,21), bins=50)
print(data_hist)
### Feature Selection:
train_X = train_data.values[:,7:]
train_y = train_data.values[:,:4]
test_X = test_data.values[:,7:]
test_y = test_data.values[:,:4]
data_directory = 'plots_and_data/covidtesting'
fname = args.root_path + data_directory
X_extract_cols = ['Wave', 'Cumm Vaccine', 'Virus Reprod Index', 'Lockdown', 'Travel Restrict', 'Province FaceCover', 'Holiday', 'Climate', 'retail_and_recreation_change', 'grocery_and_pharmacy_change', 'parks_change', 'transit_stations_change', 'workplaces_change', 'residential_change', 'Return Travellers', 'Employ Rate', 'Unemploy Rate', 'Labor Popln', 'Dry Land', 'CHCentres', '0 - 34 (M)', '35 - 69 (M)', '70 - Above (M)', '0 - 34 (F)', '35 - 69 (F)', '70 - Above (F)', 'mobility_index', 'mobility_totpop', 'infect_mobility', 'wave_climate', 'infect_totpop_ratio', 'infect_travel_ratio', 'infect_M_0_34', 'infect_M_35_69', 'infect_M_70_above', 'infect_F_0_34', 'infect_F_35_69', 'infect_F_70_above', 'immunity_ratio', 'travel_totpop', 'travel_land', 'pop_density', 'chc_density']
#X_extract_cols = ['0: Wave', '1: Cumm Vaccine', '2: Virus Reprod Index', '3: Lockdown', '4: Travel Restrict', '5: Province FaceCover', '6: Holiday', '7: Climate', '8: retail_and_recreation_change', '9: grocery_and_pharmacy_change', '10: parks_change', '11: transit_stations_change', '12: workplaces_change', '13: residential_change', '14: Return Travellers', '15: Employ Rate', '16: Unemploy Rate', '17: Labor Popln', '18: Dry Land', '19: CHCentres', '20: 0 - 34 (M)', '21: 35 - 69 (M)', '22: 70 - Above (M)', '23: 0 - 34 (F)', '24: 35 - 69 (F)', '25: 70 - Above (F)', '26: mobility_index', '27: mobility_totpop', '28: infect_mobility', '29: wave_climate', '30: infect_totpop_ratio', '31: infect_travel_ratio', '32: infect_M_0_34', '33: infect_M_35_69', '34: infect_M_70_above', '35: infect_F_0_34', '36: infect_F_35_69', '37: infect_F_70_above', '38: immunity_ratio', '39: travel_totpop', '40: travel_land', '41: pop_density', '42: chc_density']
y_labels = ['Infected +ve (I) in Ontario', 'Hospitalized (H) in Ontario', 'Recovered (R) in Ontario', 'Deaths (D) in Ontario']
# Compute Correlation-Coefficients matrix
if model == 'TL/MTL':
myCls.correlation_coefs(train_data, 'Infected Positive (I)', fname)
# 'ExtraTreesRegressor' Feature-Selection strategy
top_feats = ExtraTreesRegressor()
top_feats = top_feats.fit(train_X, train_y[:,0])
feats_score = pd.Series(top_feats.feature_importances_)
feats_percent = percent_comp(top_feats.feature_importances_)
feats_percent = pd.Series(feats_percent)
cols = pd.Series(X_extract_cols)
feats_rank = pd.concat([cols, feats_score, feats_percent], axis='columns')
feats_rank.columns = ['Feature/Variable', 'Score', 'Percentage']
    print(feats_rank.nlargest(12, 'Score'))  # sort the feature-ranking dataframe by 'Score'
feats_score.index = X_extract_cols
feats_score.nlargest(12).plot(kind='barh', grid=True, figsize=(33,15))
plt.xlabel('Relevance Score')
plt.title('Feature Selection (ExtraTreesRegressor Top-k Features): ' + y_labels[0])
plt.savefig(fname[:-12] + y_labels[0] + '_ExtraTreesRegr_Features.png')
plt.show()
# 'SelectKBest' Feature-Selection strategy
for p in range(train_y.shape[1]):
top_22_feats = SelectKBest(score_func=f_regression, k=12)
top_22_feats = top_22_feats.fit(train_X, train_y[:,p])
relv_score = | pd.Series(top_22_feats.scores_) | pandas.Series |
import logging
import os
import pandas as pd
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.db.models import Count
from django.shortcuts import render, redirect, get_object_or_404
from django.urls import reverse
from django.views import generic
from django.http import HttpResponse
from django.conf import settings
from sickle import Sickle
import xml.etree.ElementTree as ET
import requests
from backend.models import Goal, SiddataUser, SiddataUserStudy, Degree, Subject, Activity
from recommenders.recommender_functions import get_active_recommenders
# from dashboard.create_chart import build_eval_df
from dashboard import create_plots
from django.utils.text import slugify
def home(request):
"""Start page.
:param request The view's request object."""
logger = logging.getLogger("backend.views.home")
logger.debug("Home loaded...")
page = 'home' # for highlighting navbar entry
return render(request, "backend/home.html")
@login_required
def list_goals(request):
"""Display all goals.
:param request The view's request object.
:returns renderable response from form template."""
page = 'list_goals' # for highlighting navbar entry
goals = Goal.objects.all().order_by('-makedate') # fetch all goals
num_goals = Goal.objects.count()
num_users = SiddataUser.objects.count()
degrees = Degree.objects.annotate(num=Count('siddatauserstudy')).order_by('name')
subjects = Subject.objects.annotate(num=Count('siddatauserstudy')).order_by('name')
return render(request, "backend/goallist.html", locals())
@login_required
def goals_data(request):
subjects = [] # subjects filter
degrees = [] # degrees filter
for param_key in request.POST:
if param_key.startswith('subject-'): # subjects to include
try:
subjects.append(Subject.objects.get(pk=int(param_key.split("-", 1)[1])))
except ValueError: # malformed id
continue
if param_key.startswith('degree-'): # competencies to include
try:
degrees.append(Degree.objects.get(pk=param_key.split("-", 1)[1]))
except ValueError:
continue
all_goals = Goal.objects.all()
if subjects:
all_goals = all_goals.filter(userrecommender__user__siddatauserstudy__subject__in=subjects)
if degrees:
        all_goals = all_goals.filter(userrecommender__user__siddatauserstudy__degree__in=degrees)
if 'nopaging' in request.POST: # all on one page...
wpp = len(all_goals) # goals per page
page = 1
else:
wpp = 25
page = request.POST.get('page')
    paginator = Paginator(all_goals, wpp)  # paginate goals: 25 per page, or all on one page when 'nopaging' is set
try:
goals = paginator.page(page)
offset = (int(page) - 1) * 25
except PageNotAnInteger:
# If page is not an integer, deliver first page.
goals = paginator.page(1)
offset = 0
except EmptyPage:
# If page is out of range (e.g. 9999), deliver last page of results.
goals = paginator.page(paginator.num_pages)
offset = (paginator.num_pages - 1) * 25
return render(request, 'backend/goals_data.html', locals())
@login_required
def goal_form(request):
"""Display goal form.
:param request The view's request object.
:returns renderable response from form template."""
page = 'goal_form' # for highlighting navbar entry
goals = Goal.objects.all()
return render(request, "backend/goalform.html", locals())
@login_required
def add_goal(request):
goal = request.POST.get("goal", None)
if goal:
u = SiddataUser.objects.get_or_create(origin_id='internal', origin='internal')[0]
for study in [{"degree": {"name": "Bachelor of Science (B.Sc.)", "degree_origin_id": "14"},
"subject": {"name": "Cognitive Science", "subject_origin_id": "729"},
"semester": 3}]:
us = SiddataUserStudy.objects.get_or_create(user=u,
degree_id=Degree.objects.get(name=study['degree']['name'],
degree_origin_id=study['degree'][
'degree_origin_id']).pk,
subject_id=Subject.objects.get(name=study['subject']['name'],
subject_origin_id=
study['subject'][
'subject_origin_id']).pk,
semester=study['semester'])[0]
g = Goal(goal=goal, userrecommender__user=u)
g.save()
messages.info(request, "Ziel '{}' angelegt.".format(goal))
else:
messages.warning(request, 'Ziel konnte nicht angelegt werden.')
return redirect('list_goals')
def backend_js(request):
return render(request, "backend/backend.js", locals())
@login_required
def backdoor(request, userid=None, view=None, list=None):
"""Provide simple client interface in Django backend."""
if not userid:
user = None # perhaps there is no user yet
username = request.GET.get('user', 'test001') # default: test001
originname = request.GET.get('origin', 'UOS') # default: UOS
else:
user = get_object_or_404(SiddataUser, pk=userid)
username = user.user_origin_id
originname = user.origin.api_endpoint
if not view and not list:
view = 'stats'
# fetch data via API
# this would not strictly be necessary because we have database access, but we want to use the standard api mechanisms
# e.g. for creating new users
import requests, json
r = requests.get('http://localhost:8000/backend/api/lists?origin={}&user_origin_id={}'.format(originname, username))
json_result = json.loads(r.text)
result = [json_result['lists'][x] for x in json_result['lists'].keys()] # reorganize as list of list dictionaries (TODO: Sorting)
lists = json_result['lists'].keys()
if not user: # data passed from form, perhaps new user?
user = get_object_or_404(SiddataUser, origin__api_endpoint=originname, user_origin_id=username)
if view == 'stats':
# fetch some statictics from database
num_recommenders = Goal.objects.filter(userrecommender__user=user).distinct('recommender').count() # number of active recommenders
num_goals = Goal.objects.filter(userrecommender__user=user).count() # number of goals for user
num_activities = Activity.objects.filter(goal__userrecommender__user=user).count() # number of activities for user
num_done_activities = Activity.objects.filter(goal__userrecommender__user=user, status="done").count() # number of activities for user
recommenders = get_active_recommenders()
# one template for everything
return render(request, "backend/backdoor.html", locals())
@login_required
def backdoor_interact(request, userid):
"""Receive and handle interaction feedback for backdoor client."""
type = request.GET.get("type", None) # determine type of interaction (question or ...)
user = get_object_or_404(SiddataUser, pk=userid)
if type == 'question':
activity_id = request.GET.get("activity_id", None)
answers = request.GET.get("answers", None)
if activity_id and answers:
data = {'data': {'activity_id': activity_id, 'type': 'Activity', 'attributes': {'answers': [answers], 'status': 'done'}}}
response = requests.patch('http://localhost:8000/backend/api/activity', json=data)
if response.ok:
messages.add_message(request, messages.INFO, 'Answer was processed.')
else: # code >= 400
messages.add_message(request, messages.ERROR, 'Error {} {}: {}'.format(response.status_code, response.reason, response.text))
else: # not all parameters given
messages.add_message(request, messages.WARNING, 'Malformed request (activtiy_id or answers missing for question.'.format(type))
elif type == 'reset':
if user:
uname = "{} ({})".format(user.user_origin_id, user.origin.api_endpoint)
user.delete()
messages.add_message(request, messages.INFO, 'User {} deleted.'.format(uname))
return redirect(reverse('backdoor')) # redirect to start of backdoor
else:
messages.add_message(request, messages.WARNING, 'Unknown user: {}'.format(userid))
else: # unknown type
messages.add_message(request, messages.WARNING, 'Unknown type: {}'.format(type))
    next = request.GET.get('next', reverse('backdoor_user', kwargs={'userid': user.id}))  # redirect to where we came from (filled by template as GET parameter 'next'), or to the user's backdoor start
return redirect(next) # show activities for current user again
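# The URL names referenced via redirect()/reverse() in these views ('home',
# 'list_goals', 'backdoor', 'backdoor_user', ...) imply a urls.py roughly like the
# sketch below; the route strings and the userid path converter are assumptions:
#   urlpatterns = [
#       path('', views.home, name='home'),
#       path('goals/', views.list_goals, name='list_goals'),
#       path('backdoor/', views.backdoor, name='backdoor'),
#       path('backdoor/<str:userid>/', views.backdoor, name='backdoor_user'),
#   ]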
def oer(request):
# rest Api test
# response = requests.post(
# url="https://www.twillo.de/edu-sharing/rest/node/v1/nodes/-home-",
# params={"query": "title:*"},
# headers={
# "Accept": "application/json",
# "Content-Type": "application/json",
# },
# )
# print(json.loads(response.content))
# return redirect('home')
# Get all resources by using oai service
sickle = Sickle("https://www.twillo.de/edu-sharing/eduservlet/oai/provider")
records = sickle.ListRecords(metadataPrefix='lom')
ns = {"oai": "http://www.openarchives.org/OAI/2.0/",
"lom": "http://ltsc.ieee.org/xsd/LOM"}
for record in records:
# Parse LOM-XML
root = ET.fromstring(record.raw)
header = root.find("oai:header", ns)
metadata = root.find("oai:metadata/lom:lom", ns)
# Mapping lom to Dublin Core partially based on
# https://www.researchgate.net/figure/Mapping-between-Unqualified-Dublin-Core-and-IEEE-LOM_tbl1_221425064
dcmes = {
"Contributor": list(elem.text for elem in metadata.findall(".//lom:lifeCycle/lom:contribute/lom:entity", namespaces=ns)),
"Coverage": list(elem.text for elem in metadata.findall(".//lom:classification/lom:taxonPath/lom:taxon/lom:entry/string", namespaces=ns)),
"Creator": metadata.findtext(".//lom:metaMetadata/lom:contribute/lom:entity", namespaces=ns),
"Date": header.findtext(".//oai:datestamp", namespaces=ns),
"Description": metadata.findtext(".//lom:general/lom:description/string", namespaces=ns),
"Format": metadata.findtext(".//lom:technical/lom:format", namespaces=ns),
"Identifier": metadata.findtext(".//lom:general/lom:identifier/lom:entry", namespaces=ns),
"Language": metadata.findtext(".//lom:general/lom:language", namespaces=ns),
"Publisher": metadata.findtext(".//lom:lifeCycle/lom:contribute[lom:role='publisher']/lom:entity", namespaces=ns),
"Relation": metadata.findtext(".//lom:technical/lom:location", namespaces=ns),
"Rights": metadata.findtext(".//lom:rights/lom:description/string", namespaces=ns),
"Source": metadata.findtext(".//lom:relation/lom:kind/lom:source", namespaces=ns),
"Subject": list(metadata.find(".//lom:general/lom:keyword", namespaces=ns).itertext()),
"Title": metadata.findtext(".//lom:general/lom:title/string", namespaces=ns),
"Type": list(metadata.find(".//lom:educational/lom:learningResourceType", namespaces=ns).itertext()),
}
print(dcmes)
return redirect('home')
################################### for dashboard ###################################
#path('export_csv', permission_required('auth.view_dashboard')(views.export_csv), name="export_csv"),
def export_csv(request):
response = HttpResponse(content_type='text/csv')
response['Content-Disposition'] = 'attachment; filename="csv_dump.csv"'
charts = create_plots.create()
final_df = pd.DataFrame()
for chart in charts:
if isinstance(chart.data, pd.DataFrame):
final_df = pd.concat((final_df, chart.data))
else:
try:
final_df = pd.concat((final_df, | pd.DataFrame(chart.data) | pandas.DataFrame |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 4 07:59:39 2021
@author: suriyaprakashjambunathan
"""
#Regressors
from sklearn.ensemble.forest import RandomForestRegressor
from sklearn.ensemble.forest import ExtraTreesRegressor
from sklearn.ensemble.bagging import BaggingRegressor
from sklearn.ensemble.gradient_boosting import GradientBoostingRegressor
from sklearn.ensemble.weight_boosting import AdaBoostRegressor
from sklearn.gaussian_process.gpr import GaussianProcessRegressor
from sklearn.isotonic import IsotonicRegression
from sklearn.linear_model.bayes import ARDRegression
from sklearn.linear_model.huber import HuberRegressor
from sklearn.linear_model.base import LinearRegression
from sklearn.linear_model.passive_aggressive import PassiveAggressiveRegressor
from sklearn.linear_model.stochastic_gradient import SGDRegressor
from sklearn.linear_model.theil_sen import TheilSenRegressor
from sklearn.linear_model.ransac import RANSACRegressor
from sklearn.multioutput import MultiOutputRegressor
from sklearn.neighbors.regression import KNeighborsRegressor
from sklearn.neighbors.regression import RadiusNeighborsRegressor
from sklearn.neural_network.multilayer_perceptron import MLPRegressor
from sklearn.tree.tree import DecisionTreeRegressor
from sklearn.tree.tree import ExtraTreeRegressor
from sklearn.svm.classes import SVR
from sklearn.linear_model import BayesianRidge
from sklearn.cross_decomposition import CCA
from sklearn.linear_model import ElasticNet
from sklearn.linear_model import ElasticNetCV
from sklearn.kernel_ridge import KernelRidge
from sklearn.linear_model import Lars
from sklearn.linear_model import LarsCV
from sklearn.linear_model import Lasso
from sklearn.linear_model import LassoCV
from sklearn.linear_model import LassoLars
from sklearn.linear_model import LassoLarsIC
from sklearn.linear_model import LassoLarsCV
from sklearn.linear_model import MultiTaskElasticNet
from sklearn.linear_model import MultiTaskElasticNetCV
from sklearn.linear_model import MultiTaskLasso
from sklearn.linear_model import MultiTaskLassoCV
from sklearn.svm import NuSVR
from sklearn.linear_model import OrthogonalMatchingPursuit
from sklearn.linear_model import OrthogonalMatchingPursuitCV
from sklearn.cross_decomposition import PLSCanonical
from sklearn.cross_decomposition import PLSRegression
from sklearn.linear_model import Ridge
from sklearn.linear_model import RidgeCV
from sklearn.svm import LinearSVR
# Classifiers
from sklearn.dummy import DummyClassifier
from sklearn.naive_bayes import ComplementNB
from sklearn.experimental import enable_hist_gradient_boosting # noqa
from sklearn.ensemble import HistGradientBoostingClassifier
from sklearn.tree import ExtraTreeClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.svm.classes import OneClassSVM
from sklearn.neural_network.multilayer_perceptron import MLPClassifier
from sklearn.neighbors.classification import RadiusNeighborsClassifier
from sklearn.neighbors.classification import KNeighborsClassifier
from sklearn.multioutput import ClassifierChain
from sklearn.multioutput import MultiOutputClassifier
from sklearn.multiclass import OutputCodeClassifier
from sklearn.multiclass import OneVsOneClassifier
from sklearn.multiclass import OneVsRestClassifier
from sklearn.linear_model.stochastic_gradient import SGDClassifier
from sklearn.linear_model.ridge import RidgeClassifierCV
from sklearn.linear_model.ridge import RidgeClassifier
from sklearn.linear_model.passive_aggressive import PassiveAggressiveClassifier
from sklearn.gaussian_process.gpc import GaussianProcessClassifier
from sklearn.ensemble.weight_boosting import AdaBoostClassifier
from sklearn.ensemble.gradient_boosting import GradientBoostingClassifier
from sklearn.ensemble.bagging import BaggingClassifier
from sklearn.ensemble.forest import ExtraTreesClassifier
from sklearn.ensemble.forest import RandomForestClassifier
from sklearn.naive_bayes import BernoulliNB
from sklearn.calibration import CalibratedClassifierCV
from sklearn.naive_bayes import GaussianNB
from sklearn.semi_supervised import LabelPropagation
from sklearn.semi_supervised import LabelSpreading
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.svm import LinearSVC
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import LogisticRegressionCV
from sklearn.naive_bayes import MultinomialNB
from sklearn.neighbors import NearestCentroid
from sklearn.svm import NuSVC
from sklearn.linear_model import Perceptron
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
from sklearn.svm import SVC
from sklearn.mixture import GaussianMixture
Name_c = ['BaggingClassifier',
'BernoulliNB',
'CalibratedClassifierCV',
'ComplementNB',
'DecisionTreeClassifier',
'DummyClassifier',
'ExtraTreeClassifier',
'ExtraTreesClassifier',
'GaussianNB',
'GaussianProcessClassifier',
'GradientBoostingClassifier',
'HistGradientBoostingClassifier',
'KNeighborsClassifier',
'LabelPropagation',
'LabelSpreading',
'LinearDiscriminantAnalysis',
'LinearSVC',
'LogisticRegression',
'LogisticRegressionCV',
'MLPClassifier',
'MultinomialNB',
'NearestCentroid',
'PassiveAggressiveClassifier',
'Perceptron',
'QuadraticDiscriminantAnalysis',
'RadiusNeighborsClassifier',
'RandomForestClassifier',
'RidgeClassifier',
'RidgeClassifierCV',
'SGDClassifier',
'SVC']
Name_r = [ "RandomForestRegressor",
"ExtraTreesRegressor",
"BaggingRegressor",
"GradientBoostingRegressor",
"AdaBoostRegressor",
"GaussianProcessRegressor",
"ARDRegression",
"HuberRegressor",
"LinearRegression",
"PassiveAggressiveRegressor",
"SGDRegressor",
"TheilSenRegressor",
"KNeighborsRegressor",
"RadiusNeighborsRegressor",
"MLPRegressor",
"DecisionTreeRegressor",
"ExtraTreeRegressor",
"SVR",
"BayesianRidge",
"CCA",
"ElasticNet",
"ElasticNetCV",
"KernelRidge",
"Lars",
"LarsCV",
"Lasso",
"LassoCV",
"LassoLars",
"LassoLarsIC",
"LassoLarsCV",
"NuSVR",
"OrthogonalMatchingPursuit",
"OrthogonalMatchingPursuitCV",
"PLSCanonical",
"Ridge",
"RidgeCV",
"LinearSVR"]
# Importing the Libraries
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC
from sklearn.metrics import explained_variance_score
from sklearn.metrics import confusion_matrix, accuracy_score
from sklearn.utils import class_weight
from sklearn.ensemble import RandomForestClassifier
import warnings
warnings.simplefilter(action='ignore')
# Fitting the nan values with the average
def avgfit(l):
na = pd.isna(l)
arr = []
for i in range(len(l)):
if na[i] == False:
arr.append(l[i])
avg = sum(arr)/len(arr)
fit_arr = []
for i in range(len(l)):
if na[i] == False:
fit_arr.append(l[i])
elif na[i] == True:
fit_arr.append(avg)
return(fit_arr)
# Weighted Mean Absolute Percentage Error
def mean_absolute_percentage_error(y_true, y_pred):
l = len(y_true)
num = 0
den = 0
for i in range(l):
num = num + (abs(y_pred[i] - y_true[i]))
den = den + y_true[i]
return abs(num/den) * 100
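# Equivalent closed form of the function above:
#   WMAPE = 100 * sum_i |y_pred_i - y_true_i| / sum_i y_true_i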
def regressors(Name, X_train,y_train):
regs = []
for i in range(len(Name)):
regressor = globals()[Name[i]]
print(regressor)
Regressor = regressor()
Regressor.fit(X_train, y_train)
regs.append(Regressor)
return(regs)
def classifiers(Name, X_train,y_train):
clfs = []
for i in range(len(Name)):
classifier = globals()[Name[i]]
print(classifier)
Classifier = classifier()
Classifier.fit(X_train, y_train)
clfs.append(Classifier)
return(clfs)
# Importing the Dataset
dataset = pd.read_csv('antenna.csv')
#X
X = dataset.loc[:, dataset.columns != 'vswr']
X = X.loc[:, X.columns != 'gain']
X = X.loc[:, X.columns != 'bandwidth']
Xi = X.iloc[:, :-3]
Xi = pd.DataFrame(Xi)
#y
bw = avgfit(list(dataset['bandwidth']))
dataset['bandwidth'] = bw
gain =avgfit(list(dataset['gain']))
dataset['gain'] = gain
vswr =avgfit(list(dataset['vswr']))
dataset['vswr'] = vswr
y1 = pd.DataFrame(bw)
y2 = pd.DataFrame(gain)
y3 = pd.DataFrame(vswr)
# Accuracy list
acc_list = []
params = ['bandwidth','gain','vswr']
y = pd.DataFrame()
y['bandwidth'] = bw
y['vswr'] = vswr
y['gain'] = gain
acc_conf = []
max_acc = []
for param in params:
print(param)
# Splitting into Test and Train set
X_train, X_test, y_train, y_test = train_test_split(Xi, y[param], test_size = 0.3, random_state = 0)
y_train = pd.DataFrame(y_train)
y_test = pd.DataFrame(y_test)
#print(name_r)
Regressor = regressors(Name_r,X_train,y_train)
for reg in Regressor :
y_pred = reg.predict(X_test)
wmape = mean_absolute_percentage_error(list(y_test[param]), list(y_pred))
if not np.isnan(wmape):
try:
acc_conf.append([param, reg, wmape[0]])
except:
acc_conf.append([param, reg, wmape])
wmape = pd.DataFrame(acc_conf)
wmape.to_csv('regressors_wmape.csv')
# Importing the Dataset
dataset = pd.read_csv('antenna.csv')
#X
X = dataset.loc[:, dataset.columns != 'vswr']
X = X.loc[:, X.columns != 'gain']
X = X.loc[:, X.columns != 'bandwidth']
Xi = X.iloc[:, :-3]
Xi = | pd.DataFrame(Xi) | pandas.DataFrame |
# Here we split the audio files into 5 s segments, then compute the standard
# deviation of each segment; segments whose deviation falls below a cut-off
# point are removed from the dataset.
# From here onwards the retained segments are sent on to be made into spectrograms.
from pandas import DataFrame
import numpy as np
def split(
pre_df,
sample_rate=22050,
second_split=5,
split_type="quartile",
split_cutoff=0.15,
bar_config=None,
):
labeled = []
for i, row in pre_df.iterrows():
try:
split_buffer = __split_buffer(
row["audio_buffer"], sample_rate, second_split
)
filtered_buffer = __filter_chunks(split_buffer, split_type, split_cutoff)
for buffer in filtered_buffer:
labeled.append((row["label"], buffer))
if bar_config:
bar_config.next()
except ZeroDivisionError:
pass
if bar_config:
bar_config.finish()
    return DataFrame(labeled, columns=["label", "audio_buffer"])
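# Minimal usage sketch (added illustration; it assumes the private
# __split_buffer and __filter_chunks helpers defined elsewhere in this module,
# plus librosa for loading audio, so it is left commented out):
#   import librosa
#   buffer, sr = librosa.load("example.wav", sr=22050)
#   pre_df = DataFrame([("speech", buffer)], columns=["label", "audio_buffer"])
#   chunks_df = split(pre_df, sample_rate=sr, second_split=5)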
# -*- coding: utf-8 -*-
"""Verzeo Minor Project - Data Science.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1dr0CQv6tuQq2uGs9Ddj1T_53ukbaT73_
<center>
<h1><b>Prediction of Profit</b></h1>
<h3><b>Verzeo - Data Science - Minor Project</b></h3>
</center>
<b>Name: </b> <NAME>
<br>
<b>Email: </b><EMAIL>
<br>
<b>Program: </b>Data Science October
<br>
<b>Project Description: </b>Perform regression analysis to predict the profit of each of the 50 startups mentioned in the dataset by taking all the input parameters (multiple linear regression).
<br>
<b>Dataset Link: </b>https://raw.githubusercontent.com/arib168/data/main/50_Startups.csv
# Importing Libraries
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import sklearn
"""# Importing Dataset"""
file_path = "https://raw.githubusercontent.com/arib168/data/main/50_Startups.csv"
data = pd.read_csv(file_path)
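# Minimal sketch of the multiple linear regression described above (added
# illustration; the "Profit" target column and one-hot encoding of "State" via
# get_dummies are assumptions about this dataset).
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
_X = pd.get_dummies(data.drop(columns=["Profit"]), drop_first=True)
_y = data["Profit"]
_X_train, _X_test, _y_train, _y_test = train_test_split(_X, _y, test_size=0.2, random_state=0)
_model = LinearRegression().fit(_X_train, _y_train)
print("R^2 on the held-out test set:", _model.score(_X_test, _y_test))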
'''
Integration tests for pipeline split behavior
'''
__author__ = '<NAME>'
import unittest
import pandas as pd
from abc import ABC, abstractmethod
from simpleml.tests.utils import assert_split_equal
from simpleml.datasets import SingleLabelPandasDataset, MultiLabelPandasDataset
from simpleml.datasets.dataset_splits import Split
from simpleml.pipelines import NoSplitPipeline, ExplicitSplitPipeline, RandomSplitPipeline
from simpleml.pipelines.projected_splits import ProjectedDatasetSplit
class _PipelineSplitTests(ABC):
'''
Pandas datasets are able to return copies of splits
in case of downstream inplace mutations
Validate consistent and resilient behavior
'''
def setUp(self):
self.dataset = self.dataset_cls(**self.dataset_params)
self.dataset.dataframe = self.build_dataset()
self.pipeline = self.pipeline_cls(**self.pipeline_params)
self.pipeline.add_dataset(self.dataset)
@property
def dataset_params(self):
return {}
@property
def pipeline_params(self):
return {}
@abstractmethod
def expected_split_contents(self):
pass
@abstractmethod
def build_dataset(self):
pass
@abstractmethod
def example_split_name(self):
pass
def test_getting_splits_with_mutation(self):
'''
Mutate split and re-retrieve
No split behavior passes all the data for any split
'''
split = self.pipeline.get_dataset_split(split=self.example_split_name())
projected_split = split.projected_split
self.assertTrue(isinstance(split, ProjectedDatasetSplit))
self.assertTrue(isinstance(projected_split, Split))
assert_split_equal(projected_split, self.expected_split_contents())
assert_split_equal(split, self.expected_split_contents())
# mutate
projected_split['X'] = None
with self.assertRaises(AssertionError):
assert_split_equal(projected_split, self.expected_split_contents())
# assert equality
new_split = self.pipeline.get_dataset_split(split=self.example_split_name())
assert_split_equal(split, self.expected_split_contents())
assert_split_equal(new_split, self.expected_split_contents())
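# Rationale (added note): the pipelines hand out copies rather than views, so
# mutating one retrieved split must not corrupt what a later call returns. With
# a plain pandas DataFrame the distinction looks like:
#   df = pd.DataFrame({'a': [1, 2]})
#   alias = df          # same object - in-place changes leak back
#   copy = df.copy()    # independent - in-place changes stay local
#   copy['a'] = 0
#   assert df['a'].tolist() == [1, 2]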
class NoSplitPipelineSingleLabelPandasDatasetSplitTests(_PipelineSplitTests, unittest.TestCase):
'''
Pandas datasets are able to return copies of splits
in case of downstream inplace mutations
Validate consistent and resilient behavior
'''
dataset_cls = SingleLabelPandasDataset
pipeline_cls = NoSplitPipeline
@property
def dataset_params(self):
return {'label_columns': ['c'], 'other_named_split_sections': {'other': ['e']}}
def example_split_name(self):
return None
def expected_split_contents(self):
return Split(
X=pd.DataFrame([
{'a': 1, 'b': 2},
{'a': 2, 'b': 3},
{'a': 3, 'b': 4},
{'a': 4, 'b': 5},
{'a': 5, 'b': 6},
{'a': 6, 'b': 7}]),
y=pd.Series([3, 4, 5, 6, 7, 8], name='c'),
other=pd.Series([5, 6, 7, 8, 9, 10], name='e'))
def build_dataset(self):
return self.dataset_cls.concatenate_dataframes(
dataframes=[
pd.DataFrame([{'a': 1, 'b': 2, 'c': 3, 'e': 5}, {'a': 2, 'b': 3, 'c': 4, 'e': 6}]),
pd.DataFrame([{'a': 3, 'b': 4, 'c': 5, 'e': 7}, {'a': 4, 'b': 5, 'c': 6, 'e': 8}]),
pd.DataFrame([{'a': 5, 'b': 6, 'c': 7, 'e': 9}, {'a': 6, 'b': 7, 'c': 8, 'e': 10}]),
],
split_names=['first', 'second', 'third']
)
class ExplicitSplitPipelineSingleLabelPandasDatasetSplitTests(_PipelineSplitTests, unittest.TestCase):
'''
Pandas datasets are able to return copies of splits
in case of downstream inplace mutations
Validate consistent and resilient behavior
'''
dataset_cls = SingleLabelPandasDataset
pipeline_cls = ExplicitSplitPipeline
@property
def dataset_params(self):
return {'label_columns': ['c'], 'other_named_split_sections': {'other': ['e']}}
def example_split_name(self):
return 'first'
def expected_split_contents(self):
return Split(
X=pd.DataFrame([
{'a': 1, 'b': 2},
{'a': 2, 'b': 3}]),
y=pd.Series([3, 4], name='c'),
other=pd.Series([5, 6], name='e'))
def build_dataset(self):
return self.dataset_cls.concatenate_dataframes(
dataframes=[
pd.DataFrame([{'a': 1, 'b': 2, 'c': 3, 'e': 5}, {'a': 2, 'b': 3, 'c': 4, 'e': 6}]),
pd.DataFrame([{'a': 3, 'b': 4, 'c': 5, 'e': 7}, {'a': 4, 'b': 5, 'c': 6, 'e': 8}]),
pd.DataFrame([{'a': 5, 'b': 6, 'c': 7, 'e': 9}, {'a': 6, 'b': 7, 'c': 8, 'e': 10}]),
],
split_names=['first', 'second', 'third']
)
class RandomSplitPipelineSingleLabelPandasDatasetSplitTests(_PipelineSplitTests, unittest.TestCase):
'''
Pandas datasets are able to return copies of splits
in case of downstream inplace mutations
Validate consistent and resilient behavior
'''
dataset_cls = SingleLabelPandasDataset
pipeline_cls = RandomSplitPipeline
@property
def dataset_params(self):
return {'label_columns': ['c'], 'other_named_split_sections': {'other': ['e']}}
@property
def pipeline_params(self):
return {'train_size': 0.5, 'random_state': 10}
def example_split_name(self):
return 'TRAIN'
def expected_split_contents(self):
return Split(
X=pd.DataFrame([
{'a': 4, 'b': 5},
{'a': 5, 'b': 6},
{'a': 2, 'b': 3}],
index=[3, 4, 1]),
y=pd.Series([6, 7, 4], index=[3, 4, 1], name='c'),
other=pd.Series([8, 9, 6], index=[3, 4, 1], name='e'))
def build_dataset(self):
return self.dataset_cls.concatenate_dataframes(
dataframes=[
pd.DataFrame([{'a': 1, 'b': 2, 'c': 3, 'e': 5}, {'a': 2, 'b': 3, 'c': 4, 'e': 6}]),
pd.DataFrame([{'a': 3, 'b': 4, 'c': 5, 'e': 7}, {'a': 4, 'b': 5, 'c': 6, 'e': 8}]),
pd.DataFrame([{'a': 5, 'b': 6, 'c': 7, 'e': 9}, {'a': 6, 'b': 7, 'c': 8, 'e': 10}]),
],
split_names=['first', 'second', 'third']
)
class NoSplitPipelineMultiLabelPandasDatasetSplitTests(_PipelineSplitTests, unittest.TestCase):
'''
Pandas datasets are able to return copies of splits
in case of downstream inplace mutations
Validate consistent and resilient behavior
'''
dataset_cls = MultiLabelPandasDataset
pipeline_cls = NoSplitPipeline
@property
def dataset_params(self):
return {'label_columns': ['c', 'd'], 'other_named_split_sections': {'other': ['e']}}
def example_split_name(self):
return None
def expected_split_contents(self):
return Split(
X=pd.DataFrame([
{'a': 1, 'b': 2},
{'a': 2, 'b': 3},
{'a': 3, 'b': 4},
{'a': 4, 'b': 5},
{'a': 5, 'b': 6},
{'a': 6, 'b': 7}]),
y=pd.DataFrame([
{'c': 3, 'd': 4},
{'c': 4, 'd': 5},
{'c': 5, 'd': 6},
{'c': 6, 'd': 7},
{'c': 7, 'd': 8},
{'c': 8, 'd': 9}]),
other=pd.DataFrame([
{'e': 5},
{'e': 6},
{'e': 7},
{'e': 8},
{'e': 9},
{'e': 10}])
)
def build_dataset(self):
return self.dataset_cls.concatenate_dataframes(
dataframes=[
pd.DataFrame([{'a': 1, 'b': 2, 'c': 3, 'd': 4, 'e': 5}, {'a': 2, 'b': 3, 'c': 4, 'd': 5, 'e': 6}]),
pd.DataFrame([{'a': 3, 'b': 4, 'c': 5, 'd': 6, 'e': 7}, {'a': 4, 'b': 5, 'c': 6, 'd': 7, 'e': 8}]),
pd.DataFrame([{'a': 5, 'b': 6, 'c': 7, 'd': 8, 'e': 9}, {'a': 6, 'b': 7, 'c': 8, 'd': 9, 'e': 10}]),
],
split_names=['first', 'second', 'third']
)
class ExplicitSplitPipelineMultiLabelPandasDatasetSplitTests(_PipelineSplitTests, unittest.TestCase):
'''
Pandas datasets are able to return copies of splits
in case of downstream inplace mutations
Validate consistent and resilient behavior
'''
dataset_cls = MultiLabelPandasDataset
pipeline_cls = ExplicitSplitPipeline
    @property
def dataset_params(self):
return {'label_columns': ['c', 'd'], 'other_named_split_sections': {'other': ['e']}}
def example_split_name(self):
return 'first'
def expected_split_contents(self):
return Split(
X=pd.DataFrame([
{'a': 1, 'b': 2},
{'a': 2, 'b': 3}]),
y=pd.DataFrame([
{'c': 3, 'd': 4},
{'c': 4, 'd': 5}]),
other=pd.DataFrame([
{'e': 5},
{'e': 6}])
)
def build_dataset(self):
return self.dataset_cls.concatenate_dataframes(
dataframes=[
pd.DataFrame([{'a': 1, 'b': 2, 'c': 3, 'd': 4, 'e': 5}, {'a': 2, 'b': 3, 'c': 4, 'd': 5, 'e': 6}]),
pd.DataFrame([{'a': 3, 'b': 4, 'c': 5, 'd': 6, 'e': 7}, {'a': 4, 'b': 5, 'c': 6, 'd': 7, 'e': 8}]),
                pd.DataFrame([{'a': 5, 'b': 6, 'c': 7, 'd': 8, 'e': 9}, {'a': 6, 'b': 7, 'c': 8, 'd': 9, 'e': 10}]),
            ],
            split_names=['first', 'second', 'third']
        )
import pandas as pd
import numpy as np
import openml
from pandas.api.types import is_numeric_dtype
from sklearn.model_selection import cross_validate, train_test_split, GridSearchCV, RandomizedSearchCV
from sklearn.metrics import f1_score, mean_squared_error
from sklearn.pipeline import Pipeline
from statistics import stdev
from warnings import filterwarnings, resetwarnings
from time import time
from datetime import datetime
from os import mkdir, listdir
from shutil import rmtree
import concurrent
import matplotlib.pyplot as plt
import seaborn as sns
from multiprocessing import Process, Queue
def get_single_dataset(q, dataset_did, dataset_name):
dataset = openml.datasets.get_dataset(dataset_did)
print(f" Loaded {dataset_name} from openml.org")
q.put(dataset)
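# Explanatory note (added): get_single_dataset is kept at module level so that
# multiprocessing can pickle it. collect_data() below runs it in a separate
# Process and joins with a timeout, roughly:
#   q = Queue()
#   p = Process(target=get_single_dataset, args=(q, dataset_did, dataset_name))
#   p.start(); p.join(timeout=20)
#   dataset = None if q.empty() else q.get()
# so a hung download from openml.org can be abandoned instead of blocking the run.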
class DatasetsTester:
"""
Tool to compare predictors (classifiers or regressors) on a set of datasets collected from openml.org.
This simplifies automatically comparing the performance of predictors on potentially large numbers
of datasets, thereby supporting more thorough and accurate testing of predictors.
"""
# have the directories and problem type set here
def __init__(self, problem_type, path_local_cache=""):
"""
problem_type: str
Either "classification" or "regression"
All estimators will be compared using the same metric, so it is necessary that all
datasets used are of the same type.
path_local_cache: str
            Folder identifying the local cache of datasets, stored in .csv format.
"""
self.problem_type = problem_type
self.path_local_cache = path_local_cache
self.openml_df = None
def check_problem_type(self):
problem_type_okay = self.problem_type in ["classification", "regression", "both"]
if not problem_type_okay:
print("problem_type must be one of: 'classification', 'regression', 'both'")
return problem_type_okay
def find_by_name(self, names_arr):
"""
Identifies, but does not collect, the set of datasets meeting the specified set of names.
Parameters
----------
names_arr: array of dataset names
Returns
-------
dataframe with a row for each dataset on openml meeting the specified set of names.
"""
if not self.check_problem_type():
return None
self.openml_df = openml.datasets.list_datasets(output_format="dataframe")
self.openml_df = self.openml_df[self.openml_df.name.isin(names_arr)]
return self.openml_df
def find_by_tag(self, my_tag):
"""
Identifies, but does not collect, the set of datasets attached to the specified tag.
Parameters
----------
my_tag: the dataset tag
Returns
-------
dataframe with a row for each dataset on openml meeting the specified tag.
"""
if not self.check_problem_type():
return None
self.openml_df = openml.datasets.list_datasets(tag=my_tag, output_format="dataframe")
return self.openml_df
def find_datasets(self,
use_cache=True,
min_num_classes=2,
max_num_classes=10,
min_num_minority_class=5,
max_num_minority_class=np.inf,
min_num_features=0,
max_num_features=100,
min_num_instances=500,
max_num_instances=5000,
min_num_numeric_features=0,
max_num_numeric_features=50,
min_num_categorical_features=0,
max_num_categorical_features=50):
"""
        Identifies, but does not collect, the set of datasets meeting the specified criteria.
This, find_by_name(), or find_by_tag() must be called to identify the potential set of datasets to be collected.
Parameters
----------
All other parameters are direct checks of the statistics about each dataset provided by openml.org.
Returns
-------
dataframe with a row for each dataset on openml meeting the specified set of criteria.
"""
if not self.check_problem_type():
return None
if self.problem_type == "classification" and (min_num_classes <= 0 or max_num_classes <= 0):
print("For classification datasets, both min_num_classes and max_num_classes must be specified.")
return None
read_dataset_list = False # Set True if manage to read from cache. Otherwise read from openml.org.
if use_cache and self.path_local_cache != "":
try:
path_to_file = self.path_local_cache + "/dataset_list.csv"
self.openml_df = pd.read_csv(path_to_file)
read_dataset_list = True
except Exception as e:
if "No such file or directory:" not in str(e):
print(f" Error reading file: {e}")
else:
print(" File not found in cache.")
if not read_dataset_list:
self.openml_df = openml.datasets.list_datasets(output_format="dataframe")
if use_cache and self.path_local_cache != "":
try:
mkdir(self.path_local_cache)
except FileExistsError:
pass
except Exception as e:
print(f"Error creating local cache folder: {e}")
path_to_file = self.path_local_cache + "/dataset_list.csv"
self.openml_df.to_csv(path_to_file)
# Filter out datasets where some key attributes are unspecified
self.openml_df = self.openml_df[
(np.isnan(self.openml_df.NumberOfFeatures) == False) &
(np.isnan(self.openml_df.NumberOfInstances) == False) &
(np.isnan(self.openml_df.NumberOfInstancesWithMissingValues) == False) &
(np.isnan(self.openml_df.NumberOfMissingValues) == False) &
(np.isnan(self.openml_df.NumberOfNumericFeatures) == False) &
(np.isnan(self.openml_df.NumberOfSymbolicFeatures) == False)
]
self.openml_df = self.openml_df[
(self.openml_df.NumberOfFeatures >= min_num_features) &
(self.openml_df.NumberOfFeatures <= max_num_features) &
(self.openml_df.NumberOfInstances >= min_num_instances) &
(self.openml_df.NumberOfInstances <= max_num_instances) &
(self.openml_df.NumberOfNumericFeatures >= min_num_numeric_features) &
(self.openml_df.NumberOfNumericFeatures <= max_num_numeric_features) &
(self.openml_df.NumberOfSymbolicFeatures >= min_num_categorical_features) &
(self.openml_df.NumberOfSymbolicFeatures <= max_num_categorical_features)
]
if self.problem_type == "classification":
self.openml_df = self.openml_df[
(np.isnan(self.openml_df.MajorityClassSize) == False) &
(np.isnan(self.openml_df.MaxNominalAttDistinctValues) == False) &
(np.isnan(self.openml_df.MinorityClassSize) == False) &
(np.isnan(self.openml_df.NumberOfClasses) == False)
]
self.openml_df = self.openml_df[
(self.openml_df.NumberOfClasses >= min_num_classes) &
(self.openml_df.NumberOfClasses <= max_num_classes) &
(self.openml_df.MinorityClassSize >= min_num_minority_class) &
(self.openml_df.MinorityClassSize <= max_num_minority_class)
]
if self.problem_type == "regression":
self.openml_df = self.openml_df[self.openml_df.NumberOfClasses == 0]
return self.openml_df
def collect_data(self,
max_num_datasets_used=-1,
method_pick_sets="pick_random",
shuffle_random_state=0,
exclude_list=None,
use_automatic_exclude_list=False,
max_cat_unique_vals=20,
keep_duplicated_names=False,
check_local_cache=False,
check_online=True,
save_local_cache=False,
preview_data=False,
one_hot_encode=True,
fill_nan_and_inf_zero=True,
verbose=False):
"""
This method collects the data from openml.org, unless check_local_cache is True and the dataset is available
in the local folder. This will collect the specified subset of datasets identified by the most recent call
to find_by_name() or find_datasets(). This allows users to call those methods until a suitable
collection of datasets have been identified.
Parameters
----------
max_num_datasets_used: integer
The maximum number of datasets to collect.
method_pick_sets: str
            If only a subset of the full set of matches is to be collected, this identifies whether those
            will be selected randomly or simply using the first matches.
shuffle_random_state: int
Where method_pick_sets is "pick_random", this is used to shuffle the order of the datasets
exclude_list: array
list of names of datasets to exclude
use_automatic_exclude_list: bool
If set True, any files that can't be loaded will be appended to a list and subsequent calls will not attempt
to load them. This may be set to save time. However, if there are errors simply due to internet problems or
temporary issues, this may erroneously exclude some datasets.
max_cat_unique_vals: int
As categorical columns are one-hot encoded, it may not be desirable to one-hot encode categorical
columns with large numbers of unique values. Columns with a greater number of unique values than
max_cat_unique_vals will be dropped.
keep_duplicated_names: bool
If False, for each set of datasets with the same name, only the one with the highest
version number will be used. In some cases, different versions of a dataset are significantly different.
save_local_cache: bool
If True, any collected datasets will be saved locally in path_local_cache
check_local_cache: bool
If True, before collecting any datasets from openml.org, each will be checked to determine if
it is already stored locally in path_local_cache
check_online: bool
If True, openml.org may be checked for the dataset, unless check_local_cache is True and the dataset has
been cached.
preview_data: bool
Indicates if the first rows of each collected dataset should be displayed
one_hot_encode: bool
If True, categorical columns are one-hot encoded. This is necessary for many types of predictor, but
may be done elsewhere, for example in a pipeline passed to the run_tests() function.
fill_nan_and_inf_zero: bool
If True, all instances of NaN, inf and -inf are replaced with 0.0. Replacing these values with something
            valid is necessary for many types of predictor, but may be done elsewhere, for example in a pipeline passed
to the run_tests() function.
verbose: bool
If True, messages will be displayed indicating errors collecting any datasets.
Returns
-------
dataset_collection: dictionary containing: index in this collection, dataset_name, version, X, y
This method will attempt to collect as many datasets as specified, even where additional datasets must
be examined.
"""
def append_auto_exclude_list(did):
if not use_automatic_exclude_list:
return
auto_exclude_list.append(did)
def read_auto_exclude_list():
nonlocal auto_exclude_list
if not use_automatic_exclude_list or self.path_local_cache == "":
return
try:
path_to_file = self.path_local_cache + "/exclude_list.csv"
auto_list_df = pd.read_csv(path_to_file)
except Exception as e:
print(f" Error reading file: {e}")
return
auto_exclude_list = auto_list_df['List'].tolist()
def save_auto_exclude_list():
nonlocal auto_exclude_list
if not use_automatic_exclude_list or self.path_local_cache == "" or len(auto_exclude_list) == 0:
return
try:
mkdir(self.path_local_cache)
except FileExistsError:
pass
except Exception as e:
print(f"Error creating local cache folder: {e}")
path_to_file = self.path_local_cache + "/exclude_list.csv"
pd.DataFrame({'List': auto_exclude_list}).to_csv(path_to_file)
assert method_pick_sets in ['pick_first', 'pick_random']
q = Queue()
if self.openml_df is None or len(self.openml_df) == 0:
print("Error. No datasets specified. Call find_datasets() or find_by_name() before collect_data().")
return None
if not keep_duplicated_names:
self.openml_df = self.openml_df.drop_duplicates(subset=["name"], keep="last")
self.dataset_collection = []
#if max_num_datasets_used > -1 and max_num_datasets_used < len(self.openml_df) and method_pick_sets == "pick_random":
if -1 < max_num_datasets_used < len(self.openml_df) and method_pick_sets == "pick_random":
openml_subset_df = self.openml_df.sample(frac=1, random_state=shuffle_random_state)
else:
openml_subset_df = self.openml_df
auto_exclude_list = []
read_auto_exclude_list()
usable_dataset_idx = 0
for dataset_idx in range(len(openml_subset_df)):
if (max_num_datasets_used > -1) and (len(self.dataset_collection) >= max_num_datasets_used):
break
dataset_did = int(openml_subset_df.iloc[dataset_idx].did)
dataset_name = openml_subset_df.iloc[dataset_idx]['name']
dataset_version = openml_subset_df.iloc[dataset_idx]['version']
            if exclude_list is not None and dataset_name in exclude_list:
continue
if dataset_did in auto_exclude_list:
continue
print(f"Collecting {usable_dataset_idx}: {dataset_name}")
dataset_df = None
dataset_source = ""
if check_local_cache:
try:
path_to_file = self.path_local_cache + "/" + dataset_name + '.csv'
X_with_y = pd.read_csv(path_to_file)
dataset_df = X_with_y.drop("y", axis=1)
y = X_with_y["y"]
dataset_source = "cache"
except Exception as e:
if "No such file or directory:" not in str(e):
print(f" Error reading file: {e}")
else:
print(" File not found in cache.")
dataset_df = None
if not check_online and dataset_df is None:
continue
if dataset_df is None:
p = Process(target=get_single_dataset, name="get_single_dataset", args=(q, dataset_did, dataset_name))
p.start()
p.join(timeout=20)
if q.empty():
print(f" Unable to collect {dataset_name} from openml.org")
append_auto_exclude_list(dataset_did)
continue
dataset = q.get()
try:
X, y, categorical_indicator, attribute_names = dataset.get_data(
dataset_format="dataframe",
target=dataset.default_target_attribute
)
except Exception as e:
if verbose:
print(f" Error collecting file with did: {dataset_did}, name: {dataset_name}. Error: {e}")
append_auto_exclude_list(dataset_did)
continue
if X is None or y is None:
if verbose:
print(f" Error collecting file with did: {dataset_did}, name: {dataset_name}. X or y is None")
append_auto_exclude_list(dataset_did)
continue
dataset_df = pd.DataFrame(X, columns=attribute_names)
if len(dataset_df) != len(y):
if verbose:
print(f" Error collecting file with did: {dataset_did}, name: {dataset_name}. Number rows in X: {len(X)}. Number rows in y: {len(y)}")
append_auto_exclude_list(dataset_did)
continue
if preview_data:
print(dataset_df.head())
if save_local_cache:
try:
mkdir(self.path_local_cache)
except FileExistsError:
pass
except Exception as e:
print(f"Error creating local cache folder: {e}")
X_with_y = dataset_df.copy()
X_with_y['y'] = y
X_with_y.to_csv(self.path_local_cache + "/" + dataset_name + '.csv', index=False)
            if (self.problem_type == "regression") and not is_numeric_dtype(y):
continue
if dataset_source == "cache":
print(f" Reading from local cache: {usable_dataset_idx}, id: {dataset_did}, name: {dataset_name}")
else:
print(f" Loading dataset from openml: {usable_dataset_idx}, id: {dataset_did}, name: {dataset_name}")
dataset_df = self.__clean_dataset(dataset_df, max_cat_unique_vals, one_hot_encode,
fill_nan_and_inf_zero)
self.dataset_collection.append({'Index': usable_dataset_idx,
'Dataset_name': dataset_name,
'Dataset_version': dataset_version,
'X': dataset_df,
'y': y})
usable_dataset_idx += 1
save_auto_exclude_list()
def __clean_dataset(self, X, max_cat_unique_vals, one_hot_encode, fill_nan_and_inf_zero):
        # The categorical_indicator provided by openml isn't 100% reliable, so we also check pandas' is_numeric_dtype
categorical_indicator = [False] * len(X.columns)
for c in range(len(X.columns)):
            if not is_numeric_dtype(X[X.columns[c]]):
                categorical_indicator[c] = True
# import pandas, and load the nls weeks worked and college data
import pandas as pd
pd.set_option('display.width', 80)
import os
import sys
import unittest
from time import strftime, gmtime
import pandas as pd
import numpy as np
import pvl
from plio.io import io_controlnetwork
from plio.io import ControlNetFileV0002_pb2 as cnf
from plio.utils.utils import find_in_dict
from plio.examples import get_path
import pytest
sys.path.insert(0, os.path.abspath('..'))
@pytest.mark.parametrize('cnet_file',
(get_path('apollo_out.net'), get_path('apollo_out_v5.net'))
)
def test_cnet_read(cnet_file):
df = io_controlnetwork.from_isis(cnet_file)
assert len(df) == find_in_dict(df.header, 'NumberOfMeasures')
assert isinstance(df, io_controlnetwork.IsisControlNetwork)
assert len(df.groupby('id')) == find_in_dict(df.header, 'NumberOfPoints')
for proto_field, mangled_field in io_controlnetwork.IsisStore.point_field_map.items():
assert proto_field not in df.columns
assert mangled_field in df.columns
for proto_field, mangled_field in io_controlnetwork.IsisStore.measure_field_map.items():
assert proto_field not in df.columns
assert mangled_field in df.columns
@pytest.mark.parametrize('messagetype, value', [
(2, 0.5),
(3, 0.5),
(4, -0.25),
(5, 1e6),
(6, 1),
(7, -1e10),
('GoodnessOfFit', 0.5),
('MinimumPixelZScore', 0.25)
])
def test_MeasureLog(messagetype, value):
l = io_controlnetwork.MeasureLog(messagetype, value)
if isinstance(messagetype, int):
assert l.messagetype == io_controlnetwork.MeasureMessageType(messagetype)
elif isinstance(messagetype, str):
assert l.messagetype == io_controlnetwork.MeasureMessageType[messagetype]
assert l.value == value
assert isinstance(l.to_protobuf, object)
def test_log_error():
with pytest.raises(TypeError) as err:
io_controlnetwork.MeasureLog(2, 'foo')
def test_to_protobuf():
value = 1.25
int_dtype = 2
log = io_controlnetwork.MeasureLog(int_dtype, value)
proto = log.to_protobuf()
assert proto.doubleDataType == int_dtype
assert proto.doubleDataValue == value
@pytest.fixture
def cnet_dataframe(tmpdir):
npts = 5
serial_times = {295: '1971-07-31T01:24:11.754',
296: '1971-07-31T01:24:36.970'}
serials = {i:'APOLLO15/METRIC/{}'.format(j) for i, j in enumerate(serial_times.values())}
columns = ['id', 'pointType', 'serialnumber', 'measureType',
'sample', 'line', 'image_index', 'pointLog', 'measureLog',
'aprioriCovar']
data = []
for i in range(npts):
aprioriCovar = None
if i == npts - 1:
aprioriCovar = np.ones((2,3))
data.append((i, 2, serials[0], 2, 0, 0, 0, [], [], aprioriCovar))
data.append((i, 2, serials[1], 2, 0, 0, 1, [], [io_controlnetwork.MeasureLog(2, 0.5)],aprioriCovar))
    df = pd.DataFrame(data, columns=columns)
import os
import sys
import csv
import pandas as pd
import numpy as np
from sklearn.cluster import AgglomerativeClustering
from sklearn.model_selection import KFold
import random
#random.seed(1)
def get_samples(df, nClusters):
model = AgglomerativeClustering(n_clusters=nClusters)
model.fit(df)
labels_idx = {}
for i in range(0, len(model.labels_)):
curr_label = model.labels_[i]
if curr_label in labels_idx:
labels_idx[curr_label].append(i)
else:
labels_idx[curr_label] = [i]
idx_picked = []
for key, value in labels_idx.items():
idx_picked.append(np.random.choice(value))
return idx_picked
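# Illustrative usage (added example, not part of the original script): pick one
# representative row index from each of three hierarchical clusters of a small
# random matrix.
if __name__ == "__main__":
    _demo_df = pd.DataFrame(np.random.rand(12, 4))
    _demo_idx = get_samples(_demo_df, nClusters=3)  # one row index per cluster
    print("picked row indices:", _demo_idx)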
def generate(dir_path):
strFilename1 = '{!s}/processed_NonTFs.tsv'.format(dir_path)
strFilename2 = '{!s}/processed_TFs.tsv'.format(dir_path)
strFilename3 = '{!s}/processed_KO.tsv'.format(dir_path)
    df1 = pd.read_csv(strFilename1, sep="\t")
import pandas as pd
df1 = pd.read_csv('/tigress/np5/all_df.csv', index_col=0)
df2 = pd.read_csv('/tigress/np5/all_df_params.csv', index_col=0)
from minder_utils.models.feature_selectors import Intrinsic_Selector, Wrapper_Selector, \
Supervised_Filter, Unsupervised_Filter
from minder_utils.configurations import config
from minder_utils.evaluate import evaluate_features
from minder_utils.models.classifiers.classifiers import Classifiers
from minder_utils.formatting.label import label_by_week
from minder_utils.feature_engineering import Feature_engineer
from minder_utils.formatting import Formatting
from minder_utils.visualisation import Visual_Evaluation
import os
import pandas as pd
import numpy as np
import datetime
os.chdir('..')
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
visual = Visual_Evaluation()
test_data = pd.read_csv('./minder_utils/data/weekly_test/fe.csv')
test_data = test_data[pd.to_datetime(test_data.time) > str(datetime.date.today() - datetime.timedelta(days=10))]
fe = Feature_engineer(Formatting())
data = label_by_week(fe.activity)
input_data = label_by_week(fe.activity)
raw_data = input_data[~input_data.valid.isna()]
X = raw_data[fe.info.keys()].to_numpy()
y = raw_data.valid.to_numpy()
y[y < 0] = 0
p_ids = raw_data.id.to_numpy()
test_x = test_data[fe.info.keys()].to_numpy()
ids = []
probabilities = []
for model_type in Classifiers().get_info():
model = Classifiers(model_type)
model.fit(X, y.astype(float))
    prediction = model.predict_probs(test_x)
    probabilities.append(list(prediction[:, 1]))
probabilities = np.mean(np.array(probabilities), axis=0)
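# Explanatory note (added): 'probabilities' now holds the mean positive-class
# probability across all classifier types in the ensemble; the 0.5 threshold
# below turns it into a binary weekly prediction for each id in test_data.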
results = {'id': test_data.id.to_list(), 'prediction': probabilities > 0.5, 'confidence': probabilities}
results = pd.DataFrame.from_dict(results)
# -*- coding: utf-8 -*-
# pylint: disable=E1101
# flake8: noqa
from datetime import datetime
import csv
import os
import sys
import re
import nose
import platform
from multiprocessing.pool import ThreadPool
from numpy import nan
import numpy as np
from pandas.io.common import DtypeWarning
from pandas import DataFrame, Series, Index, MultiIndex, DatetimeIndex
from pandas.compat import(
StringIO, BytesIO, PY3, range, long, lrange, lmap, u
)
from pandas.io.common import URLError
import pandas.io.parsers as parsers
from pandas.io.parsers import (read_csv, read_table, read_fwf,
TextFileReader, TextParser)
import pandas.util.testing as tm
import pandas as pd
from pandas.compat import parse_date
import pandas.lib as lib
from pandas import compat
from pandas.lib import Timestamp
from pandas.tseries.index import date_range
import pandas.tseries.tools as tools
from numpy.testing.decorators import slow
import pandas.parser
class ParserTests(object):
"""
Want to be able to test either C+Cython or Python+Cython parsers
"""
data1 = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
def read_csv(self, *args, **kwargs):
raise NotImplementedError
def read_table(self, *args, **kwargs):
raise NotImplementedError
def setUp(self):
import warnings
warnings.filterwarnings(action='ignore', category=FutureWarning)
self.dirpath = tm.get_data_path()
self.csv1 = os.path.join(self.dirpath, 'test1.csv')
self.csv2 = os.path.join(self.dirpath, 'test2.csv')
self.xls1 = os.path.join(self.dirpath, 'test.xls')
def construct_dataframe(self, num_rows):
df = DataFrame(np.random.rand(num_rows, 5), columns=list('abcde'))
df['foo'] = 'foo'
df['bar'] = 'bar'
df['baz'] = 'baz'
df['date'] = pd.date_range('20000101 09:00:00',
periods=num_rows,
freq='s')
df['int'] = np.arange(num_rows, dtype='int64')
return df
def generate_multithread_dataframe(self, path, num_rows, num_tasks):
def reader(arg):
start, nrows = arg
if not start:
return pd.read_csv(path, index_col=0, header=0, nrows=nrows,
parse_dates=['date'])
return pd.read_csv(path,
index_col=0,
header=None,
skiprows=int(start) + 1,
nrows=nrows,
parse_dates=[9])
tasks = [
(num_rows * i / num_tasks,
num_rows / num_tasks) for i in range(num_tasks)
]
pool = ThreadPool(processes=num_tasks)
results = pool.map(reader, tasks)
header = results[0].columns
for r in results[1:]:
r.columns = header
final_dataframe = pd.concat(results)
return final_dataframe
def test_converters_type_must_be_dict(self):
with tm.assertRaisesRegexp(TypeError, 'Type converters.+'):
self.read_csv(StringIO(self.data1), converters=0)
def test_empty_decimal_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), decimal='')
def test_empty_thousands_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), thousands='')
def test_multi_character_decimal_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), thousands=',,')
def test_empty_string(self):
data = """\
One,Two,Three
a,1,one
b,2,two
,3,three
d,4,nan
e,5,five
nan,6,
g,7,seven
"""
df = self.read_csv(StringIO(data))
xp = DataFrame({'One': ['a', 'b', np.nan, 'd', 'e', np.nan, 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', np.nan, 'five',
np.nan, 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(StringIO(data), na_values={'One': [], 'Three': []},
keep_default_na=False)
xp = DataFrame({'One': ['a', 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', 'nan', 'five',
'', 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(
StringIO(data), na_values=['a'], keep_default_na=False)
xp = DataFrame({'One': [np.nan, 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', 'nan', 'five', '',
'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(StringIO(data), na_values={'One': [], 'Three': []})
xp = DataFrame({'One': ['a', 'b', np.nan, 'd', 'e', np.nan, 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', np.nan, 'five',
np.nan, 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
# GH4318, passing na_values=None and keep_default_na=False yields
# 'None' as a na_value
data = """\
One,Two,Three
a,1,None
b,2,two
,3,None
d,4,nan
e,5,five
nan,6,
g,7,seven
"""
df = self.read_csv(
StringIO(data), keep_default_na=False)
xp = DataFrame({'One': ['a', 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['None', 'two', 'None', 'nan', 'five', '',
'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
def test_read_csv(self):
if not compat.PY3:
if compat.is_platform_windows():
prefix = u("file:///")
else:
prefix = u("file://")
fname = prefix + compat.text_type(self.csv1)
# it works!
read_csv(fname, index_col=0, parse_dates=True)
def test_dialect(self):
data = """\
label1,label2,label3
index1,"a,c,e
index2,b,d,f
"""
dia = csv.excel()
dia.quoting = csv.QUOTE_NONE
df = self.read_csv(StringIO(data), dialect=dia)
data = '''\
label1,label2,label3
index1,a,c,e
index2,b,d,f
'''
exp = self.read_csv(StringIO(data))
exp.replace('a', '"a', inplace=True)
tm.assert_frame_equal(df, exp)
def test_dialect_str(self):
data = """\
fruit:vegetable
apple:brocolli
pear:tomato
"""
exp = DataFrame({
'fruit': ['apple', 'pear'],
'vegetable': ['brocolli', 'tomato']
})
dia = csv.register_dialect('mydialect', delimiter=':') # noqa
df = self.read_csv(StringIO(data), dialect='mydialect')
tm.assert_frame_equal(df, exp)
csv.unregister_dialect('mydialect')
def test_1000_sep(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334, 13],
'C': [5, 10.]
})
df = self.read_csv(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
def test_1000_sep_with_decimal(self):
data = """A|B|C
1|2,334.01|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334.01, 13],
'C': [5, 10.]
})
tm.assert_equal(expected.A.dtype, 'int64')
tm.assert_equal(expected.B.dtype, 'float')
tm.assert_equal(expected.C.dtype, 'float')
df = self.read_csv(StringIO(data), sep='|', thousands=',', decimal='.')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|',
thousands=',', decimal='.')
tm.assert_frame_equal(df, expected)
data_with_odd_sep = """A|B|C
1|2.334,01|5
10|13|10,
"""
df = self.read_csv(StringIO(data_with_odd_sep),
sep='|', thousands='.', decimal=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data_with_odd_sep),
sep='|', thousands='.', decimal=',')
tm.assert_frame_equal(df, expected)
def test_separator_date_conflict(self):
# Regression test for issue #4678: make sure thousands separator and
# date parsing do not conflict.
data = '06-02-2013;13:00;1-000.215'
expected = DataFrame(
[[datetime(2013, 6, 2, 13, 0, 0), 1000.215]],
columns=['Date', 2]
)
df = self.read_csv(StringIO(data), sep=';', thousands='-',
parse_dates={'Date': [0, 1]}, header=None)
tm.assert_frame_equal(df, expected)
def test_squeeze(self):
data = """\
a,1
b,2
c,3
"""
idx = Index(['a', 'b', 'c'], name=0)
expected = Series([1, 2, 3], name=1, index=idx)
result = self.read_table(StringIO(data), sep=',', index_col=0,
header=None, squeeze=True)
tm.assertIsInstance(result, Series)
tm.assert_series_equal(result, expected)
def test_squeeze_no_view(self):
# GH 8217
# series should not be a view
data = """time,data\n0,10\n1,11\n2,12\n4,14\n5,15\n3,13"""
result = self.read_csv(StringIO(data), index_col='time', squeeze=True)
self.assertFalse(result._is_view)
def test_inf_parsing(self):
data = """\
,A
a,inf
b,-inf
c,Inf
d,-Inf
e,INF
f,-INF
g,INf
h,-INf
i,inF
j,-inF"""
inf = float('inf')
expected = Series([inf, -inf] * 5)
df = read_csv(StringIO(data), index_col=0)
tm.assert_almost_equal(df['A'].values, expected.values)
df = read_csv(StringIO(data), index_col=0, na_filter=False)
tm.assert_almost_equal(df['A'].values, expected.values)
def test_multiple_date_col(self):
# Can use multiple date parsers
data = """\
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
def func(*date_cols):
return lib.try_parse_dates(parsers._concat_date_cols(date_cols))
df = self.read_csv(StringIO(data), header=None,
date_parser=func,
prefix='X',
parse_dates={'nominal': [1, 2],
'actual': [1, 3]})
self.assertIn('nominal', df)
self.assertIn('actual', df)
self.assertNotIn('X1', df)
self.assertNotIn('X2', df)
self.assertNotIn('X3', df)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.ix[0, 'nominal'], d)
df = self.read_csv(StringIO(data), header=None,
date_parser=func,
parse_dates={'nominal': [1, 2],
'actual': [1, 3]},
keep_date_col=True)
self.assertIn('nominal', df)
self.assertIn('actual', df)
self.assertIn(1, df)
self.assertIn(2, df)
self.assertIn(3, df)
data = """\
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
df = read_csv(StringIO(data), header=None,
prefix='X',
parse_dates=[[1, 2], [1, 3]])
self.assertIn('X1_X2', df)
self.assertIn('X1_X3', df)
self.assertNotIn('X1', df)
self.assertNotIn('X2', df)
self.assertNotIn('X3', df)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.ix[0, 'X1_X2'], d)
df = read_csv(StringIO(data), header=None,
parse_dates=[[1, 2], [1, 3]], keep_date_col=True)
self.assertIn('1_2', df)
self.assertIn('1_3', df)
self.assertIn(1, df)
self.assertIn(2, df)
self.assertIn(3, df)
data = '''\
KORD,19990127 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
'''
df = self.read_csv(StringIO(data), sep=',', header=None,
parse_dates=[1], index_col=1)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.index[0], d)
def test_multiple_date_cols_int_cast(self):
data = ("KORD,19990127, 19:00:00, 18:56:00, 0.8100\n"
"KORD,19990127, 20:00:00, 19:56:00, 0.0100\n"
"KORD,19990127, 21:00:00, 20:56:00, -0.5900\n"
"KORD,19990127, 21:00:00, 21:18:00, -0.9900\n"
"KORD,19990127, 22:00:00, 21:56:00, -0.5900\n"
"KORD,19990127, 23:00:00, 22:56:00, -0.5900")
date_spec = {'nominal': [1, 2], 'actual': [1, 3]}
import pandas.io.date_converters as conv
# it works!
df = self.read_csv(StringIO(data), header=None, parse_dates=date_spec,
date_parser=conv.parse_date_time)
self.assertIn('nominal', df)
def test_multiple_date_col_timestamp_parse(self):
data = """05/31/2012,15:30:00.029,1306.25,1,E,0,,1306.25
05/31/2012,15:30:00.029,1306.25,8,E,0,,1306.25"""
result = self.read_csv(StringIO(data), sep=',', header=None,
parse_dates=[[0, 1]], date_parser=Timestamp)
ex_val = Timestamp('05/31/2012 15:30:00.029')
self.assertEqual(result['0_1'][0], ex_val)
def test_single_line(self):
# GH 6607
# Test currently only valid with python engine because sep=None and
# delim_whitespace=False. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError,
'sep=None with delim_whitespace=False'):
# sniff separator
buf = StringIO()
sys.stdout = buf
# printing warning message when engine == 'c' for now
try:
# it works!
df = self.read_csv(StringIO('1,2'), names=['a', 'b'],
header=None, sep=None)
tm.assert_frame_equal(DataFrame({'a': [1], 'b': [2]}), df)
finally:
sys.stdout = sys.__stdout__
def test_multiple_date_cols_with_header(self):
data = """\
ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000"""
df = self.read_csv(StringIO(data), parse_dates={'nominal': [1, 2]})
self.assertNotIsInstance(df.nominal[0], compat.string_types)
ts_data = """\
ID,date,nominalTime,actualTime,A,B,C,D,E
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
def test_multiple_date_col_name_collision(self):
self.assertRaises(ValueError, self.read_csv, StringIO(self.ts_data),
parse_dates={'ID': [1, 2]})
data = """\
date_NominalTime,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir
KORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000""" # noqa
self.assertRaises(ValueError, self.read_csv, StringIO(data),
parse_dates=[[1, 2]])
def test_index_col_named(self):
no_header = """\
KORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000"""
h = "ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir\n"
data = h + no_header
rs = self.read_csv(StringIO(data), index_col='ID')
xp = self.read_csv(StringIO(data), header=0).set_index('ID')
tm.assert_frame_equal(rs, xp)
self.assertRaises(ValueError, self.read_csv, StringIO(no_header),
index_col='ID')
data = """\
1,2,3,4,hello
5,6,7,8,world
9,10,11,12,foo
"""
names = ['a', 'b', 'c', 'd', 'message']
xp = DataFrame({'a': [1, 5, 9], 'b': [2, 6, 10], 'c': [3, 7, 11],
'd': [4, 8, 12]},
index=Index(['hello', 'world', 'foo'], name='message'))
rs = self.read_csv(StringIO(data), names=names, index_col=['message'])
tm.assert_frame_equal(xp, rs)
self.assertEqual(xp.index.name, rs.index.name)
rs = self.read_csv(StringIO(data), names=names, index_col='message')
tm.assert_frame_equal(xp, rs)
self.assertEqual(xp.index.name, rs.index.name)
def test_usecols_index_col_False(self):
# Issue 9082
s = "a,b,c,d\n1,2,3,4\n5,6,7,8"
s_malformed = "a,b,c,d\n1,2,3,4,\n5,6,7,8,"
cols = ['a', 'c', 'd']
expected = DataFrame({'a': [1, 5], 'c': [3, 7], 'd': [4, 8]})
df = self.read_csv(StringIO(s), usecols=cols, index_col=False)
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(s_malformed),
usecols=cols, index_col=False)
tm.assert_frame_equal(expected, df)
def test_index_col_is_True(self):
# Issue 9798
self.assertRaises(ValueError, self.read_csv, StringIO(self.ts_data),
index_col=True)
def test_converter_index_col_bug(self):
# 1835
data = "A;B\n1;2\n3;4"
rs = self.read_csv(StringIO(data), sep=';', index_col='A',
converters={'A': lambda x: x})
xp = DataFrame({'B': [2, 4]}, index=Index([1, 3], name='A'))
tm.assert_frame_equal(rs, xp)
self.assertEqual(rs.index.name, xp.index.name)
def test_date_parser_int_bug(self):
# #3071
log_file = StringIO(
'posix_timestamp,elapsed,sys,user,queries,query_time,rows,'
'accountid,userid,contactid,level,silo,method\n'
'1343103150,0.062353,0,4,6,0.01690,3,'
'12345,1,-1,3,invoice_InvoiceResource,search\n'
)
def f(posix_string):
return datetime.utcfromtimestamp(int(posix_string))
# it works!
read_csv(log_file, index_col=0, parse_dates=0, date_parser=f)
def test_multiple_skts_example(self):
data = "year, month, a, b\n 2001, 01, 0.0, 10.\n 2001, 02, 1.1, 11."
pass
def test_malformed(self):
# all
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
"""
try:
df = self.read_table(
StringIO(data), sep=',', header=1, comment='#')
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 4, saw 5', str(inst))
# skip_footer
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
footer
"""
# GH 6607
# Test currently only valid with python engine because
# skip_footer != 0. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
try:
with tm.assertRaisesRegexp(ValueError, 'skip_footer'): # XXX
df = self.read_table(
StringIO(data), sep=',', header=1, comment='#',
skip_footer=1)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 4, saw 5', str(inst))
# first chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#',
iterator=True, chunksize=1,
skiprows=[2])
df = it.read(5)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
# middle chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',', header=1,
comment='#', iterator=True, chunksize=1,
skiprows=[2])
df = it.read(1)
it.read(2)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
# last chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#',
iterator=True, chunksize=1, skiprows=[2])
df = it.read(1)
it.read()
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
def test_passing_dtype(self):
# GH 6607
# Passing dtype is currently only supported by the C engine.
# Temporarily copied to TestCParser*.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError,
"The 'dtype' option is not supported"):
df = DataFrame(np.random.rand(5, 2), columns=list(
'AB'), index=['1A', '1B', '1C', '1D', '1E'])
with tm.ensure_clean('__passing_str_as_dtype__.csv') as path:
df.to_csv(path)
# GH 3795
# passing 'str' as the dtype
result = self.read_csv(path, dtype=str, index_col=0)
tm.assert_series_equal(result.dtypes, Series(
{'A': 'object', 'B': 'object'}))
# we expect all object columns, so need to convert to test for
# equivalence
result = result.astype(float)
tm.assert_frame_equal(result, df)
# invalid dtype
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'foo', 'B': 'float64'},
index_col=0)
# valid but we don't support it (date)
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'datetime64', 'B': 'float64'},
index_col=0)
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'datetime64', 'B': 'float64'},
index_col=0, parse_dates=['B'])
# valid but we don't support it
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'timedelta64', 'B': 'float64'},
index_col=0)
with tm.assertRaisesRegexp(ValueError,
"The 'dtype' option is not supported"):
# empty frame
# GH12048
self.read_csv(StringIO('A,B'), dtype=str)
def test_quoting(self):
bad_line_small = """printer\tresult\tvariant_name
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jacob
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jakob
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\t"Furststiftische Hofdruckerei, <Kempten""
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tGaller, Alois
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tHochfurstliche Buchhandlung <Kempten>"""
self.assertRaises(Exception, self.read_table, StringIO(bad_line_small),
sep='\t')
good_line_small = bad_line_small + '"'
df = self.read_table(StringIO(good_line_small), sep='\t')
self.assertEqual(len(df), 3)
def test_non_string_na_values(self):
# GH3611, na_values that are not a string are an issue
with tm.ensure_clean('__non_string_na_values__.csv') as path:
df = DataFrame({'A': [-999, 2, 3], 'B': [1.2, -999, 4.5]})
df.to_csv(path, sep=' ', index=False)
result1 = read_csv(path, sep=' ', header=0,
na_values=['-999.0', '-999'])
result2 = read_csv(path, sep=' ', header=0,
na_values=[-999, -999.0])
result3 = read_csv(path, sep=' ', header=0,
na_values=[-999.0, -999])
tm.assert_frame_equal(result1, result2)
tm.assert_frame_equal(result2, result3)
result4 = read_csv(path, sep=' ', header=0, na_values=['-999.0'])
result5 = read_csv(path, sep=' ', header=0, na_values=['-999'])
result6 = read_csv(path, sep=' ', header=0, na_values=[-999.0])
result7 = read_csv(path, sep=' ', header=0, na_values=[-999])
tm.assert_frame_equal(result4, result3)
tm.assert_frame_equal(result5, result3)
tm.assert_frame_equal(result6, result3)
tm.assert_frame_equal(result7, result3)
good_compare = result3
# with an odd float format, so we can't match the string 999.0
# exactly, but need float matching
df.to_csv(path, sep=' ', index=False, float_format='%.3f')
result1 = read_csv(path, sep=' ', header=0,
na_values=['-999.0', '-999'])
result2 = read_csv(path, sep=' ', header=0,
na_values=[-999, -999.0])
result3 = read_csv(path, sep=' ', header=0,
na_values=[-999.0, -999])
tm.assert_frame_equal(result1, good_compare)
tm.assert_frame_equal(result2, good_compare)
tm.assert_frame_equal(result3, good_compare)
result4 = read_csv(path, sep=' ', header=0, na_values=['-999.0'])
result5 = read_csv(path, sep=' ', header=0, na_values=['-999'])
result6 = read_csv(path, sep=' ', header=0, na_values=[-999.0])
result7 = read_csv(path, sep=' ', header=0, na_values=[-999])
tm.assert_frame_equal(result4, good_compare)
tm.assert_frame_equal(result5, good_compare)
tm.assert_frame_equal(result6, good_compare)
tm.assert_frame_equal(result7, good_compare)
def test_default_na_values(self):
_NA_VALUES = set(['-1.#IND', '1.#QNAN', '1.#IND', '-1.#QNAN',
'#N/A', 'N/A', 'NA', '#NA', 'NULL', 'NaN',
'nan', '-NaN', '-nan', '#N/A N/A', ''])
self.assertEqual(_NA_VALUES, parsers._NA_VALUES)
nv = len(_NA_VALUES)
def f(i, v):
if i == 0:
buf = ''
elif i > 0:
buf = ''.join([','] * i)
buf = "{0}{1}".format(buf, v)
if i < nv - 1:
buf = "{0}{1}".format(buf, ''.join([','] * (nv - i - 1)))
return buf
data = StringIO('\n'.join([f(i, v) for i, v in enumerate(_NA_VALUES)]))
expected = DataFrame(np.nan, columns=range(nv), index=range(nv))
df = self.read_csv(data, header=None)
tm.assert_frame_equal(df, expected)
def test_custom_na_values(self):
data = """A,B,C
ignore,this,row
1,NA,3
-1.#IND,5,baz
7,8,NaN
"""
expected = [[1., nan, 3],
[nan, 5, nan],
[7, 8, nan]]
df = self.read_csv(StringIO(data), na_values=['baz'], skiprows=[1])
tm.assert_almost_equal(df.values, expected)
df2 = self.read_table(StringIO(data), sep=',', na_values=['baz'],
skiprows=[1])
tm.assert_almost_equal(df2.values, expected)
df3 = self.read_table(StringIO(data), sep=',', na_values='baz',
skiprows=[1])
tm.assert_almost_equal(df3.values, expected)
def test_nat_parse(self):
# GH 3062
df = DataFrame(dict({
'A': np.asarray(lrange(10), dtype='float64'),
'B': pd.Timestamp('20010101')}))
df.iloc[3:6, :] = np.nan
with tm.ensure_clean('__nat_parse_.csv') as path:
df.to_csv(path)
result = read_csv(path, index_col=0, parse_dates=['B'])
tm.assert_frame_equal(result, df)
expected = Series(dict(A='float64', B='datetime64[ns]'))
tm.assert_series_equal(expected, result.dtypes)
# test with NaT for the nan_rep
# we don't have a method to specif the Datetime na_rep (it defaults
# to '')
df.to_csv(path)
result = read_csv(path, index_col=0, parse_dates=['B'])
tm.assert_frame_equal(result, df)
def test_skiprows_bug(self):
# GH #505
text = """#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
1/1/2000,1.,2.,3.
1/2/2000,4,5,6
1/3/2000,7,8,9
"""
data = self.read_csv(StringIO(text), skiprows=lrange(6), header=None,
index_col=0, parse_dates=True)
data2 = self.read_csv(StringIO(text), skiprows=6, header=None,
index_col=0, parse_dates=True)
expected = DataFrame(np.arange(1., 10.).reshape((3, 3)),
columns=[1, 2, 3],
index=[datetime(2000, 1, 1), datetime(2000, 1, 2),
datetime(2000, 1, 3)])
expected.index.name = 0
tm.assert_frame_equal(data, expected)
tm.assert_frame_equal(data, data2)
def test_deep_skiprows(self):
# GH #4382
text = "a,b,c\n" + \
"\n".join([",".join([str(i), str(i + 1), str(i + 2)])
for i in range(10)])
condensed_text = "a,b,c\n" + \
"\n".join([",".join([str(i), str(i + 1), str(i + 2)])
for i in [0, 1, 2, 3, 4, 6, 8, 9]])
data = self.read_csv(StringIO(text), skiprows=[6, 8])
condensed_data = self.read_csv(StringIO(condensed_text))
tm.assert_frame_equal(data, condensed_data)
def test_skiprows_blank(self):
# GH 9832
text = """#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
1/1/2000,1.,2.,3.
1/2/2000,4,5,6
1/3/2000,7,8,9
"""
data = self.read_csv(StringIO(text), skiprows=6, header=None,
index_col=0, parse_dates=True)
expected = DataFrame(np.arange(1., 10.).reshape((3, 3)),
columns=[1, 2, 3],
index=[datetime(2000, 1, 1), datetime(2000, 1, 2),
datetime(2000, 1, 3)])
expected.index.name = 0
tm.assert_frame_equal(data, expected)
def test_detect_string_na(self):
data = """A,B
foo,bar
NA,baz
NaN,nan
"""
expected = [['foo', 'bar'],
[nan, 'baz'],
[nan, nan]]
df = self.read_csv(StringIO(data))
tm.assert_almost_equal(df.values, expected)
def test_unnamed_columns(self):
data = """A,B,C,,
1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
expected = [[1, 2, 3, 4, 5.],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]]
df = self.read_table(StringIO(data), sep=',')
tm.assert_almost_equal(df.values, expected)
self.assert_numpy_array_equal(df.columns,
['A', 'B', 'C', 'Unnamed: 3',
'Unnamed: 4'])
def test_string_nas(self):
data = """A,B,C
a,b,c
d,,f
,g,h
"""
result = self.read_csv(StringIO(data))
expected = DataFrame([['a', 'b', 'c'],
['d', np.nan, 'f'],
[np.nan, 'g', 'h']],
columns=['A', 'B', 'C'])
tm.assert_frame_equal(result, expected)
def test_duplicate_columns(self):
for engine in ['python', 'c']:
data = """A,A,B,B,B
1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
            # check default behaviour
df = self.read_table(StringIO(data), sep=',', engine=engine)
self.assertEqual(list(df.columns), ['A', 'A.1', 'B', 'B.1', 'B.2'])
df = self.read_table(StringIO(data), sep=',',
engine=engine, mangle_dupe_cols=False)
self.assertEqual(list(df.columns), ['A', 'A', 'B', 'B', 'B'])
df = self.read_table(StringIO(data), sep=',',
engine=engine, mangle_dupe_cols=True)
self.assertEqual(list(df.columns), ['A', 'A.1', 'B', 'B.1', 'B.2'])
def test_csv_mixed_type(self):
data = """A,B,C
a,1,2
b,3,4
c,4,5
"""
df = self.read_csv(StringIO(data))
# TODO
def test_csv_custom_parser(self):
data = """A,B,C
20090101,a,1,2
20090102,b,3,4
20090103,c,4,5
"""
f = lambda x: datetime.strptime(x, '%Y%m%d')
df = self.read_csv(StringIO(data), date_parser=f)
expected = self.read_csv(StringIO(data), parse_dates=True)
tm.assert_frame_equal(df, expected)
def test_parse_dates_implicit_first_col(self):
data = """A,B,C
20090101,a,1,2
20090102,b,3,4
20090103,c,4,5
"""
df = self.read_csv(StringIO(data), parse_dates=True)
expected = self.read_csv(StringIO(data), index_col=0, parse_dates=True)
self.assertIsInstance(
df.index[0], (datetime, np.datetime64, Timestamp))
tm.assert_frame_equal(df, expected)
def test_parse_dates_string(self):
data = """date,A,B,C
20090101,a,1,2
20090102,b,3,4
20090103,c,4,5
"""
rs = self.read_csv(
StringIO(data), index_col='date', parse_dates='date')
idx = date_range('1/1/2009', periods=3)
idx.name = 'date'
xp = DataFrame({'A': ['a', 'b', 'c'],
'B': [1, 3, 4],
'C': [2, 4, 5]}, idx)
tm.assert_frame_equal(rs, xp)
def test_yy_format(self):
data = """date,time,B,C
090131,0010,1,2
090228,1020,3,4
090331,0830,5,6
"""
rs = self.read_csv(StringIO(data), index_col=0,
parse_dates=[['date', 'time']])
idx = DatetimeIndex([datetime(2009, 1, 31, 0, 10, 0),
datetime(2009, 2, 28, 10, 20, 0),
datetime(2009, 3, 31, 8, 30, 0)],
dtype=object, name='date_time')
xp = DataFrame({'B': [1, 3, 5], 'C': [2, 4, 6]}, idx)
tm.assert_frame_equal(rs, xp)
rs = self.read_csv(StringIO(data), index_col=0,
parse_dates=[[0, 1]])
idx = DatetimeIndex([datetime(2009, 1, 31, 0, 10, 0),
datetime(2009, 2, 28, 10, 20, 0),
datetime(2009, 3, 31, 8, 30, 0)],
dtype=object, name='date_time')
xp = DataFrame({'B': [1, 3, 5], 'C': [2, 4, 6]}, idx)
tm.assert_frame_equal(rs, xp)
def test_parse_dates_column_list(self):
from pandas.core.datetools import to_datetime
data = '''date;destination;ventilationcode;unitcode;units;aux_date
01/01/2010;P;P;50;1;12/1/2011
01/01/2010;P;R;50;1;13/1/2011
15/01/2010;P;P;50;1;14/1/2011
01/05/2010;P;P;50;1;15/1/2011'''
expected = self.read_csv(StringIO(data), sep=";", index_col=lrange(4))
lev = expected.index.levels[0]
levels = list(expected.index.levels)
levels[0] = lev.to_datetime(dayfirst=True)
# hack to get this to work - remove for final test
levels[0].name = lev.name
expected.index.set_levels(levels, inplace=True)
expected['aux_date'] = to_datetime(expected['aux_date'],
dayfirst=True)
expected['aux_date'] = lmap(Timestamp, expected['aux_date'])
tm.assertIsInstance(expected['aux_date'][0], datetime)
df = self.read_csv(StringIO(data), sep=";", index_col=lrange(4),
parse_dates=[0, 5], dayfirst=True)
tm.assert_frame_equal(df, expected)
df = self.read_csv(StringIO(data), sep=";", index_col=lrange(4),
parse_dates=['date', 'aux_date'], dayfirst=True)
tm.assert_frame_equal(df, expected)
def test_no_header(self):
data = """1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
df = self.read_table(StringIO(data), sep=',', header=None)
df_pref = self.read_table(StringIO(data), sep=',', prefix='X',
header=None)
names = ['foo', 'bar', 'baz', 'quux', 'panda']
df2 = self.read_table(StringIO(data), sep=',', names=names)
expected = [[1, 2, 3, 4, 5.],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]]
tm.assert_almost_equal(df.values, expected)
tm.assert_almost_equal(df.values, df2.values)
self.assert_numpy_array_equal(df_pref.columns,
['X0', 'X1', 'X2', 'X3', 'X4'])
self.assert_numpy_array_equal(df.columns, lrange(5))
self.assert_numpy_array_equal(df2.columns, names)
def test_no_header_prefix(self):
data = """1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
df_pref = self.read_table(StringIO(data), sep=',', prefix='Field',
header=None)
expected = [[1, 2, 3, 4, 5.],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]]
tm.assert_almost_equal(df_pref.values, expected)
self.assert_numpy_array_equal(df_pref.columns,
['Field0', 'Field1', 'Field2', 'Field3', 'Field4'])
def test_header_with_index_col(self):
data = """foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
names = ['A', 'B', 'C']
df = self.read_csv(StringIO(data), names=names)
self.assertEqual(names, ['A', 'B', 'C'])
values = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
expected = DataFrame(values, index=['foo', 'bar', 'baz'],
columns=['A', 'B', 'C'])
tm.assert_frame_equal(df, expected)
def test_read_csv_dataframe(self):
df = self.read_csv(self.csv1, index_col=0, parse_dates=True)
df2 = self.read_table(self.csv1, sep=',', index_col=0,
parse_dates=True)
self.assert_numpy_array_equal(df.columns, ['A', 'B', 'C', 'D'])
self.assertEqual(df.index.name, 'index')
self.assertIsInstance(
df.index[0], (datetime, np.datetime64, Timestamp))
self.assertEqual(df.values.dtype, np.float64)
tm.assert_frame_equal(df, df2)
def test_read_csv_no_index_name(self):
df = self.read_csv(self.csv2, index_col=0, parse_dates=True)
df2 = self.read_table(self.csv2, sep=',', index_col=0,
parse_dates=True)
self.assert_numpy_array_equal(df.columns, ['A', 'B', 'C', 'D', 'E'])
self.assertIsInstance(
df.index[0], (datetime, np.datetime64, Timestamp))
self.assertEqual(df.ix[:, ['A', 'B', 'C', 'D']
].values.dtype, np.float64)
tm.assert_frame_equal(df, df2)
def test_read_csv_infer_compression(self):
# GH 9770
expected = self.read_csv(self.csv1, index_col=0, parse_dates=True)
inputs = [self.csv1, self.csv1 + '.gz',
self.csv1 + '.bz2', open(self.csv1)]
for f in inputs:
df = self.read_csv(f, index_col=0, parse_dates=True,
compression='infer')
tm.assert_frame_equal(expected, df)
inputs[3].close()
def test_read_table_unicode(self):
fin = BytesIO(u('\u0141aski, Jan;1').encode('utf-8'))
df1 = read_table(fin, sep=";", encoding="utf-8", header=None)
tm.assertIsInstance(df1[0].values[0], compat.text_type)
def test_read_table_wrong_num_columns(self):
# the header declares too few columns for the second data row
data = """A,B,C,D,E,F
1,2,3,4,5,6
6,7,8,9,10,11,12
11,12,13,14,15,16
"""
self.assertRaises(Exception, self.read_csv, StringIO(data))
def test_read_table_duplicate_index(self):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
result = self.read_csv(StringIO(data), index_col=0)
expected = self.read_csv(StringIO(data)).set_index('index',
verify_integrity=False)
tm.assert_frame_equal(result, expected)
def test_read_table_duplicate_index_implicit(self):
data = """A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
# it works!
result = self.read_csv(StringIO(data))
def test_parse_bools(self):
data = """A,B
True,1
False,2
True,3
"""
data = self.read_csv(StringIO(data))
self.assertEqual(data['A'].dtype, np.bool_)
data = """A,B
YES,1
no,2
yes,3
No,3
Yes,3
"""
data = self.read_csv(StringIO(data),
true_values=['yes', 'Yes', 'YES'],
false_values=['no', 'NO', 'No'])
self.assertEqual(data['A'].dtype, np.bool_)
data = """A,B
TRUE,1
FALSE,2
TRUE,3
"""
data = self.read_csv(StringIO(data))
self.assertEqual(data['A'].dtype, np.bool_)
data = """A,B
foo,bar
bar,foo"""
result = self.read_csv(StringIO(data), true_values=['foo'],
false_values=['bar'])
expected = DataFrame({'A': [True, False], 'B': [False, True]})
tm.assert_frame_equal(result, expected)
def test_int_conversion(self):
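# dtypes are inferred per column: '1.0' stays float64, the all-integer B becomes int64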
data = """A,B
1.0,1
2.0,2
3.0,3
"""
data = self.read_csv(StringIO(data))
self.assertEqual(data['A'].dtype, np.float64)
self.assertEqual(data['B'].dtype, np.int64)
def test_infer_index_col(self):
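# rows carry one more field than the header, so the first field becomes an implicit index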
data = """A,B,C
foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
data = self.read_csv(StringIO(data))
self.assertTrue(data.index.equals(Index(['foo', 'bar', 'baz'])))
def test_read_nrows(self):
df = self.read_csv(StringIO(self.data1), nrows=3)
expected = self.read_csv(StringIO(self.data1))[:3]
tm.assert_frame_equal(df, expected)
def test_read_chunksize(self):
reader = self.read_csv(StringIO(self.data1), index_col=0, chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunks = list(reader)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
def test_read_chunksize_named(self):
reader = self.read_csv(
StringIO(self.data1), index_col='index', chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col='index')
chunks = list(reader)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
def test_get_chunk_passed_chunksize(self):
data = """A,B,C
1,2,3
4,5,6
7,8,9
1,2,3"""
result = self.read_csv(StringIO(data), chunksize=2)
piece = result.get_chunk()
self.assertEqual(len(piece), 2)
def test_read_text_list(self):
data = """A,B,C\nfoo,1,2,3\nbar,4,5,6"""
as_list = [['A', 'B', 'C'], ['foo', '1', '2', '3'], ['bar',
'4', '5', '6']]
df = self.read_csv(StringIO(data), index_col=0)
parser = TextParser(as_list, index_col=0, chunksize=2)
chunk = parser.read(None)
tm.assert_frame_equal(chunk, df)
def test_iterator(self):
# GH 6607
# Test currently only valid with python engine because
# skip_footer != 0. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError, 'skip_footer'):
reader = self.read_csv(StringIO(self.data1), index_col=0,
iterator=True)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunk = reader.read(3)
tm.assert_frame_equal(chunk, df[:3])
last_chunk = reader.read(5)
tm.assert_frame_equal(last_chunk, df[3:])
# pass list
lines = list(csv.reader(StringIO(self.data1)))
parser = TextParser(lines, index_col=0, chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunks = list(parser)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
# pass skiprows
parser = TextParser(lines, index_col=0, chunksize=2, skiprows=[1])
chunks = list(parser)
tm.assert_frame_equal(chunks[0], df[1:3])
# test bad parameter (skip_footer)
reader = self.read_csv(StringIO(self.data1), index_col=0,
iterator=True, skip_footer=True)
self.assertRaises(ValueError, reader.read, 3)
treader = self.read_table(StringIO(self.data1), sep=',', index_col=0,
iterator=True)
tm.assertIsInstance(treader, TextFileReader)
# iteration stops cleanly when chunksize is specified, GH 3967
data = """A,B,C
foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
reader = self.read_csv(StringIO(data), iterator=True)
result = list(reader)
expected = DataFrame(dict(A=[1, 4, 7], B=[2, 5, 8], C=[
3, 6, 9]), index=['foo', 'bar', 'baz'])
tm.assert_frame_equal(result[0], expected)
# chunksize = 1
reader = self.read_csv(StringIO(data), chunksize=1)
result = list(reader)
expected = DataFrame(dict(A=[1, 4, 7], B=[2, 5, 8], C=[
3, 6, 9]), index=['foo', 'bar', 'baz'])
self.assertEqual(len(result), 3)
tm.assert_frame_equal(pd.concat(result), expected)
def test_header_not_first_line(self):
data = """got,to,ignore,this,line
got,to,ignore,this,line
index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
"""
data2 = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
"""
df = self.read_csv(StringIO(data), header=2, index_col=0)
expected = self.read_csv(StringIO(data2), header=0, index_col=0)
tm.assert_frame_equal(df, expected)
def test_header_multi_index(self):
expected = tm.makeCustomDataframe(
5, 3, r_idx_nlevels=2, c_idx_nlevels=4)
data = """\
C0,,C_l0_g0,C_l0_g1,C_l0_g2
C1,,C_l1_g0,C_l1_g1,C_l1_g2
C2,,C_l2_g0,C_l2_g1,C_l2_g2
C3,,C_l3_g0,C_l3_g1,C_l3_g2
R0,R1,,,
R_l0_g0,R_l1_g0,R0C0,R0C1,R0C2
R_l0_g1,R_l1_g1,R1C0,R1C1,R1C2
R_l0_g2,R_l1_g2,R2C0,R2C1,R2C2
R_l0_g3,R_l1_g3,R3C0,R3C1,R3C2
R_l0_g4,R_l1_g4,R4C0,R4C1,R4C2
"""
df = self.read_csv(StringIO(data), header=[0, 1, 2, 3], index_col=[
0, 1], tupleize_cols=False)
tm.assert_frame_equal(df, expected)
# skipping lines in the header
df = self.read_csv(StringIO(data), header=[0, 1, 2, 3], index_col=[
0, 1], tupleize_cols=False)
tm.assert_frame_equal(df, expected)
#### invalid options ####
# no as_recarray
self.assertRaises(ValueError, self.read_csv, StringIO(data), header=[0, 1, 2, 3],
index_col=[0, 1], as_recarray=True, tupleize_cols=False)
# names
self.assertRaises(ValueError, self.read_csv, StringIO(data), header=[0, 1, 2, 3],
index_col=[0, 1], names=['foo', 'bar'], tupleize_cols=False)
# usecols
self.assertRaises(ValueError, self.read_csv, StringIO(data), header=[0, 1, 2, 3],
index_col=[0, 1], usecols=['foo', 'bar'], tupleize_cols=False)
# non-numeric index_col
self.assertRaises(ValueError, self.read_csv, StringIO(data), header=[0, 1, 2, 3],
index_col=['foo', 'bar'], tupleize_cols=False)
def test_header_multiindex_common_format(self):
df = DataFrame([[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]],
index=['one', 'two'],
columns=MultiIndex.from_tuples([('a', 'q'), ('a', 'r'), ('a', 's'),
('b', 't'), ('c', 'u'), ('c', 'v')]))
# to_csv
data = """,a,a,a,b,c,c
,q,r,s,t,u,v
,,,,,,
one,1,2,3,4,5,6
two,7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=0)
tm.assert_frame_equal(df, result)
# common
data = """,a,a,a,b,c,c
,q,r,s,t,u,v
one,1,2,3,4,5,6
two,7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=0)
tm.assert_frame_equal(df, result)
# common, no index_col
data = """a,a,a,b,c,c
q,r,s,t,u,v
1,2,3,4,5,6
7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=None)
tm.assert_frame_equal(df.reset_index(drop=True), result)
# malformed case 1
expected = DataFrame(np.array([[2, 3, 4, 5, 6],
[8, 9, 10, 11, 12]], dtype='int64'),
index=Index([1, 7]),
columns=MultiIndex(levels=[[u('a'), u('b'), u('c')], [u('r'), u('s'), u('t'), u('u'), u('v')]],
labels=[[0, 0, 1, 2, 2], [
0, 1, 2, 3, 4]],
names=[u('a'), u('q')]))
data = """a,a,a,b,c,c
q,r,s,t,u,v
1,2,3,4,5,6
7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=0)
tm.assert_frame_equal(expected, result)
# malformed case 2
expected = DataFrame(np.array([[2, 3, 4, 5, 6],
[8, 9, 10, 11, 12]], dtype='int64'),
index=Index([1, 7]),
columns=MultiIndex(levels=[[u('a'), u('b'), u('c')], [u('r'), u('s'), u('t'), u('u'), u('v')]],
labels=[[0, 0, 1, 2, 2], [
0, 1, 2, 3, 4]],
names=[None, u('q')]))
data = """,a,a,b,c,c
q,r,s,t,u,v
1,2,3,4,5,6
7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=0)
tm.assert_frame_equal(expected, result)
# mi on columns and index (malformed)
expected = DataFrame(np.array([[3, 4, 5, 6],
[9, 10, 11, 12]], dtype='int64'),
index=MultiIndex(levels=[[1, 7], [2, 8]],
labels=[[0, 1], [0, 1]]),
columns=MultiIndex(levels=[[u('a'), u('b'), u('c')], [u('s'), u('t'), u('u'), u('v')]],
labels=[[0, 1, 2, 2],
[0, 1, 2, 3]],
names=[None, u('q')]))
data = """,a,a,b,c,c
q,r,s,t,u,v
1,2,3,4,5,6
7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=[0, 1])
tm.assert_frame_equal(expected, result)
def test_pass_names_with_index(self):
lines = self.data1.split('\n')
no_header = '\n'.join(lines[1:])
# regular index
names = ['index', 'A', 'B', 'C', 'D']
df = self.read_csv(StringIO(no_header), index_col=0, names=names)
expected = self.read_csv(StringIO(self.data1), index_col=0)
tm.assert_frame_equal(df, expected)
# multi index
data = """index1,index2,A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
lines = data.split('\n')
no_header = '\n'.join(lines[1:])
names = ['index1', 'index2', 'A', 'B', 'C', 'D']
df = self.read_csv(StringIO(no_header), index_col=[0, 1],
names=names)
expected = self.read_csv(StringIO(data), index_col=[0, 1])
tm.assert_frame_equal(df, expected)
df = self.read_csv(StringIO(data), index_col=['index1', 'index2'])
tm.assert_frame_equal(df, expected)
def test_multi_index_no_level_names(self):
data = """index1,index2,A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
data2 = """A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
lines = data.split('\n')
no_header = '\n'.join(lines[1:])
names = ['A', 'B', 'C', 'D']
df = self.read_csv(StringIO(no_header), index_col=[0, 1],
header=None, names=names)
expected = self.read_csv(StringIO(data), index_col=[0, 1])
tm.assert_frame_equal(df, expected, check_names=False)
# 2 implicit first cols
df2 = self.read_csv(StringIO(data2))
tm.assert_frame_equal(df2, df)
# reverse order of index
df = self.read_csv(StringIO(no_header), index_col=[1, 0], names=names,
header=None)
expected = self.read_csv(StringIO(data), index_col=[1, 0])
tm.assert_frame_equal(df, expected, check_names=False)
def test_multi_index_parse_dates(self):
data = """index1,index2,A,B,C
20090101,one,a,1,2
20090101,two,b,3,4
20090101,three,c,4,5
20090102,one,a,1,2
20090102,two,b,3,4
20090102,three,c,4,5
20090103,one,a,1,2
20090103,two,b,3,4
20090103,three,c,4,5
"""
df = self.read_csv(StringIO(data), index_col=[0, 1], parse_dates=True)
self.assertIsInstance(df.index.levels[0][0],
(datetime, np.datetime64, Timestamp))
# specify columns out of order!
df2 = self.read_csv(StringIO(data), index_col=[1, 0], parse_dates=True)
self.assertIsInstance(df2.index.levels[1][0],
(datetime, np.datetime64, Timestamp))
def test_skip_footer(self):
# GH 6607
# Test currently only valid with python engine because
# skip_footer != 0. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError, 'skip_footer'):
data = """A,B,C
1,2,3
4,5,6
7,8,9
want to skip this
also also skip this
"""
result = self.read_csv(StringIO(data), skip_footer=2)
no_footer = '\n'.join(data.split('\n')[:-3])
expected = self.read_csv(StringIO(no_footer))
tm.assert_frame_equal(result, expected)
result = self.read_csv(StringIO(data), nrows=3)
tm.assert_frame_equal(result, expected)
# skipfooter alias
result = read_csv(StringIO(data), skipfooter=2)
no_footer = '\n'.join(data.split('\n')[:-3])
expected = read_csv(StringIO(no_footer))
tm.assert_frame_equal(result, expected)
def test_no_unnamed_index(self):
data = """ id c0 c1 c2
0 1 0 a b
1 2 0 c d
2 2 2 e f
"""
df = self.read_table(StringIO(data), sep=' ')
self.assertIsNone(df.index.name)
def test_converters(self):
data = """A,B,C,D
a,1,2,01/01/2009
b,3,4,01/02/2009
c,4,5,01/03/2009
"""
from pandas.compat import parse_date
result = self.read_csv(StringIO(data), converters={'D': parse_date})
result2 = self.read_csv(StringIO(data), converters={3: parse_date})
expected = self.read_csv(StringIO(data))
expected['D'] = expected['D'].map(parse_date)
tm.assertIsInstance(result['D'][0], (datetime, Timestamp))
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(result2, expected)
# converter that produces an integer (the year) from the date string
converter = lambda x: int(x.split('/')[2])
result = self.read_csv(StringIO(data), converters={'D': converter})
expected = self.read_csv(StringIO(data))
expected['D'] = expected['D'].map(converter)
tm.assert_frame_equal(result, expected)
def test_converters_no_implicit_conv(self):
# GH2184
data = """000102,1.2,A\n001245,2,B"""
f = lambda x: x.strip()
converter = {0: f}
df = self.read_csv(StringIO(data), header=None, converters=converter)
self.assertEqual(df[0].dtype, object)
def test_converters_euro_decimal_format(self):
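# converters turn comma-decimal European numbers (e.g. '1521,1541') into floats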
data = """Id;Number1;Number2;Text1;Text2;Number3
1;1521,1541;187101,9543;ABC;poi;4,738797819
2;121,12;14897,76;DEF;uyt;0,377320872
3;878,158;108013,434;GHI;rez;2,735694704"""
f = lambda x: float(x.replace(",", "."))
converter = {'Number1': f, 'Number2': f, 'Number3': f}
df2 = self.read_csv(StringIO(data), sep=';', converters=converter)
self.assertEqual(df2['Number1'].dtype, float)
self.assertEqual(df2['Number2'].dtype, float)
self.assertEqual(df2['Number3'].dtype, float)
def test_converter_return_string_bug(self):
# GH #583
data = """Id;Number1;Number2;Text1;Text2;Number3
1;1521,1541;187101,9543;ABC;poi;4,738797819
2;121,12;14897,76;DEF;uyt;0,377320872
3;878,158;108013,434;GHI;rez;2,735694704"""
f = lambda x: float(x.replace(",", "."))
converter = {'Number1': f, 'Number2': f, 'Number3': f}
df2 = self.read_csv(StringIO(data), sep=';', converters=converter)
self.assertEqual(df2['Number1'].dtype, float)
def test_read_table_buglet_4x_multiindex(self):
# GH 6607
# Parsing multi-level index currently causes an error in the C parser.
# Temporarily copied to TestPythonParser.
# Here test that CParserError is raised:
with tm.assertRaises(pandas.parser.CParserError):
text = """ A B C D E
one two three four
a b 10.0032 5 -0.5109 -2.3358 -0.4645 0.05076 0.3640
a q 20 4 0.4473 1.4152 0.2834 1.00661 0.1744
x q 30 3 -0.6662 -0.5243 -0.3580 0.89145 2.5838"""
# it works!
df = self.read_table(StringIO(text), sep=r'\s+')
self.assertEqual(df.index.names, ('one', 'two', 'three', 'four'))
def test_line_comment(self):
data = """# empty
A,B,C
1,2.,4.#hello world
#ignore this line
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
df = self.read_csv(StringIO(data), comment='#')
tm.assert_almost_equal(df.values, expected)
def test_comment_skiprows(self):
data = """# empty
random line
# second empty line
1,2,3
A,B,C
1,2.,4.
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
# this should ignore the first four lines (including comments)
df = self.read_csv(StringIO(data), comment='#', skiprows=4)
tm.assert_almost_equal(df.values, expected)
def test_comment_header(self):
data = """# empty
# second empty line
1,2,3
A,B,C
1,2.,4.
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
# header should begin at the second non-comment line
df = self.read_csv(StringIO(data), comment='#', header=1)
tm.assert_almost_equal(df.values, expected)
def test_comment_skiprows_header(self):
data = """# empty
# second empty line
# third empty line
X,Y,Z
1,2,3
A,B,C
1,2.,4.
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
# skiprows drops the first 4 lines (including comments); header=1 then
# takes the 'A,B,C' line as the header, discarding the '1,2,3' line
# that precedes it
df = self.read_csv(StringIO(data), comment='#', skiprows=4, header=1)
tm.assert_almost_equal(df.values, expected)
def test_read_csv_parse_simple_list(self):
text = """foo
bar baz
qux foo
foo
bar"""
df = read_csv(StringIO(text), header=None)
expected = DataFrame({0: ['foo', 'bar baz', 'qux foo',
'foo', 'bar']})
tm.assert_frame_equal(df, expected)
def test_parse_dates_custom_euroformat(self):
text = """foo,bar,baz
31/01/2010,1,2
01/02/2010,1,NA
02/02/2010,1,2
"""
parser = lambda d: parse_date(d, dayfirst=True)
df = self.read_csv(StringIO(text),
names=['time', 'Q', 'NTU'], header=0,
index_col=0, parse_dates=True,
date_parser=parser, na_values=['NA'])
exp_index = Index([datetime(2010, 1, 31), datetime(2010, 2, 1),
datetime(2010, 2, 2)], name='time')
expected = DataFrame({'Q': [1, 1, 1], 'NTU': [2, np.nan, 2]},
index=exp_index, columns=['Q', 'NTU'])
tm.assert_frame_equal(df, expected)
parser = lambda d: parse_date(d, day_first=True)
self.assertRaises(Exception, self.read_csv,
StringIO(text), skiprows=[0],
names=['time', 'Q', 'NTU'], index_col=0,
parse_dates=True, date_parser=parser,
na_values=['NA'])
def test_na_value_dict(self):
data = """A,B,C
foo,bar,NA
bar,foo,foo
foo,bar,NA
bar,foo,foo"""
df = self.read_csv(StringIO(data),
na_values={'A': ['foo'], 'B': ['bar']})
expected = DataFrame({'A': [np.nan, 'bar', np.nan, 'bar'],
'B': [np.nan, 'foo', np.nan, 'foo'],
'C': [np.nan, 'foo', np.nan, 'foo']})
tm.assert_frame_equal(df, expected)
data = """\
a,b,c,d
0,NA,1,5
"""
xp = DataFrame({'b': [np.nan], 'c': [1], 'd': [5]}, index=[0])
xp.index.name = 'a'
df = self.read_csv(StringIO(data), na_values={}, index_col=0)
tm.assert_frame_equal(df, xp)
xp = DataFrame({'b': [np.nan], 'd': [5]},
MultiIndex.from_tuples([(0, 1)]))
xp.index.names = ['a', 'c']
df = self.read_csv(StringIO(data), na_values={}, index_col=[0, 2])
tm.assert_frame_equal(df, xp)
xp = DataFrame({'b': [np.nan], 'd': [5]},
MultiIndex.from_tuples([(0, 1)]))
xp.index.names = ['a', 'c']
df = self.read_csv(StringIO(data), na_values={}, index_col=['a', 'c'])
tm.assert_frame_equal(df, xp)
@tm.network
def test_url(self):
# HTTP(S)
url = ('https://raw.github.com/pydata/pandas/master/'
'pandas/io/tests/data/salary.table')
url_table = self.read_table(url)
dirpath = tm.get_data_path()
localtable = os.path.join(dirpath, 'salary.table')
local_table = self.read_table(localtable)
tm.assert_frame_equal(url_table, local_table)
# TODO: ftp testing
@slow
def test_file(self):
# FILE
if sys.version_info[:2] < (2, 6):
raise nose.SkipTest("file:// not supported with Python < 2.6")
dirpath = tm.get_data_path()
localtable = os.path.join(dirpath, 'salary.table')
local_table = self.read_table(localtable)
try:
url_table = self.read_table('file://localhost/' + localtable)
except URLError:
# fails on some systems
raise nose.SkipTest("failing on %s" %
' '.join(platform.uname()).strip())
tm.assert_frame_equal(url_table, local_table)
def test_parse_tz_aware(self):
import pytz
# #1693
data = StringIO("Date,x\n2012-06-13T01:39:00Z,0.5")
# it works
result = read_csv(data, index_col=0, parse_dates=True)
stamp = result.index[0]
self.assertEqual(stamp.minute, 39)
try:
self.assertIs(result.index.tz, pytz.utc)
except AssertionError: # hello Yaroslav
arr = result.index.to_pydatetime()
result = tools.to_datetime(arr, utc=True)[0]
self.assertEqual(stamp.minute, result.minute)
self.assertEqual(stamp.hour, result.hour)
self.assertEqual(stamp.day, result.day)
def test_multiple_date_cols_index(self):
data = """\
ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir
KORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000"""
xp = self.read_csv(StringIO(data), parse_dates={'nominal': [1, 2]})
df = self.read_csv(StringIO(data), parse_dates={'nominal': [1, 2]},
index_col='nominal')
tm.assert_frame_equal(xp.set_index('nominal'), df)
df2 = self.read_csv(StringIO(data), parse_dates={'nominal': [1, 2]},
index_col=0)
tm.assert_frame_equal(df2, df)
df3 = self.read_csv(StringIO(data), parse_dates=[[1, 2]], index_col=0)
tm.assert_frame_equal(df3, df, check_names=False)
def test_multiple_date_cols_chunked(self):
df = self.read_csv(StringIO(self.ts_data), parse_dates={
'nominal': [1, 2]}, index_col='nominal')
reader = self.read_csv(StringIO(self.ts_data), parse_dates={'nominal':
[1, 2]}, index_col='nominal', chunksize=2)
chunks = list(reader)
self.assertNotIn('nominalTime', df)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
def test_multiple_date_col_named_components(self):
xp = self.read_csv(StringIO(self.ts_data),
parse_dates={'nominal': [1, 2]},
index_col='nominal')
colspec = {'nominal': ['date', 'nominalTime']}
df = self.read_csv(StringIO(self.ts_data), parse_dates=colspec,
index_col='nominal')
tm.assert_frame_equal(df, xp)
def test_multiple_date_col_multiple_index(self):
df = self.read_csv(StringIO(self.ts_data),
parse_dates={'nominal': [1, 2]},
index_col=['nominal', 'ID'])
xp = self.read_csv(StringIO(self.ts_data),
parse_dates={'nominal': [1, 2]})
tm.assert_frame_equal(xp.set_index(['nominal', 'ID']), df)
def test_comment(self):
data = """A,B,C
1,2.,4.#hello world
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
df = self.read_csv(StringIO(data), comment='#')
tm.assert_almost_equal(df.values, expected)
df = self.read_table(StringIO(data), sep=',', comment='#',
na_values=['NaN'])
tm.assert_almost_equal(df.values, expected)
def test_bool_na_values(self):
data = """A,B,C
True,False,True
NA,True,False
False,NA,True"""
result = self.read_csv(StringIO(data))
expected = DataFrame({'A': np.array([True, nan, False], dtype=object),
'B': np.array([False, True, nan], dtype=object),
'C': [True, False, True]})
tm.assert_frame_equal(result, expected)
def test_nonexistent_path(self):
# don't segfault pls #2428
path = '%s.csv' % tm.rands(10)
self.assertRaises(Exception, self.read_csv, path)
def test_missing_trailing_delimiters(self):
data = """A,B,C,D
1,2,3,4
1,3,3,
1,4,5"""
result = self.read_csv(StringIO(data))
self.assertTrue(result['D'].isnull()[1:].all())
def test_skipinitialspace(self):
s = ('"09-Apr-2012", "01:10:18.300", 2456026.548822908, 12849, '
'1.00361, 1.12551, 330.65659, 0355626618.16711, 73.48821, '
'314.11625, 1917.09447, 179.71425, 80.000, 240.000, -350, '
'70.06056, 344.98370, 1, 1, -0.689265, -0.692787, '
'0.212036, 14.7674, 41.605, -9999.0, -9999.0, '
'-9999.0, -9999.0, -9999.0, -9999.0, 000, 012, 128')
sfile = StringIO(s)
# it's 33 columns
result = self.read_csv(sfile, names=lrange(33), na_values=['-9999.0'],
header=None, skipinitialspace=True)
self.assertTrue(pd.isnull(result.ix[0, 29]))
def test_utf16_bom_skiprows(self):
# #2298
data = u("""skip this
skip this too
A\tB\tC
1\t2\t3
4\t5\t6""")
data2 = u("""skip this
skip this too
A,B,C
1,2,3
4,5,6""")
path = '__%s__.csv' % tm.rands(10)
with tm.ensure_clean(path) as path:
for sep, dat in [('\t', data), (',', data2)]:
for enc in ['utf-16', 'utf-16le', 'utf-16be']:
bytes = dat.encode(enc)
with open(path, 'wb') as f:
f.write(bytes)
s = BytesIO(dat.encode('utf-8'))
if compat.PY3:
# somewhat artificial, since the parser never actually sees raw bytes here
from io import TextIOWrapper
s = TextIOWrapper(s, encoding='utf-8')
result = self.read_csv(path, encoding=enc, skiprows=2,
sep=sep)
expected = self.read_csv(s, encoding='utf-8', skiprows=2,
sep=sep)
tm.assert_frame_equal(result, expected)
def test_utf16_example(self):
path = tm.get_data_path('utf16_ex.txt')
# it works! and is the right length
result = self.read_table(path, encoding='utf-16')
self.assertEqual(len(result), 50)
if not compat.PY3:
buf = BytesIO(open(path, 'rb').read())
result = self.read_table(buf, encoding='utf-16')
self.assertEqual(len(result), 50)
def test_converters_corner_with_nas(self):
# skip aberration observed on Win64 Python 3.2.2
if hash(np.int64(-1)) != -2:
raise nose.SkipTest("skipping because of windows hash on Python"
" 3.2.2")
csv = """id,score,days
1,2,12
2,2-5,
3,,14+
4,6-12,2"""
def convert_days(x):
x = x.strip()
if not x:
return np.nan
is_plus = x.endswith('+')
if is_plus:
x = int(x[:-1]) + 1
else:
x = int(x)
return x
def convert_days_sentinel(x):
x = x.strip()
if not x:
return np.nan
is_plus = x.endswith('+')
if is_plus:
x = int(x[:-1]) + 1
else:
x = int(x)
return x
def convert_score(x):
x = x.strip()
if not x:
return np.nan
if x.find('-') > 0:
valmin, valmax = lmap(int, x.split('-'))
val = 0.5 * (valmin + valmax)
else:
val = float(x)
return val
fh = StringIO(csv)
result = self.read_csv(fh, converters={'score': convert_score,
'days': convert_days},
na_values=['', None])
self.assertTrue(pd.isnull(result['days'][1]))
fh = StringIO(csv)
result2 = self.read_csv(fh, converters={'score': convert_score,
'days': convert_days_sentinel},
na_values=['', None])
tm.assert_frame_equal(result, result2)
def test_unicode_encoding(self):
pth = tm.get_data_path('unicode_series.csv')
result = self.read_csv(pth, header=None, encoding='latin-1')
result = result.set_index(0)
got = result[1][1632]
expected = u('\xc1 k\xf6ldum klaka (Cold Fever) (1994)')
self.assertEqual(got, expected)
def test_trailing_delimiters(self):
# #2442. grumble grumble
data = """A,B,C
1,2,3,
4,5,6,
7,8,9,"""
result = self.read_csv(StringIO(data), index_col=False)
expected = DataFrame({'A': [1, 4, 7], 'B': [2, 5, 8],
'C': [3, 6, 9]})
tm.assert_frame_equal(result, expected)
def test_escapechar(self):
# http://stackoverflow.com/questions/13824840/feature-request-for-
# pandas-read-csv
data = '''SEARCH_TERM,ACTUAL_URL
"bra tv bord","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"
"tv p\xc3\xa5 hjul","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"
"SLAGBORD, \\"Bergslagen\\", IKEA:s 1700-tals serie","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"'''
result = self.read_csv(StringIO(data), escapechar='\\',
quotechar='"', encoding='utf-8')
self.assertEqual(result['SEARCH_TERM'][2],
'SLAGBORD, "Bergslagen", IKEA:s 1700-tals serie')
self.assertTrue(np.array_equal(result.columns,
['SEARCH_TERM', 'ACTUAL_URL']))
def test_header_names_backward_compat(self):
# #2539
data = '1,2,3\n4,5,6'
result = self.read_csv(StringIO(data), names=['a', 'b', 'c'])
expected = self.read_csv(StringIO(data), names=['a', 'b', 'c'],
header=None)
tm.assert_frame_equal(result, expected)
data2 = 'foo,bar,baz\n' + data
result = self.read_csv(StringIO(data2), names=['a', 'b', 'c'],
header=0)
tm.assert_frame_equal(result, expected)
def test_int64_min_issues(self):
# #2599
data = 'A,B\n0,0\n0,'
result = self.read_csv(StringIO(data))
expected = DataFrame({'A': [0, 0], 'B': [0, np.nan]})
tm.assert_frame_equal(result, expected)
def test_parse_integers_above_fp_precision(self):
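# values differ only beyond float64 precision (~16 significant digits),
# so they must be parsed as int64 without rounding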
data = """Numbers
17007000002000191
17007000002000191
17007000002000191
17007000002000191
17007000002000192
17007000002000192
17007000002000192
17007000002000192
17007000002000192
17007000002000194"""
result = self.read_csv(StringIO(data))
expected = DataFrame({'Numbers': [17007000002000191,
17007000002000191,
17007000002000191,
17007000002000191,
17007000002000192,
17007000002000192,
17007000002000192,
17007000002000192,
17007000002000192,
17007000002000194]})
self.assertTrue(np.array_equal(result['Numbers'], expected['Numbers']))
def test_usecols_index_col_conflict(self):
# Issue 4201 Test that index_col as integer reflects usecols
data = """SecId,Time,Price,P2,P3
10000,2013-5-11,100,10,1
500,2013-5-12,101,11,1
"""
expected = DataFrame({'Price': [100, 101]}, index=[
datetime(2013, 5, 11), datetime(2013, 5, 12)])
expected.index.name = 'Time'
df = self.read_csv(StringIO(data), usecols=[
'Time', 'Price'], parse_dates=True, index_col=0)
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(data), usecols=[
'Time', 'Price'], parse_dates=True, index_col='Time')
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(data), usecols=[
1, 2], parse_dates=True, index_col='Time')
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(data), usecols=[
1, 2], parse_dates=True, index_col=0)
tm.assert_frame_equal(expected, df)
expected = DataFrame(
{'P3': [1, 1], 'Price': (100, 101), 'P2': (10, 11)})
expected = expected.set_index(['Price', 'P2'])
df = self.read_csv(StringIO(data), usecols=[
'Price', 'P2', 'P3'], parse_dates=True, index_col=['Price', 'P2'])
tm.assert_frame_equal(expected, df)
def test_chunks_have_consistent_numerical_type(self):
integers = [str(i) for i in range(499999)]
data = "a\n" + "\n".join(integers + ["1.0", "2.0"] + integers)
with tm.assert_produces_warning(False):
df = self.read_csv(StringIO(data))
# Assert that types were coerced.
self.assertTrue(type(df.a[0]) is np.float64)
self.assertEqual(df.a.dtype, np.float64)
def test_warn_if_chunks_have_mismatched_type(self):
# See test in TestCParserLowMemory.
integers = [str(i) for i in range(499999)]
data = "a\n" + "\n".join(integers + ['a', 'b'] + integers)
with tm.assert_produces_warning(False):
df = self.read_csv(StringIO(data))
self.assertEqual(df.a.dtype, object)
def test_usecols(self):
data = """\
a,b,c
1,2,3
4,5,6
7,8,9
10,11,12"""
result = self.read_csv(StringIO(data), usecols=(1, 2))
result2 = self.read_csv(StringIO(data), usecols=('b', 'c'))
exp = self.read_csv(StringIO(data))
self.assertEqual(len(result.columns), 2)
self.assertTrue((result['b'] == exp['b']).all())
self.assertTrue((result['c'] == exp['c']).all())
tm.assert_frame_equal(result, result2)
result = self.read_csv(StringIO(data), usecols=[1, 2], header=0,
names=['foo', 'bar'])
expected = self.read_csv(StringIO(data), usecols=[1, 2])
expected.columns = ['foo', 'bar']
tm.assert_frame_equal(result, expected)
data = """\
1,2,3
4,5,6
7,8,9
10,11,12"""
result = self.read_csv(StringIO(data), names=['b', 'c'],
header=None, usecols=[1, 2])
expected = self.read_csv(StringIO(data), names=['a', 'b', 'c'],
header=None)
expected = expected[['b', 'c']]
tm.assert_frame_equal(result, expected)
result2 = self.read_csv(StringIO(data), names=['a', 'b', 'c'],
header=None, usecols=['b', 'c'])
tm.assert_frame_equal(result2, result)
# 5766
result = self.read_csv(StringIO(data), names=['a', 'b'],
header=None, usecols=[0, 1])
expected = self.read_csv(StringIO(data), names=['a', 'b', 'c'],
header=None)
expected = expected[['a', 'b']]
tm.assert_frame_equal(result, expected)
# length conflict, passed names and usecols disagree
self.assertRaises(ValueError, self.read_csv, StringIO(data),
names=['a', 'b'], usecols=[1], header=None)
def test_integer_overflow_bug(self):
# #2601
data = "65248E10 11\n55555E55 22\n"
result = self.read_csv(StringIO(data), header=None, sep=' ')
self.assertTrue(result[0].dtype == np.float64)
result = self.read_csv(StringIO(data), header=None, sep=r'\s+')
self.assertTrue(result[0].dtype == np.float64)
def test_catch_too_many_names(self):
# Issue 5156
data = """\
1,2,3
4,,6
7,8,9
10,11,12\n"""
tm.assertRaises(Exception, read_csv, StringIO(data),
header=0, names=['a', 'b', 'c', 'd'])
def test_ignore_leading_whitespace(self):
# GH 6607, GH 3374
data = ' a b c\n 1 2 3\n 4 5 6\n 7 8 9'
result = self.read_table(StringIO(data), sep=r'\s+')
expected = DataFrame({'a': [1, 4, 7], 'b': [2, 5, 8], 'c': [3, 6, 9]})
tm.assert_frame_equal(result, expected)
def test_nrows_and_chunksize_raises_notimplemented(self):
data = 'a b c'
self.assertRaises(NotImplementedError, self.read_csv, StringIO(data),
nrows=10, chunksize=5)
def test_single_char_leading_whitespace(self):
# GH 9710
data = """\
MyColumn
a
b
a
b\n"""
expected = DataFrame({'MyColumn': list('abab')})
result = self.read_csv(StringIO(data), skipinitialspace=True)
tm.assert_frame_equal(result, expected)
def test_chunk_begins_with_newline_whitespace(self):
# GH 10022
data = '\n hello\nworld\n'
result = self.read_csv(StringIO(data), header=None)
self.assertEqual(len(result), 2)
# GH 9735
chunk1 = 'a' * (1024 * 256 - 2) + '\na'
chunk2 = '\n a'
result = pd.read_csv(StringIO(chunk1 + chunk2), header=None)
expected = pd.DataFrame(['a' * (1024 * 256 - 2), 'a', ' a'])
tm.assert_frame_equal(result, expected)
def test_empty_with_index(self):
# GH 10184
data = 'x,y'
result = self.read_csv(StringIO(data), index_col=0)
expected = DataFrame([], columns=['y'], index=Index([], name='x'))
tm.assert_frame_equal(result, expected)
def test_emtpy_with_multiindex(self):
# GH 10467
data = 'x,y,z'
result = self.read_csv(StringIO(data), index_col=['x', 'y'])
expected = DataFrame([], columns=['z'],
index=MultiIndex.from_arrays([[]] * 2, names=['x', 'y']))
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_empty_with_reversed_multiindex(self):
data = 'x,y,z'
result = self.read_csv(StringIO(data), index_col=[1, 0])
expected = DataFrame([], columns=['z'],
index=MultiIndex.from_arrays([[]] * 2, names=['y', 'x']))
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_empty_index_col_scenarios(self):
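# a header-only file should honour every index_col form: None, False, int, str, and lists thereof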
data = 'x,y,z'
# None, no index
index_col, expected = None, DataFrame([], columns=list('xyz'))
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# False, no index
index_col, expected = False, DataFrame([], columns=list('xyz'))
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# int, first column
index_col, expected = 0, DataFrame(
[], columns=['y', 'z'], index=Index([], name='x'))
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# int, not first column
index_col, expected = 1, DataFrame(
[], columns=['x', 'z'], index=Index([], name='y'))
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# str, first column
index_col, expected = 'x', DataFrame(
[], columns=['y', 'z'], index=Index([], name='x'))
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# str, not the first column
index_col, expected = 'y', DataFrame(
[], columns=['x', 'z'], index=Index([], name='y'))
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# list of int
index_col, expected = [0, 1], DataFrame([], columns=['z'],
index=MultiIndex.from_arrays([[]] * 2, names=['x', 'y']))
tm.assert_frame_equal(self.read_csv(StringIO(data), index_col=index_col), expected,
check_index_type=False)
# list of str
index_col = ['x', 'y']
expected = DataFrame([], columns=['z'], index=MultiIndex.from_arrays(
[[]] * 2, names=['x', 'y']))
tm.assert_frame_equal(self.read_csv(StringIO(data), index_col=index_col), expected,
check_index_type=False)
# list of int, reversed sequence
index_col = [1, 0]
expected = DataFrame([], columns=['z'], index=MultiIndex.from_arrays(
[[]] * 2, names=['y', 'x']))
tm.assert_frame_equal(self.read_csv(StringIO(data), index_col=index_col), expected,
check_index_type=False)
# list of str, reversed sequence
index_col = ['y', 'x']
expected = DataFrame([], columns=['z'], index=MultiIndex.from_arrays(
[[]] * 2, names=['y', 'x']))
tm.assert_frame_equal(self.read_csv(StringIO(data), index_col=index_col), expected,
check_index_type=False)
def test_empty_with_index_col_false(self):
# GH 10413
data = 'x,y'
result = self.read_csv(StringIO(data), index_col=False)
expected = DataFrame([], columns=['x', 'y'])
tm.assert_frame_equal(result, expected)
def test_float_parser(self):
# GH 9565
data = '45e-1,4.5,45.,inf,-inf'
result = self.read_csv(StringIO(data), header=None)
expected = pd.DataFrame([[float(s) for s in data.split(',')]])
tm.assert_frame_equal(result, expected)
def test_int64_overflow(self):
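# 26-digit IDs exceed int64, so the column falls back to object;
# forcing int64 via a converter must raise an overflow error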
data = """ID
00013007854817840016671868
00013007854817840016749251
00013007854817840016754630
00013007854817840016781876
00013007854817840017028824
00013007854817840017963235
00013007854817840018860166"""
result = self.read_csv(StringIO(data))
self.assertTrue(result['ID'].dtype == object)
self.assertRaises((OverflowError, pandas.parser.OverflowError),
self.read_csv, StringIO(data),
converters={'ID': np.int64})
# Just inside int64 range: parse as integer
i_max = np.iinfo(np.int64).max
i_min = np.iinfo(np.int64).min
for x in [i_max, i_min]:
result = pd.read_csv(StringIO(str(x)), header=None)
expected = pd.DataFrame([x])
tm.assert_frame_equal(result, expected)
# Just outside int64 range: parse as string
too_big = i_max + 1
too_small = i_min - 1
for x in [too_big, too_small]:
result = pd.read_csv(StringIO(str(x)), header=None)
expected = pd.DataFrame([str(x)])
tm.assert_frame_equal(result, expected)
def test_empty_with_nrows_chunksize(self):
# GH 9535
expected = pd.DataFrame([], columns=['foo', 'bar'])
result = self.read_csv(StringIO('foo,bar\n'), nrows=10)
tm.assert_frame_equal(result, expected)
result = next(iter(pd.read_csv(StringIO('foo,bar\n'), chunksize=10)))
tm.assert_frame_equal(result, expected)
result = pd.read_csv(StringIO('foo,bar\n'), nrows=10, as_recarray=True)
result = pd.DataFrame(result[2], columns=result[1], index=result[0])
tm.assert_frame_equal(pd.DataFrame.from_records(
result), expected, check_index_type=False)
result = next(
iter(pd.read_csv(StringIO('foo,bar\n'), chunksize=10, as_recarray=True)))
result = pd.DataFrame(result[2], columns=result[1], index=result[0])
tm.assert_frame_equal(pd.DataFrame.from_records(
result), expected, check_index_type=False)
def test_eof_states(self):
# GH 10728 and 10548
# With skip_blank_lines = True
expected = pd.DataFrame([[4, 5, 6]], columns=['a', 'b', 'c'])
# GH 10728
# WHITESPACE_LINE
data = 'a,b,c\n4,5,6\n '
result = self.read_csv(StringIO(data))
tm.assert_frame_equal(result, expected)
# GH 10548
# EAT_LINE_COMMENT
data = 'a,b,c\n4,5,6\n#comment'
result = self.read_csv(StringIO(data), comment='#')
tm.assert_frame_equal(result, expected)
# EAT_CRNL_NOP
data = 'a,b,c\n4,5,6\n\r'
result = self.read_csv(StringIO(data))
tm.assert_frame_equal(result, expected)
# EAT_COMMENT
data = 'a,b,c\n4,5,6#comment'
result = self.read_csv(StringIO(data), comment='#')
tm.assert_frame_equal(result, expected)
# SKIP_LINE
data = 'a,b,c\n4,5,6\nskipme'
result = self.read_csv(StringIO(data), skiprows=[2])
tm.assert_frame_equal(result, expected)
# With skip_blank_lines = False
# EAT_LINE_COMMENT
data = 'a,b,c\n4,5,6\n#comment'
result = self.read_csv(
StringIO(data), comment='#', skip_blank_lines=False)
expected = pd.DataFrame([[4, 5, 6]], columns=['a', 'b', 'c'])
tm.assert_frame_equal(result, expected)
# IN_FIELD
data = 'a,b,c\n4,5,6\n '
result = self.read_csv(StringIO(data), skip_blank_lines=False)
expected = pd.DataFrame(
[['4', 5, 6], [' ', None, None]], columns=['a', 'b', 'c'])
tm.assert_frame_equal(result, expected)
# EAT_CRNL
data = 'a,b,c\n4,5,6\n\r'
result = self.read_csv(StringIO(data), skip_blank_lines=False)
expected = pd.DataFrame(
[[4, 5, 6], [None, None, None]], columns=['a', 'b', 'c'])
tm.assert_frame_equal(result, expected)
# Should produce exceptions
# ESCAPED_CHAR
data = "a,b,c\n4,5,6\n\\"
self.assertRaises(Exception, self.read_csv,
StringIO(data), escapechar='\\')
# ESCAPE_IN_QUOTED_FIELD
data = 'a,b,c\n4,5,6\n"\\'
self.assertRaises(Exception, self.read_csv,
StringIO(data), escapechar='\\')
# IN_QUOTED_FIELD
data = 'a,b,c\n4,5,6\n"'
self.assertRaises(Exception, self.read_csv,
StringIO(data), escapechar='\\')
class TestPythonParser(ParserTests, tm.TestCase):
def test_negative_skipfooter_raises(self):
text = """#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
1/1/2000,1.,2.,3.
1/2/2000,4,5,6
1/3/2000,7,8,9
"""
with tm.assertRaisesRegexp(ValueError,
'skip footer cannot be negative'):
df = self.read_csv(StringIO(text), skipfooter=-1)
def read_csv(self, *args, **kwds):
kwds = kwds.copy()
kwds['engine'] = 'python'
return read_csv(*args, **kwds)
def read_table(self, *args, **kwds):
kwds = kwds.copy()
kwds['engine'] = 'python'
return read_table(*args, **kwds)
def test_sniff_delimiter(self):
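# sep=None makes the python engine sniff the delimiter ('|' here) via csv.Sniffer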
text = """index|A|B|C
foo|1|2|3
bar|4|5|6
baz|7|8|9
"""
data = self.read_csv(StringIO(text), index_col=0, sep=None)
self.assertTrue(data.index.equals(Index(['foo', 'bar', 'baz'])))
data2 = self.read_csv(StringIO(text), index_col=0, delimiter='|')
tm.assert_frame_equal(data, data2)
text = """ignore this
ignore this too
index|A|B|C
foo|1|2|3
bar|4|5|6
baz|7|8|9
"""
data3 = self.read_csv(StringIO(text), index_col=0,
sep=None, skiprows=2)
tm.assert_frame_equal(data, data3)
text = u("""ignore this
ignore this too
index|A|B|C
foo|1|2|3
bar|4|5|6
baz|7|8|9
""").encode('utf-8')
s = BytesIO(text)
if compat.PY3:
# somewhat artificial, since the parser never actually sees raw bytes here
from io import TextIOWrapper
s = TextIOWrapper(s, encoding='utf-8')
data4 = self.read_csv(s, index_col=0, sep=None, skiprows=2,
encoding='utf-8')
tm.assert_frame_equal(data, data4)
def test_regex_separator(self):
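# sep is treated as a regex; rows have one more field than the header, so an unnamed index is inferred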
data = """ A B C D
a 1 2 3 4
b 1 2 3 4
c 1 2 3 4
"""
df = self.read_table(StringIO(data), sep=r'\s+')
expected = self.read_csv(StringIO(re.sub('[ ]+', ',', data)),
index_col=0)
self.assertIsNone(expected.index.name)
tm.assert_frame_equal(df, expected)
def test_1000_fwf(self):
data = """
1 2,334.0 5
10 13 10.
"""
expected = [[1, 2334., 5],
[10, 13, 10]]
df = read_fwf(StringIO(data), colspecs=[(0, 3), (3, 11), (12, 16)],
thousands=',')
tm.assert_almost_equal(df.values, expected)
def test_1000_sep_with_decimal(self):
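# thousands=',' must coexist with '.' decimals when '|' is the field separator ('2,334.01' -> 2334.01)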
data = """A|B|C
1|2,334.01|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334.01, 13],
'C': [5, 10.]
})
df = self.read_csv(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
def test_comment_fwf(self):
data = """
1 2. 4 #hello world
5 NaN 10.0
"""
expected = [[1, 2., 4],
[5, np.nan, 10.]]
df = read_fwf(StringIO(data), colspecs=[(0, 3), (4, 9), (9, 25)],
comment='#')
tm.assert_almost_equal(df.values, expected)
def test_fwf(self):
data_expected = """\
2011,58,360.242940,149.910199,11950.7
2011,59,444.953632,166.985655,11788.4
2011,60,364.136849,183.628767,11806.2
2011,61,413.836124,184.375703,11916.8
2011,62,502.953953,173.237159,12468.3
"""
expected = self.read_csv(StringIO(data_expected), header=None)
data1 = """\
201158 360.242940 149.910199 11950.7
201159 444.953632 166.985655 11788.4
201160 364.136849 183.628767 11806.2
201161 413.836124 184.375703 11916.8
201162 502.953953 173.237159 12468.3
"""
colspecs = [(0, 4), (4, 8), (8, 20), (21, 33), (34, 43)]
df = read_fwf(StringIO(data1), colspecs=colspecs, header=None)
tm.assert_frame_equal(df, expected)
data2 = """\
2011 58 360.242940 149.910199 11950.7
2011 59 444.953632 166.985655 11788.4
2011 60 364.136849 183.628767 11806.2
2011 61 413.836124 184.375703 11916.8
2011 62 502.953953 173.237159 12468.3
"""
df = read_fwf(StringIO(data2), widths=[5, 5, 13, 13, 7], header=None)
tm.assert_frame_equal(df, expected)
# From <NAME>: apparently some non-space filler characters can
# be seen, this is supported by specifying the 'delimiter' character:
# http://publib.boulder.ibm.com/infocenter/dmndhelp/v6r1mx/index.jsp?topic=/com.ibm.wbit.612.help.config.doc/topics/rfixwidth.html
data3 = """\
201158~~~~360.242940~~~149.910199~~~11950.7
201159~~~~444.953632~~~166.985655~~~11788.4
201160~~~~364.136849~~~183.628767~~~11806.2
201161~~~~413.836124~~~184.375703~~~11916.8
201162~~~~502.953953~~~173.237159~~~12468.3
"""
df = read_fwf(
StringIO(data3), colspecs=colspecs, delimiter='~', header=None)
tm.assert_frame_equal(df, expected)
with tm.assertRaisesRegexp(ValueError, "must specify only one of"):
read_fwf(StringIO(data3), colspecs=colspecs, widths=[6, 10, 10, 7])
with tm.assertRaisesRegexp(ValueError, "Must specify either"):
read_fwf(StringIO(data3), colspecs=None, widths=None)
def test_fwf_colspecs_is_list_or_tuple(self):
with tm.assertRaisesRegexp(TypeError,
'column specifications must be a list or '
'tuple.+'):
pd.io.parsers.FixedWidthReader(StringIO(self.data1),
{'a': 1}, ',', '#')
def test_fwf_colspecs_is_list_or_tuple_of_two_element_tuples(self):
with tm.assertRaisesRegexp(TypeError,
'Each column specification must be.+'):
read_fwf(StringIO(self.data1), [('a', 1)])
def test_fwf_colspecs_None(self):
# GH 7079
data = """\
123456
456789
"""
colspecs = [(0, 3), (3, None)]
result = read_fwf(StringIO(data), colspecs=colspecs, header=None)
expected = DataFrame([[123, 456], [456, 789]])
tm.assert_frame_equal(result, expected)
colspecs = [(None, 3), (3, 6)]
result = read_fwf(StringIO(data), colspecs=colspecs, header=None)
expected = DataFrame([[123, 456], [456, 789]])
tm.assert_frame_equal(result, expected)
colspecs = [(0, None), (3, None)]
result = read_fwf(StringIO(data), colspecs=colspecs, header=None)
expected = DataFrame([[123456, 456], [456789, 789]])
tm.assert_frame_equal(result, expected)
colspecs = [(None, None), (3, 6)]
result = read_fwf(StringIO(data), colspecs=colspecs, header=None)
expected = DataFrame([[123456, 456], [456789, 789]])
tm.assert_frame_equal(result, expected)
def test_fwf_regression(self):
# GH 3594
# turns out 'T060' is parsable as a datetime slice!
tzlist = [1, 10, 20, 30, 60, 80, 100]
ntz = len(tzlist)
tcolspecs = [16] + [8] * ntz
tcolnames = ['SST'] + ["T%03d" % z for z in tzlist[1:]]
data = """ 2009164202000 9.5403 9.4105 8.6571 7.8372 6.0612 5.8843 5.5192
2009164203000 9.5435 9.2010 8.6167 7.8176 6.0804 5.8728 5.4869
2009164204000 9.5873 9.1326 8.4694 7.5889 6.0422 5.8526 5.4657
2009164205000 9.5810 9.0896 8.4009 7.4652 6.0322 5.8189 5.4379
2009164210000 9.6034 9.0897 8.3822 7.4905 6.0908 5.7904 5.4039
"""
df = read_fwf(StringIO(data),
index_col=0,
header=None,
names=tcolnames,
widths=tcolspecs,
parse_dates=True,
date_parser=lambda s: datetime.strptime(s, '%Y%j%H%M%S'))
for c in df.columns:
res = df.loc[:, c]
self.assertTrue(len(res))
def test_fwf_for_uint8(self):
data = """1421302965.213420 PRI=3 PGN=0xef00 DST=0x17 SRC=0x28 04 154 00 00 00 00 00 127
1421302964.226776 PRI=6 PGN=0xf002 SRC=0x47 243 00 00 255 247 00 00 71"""
df = read_fwf(StringIO(data),
colspecs=[(0, 17), (25, 26), (33, 37),
(49, 51), (58, 62), (63, 1000)],
names=['time', 'pri', 'pgn', 'dst', 'src', 'data'],
converters={
'pgn': lambda x: int(x, 16),
'src': lambda x: int(x, 16),
'dst': lambda x: int(x, 16),
'data': lambda x: len(x.split(' '))})
expected = DataFrame([[1421302965.213420, 3, 61184, 23, 40, 8],
[1421302964.226776, 6, 61442, None, 71, 8]],
columns=["time", "pri", "pgn", "dst", "src", "data"])
expected["dst"] = expected["dst"].astype(object)
tm.assert_frame_equal(df, expected)
def test_fwf_compression(self):
try:
import gzip
import bz2
except ImportError:
raise nose.SkipTest("Need gzip and bz2 to run this test")
data = """1111111111
2222222222
3333333333""".strip()
widths = [5, 5]
names = ['one', 'two']
expected = read_fwf(StringIO(data), widths=widths, names=names)
if compat.PY3:
data = bytes(data, encoding='utf-8')
comps = [('gzip', gzip.GzipFile), ('bz2', bz2.BZ2File)]
for comp_name, compresser in comps:
with tm.ensure_clean() as path:
tmp = compresser(path, mode='wb')
tmp.write(data)
tmp.close()
result = read_fwf(path, widths=widths, names=names,
compression=comp_name)
tm.assert_frame_equal(result, expected)
def test_BytesIO_input(self):
if not compat.PY3:
raise nose.SkipTest(
"Bytes-related test - only needs to work on Python 3")
result = pd.read_fwf(BytesIO("שלום\nשלום".encode('utf8')), widths=[
2, 2], encoding='utf8')
expected = pd.DataFrame([["של", "ום"]], columns=["של", "ום"])
tm.assert_frame_equal(result, expected)
data = BytesIO("שלום::1234\n562::123".encode('cp1255'))
result = pd.read_table(data, sep="::", engine='python',
encoding='cp1255')
expected = pd.DataFrame([[562, 123]], columns=["שלום", "1234"])
tm.assert_frame_equal(result, expected)
def test_verbose_import(self):
text = """a,b,c,d
one,1,2,3
one,1,2,3
,1,2,3
one,1,2,3
,1,2,3
,1,2,3
one,1,2,3
two,1,2,3"""
buf = StringIO()
sys.stdout = buf
try:
# it works!
df = self.read_csv(StringIO(text), verbose=True)
self.assertEqual(
buf.getvalue(), 'Filled 3 NA values in column a\n')
finally:
sys.stdout = sys.__stdout__
buf = StringIO()
sys.stdout = buf
text = """a,b,c,d
one,1,2,3
two,1,2,3
three,1,2,3
four,1,2,3
five,1,2,3
,1,2,3
seven,1,2,3
eight,1,2,3"""
try:
# it works!
df = self.read_csv(StringIO(text), verbose=True, index_col=0)
self.assertEqual(
buf.getvalue(), 'Filled 1 NA values in column a\n')
finally:
sys.stdout = sys.__stdout__
def test_float_precision_specified(self):
# Should raise an error if float_precision (C parser option) is
# specified
with tm.assertRaisesRegexp(ValueError, "The 'float_precision' option "
"is not supported with the 'python' engine"):
self.read_csv(StringIO('a,b,c\n1,2,3'), float_precision='high')
def test_iteration_open_handle(self):
if PY3:
raise nose.SkipTest(
"won't work in Python 3 {0}".format(sys.version_info))
with tm.ensure_clean() as path:
with open(path, 'wb') as f:
f.write('AAA\nBBB\nCCC\nDDD\nEEE\nFFF\nGGG')
with open(path, 'rb') as f:
for line in f:
if 'CCC' in line:
break
try:
read_table(f, squeeze=True, header=None, engine='c')
except Exception:
pass
else:
raise ValueError('this should not happen')
result = read_table(f, squeeze=True, header=None,
engine='python')
expected = Series(['DDD', 'EEE', 'FFF', 'GGG'], name=0)
tm.assert_series_equal(result, expected)
def test_iterator(self):
# GH 6607
# This is a copy which should eventually be merged into ParserTests
# when the issue with the C parser is fixed
reader = self.read_csv(StringIO(self.data1), index_col=0,
iterator=True)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunk = reader.read(3)
tm.assert_frame_equal(chunk, df[:3])
last_chunk = reader.read(5)
tm.assert_frame_equal(last_chunk, df[3:])
# pass list
lines = list(csv.reader(StringIO(self.data1)))
parser = TextParser(lines, index_col=0, chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunks = list(parser)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
# pass skiprows
parser = TextParser(lines, index_col=0, chunksize=2, skiprows=[1])
chunks = list(parser)
tm.assert_frame_equal(chunks[0], df[1:3])
# test bad parameter (skip_footer)
reader = self.read_csv(StringIO(self.data1), index_col=0,
iterator=True, skip_footer=True)
self.assertRaises(ValueError, reader.read, 3)
treader = self.read_table(StringIO(self.data1), sep=',', index_col=0,
iterator=True)
tm.assertIsInstance(treader, TextFileReader)
# iteration stops cleanly when chunksize is specified, GH 3967
data = """A,B,C
foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
reader = self.read_csv(StringIO(data), iterator=True)
result = list(reader)
expected = DataFrame(dict(A=[1, 4, 7], B=[2, 5, 8], C=[
3, 6, 9]), index=['foo', 'bar', 'baz'])
tm.assert_frame_equal(result[0], expected)
# chunksize = 1
reader = self.read_csv(StringIO(data), chunksize=1)
result = list(reader)
expected = DataFrame(dict(A=[1, 4, 7], B=[2, 5, 8], C=[
3, 6, 9]), index=['foo', 'bar', 'baz'])
self.assertEqual(len(result), 3)
tm.assert_frame_equal(pd.concat(result), expected)
def test_single_line(self):
# GH 6607
# This is a copy which should eventually be merged into ParserTests
# when the issue with the C parser is fixed
# sniff separator
buf = StringIO()
sys.stdout = buf
# printing warning message when engine == 'c' for now
try:
# it works!
df = self.read_csv(StringIO('1,2'), names=['a', 'b'],
header=None, sep=None)
tm.assert_frame_equal(DataFrame({'a': [1], 'b': [2]}), df)
finally:
sys.stdout = sys.__stdout__
def test_malformed(self):
# GH 6607
# This is a copy which should eventually be merged into ParserTests
# when the issue with the C parser is fixed
# all
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
"""
try:
df = self.read_table(
StringIO(data), sep=',', header=1, comment='#')
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 4, saw 5', str(inst))
# skip_footer
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
footer
"""
try:
df = self.read_table(
StringIO(data), sep=',', header=1, comment='#',
skip_footer=1)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 4, saw 5', str(inst))
# first chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#', iterator=True, chunksize=1,
skiprows=[2])
df = it.read(5)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
# middle chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',', header=1,
comment='#', iterator=True, chunksize=1,
skiprows=[2])
df = it.read(1)
it.read(2)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
# last chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#', iterator=True, chunksize=1,
skiprows=[2])
df = it.read(1)
it.read()
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
def test_skip_footer(self):
# GH 6607
# This is a copy which should eventually be merged into ParserTests
# when the issue with the C parser is fixed
data = """A,B,C
1,2,3
4,5,6
7,8,9
want to skip this
also also skip this
"""
result = self.read_csv(StringIO(data), skip_footer=2)
no_footer = '\n'.join(data.split('\n')[:-3])
expected = self.read_csv(StringIO(no_footer))
tm.assert_frame_equal(result, expected)
result = self.read_csv(StringIO(data), nrows=3)
tm.assert_frame_equal(result, expected)
# skipfooter alias
result = self.read_csv(StringIO(data), skipfooter=2)
no_footer = '\n'.join(data.split('\n')[:-3])
expected = self.read_csv(StringIO(no_footer))
tm.assert_frame_equal(result, expected)
def test_decompression_regex_sep(self):
# GH 6607
# This is a copy which should eventually be moved to ParserTests
# when the issue with the C parser is fixed
try:
import gzip
import bz2
except ImportError:
raise nose.SkipTest('need gzip and bz2 to run')
data = open(self.csv1, 'rb').read()
data = data.replace(b',', b'::')
expected = self.read_csv(self.csv1)
with tm.ensure_clean() as path:
tmp = gzip.GzipFile(path, mode='wb')
tmp.write(data)
tmp.close()
result = self.read_csv(path, sep='::', compression='gzip')
tm.assert_frame_equal(result, expected)
with tm.ensure_clean() as path:
tmp = bz2.BZ2File(path, mode='wb')
tmp.write(data)
tmp.close()
result = self.read_csv(path, sep='::', compression='bz2')
tm.assert_frame_equal(result, expected)
self.assertRaises(ValueError, self.read_csv,
path, compression='bz3')
def test_read_table_buglet_4x_multiindex(self):
# GH 6607
# This is a copy which should eventually be merged into ParserTests
# when the issue with multi-level index is fixed in the C parser.
text = """ A B C D E
one two three four
a b 10.0032 5 -0.5109 -2.3358 -0.4645 0.05076 0.3640
a q 20 4 0.4473 1.4152 0.2834 1.00661 0.1744
x q 30 3 -0.6662 -0.5243 -0.3580 0.89145 2.5838"""
# it works!
df = self.read_table(StringIO(text), sep='\s+')
self.assertEqual(df.index.names, ('one', 'two', 'three', 'four'))
# GH 6893
data = ' A B C\na b c\n1 3 7 0 3 6\n3 1 4 1 5 9'
expected = DataFrame.from_records([(1, 3, 7, 0, 3, 6), (3, 1, 4, 1, 5, 9)],
columns=list('abcABC'), index=list('abc'))
actual = self.read_table(StringIO(data), sep='\s+')
tm.assert_frame_equal(actual, expected)
def test_line_comment(self):
data = """# empty
A,B,C
1,2.,4.#hello world
#ignore this line
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
df = self.read_csv(StringIO(data), comment='#')
tm.assert_almost_equal(df.values, expected)
def test_empty_lines(self):
data = """\
A,B,C
1,2.,4.
5.,NaN,10.0
-70,.4,1
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.],
[-70., .4, 1.]]
df = self.read_csv(StringIO(data))
tm.assert_almost_equal(df.values, expected)
df = self.read_csv(StringIO(data.replace(',', ' ')), sep='\s+')
tm.assert_almost_equal(df.values, expected)
expected = [[1., 2., 4.],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[5., np.nan, 10.],
[np.nan, np.nan, np.nan],
[-70., .4, 1.]]
df = self.read_csv(StringIO(data), skip_blank_lines=False)
tm.assert_almost_equal(list(df.values), list(expected))
def test_whitespace_lines(self):
data = """
\t \t\t
\t
A,B,C
\t 1,2.,4.
5.,NaN,10.0
"""
expected = [[1, 2., 4.],
[5., np.nan, 10.]]
df = self.read_csv(StringIO(data))
tm.assert_almost_equal(df.values, expected)
class TestFwfColspaceSniffing(tm.TestCase):
def test_full_file(self):
# File with all values
test = '''index A B C
2000-01-03T00:00:00 0.980268513777 3 foo
2000-01-04T00:00:00 1.04791624281 -4 bar
2000-01-05T00:00:00 0.498580885705 73 baz
2000-01-06T00:00:00 1.12020151869 1 foo
2000-01-07T00:00:00 0.487094399463 0 bar
2000-01-10T00:00:00 0.836648671666 2 baz
2000-01-11T00:00:00 0.157160753327 34 foo'''
colspecs = ((0, 19), (21, 35), (38, 40), (42, 45))
expected = read_fwf(StringIO(test), colspecs=colspecs)
tm.assert_frame_equal(expected, read_fwf(StringIO(test)))
def test_full_file_with_missing(self):
# File with missing values
test = '''index A B C
2000-01-03T00:00:00 0.980268513777 3 foo
2000-01-04T00:00:00 1.04791624281 -4 bar
0.498580885705 73 baz
2000-01-06T00:00:00 1.12020151869 1 foo
2000-01-07T00:00:00 0 bar
2000-01-10T00:00:00 0.836648671666 2 baz
34'''
colspecs = ((0, 19), (21, 35), (38, 40), (42, 45))
expected = read_fwf(StringIO(test), colspecs=colspecs)
tm.assert_frame_equal(expected, read_fwf(StringIO(test)))
def test_full_file_with_spaces(self):
# File with spaces in columns
test = '''
Account Name Balance CreditLimit AccountCreated
101 <NAME> 9315.45 10000.00 1/17/1998
312 <NAME> 90.00 1000.00 8/6/2003
868 <NAME> 0 17000.00 5/25/1985
761 Jada Pinkett-Smith 49654.87 100000.00 12/5/2006
317 <NAME> 789.65 5000.00 2/5/2007
'''.strip('\r\n')
colspecs = ((0, 7), (8, 28), (30, 38), (42, 53), (56, 70))
expected = read_fwf(StringIO(test), colspecs=colspecs)
tm.assert_frame_equal(expected, read_fwf(StringIO(test)))
def test_full_file_with_spaces_and_missing(self):
        # File with spaces and missing values in columns
test = '''
Account Name Balance CreditLimit AccountCreated
101 10000.00 1/17/1998
312 <NAME> 90.00 1000.00 8/6/2003
868 5/25/1985
761 <NAME>-Smith 49654.87 100000.00 12/5/2006
317 <NAME> 789.65
'''.strip('\r\n')
colspecs = ((0, 7), (8, 28), (30, 38), (42, 53), (56, 70))
expected = read_fwf(StringIO(test), colspecs=colspecs)
tm.assert_frame_equal(expected, read_fwf(StringIO(test)))
def test_messed_up_data(self):
# Completely messed up file
test = '''
Account Name Balance Credit Limit Account Created
101 10000.00 1/17/1998
312 <NAME> 90.00 1000.00
761 <NAME> 49654.87 100000.00 12/5/2006
317 <NAME> 789.65
'''.strip('\r\n')
colspecs = ((2, 10), (15, 33), (37, 45), (49, 61), (64, 79))
expected = read_fwf(StringIO(test), colspecs=colspecs)
tm.assert_frame_equal(expected, read_fwf(StringIO(test)))
def test_multiple_delimiters(self):
test = r'''
col1~~~~~col2 col3++++++++++++++++++col4
~~22.....11.0+++foo~~~~~~~~~~<NAME>
33+++122.33\\\bar.........<NAME>
++44~~~~12.01 baz~~<NAME>
~~55 11+++foo++++<NAME>-Smith
..66++++++.03~~~bar <NAME>
'''.strip('\r\n')
colspecs = ((0, 4), (7, 13), (15, 19), (21, 41))
expected = read_fwf(StringIO(test), colspecs=colspecs,
delimiter=' +~.\\')
tm.assert_frame_equal(expected, read_fwf(StringIO(test),
delimiter=' +~.\\'))
def test_variable_width_unicode(self):
if not compat.PY3:
raise nose.SkipTest(
'Bytes-related test - only needs to work on Python 3')
test = '''
שלום שלום
ום שלל
של ום
'''.strip('\r\n')
expected = pd.read_fwf(BytesIO(test.encode('utf8')),
colspecs=[(0, 4), (5, 9)], header=None, encoding='utf8')
tm.assert_frame_equal(expected, read_fwf(BytesIO(test.encode('utf8')),
header=None, encoding='utf8'))
class CParserTests(ParserTests):
    """ base class for CParser testing """
def test_buffer_overflow(self):
# GH9205
# test certain malformed input files that cause buffer overflows in
# tokenizer.c
malfw = "1\r1\r1\r 1\r 1\r" # buffer overflow in words pointer
malfs = "1\r1\r1\r 1\r 1\r11\r" # buffer overflow in stream pointer
malfl = "1\r1\r1\r 1\r 1\r11\r1\r" # buffer overflow in lines pointer
for malf in (malfw, malfs, malfl):
try:
df = self.read_table( | StringIO(malf) | pandas.compat.StringIO |
import argparse
import os.path as osp
import os
import SimpleITK as sitk
import numpy as np
import json
import pandas as pd
CLASSES = ('background', 'femoral bone', 'femoral cartilage', 'tibial bone', 'tibial cartilage')
pd_classes=('femoral bone', 'tibial bone' ,'femoral cartilage', 'tibial cartilage')
pd_metrics=('dice','asd','rsd','msd','vd','voe')
def parse_args():
parser = argparse.ArgumentParser(
description='Convert OAI ZIB MRI annotations to mmsegmentation format')
parser.add_argument('--nifti-path', help='OAI ZIB MRI nifti path')
parser.add_argument('-o', '--out_dir', help='output from oai_zib_mri_back path')
args = parser.parse_args()
return args
def statisticize(result_dict):
result_mean = | pd.DataFrame(columns=pd_metrics,index=pd_classes) | pandas.DataFrame |
# python3
from utils import MyConfigParser
from typing import List
from io import StringIO
from utils import (
get_json_objects,
flatten_json,
process_ds,
)
import pandas as pd
import boto3
def dump_to_s3(s3, bucket: str, category: str, language: str) -> None:
"""
Extract data through REST API and dump to S3 bucket
:param s3: AWS S3 resource
:param bucket: AWS S3 bucket name
:param category: Filter news by category (i.e., "bitcoin")
:param language: Search news with language (i.e, "en")
"""
# set page size to 100 which is max
page_size = 100
# extract 100 most popular articles for category
newsapi = "https://newsapi.org/v2/everything?"
newsapi += f"qInTitle={category}&from={after}&to={before}&"
newsapi += f"language={language}&pageSize={page_size}&"
newsapi += f"sortBy=popularity&apiKey={NEWS_SECRET_ACCESS_KEY}"
monthly_obs = get_json_objects(newsapi)
if len(monthly_obs["articles"]) == 0:
print(
f"""
ERROR: NewsAPI `{category}` has no observations for
specified time period."""
)
return
data = []
for article in monthly_obs["articles"]:
flat_article = flatten_json(article)
data.append(flat_article)
df = pd.DataFrame(data)
df["source_id"].fillna(
df.source_name.str.replace(" ", "_").str.lower(),
inplace=True,
)
# set index to allow easy groupby
df.index = | pd.to_datetime(df["publishedAt"]) | pandas.to_datetime |
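# Editor's sketch (not part of the original script): the query string above is
# assembled by hand; an equivalent request could use requests' `params` argument,
# which handles URL encoding. `after`, `before` and NEWS_SECRET_ACCESS_KEY appear
# to be module-level globals not shown in this excerpt, so they are passed in
# explicitly here as assumptions.
import requests

def fetch_category_articles(category, language, after, before, api_key, page_size=100):
    """Query the NewsAPI 'everything' endpoint and return the parsed JSON dict."""
    params = {
        "qInTitle": category,
        "from": after,
        "to": before,
        "language": language,
        "pageSize": page_size,
        "sortBy": "popularity",
        "apiKey": api_key,
    }
    resp = requests.get("https://newsapi.org/v2/everything", params=params, timeout=30)
    resp.raise_for_status()
    return resp.json()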
"""
Reduce the data to two dimensions and display it
"""
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from sklearn.decomposition import PCA
def plot_data(data:pd.DataFrame, data_std:np.ndarray, title: str):
"""
    Reduce the data to two dimensions and plot it.
    Parameters
    ----------
    data : pandas.DataFrame
        Input data with the clustering group (cluster) column added.
    data_std : numpy.ndarray
        Standardized version of the data.
    title : str
        Name used when saving the output.
"""
pcaer = PCA(n_components=2)
pcaer.fit(data_std)
x_pca = pcaer.transform(data_std)
pca_df = | pd.DataFrame(x_pca) | pandas.DataFrame |
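# Editor's sketch (assumptions labelled): the row above is truncated, so the plot
# described in the docstring is not shown. A minimal version of "project to two
# PCA components and scatter by cluster" could look like this; the 'cluster'
# column name follows the docstring above.
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.decomposition import PCA

def plot_clusters_2d(data, data_std, title):
    x_pca = PCA(n_components=2).fit_transform(data_std)
    pca_df = pd.DataFrame(x_pca, columns=["pc1", "pc2"])
    pca_df["cluster"] = data["cluster"].values
    for label, grp in pca_df.groupby("cluster"):
        plt.scatter(grp["pc1"], grp["pc2"], s=10, label=f"cluster {label}")
    plt.title(title)
    plt.legend()
    plt.savefig(f"{title}.png")
    plt.close()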
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
#-------------read csv---------------------
df_2010_2011 = pd.read_csv("/mnt/nadavrap-students/STS/data/data_Shapira_20200911_2010_2011.csv")
df_2012_2013 = pd.read_csv("/mnt/nadavrap-students/STS/data/data_Shapira_20200911_2012_2013.csv")
df_2014_2015 = pd.read_csv("/mnt/nadavrap-students/STS/data/data_Shapira_20200911_2014_2015.csv")
df_2016_2017 = pd.read_csv("/mnt/nadavrap-students/STS/data/data_Shapira_20200911_2016_2017.csv")
df_2018_2019 = pd.read_csv("/mnt/nadavrap-students/STS/data/data_Shapira_20200911_2018_2019.csv")
df_2010_2011['prcab'].fillna(0)
df_2012_2013['prcab'].fillna(0)
df_2014_2015['prcab'].fillna(0)
df_2016_2017['prcab'].fillna(0)
df_2018_2019['prcab'].fillna(0)
print(df_2018_2019['prcab'])
mask = df_2010_2011['surgyear'] != 2010
df_2011 = df_2010_2011[mask]
df_2010 = df_2010_2011[~mask]
mask2 = df_2012_2013['surgyear'] != 2012
df_2013 = df_2012_2013[mask2]
df_2012 = df_2012_2013[~mask2]
mask3 = df_2014_2015['surgyear'] != 2014
df_2015 = df_2014_2015[mask3]
df_2014 = df_2014_2015[~mask3]
mask4 = df_2016_2017['surgyear'] != 2016
df_2017 = df_2016_2017[mask4]
df_2016 = df_2016_2017[~mask4]
mask5 = df_2018_2019['surgyear'] != 2018
df_2019 = df_2018_2019[mask5]
df_2018 = df_2018_2019[~mask5]
avg_siteid = pd.DataFrame()
avg_surgid = pd.DataFrame()
# #tmpHilla=df_2018_2019.columns
# tmpHilla=pd.DataFrame(df_2018_2019.columns.values.tolist())
# tmpHilla.to_csv("/tmp/pycharm_project_355/columns.csv")
# my_list = df_2010_2011.columns.values.tolist()
# print (my_list)
# print()
# my_list = df_2012_2013.columns.values.tolist()
# print (my_list)
# print()
# my_list = df_2014_2015.columns.values.tolist()
# print (my_list)
# print()
# my_list = df_2016_2017.columns.values.tolist()
# print (my_list)
# print()
# my_list = df_2018_2019.columns.values.tolist()
# print (my_list)
# print()
#-------------------merge all csv--------------------------
# dfMerge1 = pd.merge(df_2010_2011, df_2012_2013, on='surgorder')
# dfMerge2 = pd.merge(dfMerge1, df_2014_2015, on='surgorder')
# dfMerge = pd.merge(dfMerge2, df_2016_2017, on='surgorder')
#dfMerge = pd.merge(df_2010_2011, df_2012_2013, on='SiteID')
#count distinc
#table.groupby('YEARMONTH').CLIENTCODE.nunique()
def groupby_siteid():
df_2010 = df_2010_2011.groupby('siteid')['surgyear'].apply(lambda x: (x== 2010 ).sum()).reset_index(name='2010')
df_2011 = df_2010_2011.groupby('siteid')['surgyear'].apply(lambda x: (x== 2011 ).sum()).reset_index(name='2011')
df_2012 = df_2012_2013.groupby('siteid')['surgyear'].apply(lambda x: (x== 2012 ).sum()).reset_index(name='2012')
df_2013 = df_2012_2013.groupby('siteid')['surgyear'].apply(lambda x: (x== 2013 ).sum()).reset_index(name='2013')
df_2014 = df_2014_2015.groupby('siteid')['surgyear'].apply(lambda x: (x== 2014 ).sum()).reset_index(name='2014')
df_2015 = df_2014_2015.groupby('siteid')['surgyear'].apply(lambda x: (x== 2015 ).sum()).reset_index(name='2015')
df_2016 = df_2016_2017.groupby('siteid')['surgyear'].apply(lambda x: (x== 2016 ).sum()).reset_index(name='2016')
df_2017 = df_2016_2017.groupby('siteid')['surgyear'].apply(lambda x: (x== 2017 ).sum()).reset_index(name='2017')
df_2018 = df_2018_2019.groupby('siteid')['surgyear'].apply(lambda x: (x== 2018 ).sum()).reset_index(name='2018')
df_2019 = df_2018_2019.groupby('siteid')['surgyear'].apply(lambda x: (x== 2019 ).sum()).reset_index(name='2019')
df1 =pd.merge(df_2010, df_2011, on='siteid')
df2 =pd.merge(df1, df_2012, on='siteid')
df3 =pd.merge(df2, df_2013, on='siteid')
df4 =pd.merge(df3, df_2014, on='siteid')
df5 =pd.merge(df4, df_2015, on='siteid')
df6 =pd.merge(df5, df_2016, on='siteid')
df7 =pd.merge(df6, df_2017, on='siteid')
df8 =pd.merge(df7, df_2018, on='siteid')
df_sum_all_Years =pd.merge(df8, df_2019, on='siteid')
cols = df_sum_all_Years.columns.difference(['siteid'])
df_sum_all_Years['Distinct_years'] = df_sum_all_Years[cols].gt(0).sum(axis=1)
cols_sum = df_sum_all_Years.columns.difference(['siteid','Distinct_years'])
df_sum_all_Years['Year_sum'] =df_sum_all_Years.loc[:,cols_sum].sum(axis=1)
df_sum_all_Years['Year_avg'] = df_sum_all_Years['Year_sum']/df_sum_all_Years['Distinct_years']
df_sum_all_Years.to_csv("total op sum all years siteid.csv")
print("details on site id dist:")
print ("num of all sites: ", len(df_sum_all_Years))
less_8 =df_sum_all_Years[df_sum_all_Years['Distinct_years'] !=10]
less_8.to_csv("total op less 10 years siteid.csv")
print("num of sites with less years: ", len(less_8))
x = np.array(less_8['Distinct_years'])
print(np.unique(x))
avg_siteid['siteid'] = df_sum_all_Years['siteid']
avg_siteid['total_year_avg'] = df_sum_all_Years['Year_avg']
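# Editor's note (a compact sketch, not the original implementation): the ten
# groupby/merge steps above can also be expressed as a single crosstab over a
# concatenated frame; `df_all` is an assumed pd.concat of the five two-year frames.
import pandas as pd

def siteid_counts_by_year(df_all):
    counts = pd.crosstab(df_all['siteid'], df_all['surgyear'])
    counts['Distinct_years'] = counts.gt(0).sum(axis=1)
    counts['Year_sum'] = counts.drop(columns='Distinct_years').sum(axis=1)
    counts['Year_avg'] = counts['Year_sum'] / counts['Distinct_years']
    return counts.reset_index()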
def groupby_surgid():
df_2010 = df_2010_2011.groupby('surgid')['surgyear'].apply(lambda x: (x== 2010 ).sum()).reset_index(name='2010')
df_2011 = df_2010_2011.groupby('surgid')['surgyear'].apply(lambda x: (x== 2011 ).sum()).reset_index(name='2011')
df_2012 = df_2012_2013.groupby('surgid')['surgyear'].apply(lambda x: (x== 2012 ).sum()).reset_index(name='2012')
df_2013 = df_2012_2013.groupby('surgid')['surgyear'].apply(lambda x: (x== 2013 ).sum()).reset_index(name='2013')
df_2014 = df_2014_2015.groupby('surgid')['surgyear'].apply(lambda x: (x== 2014 ).sum()).reset_index(name='2014')
df_2015 = df_2014_2015.groupby('surgid')['surgyear'].apply(lambda x: (x== 2015 ).sum()).reset_index(name='2015')
df_2016 = df_2016_2017.groupby('surgid')['surgyear'].apply(lambda x: (x== 2016 ).sum()).reset_index(name='2016')
df_2017 = df_2016_2017.groupby('surgid')['surgyear'].apply(lambda x: (x== 2017 ).sum()).reset_index(name='2017')
df_2018 = df_2018_2019.groupby('surgid')['surgyear'].apply(lambda x: (x== 2018 ).sum()).reset_index(name='2018')
df_2019 = df_2018_2019.groupby('surgid')['surgyear'].apply(lambda x: (x== 2019 ).sum()).reset_index(name='2019')
df1 =pd.merge(df_2010, df_2011, on='surgid')
df2 =pd.merge(df1, df_2012, on='surgid')
df3 =pd.merge(df2, df_2013, on='surgid')
df4 =pd.merge(df3, df_2014, on='surgid')
df5 =pd.merge(df4, df_2015, on='surgid')
df6 =pd.merge(df5, df_2016, on='surgid')
df7 =pd.merge(df6, df_2017, on='surgid')
df8 =pd.merge(df7, df_2018, on='surgid')
df_sum_all_Years =pd.merge(df8, df_2019, on='surgid')
cols = df_sum_all_Years.columns.difference(['surgid'])
df_sum_all_Years['Distinct_years'] = df_sum_all_Years[cols].gt(0).sum(axis=1)
cols_sum = df_sum_all_Years.columns.difference(['surgid','Distinct_years'])
df_sum_all_Years['Year_sum'] =df_sum_all_Years.loc[:,cols_sum].sum(axis=1)
df_sum_all_Years['Year_avg'] = df_sum_all_Years['Year_sum']/df_sum_all_Years['Distinct_years']
df_sum_all_Years.to_csv("sum all years surgid.csv")
print()
print("details of surgid dist:")
print("num of all surgid: ", len(df_sum_all_Years))
less_8 =df_sum_all_Years[df_sum_all_Years['Distinct_years'] !=10]
less_8.to_csv("less 10 years surgid.csv")
print("num of doctors with less years: ", len(less_8))
x = np.array(less_8['Distinct_years'])
print(np.unique(x))
avg_surgid['surgid'] = df_sum_all_Years['surgid']
avg_surgid['total_year_avg'] = df_sum_all_Years['Year_avg']
def groupby_hospid():
df_2010 = df_2010_2011.groupby('hospid')['surgyear'].apply(lambda x: (x== 2010 ).sum()).reset_index(name='2010')
df_2011 = df_2010_2011.groupby('hospid')['surgyear'].apply(lambda x: (x== 2011 ).sum()).reset_index(name='2011')
df_2012 = df_2012_2013.groupby('hospid')['surgyear'].apply(lambda x: (x== 2012 ).sum()).reset_index(name='2012')
df_2013 = df_2012_2013.groupby('hospid')['surgyear'].apply(lambda x: (x== 2013 ).sum()).reset_index(name='2013')
df_2014 = df_2014_2015.groupby('hospid')['surgyear'].apply(lambda x: (x== 2014 ).sum()).reset_index(name='2014')
df_2015 = df_2014_2015.groupby('hospid')['surgyear'].apply(lambda x: (x== 2015 ).sum()).reset_index(name='2015')
df_2016 = df_2016_2017.groupby('hospid')['surgyear'].apply(lambda x: (x== 2016 ).sum()).reset_index(name='2016')
df_2017 = df_2016_2017.groupby('hospid')['surgyear'].apply(lambda x: (x== 2017 ).sum()).reset_index(name='2017')
df_2018 = df_2018_2019.groupby('hospid')['surgyear'].apply(lambda x: (x== 2018 ).sum()).reset_index(name='2018')
df_2019 = df_2018_2019.groupby('hospid')['surgyear'].apply(lambda x: (x== 2019 ).sum()).reset_index(name='2019')
df1 =pd.merge(df_2010, df_2011, on='hospid')
df2 =pd.merge(df1, df_2012, on='hospid')
df3 =pd.merge(df2, df_2013, on='hospid')
df4 =pd.merge(df3, df_2014, on='hospid')
df5 =pd.merge(df4, df_2015, on='hospid')
df6 =pd.merge(df5, df_2016, on='hospid')
df7 =pd.merge(df6, df_2017, on='hospid')
df8 =pd.merge(df7, df_2018, on='hospid')
df_sum_all_Years =pd.merge(df8, df_2019, on='hospid')
cols = df_sum_all_Years.columns.difference(['hospid'])
df_sum_all_Years['Distinct_years'] = df_sum_all_Years[cols].gt(0).sum(axis=1)
cols_sum = df_sum_all_Years.columns.difference(['hospid','Distinct_years'])
df_sum_all_Years['Year_sum'] =df_sum_all_Years.loc[:,cols_sum].sum(axis=1)
df_sum_all_Years['Year_avg'] = df_sum_all_Years['Year_sum']/df_sum_all_Years['Distinct_years']
df_sum_all_Years.to_csv("sum all years hospid.csv")
print(df_sum_all_Years)
print ("num of all sites: ", len(df_sum_all_Years))
less_8 =df_sum_all_Years[df_sum_all_Years['Distinct_years'] !=10]
less_8.to_csv("less 10 years hospid.csv")
print("num of hospital with less years: ", len(less_8))
x = np.array(less_8['Distinct_years'])
print(np.unique(x))
return df_sum_all_Years
def draw_hist(data,num_of_bins,title,x_title,y_title,color):
plt.hist(data, bins=num_of_bins, color=color,ec="black")
plt.title(title)
plt.xlabel(x_title)
plt.ylabel(y_title)
plt.show()
def group_by_count(group_by_value,name):
df_2010_2011_gb = df_2010_2011.groupby(group_by_value)[group_by_value].count().reset_index(name=name)
df_2012_2013_gb = df_2012_2013.groupby(group_by_value)[group_by_value].count().reset_index(name=name)
df_2014_2015_gb = df_2014_2015.groupby(group_by_value)[group_by_value].count().reset_index(name=name)
df_2016_2017_gb = df_2016_2017.groupby(group_by_value)[group_by_value].count().reset_index(name=name)
df_2018_2019_gb = df_2018_2019.groupby(group_by_value)[group_by_value].count().reset_index(name=name)
df_merge_1=pd.merge(df_2010_2011_gb,df_2012_2013_gb, on=group_by_value)
df_merge_2=pd.merge(df_merge_1,df_2014_2015_gb, on=group_by_value)
df_merge_3=pd.merge(df_merge_2,df_2016_2017_gb, on=group_by_value)
df_merge_4=pd.merge(df_merge_3,df_2018_2019_gb, on=group_by_value)
cols = df_merge_4.columns.difference([group_by_value])
df_merge_4[name] = df_merge_4.loc[:,cols].sum(axis=1)
df_new=pd.DataFrame()
df_new[group_by_value] = df_merge_4[group_by_value]
df_new[name] = df_merge_4[name]
return df_new
def groupby_siteid_prcab():
df2010 = df_2010.groupby('siteid')['prcab'].apply(lambda x: (x == 1).sum()).reset_index(name='2010_reop')
df2011 = df_2011.groupby('siteid')['prcab'].apply(lambda x: (x == 1).sum()).reset_index(name='2011_reop')
df2012 = df_2012.groupby('siteid')['prcab'].apply(lambda x: (x == 1).sum()).reset_index(name='2012_reop')
df2013 = df_2013.groupby('siteid')['prcab'].apply(lambda x: (x == 1).sum()).reset_index(name='2013_reop')
df2014 = df_2014.groupby('siteid')['prcab'].apply(lambda x: (x == 1).sum()).reset_index(name='2014_reop')
df2015 = df_2015.groupby('siteid')['prcab'].apply(lambda x: (x == 1).sum()).reset_index(name='2015_reop')
df2016 = df_2016.groupby('siteid')['prcab'].apply(lambda x: (x == 1).sum()).reset_index(name='2016_reop')
df2017 = df_2017.groupby('siteid')['prcab'].apply(lambda x: (x == 1).sum()).reset_index(name='2017_reop')
df2018 = df_2018.groupby('siteid')['prcab'].apply(lambda x: (x == 1).sum()).reset_index(name='2018_reop')
df2019 = df_2019.groupby('siteid')['prcab'].apply(lambda x: (x == 1).sum()).reset_index(name='2019_reop')
df1 = pd.merge(df2010, df2011, on='siteid')
df2 = pd.merge(df1, df2012, on='siteid')
df3 = pd.merge(df2, df2013, on='siteid')
df4 = pd.merge(df3, df2014, on='siteid')
df5 = | pd.merge(df4, df2015, on='siteid') | pandas.merge |
import json
import sys, os
import time
import zipfile
import io
import requests
import pandas as pd
###old from zipline.utils.calendars import get_calendar
from trading_calendars import get_calendar
from datetime import datetime
import pytz
from pytz import timezone as _tz # Python only does once, makes this portable.
# Move to top of algo for better efficiency.
from dotenv import load_dotenv
from zipline.utils.paths import data_root
from zipline.data import bundles
def load_env_file():
# Expecting, e.g.:
# IB_ACCOUNTS=DU1234567,U9876543,DU38383838,U92929292
#
# for Sharadar ingestion
# QUANDL_API_KEY=<KEY>
fname = os.environ.get('HCA_ENV_FILE', "")
if not fname:
msg = "No env variable HCA_ENV_FILE, please set."
print(msg)
#st.write(msg)
sys.exit()
load_dotenv(dotenv_path=fname)
success = True
msg = ""
for k in ["ZIPLINE_ROOT", "QUANDL_API_KEY"]:
if k not in os.environ.keys():
msg += f"Please set value of {k} in {fname}\n"
success = False
print(msg)
if success:
print(f"Successfully loaded config file {fname}!")
else:
#st.write(msg)
sys.exit()
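# Editor's note: a minimal example of the file pointed to by HCA_ENV_FILE, based
# only on the keys checked above (the values are placeholders, not real settings):
#
#     ZIPLINE_ROOT=/home/user/.zipline
#     QUANDL_API_KEY=your_quandl_api_key
#
# Hedged usage sketch (the path is an assumption):
#     os.environ['HCA_ENV_FILE'] = '/path/to/hca.env'
#     load_env_file()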
def download_without_progress(url):
"""
Download data from a URL, returning a BytesIO containing the loaded data.
Parameters
----------
url : str
A URL that can be understood by ``requests.get``.
Returns
-------
data : io.BytesIO
A io.BytesIO containing the downloaded data.
"""
resp = requests.get(url)
resp.raise_for_status()
return io.BytesIO(resp.content)
def download_url_to_targetfile(url, targetfile_parm="/tmp/tartgetfile", table_parm='SF1'):
"""
Download data from a URL, writing a file in target dir.
Parameters
----------
url : str
A URL that can be understood by ``requests.get``.
table : str
A Sharadar table name.
Returns
-------
    status : int
        The HTTP status code; the downloaded content is written to `targetfile_parm`.
"""
targetfile = targetfile_parm
table = table_parm
resp = requests.get(url, timeout=50)
resp.raise_for_status()
with open(targetfile, 'wb') as f:
f.write(resp.content)
#return io.BytesIO(resp.content)
return resp.status_code
### Start: Sharadar table process code:
load_env_file()
api_key = os.environ.get('QUANDL_API_KEY')
if api_key is None:
raise ValueError(
"Please set your QUANDL_API_KEY environment variable and retry."
)
#api_key = '<KEY>' # enter your api key, it can be found in your Quandl account here: https://www.quandl.com/account/profile
#table = SF1' # enter the Sharadar table you would like to retrieve
#csv url = 'https://www.quandl.com/api/v3/datatables/SHARADAR/%s.csv?qopts.export=true&api_key=%s' % (table, api_key) # optionally add parameters to the url to filter the data retrieved, as described in the associated table's documentation, eg here: https://www.quandl.com/databases/SF1/documentation/getting-started
tablef = 'SF1'
tablet = 'TICKERS'
#json
urlf = 'https://www.quandl.com/api/v3/datatables/SHARADAR/%s.json?qopts.export=true&api_key=%s' % (tablef, api_key) # optionally add parameters to the url to filter the data retrieved, as described in the associated table's documentation, eg here: https://www.quandl.com/databases/SF1/documentation/getting-started
urlt = 'https://www.quandl.com/api/v3/datatables/SHARADAR/%s.json?qopts.export=true&api_key=%s' % (tablet, api_key) # optionally add parameters to the url to filter the data retrieved, as described in the associated table's documentation, eg here: https://www.quandl.com/databases/SF1/documentation/getting-started
#version = sys.version.split(' ')[0]
#if version < '3':
#import urllib2
#fn = urllib2.urlopen
#else:
#import urllib
#fn = urllib.request.urlopen
valid = ['fresh','regenerating']
invalid = ['generating']
statusf = statust = ''
while (statusf not in valid) or (statust not in valid):
#Dict = json.loads(fn(url).read())
Dictf = json.loads(download_without_progress(urlf).read())
Dictt = json.loads(download_without_progress(urlt).read())
last_refreshed_timef = Dictf['datatable_bulk_download']['datatable']['last_refreshed_time']
statusf = Dictf['datatable_bulk_download']['file']['status']
linkf = Dictf['datatable_bulk_download']['file']['link']
last_refreshed_timet = Dictt['datatable_bulk_download']['datatable']['last_refreshed_time']
statust = Dictt['datatable_bulk_download']['file']['status']
linkt = Dictt['datatable_bulk_download']['file']['link']
from datetime import datetime
date_string = last_refreshed_timef
last_refreshed_time_dt = datetime.strptime(date_string, '%Y-%m-%d %H:%M:%S %Z')
last_refreshed_time_dir = datetime.strftime(last_refreshed_time_dt, '%Y-%m-%dT%H;%M;%S')
print(last_refreshed_time_dir)
print(statusf)
if statusf not in valid:
time.sleep(60)
print('fetching from %s' % linkf)
this_user_path = os.path.expanduser("~")
zipline_data_root_path = data_root()
fundementals_dir = os.path.join(zipline_data_root_path,'fundem-sharadar-sf1')
#fundementals_dir = os.path.join(this_user_path,'.zipline/data/fundem-sharadar-sf1')
fundem_target_dir = os.path.join(fundementals_dir, last_refreshed_time_dir)
if not os.path.exists(fundem_target_dir):
os.makedirs(fundem_target_dir)
fundem_target_filef = os.path.join(fundem_target_dir, tablef + "_table.zip")
fundem_target_filet = os.path.join(fundem_target_dir, tablet + "_table.zip")
ziplf= (download_url_to_targetfile(linkf, fundem_target_filef, tablef)) # HTTP status code; the file itself is written to disk
ziplt= (download_url_to_targetfile(linkt, fundem_target_filet, tablet)) # HTTP status code; the file itself is written to disk
print("Done: {} scrape: last_refreshed_timef:{} target_dir:{}".format(tablef, last_refreshed_timef, fundem_target_filef))
print("Done: {} scrape: last_refreshed_timet:{} target_dir:{}".format(tablet, last_refreshed_timet, fundem_target_filet))
fundem_csv = None
with zipfile.ZipFile(fundem_target_filef, 'r') as fundy_zip:
fundem_csv = fundy_zip.infolist()[0]
fundy_zip.extractall(fundem_target_dir)
print("fundem_csv={}".format( fundem_csv))
print("End: ingest quandl-sharadar-" + tablef + "-table")
fundem_csv = None
with zipfile.ZipFile(fundem_target_filet, 'r') as fundy_zip:
fundem_csv = fundy_zip.infolist()[0]
fundy_zip.extractall(fundem_target_dir)
print("fundem_csv={}".format( fundem_csv))
print("End: ingest quandl-sharadar-" + tablet + "-table")
##zipString = (download_without_progress(link).read()) #Byte Stream
####zipString = (fn(link).read()) #Byte Stream
##zipString = str(fn(link).read()) #Str Stream
#z = zipfile.ZipFile(io.BytesIO(zipString))
## The following three lines are alternatives. Use one of them
## according to your need:
##foo = z.read('foo.txt') # Reads the data from "foo.txt"
##foo2 = z.read(z.infolist()[0]) # Reads the data from the first file
#z.extractall() # Copies foo.txt to the filesystem
#z.close()
trading_calendar = get_calendar('NYSE')
###Old-orig bundle_data = bundles.load('quandl')
bundle_data = bundles.load('sharadar-prices')
# Fundementals from Quandl stored in a <name>.csv.zip file on local disk now(future place :AWS S3)
dff = pd.read_csv(fundem_target_filef)#, nrows=1000)
dft = pd.read_csv(fundem_target_filet)#, nrows=1000)
#print(dff.head())
#print(dff.describe())
dff=dff[(dff.dimension=='ARQ')] # Only take As-Reported-Quarterly (ARQ) dimension
dft=dft[(dft.table=='SEP')] # Only take SharadarEquityPrices table
###df.loc[:,'Date'] = pd.to_datetime(df.calendardate)
###df
#df['sid'] = 0 #original: np.nan
###df.set_index('Date', inplace=True)
#dff.info()
###if df.index.tzinfo is None:
### df.index = df.index.tz_localize('UTC')
###dates = df.index.unique()
###print("NumberOfDates={} Dates={}".format(len(dates),dates))
current_time = pd.datetime.utcnow()
start_session = dff.datekey.max()
end_session = current_time
extend_sessions = trading_calendar.sessions_in_range(start_session, end_session)
print ("\n {} Table needs to extend sessions from:max datekey:{} tp current date:{} ExtendRange:{}\n".format(tablef, start_session,end_session,extend_sessions))
def get_sid(row, day):
ticker = row.ticker
#day = pd.to_datetime(row.datekey).tz_localize('US/Eastern')
#day = pd.to_datetime(row.calendardate).tz_localize('US/Eastern')
###print("row={} --- rowday={}".format(row,day))
this_ticker = None
try:
this_ticker = bundle_data.asset_finder.lookup_symbol(ticker, as_of_date= day)
this_sid = this_ticker.sid
#print("Good:Date={} ticker = {} result={} this_sid={}".format(day, ticker, this_ticker, this_sid))
except:
#this_sid = np.nan
this_sid = -1
#print("Bad:Date={} ticker = {} result={} this_sid={}".format(day, ticker, this_ticker, this_sid))
return this_sid
def get_cat(row, day, dft):
ticker = row.ticker
#DomComStk_lst= [
#'Domestic Common Stock',
##'ADR Common Stock',
#'Domestic Common Stock Primary Class',
##'Canadian Common Stock',
##'ADR Common Stock Primary Class',
##'Canadian Common Stock Primary Class',
##'Domestic Common Stock Secondary Class', 'Domestic Stock Warrant',
##'Domestic Preferred Stock', 'ADR Stock Warrant',
##'ADR Preferred Stock', 'ADR Common Stock Secondary Class',
##'Canadian Stock Warrant', 'Canadian Preferred Stock', nan, 'ETF',
##'CEF', 'ETN', 'ETD', 'IDX'
#]
#day = pd.to_datetime(row.datekey).tz_localize('US/Eastern')
#day = pd.to_datetime(row.calendardate).tz_localize('US/Eastern')
#print("row={} --- rowday={}".format(row,day))
this_cat = None
this_ticker = None
try:
this_ticker = bundle_data.asset_finder.lookup_symbol(ticker, as_of_date= day)
this_sid = this_ticker.sid
#melt = melt.loc[melt['col'] == melt['variable'], 'value']
this_cat = dft[dft.ticker==ticker].category.values[0] #str type
#print("Good:Date={} ticker = {} cat={} this_sid={}".format(day, ticker, this_cat, this_sid))
except:
this_sid = -1
this_cat = None # empty string
#print("Bad:Date={} ticker = {} cat={} this_sid={}".format(day, ticker, this_cat, this_sid))
return this_cat
def get_exchange(row, day, dft):
ticker = row.ticker
this_cat = None
this_ticker = None
try:
this_ticker = bundle_data.asset_finder.lookup_symbol(ticker, as_of_date= day)
this_sid = this_ticker.sid
#melt = melt.loc[melt['col'] == melt['variable'], 'value']
this_cat = dft[dft.ticker==ticker].exchange.values[0] #str type
#print("Good:Date={} ticker = {} cat={} this_sid={}".format(day, ticker, this_cat, this_sid))
except:
this_sid = -1
this_cat = None # empty string
#print("Bad:Date={} ticker = {} cat={} this_sid={}".format(day, ticker, this_cat, this_sid))
return this_cat
def get_isdelisted(row, day, dft):
ticker = row.ticker
this_cat = None
this_ticker = None
try:
this_ticker = bundle_data.asset_finder.lookup_symbol(ticker, as_of_date= day)
this_sid = this_ticker.sid
#melt = melt.loc[melt['col'] == melt['variable'], 'value']
this_cat = dft[dft.ticker==ticker].isdelisted.values[0] #str type
#print("Good:Date={} ticker = {} cat={} this_sid={}".format(day, ticker, this_cat, this_sid))
except:
this_sid = -1
this_cat = None # empty string
#print("Bad:Date={} ticker = {} cat={} this_sid={}".format(day, ticker, this_cat, this_sid))
return this_cat
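# Editor's design note (sketch): get_cat, get_exchange and get_isdelisted above
# differ only in the TICKERS column they read, so a single parameterised helper
# could replace them. It relies on the module-level `bundle_data` used above.
def get_ticker_field(row, day, dft, field):
    """Return `field` from the TICKERS frame for row.ticker, or None if the
    symbol cannot be resolved in the bundle as of `day`."""
    try:
        bundle_data.asset_finder.lookup_symbol(row.ticker, as_of_date=day)
        return dft[dft.ticker == row.ticker][field].values[0]
    except Exception:
        return None
# e.g. get_ticker_field(row, day, dft, 'category') mirrors get_cat(row, day, dft)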
# Main computation of linking Sharadar SF1 Fundamentals Table to
dff['sid'] = dff.apply(lambda x:get_sid(x, pd.to_datetime(x.datekey).tz_localize('US/Eastern')), axis=1)
#df = df.assign(sid=Sid_col.values)
dff['category']=dff.apply(lambda x:get_cat(x, | pd.to_datetime(x.datekey) | pandas.to_datetime |
import os
import unittest
import random
import sys
import site # so that ai4water directory is in path
ai4_dir = os.path.dirname(os.path.dirname(os.path.abspath(sys.argv[0])))
site.addsitedir(ai4_dir)
import scipy
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from ai4water import Model
from ai4water.preprocessing import DataHandler, SiteDistributedDataHandler
from ai4water.preprocessing.datahandler import MultiLocDataHandler
from ai4water.datasets import load_u1, arg_beach
os.environ['PYTHONHASHSEED'] = '313'
random.seed(313)
np.random.seed(313)
# todo, check last dimension of x,y
# todo test with 3d y
def _check_xy_equal_len(x, prev_y, y, lookback, num_ins, num_outs, num_examples, data_type='training'):
feat_dim = 1
if lookback > 1:
assert x.shape[1] == lookback
feat_dim = 2
assert x.shape[
feat_dim] == num_ins, f"for {data_type} x's shape is {x.shape} while num_ins of dataloader are {num_ins}"
if y is not None:
assert y.shape[1] == num_outs, f"for {data_type} y's shape is {y.shape} while num_outs of dataloader are {num_outs}"
else:
assert num_outs == 0
y = x # just for next statement to run
if prev_y is None:
prev_y = x # just for next statement to run
assert x.shape[0] == y.shape[0] == prev_y.shape[
0], f"for {data_type} xshape: {x.shape}, yshape: {y.shape}, prevyshape: {prev_y.shape}"
if num_examples:
assert x.shape[
0] == num_examples, f'for {data_type} x contains {x.shape[0]} samples while expected samples are {num_examples}'
return
def assert_xy_equal_len(x, prev_y, y, data_loader, num_examples=None, data_type='training'):
if isinstance(x, np.ndarray):
_check_xy_equal_len(x, prev_y, y, data_loader.lookback, data_loader.num_ins, data_loader.num_outs, num_examples,
data_type=data_type)
elif isinstance(x, list):
while len(y)<len(x):
y.append(None)
for idx, i in enumerate(x):
_check_xy_equal_len(i, prev_y[idx], y[idx], data_loader.lookback[idx], data_loader.num_ins[idx],
data_loader.num_outs[idx], num_examples, data_type=data_type
)
elif isinstance(x, dict):
for key, i in x.items():
_check_xy_equal_len(i, prev_y.get(key, None), y.get(key, None), data_loader.lookback[key], data_loader.num_ins[key],
data_loader.num_outs[key], num_examples, data_type=data_type
)
elif x is None: # all should be None
assert all(v is None for v in [x, prev_y, y])
else:
raise ValueError
def _check_num_examples(train_x, val_x, test_x, val_ex, test_ex, tot_obs):
val_examples = 0
if val_ex:
val_examples = val_x.shape[0]
test_examples = 0
if test_ex:
test_examples = test_x.shape[0]
xyz_samples = train_x.shape[0] + val_examples + test_examples
# todo, whould be equal
assert xyz_samples == tot_obs, f"""
data_loader has {tot_obs} examples while sum of train/val/test examples are {xyz_samples}."""
def check_num_examples(train_x, val_x, test_x, val_ex, test_ex, data_loader):
if isinstance(train_x, np.ndarray):
_check_num_examples(train_x, val_x, test_x, val_ex, test_ex, data_loader.tot_obs_for_one_df())
elif isinstance(train_x, list):
for idx in range(len(train_x)):
_check_num_examples(train_x[idx], val_x[idx], test_x[idx], val_ex, test_ex,
data_loader.tot_obs_for_one_df()[idx])
return
def check_inverse_transformation(data, data_loader, y, cols, key):
if cols is None:
# not output columns, so not checking
return
# check that after inverse transformation, we get correct y.
if data_loader.source_is_df:
train_y_ = data_loader.inverse_transform(data=pd.DataFrame(y.reshape(-1, len(cols)), columns=cols), key=key)
train_y_, index = data_loader.deindexify(train_y_, key=key)
compare_individual_item(data, key, cols, train_y_, data_loader)
elif data_loader.source_is_list:
#for idx in range(data_loader.num_sources):
# y_ = y[idx].reshape(-1, len(cols[idx]))
train_y_ = data_loader.inverse_transform(data=y, key=key)
train_y_, _ = data_loader.deindexify(train_y_, key=key)
for idx, y in enumerate(train_y_):
compare_individual_item(data[idx], f'{key}_{idx}', cols[idx], y, data_loader)
elif data_loader.source_is_dict:
train_y_ = data_loader.inverse_transform(data=y, key=key)
train_y_, _ = data_loader.deindexify(train_y_, key=key)
for src_name, val in train_y_.items():
compare_individual_item(data[src_name], f'{key}_{src_name}', cols[src_name], val, data_loader)
def compare_individual_item(data, key, cols, y, data_loader):
if y is None:
return
train_index = data_loader.indexes[key]
if y.__class__.__name__ in ['DataFrame']:
y = y.values
for i, v in zip(train_index, y):
if len(cols) == 1:
if isinstance(train_index, pd.DatetimeIndex):
# if true value in data is None, y's value should also be None
if np.isnan(data[cols].loc[i]).item():
assert np.isnan(v).item()
else:
_t = round(data[cols].loc[i].item(), 0)
_p = round(v.item(), 0)
if not np.allclose(data[cols].loc[i].item(), v.item()):
print(f'true: {_t}, : pred: {_p}, index: {i}, col: {cols}')
else:
if isinstance(v, np.ndarray):
v = round(v.item(), 3)
_true = round(data[cols].loc[i], 3).item()
_p = round(v, 3)
if _true != _p:
print(f'true: {_true}, : pred: {_p}, index: {i}, col: {cols}')
else:
if isinstance(train_index, pd.DatetimeIndex):
assert abs(data[cols].loc[i].sum() - np.nansum(v)) <= 0.00001, f'{data[cols].loc[i].sum()},: {v}'
else:
assert abs(data[cols].iloc[i].sum() - v.sum()) <= 0.00001
def check_kfold_splits(data_handler):
if data_handler.source_is_df:
splits = data_handler.KFold_splits()
for (train_x, train_y), (test_x, test_y) in splits:
... # print(train_x.shape, train_y.shape, test_x.shape, test_y.shape)
return
def assert_uniquenes(train_y, val_y, test_y, out_cols, data_loader):
if isinstance(train_y, list):
assert isinstance(val_y, list)
assert isinstance(test_y, list)
train_y = train_y[0]
val_y = val_y[0]
test_y = test_y[0]
if isinstance(train_y, dict):
train_y = list(train_y.values())[0]
assert isinstance(val_y, dict)
isinstance(test_y, dict)
val_y = list(val_y.values())[0]
test_y = list(test_y.values())[0]
if out_cols is not None:
b = train_y.reshape(-1, )
if val_y is None:
a = test_y.reshape(-1, )
else:
a = val_y.reshape(-1, )
if not len(np.intersect1d(a, b)) == 0:
raise ValueError(f'train and val have overlapping values')
if data_loader.val_data != 'same' and out_cols is not None and val_y is not None and test_y is not None:
a = test_y.reshape(-1,)
b = val_y.reshape(-1,)
assert len(np.intersect1d(a, b)) == 0, 'test and val have overlapping values'
return
def build_and_test_loader(data, config, out_cols, train_ex=None, val_ex=None, test_ex=None, save=True,
assert_uniqueness=True, check_examples=True,
true_train_y=None, true_val_y=None, true_test_y=None):
config['teacher_forcing'] = True # todo
if 'val_fraction' not in config:
config['val_fraction'] = 0.3
if 'test_fraction' not in config:
config['test_fraction'] = 0.3
data_loader = DataHandler(data=data, save=save, verbosity=0, **config)
#dl = DataLoader.from_h5('data.h5')
train_x, prev_y, train_y = data_loader.training_data(key='train')
assert_xy_equal_len(train_x, prev_y, train_y, data_loader, train_ex)
val_x, prev_y, val_y = data_loader.validation_data(key='val')
assert_xy_equal_len(val_x, prev_y, val_y, data_loader, val_ex, data_type='validation')
test_x, prev_y, test_y = data_loader.test_data(key='test')
assert_xy_equal_len(test_x, prev_y, test_y, data_loader, test_ex, data_type='test')
if check_examples:
check_num_examples(train_x, val_x, test_x, val_ex, test_ex, data_loader)
if isinstance(data, str):
data = data_loader.data
check_inverse_transformation(data, data_loader, train_y, out_cols, 'train')
if val_ex:
check_inverse_transformation(data, data_loader, val_y, out_cols, 'val')
if test_ex:
check_inverse_transformation(data, data_loader, test_y, out_cols, 'test')
check_kfold_splits(data_loader)
if assert_uniqueness:
assert_uniquenes(train_y, val_y, test_y, out_cols, data_loader)
if true_train_y is not None:
assert np.allclose(train_y, true_train_y)
if true_val_y is not None:
assert np.allclose(val_y, true_val_y)
if true_test_y is not None:
assert np.allclose(test_y, true_test_y)
return data_loader
class TestAllCases(object):
def __init__(self, input_features, output_features, lookback=3, allow_nan_labels=0, save=True):
self.input_features = input_features
self.output_features = output_features
self.lookback = lookback
self.allow_nan_labels=allow_nan_labels
self.save=save
self.run_all()
def run_all(self):
all_methods = [m for m in dir(self) if callable(getattr(self, m)) and not m.startswith('_') and m not in ['run_all']]
for m in all_methods:
getattr(self, m)()
return
def test_basic(self):
examples = 100
data = np.arange(int(examples * 3), dtype=np.int32).reshape(-1, examples).transpose()
data = pd.DataFrame(data, columns=['a', 'b', 'c'])
config = {'input_features':self.input_features,
'output_features': self.output_features,
'lookback': self.lookback}
tr_examples = 49 - (self.lookback - 2) if self.lookback>1 else 49
val_examples = 22 - (self.lookback - 2) if self.lookback>1 else 22
test_examples = 30 - (self.lookback - 2) if self.lookback>1 else 30
if self.output_features == ['c']:
tty = np.arange(202, 250).reshape(-1, 1, 1)
tvy = np.arange(250, 271).reshape(-1, 1, 1)
ttesty = np.arange(271, 300).reshape(-1, 1, 1)
else:
tty, tvy, ttesty = None, None, None
loader = build_and_test_loader(data, config, self.output_features,
tr_examples, val_examples, test_examples,
save=self.save,
true_train_y=tty,
true_val_y=tvy,
true_test_y=ttesty,
check_examples=True,
)
assert loader.source_is_df
return
def test_with_random(self):
examples = 100
data = np.arange(int(examples * 3), dtype=np.int32).reshape(-1, examples).transpose()
data = pd.DataFrame(data, columns=['a', 'b', 'c'])
config = {'input_features':self.input_features,
'output_features': self.output_features,
'lookback': self.lookback,
'train_data': 'random'}
tr_examples = 49 - (self.lookback - 2) if self.lookback>1 else 49
loader = build_and_test_loader(data, config, self.output_features,
tr_examples, 20, 30,
save=self.save,
)
assert loader.source_is_df
return
def test_drop_remainder(self):
examples = 100
data = np.arange(int(examples * 3), dtype=np.int32).reshape(-1, examples).transpose()
data = pd.DataFrame(data, columns=['a', 'b', 'c'])
config = {'input_features':self.input_features,
'output_features': self.output_features,
'lookback': self.lookback,
'batch_size': 8,
'drop_remainder': True,
'train_data': 'random'}
loader = build_and_test_loader(data, config, self.output_features,
48, 16, 24,
check_examples=False,
save=self.save,
)
assert loader.source_is_df
return
def test_with_same_val_data(self):
# val_data is "same" as and train_data is make based upon fractions.
examples = 100
data = np.arange(int(examples * 3), dtype=np.int32).reshape(-1, examples).transpose()
data = pd.DataFrame(data, columns=['a', 'b', 'c'])
config = {'input_features':self.input_features,
'output_features': self.output_features,
'lookback': self.lookback,
'val_data': 'same'}
if self.output_features == ['c']:
tty = np.arange(202, 271).reshape(-1, 1, 1)
tvy = np.arange(271, 300).reshape(-1, 1, 1)
ttesty = np.arange(271, 300).reshape(-1, 1, 1)
else:
tty, tvy, ttesty = None, None, None
tr_examples = 71 - (self.lookback - 1) if self.lookback > 1 else 71
loader = build_and_test_loader(data, config, self.output_features,
tr_examples, 29, 29,
true_train_y=tty,
true_val_y=tvy,
true_test_y=ttesty,
save=self.save,
check_examples=False
)
assert loader.source_is_df
return
def test_with_same_val_data_and_random(self):
examples = 100
data = np.arange(int(examples * 3), dtype=np.int32).reshape(-1, examples).transpose()
data = | pd.DataFrame(data, columns=['a', 'b', 'c']) | pandas.DataFrame |
"""
Discretisation plus Encoding
- What shall we do with the variable after discretisation?
- should we use the buckets as a numerical variable? or
- should we use the intervals as categorical variable?
The answer is, you can do either.
- If you are building decision tree based algorithms and the output of the discretisation are integers
(each integer referring to a bin), then you can use those directly, as decision trees will pick up non-linear
relationships between the discretised variable and the target.
- If you are building linear models instead, the bins may not necessarily hold a linear relationship with the target.
In this case, it may help improve model performance to treat the bins as categories and to one hot encoding,
or target guided encodings like mean encoding, weight of evidence, or target guided ordinal encoding.
We can easily do so by
In this example
- We will perform equal frequency discretisation followed by target guided ordinal encoding using the titanic dataset
If instead you would like to do weight of evidence or mean target encoding, you need only replace the ."""
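# Editor's sketch of the recipe described above (equal-frequency bins, then the
# bins ordered by their mean target and mapped to integers). Column names follow
# the titanic example below; the choice of 5 bins is an assumption.
import pandas as pd

def discretise_then_ordinal_encode(df, var, target, q=5):
    out = df.copy()
    out[var + '_bin'] = pd.qcut(out[var], q=q, duplicates='drop')
    # rank the bins by the mean of the target, then map each interval to its rank
    ordering = out.groupby(var + '_bin')[target].mean().sort_values().index
    mapping = {interval: rank for rank, interval in enumerate(ordering)}
    out[var + '_enc'] = out[var + '_bin'].map(mapping)
    return out
# e.g. discretise_then_ordinal_encode(data, 'age', 'survived')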
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import KBinsDiscretizer
# load the numerical variables of the Titanic Dataset
data = | pd.read_csv('dataset/titanic.csv',usecols=['age', 'fare', 'survived']) | pandas.read_csv |
# MIT License
#
# Copyright (c) 2017 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# The facenet implmentation has been hard coded into tindetheus. This has been
# hardcoded into tindetheus for the following reasons: 1) there is no setup.py
# for facenet yet. 2) to prevent changes to facenet from breaking tindetheus.
#
# facenet is used to align the database, crop the faces in database, and
# to calculate the embeddings for the database. I've included the copyright
# from facenet below. The specific code that is in this file from facenet
# is within the like_or_dislike_users(self, users) function.
# MIT License
#
# Copyright (c) 2016 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from builtins import input
import sys
import os, shutil
import argparse
import pynder
import pandas as pd
from pynder.errors import RecsTimeout
import matplotlib.pyplot as plt
import imageio
import numpy as np
try:
from urllib.request import urlretrieve
except:
from urllib import urlretrieve
from tindetheus import export_embeddings
from tindetheus import tindetheus_align
import time
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import confusion_matrix
from sklearn.externals import joblib
import tindetheus.facenet_clone.facenet as facenet
from tindetheus.facenet_clone.facenet import to_rgb
import tensorflow as tf
# def to_rgb1(im):
# # convert from grayscale to rgb
# w, h = img.shape
# ret = np.empty((w, h, 3), dtype=np.uint8)
# ret[:, :, 0] = ret[:, :, 1] = ret[:, :, 2] = img
# return ret
def clean_temp_images():
# delete the temp_images dir
shutil.rmtree('temp_images')
os.makedirs('temp_images')
def clean_temp_images_aligned():
# delete the temp images aligned dir
shutil.rmtree('temp_images_aligned')
def download_url_photos(urls,userID,is_temp=False):
# define a function which downloads the pictures of urls
count = 0
image_list = []
if is_temp == True:
os.makedirs('temp_images/temp')
for url in urls:
if is_temp ==True:
image_list.append('temp_images/temp/'+userID+'.'+str(count)+'.jpg')
else:
image_list.append('temp_images/'+userID+'.'+str(count)+'.jpg')
urlretrieve(url, image_list[-1])
count+=1
return image_list
def move_images_temp(image_list,userID):
# move images from temp folder to al_database
count = 0
database_loc = []
for i,j in enumerate(image_list):
new_fname = 'al_database/'+userID+'.'+str(count)+'.jpg'
os.rename(j,new_fname)
database_loc.append(new_fname)
count+=1
return database_loc
def move_images(image_list,userID, didILike):
# move images from temp folder to database
if didILike == 'Like':
fname = 'like/'
else:
fname = 'dislike/'
count = 0
database_loc = []
for i,j in enumerate(image_list):
new_fname = 'database/'+fname+userID+'.'+str(count)+'.jpg'
os.rename(j,new_fname)
database_loc.append(new_fname)
count+=1
return database_loc
def show_images(images):
# use matplotlib to display profile images
n = len(images)
n_col = 3
if n % n_col == 0:
n_row = n // n_col
else:
n_row = n // 3 + 1
plt.figure()
plt.tight_layout()
for j,i in enumerate(images):
temp_image = imageio.imread(i)
if len(temp_image.shape) < 3:
# needs to be converted to rgb
temp_image = to_rgb(temp_image)
plt.subplot(n_row, n_col, j+1)
plt.imshow(temp_image)
plt.axis('off')
plt.subplots_adjust(wspace=0, hspace=0)
plt.show(block=False)
plt.pause(0.1)
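# Editor's note on the grid maths above: with n_col = 3, e.g. 7 images give
# 7 // 3 + 1 = 3 rows and 6 images give 6 // 3 = 2 rows, so every image has a slot.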
def calc_avg_emb():
# a function to create a vector of 128 average embeddings for each
# tinder profile
# get the embeddings per profile
labels = np.load('labels.npy')
label_strings = np.load('label_strings.npy')
embeddings = np.load('embeddings.npy')
image_list = np.load('image_list.npy')
# find the maximum number of images in a profile
split_image_list = []
profile_list = []
for i in image_list:
split_image_list.append(i.replace('/','.').split('.'))
profile_list.append(split_image_list[-1][2])
# conver profile list to pandas index
pl = | pd.Index(profile_list) | pandas.Index |
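# Editor's sketch (names assumed): calc_avg_emb above is cut off at the row
# boundary, but the averaging it describes -- one 128-value embedding per profile --
# can be done with a groupby over the profile index, e.g.:
import pandas as pd

def average_embeddings_per_profile(embeddings, profile_list):
    """embeddings: (n_images, 128) array; profile_list: profile id per image row."""
    emb_df = pd.DataFrame(embeddings, index=pd.Index(profile_list, name='profile'))
    return emb_df.groupby(level='profile').mean()  # one row of 128 means per profile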
import pandas as pd
#import matplotlib.pyplot as plt
import numpy as np
import scipy.stats as sct
#import seaborn as sns
#from statsmodels.distributions.empirical_distribution import ECDF
from sklearn import preprocessing
from scipy.stats import norm
np.random.seed(42)
df = pd.DataFrame({"normal": sct.norm.rvs(20, 4, size=10000),
"binomial": sct.binom.rvs(100, 0.2, size=10000)})
normal = df.loc[:,"normal"]
binomial = df.loc[:,"binomial"]
m_norm = normal.describe().mean()
v_norm = normal.describe().std()
m_binom = binomial.describe().mean()
v_binom = binomial.describe().std()
resultado = (round(m_binom - m_norm, 3), round(v_binom - v_norm,3))
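# Editor's note: for Binomial(n=100, p=0.2) the theoretical mean is n*p = 20 and the
# standard deviation is sqrt(n*p*(1-p)) = 4, i.e. the same first two moments as the
# Normal(20, 4) sample above. Note that `normal.describe().mean()` averages the eight
# describe() statistics (count, mean, std, quartiles, ...) rather than the raw
# samples; `normal.mean()` / `normal.std()` would give the sample moments directly.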
stars = | pd.read_csv("pulsar_stars.csv") | pandas.read_csv |
# -*- coding: utf-8 -*-
import os
import win32com.client
import pandas as pd
from pandas import Series, DataFrame
from eunjeon import Mecab
# from konlpy.tag import Komoran # For Test(2021-02-21)
# from datetime import datetime
import datetime
import re
import argparse
import time
import multiprocessing
import numpy as np
_version_ = '0.40'
# Version History
# v0.40(2021-08-29): After extracting words from MS Word, PowerPoint and text files, add a Source field to the "단어빈도" (word frequency) sheet
# v0.30(2021-04-26): After extracting words from DB table/column comment files, add a Source field to the "단어빈도" (word frequency) sheet
# v0.20(2021-02-21): Version with multiprocessing applied
# v0.10(2021-01-10): Initial version
def get_word_list(df_text) -> DataFrame:
"""
    Extract nouns from the text-extraction DataFrame and return the final output as a DataFrame.
    :param df_text: text extracted from the files (DataFrame)
    :return: extracted nouns and compound words (one or more nouns, or prefix+noun+suffix) as a DataFrame
"""
start_time = time.time()
df_result = DataFrame()
tagger = Mecab()
# tagger = Komoran()
row_idx = 0
for index, row in df_text.iterrows():
row_idx += 1
        if row_idx % 100 == 0:  # print current progress every 100 rows
print('[pid:%d] current: %d, total: %d, progress: %3.2f%%' %
(os.getpid(), row_idx, df_text.shape[0], round(row_idx / df_text.shape[0] * 100, 2)))
file_name = row['FileName']
file_type = row['FileType']
page = row['Page']
text = str(row['Text'])
source = (row['Source'])
is_db = True if row['FileType'] in ('table', 'column') else False
is_db_table = True if row['FileType'] == 'table' else False
is_db_column = True if row['FileType'] == 'column' else False
if is_db:
db = row['DB']
schema = row['Schema']
table = row['Table']
if is_db_column:
column = row['Column']
if text is None or text.strip() == '':
continue
try:
# nouns = mecab.nouns(text)
            # [O]ToDo: also extract spans that include consecutive noun prefixes (XPN) and noun-derived suffixes (XSN)
            # [O]ToDo: when nouns (NNG, NNP) are consecutive, also extract the joined compound noun alongside each noun
            text_pos = tagger.pos(text)
            words = [pos for pos, tag in text_pos if tag in ['NNG', 'NNP', 'SL']]  # NNG: common noun, NNP: proper noun
pos_list = [x for (x, y) in text_pos]
tag_list = [y for (x, y) in text_pos]
pos_str = '/'.join(pos_list) + '/'
tag_str = '/'.join(tag_list) + '/'
iterator = re.finditer('(NNP/|NNG/)+(XSN/)*|(XPN/)+(NNP/|NNG/)+(XSN/)*|(SL/)+', tag_str)
for mo in iterator:
x, y = mo.span()
if x == 0:
start_idx = 0
else:
start_idx = tag_str[:x].count('/')
end_idx = tag_str[:y].count('/')
sub_pos = ''
# if end_idx - start_idx > 1 and not (start_idx == 0 and end_idx == len(tag_list)):
if end_idx - start_idx > 1:
for i in range(start_idx, end_idx):
sub_pos += pos_list[i]
# print('%s[sub_pos]' % sub_pos)
words.append('%s[복합어]' % sub_pos)  # register the compound word as an additional token ('[복합어]' marks it as a compound)
if len(words) >= 1:
# print(nouns, text)
for word in words:
# print(noun, '\t', text)
if not is_db:
# sr_text = Series([file_name, file_type, page, text, word],
# index=['FileName', 'FileType', 'Page', 'Text', 'Word'])
df_word = DataFrame(
{'FileName': [file_name], 'FileType': [file_type], 'Page': [page], 'Text': [text],
'Word': [word], 'Source': [source]})
elif is_db_table:
# sr_text = Series([file_name, file_type, page, text, word, db, schema, table],
# index=['FileName', 'FileType', 'Page', 'Text', 'Word', 'DB', 'Schema', 'Table'])
df_word = DataFrame(
{'FileName': [file_name], 'FileType': [file_type], 'Page': [page], 'Text': [text],
'Word': [word], 'DB': [db], 'Schema': [schema], 'Table': [table], 'Source': [source]})
elif is_db_column:
# sr_text = Series([file_name, file_type, page, text, word, db, schema, table, column],
# index=['FileName', 'FileType', 'Page', 'Text', 'Word', 'DB', 'Schema', 'Table', 'Column'])
df_word = DataFrame(
{'FileName': [file_name], 'FileType': [file_type], 'Page': [page], 'Text': [text],
'Word': [word], 'DB': [db], 'Schema': [schema], 'Table': [table], 'Column': [column],
'Source': [source]})
# df_result = df_result.append(sr_text, ignore_index=True)  # Todo: replace append with concat (done below)
df_result = pd.concat([df_result, df_word], ignore_index=True)
except Exception as ex:
print('[pid:%d] Exception has raised for text: %s' % (os.getpid(), text))
print(ex)
print(
'[pid:%d] input text count:%d, extracted word count: %d' % (os.getpid(), df_text.shape[0], df_result.shape[0]))
end_time = time.time()
# elapsed_time = end_time - start_time
elapsed_time = str(datetime.timedelta(seconds=end_time - start_time))
print('[pid:%d] get_word_list finished. total: %d, elapsed time: %s' %
(os.getpid(), df_text.shape[0], elapsed_time))
return df_result
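# A minimal sketch (an assumption, not shown in this excerpt) of how get_word_list can be
# parallelised with the multiprocessing import above: split the extracted text into chunks,
# map them over a process pool, then concatenate the partial results.
#
#   if __name__ == '__main__':
#       with multiprocessing.Pool(processes=4) as pool:
#           chunks = np.array_split(df_text, 4)
#           df_words = pd.concat(pool.map(get_word_list, chunks), ignore_index=True)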
def get_current_datetime() -> str:
return datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S.%f")
def get_ppt_text(file_name) -> DataFrame:
"""
Extract text from a PowerPoint file and return it as a DataFrame.
:param file_name: input file name (str)
:return: text extracted from the input file
"""
# :return: DataFrame of nouns extracted from the input file's text with a morphological analyzer
start_time = time.time()
print('\r\nget_ppt_text: %s' % file_name)
ppt_app = win32com.client.Dispatch('PowerPoint.Application')
ppt_file = ppt_app.Presentations.Open(file_name, True)
# result = []
df_text = pd.DataFrame()
page_count = 0
for slide in ppt_file.Slides:
slide_number = slide.SlideNumber
page_count += 1
for shape in slide.Shapes:
shape_text = []
text = ''
if shape.HasTable:
col_cnt = shape.Table.Columns.Count
row_cnt = shape.Table.Rows.Count
for row_idx in range(1, row_cnt + 1):
for col_idx in range(1, col_cnt + 1):
text = shape.Table.Cell(row_idx, col_idx).Shape.TextFrame.TextRange.Text
if text != '':
text = text.replace('\r', ' ')
shape_text.append(text)
elif shape.HasTextFrame:
for paragraph in shape.TextFrame.TextRange.Paragraphs():
text = paragraph.Text
if text != '':
shape_text.append(text)
for text in shape_text:
if text.strip() != '':
sr_text = Series([file_name, 'ppt', slide_number, text, f'{file_name}:{slide_number}:{text}'],
index=['FileName', 'FileType', 'Page', 'Text', 'Source'])
df_text = df_text.append(sr_text, ignore_index=True)
# print(result)
ppt_file.Close()
# print(df_result)
print('text count: %s' % str(df_text.shape[0]))
print('page count: %d' % page_count)
# print(df_text.head(10))
# print(df_result.Paragraph)
# return df_result
end_time = time.time()
# elapsed_time = end_time - start_time
elapsed_time = str(datetime.timedelta(seconds=end_time - start_time))
print('[pid:%d] get_ppt_text elapsed time: %s' % (os.getpid(), elapsed_time))
# return get_word_list(df_text)
return df_text
def get_doc_text(file_name) -> DataFrame:
"""
Extract text from a Word document and return it as a DataFrame.
:param file_name: input file name (str)
:return: text extracted from the input file
"""
# :return: DataFrame of nouns extracted from the input file's text with a morphological analyzer
start_time = time.time()
print('\r\nget_doc_text: %s' % file_name)
word_app = win32com.client.Dispatch("Word.Application")
word_file = word_app.Documents.Open(file_name, True)
# result = []
df_text = pd.DataFrame()
page = 0
for paragraph in word_file.Paragraphs:
text = paragraph.Range.Text
page = paragraph.Range.Information(3)  # 3: wdActiveEndPageNumber (page number at the active end of the range)
if text.strip() != '':
sr_text = Series([file_name, 'doc', page, text, f'{file_name}:{page}:{text}'],
index=['FileName', 'FileType', 'Page', 'Text', 'Source'])
df_text = df_text.append(sr_text, ignore_index=True)
word_file.Close()
print('text count: %s' % str(df_text.shape[0]))
print('page count: %d' % page)
end_time = time.time()
# elapsed_time = end_time - start_time
elapsed_time = str(datetime.timedelta(seconds=end_time - start_time))
print('[pid:%d] get_doc_text elapsed time: %s' % (os.getpid(), elapsed_time))
# return get_word_list(df_text)
return df_text
def get_txt_text(file_name) -> DataFrame:
"""
Extract text from a plain-text file and return it as a DataFrame.
:param file_name: input file name (str)
:return: text extracted from the input file
"""
# :return: DataFrame of nouns extracted from the input file's text with a morphological analyzer
start_time = time.time()
print('\r\nget_txt_text: ' + file_name)
df_text = pd.DataFrame()
line_number = 0
with open(file_name, 'rt', encoding='UTF8') as file:
for text in file:
line_number += 1
if text.strip() != '':
sr_text = Series([file_name, 'txt', line_number, text, f'{file_name}:{line_number}:{text}'],
index=['FileName', 'FileType', 'Page', 'Text', 'Source'])
df_text = df_text.append(sr_text, ignore_index=True)
print('text count: %d' % df_text.shape[0])
print('line count: %d' % line_number)
end_time = time.time()
# elapsed_time = end_time - start_time
elapsed_time = str(datetime.timedelta(seconds=end_time - start_time))
print('[pid:%d] get_txt_text elapsed time: %s' % (os.getpid(), elapsed_time))
# return get_word_list(df_text)
return df_text
def make_word_cloud(df_group, now_dt, out_path):
"""
Draw a word cloud from a DataFrame of noun frequencies.
:param df_group: noun-frequency DataFrame
:param now_dt: current date/time string
:param out_path: output path
:return: None
"""
start_time = time.time()
print('\r\nstart make_word_cloud...')
from wordcloud import WordCloud
import matplotlib.pyplot as plt
# font candidates: malgun.ttf, NanumSquare.ttf, NanumSquareR.ttf, NanumMyeongjo.ttf, NanumBarunpenR.ttf, NanumBarunGothic.ttf
wc = WordCloud(font_path='.\\font\\NanumBarunGothic.ttf',
background_color='white',
max_words=500,
width=1800,
height=1000
)
# print(df_group.head(10))
words = df_group.to_dict()['Freq']
# print(words)
# words = df_group.T.to_dict('list')
wc.generate_from_frequencies(words)
wc.to_file('%s\\wordcloud_%s.png' % (out_path, now_dt))
# plt.axis('off')
end_time = time.time()
# elapsed_time = end_time - start_time
elapsed_time = str(datetime.timedelta(seconds=end_time - start_time))
print('make_word_cloud elapsed time: %s' % elapsed_time)
# plt.imshow(wc)
# plt.show()
# Todo: extract text from Hangul word processor (hwp) files
def get_hwp_text(file_name) -> DataFrame:
pass
# Todo: extract text from PDF files
def get_pdf_text(file_name) -> DataFrame:
pass
# [O]ToDo: extract text from DB table and column comments
def get_db_comment_text(file_name) -> DataFrame:
"""
Extract text from a DB comment (Excel) file and return it as a DataFrame.
:param file_name: input file name (str)
:return: text extracted from the input file
"""
# :return: DataFrame of nouns extracted from the input file's text with a morphological analyzer
start_time = time.time()
print('\r\nget_db_comment_text: %s' % file_name)
excel_app = win32com.client.Dispatch('Excel.Application')
full_path_file_name = os.path.abspath(file_name)
excel_file = excel_app.Workbooks.Open(full_path_file_name, True)
# region Table comment
table_comment_sheet = excel_file.Worksheets(1)
last_row = table_comment_sheet.Range("A1").End(-4121).Row # -4121: xlDown
table_comment_range = 'A2:D%s' % (str(last_row))
print('table_comment_range : %s (%d rows)' % (table_comment_range, last_row - 1))
table_comments = table_comment_sheet.Range(table_comment_range).Value2
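# Range(...).Value2 typically arrives from win32com as a tuple of row tuples, which is
# why it is wrapped in list() before being handed to pd.DataFrame below.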
df_table = pd.DataFrame(list(table_comments),
columns=['DB', 'Schema', 'Table', 'Text'])
df_table['FileName'] = full_path_file_name
df_table['FileType'] = 'table'
df_table['Page'] = 0
df_table = df_table[df_table.Text.notnull()]  # drop rows with no Text value
df_table['Source'] = df_table['DB'] + '.' + df_table['Schema'] + '.' + df_table['Table'] \
+ '(' + df_table['Text'].astype(str) + ')'
# print(df_table)
# endregion
# region Column comment
column_comment_sheet = excel_file.Worksheets(2)
last_row = column_comment_sheet.Range("A1").End(-4121).Row # -4121: xlDown
column_comment_range = 'A2:E%s' % (str(last_row))
print('column_comment_range : %s (%d rows)' % (column_comment_range, last_row - 1))
column_comments = column_comment_sheet.Range(column_comment_range).Value2
df_column = pd.DataFrame(list(column_comments),
columns=['DB', 'Schema', 'Table', 'Column', 'Text'])
df_column['FileName'] = full_path_file_name
df_column['FileType'] = 'column'
df_column['Page'] = 0
df_column = df_column[df_column.Text.notnull()]  # drop rows with no Text value
df_column['Source'] = df_column['DB'] + '.' + df_column['Schema'] + '.' + df_column['Table'] \
+ '.' + df_column['Column'] + '(' + df_column['Text'].astype(str) + ')'
# print(df_column)
# endregion
excel_file.Close()
df_text = df_column.append(df_table, ignore_index=True)
# print(df_text)
end_time = time.time()
# elapsed_time = end_time - start_time
elapsed_time = str(datetime.timedelta(seconds=end_time - start_time))
print('[pid:%d] get_db_comment_text elapsed time: %s' % (os.getpid(), elapsed_time))
print('text count: %s' % str(df_text.shape[0]))
# return get_word_list(df_text)
return df_text
def get_file_text(file_name) -> DataFrame:
"""
Extract text from an MS Word, PowerPoint, plain-text, or DB comment (Excel) file.
:param file_name: file name
:return: text extracted from the file (DataFrame)
"""
df_text = | DataFrame() | pandas.DataFrame |
#!/usr/local/bin/python
def pprint_color(obj, flat=False):
jsonpickle.set_preferred_backend('json')
jsonpickle.set_encoder_options('json', sort_keys=True, indent=4)
if flat is True:
parsed = jsonpickle.encode(obj, unpicklable=False)
else:
parsed = jsonpickle.encode(obj, make_refs=True)
print(
highlight(
parsed,
JsonLexer(),
Terminal256Formatter(style='rainbow_dash')
)
)
def get_dict_from_table(table, clones_dict, check_dict):
table_file = open(table, "r")
table_file_dict = dict()
header = []
for row in table_file:
if re.match('^SEQUENCE_ID', row, re.IGNORECASE):
header = row.rstrip().split("\t")
continue
if not header:
print(header)
print("No header in the file")
sys.exit()
row_list = row.rstrip().split("\t")
row_dict = dict(zip(header, row_list))
if check_dict:
if row_list[0] in clones_dict:
table_file_dict[row_list[0]] = row_dict
else:
table_file_dict[row_list[0]] = row_dict
return(table_file_dict)
def get_sequences(igblast_airr_dict, v_germline_sequences, organism, hv_primer, kv_primer, lv_primer, corrected_regions_file_dict):
header = [
'full_input',
'corrected_input',
'full_input_from_start',
'corrected_input_from_start'
]
sequences_dict = dict()
aux_dict = dict()
for key in igblast_airr_dict.keys():
full_input_from_start = ""
corrected_input = ""
corrected_input_from_start = ""
full_input = igblast_airr_dict[key]['sequence']
vdj_sequences = corrected_regions_file_dict[key]['SEQUENCE_VDJ']
vdj_sequences = re.sub("-", "", vdj_sequences)
full_input = re.match(r'(^\S*' + vdj_sequences + ')', full_input).group(1)
fwr1_start = int(igblast_airr_dict[key]['v_sequence_start']) - 1
v_germline_start = int(igblast_airr_dict[key]['v_germline_start']) - 1
v_germline_id = igblast_airr_dict[key]['v_call'].split(",")[0]
if re.search(r"IGH", v_germline_id):
correction_length = int(hv_primer)
elif re.search(r"IGK", v_germline_id):
correction_length = int(kv_primer)
elif re.search(r"IGL", v_germline_id):
correction_length = int(lv_primer)
v_germ_sequence = v_germline_sequences[v_germline_id].seq
if fwr1_start <= v_germline_start:
if v_germline_start > correction_length:
from_start_nth_nt_germ_seq = v_germ_sequence[:v_germline_start]
corrected_input_from_start = from_start_nth_nt_germ_seq + full_input[fwr1_start:]
corrected_input = full_input
full_input_from_start = corrected_input_from_start
else:
from_start_nth_nt_germ_seq = v_germ_sequence[:correction_length]
full_input_end = (correction_length - v_germline_start) + fwr1_start
relative_germline_start = correction_length - full_input_end
germline_overlap_seq = from_start_nth_nt_germ_seq[relative_germline_start:]
corrected_input = germline_overlap_seq + full_input[full_input_end :]
corrected_input_from_start = from_start_nth_nt_germ_seq + full_input[full_input_end:]
full_input_from_start = from_start_nth_nt_germ_seq[:relative_germline_start] + full_input
elif fwr1_start > v_germline_start:
if v_germline_start > correction_length:
from_start_nth_nt_germ_seq = v_germ_sequence[:v_germline_start]
corrected_input_from_start = from_start_nth_nt_germ_seq + full_input[fwr1_start : ]
corrected_input = full_input[:fwr1_start - v_germline_start] + from_start_nth_nt_germ_seq[:v_germline_start] + full_input[fwr1_start: ]
full_input_from_start = corrected_input
else:
from_start_nth_nt_germ_seq = v_germ_sequence[:correction_length]
full_input_end = (correction_length - v_germline_start) + fwr1_start
corrected_input_from_start = from_start_nth_nt_germ_seq + full_input[full_input_end :]
corrected_input = full_input[: fwr1_start - v_germline_start ] + corrected_input_from_start
full_input_from_start = full_input[: fwr1_start - v_germline_start ] + from_start_nth_nt_germ_seq[:v_germline_start] + full_input[fwr1_start:]
sequences_list = [str(full_input), str(corrected_input), str(full_input_from_start), str(corrected_input_from_start)]
aux_dict = dict(zip(header, sequences_list))
sequences_dict[key] = aux_dict
return(sequences_dict)
def check_dict_keys(igblast_dict):
keys_to_check = ['CDR3-IMGT (germline)_from', 'CDR3-IMGT (germline)_to', 'CDR3-IMGT (germline)_length', 'CDR3-IMGT (germline)_matches', 'CDR3-IMGT (germline)_mismatches', 'CDR3-IMGT (germline)_gaps',
'FR1-IMGT_from', 'FR1-IMGT_to', 'FR1-IMGT_length', 'FR1-IMGT_matches', 'FR1-IMGT_mismatches', 'FR1-IMGT_gaps',
'CDR1-IMGT_from', 'CDR1-IMGT_to', 'CDR1-IMGT_length', 'CDR1-IMGT_matches', 'CDR1-IMGT_mismatches', 'CDR1-IMGT_gaps',
'FR2-IMGT_from', 'FR2-IMGT_to', 'FR2-IMGT_length', 'FR2-IMGT_matches', 'FR2-IMGT_mismatches', 'FR2-IMGT_gaps',
'CDR2-IMGT_from', 'CDR2-IMGT_to', 'CDR2-IMGT_length', 'CDR2-IMGT_matches', 'CDR2-IMGT_mismatches', 'CDR2-IMGT_gaps',
'FR3-IMGT_from', 'FR3-IMGT_to', 'FR3-IMGT_length', 'FR3-IMGT_matches', 'FR3-IMGT_mismatches', 'FR3-IMGT_gaps']
for seq in igblast_dict:
for key in keys_to_check:
if key not in igblast_dict[seq]:
igblast_dict[seq][key] = np.nan
return(igblast_dict)
def get_dict_from_igblast_fmt7(clones_dict, igblast_fmt7):
igblast_file = open(igblast_fmt7, "r")
igblast_file_dict = dict()
information_dict = dict()
key = None
header = []
header_list = []
information_all_regions = []
information_flag = False
for row in igblast_file:
if re.match(".*Query: ", row):
key = row.split(" ")[2].rstrip()
continue
if re.match(".*Alignment summary", row):
header = re.search(r'\(.*\)', row).group(0)
header = header.split(",")
header = [element.strip() for element in header]
header[0] = header[0].replace("(", "")
header[-1] = header[-1].replace(")", "")
header_aux = header
information_flag = True
continue
if (re.match("^(?!Total)", row)) and (information_flag):
information_list = row.rstrip().split("\t")
region = information_list[0]
header = [region + "_" + element for element in header]
header_list.append(header)
information_all_regions.append(information_list[1:])
header = header_aux
continue
elif re.match("^Total\t", row):
information_flag = False
flat_header_list = [
item for sublist in header_list for item in sublist
]
flat_information_list = [
item for sublist in information_all_regions for item in sublist
]
information_dict = dict(
zip(flat_header_list, flat_information_list)
)
header_list = []
information_all_regions = []
if key is not None and key in clones_dict:
igblast_file_dict[key] = information_dict
igblast_file_dict_corrected = check_dict_keys(igblast_file_dict)
print("Correction:")
print(igblast_file_dict_corrected)
return(igblast_file_dict_corrected)
def hamming_distance(chaine1, chaine2):
return sum(c1 != c2 for c1, c2 in zip(chaine1, chaine2))
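# e.g. hamming_distance("GATTACA", "GACTACA") == 1. zip() truncates to the shorter
# input, so only the overlapping prefix is compared; the caller below slices both
# sequences to equal length before calling this.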
def aminoacids_mismatches(aminoacids_sequences_table):
mismatches_list = []
for i in range(0, aminoacids_sequences_table.shape[0]):
v_germ_seq = str(
aminoacids_sequences_table.iloc[i]['v_germline_alignment_aa']
)
v_seq_aa = str(
aminoacids_sequences_table.iloc[i]['v_sequence_alignment_aa'])
if len(v_germ_seq) > len(v_seq_aa):
v_germ_seq_subset = v_germ_seq[:len(v_seq_aa)]
mismatches_list.append(
hamming_distance(
v_germ_seq_subset,
v_seq_aa))
elif len(v_germ_seq) < len(v_seq_aa):
v_seq_aa_subset = v_seq_aa[:len(v_germ_seq)]
mismatches_list.append(
hamming_distance(
v_germ_seq,
v_seq_aa_subset))
elif len(v_germ_seq) == len(v_seq_aa):
mismatches_list.append(hamming_distance(v_germ_seq, v_seq_aa))
return(mismatches_list)
def select_information(define_clones_dict, igblast_airr_dict, igblast_fmt7_dict, corrected_sequences_dict, correction):
define_clones_pd = | pd.DataFrame(define_clones_dict) | pandas.DataFrame |
# %%
# make plot
import os, sys
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.style
import matplotlib as mpl
mpl.style.use('default')
# Functions Section Begins ----------------------------------------------------- #
def dircheck(targetpaths):
"""
dircheck checks the target folder(s) and creates them if they do not exist.
targetpaths: a single folder path (str) or a list of folder paths
"""
# print(type(targetpaths))
if isinstance(targetpaths, str):
print(os.path.exists(targetpaths))
if not os.path.exists(targetpaths):
os.makedirs(targetpaths)
elif isinstance(targetpaths, list):
for path in targetpaths:
if not os.path.exists(path):
os.makedirs(path)
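# hypothetical usage: dircheck(oppath) or dircheck([ippath, oppath]) to create the output folders if missing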
# Functions Section Ends ----------------------------------------------------- #
# %%
# load the csv file
path = '/Volumes/LaCie_DataStorage/xiaochao_wei_STORM imaging/STORM_imaging'
ippath = os.path.join(path, 'analysis_20190308', 'preprocessing', 'imgintensity')
oppath = os.path.join(path, 'analysis_20190308', 'preprocessing', 'imginfo', 'plot')
# %%
# merge the csv file
filelist = os.listdir(ippath)
print(filelist)
def testgroup(x):
if x == 'w':
return 'wildtype'
elif x == 'k':
return 'knockout'
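# Note: file prefixes other than 'w'/'k' fall through testgroup and return None,
# which leaves the 'group' value missing for those rows.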
list_data = []
for filename in filelist:
df = pd.read_csv(os.path.join(ippath, filename), index_col=None, header=0)
df['file_name'] = filename
df['img_name'] = filename.replace('.csv', '')
test = df['file_name'].str.extract(r'^\d{4}_\d{2}_\d{2}_([a-z])[0-9].csv')
df['group'] = test[0].apply(lambda x : testgroup(x))
list_data.append(df)
data = pd.concat(list_data, axis = 0, ignore_index=True)
# %%
display(data.head())
print(data.shape)
# %%
imgnamecreator = lambda x: x.replace('.csv', '')
filelist_imgname = list(map(imgnamecreator, filelist))
print(filelist_imgname)
# %%
data_temp = data.loc[data['img_name'] == filelist_imgname[0]]
display(data_temp)
# %%
labels_group, uni_group = | pd.factorize(data['group']) | pandas.factorize |
# Natural Language Processing
# Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# Importing the dataset
dataset_train = pd.read_csv('train.txt', delimiter = '\t', quoting = 1)
X_test = pd.read_csv('test.txt', delimiter = '\t', quoting = 1).iloc[:, [0, 1, 2, 3]]
X_train = dataset_train.iloc[:, [0,1,2,3]]
y_train = dataset_train.iloc[:, [4]].values
df_train = pd.DataFrame(X_train)
df_test = | pd.DataFrame(X_test) | pandas.DataFrame |
"""Tools for preprocessing the data from JHU CSSE COVID-19 Data
and the population data from the World Bank.
"""
import numpy as np
import pandas as pd
def csse_covid_19_time_series_csv_to_df(url):
"""From a global csse_covid_19_time_series CSV, creates a pandas
DataFrame with a daily datetime index, first column World, and
remaining columns countries/regions in alphabetical order.
Parameters
----------
url: str
URL to a global csse_covid_19_time_series CSV file.
Example: 'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_US.csv'
Returns
-------
df: DataFrame
A DataFrame indexed by days with countries/regions columns.
"""
df = pd.read_csv(url)
df = df.groupby('Country/Region').sum()
df = df.transpose()
df = df.drop(index=['Lat','Long'])
df.index = pd.to_datetime(df.index)
df.index.name ='Date'
df.columns.name = None
# Create column 'World'
world = pd.DataFrame(df.sum(axis=1), columns=['World'])
df = pd.concat([world, df], axis=1)
df.rename(columns={
'Czechia' : 'Czech Republic',
'Taiwan*' : 'Taiwan',
'US' : 'USA',
'Korea, South' : 'South Korea',
'United Kingdom' : 'UK'}, inplace=True)
return df
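# Example usage (a sketch; this particular URL is assumed rather than taken from this file):
# confirmed = csse_covid_19_time_series_csv_to_df(
#     "https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/"
#     "csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_global.csv"
# )
# confirmed[["World", "USA", "UK"]].tail()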
def align_from_first_ocurrence(df):
"""Aligns and reindex the columns of a DataFrame created with csse_covid_19_time_series_csv_to_df
from their first nonnegative value.
Parameters
----------
df : DataFrame
A DataFrame created with csse_covid_19_time_series_csv_to_df.
Returns
-------
df_aligned : DataFrame
A DataFrame with its columns aligned and reindexed from their first nonzero (first-occurrence) entry.
"""
df_aligned = | pd.DataFrame() | pandas.DataFrame |
######### imports #########
from ast import arg
from datetime import timedelta
import sys
sys.path.insert(0, "TP_model")
sys.path.insert(0, "TP_model/fit_and_forecast")
from Reff_constants import *
from Reff_functions import *
import glob
import os
from sys import argv
import arviz as az
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import matplotlib
from math import ceil
import pickle
from cmdstanpy import CmdStanModel
matplotlib.use("Agg")
from params import (
truncation_days,
start_date,
third_start_date,
alpha_start_date,
omicron_start_date,
omicron_only_date,
omicron_dominance_date,
pop_sizes,
num_forecast_days,
get_all_p_detect_old,
get_all_p_detect,
)
def process_vax_data_array(
data_date,
third_states,
third_end_date,
variant="Delta",
print_latest_date_in_ts=False,
):
"""
Processes the vaccination data to an array for either the Omicron or Delta strain.
"""
# Load in vaccination data by state and date
vaccination_by_state = pd.read_csv(
"data/vaccine_effect_timeseries_" + data_date.strftime("%Y-%m-%d") + ".csv",
parse_dates=["date"],
)
# there are a couple NA's early on in the time series but is likely due to slightly
# different start dates
vaccination_by_state.fillna(1, inplace=True)
vaccination_by_state = vaccination_by_state.loc[
vaccination_by_state["variant"] == variant
]
vaccination_by_state = vaccination_by_state[["state", "date", "effect"]]
if print_latest_date_in_ts:
# display the latest available date in the NSW data (will be the same date between states)
print(
"Latest date in vaccine data is {}".format(
vaccination_by_state[vaccination_by_state.state == "NSW"].date.values[-1]
)
)
# Get only the dates we need + 1 (this serves as the initial value)
vaccination_by_state = vaccination_by_state[
(
vaccination_by_state.date
>= pd.to_datetime(third_start_date) - timedelta(days=1)
)
& (vaccination_by_state.date <= third_end_date)
]
vaccination_by_state = vaccination_by_state[
vaccination_by_state["state"].isin(third_states)
] # Isolate fitting states
vaccination_by_state = vaccination_by_state.pivot(
index="state", columns="date", values="effect"
) # Convert to matrix form
# If we are missing recent vaccination data, fill it in with the most recent available data.
latest_vacc_data = vaccination_by_state.columns[-1]
if latest_vacc_data < pd.to_datetime(third_end_date):
vaccination_by_state = pd.concat(
[vaccination_by_state]
+ [
pd.Series(vaccination_by_state[latest_vacc_data], name=day)
for day in pd.date_range(start=latest_vacc_data, end=third_end_date)
],
axis=1,
)
# Convert to simple array only useful to pass to stan (index 1 onwards)
vaccination_by_state_array = vaccination_by_state.iloc[:, 1:].to_numpy()
return vaccination_by_state_array
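# The returned array has one row per fitting state and one column per day from
# third_start_date through third_end_date; the extra leading day read in above only
# serves as the initial value and is dropped by the iloc[:, 1:] slice.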
def get_data_for_posterior(data_date):
"""
Read in the various datastreams and combine the samples into a dictionary that we then
dump to a pickle file.
"""
print("Performing inference on state level Reff")
data_date = pd.to_datetime(data_date) # Define data date
print("Data date is {}".format(data_date.strftime("%d%b%Y")))
fit_date = pd.to_datetime(data_date - timedelta(days=truncation_days))
print("Last date in fitting {}".format(fit_date.strftime("%d%b%Y")))
# * Note: 2020-09-09 won't work (for some reason)
# read in microdistancing survey data
surveys = pd.DataFrame()
path = "data/md/Barometer wave*.csv"
for file in glob.glob(path):
surveys = surveys.append(pd.read_csv(file, parse_dates=["date"]))
surveys = surveys.sort_values(by="date")
print("Latest Microdistancing survey is {}".format(surveys.date.values[-1]))
surveys["state"] = surveys["state"].map(states_initials).fillna(surveys["state"])
surveys["proportion"] = surveys["count"] / surveys.respondents
surveys.date = pd.to_datetime(surveys.date)
always = surveys.loc[surveys.response == "Always"].set_index(["state", "date"])
always = always.unstack(["state"])
# If you get an error here saying 'cannot create a new series when the index is not unique',
# then you have a duplicated md file.
idx = pd.date_range("2020-03-01", pd.to_datetime("today"))
always = always.reindex(idx, fill_value=np.nan)
always.index.name = "date"
# fill back to earlier and between weeks.
# Assume survey on day x applies for all days up to x - 6
always = always.fillna(method="bfill")
# assume values continue forward if survey hasn't completed
always = always.fillna(method="ffill")
always = always.stack(["state"])
# Zero out before first survey 20th March
always = always.reset_index().set_index("date")
always.loc[:"2020-03-20", "count"] = 0
always.loc[:"2020-03-20", "respondents"] = 0
always.loc[:"2020-03-20", "proportion"] = 0
always = always.reset_index().set_index(["state", "date"])
survey_X = pd.pivot_table(
data=always, index="date", columns="state", values="proportion"
)
survey_counts_base = (
pd.pivot_table(data=always, index="date", columns="state", values="count")
.drop(["Australia", "Other"], axis=1)
.astype(int)
)
survey_respond_base = (
pd.pivot_table(data=always, index="date", columns="state", values="respondents")
.drop(["Australia", "Other"], axis=1)
.astype(int)
)
# read in and process mask wearing data
mask_wearing = pd.DataFrame()
path = "data/face_coverings/face_covering_*_.csv"
for file in glob.glob(path):
mask_wearing = mask_wearing.append(pd.read_csv(file, parse_dates=["date"]))
mask_wearing = mask_wearing.sort_values(by="date")
print("Latest Mask wearing survey is {}".format(mask_wearing.date.values[-1]))
mask_wearing["state"] = (
mask_wearing["state"].map(states_initials).fillna(mask_wearing["state"])
)
mask_wearing["proportion"] = mask_wearing["count"] / mask_wearing.respondents
mask_wearing.date = pd.to_datetime(mask_wearing.date)
mask_wearing_always = mask_wearing.loc[
mask_wearing.face_covering == "Always"
].set_index(["state", "date"])
mask_wearing_always = mask_wearing_always.unstack(["state"])
idx = pd.date_range("2020-03-01", pd.to_datetime("today"))
mask_wearing_always = mask_wearing_always.reindex(idx, fill_value=np.nan)
mask_wearing_always.index.name = "date"
# fill back to earlier and between weeks.
# Assume survey on day x applies for all days up to x - 6
mask_wearing_always = mask_wearing_always.fillna(method="bfill")
# assume values continue forward if survey hasn't completed
mask_wearing_always = mask_wearing_always.fillna(method="ffill")
mask_wearing_always = mask_wearing_always.stack(["state"])
# Zero out before first survey 20th March
mask_wearing_always = mask_wearing_always.reset_index().set_index("date")
mask_wearing_always.loc[:"2020-03-20", "count"] = 0
mask_wearing_always.loc[:"2020-03-20", "respondents"] = 0
mask_wearing_always.loc[:"2020-03-20", "proportion"] = 0
mask_wearing_X = pd.pivot_table(
data=mask_wearing_always, index="date", columns="state", values="proportion"
)
mask_wearing_counts_base = pd.pivot_table(
data=mask_wearing_always, index="date", columns="state", values="count"
).astype(int)
mask_wearing_respond_base = pd.pivot_table(
data=mask_wearing_always, index="date", columns="state", values="respondents"
).astype(int)
df_Reff = pd.read_csv(
"results/EpyReff/Reff_delta" + data_date.strftime("%Y-%m-%d") + "tau_4.csv",
parse_dates=["INFECTION_DATES"],
)
df_Reff["date"] = df_Reff.INFECTION_DATES
df_Reff["state"] = df_Reff.STATE
df_Reff_omicron = pd.read_csv(
"results/EpyReff/Reff_omicron" + data_date.strftime("%Y-%m-%d") + "tau_4.csv",
parse_dates=["INFECTION_DATES"],
)
df_Reff_omicron["date"] = df_Reff_omicron.INFECTION_DATES
df_Reff_omicron["state"] = df_Reff_omicron.STATE
# relabel some of the columns to avoid replication in the merged dataframe
col_names_replace = {
"mean": "mean_omicron",
"lower": "lower_omicron",
"upper": "upper_omicron",
"top": "top_omicron",
"bottom": "bottom_omicron",
"std": "std_omicron",
}
df_Reff_omicron.rename(col_names_replace, axis=1, inplace=True)
# read in NNDSS/linelist data
# If this errors it may be missing a leading zero on the date.
df_state = read_in_cases(
case_file_date=data_date.strftime("%d%b%Y"),
apply_delay_at_read=True,
apply_inc_at_read=True,
)
# save the case file for convenience
df_state.to_csv("results/cases_" + data_date.strftime("%Y-%m-%d") + ".csv")
df_Reff = df_Reff.merge(
df_state,
how="left",
left_on=["state", "date"],
right_on=["STATE", "date_inferred"],
) # how = left to use Reff days, NNDSS missing dates
# merge in the omicron stuff
df_Reff = df_Reff.merge(
df_Reff_omicron,
how="left",
left_on=["state", "date"],
right_on=["state", "date"],
)
df_Reff["rho_moving"] = df_Reff.groupby(["state"])["rho"].transform(
lambda x: x.rolling(7, 1).mean()
)  # 7-day rolling mean with min_periods=1
# some days have no cases, so need to fillna
df_Reff["rho_moving"] = df_Reff.rho_moving.fillna(method="bfill")
# counts are already aligned with infection date by subtracting a random incubation period
df_Reff["local"] = df_Reff.local.fillna(0)
df_Reff["imported"] = df_Reff.imported.fillna(0)
######### Read in Google mobility results #########
sys.path.insert(0, "../")
df_google = read_in_google(moving=True, moving_window=7)
# df_google = read_in_google(moving=False)
df = df_google.merge(df_Reff[[
"date",
"state",
"mean",
"lower",
"upper",
"top",
"bottom",
"std",
"mean_omicron",
"lower_omicron",
"upper_omicron",
"top_omicron",
"bottom_omicron",
"std_omicron",
"rho",
"rho_moving",
"local",
"imported",
]],
on=["date", "state"],
how="inner",
)
######### Create useable dataset #########
# ACT and NT not in original estimates, need to extrapolated sorting keeps consistent
# with sort in data_by_state
# Note that as we now consider the third wave for ACT, we include it in the third
# wave fitting only!
states_to_fit_all_waves = sorted(
["NSW", "VIC", "QLD", "SA", "WA", "TAS", "ACT", "NT"]
)
first_states = sorted(["NSW", "VIC", "QLD", "SA", "WA", "TAS"])
fit_post_March = True
ban = "2020-03-20"
first_end_date = "2020-03-31"
# data for the first wave
first_date_range = {
"NSW": pd.date_range(start="2020-03-01", end=first_end_date).values,
"QLD": pd.date_range(start="2020-03-01", end=first_end_date).values,
"SA": pd.date_range(start="2020-03-01", end=first_end_date).values,
"TAS": pd.date_range(start="2020-03-01", end=first_end_date).values,
"VIC": pd.date_range(start="2020-03-01", end=first_end_date).values,
"WA": pd.date_range(start="2020-03-01", end=first_end_date).values,
}
# Second wave inputs
sec_states = sorted([
"NSW",
# "VIC",
])
sec_start_date = "2020-06-01"
sec_end_date = "2021-01-19"
# choose dates for each state for sec wave
sec_date_range = {
"NSW": pd.date_range(start="2020-06-01", end="2021-01-19").values,
# "VIC": pd.date_range(start="2020-06-01", end="2020-10-28").values,
}
# Third wave inputs
third_states = sorted([
"NSW",
"VIC",
"ACT",
"QLD",
"SA",
"TAS",
# "NT",
"WA",
])
# Subtract the truncation days to avoid right truncation as we consider infection dates
# and not symptom onset dates
third_end_date = data_date - pd.Timedelta(days=truncation_days)
# choose dates for each state for third wave
# Note that as we now consider the third wave for ACT, we include it in
# the third wave fitting only!
third_date_range = {
"ACT": pd.date_range(start="2021-08-15", end=third_end_date).values,
"NSW": pd.date_range(start="2021-06-25", end=third_end_date).values,
# "NT": pd.date_range(start="2021-12-20", end=third_end_date).values,
"QLD": pd.date_range(start="2021-07-30", end=third_end_date).values,
"SA": pd.date_range(start="2021-12-10", end=third_end_date).values,
"TAS": pd.date_range(start="2021-12-20", end=third_end_date).values,
"VIC": pd.date_range(start="2021-07-10", end=third_end_date).values,
"WA": pd.date_range(start="2022-01-01", end=third_end_date).values,
}
fit_mask = df.state.isin(first_states)
if fit_post_March:
fit_mask = (fit_mask) & (df.date >= start_date)
fit_mask = (fit_mask) & (df.date <= first_end_date)
second_wave_mask = df.state.isin(sec_states)
second_wave_mask = (second_wave_mask) & (df.date >= sec_start_date)
second_wave_mask = (second_wave_mask) & (df.date <= sec_end_date)
# Add third wave stuff here
third_wave_mask = df.state.isin(third_states)
third_wave_mask = (third_wave_mask) & (df.date >= third_start_date)
third_wave_mask = (third_wave_mask) & (df.date <= third_end_date)
predictors = mov_values.copy()
# predictors.extend(['driving_7days','transit_7days','walking_7days','pc'])
# remove residential to see if it improves fit
# predictors.remove("residential_7days")
df["post_policy"] = (df.date >= ban).astype(int)
dfX = df.loc[fit_mask].sort_values("date")
df2X = df.loc[second_wave_mask].sort_values("date")
df3X = df.loc[third_wave_mask].sort_values("date")
dfX["is_first_wave"] = 0
for state in first_states:
dfX.loc[dfX.state == state, "is_first_wave"] = (
dfX.loc[dfX.state == state]
.date.isin(first_date_range[state])
.astype(int)
.values
)
df2X["is_sec_wave"] = 0
for state in sec_states:
df2X.loc[df2X.state == state, "is_sec_wave"] = (
df2X.loc[df2X.state == state]
.date.isin(sec_date_range[state])
.astype(int)
.values
)
# used to index what dates are featured in omicron AND third wave
omicron_date_range = pd.date_range(start=omicron_start_date, end=third_end_date)
df3X["is_third_wave"] = 0
for state in third_states:
df3X.loc[df3X.state == state, "is_third_wave"] = (
df3X.loc[df3X.state == state]
.date.isin(third_date_range[state])
.astype(int)
.values
)
# condition on being in third wave AND omicron
df3X.loc[df3X.state == state, "is_omicron_wave"] = (
(
df3X.loc[df3X.state == state].date.isin(omicron_date_range)
* df3X.loc[df3X.state == state].date.isin(third_date_range[state])
)
.astype(int)
.values
)
data_by_state = {}
sec_data_by_state = {}
third_data_by_state = {}
for value in ["mean", "std", "local", "imported"]:
data_by_state[value] = pd.pivot(
dfX[["state", value, "date"]],
index="date",
columns="state",
values=value,
).sort_index(axis="columns")
# account for dates pre pre second wave
if df2X.loc[df2X.state == sec_states[0]].shape[0] == 0:
print("making empty")
sec_data_by_state[value] = pd.DataFrame(columns=sec_states).astype(float)
else:
sec_data_by_state[value] = pd.pivot(
df2X[["state", value, "date"]],
index="date",
columns="state",
values=value,
).sort_index(axis="columns")
# account for dates pre pre third wave
if df3X.loc[df3X.state == third_states[0]].shape[0] == 0:
print("making empty")
third_data_by_state[value] = pd.DataFrame(columns=third_states).astype(
float
)
else:
third_data_by_state[value] = pd.pivot(
df3X[["state", value, "date"]],
index="date",
columns="state",
values=value,
).sort_index(axis="columns")
# now add in the summary stats for Omicron Reff
for value in ["mean_omicron", "std_omicron"]:
if df3X.loc[df3X.state == third_states[0]].shape[0] == 0:
print("making empty")
third_data_by_state[value] = pd.DataFrame(columns=third_states).astype(
float
)
else:
third_data_by_state[value] = pd.pivot(
df3X[["state", value, "date"]],
index="date",
columns="state",
values=value,
).sort_index(axis="columns")
# FIRST PHASE
mobility_by_state = []
mobility_std_by_state = []
count_by_state = []
respond_by_state = []
mask_wearing_count_by_state = []
mask_wearing_respond_by_state = []
include_in_first_wave = []
# filtering survey responses to dates before this wave fitting
survey_respond = survey_respond_base.loc[: dfX.date.values[-1]]
survey_counts = survey_counts_base.loc[: dfX.date.values[-1]]
mask_wearing_respond = mask_wearing_respond_base.loc[: dfX.date.values[-1]]
mask_wearing_counts = mask_wearing_counts_base.loc[: dfX.date.values[-1]]
for state in first_states:
mobility_by_state.append(dfX.loc[dfX.state == state, predictors].values / 100)
mobility_std_by_state.append(
dfX.loc[dfX.state == state, [val + "_std" for val in predictors]].values / 100
)
count_by_state.append(survey_counts.loc[start_date:first_end_date, state].values)
respond_by_state.append(survey_respond.loc[start_date:first_end_date, state].values)
mask_wearing_count_by_state.append(
mask_wearing_counts.loc[start_date:first_end_date, state].values
)
mask_wearing_respond_by_state.append(
mask_wearing_respond.loc[start_date:first_end_date, state].values
)
include_in_first_wave.append(
dfX.loc[dfX.state == state, "is_first_wave"].values
)
# SECOND PHASE
sec_mobility_by_state = []
sec_mobility_std_by_state = []
sec_count_by_state = []
sec_respond_by_state = []
sec_mask_wearing_count_by_state = []
sec_mask_wearing_respond_by_state = []
include_in_sec_wave = []
# filtering survey responses to dates before this wave fitting
survey_respond = survey_respond_base.loc[: df2X.date.values[-1]]
survey_counts = survey_counts_base.loc[: df2X.date.values[-1]]
mask_wearing_respond = mask_wearing_respond_base.loc[: df2X.date.values[-1]]
mask_wearing_counts = mask_wearing_counts_base.loc[: df2X.date.values[-1]]
for state in sec_states:
sec_mobility_by_state.append(
df2X.loc[df2X.state == state, predictors].values / 100
)
sec_mobility_std_by_state.append(
df2X.loc[df2X.state == state, [val + "_std" for val in predictors]].values / 100
)
sec_count_by_state.append(
survey_counts.loc[sec_start_date:sec_end_date, state].values
)
sec_respond_by_state.append(
survey_respond.loc[sec_start_date:sec_end_date, state].values
)
sec_mask_wearing_count_by_state.append(
mask_wearing_counts.loc[sec_start_date:sec_end_date, state].values
)
sec_mask_wearing_respond_by_state.append(
mask_wearing_respond.loc[sec_start_date:sec_end_date, state].values
)
include_in_sec_wave.append(df2X.loc[df2X.state == state, "is_sec_wave"].values)
# THIRD WAVE
third_mobility_by_state = []
third_mobility_std_by_state = []
third_count_by_state = []
third_respond_by_state = []
third_mask_wearing_count_by_state = []
third_mask_wearing_respond_by_state = []
include_in_third_wave = []
include_in_omicron_wave = []
# filtering survey responses to dates before this wave fitting
survey_respond = survey_respond_base.loc[: df3X.date.values[-1]]
survey_counts = survey_counts_base.loc[: df3X.date.values[-1]]
mask_wearing_respond = mask_wearing_respond_base.loc[: df3X.date.values[-1]]
mask_wearing_counts = mask_wearing_counts_base.loc[: df3X.date.values[-1]]
for state in third_states:
third_mobility_by_state.append(
df3X.loc[df3X.state == state, predictors].values / 100
)
third_mobility_std_by_state.append(
df3X.loc[df3X.state == state, [val + "_std" for val in predictors]].values / 100
)
third_count_by_state.append(
survey_counts.loc[third_start_date:third_end_date, state].values
)
third_respond_by_state.append(
survey_respond.loc[third_start_date:third_end_date, state].values
)
third_mask_wearing_count_by_state.append(
mask_wearing_counts.loc[third_start_date:third_end_date, state].values
)
third_mask_wearing_respond_by_state.append(
mask_wearing_respond.loc[third_start_date:third_end_date, state].values
)
include_in_third_wave.append(
df3X.loc[df3X.state == state, "is_third_wave"].values
)
include_in_omicron_wave.append(
df3X.loc[df3X.state == state, "is_omicron_wave"].values
)
# policy boolean flag for after travel ban in each wave
policy = dfX.loc[
dfX.state == first_states[0], "post_policy"
] # this is the post ban policy
policy_sec_wave = [1] * df2X.loc[df2X.state == sec_states[0]].shape[0]
policy_third_wave = [1] * df3X.loc[df3X.state == third_states[0]].shape[0]
# read in the vaccination data
delta_vaccination_by_state_array = process_vax_data_array(
data_date=data_date,
third_states=third_states,
third_end_date=third_end_date,
variant="Delta",
print_latest_date_in_ts=True,
)
omicron_vaccination_by_state_array = process_vax_data_array(
data_date=data_date,
third_states=third_states,
third_end_date=third_end_date,
variant="Omicron",
)
# Make state by state arrays
state_index = {state: i + 1 for i, state in enumerate(states_to_fit_all_waves)}
# dates to apply alpha in the second wave (this won't allow for VIC to be added as
# the date_ranges are different)
apply_alpha_sec_wave = (
sec_date_range["NSW"] >= pd.to_datetime(alpha_start_date)
).astype(int)
omicron_start_day = (
pd.to_datetime(omicron_start_date) - pd.to_datetime(third_start_date)
).days
omicron_only_day = (
pd.to_datetime(omicron_only_date) - pd.to_datetime(third_start_date)
).days
heterogeneity_start_day = (
pd.to_datetime("2021-08-20") - pd.to_datetime(third_start_date)
).days
# number of days we fit the average VE over
tau_vax_block_size = 3
# get pop size array
pop_size_array = []
for s in states_to_fit_all_waves:
pop_size_array.append(pop_sizes[s])
p_detect = get_all_p_detect_old(
states=third_states,
end_date=third_end_date,
num_days=df3X.loc[df3X.state == "NSW"].shape[0],
)
df_p_detect = pd.DataFrame(p_detect, columns=third_states)
df_p_detect["date"] = third_date_range["NSW"]
df_p_detect.to_csv("results/CA_" + data_date.strftime("%Y-%m-%d") + ".csv")
# p_detect = get_all_p_detect(
# end_date=third_end_date,
# num_days=df3X.loc[df3X.state == "NSW"].shape[0],
# )
# input data block for stan model
input_data = {
"j_total": len(states_to_fit_all_waves),
"N_first": dfX.loc[dfX.state == first_states[0]].shape[0],
"K": len(predictors),
"j_first": len(first_states),
"Reff": data_by_state["mean"].values,
"mob": mobility_by_state,
"mob_std": mobility_std_by_state,
"sigma2": data_by_state["std"].values ** 2,
"policy": policy.values,
"local": data_by_state["local"].values,
"imported": data_by_state["imported"].values,
"N_sec": df2X.loc[df2X.state == sec_states[0]].shape[0],
"j_sec": len(sec_states),
"Reff_sec": sec_data_by_state["mean"].values,
"mob_sec": sec_mobility_by_state,
"mob_sec_std": sec_mobility_std_by_state,
"sigma2_sec": sec_data_by_state["std"].values ** 2,
"policy_sec": policy_sec_wave,
"local_sec": sec_data_by_state["local"].values,
"imported_sec": sec_data_by_state["imported"].values,
"apply_alpha_sec": apply_alpha_sec_wave,
"N_third": df3X.loc[df3X.state == "NSW"].shape[0],
"j_third": len(third_states),
"Reff_third": third_data_by_state["mean"].values,
"Reff_omicron": third_data_by_state["mean_omicron"].values,
"mob_third": third_mobility_by_state,
"mob_third_std": third_mobility_std_by_state,
"sigma2_third": third_data_by_state["std"].values ** 2,
"sigma2_omicron": third_data_by_state["std_omicron"].values ** 2,
"policy_third": policy_third_wave,
"local_third": third_data_by_state["local"].values,
"imported_third": third_data_by_state["imported"].values,
"count_md": count_by_state,
"respond_md": respond_by_state,
"count_md_sec": sec_count_by_state,
"respond_md_sec": sec_respond_by_state,
"count_md_third": third_count_by_state,
"respond_md_third": third_respond_by_state,
"count_masks": mask_wearing_count_by_state,
"respond_masks": mask_wearing_respond_by_state,
"count_masks_sec": sec_mask_wearing_count_by_state,
"respond_masks_sec": sec_mask_wearing_respond_by_state,
"count_masks_third": third_mask_wearing_count_by_state,
"respond_masks_third": third_mask_wearing_respond_by_state,
"map_to_state_index_first": [state_index[state] for state in first_states],
"map_to_state_index_sec": [state_index[state] for state in sec_states],
"map_to_state_index_third": [state_index[state] for state in third_states],
"total_N_p_sec": sum([sum(x) for x in include_in_sec_wave]).item(),
"total_N_p_third": sum([sum(x) for x in include_in_third_wave]).item(),
"include_in_first": include_in_first_wave,
"include_in_sec": include_in_sec_wave,
"include_in_third": include_in_third_wave,
"pos_starts_sec": np.cumsum([sum(x) for x in include_in_sec_wave]).astype(int).tolist(),
"pos_starts_third": np.cumsum(
[sum(x) for x in include_in_third_wave]
).astype(int).tolist(),
"ve_delta_data": delta_vaccination_by_state_array,
"ve_omicron_data": omicron_vaccination_by_state_array,
"omicron_start_day": omicron_start_day,
"omicron_only_day": omicron_only_day,
"include_in_omicron": include_in_omicron_wave,
"total_N_p_third_omicron": int(sum([sum(x) for x in include_in_omicron_wave]).item()),
"pos_starts_third_omicron": np.cumsum(
[sum(x) for x in include_in_omicron_wave]
).astype(int).tolist(),
'tau_vax_block_size': tau_vax_block_size,
'total_N_p_third_blocks': int(
sum([int(ceil(sum(x)/tau_vax_block_size)) for x in include_in_third_wave])
),
'pos_starts_third_blocks': np.cumsum(
[int(ceil(sum(x)/tau_vax_block_size)) for x in include_in_third_wave]
).astype(int),
'total_N_p_third_omicron_blocks': int(
sum([int(ceil(sum(x)/tau_vax_block_size)) for x in include_in_omicron_wave])
),
'pos_starts_third_omicron_blocks': np.cumsum(
[int(ceil(sum(x)/tau_vax_block_size)) for x in include_in_omicron_wave]
).astype(int),
"pop_size_array": pop_size_array,
"heterogeneity_start_day": heterogeneity_start_day,
"p_detect": p_detect,
}
# dump the dictionary to a pickle file
with open("results/stan_input_data.pkl", "wb") as f:
pickle.dump(input_data, f)
return None
def run_stan(
data_date,
num_chains=4,
num_samples=1000,
num_warmup_samples=500,
max_treedepth=12,
):
"""
Read the input_data.json in and run the stan model.
"""
data_date = pd.to_datetime(data_date)
# read in the input data as a dictionary
with open("results/stan_input_data.pkl", "rb") as f:
input_data = pickle.load(f)
# make results and figs dir
figs_dir = (
"figs/stan_fit/stan_fit_"
+ data_date.strftime("%Y-%m-%d")
+ "/"
)
results_dir = (
"results/"
+ data_date.strftime("%Y-%m-%d")
+ "/"
)
os.makedirs(figs_dir, exist_ok=True)
os.makedirs(results_dir, exist_ok=True)
# path to the stan model
# basic model with a switchover between Reffs
# rho_model_gamma = "TP_model/fit_and_forecast/stan_models/TP_switchover.stan"
# mixture model with basic susceptible depletion
# rho_model_gamma = "TP_model/fit_and_forecast/stan_models/TP_gamma_mix.stan"
# model that has a switchover but incorporates a waning in infection acquired immunity
rho_model_gamma = "TP_model/fit_and_forecast/stan_models/TP_switchover_waning_infection.stan"
# model that incorporates a waning in infection acquired immunity but is coded as a mixture
# rho_model_gamma = "TP_model/fit_and_forecast/stan_models/TP_gamma_mix_waning_infection.stan"
# model that has a switchover but incorporates a waning in infection acquired immunity
# rho_model_gamma = "TP_model/fit_and_forecast/stan_models/TP_switchover_waning_infection_single_md.stan"
# compile the stan model
model = CmdStanModel(stan_file=rho_model_gamma)
# obtain a posterior sample from the model conditioned on the data
fit = model.sample(
chains=num_chains,
iter_warmup=num_warmup_samples,
iter_sampling=num_samples,
data=input_data,
max_treedepth=max_treedepth,
refresh=10
)
# display convergence diagnostics for the current run
print("===========")
print(fit.diagnose())
print("===========")
# save output file to
fit.save_csvfiles(dir=results_dir)
df_fit = fit.draws_pd()
df_fit.to_csv(
results_dir
+ "posterior_sample_"
+ data_date.strftime("%Y-%m-%d")
+ ".csv"
)
# output a set of diagnostics
filename = (
figs_dir
+ "fit_summary_all_parameters"
+ data_date.strftime("%Y-%m-%d")
+ ".csv"
)
# save a summary file for all parameters; this involves ESS and ESS/s as well as summary stats
fit_summary = fit.summary()
fit_summary.to_csv(filename)
# now save a small summary to easily view key parameters
pars_of_interest = ["bet[" + str(i + 1) + "]" for i in range(5)]
pars_of_interest = pars_of_interest + ["R_Li[" + str(i + 1) + "]" for i in range(8)]
pars_of_interest = pars_of_interest + [
"R_I",
"R_L",
"theta_md",
"theta_masks",
"sig",
"voc_effect_alpha",
"voc_effect_delta",
"voc_effect_omicron",
]
pars_of_interest = pars_of_interest + [
col for col in df_fit if "phi" in col and "simplex" not in col
]
# save a summary for ease of viewing
# output a set of diagnostics
filename = (
figs_dir
+ "fit_summary_main_parameters"
+ data_date.strftime("%Y-%m-%d")
+ ".csv"
)
fit_summary.loc[pars_of_interest].to_csv(filename)
return None
def plot_and_save_posterior_samples(data_date):
"""
Runs the full suite of plotting.
"""
data_date = pd.to_datetime(data_date) # Define data date
figs_dir = (
"figs/stan_fit/stan_fit_"
+ data_date.strftime("%Y-%m-%d")
+ "/"
)
# read in the posterior sample
samples_mov_gamma = pd.read_csv(
"results/"
+ data_date.strftime("%Y-%m-%d")
+ "/posterior_sample_"
+ data_date.strftime("%Y-%m-%d")
+ ".csv"
)
# * Note: 2020-09-09 won't work (for some reason)
######### Read in microdistancing (md) surveys #########
surveys = pd.DataFrame()
path = "data/md/Barometer wave*.csv"
for file in glob.glob(path):
surveys = surveys.append(pd.read_csv(file, parse_dates=["date"]))
surveys = surveys.sort_values(by="date")
print("Latest Microdistancing survey is {}".format(surveys.date.values[-1]))
surveys["state"] = surveys["state"].map(states_initials).fillna(surveys["state"])
surveys["proportion"] = surveys["count"] / surveys.respondents
surveys.date = pd.to_datetime(surveys.date)
always = surveys.loc[surveys.response == "Always"].set_index(["state", "date"])
always = always.unstack(["state"])
# If you get an error here saying 'cannot create a new series when the index is not unique',
# then you have a duplicated md file.
idx = pd.date_range("2020-03-01", pd.to_datetime("today"))
always = always.reindex(idx, fill_value=np.nan)
always.index.name = "date"
# fill back to earlier and between weeks.
# Assume survey on day x applies for all days up to x - 6
always = always.fillna(method="bfill")
# assume values continue forward if survey hasn't completed
always = always.fillna(method="ffill")
always = always.stack(["state"])
# Zero out before first survey 20th March
always = always.reset_index().set_index("date")
always.loc[:"2020-03-20", "count"] = 0
always.loc[:"2020-03-20", "respondents"] = 0
always.loc[:"2020-03-20", "proportion"] = 0
always = always.reset_index().set_index(["state", "date"])
survey_X = pd.pivot_table(
data=always, index="date", columns="state", values="proportion"
)
survey_counts_base = (
pd.pivot_table(data=always, index="date", columns="state", values="count")
.drop(["Australia", "Other"], axis=1)
.astype(int)
)
survey_respond_base = (
pd.pivot_table(data=always, index="date", columns="state", values="respondents")
.drop(["Australia", "Other"], axis=1)
.astype(int)
)
## read in and process mask wearing data
mask_wearing = pd.DataFrame()
path = "data/face_coverings/face_covering_*_.csv"
for file in glob.glob(path):
mask_wearing = mask_wearing.append(pd.read_csv(file, parse_dates=["date"]))
mask_wearing = mask_wearing.sort_values(by="date")
print("Latest Mask wearing survey is {}".format(mask_wearing.date.values[-1]))
mask_wearing["state"] = (
mask_wearing["state"].map(states_initials).fillna(mask_wearing["state"])
)
mask_wearing["proportion"] = mask_wearing["count"] / mask_wearing.respondents
mask_wearing.date = pd.to_datetime(mask_wearing.date)
mask_wearing_always = mask_wearing.loc[
mask_wearing.face_covering == "Always"
].set_index(["state", "date"])
mask_wearing_always = mask_wearing_always.unstack(["state"])
idx = pd.date_range("2020-03-01", pd.to_datetime("today"))
mask_wearing_always = mask_wearing_always.reindex(idx, fill_value=np.nan)
mask_wearing_always.index.name = "date"
# fill back to earlier and between weeks.
# Assume survey on day x applies for all days up to x - 6
mask_wearing_always = mask_wearing_always.fillna(method="bfill")
# assume values continue forward if survey hasn't completed
mask_wearing_always = mask_wearing_always.fillna(method="ffill")
mask_wearing_always = mask_wearing_always.stack(["state"])
# Zero out before first survey 20th March
mask_wearing_always = mask_wearing_always.reset_index().set_index("date")
mask_wearing_always.loc[:"2020-03-20", "count"] = 0
mask_wearing_always.loc[:"2020-03-20", "respondents"] = 0
mask_wearing_always.loc[:"2020-03-20", "proportion"] = 0
mask_wearing_X = pd.pivot_table(
data=mask_wearing_always, index="date", columns="state", values="proportion"
)
mask_wearing_counts_base = pd.pivot_table(
data=mask_wearing_always, index="date", columns="state", values="count"
).astype(int)
mask_wearing_respond_base = pd.pivot_table(
data=mask_wearing_always, index="date", columns="state", values="respondents"
).astype(int)
df_Reff = pd.read_csv(
"results/EpyReff/Reff_delta" + data_date.strftime("%Y-%m-%d") + "tau_4.csv",
parse_dates=["INFECTION_DATES"],
)
df_Reff["date"] = df_Reff.INFECTION_DATES
df_Reff["state"] = df_Reff.STATE
df_Reff_omicron = pd.read_csv(
"results/EpyReff/Reff_omicron" + data_date.strftime("%Y-%m-%d") + "tau_4.csv",
parse_dates=["INFECTION_DATES"],
)
df_Reff_omicron["date"] = df_Reff_omicron.INFECTION_DATES
df_Reff_omicron["state"] = df_Reff_omicron.STATE
# relabel some of the columns to avoid replication in the merged dataframe
col_names_replace = {
"mean": "mean_omicron",
"lower": "lower_omicron",
"upper": "upper_omicron",
"top": "top_omicron",
"bottom": "bottom_omicron",
"std": "std_omicron",
}
df_Reff_omicron.rename(col_names_replace, axis=1, inplace=True)
# read in NNDSS/linelist data
# If this errors it may be missing a leading zero on the date.
df_state = read_in_cases(
case_file_date=data_date.strftime("%d%b%Y"),
apply_delay_at_read=True,
apply_inc_at_read=True,
)
df_Reff = df_Reff.merge(
df_state,
how="left",
left_on=["state", "date"],
right_on=["STATE", "date_inferred"],
) # how = left to use Reff days, NNDSS missing dates
# merge in the omicron stuff
df_Reff = df_Reff.merge(
df_Reff_omicron,
how="left",
left_on=["state", "date"],
right_on=["state", "date"],
)
df_Reff["rho_moving"] = df_Reff.groupby(["state"])["rho"].transform(
lambda x: x.rolling(7, 1).mean()
)  # 7-day rolling mean with min_periods=1
# some days have no cases, so need to fillna
df_Reff["rho_moving"] = df_Reff.rho_moving.fillna(method="bfill")
# counts are already aligned with infection date by subtracting a random incubation period
df_Reff["local"] = df_Reff.local.fillna(0)
df_Reff["imported"] = df_Reff.imported.fillna(0)
######### Read in Google mobility results #########
sys.path.insert(0, "../")
df_google = read_in_google(moving=True)
df = df_google.merge(
df_Reff[
[
"date",
"state",
"mean",
"lower",
"upper",
"top",
"bottom",
"std",
"mean_omicron",
"lower_omicron",
"upper_omicron",
"top_omicron",
"bottom_omicron",
"std_omicron",
"rho",
"rho_moving",
"local",
"imported",
]
],
on=["date", "state"],
how="inner",
)
# ACT and NT not in original estimates, need to extrapolated sorting keeps consistent
# with sort in data_by_state
# Note that as we now consider the third wave for ACT, we include it in the third
# wave fitting only!
states_to_fit_all_waves = sorted(
["NSW", "VIC", "QLD", "SA", "WA", "TAS", "ACT", "NT"]
)
first_states = sorted(["NSW", "VIC", "QLD", "SA", "WA", "TAS"])
fit_post_March = True
ban = "2020-03-20"
first_end_date = "2020-03-31"
# data for the first wave
first_date_range = {
"NSW": pd.date_range(start="2020-03-01", end=first_end_date).values,
"QLD": pd.date_range(start="2020-03-01", end=first_end_date).values,
"SA": pd.date_range(start="2020-03-01", end=first_end_date).values,
"TAS": pd.date_range(start="2020-03-01", end=first_end_date).values,
"VIC": pd.date_range(start="2020-03-01", end=first_end_date).values,
"WA": pd.date_range(start="2020-03-01", end=first_end_date).values,
}
# Second wave inputs
sec_states = sorted([
'NSW',
# 'VIC',
])
sec_start_date = "2020-06-01"
sec_end_date = "2021-01-19"
# choose dates for each state for sec wave
sec_date_range = {
"NSW": pd.date_range(start="2020-06-01", end="2021-01-19").values,
# "VIC": pd.date_range(start="2020-06-01", end="2020-10-28").values,
}
# Third wave inputs
third_states = sorted([
"NSW",
"VIC",
"ACT",
"QLD",
"SA",
"TAS",
# "NT",
"WA",
])
# Subtract the truncation days to avoid right truncation as we consider infection dates
# and not symptom onset dates
third_end_date = data_date - pd.Timedelta(days=truncation_days)
# choose dates for each state for third wave
# Note that as we now consider the third wave for ACT, we include it in
# the third wave fitting only!
third_date_range = {
"ACT": pd.date_range(start="2021-08-15", end=third_end_date).values,
"NSW": pd.date_range(start="2021-06-25", end=third_end_date).values,
# "NT": pd.date_range(start="2021-12-20", end=third_end_date).values,
"QLD": | pd.date_range(start="2021-07-30", end=third_end_date) | pandas.date_range |
# importing
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import itertools
import os, sys
import argparse
from time import strftime
def open_infile(in_file):
if in_file.endswith(".csv"):
file_name=pd.read_csv(in_file,sep=',')
elif in_file.endswith(".tsv") or in_file.endswith(".txt"):
file_name=pd.read_csv(in_file,sep='\t')
else:
        # print the concern and exit the program with a nonzero status
        print("Upload a properly formatted file!")
        sys.exit(1)  # a nonzero exit code signals the error to the caller
return file_name
def data_processing(file,chromosome):
    # create a new column where the chromosome labels are stored as integers
    # replace the "X" and "Y" chromosomes with integer codes
file["new_chr"] = file[chromosome].replace(['X', 'Y'], ['23', '24'])
file["new_chr"] = file["new_chr"].astype(int)
return file
def prepare_file(file,pval,chromosome):
# add an index column
file["index"] = [i for i in range(1,len(file[[chromosome]])+1)]
# perform p value log transformation
    file["-log10(p)"] = np.where(file[pval] > 0, -np.log10(file[pval]), 0)
file_new = file.sort_values(chromosome)
return file_new
def edit(file_new,position,chromosome):
# add new position for plotting across x-axis
positions=file_new[[chromosome, position]]
new_pos = []
add=0
for chro,posi in positions.groupby(chromosome):
new_pos.append(posi[[position]]+add)
add+=posi[position].max() # maximum position within each chromosome
# append new positions to the data frame
file_new['new_pos'] = | pd.concat(new_pos) | pandas.concat |
#!/usr/bin/env python
# coding: utf-8
# # COVID-19 - Global Cases - EDA and Forecasting
# This is the data repository for the 2019 Novel Coronavirus Visual Dashboard operated by the Johns Hopkins University Center for Systems Science and Engineering (JHU CSSE). Also, Supported by ESRI Living Atlas Team and the Johns Hopkins University Applied Physics Lab (JHU APL).
#
# Data is sourced from https://github.com/CSSEGISandData/COVID-19/tree/master/csse_covid_19_data
#
#
# * Visual Dashboard (desktop):
# https://www.arcgis.com/apps/opsdashboard/index.html#/bda7594740fd40299423467b48e9ecf6
#
# * Visual Dashboard (mobile):
# http://www.arcgis.com/apps/opsdashboard/index.html#/85320e2ea5424dfaaa75ae62e5c06e61
#
# * Lancet Article:
# An interactive web-based dashboard to track COVID-19 in real time
#
# * Provided by Johns Hopkins University Center for Systems Science and Engineering (JHU CSSE):
# https://systems.jhu.edu/
#
# * Data Sources:
#
# - World Health Organization (WHO): https://www.who.int/
# - DXY.cn. Pneumonia. 2020. http://3g.dxy.cn/newh5/view/pneumonia.
# - BNO News: https://bnonews.com/index.php/2020/02/the-latest-coronavirus-cases/
# - National Health Commission of the People’s Republic of China (NHC):
# http://www.nhc.gov.cn/xcs/yqtb/list_gzbd.shtml
# - China CDC (CCDC): http://weekly.chinacdc.cn/news/TrackingtheEpidemic.htm
# - Hong Kong Department of Health: https://www.chp.gov.hk/en/features/102465.html
# - Macau Government: https://www.ssm.gov.mo/portal/
# - Taiwan CDC: https://sites.google.com/cdc.gov.tw/2019ncov/taiwan?authuser=0
# - US CDC: https://www.cdc.gov/coronavirus/2019-ncov/index.html
# - Government of Canada: https://www.canada.ca/en/public-health/services/diseases/coronavirus.html
# - Australia Government Department of Health: https://www.health.gov.au/news/coronavirus-update-at-a-glance
# - European Centre for Disease Prevention and Control (ECDC): https://www.ecdc.europa.eu/en/geographical-distribution-2019-ncov-cases
# - Ministry of Health Singapore (MOH): https://www.moh.gov.sg/covid-19
# - Italy Ministry of Health: http://www.salute.gov.it/nuovocoronavirus
#
# - Additional Information about the Visual Dashboard:
# https://systems.jhu.edu/research/public-health/ncov/
#
# Contact Us:
#
# Email: <EMAIL>
#
# Terms of Use:
#
# This GitHub repo and its contents herein, including all data, mapping, and analysis, copyright 2020 Johns Hopkins University, all rights reserved, is provided to the public strictly for educational and academic research purposes. The Website relies upon publicly available data from multiple sources, that do not always agree. The Johns Hopkins University hereby disclaims any and all representations and warranties with respect to the Website, including accuracy, fitness for use, and merchantability. Reliance on the Website for medical guidance or use of the Website in commerce is strictly prohibited.
# __For better viewing experience, I recommend to enable NBextensions as guided @__
#
# https://github.com/lsunku/DataScience/tree/master/JupyterNotebook
# # Steps invoved in this notebook
# 1. Import Python Libraries for data analysis and ML
# 2. Local user defined functions
# 3. Sourcing the Data
# 4. Inspect and Clean the Data
# 5. Exploratory Data Analysis
# 6. Preparing the data for modelling(train-test split, rescaling etc)
# 7. Model evaluation for Advanced Regression Criteria
# 8. Linear Regression Model for World Wide Case Predictions
# 9. Linear Regression Model for Italy Predictions
# 10. Linear Regression Model for US Predictions
# 11. Linear Regression Model for Spain Predictions
# 12. Linear Regression Model for Germany Predictions
# 13. Linear Regression Model for India Predictions
# __Notes:__ Currently, I have used only time_series_covid19_confirmed_global for the following analysis. When I get time, I shall enhance the same with additional files time_series_covid19_deaths_global, time_series_covid19_recovered_global and integrate with daily reports.
# # __Import Python Functions__
# In[284]:
# Local classes and Local flags
# Local Classes
class color:
PURPLE = '\033[95m'
CYAN = '\033[96m'
DARKCYAN = '\033[36m'
BLUE = '\033[94m'
GREEN = '\033[92m'
YELLOW = '\033[93m'
RED = '\033[91m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
END = '\033[0m'
# Debug flag for investigative purpose
DEBUG = 0
# Default random_state
rndm_stat = 42
# In[285]:
# Python libraries for Data processing and analysis
import time as time
strt = time.time()
import pandas as pd
| pd.set_option('display.max_columns', 200) | pandas.set_option |
#!/usr/bin/env python
# coding: utf-8
# In[14]:
# Reading the csv file for gRNA (for HPV16 in our example)
import pandas as pd
import re
import os
import subprocess
import pickle
# In[15]:
# from Bio import SeqIO
# for sequence in SeqIO.parse('HPV16.fasta', "fasta"):
#print(sequence.seq)
# print(len(sequence),'nucleotides')
# In[16]:
df1 = pd.read_csv("HIV16_scores.csv")
df1.head()
# In[17]:
len(df1)
# In[18]:
df = df1[['sgRNA Cut Position (1-based)', 'Orientation', 'sgRNA Sequence', 'On-Target Efficacy Score']].copy()
# In[19]:
df.head()
# In[20]:
## Finding the top quartile
ng = len(df)  # number of guide candidates
df = df.sort_values(by=["On-Target Efficacy Score"], ascending=False)
# In[21]:
gRNA_list = list(df["sgRNA Sequence"])
GuideName_list = list(df["sgRNA Cut Position (1-based)"])
score_list = list(df["On-Target Efficacy Score"])
# Function to compare two strings
def Str2MatchStr1(str1, str2):
count = 0
l = len(str1)
for i in range(l):
if str1[i]== str2[i]:
count = count+1
return count
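# Illustrative check (not part of the original notebook): Str2MatchStr1 counts
# position-wise matches between two equal-length strings, e.g.
#     Str2MatchStr1("ACGT", "ACGA")  # -> 3, only the last base differs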
match1 = []
match2 = []
similar_nt = []
score_gRNA_A = []
score_gRNA_B = []
GuideName_A = []
GuideName_B = []
l = len(gRNA_list)
for i in range(l-1):
for j in range(i+1,l):
#print(i,j)
x = gRNA_list[i]
row = gRNA_list[j]
score1 = score_list[i]
score2 = score_list[j]
guideA = GuideName_list[i]
guideB = GuideName_list[j]
matchnumber = Str2MatchStr1(x, row)
if (matchnumber >= 15):
match1.append(x)
match2.append(row)
similar_nt.append(matchnumber)
score_gRNA_A.append(score1)
score_gRNA_B.append(score2)
GuideName_A.append(guideA)
GuideName_B.append(guideB)
#print('position', i,'gRNA', x, ' and ','position',j, 'match', row, 'similar nt', matchnumber)
# In[22]:
df_pairs = | pd.DataFrame({'GuideA Position': GuideName_A,'Guide A':match1,'Guide A Score': score_gRNA_A,'GuideB Position': GuideName_B,'Guide B': match2, 'Guide B Score': score_gRNA_B,'Similar NT': similar_nt}) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Created on Mon June 4 00:44:37 2018
@author: istancin
"""
import sys
import traceback
import copy
from weka.core import jvm
import pandas as pd
from main import main_api
def __update_count(count, max_count):
"""
    Update count for the next permutation. If all permutations are
    done, return -1 as the stopping criterion.
:param max_count: list
:param count: list
:return: list
"""
n = len(count)
for i in range(n):
if (count[n-i-1]) < max_count[n-i-1]:
count[n-i-1] += 1
break
elif count == max_count:
count = -1
break
else:
count[n-i-1] = 0
return count
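# Illustrative walk-through (not part of the original script): with
# max_count = [1, 1], repeated calls to __update_count advance the counter
# like an odometer:
#     [0, 0] -> [0, 1] -> [1, 0] -> [1, 1] -> -1
# The final -1 signals that every combination has been visited.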
def __append_args(max_count, count, keys, dictionary):
"""
    Function that creates the current permutation based on count (the indexes
    that need to be used in this permutation) and updates count for the next
    permutation.
    -dictionary: keys are the cmd arguments to use, while each value is a list
    of values that need to be permuted for that key (cmd arg).
    -max_count is the stopping criterion. It is a list that says how
    many values are in each value list of the dictionary.
    -count is a list in which we store the current element indexes
    that we need to pick from each of the value lists in the dictionary.
    -keys is a list of keys in the dictionary
:param max_count: list
:param count: list
:param keys: list
:param dictionary: dictionary
:return: tuple
"""
result = []
for i in range(len(keys)):
result.append(keys[i])
result.append(str(dictionary[keys[i]][count[i]]))
count = __update_count(count, max_count)
return count, result
def __permutations_generator(dictionary, const):
"""
Function that creates all possible permutations of arguments based on
const and dict it gets. For example:
const = ['--algorithm', 'Jrip']
dictionary = {'--path': ['t1.txt', 't2.txt'],
'--N-jrip': ['50', '70']}
Creates fallowing permutations:
['--algorithm', 'Jrip', '--path', 't1.txt', '--N-jrip', '50']
['--algorithm', 'Jrip', '--path', 't1.txt', '--N-jrip', '70']
['--algorithm', 'Jrip', '--path', 't2.txt', '--N-jrip', '50']
['--algorithm', 'Jrip', '--path', 't2.txt', '--N-jrip', '70']
    The const part will always be in the result, while arguments from
    the dictionary will be permuted.
:param dictionary: dictionary
:param const: list
:yield: list
"""
max_count = list()
count = list()
keys = list()
for k, v in dictionary.items():
max_count.append(len(v)-1)
count.append(0)
keys.append(k)
while count != -1:
args = []
for x in const:
args.append(x)
count, arg = __append_args(max_count, count, keys, dictionary)
args += arg
yield args
def __create_stats_data(args, stats_config, stats_config_default_columns, stats, index, column, accuracy_mean, accuracy_std):
index = tuple([args[args.index(x) + 1] for x in stats_config[0]])
column = [args[args.index(x) + 1] for x in stats_config[1]]
column_mean = tuple(column + [stats_config_default_columns[0]])
column_std = tuple(column + [stats_config_default_columns[1]])
return index, column_mean, column_std
#==============================================================================
# stats_lists = [[args[args.index(x) + 1] for x in stats_config[0]],
# [args[args.index(x) + 1] for x in stats_config[1]]]
# stats_lists_cpy = copy.deepcopy(stats_lists)
# stats_lists[1].append(stats_config_default_columns[0])
# stats_lists_cpy[1].append(stats_config_default_columns[1])
# stats_lists.append(accuracy_mean)
# stats_lists_cpy.append(accuracy_std)
# stats.append(stats_lists)
# stats.append(stats_lists_cpy)
# if stats_lists[0] not in index:
# index.append(stats_lists[0])
# if stats_lists[1] not in column:
# column.append(stats_lists[1])
# if stats_lists_cpy[1] not in column:
# column.append(stats_lists_cpy[1])
# return stats, index, column
#==============================================================================
def __create_excel_stats(stats, xlsx_name):
writer = pd.ExcelWriter(xlsx_name)
stats.to_excel(writer, 'Sheet1')
writer.save()
def main():
"""
    Main function for running multiple tests at once. __permutations_generator
    creates all the different permutations based on const and
    list_variabil_dict that we manually create here in this function. Those
    permutations are actually cmd line arguments which we then use to call
    the main_api function from main.py. Basically it's automation for
    running many different algorithms at once.
    const will be in each cmd line.
    list_variabil_dict is a list of dicts. The function will iterate
    over all dicts and create all permutations for each dict.
:param dictionary: dictionary
:param const: list
:yield: list
"""
const = ['/home/linuxmint/main.py', '--filetype', 'csv',
#'--resultdest', 'results.txt',
'--label', 'WINNER', '--I-rf', '200',
'--evaluation', 'cross_validate',
'--prediction', 'no',
'--C-apriori', '0.6', '--N-apriori', '300', '--N-jrip', '60',
'--L-csvload', 'WINNER:1,0']
#==============================================================================
# # List of dicts. Based on each dict permutations of arguments are created and added to
# # const part of arguments with __permutations_generator
#==============================================================================
list_variabil_dict = [{'--algorithm': ['JRip', 'RandomForest', 'J48', 'Logistic', 'NaiveBayes', 'SMO'],
'--path': ['/home/linuxmint/Downloads/dataset_13_14_prediction.csv', '/home/linuxmint/Downloads/dataset_14_15_prediction.csv', '/home/linuxmint/Downloads/dataset_15_16_prediction.csv', '/home/linuxmint/Downloads/dataset_16_17_prediction.csv', '/home/linuxmint/Downloads/dataset_17_18_prediction.csv'],
#'--excludecol': ['WIN_GROUP,WIN_GROUP_OPP,SOME2,SOME2_OPP,FG_PCT,FG_PCT_OPP,MIN,TEAM_CITY,TEAM_ABBREAVIATION,TEAM_NAME,WINS,EPR1,EPR2,EPR3,EPR4,TO,TO1,TO2,TO3,SOME,SOME1,SOME3,SOME4,SOME5,SOME6,SOME11,SOME22,SOME33,SOME44,SOME55,SOME66,NEW,NEW1,OLD,OLD1,FG_PCT_OPP,WINS_OPP,EPR1_OPP,EPR2_OPP,EPR3_OPP,EPR4_OPP,TO_OPP,TO1_OPP,TO2_OPP,TO3_OPP,SOME_OPP,SOME1_OPP,SOME3_OPP,SOME4_OPP,SOME5_OPP,SOME6_OPP,NEW_OPP,NEW1_OPP,OLD_OPP'],
'--includecol': ['AST,OPP_AST,WINNER', 'SAST,OPP_SAST,WINNER', 'AST,SAST,FTAST,PASS,OPP_AST,OPP_SAST,OPP_FTAST,OPP_PASS,WINNER', 'EPR,OPP_EPR,WINNER', 'EPR,AST,OPP_EPR,OPP_AST,WINNER', 'AST,AST_OPP,OPP_AST,OPP_AST_OPP,WINNER', 'SAST,SAST_OPP,OPP_SAST,OPP_SAST_OPP,WINNER', 'AST,SAST,FTAST,PASS,AST_OPP,SAST_OPP,FTAST_OPP,PASS_OPP,OPP_AST,OPP_SAST,OPP_FTAST,OPP_PASS,OPP_AST_OPP,OPP_SAST_OPP,OPP_FTAST_OPP,OPP_PASS_OPP,WINNER', 'EPR,EPR_OPP,OPP_EPR,OPP_EPR_OPP,WINNER', 'EPR,AST,EPR_OPP,AST_OPP,OPP_EPR,OPP_AST,OPP_EPR_OPP,OPP_AST_OPP,WINNER'],
'--exclude-game-team-id': ['no']
},
{'--algorithm': ['JRip', 'RandomForest', 'J48', 'Logistic', 'NaiveBayes', 'SMO'],
'--path': ['/home/linuxmint/Downloads/dataset_13_14_prediction.csv', '/home/linuxmint/Downloads/dataset_14_15_prediction.csv', '/home/linuxmint/Downloads/dataset_15_16_prediction.csv', '/home/linuxmint/Downloads/dataset_16_17_prediction.csv', '/home/linuxmint/Downloads/dataset_17_18_prediction.csv'],
'--includecol': ['UFG_PCT,OPP_UFG_PCT,WINNER', 'CFG_PCT,OPP_CFG_PCT,WINNER', 'UFG_PCT,CFG_PCT,OPP_UFG_PCT,OPP_CFG_PCT,WINNER', 'SOME,OPP_SOME,WINNER', 'SOME1,OPP_SOME1,WINNER', 'SOME2,OPP_SOME2,WINNER', 'SOME3,OPP_SOME3,WINNER', 'SOME4,OPP_SOME4,WINNER', 'SOME5,OPP_SOME5,WINNER', 'SOME6,OPP_SOME6,WINNER', 'SOME22,OPP_SOME22,WINNER' ]
}
]
stats_configs = [(('--path',), ('--algorithm', '--includecol'), 'epr_ast_analysis.xlsx'),
(('--path',), ('--algorithm', '--includecol'), 'UCFG_analysis.xlsx')]
    stats = list()
    index = list()
    column = list()
stats_config_default_columns = ['mean', 'std']
for i, variabil_dict in enumerate(list_variabil_dict):
if stats_configs:
stats_config = stats_configs[i]
index = pd.MultiIndex.from_product([variabil_dict[x] for x in stats_config[0]])
temp = [variabil_dict[x] for x in stats_config[1]]
temp.append(stats_config_default_columns)
columns = pd.MultiIndex.from_product(temp)
xlsx_name = stats_config[2]
stats = | pd.DataFrame(index=index, columns=columns) | pandas.DataFrame |
"""Provide ground truth."""
import logging
import os
from datetime import datetime, timedelta
import numpy as np
import pandas as pd
from tqdm import tqdm
logger = logging.getLogger(__name__)
def provide_ground_truth(main_dir, date, xml):
ind = xml.find('T')
time = xml[ind+1:ind+7]
overpass_time = datetime.strptime(date+time, '%Y%m%d%H%M%S')
lower_time = overpass_time - timedelta(minutes=30)
upper_time = overpass_time + timedelta(minutes=30)
if sorted(os.listdir('../data/ground_truth')) == sorted(os.listdir(
main_dir + '/ground_truth/')):
logger.info("CSV files already exist.")
return
for file in tqdm(os.listdir('../data/ground_truth'), desc='Create csv for %s .' % date):
logger.info('Extract data from %s' % file)
df = pd.read_csv('../data/ground_truth/' + file, sep=',',index_col=0)
df['TIMESTAMP'] = | pd.to_datetime(df['TIMESTAMP']) | pandas.to_datetime |
import json
import os
import networkx as nx
import pandas as pd
import numpy as np
from .app import Quantum_GUI
from .graph_comp import GraphNode
class run_gui():
def __init__(self, name):
graph = nx.DiGraph()
tdm_table = pd.DataFrame()
delay_table = pd.DataFrame()
self.name = name
self.gui = Quantum_GUI(
graph,
delays=delay_table,
tdm=tdm_table
).get_app(name)
def load_graph(self, path_to_topology=None):
# JSON
if path_to_topology is None:
DIRECTORY, _ = os.path.split(__file__)
with open(DIRECTORY+'/starlight.json') as json_file:
network_in = json.load(json_file)
else:
with open(path_to_topology) as json_file:
network_in = json.load(json_file)
# Delay table initialization
pd.options.display.float_format = '{:.2e}'.format
table = network_in['cchannels_table']
delay_table = pd.DataFrame(table['table'])
delay_table.columns = table['labels']
# TDM table initialization
tdm_default = np.empty(
[len(table['labels']), len(table['labels'])],
dtype=int
)
tdm_default.fill(20000)
index = 0
for x in range(tdm_default.shape[0]):
tdm_default[x][index] = 0
index += 1
tdm_table = | pd.DataFrame(tdm_default) | pandas.DataFrame |
import sys
import numpy as np
import pandas as pd
from optparse import OptionParser
import os
from scipy.stats import entropy
from scipy import signal
import scipy.stats as spstats
import fnmatch
from datetime import datetime
from scipy.stats import skew
from scipy.stats import kurtosis
from scipy.stats import t
from scipy.optimize import fsolve
import scipy.special as sc
# Extracts aggregate features per run from raw eye tracking and oculomotor event data, and builds a single feature matrix for use as input to train and validate a predictive model. If the feature matrix file already exists from a prior run of getFeatureMatrix(), you can save time by specifying useExisting=True to load it directly from the file rather than recomputing it from scratch.
# Research was sponsored by the United States Air Force Research Laboratory and the
# United States Air Force Artificial Intelligence Accelerator and was accomplished
# under Cooperative Agreement Number FA8750-19-2-1000. The views and conclusions
# contained in this document are those of the authors and should not be interpreted
# as representing the official policies, either expressed or implied, of the United
# States Air Force or the U.S. Government. The U.S. Government is authorized to
# reproduce and distribute reprints for Government purposes notwithstanding any
# copyright notation herein.
# def main():
# parser = OptionParser()
# parser.add_option('-d', '--dataDir', action="store", dest="dataDir", default=None, help="The top level data directory containing all the raw signal files for each subject.")
# parser.add_option('-o', '--outFilePath', action="store", dest="outFilePath", default=None, help="File to write full feature matrix.");
# (options, args) = parser.parse_args()
# getFeatureMatrix(options.dataDir, options.outFilePath);
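# Usage sketch (hypothetical paths, not from the original study; getFeatureMatrix is defined below):
#     feats = getFeatureMatrix("/data/eyetracking_raw", "feature_matrix.csv", useExisting=False)
#     feats = getFeatureMatrix("/data/eyetracking_raw", "feature_matrix.csv", useExisting=True)  # reuse a saved matrix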
def getFeatureMatrix(dataDir, filePath, useExisting):
if useExisting:
if os.path.exists(filePath):
print("Found precomputed feature matrix.")
featMatDF = pd.read_csv(filePath)
print("Loaded into a dataFrame.")
return featMatDF
else:
print(
"Cannot use existing feature matrix because specified file was not found. Recomputing it from scratch."
)
subjDirs = [f.path for f in os.scandir(dataDir) if f.is_dir()]
dfHeader = [
"Subject",
"Session",
"Run",
"OverallGazeEntropyLX",
"psdMaxLX",
"psdFreqOfMaxLX",
"OverallGazeEntropyLY",
"psdMaxLY",
"psdFreqOfMaxLY",
"OverallGazeEntropyLZ",
"psdMaxLZ",
"psdFreqOfMaxLZ",
"OverallGazeEntropyRX",
"psdMaxRX",
"psdFreqOfMaxRX",
"OverallGazeEntropyRY",
"psdMaxRY",
"psdFreqOfMaxRY",
"OverallGazeEntropyRZ",
"psdMaxRZ",
"psdFreqOfMaxRZ",
"EyesClosedFractionL",
"EyesClosedFractionR",
"PupilDiamMeanL",
"PupilDiamStdevL",
"PupilDiamSkewL",
"PupilDiamKurtL",
"PupilDiamMeanR",
"PupilDiamStdevR",
"PupilDiamSkewR",
"PupilDiamKurtR",
"FixDurMean",
"FixDurStdev",
"FixDurSkew",
"FixDurKurt",
"FixDensityMean",
"FixDensityStdev",
"FixDensitySkew",
"FixDensityKurt",
"SacMainSeqMean",
"SacMainSeqStdev",
"SacPeakVelMean",
"SacPeakVelStdev",
]
# walks through the directory structure of the raw data
featMat = []
ctr = 1
for subjd in subjDirs:
sessDirs = [f.path for f in os.scandir(subjd) if f.is_dir()]
print(
"Processing subject "
+ str(ctr)
+ " of "
+ str(len(subjDirs))
+ ": "
+ os.path.basename(subjd)
)
ctr = ctr + 1
for sessd in sessDirs:
runDirs = [f.path for f in os.scandir(sessd) if f.is_dir()]
for rund in runDirs:
dataFiles = [f.path for f in os.scandir(rund) if f.is_file()]
toks = rund.split(os.path.sep)
subj = toks[-3]
sess = toks[-2]
run = toks[-1]
rawEyeFile = fnmatch.filter(dataFiles, "*lslhtcviveeye*.csv")
dfraw = pd.read_csv(rawEyeFile[0])
timeStr = dfraw["time_s"]
datalen = len(timeStr)
if datalen < 10:
continue
# if there is even one corrupted date-time string, skip this whole run.
try:
timesMillis = [convertTimeStrToMillis(f) for f in timeStr]
except ValueError:
print(
"corrupted timestamp string, skipping run = "
+ run
+ ", subj = "
+ subj
+ ", sess = "
+ sess
)
continue
ocuEvtsFile = fnmatch.filter(dataFiles, "*_ocuevts_*.csv")
if len(ocuEvtsFile) < 1:
print(
"No oculomotor events file found for run "
+ run
+ ", subj = "
+ subj
+ ", sess = "
+ sess
)
continue
try:
dfocu = | pd.read_csv(ocuEvtsFile[0]) | pandas.read_csv |
import seaborn as sns
import pandas as pd
import matplotlib.pyplot as plt
import json
import os
def load_data(file1,file2=None):
df1 = | pd.read_csv(file1) | pandas.read_csv |
##########################################
# Share issuance as factor
# December 2018
# <NAME>
##########################################
import pandas as pd
import numpy as np
import os
from pandas.tseries.offsets import *
# Note that ccm, comp and crsp_m are WRDS datasets. However, the code is useful for
# other datasets as long as they are panel datasets in conformity with those from WRDS.
# There are some methodology idiosyncrasies of the US dataset, acc. to Fama-French (1993),
# but once understood, the adaptation to other countries' datasets is totally feasible.
###################
# CRSP Block #
###################
## permco is a unique permanent identifier assigned by CRSP to all companies with issues on a CRSP file
## permno identifies a firm's security through all its history, and companies may have several stocks at one time
## shrcd is a two-digit code describing the type of shares traded. The first digit describes the type of security traded.
## exchcd is a code indicating the exchange on which a security is listed
## change variable format to int
crsp_m[['permco','permno','shrcd','exchcd']]=crsp_m[['permco','permno',
'shrcd','exchcd']].astype(int)
## Line up date to be end of month day, no adjustment on time, but on pattern
crsp_m['date']=pd.to_datetime(crsp_m['date'])
crsp_m['jdate']=crsp_m['date']+MonthEnd(0)
crsp_m = crsp_m[(crsp_m['date'].dt.year > 1993)] # This speeds up the algorithm,
 # but pay attention to this, as it limits the dataset.
## adjusting for delisting return
dlret.permno=dlret.permno.astype(int)
dlret['dlstdt']=pd.to_datetime(dlret['dlstdt'])
dlret['jdate']=dlret['dlstdt']+MonthEnd(0) ## pick the delist date and put into the EoP
## merge the crsp dataset with the dlret on the left indexes
crsp = pd.merge(crsp_m, dlret, how='left',on=['permno','jdate'])
crsp['dlret']=crsp['dlret'].fillna(0)
crsp['ret']=crsp['ret'].fillna(0)
crsp['retadj']=(1+crsp['ret'])*(1+crsp['dlret'])-1 ## adjusting for delisting return
crsp['me']=crsp['prc'].abs()*crsp['shrout'] # calculate market equity
crsp=crsp.drop(['dlret','dlstdt','prc','shrout'], axis=1)
## axis = 0 is the row, and is default, and axis = 1 is the column to drop
crsp=crsp.sort_values(by=['jdate','permco','me'])
## sorting columns ascending = TRUE as default, by the variables: jdate is the adj date by the EoP and
## permco is the CRSP number for stocks, and me is the market equity.
### Aggregate Market Cap ###
## sum of me across different permno belonging to same permco a given date
crsp_summe = crsp.groupby(['jdate','permco'])['me'].sum().reset_index()
## reset the index to the prior numbers as default in pandas,
## and with the changed index still there drop = False as default
# largest mktcap within a permco/date
crsp_maxme = crsp.groupby(['jdate','permco'])['me'].max().reset_index()
# join by jdate/maxme to find the permno
crsp1=pd.merge(crsp, crsp_maxme, how='inner', on=['jdate','permco','me'])
## join : {‘inner’, ‘outer’}, default ‘outer’. Outer for union and inner for intersection.
## drop me column and replace with the sum me
crsp1=crsp1.drop(['me'], axis=1)
## join with sum of me to get the correct market cap info
crsp2=pd.merge(crsp1, crsp_summe, how='inner', on=['jdate','permco'])
## sort by permno and date and also drop duplicates
crsp2=crsp2.sort_values(by=['permno','jdate']).drop_duplicates()
## keep December market cap
crsp2['year']=crsp2['jdate'].dt.year
crsp2['month']=crsp2['jdate'].dt.month
decme=crsp2[crsp2['month']==12]
decme=decme[['permno','date','jdate','me','year']].rename(columns={'me':'dec_me'})
### July to June dates
crsp2['ffdate']=crsp2['jdate']+MonthEnd(-6) ## MonthEnd(-6) is to go six months in the EoM backwards
crsp2['ffyear']=crsp2['ffdate'].dt.year
crsp2['ffmonth']=crsp2['ffdate'].dt.month
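# Illustrative example (not part of the original code): a December month-end
# jdate of 2020-12-31 gets ffdate = 2020-12-31 + MonthEnd(-6) = 2020-06-30,
# so it falls into the Fama-French year that starts in July 2020.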
crsp2['1+retx']=1+crsp2['retx'] ## retx is the holding period return w/o dividends for a month
crsp2=crsp2.sort_values(by=['permno','date'])
# cumret by stock ## pick the before year
crsp2['cumretx']=crsp2.groupby(['permno','ffyear'])['1+retx'].cumprod() ## compute the cumulative return
## of a year measured by ffyear, the data date backwards six months.
# lag cumret
crsp2['lcumretx']=crsp2.groupby(['permno'])['cumretx'].shift(1)
## shift one row (as default, axis = 0), this leads to the next period.
# lag market cap by one month
crsp2['lme']=crsp2.groupby(['permno'])['me'].shift(1)
## if first permno then use me/(1+retx) to replace the missing value
crsp2['count']=crsp2.groupby(['permno']).cumcount()
crsp2['lme']=np.where(crsp2['count']==0, crsp2['me']/crsp2['1+retx'], crsp2['lme'])
## insert a 'nan' if the count is zero, or pick the lag one market cap.
# baseline me ## pick the first month of this backwards year, and say it is the base.
mebase=crsp2[crsp2['ffmonth']==1][['permno','ffyear', 'lme']].rename(columns={'lme':'mebase'})
## merge result back together
crsp3=pd.merge(crsp2, mebase, how='left', on=['permno','ffyear'])
crsp3['wt']=np.where(crsp3['ffmonth']==1, crsp3['lme'], crsp3['mebase']*crsp3['lcumretx'])
## and really use the returns to take out the dividends distributed (but what about them?)
## wt is the adjusted lag me without dividends basically, by constructing a cum ret measure.
## the weight should have a criterium, and lagged me seems to be it. Not the current
## me, but six months behind one.
#######################
# CCM Block #
#######################
## Compustat and CRSP merged data
ccm['linkdt']=pd.to_datetime(ccm['linkdt']) ## linkdt is a calendar date marking the first effective
## date of the current link. If the link was valid before CRSP's earliest record, LINKDT is set to be
## SAS missing code ".B".
ccm['linkenddt']=pd.to_datetime(ccm['linkenddt']) ## LINKENDDT is the last effective date of the link record.
## It uses the SAS missing code ".E" if a link is still valid.
# if linkenddt is missing then set to today date
ccm['linkenddt']=ccm['linkenddt'].fillna(pd.to_datetime('today'))
###########################
### Net issuance Block ###
###########################
# The previous part is default for the CRSP dataset, but the following is
# the adaptive part to construct other type of portfolios.
# load share issuance original data
# =============================================================================
# os.chdir('C:\\Users\\n3o_A\\Google Drive (<EMAIL>)\\Doutorado Insper\\Finlab\\Finhub project ')
# share_issuance = pd.read_stata('Share_issuance.dta')
# share_issuance = share_issuance[share_issuance['exchcd'] != 0]
# share_issuance = share_issuance[['permno','date','vol','shrout','cfacshr']]
# share_issuance.to_stata('Share_issuance2.dta')
# =============================================================================
# load share issuance simplified data
os.chdir('C:\\Users\\n3o_A\\Google Drive (<EMAIL>)\\Doutorado Insper\\Finlab\\Finhub project')
share_issuance = pd.read_stata('Share_issuance2.dta')
# adjust for nan and zero values
share_issuance = share_issuance[ | pd.notnull(share_issuance['cfacshr']) | pandas.notnull |
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Data Commons Python Client API unit tests.
Unit tests for core methods in the Data Commons Python Client API.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from pandas.util.testing import assert_series_equal, assert_frame_equal
from unittest import mock
import datacommons as dc
import datacommons.utils as utils
import pandas as pd
import json
import unittest
def post_request_mock(*args, **kwargs):
    """ A mock for POST requests sent via the requests package. """
# Create the mock response object.
class MockResponse:
def __init__(self, json_data, status_code):
self.json_data = json_data
self.status_code = status_code
def json(self):
return self.json_data
# Get the request json
req = kwargs['json']
headers = kwargs['headers']
# If the API key does not match, then return 403 Forbidden
if 'x-api-key' not in headers or headers['x-api-key'] != 'TEST-API-KEY':
return MockResponse({}, 403)
# Mock responses for post requests to get_property_labels.
if args[0] == utils._API_ROOT + utils._API_ENDPOINTS['get_property_labels']:
if req['dcids'] == ['geoId/0649670']:
# Response for sending a single dcid to get_property_labels
out_arcs = ['containedInPlace', 'name', 'geoId', 'typeOf']
res_json = json.dumps({
'geoId/0649670': {
'inLabels': [],
'outLabels': out_arcs
}
})
return MockResponse({"payload": res_json}, 200)
elif req['dcids'] == ['State', 'County', 'City']:
# Response for sending multiple dcids to get_property_labels
in_arcs = ['typeOf']
out_arcs = ['name', 'provenance', 'subClassOf', 'typeOf', 'url']
res_json = json.dumps({
'City': {'inLabels': in_arcs, 'outLabels': out_arcs},
'County': {'inLabels': in_arcs, 'outLabels': out_arcs},
'State': {'inLabels': in_arcs, 'outLabels': out_arcs}
})
return MockResponse({'payload': res_json}, 200)
elif req['dcids'] == ['dc/MadDcid']:
# Response for sending a dcid that doesn't exist to get_property_labels
res_json = json.dumps({
'dc/MadDcid': {
'inLabels': [],
'outLabels': []
}
})
return MockResponse({'payload': res_json}, 200)
elif req['dcids'] == []:
# Response for sending no dcids to get_property_labels
res_json = json.dumps({})
return MockResponse({'payload': res_json}, 200)
# Mock responses for post requests to get_property_values
if args[0] == utils._API_ROOT + utils._API_ENDPOINTS['get_property_values']:
if req['dcids'] == ['geoId/06085', 'geoId/24031']\
and req['property'] == 'containedInPlace'\
and req['value_type'] == 'Town':
# Response for sending a request for getting Towns containedInPlace of
# Santa Clara County and Montgomery County.
res_json = json.dumps({
'geoId/06085': {
'in': [
{
'dcid': 'geoId/0644112',
'name': 'Los Gatos',
'provenanceId': 'dc/sm3m2w3',
'types': [
'City',
'Town'
]
},
{
'dcid': 'geoId/0643294',
'name': '<NAME>',
'provenanceId': 'dc/sm3m2w3',
'types': [
'City',
'Town'
]
}
],
'out': []
},
'geoId/24031': {
'in': [
{
'dcid': 'geoId/2462850',
'name': 'Poolesville',
'provenanceId': 'dc/sm3m2w3',
'types': [
'City',
'Town'
]
},
],
'out': []
}
})
return MockResponse({'payload': res_json}, 200)
if req['dcids'] == ['geoId/06085', 'geoId/24031']\
and req['property'] == 'name':
# Response for sending a request for the name of multiple dcids.
res_json = json.dumps({
'geoId/06085': {
'in': [],
'out': [
{
'value': 'Santa Clara County',
'provenanceId': 'dc/sm3m2w3',
},
]
},
'geoId/24031': {
'in': [],
'out': [
{
'value': 'Montgomery County',
'provenanceId': 'dc/sm3m2w3',
},
]
}
})
return MockResponse({'payload': res_json}, 200)
if req['dcids'] == ['geoId/06085', 'geoId/24031']\
and req['property'] == 'madProperty':
# Response for sending a request with a property that does not exist.
res_json = json.dumps({
'geoId/06085': {
'in': [],
'out': []
},
'geoId/24031': {
'in': [],
'out': []
}
})
return MockResponse({'payload': res_json}, 200)
if req['dcids'] == ['geoId/06085', 'dc/MadDcid']\
and req['property'] == 'containedInPlace':
# Response for sending a request with a single dcid that does not exist.
res_json = json.dumps({
'geoId/06085': {
'in': [
{
'dcid': 'geoId/0644112',
'name': '<NAME>',
'provenanceId': 'dc/sm3m2w3',
'types': [
'City',
'Town'
]
},
],
'out': []
},
'dc/MadDcid': {
'in': [],
'out': []
}
})
return MockResponse({'payload': res_json}, 200)
if req['dcids'] == ['dc/MadDcid', 'dc/MadderDcid']:
# Response for sending a request where both dcids do not exist.
res_json = json.dumps({
'dc/MadDcid': {
'in': [],
'out': []
},
'dc/MadderDcid': {
'in': [],
'out': []
}
})
return MockResponse({'payload': res_json}, 200)
if req['dcids'] == [] and req['property'] == 'containedInPlace':
# Response for sending a request where no dcids are given.
res_json = json.dumps({})
return MockResponse({'payload': res_json}, 200)
# Mock responses for post requests to get_triples
if args[0] == utils._API_ROOT + utils._API_ENDPOINTS['get_triples']:
if req['dcids'] == ['geoId/06085', 'geoId/24031']:
# Response for sending a request with two valid dcids.
res_json = json.dumps({
'geoId/06085': [
{
"subjectId": "geoId/06085",
"predicate": "name",
"objectValue": "Santa Clara County"
},
{
"subjectId": "geoId/0649670",
"subjectName": "Mountain View",
"subjectTypes": [
"City"
],
"predicate": "containedInPlace",
"objectId": "geoId/06085",
"objectName": "Santa Clara County"
},
{
"subjectId": "geoId/06085",
"predicate": "containedInPlace",
"objectId": "geoId/06",
"objectName": "California"
},
],
'geoId/24031': [
{
"subjectId": "geoId/24031",
"predicate": "name",
"objectValue": "Montgomery County"
},
{
"subjectId": "geoId/2467675",
"subjectName": "Rockville",
"subjectTypes": [
"City"
],
"predicate": "containedInPlace",
"objectId": "geoId/24031",
"objectName": "Montgomery County"
},
{
"subjectId": "geoId/24031",
"predicate": "containedInPlace",
"objectId": "geoId/24",
"objectName": "Maryland"
},
]
})
return MockResponse({'payload': res_json}, 200)
if req['dcids'] == ['geoId/06085', 'dc/MadDcid']:
# Response for sending a request where one dcid does not exist.
res_json = json.dumps({
'geoId/06085': [
{
"subjectId": "geoId/06085",
"predicate": "name",
"objectValue": "Santa Clara County"
},
{
"subjectId": "geoId/0649670",
"subjectName": "Mountain View",
"subjectTypes": [
"City"
],
"predicate": "containedInPlace",
"objectId": "geoId/06085",
"objectName": "Santa Clara County"
},
{
"subjectId": "geoId/06085",
"predicate": "containedInPlace",
"objectId": "geoId/06",
"objectName": "California"
},
],
'dc/MadDcid': []
})
return MockResponse({'payload': res_json}, 200)
if req['dcids'] == ['dc/MadDcid', 'dc/MadderDcid']:
# Response for sending a request where both dcids do not exist.
res_json = json.dumps({
'dc/MadDcid': [],
'dc/MadderDcid': []
})
return MockResponse({'payload': res_json}, 200)
if req['dcids'] == []:
# Response for sending a request where no dcids are given.
res_json = json.dumps({})
return MockResponse({'payload': res_json}, 200)
# Otherwise, return an empty response and a 404.
return MockResponse({}, 404)
class TestGetPropertyLabels(unittest.TestCase):
""" Unit tests for get_property_labels. """
@mock.patch('requests.post', side_effect=post_request_mock)
def test_single_dcid(self, post_mock):
""" Calling get_property_labels with a single dcid returns a valid
result.
"""
# Set the API key
dc.set_api_key('TEST-API-KEY')
# Test for outgoing property labels
out_props = dc.get_property_labels(['geoId/0649670'])
self.assertDictEqual(out_props,
{'geoId/0649670': ["containedInPlace", "name", "geoId", "typeOf"]})
# Test with out=False
in_props = dc.get_property_labels(['geoId/0649670'], out=False)
self.assertDictEqual(in_props, {'geoId/0649670': []})
@mock.patch('requests.post', side_effect=post_request_mock)
def test_multiple_dcids(self, post_mock):
""" Calling get_property_labels returns valid results with multiple
dcids.
"""
# Set the API key
dc.set_api_key('TEST-API-KEY')
dcids = ['State', 'County', 'City']
expected_in = ["typeOf"]
expected_out = ["name", "provenance", "subClassOf", "typeOf", "url"]
# Test for outgoing property labels
out_props = dc.get_property_labels(dcids)
self.assertDictEqual(out_props, {
'State': expected_out,
'County': expected_out,
'City': expected_out,
})
# Test for incoming property labels
in_props = dc.get_property_labels(dcids, out=False)
self.assertDictEqual(in_props, {
'State': expected_in,
'County': expected_in,
'City': expected_in,
})
@mock.patch('requests.post', side_effect=post_request_mock)
def test_bad_dcids(self, post_mock):
""" Calling get_property_labels with dcids that do not exist returns empty
results.
"""
# Set the API key
dc.set_api_key('TEST-API-KEY')
# Test for outgoing property labels
out_props = dc.get_property_labels(['dc/MadDcid'])
self.assertDictEqual(out_props, {'dc/MadDcid': []})
# Test for incoming property labels
in_props = dc.get_property_labels(['dc/MadDcid'], out=False)
self.assertDictEqual(in_props, {'dc/MadDcid': []})
@mock.patch('requests.post', side_effect=post_request_mock)
def test_no_dcids(self, post_mock):
""" Calling get_property_labels with no dcids returns empty results. """
# Set the API key
dc.set_api_key('TEST-API-KEY')
# Test for outgoing property labels
out_props = dc.get_property_labels([])
self.assertDictEqual(out_props, {})
# Test for incoming property labels
in_props = dc.get_property_labels([], out=False)
self.assertDictEqual(in_props, {})
class TestGetPropertyValues(unittest.TestCase):
""" Unit tests for get_property_values. """
# --------------------------- STANDARD UNIT TESTS ---------------------------
@mock.patch('requests.post', side_effect=post_request_mock)
def test_multiple_dcids(self, post_mock):
""" Calling get_property_values with multiple dcids returns valid
results.
"""
# Set the API key
dc.set_api_key('TEST-API-KEY')
dcids = ['geoId/06085', 'geoId/24031']
# Get the containedInPlace Towns for Santa Clara and Montgomery County.
towns = dc.get_property_values(
dcids, 'containedInPlace', out=False, value_type='Town')
self.assertDictEqual(towns, {
'geoId/06085': ['geoId/0643294', 'geoId/0644112'],
'geoId/24031': ['geoId/2462850']
})
# Get the name of Santa Clara and Montgomery County.
names = dc.get_property_values(dcids, 'name')
self.assertDictEqual(names, {
'geoId/06085': ['Santa Clara County'],
'geoId/24031': ['Montgomery County']
})
@mock.patch('requests.post', side_effect=post_request_mock)
def test_bad_dcids(self, post_mock):
""" Calling get_property_values with dcids that do not exist returns empty
results.
"""
# Set the API key
dc.set_api_key('TEST-API-KEY')
bad_dcids_1 = ['geoId/06085', 'dc/MadDcid']
bad_dcids_2 = ['dc/MadDcid', 'dc/MadderDcid']
# Get entities containedInPlace of Santa Clara County and a dcid that does
# not exist.
contained_1 = dc.get_property_values(bad_dcids_1, 'containedInPlace', out=False)
self.assertDictEqual(contained_1, {
'geoId/06085': ['geoId/0644112'],
'dc/MadDcid': []
})
# Get entities containedInPlace for two dcids that do not exist.
contained_2 = dc.get_property_values(bad_dcids_2, 'containedInPlace')
self.assertDictEqual(contained_2, {
'dc/MadDcid': [],
'dc/MadderDcid': []
})
@mock.patch('requests.post', side_effect=post_request_mock)
def test_bad_property(self, post_mock):
""" Calling get_property_values with a property that does not exist returns
empty results.
"""
# Set the API key
dc.set_api_key('TEST-API-KEY')
# Get propery values for a property that does not exist.
prop_vals = dc.get_property_values(
['geoId/06085', 'geoId/24031'], 'madProperty')
self.assertDictEqual(prop_vals, {
'geoId/06085': [],
'geoId/24031': []
})
@mock.patch('requests.post', side_effect=post_request_mock)
def test_no_dcids(self, post_mock):
""" Calling get_property_values with no dcids returns empty results. """
# Set the API key
dc.set_api_key('TEST-API-KEY')
# Get property values with an empty list of dcids.
prop_vals = dc.get_property_values([], 'containedInPlace')
self.assertDictEqual(prop_vals, {})
# ---------------------------- PANDAS UNIT TESTS ----------------------------
@mock.patch('requests.post', side_effect=post_request_mock)
def test_series(self, post_mock):
""" Calling get_property_values with a Pandas Series returns the correct
results.
"""
# Set the API key
dc.set_api_key('TEST-API-KEY')
# The given and expected series.
dcids = pd.Series(['geoId/06085', 'geoId/24031'])
expected = pd.Series([
['geoId/0643294', 'geoId/0644112'],
['geoId/2462850']
])
# Call get_property_values with the series as input
actual = dc.get_property_values(
dcids, 'containedInPlace', out=False, value_type='Town')
assert_series_equal(actual, expected)
@mock.patch('requests.post', side_effect=post_request_mock)
    def test_series_bad_dcids(self, post_mock):
        """ Calling get_property_values with a Pandas Series containing dcids that do
        not exist returns an empty result.
"""
# Set the API key
dc.set_api_key('TEST-API-KEY')
# The given and expected series
bad_dcids_1 = pd.Series(['geoId/06085', 'dc/MadDcid'])
bad_dcids_2 = pd.Series(['dc/MadDcid', 'dc/MadderDcid'])
expected_1 = pd.Series([['geoId/0644112'], []])
expected_2 = | pd.Series([[], []]) | pandas.Series |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# HEREHEREHERE
#############################################################################
#
# /home/git/clones/external/SAS_3DSpectrographs/py/gratingequation.py
# ; read-quoted-char-radix
#emacs helpers
# (insert (format "\n# " (buffer-file-name)))
#
# (set-input-method 'TeX' t)
# (toggle-input-method)
# (ediff-current-file)
# (wg-python-fix-pdbrc)
# (find-file-other-frame "./.pdbrc")
# (wg-python-fix-pdbrc) # PDB DASH DEBUG end-comments
#
# (setq mypdbcmd (concat (buffer-file-name) "<args...>"))
# (progn (wg-python-fix-pdbrc) (pdb mypdbcmd))
#
# (wg-astroconda-pdb) # IRAF27
# (wg-astroconda3-pdb) # CONDA Python3
#
# (set-background-color "light blue")
# (wg-python-toc)
#
# conda install jupyterlab
# conda install -c conda-forge voila
# conda install nodejs
# jupyter labextension install @jupyter-widgets/jupyterlab-manager
#
# M-x set-input-mode RET TeX (then toggle-input-mode )
#
# (wg-python-toc)
#
#
# __doc__ = """
#
# __author__ = '<NAME>'
#
# __version__ = '0.1'
#
# class GratingException(Exception):
# def __init__(self,message,errors=None):
# @staticmethod
# def __format__(e):
#
# class Grating: # Grating(object) if inherited
# def __init__(self,alpha : "degrees", # Grating::__init__()
# def setalpha(self,newalpha: "degrees"): # Grating::setalpha()
# def setmode(self,newmode: "integer"): # Grating::setmode()
# def setgrating(self,linesmm:float): # Grating::setgrating()
# def setsize(self,length:float ,width:float): # Grating::setsize()
# def setblaze(self,blaze: float): # Grating::setblaze()
# def setlmm(self,lmm: float): # Grating::setlmm()
# def difftable(self,df: pd.DataFrame, key: str): # Grating::difftable()
# def debug(self,msg="",os=sys.stderr): # Grating::debug()
# def grating_quation(self, waverange,step = 0) -> 'radians': # Grating::grating_quation()
# def report(self): # Grating::report()
# def csv(self,fname: 'string'): # Grating::csv()
# def groovedepth(self): # Grating::groovedepth()
# def startplot(self): # Grating::startplot
# def plot(self,keys=[]): # Grating::plot
# def littrow_equation(self,α): # Grating.littrow_equation()
# def peakAngle(self,λ: "angstroms" ): # Grating::peakAngle()
# def phi(self,λ : "angstroms") -> 'degrees': # Grating::phi()
#
# if __name__ == "__main__":
#
#
#
#
#############################################################################
import optparse
import re
import sys
import numpy as np
import pandas as pd
from numpy import sin,cos,arcsin,arccos,radians,degrees
import matplotlib.pyplot as plt
# (wg-python-graphics)
__doc__ = """
gratingequation.py [options]
options:
-p, --plot bool make a Matplotlib plot
-r, --report bool make a 'report' to stdout
-c, --csv <pathname> bool produce a CSV file to path/file name.
-a --alpha float incidence angle
-b --blaze float degrees blaze angle
-m --mode int mode
-l --lmm int lines per mm
This is a basic stand-alone program that is a useful regression
testbed for the Grating class. It permits playing with grating
equations. The idea is to collect attributes of the grating and
provide a set of equations and other functions to compute values for a
grating design. It will produce plots and tables.
Again, this is a regression test for the Grating class. It shows
some of the features (OK I was lazy and did not fully regression
test!). Use it as you will.
<NAME>'s favorite grating web site:
https://www.spectrogon.com/product-services/gratings/grating-design-tool/
handy notes about Pandas
https://pandas.pydata.org/Pandas_Cheat_Sheet.pdf
The red dashed line is the angle of incidence.
The green dotted line is the normal.
the blue dotted line is the angle of diffraction.
"""
__copyright__ = """Copyright 2020 <NAME>.
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
Open Source Initiative Approved License: MIT
"""
__author__ = '<NAME>'
__version__ = '0.1'
__all__ = ['GratingException','Grating'] # expose these things
##############################################################################
# GratingException
#
##############################################################################
class GratingException(Exception):
"""Special exception to allow differentiated capture of exceptions"""
def __init__(self,message,errors=None):
super(GratingException,self).__init__("Grating "+ message)
self.errors = errors
@staticmethod
def __format__(e):
        return "Grating %s" % e
# GratingException
##############################################################################
# Grating
#
##############################################################################
class Grating: # Grating(object) if inherited
""" Permit more than one instance per run.
ALL units in centimeters. 5000 angstroms is 5e-5 cm.
Gratings are lines per cm
Mode is integer (signed)
    blaze is stated in degrees (per manufacturer's data, we convert to radians)
"""
# https://en.jeulin.fr/simple-radial-slits-212076.html
ovioslits_cm = np.array([10.0, 20.0, 30.0, 40.0, 50.0, 70.0, 100.0,
150.0, 200.0, 300.0, 500.0, 700.0])/10000.0 # in cm
latex1 = """\\frac{m\\lambda}{d} &= sin(\\alpha) + sin(\\beta)"""
print1 = "mλ/d = sin(β) + sin(α)" # print a basic equation (unicode to rescue)
specrange = np.arange(3300, 8000, 100) # units: angstroms every 10nm useful default range
def __init__(self,alpha : "degrees", # Grating::__init__()
m : "Mode [integer]",
lmm : "lines per mm",
blaze : "degrees",
slit : "microns",
length: "mm" = 25,
width : "mm" = 25):
"""Describe the grating in conventional terms, we use CM and angstroms
as our basis.
Key traits of a Grating:
alpha rotation of grating w.r.t. grating normal
m mode
d count of lines per mm
blaze The blaze angle for this grating
length length of the physical grating surface
width width of the physical grating surface
Developed internally to each instance
wave the wavelength range per call, reflect the last one
dispersion accumulate data for different settings
df Pandas dataframe to accumulate/manage data. Key is alpha+mode+lmm
"""
#super(base,self).__init__()
#self.
self.alpha = alpha # rotation of grating w.r.t. grating normal
self.m = m # mode
self.lmm = float(lmm) # remember lines per mm
self.d = 1.0/(self.lmm * 10) # inverse count of lines per mm
self.blaze = blaze # The blaze angle for this grating
self.length = length # length of the physical grating surface
self.width = width # width of the physical grating surface
self.wave = [] # the wavelength range per call, reflect the last one
self.dispersion = {} # accumulate data for different settings
self.df = None # manage data as a Pandas dataframe
self.notes = [] # strings of user notes
self._fig = None # plot figure.
### Grating.__init__()
def setalpha(self,newalpha: "degrees"): # Grating::setalpha()
"""Update alpha for another go."""
self.alpha = newalpha
return self
### Grating.setalpha()
def setmode(self,newmode: "integer"): # Grating::setmode()
"""Set the new mode"""
self.m = newmode
return self
### Grating.setmode()
def setgrating(self,linesmm:float): # Grating::setgrating()
"""Set the d = lines/mm"""
self.d = float(linesmm)
return self
### Grating.setgrating()
def setsize(self,length:float ,width:float): # Grating::setsize()
"""Set the length/width of physical grating"""
self.length = length
self.width = width
return self
### Grating.setsize()
def setblaze(self,blaze: float): # Grating::setblaze()
"""Set the blaze width"""
self.blaze = float(blaze)
return self
### Grating.setblaze()
def setlmm(self,lmm: float): # Grating::setlmm()
"""Set the lines/mm and remember to update d"""
self.lmm = float(lmm)
self.d = 1.0/(self.lmm * 10) # inverse count of lines per mm
return self
### Grating.setlmm()
def difftable(self,df: pd.DataFrame, key: str): # Grating::difftable()
"""Report forward differences on a column. The column is a 'key'
comprised of alpha and mode"""
col = degrees(df[key].values) # get the angles in radians
fdiff = col[1:] - col[:-1] # first forward diff using np
ratios = fdiff[1:]/fdiff[:-1] # reduce scale.
tmp = pd.DataFrame({'Diff' : fdiff[1:], 'Ratio' : ratios}, index=self.wave[1:-1]*1e8)
print(tmp)
return self
### Grating.difftable()
def debug(self,msg="",os=sys.stderr): # Grating::debug()
"""Help with momentary debugging, file to fit."""
print("Grating - %s " % msg, file=os)
for key,value in self.__dict__.items():
if(key[0] != '_'):
print("%20s = %s" % (key,value),file=os)
return self
### Grating.debug()
__Grating_debug = debug # preserve our debug name if we're inherited
def grating_quation(self, waverange = None,step = 0) -> 'radians': # Grating::grating_quation()
"""Return β from applying the grating equation to a numpy array of wavelengths
(cm), given the conditions that are held constant of this class.
"""
        np.seterr(invalid='ignore') # over a broad range, arcsin will produce NaNs outside [-1, 1]
if(waverange is None):
waverange = self.specrange # provide a decent default visual span
m = float(self.m)
self.wave = waverange / 1.e8 # convert to cm
sinalpha = sin(radians(self.alpha)) # constant
sinb = m * (self.wave / self.d) + sinalpha # spread out to watch.
β = arcsin(sinb) # beta
key = """α={:5.2f}, m={:2d} lmm={:5.2f}""".format(self.alpha,self.m,int(self.lmm))
self.dispersion[key] = degrees(β) # save 'last' result
np.seterr(invalid=None) # reset for other parts of the code.
return β
### Grating.grating_quation()
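    # Usage sketch (parameter values are illustrative, not from the original):
    #     g = Grating(alpha=10.0, m=1, lmm=300, blaze=4.3, slit=50)
    #     beta = g.grating_quation()   # defaults to the 3300-8000 Angstrom range
    #     g.report()                   # tabulate the accumulated dispersion data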
def report(self): # Grating::report()
"""Create a pandas df, make a report by wavelength."""
        if self.dispersion:
pd.set_option('display.max_rows', None)
| pd.set_option('display.max_columns', None) | pandas.set_option |
"""
File util class to read data from File and write data to file
"""
import os
import json
import pandas as pd
import pyarrow as pa
import pyarrow.parquet as pq
class file_utils:
"""
Class contains functions to read different types of files and create dataframe from data
"""
def __init__(self):
"""
Contructor
"""
def read_csv_file(self,filepath,seperator=',',date_columns=[],column_name=[]):
"""
Function to read CSV,TSV flat structured file
Parameter : filepath : str - Input File Path
Parameter : seperator : str - Field Seperator (Default = ,)
Parameter : date_columns : list - List of datetime columns
Parameter : column_name : list - List of column names
Return : df : pd.DataFrame - Pandas Dataframe created using reading Data
"""
try:
if column_name:
df = pd.read_csv(filepath,escapechar="\\",sep=seperator,names=column_name,parse_dates=date_columns)
else:
df = pd.read_csv(filepath,escapechar="\\",sep=seperator,parse_dates=date_columns)
except Exception as e:
raise
return df
def read_excel_file(self,filepath,sheet_name=0,date_columns=[],column_name=[]):
"""
Function to read xlsx file
Parameter : filepath : str - Input File Path
        Parameter : sheet_name : str, int or None - str for a specific sheet name, int for the index of the sheet, and None for all sheets
Parameter : date_columns : list - List of datetime columns
Parameter : column_name : list - List of column names
Return : df : pd.DataFrame - Pandas Dataframe created using reading Data
"""
try:
if column_name:
df = pd.read_excel(filepath,sheet_name=sheet_name,names=column_name,parse_dates=date_columns)
else:
df = pd.read_excel(filepath,sheet_name=sheet_name,parse_dates=date_columns)
except Exception as e:
raise e
return df
def read_parquet_file(self,filepath):
"""
Function to read parquet file
Parameter : filepath : str - Input File Path
Return : df : pd.DataFrame - Pandas Dataframe created using reading Data
"""
try:
df = pq.read_table(filepath)
df = df.to_pandas()
except Exception as e:
raise e
return df
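    # Usage sketch (file names are placeholders, not from the original):
    #     fu = file_utils()
    #     df_csv = fu.read_csv_file("data.csv", seperator=",")
    #     df_xls = fu.read_excel_file("data.xlsx", sheet_name=0)
    #     df_pq  = fu.read_parquet_file("data.parquet")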
def read_json(self,input_str):
"""
Function to read json file or Stringyfy Json
Note : Support for only Specific Json Format which are supported by Pandas
Parameter : input_str : str - Input File Path or Stringyfy Json
Return : df : pd.DataFrame - Pandas Dataframe created using reading Data
"""
try:
            if os.path.isfile(input_str):
with open(input_str, "r") as json_file:
input_json = json.load(json_file)
df = | pd.DataFrame(input_json) | pandas.DataFrame |
import sys
import unittest
import subprocess
import time
import logging
import numpy as np
import pandas as pd
import swifter
from tqdm.auto import tqdm
from psutil import cpu_count
LOG = logging.getLogger(__name__)
LOG.setLevel(logging.INFO)
ch = logging.StreamHandler()
ch.setLevel(logging.INFO)
formatter = logging.Formatter("%(asctime)-8s.%(msecs)03d %(levelname)-8s %(name)s:%(lineno)-3s %(message)s")
ch.setFormatter(formatter)
LOG.addHandler(ch)
def math_vec_square(x):
return x ** 2
def math_foo(x, compare_to=1):
return x ** 2 if x < compare_to else x ** (1 / 2)
def math_vec_multiply(row):
return row["x"] * row["y"]
def math_agg_foo(row):
return row.sum() - row.min()
def text_foo(row):
if row["letter"] == "A":
return row["value"] * 3
elif row["letter"] == "B":
return row["value"] ** 3
elif row["letter"] == "C":
return row["value"] / 3
elif row["letter"] == "D":
return row["value"] ** (1 / 3)
elif row["letter"] == "E":
return row["value"]
class TestSwifter(unittest.TestCase):
def assertSeriesEqual(self, a, b, msg):
try:
pd.testing.assert_series_equal(a, b)
except AssertionError as e:
raise self.failureException(msg) from e
def assertDataFrameEqual(self, a, b, msg):
try:
pd.testing.assert_frame_equal(a, b)
except AssertionError as e:
raise self.failureException(msg) from e
def setUp(self):
LOG.info(f"Version {swifter.__version__}")
self.addTypeEqualityFunc(pd.Series, self.assertSeriesEqual)
self.addTypeEqualityFunc(pd.DataFrame, self.assertDataFrameEqual)
self.ncores = cpu_count()
def test_set_npartitions(self):
LOG.info("test_set_npartitions")
for swifter_df, set_npartitions, expected in zip(
[
pd.DataFrame().swifter,
pd.Series().swifter,
pd.DataFrame(
{"x": np.arange(0, 10)}, index=pd.date_range("2019-01-1", "2020-01-1", periods=10)
).swifter.rolling("1d"),
pd.DataFrame(
{"x": np.arange(0, 10)}, index=pd.date_range("2019-01-1", "2020-01-1", periods=10)
).swifter.resample("3T"),
],
[None, 1000, 1001, 1002],
[cpu_count() * 2, 1000, 1001, 1002],
):
before = swifter_df._npartitions
swifter_df.set_npartitions(set_npartitions)
actual = swifter_df._npartitions
self.assertEqual(actual, expected)
if set_npartitions is not None:
self.assertNotEqual(before, actual)
def test_set_dask_threshold(self):
LOG.info("test_set_dask_threshold")
expected = 1000
for swifter_df in [
pd.DataFrame().swifter,
| pd.Series() | pandas.Series |
# -*- coding: utf-8 -*-
from __future__ import division, print_function
from collections import OrderedDict, defaultdict
from contextlib import contextmanager
import six
import re
import os
from pandas.api.types import is_scalar, is_integer
import numpy as np
import pandas as pd
import h5py
def partition(start, stop, step):
"""Partition an integer interval into equally-sized subintervals.
Like builtin :py:func:`range`, but yields pairs of end points.
Examples
--------
>>> for lo, hi in partition(0, 9, 2):
print(lo, hi)
0 2
2 4
4 6
6 8
8 9
"""
return ((i, min(i + step, stop)) for i in range(start, stop, step))
def parse_cooler_uri(s):
"""
Parse a Cooler URI string
e.g. /path/to/mycoolers.cool::/path/to/cooler
"""
parts = s.split("::")
if len(parts) == 1:
file_path, group_path = parts[0], "/"
elif len(parts) == 2:
file_path, group_path = parts
if not group_path.startswith("/"):
group_path = "/" + group_path
else:
raise ValueError("Invalid Cooler URI string")
return file_path, group_path
def atoi(s):
return int(s.replace(",", ""))
def parse_humanized(s):
_NUMERIC_RE = re.compile("([0-9,.]+)")
_, value, unit = _NUMERIC_RE.split(s.replace(",", ""))
if not len(unit):
return int(value)
value = float(value)
unit = unit.upper().strip()
if unit in ("K", "KB"):
value *= 1000
elif unit in ("M", "MB"):
value *= 1000000
elif unit in ("G", "GB"):
value *= 1000000000
else:
raise ValueError("Unknown unit '{}'".format(unit))
return int(value)
def parse_region_string(s):
"""
Parse a UCSC-style genomic region string into a triple.
Parameters
----------
s : str
UCSC-style string, e.g. "chr5:10,100,000-30,000,000". Ensembl and FASTA
style sequence names are allowed. End coordinate must be greater than
or equal to start.
Returns
-------
(str, int or None, int or None)
"""
def _tokenize(s):
token_spec = [
("HYPHEN", r"-"),
("COORD", r"[0-9,]+(\.[0-9]*)?(?:[a-z]+)?"),
("OTHER", r".+"),
]
tok_regex = r"\s*" + r"|\s*".join(r"(?P<%s>%s)" % pair for pair in token_spec)
tok_regex = re.compile(tok_regex, re.IGNORECASE)
for match in tok_regex.finditer(s):
typ = match.lastgroup
yield typ, match.group(typ)
def _check_token(typ, token, expected):
if typ is None:
raise ValueError("Expected {} token missing".format(" or ".join(expected)))
else:
if typ not in expected:
raise ValueError('Unexpected token "{}"'.format(token))
def _expect(tokens):
typ, token = next(tokens, (None, None))
_check_token(typ, token, ["COORD"])
start = parse_humanized(token)
typ, token = next(tokens, (None, None))
_check_token(typ, token, ["HYPHEN"])
typ, token = next(tokens, (None, None))
if typ is None:
return start, None
_check_token(typ, token, ["COORD"])
end = parse_humanized(token)
if end < start:
raise ValueError("End coordinate less than start")
return start, end
parts = s.split(":")
chrom = parts[0].strip()
if not len(chrom):
raise ValueError("Chromosome name cannot be empty")
if len(parts) < 2:
return (chrom, None, None)
start, end = _expect(_tokenize(parts[1]))
return (chrom, start, end)
def parse_region(reg, chromsizes=None):
"""
Genomic regions are represented as half-open intervals (0-based starts,
1-based ends) along the length coordinate of a contig/scaffold/chromosome.
Parameters
----------
reg : str or tuple
UCSC-style genomic region string, or
Triple (chrom, start, end), where ``start`` or ``end`` may be ``None``.
chromsizes : mapping, optional
Lookup table of scaffold lengths to check against ``chrom`` and the
``end`` coordinate. Required if ``end`` is not supplied.
Returns
-------
A well-formed genomic region triple (str, int, int)
"""
if isinstance(reg, six.string_types):
chrom, start, end = parse_region_string(reg)
else:
chrom, start, end = reg
start = int(start) if start is not None else start
end = int(end) if end is not None else end
try:
clen = chromsizes[chrom] if chromsizes is not None else None
except KeyError:
raise ValueError("Unknown sequence label: {}".format(chrom))
start = 0 if start is None else start
if end is None:
if clen is None: # TODO --- remove?
raise ValueError("Cannot determine end coordinate.")
end = clen
if end < start:
raise ValueError("End cannot be less than start")
if start < 0 or (clen is not None and end > clen):
raise ValueError("Genomic region out of bounds: [{}, {})".format(start, end))
return chrom, start, end
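
# --- Usage sketch (illustrative only) ----------------------------------------
# parse_region_string/parse_region accept UCSC-style strings with optional
# humanized (comma-separated) coordinates; the chromsizes mapping below is a
# made-up example, not real assembly data.
def _parse_region_demo():
    assert parse_region_string("chr5:10,100,000-30,000,000") == ("chr5", 10100000, 30000000)
    chromsizes = pd.Series({"chr5": 181538259})
    # when the end coordinate is omitted, it is filled in from chromsizes
    assert parse_region("chr5:10,100,000-", chromsizes) == ("chr5", 10100000, 181538259)
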
def natsort_key(s, _NS_REGEX=re.compile(r"(\d+)", re.U)):
return tuple([int(x) if x.isdigit() else x for x in _NS_REGEX.split(s) if x])
def natsorted(iterable):
return sorted(iterable, key=natsort_key)
def argnatsort(array):
array = np.asarray(array)
if not len(array):
return np.array([], dtype=int)
cols = tuple(zip(*(natsort_key(x) for x in array)))
return np.lexsort(cols[::-1])
def read_chromsizes(
filepath_or,
name_patterns=(r"^chr[0-9]+$", r"^chr[XY]$", r"^chrM$"),
all_names=False,
**kwargs
):
"""
Parse a ``<db>.chrom.sizes`` or ``<db>.chromInfo.txt`` file from the UCSC
database, where ``db`` is a genome assembly name.
Parameters
----------
filepath_or : str or file-like
Path or url to text file, or buffer.
name_patterns : sequence, optional
Sequence of regular expressions to capture desired sequence names.
Each corresponding set of records will be sorted in natural order.
all_names : bool, optional
Whether to return all contigs listed in the file. Default is
``False``.
Returns
-------
:py:class:`pandas.Series`
Series of integer bp lengths indexed by sequence name.
References
----------
* `UCSC assembly terminology <http://genome.ucsc.edu/FAQ/FAQdownloads.html#download9>`_
* `GRC assembly terminology <https://www.ncbi.nlm.nih.gov/grc/help/definitions>`_
"""
if isinstance(filepath_or, six.string_types) and filepath_or.endswith(".gz"):
kwargs.setdefault("compression", "gzip")
chromtable = pd.read_csv(
filepath_or,
sep="\t",
usecols=[0, 1],
names=["name", "length"],
dtype={"name": str},
**kwargs
)
if not all_names:
parts = []
for pattern in name_patterns:
part = chromtable[chromtable["name"].str.contains(pattern)]
part = part.iloc[argnatsort(part["name"])]
parts.append(part)
chromtable = pd.concat(parts, axis=0)
chromtable.index = chromtable["name"].values
return chromtable["length"]
def fetch_chromsizes(db, **kwargs):
"""
Download chromosome sizes from UCSC as a :py:class:`pandas.Series`, indexed
by chromosome label.
"""
return read_chromsizes(
"http://hgdownload.cse.ucsc.edu/goldenPath/{}/database/chromInfo.txt.gz".format(
db
),
**kwargs
)
def load_fasta(names, *filepaths):
"""
Load lazy FASTA records from one or multiple files without reading them
into memory.
Parameters
----------
names : sequence of str
Names of sequence records in FASTA file or files.
filepaths : str
Paths to one or more FASTA files to gather records from.
Returns
-------
OrderedDict of sequence name -> sequence record
"""
import pyfaidx
if len(filepaths) == 0:
raise ValueError("Need at least one file")
if len(filepaths) == 1:
fa = pyfaidx.Fasta(filepaths[0], as_raw=True)
else:
fa = {}
for filepath in filepaths:
fa.update(pyfaidx.Fasta(filepath, as_raw=True).records)
records = OrderedDict((chrom, fa[chrom]) for chrom in names)
return records
def binnify(chromsizes, binsize):
"""
Divide a genome into evenly sized bins.
Parameters
----------
chromsizes : Series
pandas Series indexed by chromosome name with chromosome lengths in bp.
binsize : int
size of bins in bp
Returns
-------
bins : :py:class:`pandas.DataFrame`
Dataframe with columns: ``chrom``, ``start``, ``end``.
"""
def _each(chrom):
clen = chromsizes[chrom]
n_bins = int(np.ceil(clen / binsize))
binedges = np.arange(0, (n_bins + 1)) * binsize
binedges[-1] = clen
return pd.DataFrame(
{"chrom": [chrom] * n_bins, "start": binedges[:-1], "end": binedges[1:]},
columns=["chrom", "start", "end"],
)
bintable = pd.concat(map(_each, chromsizes.keys()), axis=0, ignore_index=True)
bintable["chrom"] = pd.Categorical(
bintable["chrom"], categories=list(chromsizes.index), ordered=True
)
return bintable
make_bintable = binnify
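
# --- Usage sketch (illustrative only) ----------------------------------------
# binnify splits each contig into fixed-size bins, truncating the last bin of
# each contig to the contig length. The toy chromsizes below are made up.
def _binnify_demo():
    chromsizes = pd.Series({"chrA": 250, "chrB": 100})
    bins = binnify(chromsizes, binsize=100)
    # chrA -> [0, 100), [100, 200), [200, 250); chrB -> [0, 100)
    assert list(bins["end"]) == [100, 200, 250, 100]
    assert get_binsize(bins) == 100
    return bins
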
def digest(fasta_records, enzyme):
"""
Divide a genome into restriction fragments.
Parameters
----------
fasta_records : OrderedDict
Dictionary of chromosome names to sequence records.
enzyme: str
Name of restriction enzyme (e.g., 'DpnII').
Returns
-------
frags : :py:class:`pandas.DataFrame`
Dataframe with columns: ``chrom``, ``start``, ``end``.
"""
try:
import Bio.Restriction as biorst
import Bio.Seq as bioseq
except ImportError:
raise ImportError("Biopython is required to find restriction fragments.")
# http://biopython.org/DIST/docs/cookbook/Restriction.html#mozTocId447698
chroms = fasta_records.keys()
try:
cut_finder = getattr(biorst, enzyme).search
except AttributeError:
raise ValueError("Unknown enzyme name: {}".format(enzyme))
def _each(chrom):
seq = bioseq.Seq(str(fasta_records[chrom]))
cuts = np.r_[0, np.array(cut_finder(seq)) + 1, len(seq)].astype(int)
n_frags = len(cuts) - 1
frags = pd.DataFrame(
{"chrom": [chrom] * n_frags, "start": cuts[:-1], "end": cuts[1:]},
columns=["chrom", "start", "end"],
)
return frags
return pd.concat(map(_each, chroms), axis=0, ignore_index=True)
def get_binsize(bins):
"""
Infer bin size from a bin DataFrame. Assumes that the last bin of each
contig is allowed to differ in size from the rest.
Returns
-------
int or None if bins are non-uniform
"""
sizes = set()
for chrom, group in bins.groupby("chrom"):
sizes.update((group["end"] - group["start"]).iloc[:-1].unique())
if len(sizes) > 1:
return None
if len(sizes) == 1:
return next(iter(sizes))
else:
return None
def get_chromsizes(bins):
"""
Infer chromsizes Series from a bin DataFrame. Assumes that the last bin of
each contig is allowed to differ in size from the rest.
Returns
-------
int or None if bins are non-uniform
"""
chromtable = (
bins.drop_duplicates(["chrom"], keep="last")[["chrom", "end"]]
.reset_index(drop=True)
.rename(columns={"chrom": "name", "end": "length"})
)
chroms, lengths = list(chromtable["name"]), list(chromtable["length"])
return pd.Series(index=chroms, data=lengths)
def bedslice(grouped, chromsizes, region):
"""
Range query on a BED-like dataframe with non-overlapping intervals.
"""
chrom, start, end = parse_region(region, chromsizes)
result = grouped.get_group(chrom)
if start > 0 or end < chromsizes[chrom]:
lo = result["end"].values.searchsorted(start, side="right")
hi = lo + result["start"].values[lo:].searchsorted(end, side="left")
result = result.iloc[lo:hi]
return result
def asarray_or_dataset(x):
return x if isinstance(x, h5py.Dataset) else np.asarray(x)
def rlencode(array, chunksize=None):
"""
Run length encoding.
Based on http://stackoverflow.com/a/32681075, which is based on the rle
function from R.
Parameters
----------
    array : 1D array_like
        Input array to encode
    chunksize : int, optional
        Process the input in chunks of this many elements to limit memory use.
Returns
-------
start positions, run lengths, run values
"""
where = np.flatnonzero
array = asarray_or_dataset(array)
n = len(array)
if n == 0:
return (
np.array([], dtype=int),
np.array([], dtype=int),
np.array([], dtype=array.dtype),
)
if chunksize is None:
chunksize = n
starts, values = [], []
last_val = np.nan
for i in range(0, n, chunksize):
x = array[i : i + chunksize]
locs = where(x[1:] != x[:-1]) + 1
if x[0] != last_val:
locs = np.r_[0, locs]
starts.append(i + locs)
values.append(x[locs])
last_val = x[-1]
starts = np.concatenate(starts)
lengths = np.diff(np.r_[starts, n])
values = np.concatenate(values)
return starts, lengths, values
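
# --- Usage sketch (illustrative only) ----------------------------------------
# rlencode compresses consecutive repeats into (start, length, value) triplets.
def _rlencode_demo():
    starts, lengths, values = rlencode(np.array([7, 7, 7, 3, 3, 9]))
    assert list(starts) == [0, 3, 5]
    assert list(lengths) == [3, 2, 1]
    assert list(values) == [7, 3, 9]
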
def cmd_exists(cmd):
return any(
os.access(os.path.join(path, cmd), os.X_OK)
for path in os.environ["PATH"].split(os.pathsep)
)
def mad(data, axis=None):
return np.median(np.abs(data - np.median(data, axis)), axis)
@contextmanager
def open_hdf5(fp, mode="r", *args, **kwargs):
"""
Context manager like ``h5py.File`` but accepts already open HDF5 file
handles which do not get closed on teardown.
Parameters
----------
fp : str or ``h5py.File`` object
If an open file object is provided, it passes through unchanged,
provided that the requested mode is compatible.
If a filepath is passed, the context manager will close the file on
tear down.
mode : str
* r Readonly, file must exist
* r+ Read/write, file must exist
* a Read/write if exists, create otherwise
* w Truncate if exists, create otherwise
* w- or x Fail if exists, create otherwise
"""
if isinstance(fp, six.string_types):
own_fh = True
fh = h5py.File(fp, mode, *args, **kwargs)
else:
own_fh = False
if mode == "r" and fp.file.mode == "r+":
# warnings.warn("File object provided is writeable but intent is read-only")
pass
elif mode in ("r+", "a") and fp.file.mode == "r":
raise ValueError("File object provided is not writeable")
elif mode == "w":
raise ValueError("Cannot truncate open file")
elif mode in ("w-", "x"):
raise ValueError("File exists")
fh = fp
try:
yield fh
finally:
if own_fh:
fh.close()
class closing_hdf5(h5py.Group):
def __init__(self, grp):
super(closing_hdf5, self).__init__(grp.id)
def __enter__(self):
return self
def __exit__(self, *exc_info):
return self.file.close()
def close(self):
self.file.close()
def attrs_to_jsonable(attrs):
out = dict(attrs)
for k, v in attrs.items():
try:
out[k] = v.item()
except ValueError:
out[k] = v.tolist()
except AttributeError:
out[k] = v
return out
def infer_meta(x, index=None): # pragma: no cover
"""
Extracted and modified from dask/dataframe/utils.py :
make_meta (BSD licensed)
Create an empty pandas object containing the desired metadata.
Parameters
----------
x : dict, tuple, list, pd.Series, pd.DataFrame, pd.Index, dtype, scalar
To create a DataFrame, provide a `dict` mapping of `{name: dtype}`, or
an iterable of `(name, dtype)` tuples. To create a `Series`, provide a
tuple of `(name, dtype)`. If a pandas object, names, dtypes, and index
should match the desired output. If a dtype or scalar, a scalar of the
same dtype is returned.
index : pd.Index, optional
Any pandas index to use in the metadata. If none provided, a
`RangeIndex` will be used.
Examples
--------
>>> make_meta([('a', 'i8'), ('b', 'O')])
Empty DataFrame
Columns: [a, b]
Index: []
>>> make_meta(('a', 'f8'))
Series([], Name: a, dtype: float64)
>>> make_meta('i8')
1
"""
_simple_fake_mapping = {
"b": np.bool_(True),
"V": np.void(b" "),
"M": np.datetime64("1970-01-01"),
"m": np.timedelta64(1),
"S": np.str_("foo"),
"a": np.str_("foo"),
"U": np.unicode_("foo"),
"O": "foo",
}
UNKNOWN_CATEGORIES = "__UNKNOWN_CATEGORIES__"
def _scalar_from_dtype(dtype):
if dtype.kind in ("i", "f", "u"):
return dtype.type(1)
elif dtype.kind == "c":
return dtype.type(complex(1, 0))
elif dtype.kind in _simple_fake_mapping:
o = _simple_fake_mapping[dtype.kind]
return o.astype(dtype) if dtype.kind in ("m", "M") else o
else:
raise TypeError("Can't handle dtype: {0}".format(dtype))
def _nonempty_scalar(x):
if isinstance(x, (pd.Timestamp, pd.Timedelta, pd.Period)):
return x
elif np.isscalar(x):
dtype = x.dtype if hasattr(x, "dtype") else np.dtype(type(x))
return _scalar_from_dtype(dtype)
else:
raise TypeError(
"Can't handle meta of type " "'{0}'".format(type(x).__name__)
)
def _empty_series(name, dtype, index=None):
if isinstance(dtype, str) and dtype == "category":
return pd.Series(
pd.Categorical([UNKNOWN_CATEGORIES]), name=name, index=index
).iloc[:0]
return pd.Series([], dtype=dtype, name=name, index=index)
if hasattr(x, "_meta"):
return x._meta
if isinstance(x, (pd.Series, pd.DataFrame)):
return x.iloc[0:0]
elif isinstance(x, pd.Index):
return x[0:0]
index = index if index is None else index[0:0]
if isinstance(x, dict):
return pd.DataFrame(
{c: _empty_series(c, d, index=index) for (c, d) in x.items()}, index=index
)
if isinstance(x, tuple) and len(x) == 2:
return _empty_series(x[0], x[1], index=index)
elif isinstance(x, (list, tuple)):
if not all(isinstance(i, tuple) and len(i) == 2 for i in x):
raise ValueError(
"Expected iterable of tuples of (name, dtype), " "got {0}".format(x)
)
return pd.DataFrame(
{c: _empty_series(c, d, index=index) for (c, d) in x},
columns=[c for c, d in x],
index=index,
)
elif not hasattr(x, "dtype") and x is not None:
# could be a string, a dtype object, or a python type. Skip `None`,
        # because it is implicitly converted to `dtype('f8')`, which we don't
# want here.
try:
dtype = np.dtype(x)
return _scalar_from_dtype(dtype)
except: # noqa
# Continue on to next check
pass
if is_scalar(x):
return _nonempty_scalar(x)
raise TypeError("Don't know how to create metadata from {0}".format(x))
def get_meta(
columns, dtype=None, index_columns=None, index_names=None, default_dtype=np.object
): # pragma: no cover
"""
Extracted and modified from pandas/io/parsers.py :
_get_empty_meta (BSD licensed).
"""
columns = list(columns)
# Convert `dtype` to a defaultdict of some kind.
# This will enable us to write `dtype[col_name]`
# without worrying about KeyError issues later on.
if not isinstance(dtype, dict):
# if dtype == None, default will be default_dtype.
dtype = defaultdict(lambda: dtype or default_dtype)
else:
# Save a copy of the dictionary.
_dtype = dtype.copy()
dtype = defaultdict(lambda: default_dtype)
# Convert column indexes to column names.
for k, v in six.iteritems(_dtype):
col = columns[k] if is_integer(k) else k
dtype[col] = v
if index_columns is None or index_columns is False:
index = pd.Index([])
else:
data = [pd.Series([], dtype=dtype[name]) for name in index_names]
if len(data) == 1:
index = pd.Index(data[0], name=index_names[0])
else:
index = pd.MultiIndex.from_arrays(data, names=index_names)
index_columns.sort()
for i, n in enumerate(index_columns):
columns.pop(n - i)
col_dict = {col_name: pd.Series([], dtype=dtype[col_name]) for col_name in columns}
return pd.DataFrame(col_dict, columns=columns, index=index)
def check_bins(bins, chromsizes):
is_cat = pd.api.types.is_categorical(bins["chrom"])
bins = bins.copy()
if not is_cat:
bins["chrom"] = pd.Categorical(
bins.chrom, categories=list(chromsizes.index), ordered=True
)
else:
assert (bins["chrom"].cat.categories == chromsizes.index).all()
return bins
def balanced_partition(gs, n_chunk_max, file_contigs, loadings=None):
# n_bins = len(gs.bins)
grouped = gs._bins_grouped
chrom_nbins = grouped.size()
if loadings is None:
loadings = chrom_nbins
chrmax = loadings.idxmax()
loadings = loadings / loadings.loc[chrmax]
const = chrom_nbins.loc[chrmax] / n_chunk_max
granges = []
for chrom, group in grouped:
if chrom not in file_contigs:
continue
clen = gs.chromsizes[chrom]
step = int(np.ceil(const / loadings.loc[chrom]))
anchors = group.start.values[::step]
if anchors[-1] != clen:
anchors = np.r_[anchors, clen]
granges.extend(
(chrom, start, end) for start, end in zip(anchors[:-1], anchors[1:])
)
return granges
class GenomeSegmentation(object):
def __init__(self, chromsizes, bins):
bins = check_bins(bins, chromsizes)
self._bins_grouped = bins.groupby("chrom", sort=False)
nbins_per_chrom = self._bins_grouped.size().values
self.chromsizes = chromsizes
self.binsize = get_binsize(bins)
self.contigs = list(chromsizes.keys())
self.bins = bins
self.idmap = pd.Series(index=chromsizes.keys(), data=range(len(chromsizes)))
self.chrom_binoffset = np.r_[0, np.cumsum(nbins_per_chrom)]
self.chrom_abspos = np.r_[0, np.cumsum(chromsizes.values)]
self.start_abspos = (
self.chrom_abspos[bins["chrom"].cat.codes] + bins["start"].values
)
def fetch(self, region):
chrom, start, end = parse_region(region, self.chromsizes)
result = self._bins_grouped.get_group(chrom)
if start > 0 or end < self.chromsizes[chrom]:
lo = result["end"].values.searchsorted(start, side="right")
hi = lo + result["start"].values[lo:].searchsorted(end, side="left")
result = result.iloc[lo:hi]
return result
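
# --- Usage sketch (illustrative only) ----------------------------------------
# GenomeSegmentation bundles a chromsizes Series with its bin table so that
# per-region bin lookups (fetch) and partitioning can reuse precomputed
# offsets. The toy sizes below are made up.
def _genome_segmentation_demo():
    chromsizes = pd.Series({"chrA": 250, "chrB": 100})
    gs = GenomeSegmentation(chromsizes, binnify(chromsizes, 100))
    sub = gs.fetch("chrA:100-250")
    # two of chrA's three bins overlap the interval [100, 250)
    assert len(sub) == 2
    return gs
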
def buffered(chunks, size=10000000):
"""
Take an incoming iterator of small data frame chunks and buffer them into
an outgoing iterator of larger chunks.
Parameters
----------
chunks : iterator of :py:class:`pandas.DataFrame`
Each chunk should have the same column names.
size : int
Minimum length of output chunks.
Yields
------
Larger outgoing :py:class:`pandas.DataFrame` chunks made from concatenating
the incoming ones.
"""
buf = []
n = 0
for chunk in chunks:
n += len(chunk)
buf.append(chunk)
if n > size:
yield | pd.concat(buf, axis=0) | pandas.concat |
##Edit Lukas:added L, <NAME> to classification
##added prepare_color_catalog
import pandas as pd
import numpy as np
import math
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
def prepare_flux_ratio_catalog(cat,passband_names,sigma=False):
""" Calculating the flux ratios from the fluxes provided by
the input df and dropping all rows with NaN values in the
process to ensure a full data set
Input:
cat (DataFrame) as the input flux catalog
passband_names (list) of the filter names considered
for calculating the flux ratios
Output:
df (DataFrame) catalog including the flux ratios
flux_ratio_names (list) list of the labels for
the flux ratio columns
"""
df = cat.copy(deep=True)
# Drop all rows with Inf and NaN values in the passband considered
df.replace([np.inf, -np.inf], np.nan,inplace=True)
df.dropna(axis=0,how='any',subset=passband_names,inplace=True)
# Calculate the flux ratios and add them to the dataframe
flux_ratio_names = []
flux_ratio_err_names= []
if sigma :
for name in passband_names:
df.dropna(axis=0,how='any',subset=['sigma_'+name],inplace=True)
for i in range(len(passband_names)-1):
passband_a = np.array(df[passband_names[i]])
passband_b = np.array(df[passband_names[i+1]])
sigma_a = np.array(df['sigma_'+passband_names[i]])
sigma_b = np.array(df['sigma_'+passband_names[i+1]])
passband_a_name = passband_names[i].split('_')[1]
passband_b_name = passband_names[i+1].split('_')[1]
df[str(passband_a_name+passband_b_name)] = \
passband_a / passband_b
flux_ratio_names.append(str(passband_a_name+passband_b_name))
            # standard error propagation for a ratio f = a/b:
            # sigma_f = sqrt((sigma_a/b)**2 + (a*sigma_b/b**2)**2)
            df[str('sigma_'+passband_a_name+passband_b_name)] = \
                np.sqrt((sigma_a/passband_b)**2 + (passband_a/passband_b**2*sigma_b)**2)
flux_ratio_err_names.append('sigma_'+ \
str(passband_a_name+passband_b_name))
else :
#for name in passband_names:
#df.dropna(axis=0,how='any',subset=['sigma_'+name],inplace=True)
for i in range(len(passband_names)-1):
passband_a = np.array(df[passband_names[i]])
passband_b = np.array(df[passband_names[i+1]])
# sigma_a = np.array(df['sigma_'+passband_names[i]])
# sigma_b = np.array(df['sigma_'+passband_names[i+1]])
passband_a_name = passband_names[i].split('_')[1]
passband_b_name = passband_names[i+1].split('_')[1]
df[str(passband_a_name+passband_b_name)] = \
passband_a / passband_b
flux_ratio_names.append(str(passband_a_name+passband_b_name))
# df[str('sigma_'+passband_a_name+passband_b_name)] = \
# np.sqrt((sigma_a/passband_b)**2 + (passband_a/passband_b**2*sigma_b))
# flux_ratio_err_names.append('sigma_'+ \
# str(passband_a_name+passband_b_name))
return df, flux_ratio_names
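
# --- Usage sketch (illustrative only) ----------------------------------------
# A minimal example of building flux ratios from a toy catalog. Column names
# follow the "<prefix>_<band>" convention that the function splits on; the
# values and band names are made up.
def _flux_ratio_demo():
    cat = pd.DataFrame({
        "obs_g": [1.0, 2.0],
        "obs_r": [2.0, 4.0],
        "obs_i": [4.0, np.nan],  # the row with a NaN flux is dropped first
    })
    df, names = prepare_flux_ratio_catalog(cat, ["obs_g", "obs_r", "obs_i"])
    # names == ['gr', 'ri']; only the fully populated row survives
    assert names == ["gr", "ri"]
    assert len(df) == 1
    return df
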
def prepare_color_catalog(cat,passband_names,sigma=False):
""" Calculating the colors from the mags provided by
the input df and dropping all rows with NaN values in the
process to ensure a full data set. The bands are expected to be named
<something>_mag_<band>
Input:
cat (DataFrame) as the input flux catalog
passband_names (list) of the filter names considered
for calculating the flux ratios
Output:
df (DataFrame) catalog including the flux ratios
flux_ratio_names (list) list of the labels for
the flux ratio columns
"""
df = cat.copy(deep=True)
# Drop all rows with Inf and NaN values in the passband considered
df.replace([np.inf, -np.inf], np.nan,inplace=True)
df.dropna(axis=0,how='any',subset=passband_names,inplace=True)
# Calculate the flux ratios and add them to the dataframe
flux_ratio_names = []
flux_ratio_err_names= []
if sigma :
for name in passband_names:
df.dropna(axis=0,how='any',subset=['sigma_'+name],inplace=True)
for i in range(len(passband_names)-1):
passband_a = np.array(df[passband_names[i]])
passband_b = np.array(df[passband_names[i+1]])
sigma_a = np.array(df['sigma_'+passband_names[i]])
sigma_b = np.array(df['sigma_'+passband_names[i+1]])
passband_a_name = passband_names[i].split('_mag_')[1]
passband_b_name = passband_names[i+1].split('_mag_')[1]
df[str(passband_a_name+'-'+passband_b_name)] = \
passband_a - passband_b
flux_ratio_names.append(str(passband_a_name+'-'+passband_b_name))
df[str('sigma_'+passband_a_name+'-'+passband_b_name)] = \
                np.sqrt(sigma_a**2 + sigma_b**2)  # errors of a difference add in quadrature
                # note: this branch is not exercised below and has not been tested
flux_ratio_err_names.append('sigma_'+ \
str(passband_a_name+'-'+passband_b_name))
else :
#for name in passband_names:
#df.dropna(axis=0,how='any',subset=['sigma_'+name],inplace=True)
for i in range(len(passband_names)-1):
passband_a = np.array(df[passband_names[i]])
passband_b = np.array(df[passband_names[i+1]])
# sigma_a = np.array(df['sigma_'+passband_names[i]])
# sigma_b = np.array(df['sigma_'+passband_names[i+1]])
passband_a_name = passband_names[i].split('_mag_')[1]
passband_b_name = passband_names[i+1].split('_mag_')[1]
df[str(passband_a_name+'-'+passband_b_name)] = \
passband_a - passband_b
flux_ratio_names.append(str(passband_a_name+'-'+passband_b_name))
# df[str('sigma_'+passband_a_name+passband_b_name)] = \
# np.sqrt((sigma_a/passband_b)**2 + (passband_a/passband_b**2*sigma_b))
# flux_ratio_err_names.append('sigma_'+ \
# str(passband_a_name+passband_b_name))
return df, flux_ratio_names
def build_full_sample(df_stars, df_quasars, star_qso_ratio):
""" Merging the star and quasar flux_ratio catalogs according to
    the star_qso_ratio argument. This is a first step towards creating
    a more realistic data set, since the intrinsic ratio of stars to quasars
    will not be mimicked by simply combining both data sets. The catalogs
    are labelled depending on their catalog of origin.
    TO DO:
    This function should be expanded in order to return a DataFrame that
    mimics the intrinsic quasar/star distribution as well as possible.
Parameters:
df_stars : pandas dataframe
Star flux ratio catalog
df_quasars : pandas dataframe
Quasar flux ratio catalog
star_qso_ratio : integer
Goal ratio of stars to quasars
Returns:
df : pandas dataframe
Merged flux ratio catalog with specified star to quasar ratio
"""
df_quasars['label'] = 'QSO'
df_stars['label'] = 'STAR'
if df_stars.shape[0] > df_quasars.shape[0]*star_qso_ratio:
# calculate number of objects to sample
sample_size = df_quasars.shape[0]
star_sample = df_stars.sample(sample_size*star_qso_ratio)
qso_sample = df_quasars.sample(sample_size)
df = | pd.concat([qso_sample,star_sample], sort=False) | pandas.concat |
from __future__ import print_function
from datetime import datetime, timedelta
import numpy as np
import pandas as pd
from pandas import (Series, Index, Int64Index, Timestamp, Period,
DatetimeIndex, PeriodIndex, TimedeltaIndex,
Timedelta, timedelta_range, date_range, Float64Index,
_np_version_under1p10)
import pandas.tslib as tslib
import pandas.tseries.period as period
import pandas.util.testing as tm
from pandas.tests.test_base import Ops
class TestDatetimeIndexOps(Ops):
tz = [None, 'UTC', 'Asia/Tokyo', 'US/Eastern', 'dateutil/Asia/Singapore',
'dateutil/US/Pacific']
def setUp(self):
super(TestDatetimeIndexOps, self).setUp()
mask = lambda x: (isinstance(x, DatetimeIndex) or
isinstance(x, PeriodIndex))
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = [o for o in self.objs if not mask(o)]
def test_ops_properties(self):
self.check_ops_properties(
['year', 'month', 'day', 'hour', 'minute', 'second', 'weekofyear',
'week', 'dayofweek', 'dayofyear', 'quarter'])
self.check_ops_properties(['date', 'time', 'microsecond', 'nanosecond',
'is_month_start', 'is_month_end',
'is_quarter_start',
'is_quarter_end', 'is_year_start',
'is_year_end', 'weekday_name'],
lambda x: isinstance(x, DatetimeIndex))
def test_ops_properties_basic(self):
# sanity check that the behavior didn't change
# GH7206
for op in ['year', 'day', 'second', 'weekday']:
self.assertRaises(TypeError, lambda x: getattr(self.dt_series, op))
# attribute access should still work!
s = Series(dict(year=2000, month=1, day=10))
self.assertEqual(s.year, 2000)
self.assertEqual(s.month, 1)
self.assertEqual(s.day, 10)
self.assertRaises(AttributeError, lambda: s.weekday)
def test_asobject_tolist(self):
idx = pd.date_range(start='2013-01-01', periods=4, freq='M',
name='idx')
expected_list = [Timestamp('2013-01-31'),
Timestamp('2013-02-28'),
Timestamp('2013-03-31'),
Timestamp('2013-04-30')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = pd.date_range(start='2013-01-01', periods=4, freq='M',
name='idx', tz='Asia/Tokyo')
expected_list = [Timestamp('2013-01-31', tz='Asia/Tokyo'),
Timestamp('2013-02-28', tz='Asia/Tokyo'),
Timestamp('2013-03-31', tz='Asia/Tokyo'),
Timestamp('2013-04-30', tz='Asia/Tokyo')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = DatetimeIndex([datetime(2013, 1, 1), datetime(2013, 1, 2),
pd.NaT, datetime(2013, 1, 4)], name='idx')
expected_list = [Timestamp('2013-01-01'),
Timestamp('2013-01-02'), pd.NaT,
Timestamp('2013-01-04')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
def test_minmax(self):
for tz in self.tz:
# monotonic
idx1 = pd.DatetimeIndex(['2011-01-01', '2011-01-02',
'2011-01-03'], tz=tz)
self.assertTrue(idx1.is_monotonic)
# non-monotonic
idx2 = pd.DatetimeIndex(['2011-01-01', pd.NaT, '2011-01-03',
'2011-01-02', pd.NaT], tz=tz)
self.assertFalse(idx2.is_monotonic)
for idx in [idx1, idx2]:
self.assertEqual(idx.min(), Timestamp('2011-01-01', tz=tz))
self.assertEqual(idx.max(), Timestamp('2011-01-03', tz=tz))
self.assertEqual(idx.argmin(), 0)
self.assertEqual(idx.argmax(), 2)
for op in ['min', 'max']:
# Return NaT
obj = DatetimeIndex([])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = DatetimeIndex([pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = DatetimeIndex([pd.NaT, pd.NaT, pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
def test_numpy_minmax(self):
dr = pd.date_range(start='2016-01-15', end='2016-01-20')
self.assertEqual(np.min(dr),
Timestamp('2016-01-15 00:00:00', freq='D'))
self.assertEqual(np.max(dr),
Timestamp('2016-01-20 00:00:00', freq='D'))
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.min, dr, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.max, dr, out=0)
self.assertEqual(np.argmin(dr), 0)
self.assertEqual(np.argmax(dr), 5)
if not _np_version_under1p10:
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.argmin, dr, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.argmax, dr, out=0)
def test_round(self):
for tz in self.tz:
rng = pd.date_range(start='2016-01-01', periods=5,
freq='30Min', tz=tz)
elt = rng[1]
expected_rng = DatetimeIndex([
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 01:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 02:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 02:00:00', tz=tz, freq='30T'),
])
expected_elt = expected_rng[1]
tm.assert_index_equal(rng.round(freq='H'), expected_rng)
self.assertEqual(elt.round(freq='H'), expected_elt)
msg = pd.tseries.frequencies._INVALID_FREQ_ERROR
with tm.assertRaisesRegexp(ValueError, msg):
rng.round(freq='foo')
with tm.assertRaisesRegexp(ValueError, msg):
elt.round(freq='foo')
msg = "<MonthEnd> is a non-fixed frequency"
tm.assertRaisesRegexp(ValueError, msg, rng.round, freq='M')
tm.assertRaisesRegexp(ValueError, msg, elt.round, freq='M')
def test_repeat_range(self):
rng = date_range('1/1/2000', '1/1/2001')
result = rng.repeat(5)
self.assertIsNone(result.freq)
self.assertEqual(len(result), 5 * len(rng))
for tz in self.tz:
index = pd.date_range('2001-01-01', periods=2, freq='D', tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01',
'2001-01-02', '2001-01-02'], tz=tz)
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
index = pd.date_range('2001-01-01', periods=2, freq='2D', tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01',
'2001-01-03', '2001-01-03'], tz=tz)
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
index = pd.DatetimeIndex(['2001-01-01', 'NaT', '2003-01-01'],
tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01', '2001-01-01',
'NaT', 'NaT', 'NaT',
'2003-01-01', '2003-01-01', '2003-01-01'],
tz=tz)
for res in [index.repeat(3), np.repeat(index, 3)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
def test_repeat(self):
reps = 2
msg = "the 'axis' parameter is not supported"
for tz in self.tz:
rng = pd.date_range(start='2016-01-01', periods=2,
freq='30Min', tz=tz)
expected_rng = DatetimeIndex([
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:30:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:30:00', tz=tz, freq='30T'),
])
res = rng.repeat(reps)
tm.assert_index_equal(res, expected_rng)
self.assertIsNone(res.freq)
tm.assert_index_equal(np.repeat(rng, reps), expected_rng)
tm.assertRaisesRegexp(ValueError, msg, np.repeat,
rng, reps, axis=1)
def test_representation(self):
idx = []
idx.append(DatetimeIndex([], freq='D'))
idx.append(DatetimeIndex(['2011-01-01'], freq='D'))
idx.append(DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D'))
idx.append(DatetimeIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D'))
idx.append(DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00'
], freq='H', tz='Asia/Tokyo'))
idx.append(DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT], tz='US/Eastern'))
idx.append(DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT], tz='UTC'))
exp = []
exp.append("""DatetimeIndex([], dtype='datetime64[ns]', freq='D')""")
exp.append("DatetimeIndex(['2011-01-01'], dtype='datetime64[ns]', "
"freq='D')")
exp.append("DatetimeIndex(['2011-01-01', '2011-01-02'], "
"dtype='datetime64[ns]', freq='D')")
exp.append("DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03'], "
"dtype='datetime64[ns]', freq='D')")
exp.append("DatetimeIndex(['2011-01-01 09:00:00+09:00', "
"'2011-01-01 10:00:00+09:00', '2011-01-01 11:00:00+09:00']"
", dtype='datetime64[ns, Asia/Tokyo]', freq='H')")
exp.append("DatetimeIndex(['2011-01-01 09:00:00-05:00', "
"'2011-01-01 10:00:00-05:00', 'NaT'], "
"dtype='datetime64[ns, US/Eastern]', freq=None)")
exp.append("DatetimeIndex(['2011-01-01 09:00:00+00:00', "
"'2011-01-01 10:00:00+00:00', 'NaT'], "
"dtype='datetime64[ns, UTC]', freq=None)""")
with pd.option_context('display.width', 300):
for indx, expected in zip(idx, exp):
for func in ['__repr__', '__unicode__', '__str__']:
result = getattr(indx, func)()
self.assertEqual(result, expected)
def test_representation_to_series(self):
idx1 = DatetimeIndex([], freq='D')
idx2 = DatetimeIndex(['2011-01-01'], freq='D')
idx3 = DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = DatetimeIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
idx5 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00',
'2011-01-01 11:00'], freq='H', tz='Asia/Tokyo')
idx6 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT],
tz='US/Eastern')
idx7 = DatetimeIndex(['2011-01-01 09:00', '2011-01-02 10:15'])
exp1 = """Series([], dtype: datetime64[ns])"""
exp2 = """0 2011-01-01
dtype: datetime64[ns]"""
exp3 = """0 2011-01-01
1 2011-01-02
dtype: datetime64[ns]"""
exp4 = """0 2011-01-01
1 2011-01-02
2 2011-01-03
dtype: datetime64[ns]"""
exp5 = """0 2011-01-01 09:00:00+09:00
1 2011-01-01 10:00:00+09:00
2 2011-01-01 11:00:00+09:00
dtype: datetime64[ns, Asia/Tokyo]"""
exp6 = """0 2011-01-01 09:00:00-05:00
1 2011-01-01 10:00:00-05:00
2 NaT
dtype: datetime64[ns, US/Eastern]"""
exp7 = """0 2011-01-01 09:00:00
1 2011-01-02 10:15:00
dtype: datetime64[ns]"""
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4,
idx5, idx6, idx7],
[exp1, exp2, exp3, exp4,
exp5, exp6, exp7]):
result = repr(Series(idx))
self.assertEqual(result, expected)
def test_summary(self):
# GH9116
idx1 = DatetimeIndex([], freq='D')
idx2 = DatetimeIndex(['2011-01-01'], freq='D')
idx3 = DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = DatetimeIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
idx5 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00',
'2011-01-01 11:00'],
freq='H', tz='Asia/Tokyo')
idx6 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT],
tz='US/Eastern')
exp1 = """DatetimeIndex: 0 entries
Freq: D"""
exp2 = """DatetimeIndex: 1 entries, 2011-01-01 to 2011-01-01
Freq: D"""
exp3 = """DatetimeIndex: 2 entries, 2011-01-01 to 2011-01-02
Freq: D"""
exp4 = """DatetimeIndex: 3 entries, 2011-01-01 to 2011-01-03
Freq: D"""
exp5 = ("DatetimeIndex: 3 entries, 2011-01-01 09:00:00+09:00 "
"to 2011-01-01 11:00:00+09:00\n"
"Freq: H")
exp6 = """DatetimeIndex: 3 entries, 2011-01-01 09:00:00-05:00 to NaT"""
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5, idx6],
[exp1, exp2, exp3, exp4, exp5, exp6]):
result = idx.summary()
self.assertEqual(result, expected)
def test_resolution(self):
for freq, expected in zip(['A', 'Q', 'M', 'D', 'H', 'T',
'S', 'L', 'U'],
['day', 'day', 'day', 'day', 'hour',
'minute', 'second', 'millisecond',
'microsecond']):
for tz in self.tz:
idx = pd.date_range(start='2013-04-01', periods=30, freq=freq,
tz=tz)
self.assertEqual(idx.resolution, expected)
def test_union(self):
for tz in self.tz:
# union
rng1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other1 = pd.date_range('1/6/2000', freq='D', periods=5, tz=tz)
expected1 = pd.date_range('1/1/2000', freq='D', periods=10, tz=tz)
rng2 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other2 = pd.date_range('1/4/2000', freq='D', periods=5, tz=tz)
expected2 = pd.date_range('1/1/2000', freq='D', periods=8, tz=tz)
rng3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other3 = pd.DatetimeIndex([], tz=tz)
expected3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
for rng, other, expected in [(rng1, other1, expected1),
(rng2, other2, expected2),
(rng3, other3, expected3)]:
result_union = rng.union(other)
tm.assert_index_equal(result_union, expected)
def test_add_iadd(self):
for tz in self.tz:
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
result = rng + delta
expected = pd.date_range('2000-01-01 02:00',
'2000-02-01 02:00', tz=tz)
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
# int
rng = pd.date_range('2000-01-01 09:00', freq='H', periods=10,
tz=tz)
result = rng + 1
expected = pd.date_range('2000-01-01 10:00', freq='H', periods=10,
tz=tz)
tm.assert_index_equal(result, expected)
rng += 1
tm.assert_index_equal(rng, expected)
idx = DatetimeIndex(['2011-01-01', '2011-01-02'])
msg = "cannot add a datelike to a DatetimeIndex"
with tm.assertRaisesRegexp(TypeError, msg):
idx + Timestamp('2011-01-01')
with tm.assertRaisesRegexp(TypeError, msg):
Timestamp('2011-01-01') + idx
def test_add_dti_dti(self):
# previously performed setop (deprecated in 0.16.0), now raises
# TypeError (GH14164)
dti = date_range('20130101', periods=3)
dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
with tm.assertRaises(TypeError):
dti + dti
with tm.assertRaises(TypeError):
dti_tz + dti_tz
with tm.assertRaises(TypeError):
dti_tz + dti
with tm.assertRaises(TypeError):
dti + dti_tz
def test_difference(self):
for tz in self.tz:
# diff
rng1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other1 = pd.date_range('1/6/2000', freq='D', periods=5, tz=tz)
expected1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
rng2 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other2 = pd.date_range('1/4/2000', freq='D', periods=5, tz=tz)
expected2 = pd.date_range('1/1/2000', freq='D', periods=3, tz=tz)
rng3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other3 = pd.DatetimeIndex([], tz=tz)
expected3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
for rng, other, expected in [(rng1, other1, expected1),
(rng2, other2, expected2),
(rng3, other3, expected3)]:
result_diff = rng.difference(other)
tm.assert_index_equal(result_diff, expected)
def test_sub_isub(self):
for tz in self.tz:
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
expected = pd.date_range('1999-12-31 22:00',
'2000-01-31 22:00', tz=tz)
result = rng - delta
tm.assert_index_equal(result, expected)
rng -= delta
tm.assert_index_equal(rng, expected)
# int
rng = pd.date_range('2000-01-01 09:00', freq='H', periods=10,
tz=tz)
result = rng - 1
expected = pd.date_range('2000-01-01 08:00', freq='H', periods=10,
tz=tz)
tm.assert_index_equal(result, expected)
rng -= 1
tm.assert_index_equal(rng, expected)
def test_sub_dti_dti(self):
# previously performed setop (deprecated in 0.16.0), now changed to
# return subtraction -> TimeDeltaIndex (GH ...)
dti = date_range('20130101', periods=3)
dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
dti_tz2 = date_range('20130101', periods=3).tz_localize('UTC')
expected = TimedeltaIndex([0, 0, 0])
result = dti - dti
tm.assert_index_equal(result, expected)
result = dti_tz - dti_tz
tm.assert_index_equal(result, expected)
with tm.assertRaises(TypeError):
dti_tz - dti
with tm.assertRaises(TypeError):
dti - dti_tz
with tm.assertRaises(TypeError):
dti_tz - dti_tz2
# isub
dti -= dti
tm.assert_index_equal(dti, expected)
# different length raises ValueError
dti1 = date_range('20130101', periods=3)
dti2 = date_range('20130101', periods=4)
with tm.assertRaises(ValueError):
dti1 - dti2
# NaN propagation
dti1 = DatetimeIndex(['2012-01-01', np.nan, '2012-01-03'])
dti2 = DatetimeIndex(['2012-01-02', '2012-01-03', np.nan])
expected = TimedeltaIndex(['1 days', np.nan, np.nan])
result = dti2 - dti1
tm.assert_index_equal(result, expected)
def test_sub_period(self):
# GH 13078
# not supported, check TypeError
p = pd.Period('2011-01-01', freq='D')
for freq in [None, 'D']:
idx = pd.DatetimeIndex(['2011-01-01', '2011-01-02'], freq=freq)
with tm.assertRaises(TypeError):
idx - p
with tm.assertRaises(TypeError):
p - idx
def test_comp_nat(self):
left = pd.DatetimeIndex([pd.Timestamp('2011-01-01'), pd.NaT,
pd.Timestamp('2011-01-03')])
right = pd.DatetimeIndex([pd.NaT, pd.NaT, pd.Timestamp('2011-01-03')])
for l, r in [(left, right), (left.asobject, right.asobject)]:
result = l == r
expected = np.array([False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = l != r
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l == pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT == r, expected)
expected = np.array([True, True, True])
tm.assert_numpy_array_equal(l != pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT != l, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l < pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT > l, expected)
def test_value_counts_unique(self):
# GH 7735
for tz in self.tz:
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=10)
# create repeated values, 'n'th element is repeated by n+1 times
idx = DatetimeIndex(np.repeat(idx.values, range(1, len(idx) + 1)),
tz=tz)
exp_idx = pd.date_range('2011-01-01 18:00', freq='-1H', periods=10,
tz=tz)
expected = Series(range(10, 0, -1), index=exp_idx, dtype='int64')
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
expected = pd.date_range('2011-01-01 09:00', freq='H', periods=10,
tz=tz)
tm.assert_index_equal(idx.unique(), expected)
idx = DatetimeIndex(['2013-01-01 09:00', '2013-01-01 09:00',
'2013-01-01 09:00', '2013-01-01 08:00',
'2013-01-01 08:00', pd.NaT], tz=tz)
exp_idx = DatetimeIndex(['2013-01-01 09:00', '2013-01-01 08:00'],
tz=tz)
expected = Series([3, 2], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
exp_idx = DatetimeIndex(['2013-01-01 09:00', '2013-01-01 08:00',
pd.NaT], tz=tz)
expected = Series([3, 2, 1], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(dropna=False),
expected)
tm.assert_index_equal(idx.unique(), exp_idx)
def test_nonunique_contains(self):
# GH 9512
for idx in map(DatetimeIndex,
([0, 1, 0], [0, 0, -1], [0, -1, -1],
['2015', '2015', '2016'], ['2015', '2015', '2014'])):
tm.assertIn(idx[0], idx)
def test_order(self):
# with freq
idx1 = DatetimeIndex(['2011-01-01', '2011-01-02',
'2011-01-03'], freq='D', name='idx')
idx2 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00',
'2011-01-01 11:00'], freq='H',
tz='Asia/Tokyo', name='tzidx')
for idx in [idx1, idx2]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, idx)
self.assertEqual(ordered.freq, idx.freq)
ordered = idx.sort_values(ascending=False)
expected = idx[::-1]
self.assert_index_equal(ordered, expected)
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq.n, -1)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, idx)
self.assert_numpy_array_equal(indexer,
np.array([0, 1, 2]),
check_dtype=False)
self.assertEqual(ordered.freq, idx.freq)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
expected = idx[::-1]
self.assert_index_equal(ordered, expected)
self.assert_numpy_array_equal(indexer,
np.array([2, 1, 0]),
check_dtype=False)
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq.n, -1)
# without freq
for tz in self.tz:
idx1 = DatetimeIndex(['2011-01-01', '2011-01-03', '2011-01-05',
'2011-01-02', '2011-01-01'],
tz=tz, name='idx1')
exp1 = DatetimeIndex(['2011-01-01', '2011-01-01', '2011-01-02',
'2011-01-03', '2011-01-05'],
tz=tz, name='idx1')
idx2 = DatetimeIndex(['2011-01-01', '2011-01-03', '2011-01-05',
'2011-01-02', '2011-01-01'],
tz=tz, name='idx2')
exp2 = DatetimeIndex(['2011-01-01', '2011-01-01', '2011-01-02',
'2011-01-03', '2011-01-05'],
tz=tz, name='idx2')
idx3 = DatetimeIndex([pd.NaT, '2011-01-03', '2011-01-05',
'2011-01-02', pd.NaT], tz=tz, name='idx3')
exp3 = DatetimeIndex([pd.NaT, pd.NaT, '2011-01-02', '2011-01-03',
'2011-01-05'], tz=tz, name='idx3')
for idx, expected in [(idx1, exp1), (idx2, exp2), (idx3, exp3)]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, expected)
self.assertIsNone(ordered.freq)
ordered = idx.sort_values(ascending=False)
self.assert_index_equal(ordered, expected[::-1])
self.assertIsNone(ordered.freq)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, expected)
exp = np.array([0, 4, 3, 1, 2])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
self.assertIsNone(ordered.freq)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
self.assert_index_equal(ordered, expected[::-1])
exp = np.array([2, 1, 3, 4, 0])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
self.assertIsNone(ordered.freq)
def test_getitem(self):
idx1 = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
idx2 = pd.date_range('2011-01-01', '2011-01-31', freq='D',
tz='Asia/Tokyo', name='idx')
for idx in [idx1, idx2]:
result = idx[0]
self.assertEqual(result, Timestamp('2011-01-01', tz=idx.tz))
result = idx[0:5]
expected = pd.date_range('2011-01-01', '2011-01-05', freq='D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[0:10:2]
expected = pd.date_range('2011-01-01', '2011-01-09', freq='2D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[-20:-5:3]
expected = pd.date_range('2011-01-12', '2011-01-24', freq='3D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[4::-1]
expected = DatetimeIndex(['2011-01-05', '2011-01-04', '2011-01-03',
'2011-01-02', '2011-01-01'],
freq='-1D', tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
def test_drop_duplicates_metadata(self):
# GH 10115
idx = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
result = idx.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertEqual(idx.freq, result.freq)
idx_dup = idx.append(idx)
self.assertIsNone(idx_dup.freq) # freq is reset
result = idx_dup.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertIsNone(result.freq)
def test_drop_duplicates(self):
# to check Index/Series compat
base = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
idx = base.append(base[:5])
res = idx.drop_duplicates()
tm.assert_index_equal(res, base)
res = Series(idx).drop_duplicates()
tm.assert_series_equal(res, Series(base))
res = idx.drop_duplicates(keep='last')
exp = base[5:].append(base[:5])
tm.assert_index_equal(res, exp)
res = Series(idx).drop_duplicates(keep='last')
tm.assert_series_equal(res, Series(exp, index=np.arange(5, 36)))
res = idx.drop_duplicates(keep=False)
tm.assert_index_equal(res, base[5:])
res = Series(idx).drop_duplicates(keep=False)
tm.assert_series_equal(res, Series(base[5:], index=np.arange(5, 31)))
def test_take(self):
# GH 10295
idx1 = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
idx2 = pd.date_range('2011-01-01', '2011-01-31', freq='D',
tz='Asia/Tokyo', name='idx')
for idx in [idx1, idx2]:
result = idx.take([0])
self.assertEqual(result, Timestamp('2011-01-01', tz=idx.tz))
result = idx.take([0, 1, 2])
expected = pd.date_range('2011-01-01', '2011-01-03', freq='D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([0, 2, 4])
expected = pd.date_range('2011-01-01', '2011-01-05', freq='2D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([7, 4, 1])
expected = pd.date_range('2011-01-08', '2011-01-02', freq='-3D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([3, 2, 5])
expected = DatetimeIndex(['2011-01-04', '2011-01-03',
'2011-01-06'],
freq=None, tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertIsNone(result.freq)
result = idx.take([-3, 2, 5])
expected = DatetimeIndex(['2011-01-29', '2011-01-03',
'2011-01-06'],
freq=None, tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertIsNone(result.freq)
def test_take_invalid_kwargs(self):
idx = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
indices = [1, 6, 5, 9, 10, 13, 15, 3]
msg = r"take\(\) got an unexpected keyword argument 'foo'"
tm.assertRaisesRegexp(TypeError, msg, idx.take,
indices, foo=2)
msg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, msg, idx.take,
indices, out=indices)
msg = "the 'mode' parameter is not supported"
tm.assertRaisesRegexp(ValueError, msg, idx.take,
indices, mode='clip')
def test_infer_freq(self):
# GH 11018
for freq in ['A', '2A', '-2A', 'Q', '-1Q', 'M', '-1M', 'D', '3D',
'-3D', 'W', '-1W', 'H', '2H', '-2H', 'T', '2T', 'S',
'-3S']:
idx = pd.date_range('2011-01-01 09:00:00', freq=freq, periods=10)
result = pd.DatetimeIndex(idx.asi8, freq='infer')
tm.assert_index_equal(idx, result)
self.assertEqual(result.freq, freq)
def test_nat_new(self):
idx = pd.date_range('2011-01-01', freq='D', periods=5, name='x')
result = idx._nat_new()
exp = pd.DatetimeIndex([pd.NaT] * 5, name='x')
tm.assert_index_equal(result, exp)
result = idx._nat_new(box=False)
exp = np.array([tslib.iNaT] * 5, dtype=np.int64)
tm.assert_numpy_array_equal(result, exp)
def test_shift(self):
# GH 9903
for tz in self.tz:
idx = pd.DatetimeIndex([], name='xxx', tz=tz)
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
tm.assert_index_equal(idx.shift(3, freq='H'), idx)
            idx = pd.DatetimeIndex(['2011-01-01 10:00', '2011-01-01 11:00',
                                    '2011-01-01 12:00'], name='xxx', tz=tz)
            tm.assert_index_equal(idx.shift(0, freq='H'), idx)
            exp = pd.DatetimeIndex(['2011-01-01 13:00', '2011-01-01 14:00',
                                    '2011-01-01 15:00'], name='xxx', tz=tz)
            tm.assert_index_equal(idx.shift(3, freq='H'), exp)
            exp = pd.DatetimeIndex(['2011-01-01 07:00', '2011-01-01 08:00',
                                    '2011-01-01 09:00'], name='xxx', tz=tz)
tm.assert_index_equal(idx.shift(-3, freq='H'), exp)
def test_nat(self):
self.assertIs(pd.DatetimeIndex._na_value, pd.NaT)
self.assertIs(pd.DatetimeIndex([])._na_value, pd.NaT)
for tz in [None, 'US/Eastern', 'UTC']:
idx = pd.DatetimeIndex(['2011-01-01', '2011-01-02'], tz=tz)
self.assertTrue(idx._can_hold_na)
tm.assert_numpy_array_equal(idx._isnan, np.array([False, False]))
self.assertFalse(idx.hasnans)
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([], dtype=np.intp))
idx = pd.DatetimeIndex(['2011-01-01', 'NaT'], tz=tz)
self.assertTrue(idx._can_hold_na)
tm.assert_numpy_array_equal(idx._isnan, np.array([False, True]))
self.assertTrue(idx.hasnans)
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([1], dtype=np.intp))
def test_equals(self):
# GH 13107
for tz in [None, 'UTC', 'US/Eastern', 'Asia/Tokyo']:
idx = pd.DatetimeIndex(['2011-01-01', '2011-01-02', 'NaT'])
self.assertTrue(idx.equals(idx))
self.assertTrue(idx.equals(idx.copy()))
self.assertTrue(idx.equals(idx.asobject))
self.assertTrue(idx.asobject.equals(idx))
self.assertTrue(idx.asobject.equals(idx.asobject))
self.assertFalse(idx.equals(list(idx)))
self.assertFalse(idx.equals(pd.Series(idx)))
idx2 = pd.DatetimeIndex(['2011-01-01', '2011-01-02', 'NaT'],
tz='US/Pacific')
self.assertFalse(idx.equals(idx2))
self.assertFalse(idx.equals(idx2.copy()))
self.assertFalse(idx.equals(idx2.asobject))
self.assertFalse(idx.asobject.equals(idx2))
self.assertFalse(idx.equals(list(idx2)))
self.assertFalse(idx.equals(pd.Series(idx2)))
# same internal, different tz
idx3 = pd.DatetimeIndex._simple_new(idx.asi8, tz='US/Pacific')
tm.assert_numpy_array_equal(idx.asi8, idx3.asi8)
self.assertFalse(idx.equals(idx3))
self.assertFalse(idx.equals(idx3.copy()))
self.assertFalse(idx.equals(idx3.asobject))
self.assertFalse(idx.asobject.equals(idx3))
self.assertFalse(idx.equals(list(idx3)))
self.assertFalse(idx.equals(pd.Series(idx3)))
class TestTimedeltaIndexOps(Ops):
def setUp(self):
super(TestTimedeltaIndexOps, self).setUp()
mask = lambda x: isinstance(x, TimedeltaIndex)
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = []
def test_ops_properties(self):
self.check_ops_properties(['days', 'hours', 'minutes', 'seconds',
'milliseconds'])
self.check_ops_properties(['microseconds', 'nanoseconds'])
def test_asobject_tolist(self):
idx = timedelta_range(start='1 days', periods=4, freq='D', name='idx')
expected_list = [Timedelta('1 days'), Timedelta('2 days'),
Timedelta('3 days'), Timedelta('4 days')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = TimedeltaIndex([timedelta(days=1), timedelta(days=2), pd.NaT,
timedelta(days=4)], name='idx')
expected_list = [Timedelta('1 days'), Timedelta('2 days'), pd.NaT,
Timedelta('4 days')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
def test_minmax(self):
# monotonic
idx1 = TimedeltaIndex(['1 days', '2 days', '3 days'])
self.assertTrue(idx1.is_monotonic)
# non-monotonic
idx2 = TimedeltaIndex(['1 days', np.nan, '3 days', 'NaT'])
self.assertFalse(idx2.is_monotonic)
for idx in [idx1, idx2]:
self.assertEqual(idx.min(), Timedelta('1 days'))
self.assertEqual(idx.max(), Timedelta('3 days'))
self.assertEqual(idx.argmin(), 0)
self.assertEqual(idx.argmax(), 2)
for op in ['min', 'max']:
# Return NaT
obj = TimedeltaIndex([])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = TimedeltaIndex([pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = TimedeltaIndex([pd.NaT, pd.NaT, pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
def test_numpy_minmax(self):
dr = pd.date_range(start='2016-01-15', end='2016-01-20')
td = TimedeltaIndex(np.asarray(dr))
self.assertEqual(np.min(td), Timedelta('16815 days'))
self.assertEqual(np.max(td), Timedelta('16820 days'))
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.min, td, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.max, td, out=0)
self.assertEqual(np.argmin(td), 0)
self.assertEqual(np.argmax(td), 5)
if not _np_version_under1p10:
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.argmin, td, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.argmax, td, out=0)
def test_round(self):
td = pd.timedelta_range(start='16801 days', periods=5, freq='30Min')
elt = td[1]
expected_rng = TimedeltaIndex([
Timedelta('16801 days 00:00:00'),
Timedelta('16801 days 00:00:00'),
Timedelta('16801 days 01:00:00'),
Timedelta('16801 days 02:00:00'),
Timedelta('16801 days 02:00:00'),
])
expected_elt = expected_rng[1]
tm.assert_index_equal(td.round(freq='H'), expected_rng)
self.assertEqual(elt.round(freq='H'), expected_elt)
msg = pd.tseries.frequencies._INVALID_FREQ_ERROR
with self.assertRaisesRegexp(ValueError, msg):
td.round(freq='foo')
with tm.assertRaisesRegexp(ValueError, msg):
elt.round(freq='foo')
msg = "<MonthEnd> is a non-fixed frequency"
tm.assertRaisesRegexp(ValueError, msg, td.round, freq='M')
tm.assertRaisesRegexp(ValueError, msg, elt.round, freq='M')
def test_representation(self):
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """TimedeltaIndex([], dtype='timedelta64[ns]', freq='D')"""
exp2 = ("TimedeltaIndex(['1 days'], dtype='timedelta64[ns]', "
"freq='D')")
exp3 = ("TimedeltaIndex(['1 days', '2 days'], "
"dtype='timedelta64[ns]', freq='D')")
exp4 = ("TimedeltaIndex(['1 days', '2 days', '3 days'], "
"dtype='timedelta64[ns]', freq='D')")
exp5 = ("TimedeltaIndex(['1 days 00:00:01', '2 days 00:00:00', "
"'3 days 00:00:00'], dtype='timedelta64[ns]', freq=None)")
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
for func in ['__repr__', '__unicode__', '__str__']:
result = getattr(idx, func)()
self.assertEqual(result, expected)
def test_representation_to_series(self):
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """Series([], dtype: timedelta64[ns])"""
exp2 = """0 1 days
dtype: timedelta64[ns]"""
exp3 = """0 1 days
1 2 days
dtype: timedelta64[ns]"""
exp4 = """0 1 days
1 2 days
2 3 days
dtype: timedelta64[ns]"""
exp5 = """0 1 days 00:00:01
1 2 days 00:00:00
2 3 days 00:00:00
dtype: timedelta64[ns]"""
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
result = repr(pd.Series(idx))
self.assertEqual(result, expected)
def test_summary(self):
# GH9116
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """TimedeltaIndex: 0 entries
Freq: D"""
exp2 = """TimedeltaIndex: 1 entries, 1 days to 1 days
Freq: D"""
exp3 = """TimedeltaIndex: 2 entries, 1 days to 2 days
Freq: D"""
exp4 = """TimedeltaIndex: 3 entries, 1 days to 3 days
Freq: D"""
exp5 = ("TimedeltaIndex: 3 entries, 1 days 00:00:01 to 3 days "
"00:00:00")
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
result = idx.summary()
self.assertEqual(result, expected)
def test_add_iadd(self):
# only test adding/sub offsets as + is now numeric
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = timedelta_range('1 days', '10 days')
result = rng + delta
expected = timedelta_range('1 days 02:00:00', '10 days 02:00:00',
freq='D')
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
# int
rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)
result = rng + 1
expected = timedelta_range('1 days 10:00:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng += 1
tm.assert_index_equal(rng, expected)
def test_sub_isub(self):
# only test adding/sub offsets as - is now numeric
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = timedelta_range('1 days', '10 days')
result = rng - delta
expected = timedelta_range('0 days 22:00:00', '9 days 22:00:00')
tm.assert_index_equal(result, expected)
rng -= delta
tm.assert_index_equal(rng, expected)
# int
rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)
result = rng - 1
expected = timedelta_range('1 days 08:00:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng -= 1
tm.assert_index_equal(rng, expected)
idx = TimedeltaIndex(['1 day', '2 day'])
msg = "cannot subtract a datelike from a TimedeltaIndex"
with tm.assertRaisesRegexp(TypeError, msg):
idx - Timestamp('2011-01-01')
result = Timestamp('2011-01-01') + idx
expected = DatetimeIndex(['2011-01-02', '2011-01-03'])
tm.assert_index_equal(result, expected)
def test_ops_compat(self):
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
rng = timedelta_range('1 days', '10 days', name='foo')
# multiply
for offset in offsets:
self.assertRaises(TypeError, lambda: rng * offset)
# divide
expected = Int64Index((np.arange(10) + 1) * 12, name='foo')
for offset in offsets:
result = rng / offset
tm.assert_index_equal(result, expected, exact=False)
# divide with nats
rng = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
expected = Float64Index([12, np.nan, 24], name='foo')
for offset in offsets:
result = rng / offset
tm.assert_index_equal(result, expected)
# don't allow division by NaT (maybe could in the future)
self.assertRaises(TypeError, lambda: rng / pd.NaT)
def test_subtraction_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
self.assertRaises(TypeError, lambda: tdi - dt)
self.assertRaises(TypeError, lambda: tdi - dti)
self.assertRaises(TypeError, lambda: td - dt)
self.assertRaises(TypeError, lambda: td - dti)
result = dt - dti
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = dti - dt
expected = TimedeltaIndex(['0 days', '1 days', '2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = tdi - td
expected = TimedeltaIndex(['0 days', pd.NaT, '1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = td - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '-1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = dti - td
expected = DatetimeIndex(
['20121231', '20130101', '20130102'], name='bar')
tm.assert_index_equal(result, expected, check_names=False)
result = dt - tdi
expected = DatetimeIndex(['20121231', pd.NaT, '20121230'], name='foo')
tm.assert_index_equal(result, expected)
def test_subtraction_ops_with_tz(self):
# check that dt/dti subtraction ops with tz are validated
dti = date_range('20130101', periods=3)
ts = Timestamp('20130101')
dt = ts.to_pydatetime()
dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
ts_tz = Timestamp('20130101').tz_localize('US/Eastern')
ts_tz2 = Timestamp('20130101').tz_localize('CET')
dt_tz = ts_tz.to_pydatetime()
td = Timedelta('1 days')
def _check(result, expected):
self.assertEqual(result, expected)
self.assertIsInstance(result, Timedelta)
# scalars
result = ts - ts
expected = Timedelta('0 days')
_check(result, expected)
result = dt_tz - ts_tz
expected = Timedelta('0 days')
_check(result, expected)
result = ts_tz - dt_tz
expected = Timedelta('0 days')
_check(result, expected)
# tz mismatches
self.assertRaises(TypeError, lambda: dt_tz - ts)
self.assertRaises(TypeError, lambda: dt_tz - dt)
self.assertRaises(TypeError, lambda: dt_tz - ts_tz2)
self.assertRaises(TypeError, lambda: dt - dt_tz)
self.assertRaises(TypeError, lambda: ts - dt_tz)
self.assertRaises(TypeError, lambda: ts_tz2 - ts)
self.assertRaises(TypeError, lambda: ts_tz2 - dt)
self.assertRaises(TypeError, lambda: ts_tz - ts_tz2)
# with dti
self.assertRaises(TypeError, lambda: dti - ts_tz)
self.assertRaises(TypeError, lambda: dti_tz - ts)
self.assertRaises(TypeError, lambda: dti_tz - ts_tz2)
result = dti_tz - dt_tz
expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
tm.assert_index_equal(result, expected)
result = dt_tz - dti_tz
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
tm.assert_index_equal(result, expected)
result = dti_tz - ts_tz
expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
tm.assert_index_equal(result, expected)
result = ts_tz - dti_tz
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
tm.assert_index_equal(result, expected)
result = td - td
expected = Timedelta('0 days')
_check(result, expected)
result = dti_tz - td
expected = DatetimeIndex(
['20121231', '20130101', '20130102'], tz='US/Eastern')
tm.assert_index_equal(result, expected)
def test_dti_tdi_numeric_ops(self):
# These are normally union/diff set-like ops
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
# TODO(wesm): unused?
# td = Timedelta('1 days')
# dt = Timestamp('20130101')
result = tdi - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '0 days'], name='foo')
tm.assert_index_equal(result, expected)
result = tdi + tdi
expected = TimedeltaIndex(['2 days', pd.NaT, '4 days'], name='foo')
tm.assert_index_equal(result, expected)
result = dti - tdi # name will be reset
expected = DatetimeIndex(['20121231', pd.NaT, '20130101'])
tm.assert_index_equal(result, expected)
def test_sub_period(self):
# GH 13078
# not supported, check TypeError
p = pd.Period('2011-01-01', freq='D')
for freq in [None, 'H']:
idx = pd.TimedeltaIndex(['1 hours', '2 hours'], freq=freq)
with tm.assertRaises(TypeError):
idx - p
with tm.assertRaises(TypeError):
p - idx
def test_addition_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
result = tdi + dt
expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo')
tm.assert_index_equal(result, expected)
result = dt + tdi
expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo')
tm.assert_index_equal(result, expected)
result = td + tdi
expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo')
tm.assert_index_equal(result, expected)
result = tdi + td
expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo')
tm.assert_index_equal(result, expected)
# unequal length
self.assertRaises(ValueError, lambda: tdi + dti[0:1])
self.assertRaises(ValueError, lambda: tdi[0:1] + dti)
# random indexes
self.assertRaises(TypeError, lambda: tdi + Int64Index([1, 2, 3]))
# this is a union!
# self.assertRaises(TypeError, lambda : Int64Index([1,2,3]) + tdi)
result = tdi + dti # name will be reset
expected = DatetimeIndex(['20130102', pd.NaT, '20130105'])
tm.assert_index_equal(result, expected)
result = dti + tdi # name will be reset
expected = DatetimeIndex(['20130102', pd.NaT, '20130105'])
tm.assert_index_equal(result, expected)
result = dt + td
expected = Timestamp('20130102')
self.assertEqual(result, expected)
result = td + dt
expected = Timestamp('20130102')
self.assertEqual(result, expected)
def test_comp_nat(self):
left = pd.TimedeltaIndex([pd.Timedelta('1 days'), pd.NaT,
pd.Timedelta('3 days')])
right = pd.TimedeltaIndex([pd.NaT, pd.NaT, pd.Timedelta('3 days')])
for l, r in [(left, right), (left.asobject, right.asobject)]:
result = l == r
expected = np.array([False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = l != r
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l == pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT == r, expected)
expected = np.array([True, True, True])
tm.assert_numpy_array_equal(l != pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT != l, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l < pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT > l, expected)
def test_value_counts_unique(self):
# GH 7735
idx = timedelta_range('1 days 09:00:00', freq='H', periods=10)
# create repeated values, 'n'th element is repeated by n+1 times
idx = TimedeltaIndex(np.repeat(idx.values, range(1, len(idx) + 1)))
exp_idx = timedelta_range('1 days 18:00:00', freq='-1H', periods=10)
expected = Series(range(10, 0, -1), index=exp_idx, dtype='int64')
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
expected = timedelta_range('1 days 09:00:00', freq='H', periods=10)
tm.assert_index_equal(idx.unique(), expected)
idx = TimedeltaIndex(['1 days 09:00:00', '1 days 09:00:00',
'1 days 09:00:00', '1 days 08:00:00',
'1 days 08:00:00', pd.NaT])
exp_idx = TimedeltaIndex(['1 days 09:00:00', '1 days 08:00:00'])
expected = Series([3, 2], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
exp_idx = TimedeltaIndex(['1 days 09:00:00', '1 days 08:00:00',
pd.NaT])
expected = Series([3, 2, 1], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(dropna=False), expected)
tm.assert_index_equal(idx.unique(), exp_idx)
def test_nonunique_contains(self):
# GH 9512
for idx in map(TimedeltaIndex, ([0, 1, 0], [0, 0, -1], [0, -1, -1],
['00:01:00', '00:01:00', '00:02:00'],
['00:01:00', '00:01:00', '00:00:01'])):
tm.assertIn(idx[0], idx)
def test_unknown_attribute(self):
# GH 9680
tdi = pd.timedelta_range(start=0, periods=10, freq='1s')
ts = pd.Series(np.random.normal(size=10), index=tdi)
self.assertNotIn('foo', ts.__dict__.keys())
self.assertRaises(AttributeError, lambda: ts.foo)
def test_order(self):
# GH 10295
idx1 = TimedeltaIndex(['1 day', '2 day', '3 day'], freq='D',
name='idx')
idx2 = TimedeltaIndex(
['1 hour', '2 hour', '3 hour'], freq='H', name='idx')
for idx in [idx1, idx2]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, idx)
self.assertEqual(ordered.freq, idx.freq)
ordered = idx.sort_values(ascending=False)
expected = idx[::-1]
self.assert_index_equal(ordered, expected)
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq.n, -1)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, idx)
self.assert_numpy_array_equal(indexer,
np.array([0, 1, 2]),
check_dtype=False)
self.assertEqual(ordered.freq, idx.freq)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
self.assert_index_equal(ordered, idx[::-1])
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq.n, -1)
idx1 = TimedeltaIndex(['1 hour', '3 hour', '5 hour',
'2 hour ', '1 hour'], name='idx1')
exp1 = TimedeltaIndex(['1 hour', '1 hour', '2 hour',
'3 hour', '5 hour'], name='idx1')
idx2 = TimedeltaIndex(['1 day', '3 day', '5 day',
'2 day', '1 day'], name='idx2')
# TODO(wesm): unused?
# exp2 = TimedeltaIndex(['1 day', '1 day', '2 day',
# '3 day', '5 day'], name='idx2')
# idx3 = TimedeltaIndex([pd.NaT, '3 minute', '5 minute',
# '2 minute', pd.NaT], name='idx3')
# exp3 = TimedeltaIndex([pd.NaT, pd.NaT, '2 minute', '3 minute',
# '5 minute'], name='idx3')
for idx, expected in [(idx1, exp1), (idx1, exp1), (idx1, exp1)]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, expected)
self.assertIsNone(ordered.freq)
ordered = idx.sort_values(ascending=False)
self.assert_index_equal(ordered, expected[::-1])
self.assertIsNone(ordered.freq)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, expected)
exp = np.array([0, 4, 3, 1, 2])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
self.assertIsNone(ordered.freq)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
self.assert_index_equal(ordered, expected[::-1])
exp = np.array([2, 1, 3, 4, 0])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
self.assertIsNone(ordered.freq)
def test_getitem(self):
idx1 = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
for idx in [idx1]:
result = idx[0]
self.assertEqual(result, pd.Timedelta('1 day'))
result = idx[0:5]
expected = pd.timedelta_range('1 day', '5 day', freq='D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[0:10:2]
expected = pd.timedelta_range('1 day', '9 day', freq='2D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[-20:-5:3]
expected = pd.timedelta_range('12 day', '24 day', freq='3D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[4::-1]
expected = TimedeltaIndex(['5 day', '4 day', '3 day',
'2 day', '1 day'],
freq='-1D', name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
def test_drop_duplicates_metadata(self):
# GH 10115
idx = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
result = idx.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertEqual(idx.freq, result.freq)
idx_dup = idx.append(idx)
self.assertIsNone(idx_dup.freq) # freq is reset
result = idx_dup.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertIsNone(result.freq)
def test_drop_duplicates(self):
# to check Index/Series compat
base = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
idx = base.append(base[:5])
res = idx.drop_duplicates()
tm.assert_index_equal(res, base)
res = Series(idx).drop_duplicates()
tm.assert_series_equal(res, Series(base))
res = idx.drop_duplicates(keep='last')
exp = base[5:].append(base[:5])
tm.assert_index_equal(res, exp)
res = Series(idx).drop_duplicates(keep='last')
tm.assert_series_equal(res, Series(exp, index=np.arange(5, 36)))
res = idx.drop_duplicates(keep=False)
tm.assert_index_equal(res, base[5:])
res = Series(idx).drop_duplicates(keep=False)
tm.assert_series_equal(res, Series(base[5:], index=np.arange(5, 31)))
def test_take(self):
# GH 10295
idx1 = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
for idx in [idx1]:
result = idx.take([0])
self.assertEqual(result, pd.Timedelta('1 day'))
result = idx.take([-1])
self.assertEqual(result, pd.Timedelta('31 day'))
result = idx.take([0, 1, 2])
expected = pd.timedelta_range('1 day', '3 day', freq='D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([0, 2, 4])
expected = pd.timedelta_range('1 day', '5 day', freq='2D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([7, 4, 1])
expected = pd.timedelta_range('8 day', '2 day', freq='-3D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([3, 2, 5])
expected = TimedeltaIndex(['4 day', '3 day', '6 day'], name='idx')
self.assert_index_equal(result, expected)
self.assertIsNone(result.freq)
result = idx.take([-3, 2, 5])
expected = TimedeltaIndex(['29 day', '3 day', '6 day'], name='idx')
self.assert_index_equal(result, expected)
self.assertIsNone(result.freq)
def test_take_invalid_kwargs(self):
idx = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
indices = [1, 6, 5, 9, 10, 13, 15, 3]
msg = r"take\(\) got an unexpected keyword argument 'foo'"
tm.assertRaisesRegexp(TypeError, msg, idx.take,
indices, foo=2)
msg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, msg, idx.take,
indices, out=indices)
msg = "the 'mode' parameter is not supported"
tm.assertRaisesRegexp(ValueError, msg, idx.take,
indices, mode='clip')
def test_infer_freq(self):
# GH 11018
for freq in ['D', '3D', '-3D', 'H', '2H', '-2H', 'T', '2T', 'S', '-3S'
]:
idx = pd.timedelta_range('1', freq=freq, periods=10)
result = pd.TimedeltaIndex(idx.asi8, freq='infer')
tm.assert_index_equal(idx, result)
self.assertEqual(result.freq, freq)
def test_nat_new(self):
idx = pd.timedelta_range('1', freq='D', periods=5, name='x')
result = idx._nat_new()
exp = pd.TimedeltaIndex([pd.NaT] * 5, name='x')
tm.assert_index_equal(result, exp)
result = idx._nat_new(box=False)
exp = np.array([tslib.iNaT] * 5, dtype=np.int64)
tm.assert_numpy_array_equal(result, exp)
def test_shift(self):
# GH 9903
idx = pd.TimedeltaIndex([], name='xxx')
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
tm.assert_index_equal(idx.shift(3, freq='H'), idx)
idx = pd.TimedeltaIndex(['5 hours', '6 hours', '9 hours'], name='xxx')
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
exp = pd.TimedeltaIndex(['8 hours', '9 hours', '12 hours'], name='xxx')
tm.assert_index_equal(idx.shift(3, freq='H'), exp)
exp = pd.TimedeltaIndex(['2 hours', '3 hours', '6 hours'], name='xxx')
tm.assert_index_equal(idx.shift(-3, freq='H'), exp)
tm.assert_index_equal(idx.shift(0, freq='T'), idx)
exp = pd.TimedeltaIndex(['05:03:00', '06:03:00', '9:03:00'],
name='xxx')
tm.assert_index_equal(idx.shift(3, freq='T'), exp)
exp = pd.TimedeltaIndex(['04:57:00', '05:57:00', '8:57:00'],
name='xxx')
tm.assert_index_equal(idx.shift(-3, freq='T'), exp)
def test_repeat(self):
index = pd.timedelta_range('1 days', periods=2, freq='D')
exp = pd.TimedeltaIndex(['1 days', '1 days', '2 days', '2 days'])
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
index = TimedeltaIndex(['1 days', 'NaT', '3 days'])
exp = TimedeltaIndex(['1 days', '1 days', '1 days',
'NaT', 'NaT', 'NaT',
'3 days', '3 days', '3 days'])
for res in [index.repeat(3), np.repeat(index, 3)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
def test_nat(self):
self.assertIs(pd.TimedeltaIndex._na_value, pd.NaT)
self.assertIs(pd.TimedeltaIndex([])._na_value, pd.NaT)
idx = pd.TimedeltaIndex(['1 days', '2 days'])
self.assertTrue(idx._can_hold_na)
tm.assert_numpy_array_equal(idx._isnan, np.array([False, False]))
self.assertFalse(idx.hasnans)
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([], dtype=np.intp))
idx = pd.TimedeltaIndex(['1 days', 'NaT'])
self.assertTrue(idx._can_hold_na)
tm.assert_numpy_array_equal(idx._isnan, np.array([False, True]))
self.assertTrue(idx.hasnans)
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([1], dtype=np.intp))
def test_equals(self):
# GH 13107
idx = pd.TimedeltaIndex(['1 days', '2 days', 'NaT'])
self.assertTrue(idx.equals(idx))
self.assertTrue(idx.equals(idx.copy()))
self.assertTrue(idx.equals(idx.asobject))
self.assertTrue(idx.asobject.equals(idx))
self.assertTrue(idx.asobject.equals(idx.asobject))
self.assertFalse(idx.equals(list(idx)))
self.assertFalse(idx.equals(pd.Series(idx)))
idx2 = pd.TimedeltaIndex(['2 days', '1 days', 'NaT'])
self.assertFalse(idx.equals(idx2))
self.assertFalse(idx.equals(idx2.copy()))
self.assertFalse(idx.equals(idx2.asobject))
self.assertFalse(idx.asobject.equals(idx2))
self.assertFalse(idx.asobject.equals(idx2.asobject))
self.assertFalse(idx.equals(list(idx2)))
self.assertFalse(idx.equals(pd.Series(idx2)))
class TestPeriodIndexOps(Ops):
def setUp(self):
super(TestPeriodIndexOps, self).setUp()
mask = lambda x: (isinstance(x, DatetimeIndex) or
isinstance(x, PeriodIndex))
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = [o for o in self.objs if not mask(o)]
def test_ops_properties(self):
self.check_ops_properties(
['year', 'month', 'day', 'hour', 'minute', 'second', 'weekofyear',
'week', 'dayofweek', 'dayofyear', 'quarter'])
self.check_ops_properties(['qyear'],
lambda x: isinstance(x, PeriodIndex))
def test_asobject_tolist(self):
idx = pd.period_range(start='2013-01-01', periods=4, freq='M',
name='idx')
expected_list = [pd.Period('2013-01-31', freq='M'),
pd.Period('2013-02-28', freq='M'),
pd.Period('2013-03-31', freq='M'),
pd.Period('2013-04-30', freq='M')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = PeriodIndex(['2013-01-01', '2013-01-02', 'NaT',
'2013-01-04'], freq='D', name='idx')
expected_list = [pd.Period('2013-01-01', freq='D'),
pd.Period('2013-01-02', freq='D'),
pd.Period('NaT', freq='D'),
pd.Period('2013-01-04', freq='D')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
tm.assert_index_equal(result, expected)
for i in [0, 1, 3]:
self.assertEqual(result[i], expected[i])
self.assertIs(result[2], pd.NaT)
self.assertEqual(result.name, expected.name)
result_list = idx.tolist()
for i in [0, 1, 3]:
self.assertEqual(result_list[i], expected_list[i])
self.assertIs(result_list[2], pd.NaT)
def test_minmax(self):
# monotonic
idx1 = pd.PeriodIndex([pd.NaT, '2011-01-01', '2011-01-02',
'2011-01-03'], freq='D')
self.assertTrue(idx1.is_monotonic)
# non-monotonic
idx2 = pd.PeriodIndex(['2011-01-01', pd.NaT, '2011-01-03',
'2011-01-02', pd.NaT], freq='D')
self.assertFalse(idx2.is_monotonic)
for idx in [idx1, idx2]:
self.assertEqual(idx.min(), pd.Period('2011-01-01', freq='D'))
self.assertEqual(idx.max(), pd.Period('2011-01-03', freq='D'))
self.assertEqual(idx1.argmin(), 1)
self.assertEqual(idx2.argmin(), 0)
self.assertEqual(idx1.argmax(), 3)
self.assertEqual(idx2.argmax(), 2)
for op in ['min', 'max']:
# Return NaT
obj = PeriodIndex([], freq='M')
result = getattr(obj, op)()
self.assertIs(result, tslib.NaT)
obj = PeriodIndex([pd.NaT], freq='M')
result = getattr(obj, op)()
self.assertIs(result, tslib.NaT)
obj = PeriodIndex([pd.NaT, pd.NaT, pd.NaT], freq='M')
result = getattr(obj, op)()
self.assertIs(result, tslib.NaT)
def test_numpy_minmax(self):
pr = pd.period_range(start='2016-01-15', end='2016-01-20')
self.assertEqual(np.min(pr), Period('2016-01-15', freq='D'))
self.assertEqual(np.max(pr), Period('2016-01-20', freq='D'))
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.min, pr, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.max, pr, out=0)
self.assertEqual(np.argmin(pr), 0)
self.assertEqual(np.argmax(pr), 5)
if not _np_version_under1p10:
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.argmin, pr, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.argmax, pr, out=0)
def test_representation(self):
# GH 7601
idx1 = PeriodIndex([], freq='D')
idx2 = PeriodIndex(['2011-01-01'], freq='D')
idx3 = PeriodIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = PeriodIndex(['2011-01-01', '2011-01-02', '2011-01-03'],
freq='D')
idx5 = PeriodIndex(['2011', '2012', '2013'], freq='A')
idx6 = PeriodIndex(['2011-01-01 09:00', '2012-02-01 10:00',
'NaT'], freq='H')
idx7 = pd.period_range('2013Q1', periods=1, freq="Q")
idx8 = pd.period_range('2013Q1', periods=2, freq="Q")
idx9 = pd.period_range('2013Q1', periods=3, freq="Q")
idx10 = PeriodIndex(['2011-01-01', '2011-02-01'], freq='3D')
exp1 = """PeriodIndex([], dtype='period[D]', freq='D')"""
exp2 = """PeriodIndex(['2011-01-01'], dtype='period[D]', freq='D')"""
exp3 = ("PeriodIndex(['2011-01-01', '2011-01-02'], dtype='period[D]', "
"freq='D')")
exp4 = ("PeriodIndex(['2011-01-01', '2011-01-02', '2011-01-03'], "
"dtype='period[D]', freq='D')")
exp5 = ("PeriodIndex(['2011', '2012', '2013'], dtype='period[A-DEC]', "
"freq='A-DEC')")
exp6 = ("PeriodIndex(['2011-01-01 09:00', '2012-02-01 10:00', 'NaT'], "
"dtype='period[H]', freq='H')")
exp7 = ("PeriodIndex(['2013Q1'], dtype='period[Q-DEC]', "
"freq='Q-DEC')")
exp8 = ("PeriodIndex(['2013Q1', '2013Q2'], dtype='period[Q-DEC]', "
"freq='Q-DEC')")
exp9 = ("PeriodIndex(['2013Q1', '2013Q2', '2013Q3'], "
"dtype='period[Q-DEC]', freq='Q-DEC')")
exp10 = ("PeriodIndex(['2011-01-01', '2011-02-01'], "
"dtype='period[3D]', freq='3D')")
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5,
idx6, idx7, idx8, idx9, idx10],
[exp1, exp2, exp3, exp4, exp5,
exp6, exp7, exp8, exp9, exp10]):
for func in ['__repr__', '__unicode__', '__str__']:
result = getattr(idx, func)()
self.assertEqual(result, expected)
def test_representation_to_series(self):
# GH 10971
idx1 = PeriodIndex([], freq='D')
idx2 = PeriodIndex(['2011-01-01'], freq='D')
idx3 = PeriodIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = PeriodIndex(['2011-01-01', '2011-01-02',
'2011-01-03'], freq='D')
idx5 = PeriodIndex(['2011', '2012', '2013'], freq='A')
idx6 = PeriodIndex(['2011-01-01 09:00', '2012-02-01 10:00',
'NaT'], freq='H')
idx7 = pd.period_range('2013Q1', periods=1, freq="Q")
idx8 = pd.period_range('2013Q1', periods=2, freq="Q")
idx9 = pd.period_range('2013Q1', periods=3, freq="Q")
exp1 = """Series([], dtype: object)"""
exp2 = """0 2011-01-01
dtype: object"""
exp3 = """0 2011-01-01
1 2011-01-02
dtype: object"""
exp4 = """0 2011-01-01
1 2011-01-02
2 2011-01-03
dtype: object"""
exp5 = """0 2011
1 2012
2 2013
dtype: object"""
exp6 = """0 2011-01-01 09:00
1 2012-02-01 10:00
2 NaT
dtype: object"""
exp7 = """0 2013Q1
dtype: object"""
exp8 = """0 2013Q1
1 2013Q2
dtype: object"""
exp9 = """0 2013Q1
1 2013Q2
2 2013Q3
dtype: object"""
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5,
idx6, idx7, idx8, idx9],
[exp1, exp2, exp3, exp4, exp5,
exp6, exp7, exp8, exp9]):
result = repr(pd.Series(idx))
self.assertEqual(result, expected)
def test_summary(self):
# GH9116
idx1 = PeriodIndex([], freq='D')
idx2 = PeriodIndex(['2011-01-01'], freq='D')
idx3 = PeriodIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = PeriodIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
idx5 = PeriodIndex(['2011', '2012', '2013'], freq='A')
idx6 = PeriodIndex(
['2011-01-01 09:00', '2012-02-01 10:00', 'NaT'], freq='H')
idx7 = pd.period_range('2013Q1', periods=1, freq="Q")
idx8 = pd.period_range('2013Q1', periods=2, freq="Q")
idx9 = pd.period_range('2013Q1', periods=3, freq="Q")
exp1 = """PeriodIndex: 0 entries
Freq: D"""
exp2 = """PeriodIndex: 1 entries, 2011-01-01 to 2011-01-01
Freq: D"""
exp3 = """PeriodIndex: 2 entries, 2011-01-01 to 2011-01-02
Freq: D"""
exp4 = """PeriodIndex: 3 entries, 2011-01-01 to 2011-01-03
Freq: D"""
exp5 = """PeriodIndex: 3 entries, 2011 to 2013
Freq: A-DEC"""
exp6 = """PeriodIndex: 3 entries, 2011-01-01 09:00 to NaT
Freq: H"""
exp7 = """PeriodIndex: 1 entries, 2013Q1 to 2013Q1
Freq: Q-DEC"""
exp8 = """PeriodIndex: 2 entries, 2013Q1 to 2013Q2
Freq: Q-DEC"""
exp9 = """PeriodIndex: 3 entries, 2013Q1 to 2013Q3
Freq: Q-DEC"""
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5,
idx6, idx7, idx8, idx9],
[exp1, exp2, exp3, exp4, exp5,
exp6, exp7, exp8, exp9]):
result = idx.summary()
self.assertEqual(result, expected)
def test_resolution(self):
for freq, expected in zip(['A', 'Q', 'M', 'D', 'H',
'T', 'S', 'L', 'U'],
['day', 'day', 'day', 'day',
'hour', 'minute', 'second',
'millisecond', 'microsecond']):
idx = pd.period_range(start='2013-04-01', periods=30, freq=freq)
self.assertEqual(idx.resolution, expected)
def test_union(self):
# union
rng1 = pd.period_range('1/1/2000', freq='D', periods=5)
other1 = pd.period_range('1/6/2000', freq='D', periods=5)
expected1 = pd.period_range('1/1/2000', freq='D', periods=10)
rng2 = pd.period_range('1/1/2000', freq='D', periods=5)
other2 = pd.period_range('1/4/2000', freq='D', periods=5)
expected2 = pd.period_range('1/1/2000', freq='D', periods=8)
rng3 = pd.period_range('1/1/2000', freq='D', periods=5)
other3 = pd.PeriodIndex([], freq='D')
expected3 = pd.period_range('1/1/2000', freq='D', periods=5)
rng4 = pd.period_range('2000-01-01 09:00', freq='H', periods=5)
other4 = pd.period_range('2000-01-02 09:00', freq='H', periods=5)
expected4 = pd.PeriodIndex(['2000-01-01 09:00', '2000-01-01 10:00',
'2000-01-01 11:00', '2000-01-01 12:00',
'2000-01-01 13:00', '2000-01-02 09:00',
'2000-01-02 10:00', '2000-01-02 11:00',
'2000-01-02 12:00', '2000-01-02 13:00'],
freq='H')
rng5 = pd.PeriodIndex(['2000-01-01 09:01', '2000-01-01 09:03',
'2000-01-01 09:05'], freq='T')
other5 = pd.PeriodIndex(['2000-01-01 09:01', '2000-01-01 09:05',
                         '2000-01-01 09:08'],
freq='T')
expected5 = pd.PeriodIndex(['2000-01-01 09:01', '2000-01-01 09:03',
'2000-01-01 09:05', '2000-01-01 09:08'],
freq='T')
rng6 = pd.period_range('2000-01-01', freq='M', periods=7)
other6 = pd.period_range('2000-04-01', freq='M', periods=7)
expected6 = pd.period_range('2000-01-01', freq='M', periods=10)
rng7 = pd.period_range('2003-01-01', freq='A', periods=5)
other7 = pd.period_range('1998-01-01', freq='A', periods=8)
expected7 = pd.period_range('1998-01-01', freq='A', periods=10)
for rng, other, expected in [(rng1, other1, expected1),
(rng2, other2, expected2),
(rng3, other3, expected3), (rng4, other4,
expected4),
(rng5, other5, expected5), (rng6, other6,
expected6),
(rng7, other7, expected7)]:
result_union = rng.union(other)
tm.assert_index_equal(result_union, expected)
def test_add_iadd(self):
rng = | pd.period_range('1/1/2000', freq='D', periods=5) | pandas.period_range |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Imports
import dash
import dash_core_components as dcc
import dash_html_components as html
import dash_table
import plotly.graph_objs as go
import numpy as np
import pandas as pd
from sklearn import datasets
from sklearn.preprocessing import StandardScaler
import umap
class Dash_UMAP(dash.Dash):
external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
def __init__(self, data, labels):
super().__init__(__name__, external_stylesheets= self.external_stylesheets)
self.title = 'UMAP analysis'
self.data = data
self.labels = labels
self.labelled_data = | pd.DataFrame.join(data, labels) | pandas.DataFrame.join |
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import statsmodels
from matplotlib import pyplot
from scipy import stats
import statsmodels.api as sm
import warnings
from itertools import product
import datetime as dt
from statsmodels.tsa.stattools import adfuller
from statsmodels.tsa.stattools import kpss
from pandas import DataFrame
from pandas import concat
from pandas import Series
from math import sqrt
from sklearn.metrics import mean_squared_error
# convert series to supervised learning
def series_to_supervised(data, n_in=1, n_out=1, dropnan=True):
n_vars = 1 if type(data) is list else data.shape[1]
df = DataFrame(data)
cols, names = list(), list()
# input sequence (t-n, ... t-1)
for i in range(n_in, 0, -1):
cols.append(df.shift(i))
names += [('var%d(t-%d)' % (j + 1, i)) for j in range(n_vars)]
# forecast sequence (t, t+1, ... t+n)
for i in range(0, n_out):
cols.append(df.shift(-i))
if i == 0:
names += [('var%d(t)' % (j + 1)) for j in range(n_vars)]
else:
names += [('var%d(t+%d)' % (j + 1, i)) for j in range(n_vars)]
# put it all together
agg = concat(cols, axis=1)
agg.columns = names
# drop rows with NaN values
if dropnan:
agg.dropna(inplace=True)
return agg
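# Illustrative sketch of what series_to_supervised produces (values below are hypothetical, not real data):
#   raw = DataFrame({'price': [1.0, 2.0, 3.0], 'volume': [10, 20, 30]})
#   framed = series_to_supervised(raw.values, n_in=1, n_out=1)
#   framed.columns -> ['var1(t-1)', 'var2(t-1)', 'var1(t)', 'var2(t)']
# i.e. one lagged copy of every column as input plus the current values as output,
# with the first row dropped because its lagged values are NaN.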
data = pd.read_csv('Data/All_Merged.csv') # , parse_dates=[0], date_parser=dateparse
data.isna().sum()
# Inserting 0 for NA
data.fillna(0, inplace=True)
# plt.figure(figsize=[10,4])
# plt.title('BTC Price (USD) Daily')
# plt.plot(data.price, '-', label='Daily')
# Monthly
data['date'] = pd.to_datetime(data['date'])
data['date'] = data['date'].dt.tz_localize(None)
data = data.groupby([pd.Grouper(key='date', freq='M')]).first().reset_index()
data = data.set_index('date')
data['price'].fillna(method='ffill', inplace=True)
# Decomposition - only for price though!
# decomposition = sm.tsa.seasonal_decompose(data.price)
#
# trend = decomposition.trend
# seasonal = decomposition.seasonal
# residual = decomposition.resid
#
# fig = plt.figure(figsize=(10,8))
#
# plt.subplot(411)
# plt.plot(data.price, label='Original')
# plt.legend(loc='best')
# plt.subplot(412)
# plt.plot(trend, label='Trend')
# plt.legend(loc='best')
# plt.subplot(413)
# plt.plot(seasonal,label='Seasonality')
# plt.legend(loc='best')
# plt.subplot(414)
# plt.plot(residual, label='Residuals')
# plt.legend(loc='best')
#
# fig.suptitle('Decomposition of Prices Data')
# plt.show()
# Setting the data structure
reframed = series_to_supervised(data, 1, 1)
# Also removing the lagged price, as this will be created in the ARIMA
reframed.drop(reframed.columns[[0,8, 9, 10, 11, 12, 13]], axis=1, inplace=True)
print(reframed.head())
# split data
split_date = '2018-06-25'
reframed_train = reframed.loc[reframed.index <= split_date].copy()
reframed_test = reframed.loc[reframed.index > split_date].copy()
# Trying ARIMA on the original data
# This is a seasonal ARIMA (SARIMA), so it is probably an extra result on top of a regular ARIMA
# Help for the commentary can be found here: https://machinelearningmastery.com/sarima-for-time-series-forecasting-in-python/
# It fits fine if you don't split into train and test..
# Initial approximation of parameters
Qs = range(0, 2)
qs = range(0, 3)
Ps = range(0, 3)
ps = range(0, 3)
D=1
d=1
parameters = product(ps, qs, Ps, Qs)
parameters_list = list(parameters)
len(parameters_list)
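# Note: each element of parameters_list is a (p, q, P, Q) tuple in that order, matching the
# param[0]..param[3] indexing in the SARIMAX loop below, e.g. parameters_list[0] == (0, 0, 0, 0).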
x_train = reframed_train.iloc[:,:-1].values
y_train = reframed_train.iloc[:,-1]
x_test = reframed_test.iloc[:,:-1].values
y_test = reframed_test.iloc[:,-1]
# Model Selection
results = []
best_aic = float("inf")
warnings.filterwarnings('ignore')
for param in parameters_list:
try:
model=sm.tsa.statespace.SARIMAX(endog=y_train, exog=x_train, order=(param[0], d, param[1]),
seasonal_order=(param[2], D, param[3], 12),enforce_stationarity=True,
enforce_invertibility=True).fit(disp=-1)
except ValueError:
print('wrong parameters:', param)
continue
aic = model.aic
if aic < best_aic:
best_model = model
best_aic = aic
best_param = param
results.append([param, model.aic])
# Best Models
result_table = pd.DataFrame(results)
result_table.columns = ['parameters', 'aic']
print(result_table.sort_values(by = 'aic', ascending=True).head())
print(best_model.summary())
# Residual plot of the best model
fig = plt.figure(figsize=(10,4))
best_model.resid.plot()
fig.suptitle('Residual Plot of the Best Model')
print("Dickey–Fuller test:: p=%f" % sm.tsa.stattools.adfuller(best_model.resid)[1])
# Dickey–Fuller test:: p=0.xxx -> Residuals are stationary
df_month2 = data[['price']]
future = pd.DataFrame()
df_month2 = pd.concat([df_month2, future])
df_month2['forecast'] = best_model.predict(start = len(x_train), end = len(x_train)+len(x_test)-1, exog=x_test)
plt.figure(figsize=(8,4))
df_month2.price.plot()
df_month2.forecast.plot(color='r', ls='--', label='Predicted Price')
plt.legend()
plt.title('Bitcoin Prices (USD) Predicted vs Actuals, by months')
plt.ylabel('mean USD')
plt.show()
# Daily version
df = | pd.read_csv('Data/All_Merged.csv') | pandas.read_csv |
from package import dataHandler as dh
from package import featureHandler as fh
from sklearn.model_selection import KFold
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn.naive_bayes import GaussianNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, VotingClassifier
from sklearn.metrics import roc_auc_score,accuracy_score
import random
import numpy as np
import pandas as pd
from sklearn import preprocessing
import itertools
def get_participant_matches(participants):
matches = get_fixed_participant_matches() + get_variable_participant_matches(participants,age_range=5)
return matches
def get_fixed_participant_matches():
"""There are 10 control females and 7 viable PD females. Thus to use most of the data set possible, the 3 of the control females will need to match with 3 PD males
9 of these control females will be a fixed match. 7 control and PD female matches were done so simply by ordering age by ascending order and matching them. The other 3 control females were all older, so this will create matches with the least age difference
Of the 3 older control females, 2 of them will be fix matched with 2 PD males of the closest age. These PD males are not similar in age to any control males, so they would not have been utilised anyway"""
female_matches = [('C010', 'P019'), #53 and 53
('C031', 'P038'), #67 and 57
('C030', 'P021'), #67 and 58
('C028', 'P001'), #69 and 58
('C024', 'P026'), #71 and 62
('C025', 'P027'), #72 and 67
('C014', 'P008')] #74 and 69
mixed_matches = [('C021', 'P002'), #81 and 82
('C032', 'P012')] #94 and 91
return female_matches + mixed_matches
def get_variable_participant_matches(participants, age_range=5):
controls_to_match = participants.loc[['C004','C013','C009','C020','C006','C026']] #C026 is female, everyone else male
viable_matches = dh.df_retrieve(participants,{'is PD': True,'Sex':'Male'})
viable_matches = viable_matches.loc[~viable_matches.index.isin(['P002','P012','P013','P014'])] #exclude these because P002 and P012 matched already with other females, and P013 has weird CoP that results in some features being NaN
#Pair controls with their potential matches
potential_matches_df = pd.DataFrame(columns=['Possible PD matches','How many'])
for control in controls_to_match.index:
age = controls_to_match.loc[control,'Age']
potential_matches = []
for r in range(-age_range,age_range+1):
m = dh.df_retrieve(viable_matches,{'Age':age+r})
potential_matches += m.index.tolist()
potential_matches_df.loc[control,'Possible PD matches'] = potential_matches
potential_matches_df.loc[control,'How many'] = len(potential_matches)
potential_matches_df = potential_matches_df.sort_values(by='How many')
# helper to remove all occurrences of a person from the potential matches
def remove_match_from_potentials(df,match):
for participant in df.index:
possible_matches = df.loc[participant,'Possible PD matches']
if match in possible_matches: possible_matches.remove(match)
df.loc[participant,'Possible PD matches'] = possible_matches
return df
matches = []
for control in potential_matches_df.index:
possible_matches = potential_matches_df.loc[control,'Possible PD matches']
match = random.choice(possible_matches)
matches.append((control,match))
potential_matches_df = remove_match_from_potentials(potential_matches_df,match) #remove match from all other possible choices to stop double dipping
return matches
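# Example of the combined output of get_participant_matches (the last pair below is an
# illustrative random draw, not a fixed result):
#   [('C010', 'P019'), ('C031', 'P038'), ..., ('C004', 'P031')]
# i.e. a list of (control_id, PD_id) tuples: the nine fixed pairs plus one age-matched draw per
# remaining control, so downstream code can treat each tuple as a single matched unit.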
def get_ensemble_model(seed):
clf1 = LogisticRegression(tol=1e-6, solver='liblinear', max_iter=1000, random_state=seed)
clf2 = SVC(kernel='rbf', probability=True, tol=1e-4, max_iter=-1, random_state=seed)
clf3 = RandomForestClassifier(random_state=seed)
#clf4 = GaussianNB()
#clf5 = KNeighborsClassifier(n_neighbors=3)
#clf6 = DecisionTreeClassifier(random_state=seed)
# eclf = VotingClassifier(estimators=[('lr', clf1), ('svc', clf2), ('rf', clf3), ('gb', clf4), ('knn', clf5), ('tree', clf6)], voting='soft', n_jobs=-1)
eclf = VotingClassifier(estimators=[('lr', clf1), ('svc', clf2), ('rf', clf3)], voting='soft', n_jobs=-1)
return eclf
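# Minimal usage sketch with synthetic data, just to show the soft-voting interface
# (the real pipeline feeds it the scaled feature matrices built via get_X_y):
#   X, y = np.random.normal(size=(20, 5)), np.random.randint(0, 2, size=20)
#   eclf = get_ensemble_model(seed=0)
#   eclf.fit(X, y)
#   probs = eclf.predict_proba(X)[:, 1]   # class-1 probabilities used for the AUROC below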
def get_X_y(matches, train_index, test_index, features):
train_participants = list(itertools.chain(*[matches[i] for i in train_index]))
train_features = features.loc[train_participants]
train_X = train_features.to_numpy()
train_y = np.array([1 if 'P' in file else 0 for file in train_features.index])
test_participants = list(itertools.chain(*[matches[i] for i in test_index]))
test_features = features.loc[test_participants]
test_X = test_features.to_numpy()
test_y = np.array([1 if 'P' in file else 0 for file in test_features.index])
return train_X, train_y, test_X, test_y
def get_all_AUROCs_with_feature_selection(participants,features,feature_selector=None, repetitions=10,return_dataframe=False,seeds=None):
"""The concept here is a little different from get_individual_AUROCs.
The point here is to use all of the features: the EO and EC features plus the features built from the relationship between EO and EC. For this reason, we redefine the task as classifying a participant rather than classifying a single recording.
A total of 30 participants are used here. Each participant gets the full list of features generated from both of their sway files (EO and EC). Some redundancies are removed (i.e., features that are exactly identical).
"""
scaler = preprocessing.StandardScaler()
AUROCs = pd.DataFrame()
for rep in range(repetitions):
if seeds is not None:
seed = seeds[rep]
random.seed(seed)
np.random.seed(seed)
else: seed = 0
matches = get_participant_matches(participants)
kf = KFold(n_splits=5,shuffle=True)
kf.get_n_splits(matches)
for fold, (train_index, test_index) in enumerate(kf.split(matches)):
train_X, train_y, test_X, test_y=get_X_y(matches,train_index,test_index,features)
train_X = scaler.fit_transform(train_X)
test_X = scaler.transform(test_X)
if feature_selector is not None:
feature_selector.fit(train_X,train_y)
train_X = feature_selector.transform(train_X)
test_X = feature_selector.transform(test_X)
eclf = get_ensemble_model(seed)
#eclf = LogisticRegression(tol=1e-6, solver='liblinear', max_iter=1000, random_state=seed)
eclf.fit(train_X, train_y)
AUROC = roc_auc_score(test_y, eclf.predict_proba(test_X)[:,1])
AUROCs.at[rep,fold] = AUROC
if return_dataframe: return AUROCs
else: return AUROCs.mean(axis=1)
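# Hedged usage sketch (any sklearn-style selector with fit/transform should work; SelectKBest is
# only an example, not necessarily what was used):
#   from sklearn.feature_selection import SelectKBest, f_classif
#   mean_aurocs = get_all_AUROCs_with_feature_selection(participants, features,
#                                                       feature_selector=SelectKBest(f_classif, k=10),
#                                                       repetitions=10, seeds=list(range(10)))
#   # returns one mean AUROC per repetition (a Series) unless return_dataframe=True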
def get_effectiveness(participants,features,feature_selector=None, repetitions=10,return_dataframe=False,seeds=None):
"""The concept here is a little different from get_individual_AUROCs.
The point here is to use all of the features: the EO and EC features plus the features built from the relationship between EO and EC. For this reason, we redefine the task as classifying a participant rather than classifying a single recording.
A total of 30 participants are used here. Each participant gets the full list of features generated from both of their sway files (EO and EC). Some redundancies are removed (i.e., features that are exactly identical).
"""
scaler = preprocessing.StandardScaler()
AUROCs = pd.DataFrame()
ACCs = pd.DataFrame()
for rep in range(repetitions):
if seeds is not None:
seed = seeds[rep]
random.seed(seed)
np.random.seed(seed)
else: seed = 0
matches = get_participant_matches(participants)
kf = KFold(n_splits=5,shuffle=True)
kf.get_n_splits(matches)
for fold, (train_index, test_index) in enumerate(kf.split(matches)):
train_X, train_y, test_X, test_y=get_X_y(matches,train_index,test_index,features)
train_X = scaler.fit_transform(train_X)
test_X = scaler.transform(test_X)
if feature_selector is not None:
feature_selector.fit(train_X,train_y)
train_X = feature_selector.transform(train_X)
test_X = feature_selector.transform(test_X)
eclf = get_ensemble_model(seed)
#eclf = LogisticRegression(tol=1e-6, solver='liblinear', max_iter=1000, random_state=seed)
eclf.fit(train_X, train_y)
AUROC = roc_auc_score(test_y, eclf.predict_proba(test_X)[:,1])
acc = accuracy_score(test_y, eclf.predict(test_X))
AUROCs.at[rep,fold] = AUROC
ACCs.at[rep,fold] = acc
if return_dataframe: return AUROCs, ACCs
else: return AUROCs.mean(axis=1), ACCs.mean(axis=1)
def get_individual_AUROCs(participants,features,repetitions=10,return_dataframe=False,seeds=None):
"""
features: df sampled at a specific fs
"""
folds = 5
AUROCs = pd.DataFrame()
scaler = preprocessing.StandardScaler()
for rep in range(repetitions):
if seeds is not None:
seed = seeds[rep]
random.seed(seed)
np.random.seed(seed)
else: seed = 0
matches = get_participant_matches(participants)
kf = KFold(n_splits=folds,shuffle=True)
kf.get_n_splits(matches)
kf_AUROCs = | pd.DataFrame() | pandas.DataFrame |
import geopy.distance as geo
import mysql.connector
from sqlalchemy import create_engine
import numpy as np
import pandas as pd
import time
import yaml
with open("config.yml", 'r') as config_doc:
config = yaml.safe_load(config_doc)
cnx = mysql.connector.connect(**config)
divvy = pd.read_sql('select * from stations', cnx).to_dict('records')
cta = pd.read_sql('select * from cta_stations', cnx)
cta = cta.drop_duplicates('station_id').to_dict('records')
print('Number of Divvy Stations:', len(divvy))
print('Number of CTA Stations:', len(cta))
def distance_between_two_stations(s1, s2):
coords1 = (s1['latitude'], s1['longitude'])
coords2 = (s2['latitude'], s2['longitude'])
return geo.distance(coords1, coords2).miles
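# Example (coordinates are illustrative, not real station records):
#   s1 = {'latitude': 41.8781, 'longitude': -87.6298}
#   s2 = {'latitude': 41.8919, 'longitude': -87.6051}
#   distance_between_two_stations(s1, s2)  # -> geodesic distance in miles (~1.6 here)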
print("Computing distances")
l = []
for d in divvy:
for c in cta:
l.append({
'divvy_station_id': d['id'],
'cta_station_id': c['station_id'],
'distance': distance_between_two_stations(d, c)
})
df = | pd.DataFrame(l) | pandas.DataFrame |
# =========================================================================== #
# ANALYSIS #
# =========================================================================== #
'''Analysis and inference functions'''
# %%
# --------------------------------------------------------------------------- #
# LIBRARIES #
# --------------------------------------------------------------------------- #
import os
import sys
import inspect
import numpy as np
import pandas as pd
import scipy
from scipy import stats
from scipy.stats import kurtosis, skew
import textwrap
import univariate
import visual
import description
import independence
# %%
# ---------------------------------------------------------------------------- #
# ANALYSIS #
# ---------------------------------------------------------------------------- #
def analysis(df, x, y, hue=None):
k = independence.Kruskal()
a = independence.Anova()
if ((df[x].dtype == np.dtype('int64') or df[x].dtype == np.dtype('float64')) and
(df[y].dtype == np.dtype('int64') or df[y].dtype == np.dtype('float64'))):
desc = pd.DataFrame()
dx = description.describe_quant_x(df[x])
dy = description.describe_quant_x(df[y])
desc = | pd.concat([dx, dy], axis=0) | pandas.concat |
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import os
import os.path as osp
import pickle
from sklearn.model_selection import train_test_split
import scipy.io as sio
class DataHandler:
def __init__(self, meta_data, framework_root):
print('initialize data handler')
self.framework_root = framework_root
self.train_randomized_indices = None
self.set_meta_data(meta_data)
# create the datastructure dictionary containing all data with
self.all_data_not_splited = {'output_df':None}
self.all_data_dict = {'df_list': [],
'train': {'input': None,
'output_df': None, # pandas dataframe containing all the outputs
# 'output': {'Anxiety_Level': [],
# 'Time': []},
# 'map_to_df_list': [],
'iterator': 0},
'valid': {'input': None,
'output_df': None, # pandas dataframe containing all the outputs
# 'output': {'Anxiety_Level': [],
# 'Time': []},
# 'map_to_df_list': [],
'iterator': 0}}
print("------ sampling method is: {}. It can be [retrospective, uniform, prospective] ------".format(self.meta_data['sampling_method']))
print("------ sampling_period is: {} seconds ------" .format(self.meta_data['sampling_period_seconds']))
if self.meta_data['use_total_time_minutes']:
print("------ use_total_time_minutes is true. ------")
total_sampled_time = self.meta_data['total_sampled_time_minutes']
else:
print("------ use_total_time_minutes is false. => Will use sampling_selected_rows. ------")
print("------ sampling_selected_rows is: {} rows".format(self.meta_data['sampling_selected_rows']))
total_sampled_time = self.meta_data['sampling_selected_rows'] * self.meta_data['sampling_period_seconds'] / 60.0
print("------ total_sampled_time is: {} minutes".format(total_sampled_time))
#region metadata handling methods
def set_meta_data(self, meta_data):
self.meta_data = meta_data
def get_meta_data(self):
return self.meta_data
def update_meta_data(self, input_meta_data):
self.meta_data.update(input_meta_data)
#endregion
def GetTimeRangeSampledDataFrame(self, df_x, time_y, random_time_shift_value_seconds=0,
total_time_minutes=0,
period_seconds=5,
sampling_method='retrospective',
use_total_time_minutes=False,
sampling_selected_rows=None): #TODO make this possible to use either #of rows or period and total time
if use_total_time_minutes:
if total_time_minutes is None:
raise Exception("Error: use_total_time_minutes is True but total_time_minutes is not provided")
if total_time_minutes == 0:
raise Exception("Error: total_time_minutes cannot be 0 minutes")
else:
if sampling_selected_rows is None:
raise Exception("Error: sampling_selected_rows is not provided")
total_time_minutes = np.ceil(sampling_selected_rows * period_seconds/60)
total_time_minutes = pd.Timedelta(total_time_minutes, unit='m')
period_seconds = | pd.Timedelta(period_seconds, unit='s') | pandas.Timedelta |
from itertools import compress
import numpy
import pandas
from sklearn.base import BaseEstimator, TransformerMixin
from data_algebra.data_ops import *
def mk_data(*, nrow, n_noise_var=0, n_signal_var=0, n_noise_level=1000):
# combination of high-complexity useless variables
# and low-complexity useful variables
y = numpy.random.normal(size=nrow)
d = pandas.DataFrame({"const_col": ["a"] * nrow})
noise_levels = ["nl_" + str(j) for j in range(n_noise_level)]
for i in range(n_noise_var):
d["noise_" + str(i)] = numpy.random.choice(
noise_levels, replace=True, size=nrow
)
signal_levels = {"a": 1, "b": -1}
for i in range(n_signal_var):
v = "signal_" + str(i)
d[v] = numpy.random.choice(
[k for k in signal_levels.keys()], replace=True, size=nrow
)
vn = d[v].map(signal_levels)
y = y + vn
return d, y
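# Quick usage sketch: two useful low-complexity variables plus five high-cardinality noise variables.
#   d, y = mk_data(nrow=100, n_noise_var=5, n_signal_var=2)
#   d.shape  # (100, 8): 'const_col', 'noise_0'..'noise_4', 'signal_0', 'signal_1'
#   y.shape  # (100,)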
# noinspection PyPep8Naming,PyUnusedLocal
class TransformerAdapter(BaseEstimator, TransformerMixin):
def __init__(self, model):
self.model = model
def fit(self, X, y):
self.model.fit(X, y)
return self
def transform(self, X):
return self.model.transform(X)
def fit_transform(self, X, y=None, **fit_params):
self.fit(X, y)
return self.transform(X)
def predict(self, X):
return self.transform(X)
def fit_predict(self, X, y=None, **fit_params):
return self.fit_transform(X, y)
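# The adapter gives a transform-style coder (anything exposing .fit/.transform, e.g. a vtreat
# treatment plan) the estimator surface that sklearn utilities expect. Hedged sketch, where
# `some_coder` is an assumption and not defined in this file:
#   adapter = TransformerAdapter(some_coder)
#   coded_train = adapter.fit_transform(d_train, y_train)
#   coded_test = adapter.transform(d_test)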
# https://github.com/WinVector/data_algebra/blob/master/Examples/cdata/ranking_pivot_example.md
class Container:
def __init__(self, value):
self.value = value
def __repr__(self):
return self.value.__repr__()
def __str__(self):
return self.value.__repr__()
def update(self, other):
if not isinstance(other, Container):
return self
return Container(sorted([vi for vi in set(self.value).union(other.value)]))
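# Example of the merge behaviour used by solve_for_partition:
#   Container([1, 3]).update(Container([2, 3]))  # -> Container([1, 2, 3])
#   Container([1]).update('not a container')     # -> left Container returned unchanged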
def solve_for_partition(d_original, d_coded):
def sorted_concat(vals):
return Container(sorted([vi for vi in set(vals)]))
def combine_containers(lcv, rcv):
return [lft.update(rgt) for lft, rgt in zip(lcv, rcv)]
nrow = d_original.shape[0]
ncol = d_original.shape[1]
pairs = pandas.DataFrame({"idx": range(nrow), "complement": [Container([])] * nrow})
for j in range(ncol):
dj = pandas.DataFrame(
{
"orig": d_original.iloc[:, j],
"coded": d_coded.iloc[:, j],
"idx": range(nrow),
}
)
ops_collect = (
describe_table(dj, table_name="dj")
.rename_columns({"coded_left": "coded", "idx_left": "idx"})
.natural_join(
b=describe_table(dj, table_name="dj"), jointype="full", by=["orig"]
)
.select_rows("(coded_left - coded).abs() > 1.0e-5")
.project(
{"complement": user_fn(sorted_concat, "idx")}, group_by=["idx_left"]
)
.rename_columns({"idx": "idx_left"})
)
pairsj = ops_collect.transform(dj)
ops_join = (
describe_table(pairs, table_name="pairs")
.natural_join(
b=describe_table(pairsj, table_name="pairsj").rename_columns(
{"c_right": "complement"}
),
jointype="left",
by=["idx"],
)
.extend(
{"complement": user_fn(combine_containers, ["complement", "c_right"])}
)
.drop_columns("c_right")
)
pairs = ops_join.eval({"pairs": pairs, "pairsj": pairsj})
return pairs
def collect_relations(*, d_original, d_coded, d_partition, est_fn, y_check=None):
nrow = d_original.shape[0]
ncol = d_original.shape[1]
relns_x = []
relns_y = []
for j in range(ncol):
col_j = d_original.iloc[:, j]
values_j = [v for v in set(col_j)]
for v in values_j:
positions = set([i for i in compress(range(nrow), col_j == v)])
for p in positions:
partition_indexes = d_partition["complement"][p].value
value_indexes = [i for i in positions.intersection(partition_indexes)]
wts = est_fn(
nrow=nrow,
partition_indexes=partition_indexes,
value_indexes=value_indexes,
)
if wts is not None and numpy.sum(numpy.abs(wts)) > 1.0e-7:
# should have d_coded.iloc[p, j] == numpy.dot(wts, y_check)
value = d_coded.iloc[p, j]
relns_x.append(wts)
relns_y.append([value, p, j, v])
if y_check is not None:
check = numpy.dot(wts, y_check)
if numpy.abs(value - check) > 1.0e-3:
raise ValueError(
"check failed j: " + str(j) + ", p: " + str(p)
)
relns_y = pandas.DataFrame(relns_y)
import copy
import itertools
import multiprocessing
import string
import traceback
import warnings
from multiprocessing import Pool
from operator import itemgetter
import jellyfish as jf
import numpy as np
import pandas as pd
from scipy.optimize import linear_sum_assignment
from scipy.stats import wasserstein_distance
from simod.configuration import Configuration, Metric
from . import alpha_oracle as ao
from .alpha_oracle import Rel
from ..support_utils import progress_bar_async
class SimilarityEvaluator:
"""Evaluates the similarity of two event-logs."""
def __init__(self, log_data: pd.DataFrame, simulation_data: pd.DataFrame, settings: Configuration, max_cases=500,
dtype='log'):
self.dtype = dtype
self.log_data = copy.deepcopy(log_data)
self.simulation_data = copy.deepcopy(simulation_data)
self.max_cases = max_cases
self.one_timestamp = settings.read_options.one_timestamp
self._preprocess_data(dtype)
def _preprocess_data(self, dtype):
preprocessor = self._get_preprocessor(dtype)
return preprocessor()
def _get_preprocessor(self, dtype):
if dtype == 'log':
return self._preprocess_log
elif dtype == 'serie':
return self._preprocess_serie
else:
raise ValueError(dtype)
def _preprocess_log(self):
self.ramp_io_perc = 0.2
self.log_data['source'] = 'log'
self.simulation_data['source'] = 'simulation'
data = pd.concat([self.log_data, self.simulation_data], axis=0, ignore_index=True)
if (('processing_time' not in data.columns) or ('waiting_time' not in data.columns)):
data = self.calculate_times(data)
data = self.scaling_data(data)
# save data
self.log_data = data[data.source == 'log']
self.simulation_data = data[data.source == 'simulation']
self.alias = self.create_task_alias(data, 'task')
self.alpha_concurrency = ao.AlphaOracle(self.log_data, self.alias, self.one_timestamp, True)
# reformat and sampling data
self.log_data = self.reformat_events(self.log_data.to_dict('records'), 'task')
self.simulation_data = self.reformat_events(self.simulation_data.to_dict('records'), 'task')
num_traces = int(len(self.simulation_data) * self.ramp_io_perc)
self.simulation_data = self.simulation_data[num_traces:-num_traces]
self.log_data = list(map(lambda i: self.log_data[i],
np.random.randint(0, len(self.log_data), len(self.simulation_data))))
def _preprocess_serie(self):
# load data
self.log_data['source'] = 'log'
self.simulation_data['source'] = 'simulation'
def measure_distance(self, metric: Metric, verbose=False):
"""
Measures the distance of two event-logs
with TSD or DL and MAE distance
Returns
-------
distance : float
"""
self.verbose = verbose
# similarity measurement and matching
evaluator = self._get_evaluator(metric)
if metric in [Metric.DAY_EMD, Metric.DAY_HOUR_EMD, Metric.CAL_EMD]:
distance = evaluator(self.log_data, self.simulation_data, criteria=metric)
else:
distance = evaluator(self.log_data, self.simulation_data, metric)
self.similarity = {'metric': metric, 'sim_val': np.mean([x['sim_score'] for x in distance])}
def _get_evaluator(self, metric: Metric):
if self.dtype == 'log':
if metric in [Metric.TSD, Metric.DL, Metric.MAE, Metric.DL_MAE]:
return self._evaluate_seq_distance
elif metric is Metric.LOG_MAE:
return self.log_mae_metric
elif metric in [Metric.HOUR_EMD, Metric.DAY_EMD, Metric.DAY_HOUR_EMD, Metric.CAL_EMD]:
return self.log_emd_metric
else:
raise ValueError(metric)
elif self.dtype == 'serie':
if metric in [Metric.HOUR_EMD, Metric.DAY_EMD, Metric.DAY_HOUR_EMD, Metric.CAL_EMD]:
return self.serie_emd_metric
else:
raise ValueError(metric)
else:
raise ValueError(self.dtype)
# =============================================================================
# Timed string distance
# =============================================================================
def _evaluate_seq_distance(self, log_data, simulation_data, metric: Metric):
"""
Timed string distance calculation
Parameters
----------
log_data : Ground truth list
simulation_data : List
Returns
-------
similarity : tsd similarity
"""
similarity = list()
# define the type of processing: sequential or parallel
cases = len(set([x['caseid'] for x in log_data]))
if cases <= self.max_cases:
args = (metric, simulation_data, log_data,
self.alpha_concurrency.oracle,
({'min': 0, 'max': len(simulation_data)},
{'min': 0, 'max': len(log_data)}))
df_matrix = self._compare_traces(args)
else:
cpu_count = multiprocessing.cpu_count()
mx_len = len(log_data)
ranges = self.define_ranges(mx_len, int(np.ceil(cpu_count / 2)))
ranges = list(itertools.product(*[ranges, ranges]))
reps = len(ranges)
pool = Pool(processes=cpu_count)
# Generate
args = [(metric, simulation_data[r[0]['min']:r[0]['max']],
log_data[r[1]['min']:r[1]['max']],
self.alpha_concurrency.oracle,
r) for r in ranges]
p = pool.map_async(self._compare_traces, args)
if self.verbose:
progress_bar_async(p, f'evaluating {metric}:', reps)
pool.close()
# Save results
df_matrix = pd.concat(list(p.get()), axis=0, ignore_index=True)
df_matrix.sort_values(by=['i', 'j'], inplace=True)
df_matrix = df_matrix.reset_index().set_index(['i', 'j'])
if metric == Metric.DL_MAE:
dl_matrix = df_matrix[['dl_distance']].unstack().to_numpy()
mae_matrix = df_matrix[['mae_distance']].unstack().to_numpy()
# MAE normalized
max_mae = mae_matrix.max()
mae_matrix = np.divide(mae_matrix, max_mae)
# multiply both matrices by beta equal to 0.5
dl_matrix = np.multiply(dl_matrix, 0.5)
mae_matrix = np.multiply(mae_matrix, 0.5)
# add the two weighted matrices element-wise
cost_matrix = np.add(dl_matrix, mae_matrix)
else:
cost_matrix = df_matrix[['distance']].unstack().to_numpy()
row_ind, col_ind = linear_sum_assignment(np.array(cost_matrix))
# Create response
for idx, idy in zip(row_ind, col_ind):
similarity.append(dict(caseid=simulation_data[idx]['caseid'],
sim_order=simulation_data[idx]['profile'],
log_order=log_data[idy]['profile'],
sim_score=(cost_matrix[idx][idy]
if metric == Metric.MAE else
(1 - (cost_matrix[idx][idy])))
)
)
return similarity
@staticmethod
def _compare_traces(args):
def ae_distance(et_1, et_2, st_1, st_2):
cicle_time_s1 = (et_1 - st_1).total_seconds()
cicle_time_s2 = (et_2 - st_2).total_seconds()
ae = np.abs(cicle_time_s1 - cicle_time_s2)
return ae
def tsd_alpha(s_1, s_2, p_1, p_2, w_1, w_2, alpha_concurrency):
"""
Compute the Damerau-Levenshtein distance between two given
strings (s_1 and s_2)
Parameters
----------
s_1, s_2 : activity sequences to compare
p_1, p_2, w_1, w_2 : normalized processing and waiting times per activity
alpha_concurrency : dict
Returns
-------
Float
"""
def calculate_cost(s1_idx, s2_idx):
t_1 = p_1[s1_idx] + w_1[s1_idx]
if t_1 > 0:
b_1 = (p_1[s1_idx] / t_1)
cost = ((b_1 * np.abs(p_2[s2_idx] - p_1[s1_idx])) +
((1 - b_1) * np.abs(w_2[s2_idx] - w_1[s1_idx])))
else:
cost = 0
return cost
dist = {}
lenstr1 = len(s_1)
lenstr2 = len(s_2)
for i in range(-1, lenstr1 + 1):
dist[(i, -1)] = i + 1
for j in range(-1, lenstr2 + 1):
dist[(-1, j)] = j + 1
for i in range(0, lenstr1):
for j in range(0, lenstr2):
if s_1[i] == s_2[j]:
cost = calculate_cost(i, j)
else:
cost = 1
dist[(i, j)] = min(
dist[(i - 1, j)] + 1, # deletion
dist[(i, j - 1)] + 1, # insertion
dist[(i - 1, j - 1)] + cost # substitution
)
if i and j and s_1[i] == s_2[j - 1] and s_1[i - 1] == s_2[j]:
if alpha_concurrency[(s_1[i], s_2[j])] == Rel.PARALLEL:
cost = calculate_cost(i, j - 1)
dist[(i, j)] = min(dist[(i, j)], dist[i - 2, j - 2] + cost) # transposition
return dist[lenstr1 - 1, lenstr2 - 1]
def gen(metric: Metric, serie1, serie2, oracle, r):
"""Reads the simulation results stats"""
try:
df_matrix = list()
for i, s1_ele in enumerate(serie1):
for j, s2_ele in enumerate(serie2):
element = {'i': r[0]['min'] + i, 'j': r[1]['min'] + j}
if metric in [Metric.TSD, Metric.DL, Metric.DL_MAE]:
element['s_1'] = s1_ele['profile']
element['s_2'] = s2_ele['profile']
element['length'] = max(len(s1_ele['profile']), len(s2_ele['profile']))
if metric is Metric.TSD:
element['p_1'] = s1_ele['proc_act_norm']
element['p_2'] = s2_ele['proc_act_norm']
element['w_1'] = s1_ele['wait_act_norm']
element['w_2'] = s2_ele['wait_act_norm']
if metric in [Metric.MAE, Metric.DL_MAE]:
element['et_1'] = s1_ele['end_time']
element['et_2'] = s2_ele['end_time']
element['st_1'] = s1_ele['start_time']
element['st_2'] = s2_ele['start_time']
df_matrix.append(element)
df_matrix = pd.DataFrame(df_matrix)
if metric is Metric.TSD:
df_matrix['distance'] = df_matrix.apply(
lambda x: tsd_alpha(x.s_1, x.s_2, x.p_1, x.p_2, x.w_1, x.w_2, oracle) / x.length, axis=1)
elif metric is Metric.DL:
df_matrix['distance'] = df_matrix.apply(
lambda x: jf.damerau_levenshtein_distance(''.join(x.s_1), ''.join(x.s_2)) / x.length, axis=1)
elif metric is Metric.MAE:
df_matrix['distance'] = df_matrix.apply(
lambda x: ae_distance(x.et_1, x.et_2, x.st_1, x.st_2), axis=1)
elif metric is Metric.DL_MAE:
df_matrix['dl_distance'] = df_matrix.apply(
lambda x: jf.damerau_levenshtein_distance(''.join(x.s_1), ''.join(x.s_2)) / x.length, axis=1)
df_matrix['mae_distance'] = df_matrix.apply(
lambda x: ae_distance(x.et_1, x.et_2, x.st_1, x.st_2), axis=1)
else:
raise ValueError(metric)
return df_matrix
except Exception:
traceback.print_exc()
return gen(*args)
# =============================================================================
# whole log MAE
# =============================================================================
def log_mae_metric(self, log_data: list, simulation_data: list, metric: Metric) -> list:
"""
Measures the MAE distance between two whole logs
Parameters
----------
log_data : list
simulation_data : list
Returns
-------
list
"""
similarity = list()
log_data = pd.DataFrame(log_data)
simulation_data = pd.DataFrame(simulation_data)
log_timelapse = (log_data.end_time.max() - log_data.start_time.min()).total_seconds()
sim_timelapse = (simulation_data.end_time.max() - simulation_data.start_time.min()).total_seconds()
similarity.append({'sim_score': np.abs(sim_timelapse - log_timelapse)})
return similarity
# =============================================================================
# Log emd distance
# =============================================================================
def log_emd_metric(self, log_data: list, simulation_data: list, criteria: Metric = Metric.HOUR_EMD) -> list:
"""
Measures the EMD distance between two logs on different aggregation
levels specified by the user, by default per hour
Parameters
----------
log_data : list
simulation_data : list
criteria : Metric, optional
aggregation level; the default is Metric.HOUR_EMD (hourly).
Returns
-------
list
"""
similarity = list()
window = 1
# hist_range = [0, int((window * 3600))]
log_data = pd.DataFrame(log_data)
simulation_data = pd.DataFrame(simulation_data)
def split_date_time(dataframe, feature, source):
day_hour = lambda x: x[feature].hour
dataframe['hour'] = dataframe.apply(day_hour, axis=1)
date = lambda x: x[feature].date()
dataframe['date'] = dataframe.apply(date, axis=1)
# create time windows
i = 0
daily_windows = dict()
for hour in range(24):
if hour % window == 0:
i += 1
daily_windows[hour] = i
dataframe = dataframe.merge(
pd.DataFrame.from_dict(daily_windows, orient='index').rename_axis('hour'),
on='hour',
how='left').rename(columns={0: 'window'})
dataframe = dataframe[[feature, 'date', 'window']]
dataframe.rename(columns={feature: 'timestamp'}, inplace=True)
dataframe['timestamp'] = pd.to_datetime(dataframe['timestamp'], utc=True)
dataframe['source'] = source
return dataframe
data = split_date_time(log_data, 'start_time', 'log')
data = pd.concat([data, split_date_time(log_data, 'end_time', 'log')], ignore_index=True)
data = pd.concat([data, split_date_time(simulation_data, 'start_time', 'sim')], ignore_index=True)
data = pd.concat([data, split_date_time(simulation_data, 'end_time', 'sim')], ignore_index=True)
data['weekday'] = data.apply(lambda x: x.date.weekday(), axis=1)
g_criteria = {Metric.HOUR_EMD: 'window', Metric.DAY_EMD: 'weekday', Metric.DAY_HOUR_EMD: ['weekday', 'window'],
Metric.CAL_EMD: 'date'}
similarity = list()
for key, group in data.groupby(g_criteria[criteria]):
w_df = group.copy()
w_df = w_df.reset_index()
basetime = w_df.timestamp.min().floor(freq='H')
diftime = lambda x: (x['timestamp'] - basetime).total_seconds()
w_df['rel_time'] = w_df.apply(diftime, axis=1)
with warnings.catch_warnings():
warnings.filterwarnings('ignore')
log_hist = np.histogram(w_df[w_df.source == 'log'].rel_time, density=True)
sim_hist = np.histogram(w_df[w_df.source == 'sim'].rel_time, density=True)
if np.isnan(np.sum(log_hist[0])) or np.isnan(np.sum(sim_hist[0])):
similarity.append({'window': key, 'sim_score': 0})
else:
similarity.append({'window': key, 'sim_score': wasserstein_distance(log_hist[0], sim_hist[0])})
return similarity
# =============================================================================
# serie emd distance
# =============================================================================
def serie_emd_metric(self, log_data, simulation_data, criteria: Metric = Metric.HOUR_EMD):
similarity = list()
window = 1
log_data = pd.DataFrame(log_data)
simulation_data = pd.DataFrame(simulation_data)
def split_date_time(dataframe, feature, source):
day_hour = lambda x: x[feature].hour
dataframe['hour'] = dataframe.apply(day_hour, axis=1)
date = lambda x: x[feature].date()
dataframe['date'] = dataframe.apply(date, axis=1)
# create time windows
i = 0
daily_windows = dict()
for x in range(24):
if x % window == 0:
i += 1
daily_windows[x] = i
dataframe = dataframe.merge(
pd.DataFrame.from_dict(daily_windows, orient='index').rename_axis('hour'),
on='hour', how='left').rename(columns={0: 'window'})
dataframe = dataframe[[feature, 'date', 'window']]
dataframe.rename(columns={feature: 'timestamp'}, inplace=True)
dataframe['source'] = source
return dataframe
data = split_date_time(log_data, 'timestamp', 'log')
data = pd.concat([data, split_date_time(simulation_data, 'timestamp', 'sim')], ignore_index=True)
data['weekday'] = data.apply(lambda x: x.date.weekday(), axis=1)
g_criteria = {Metric.HOUR_EMD: 'window', Metric.DAY_EMD: 'weekday', Metric.DAY_HOUR_EMD: ['weekday', 'window'],
Metric.CAL_EMD: 'date'}
similarity = list()
for key, group in data.groupby(g_criteria[criteria]):
w_df = group.copy()
w_df = w_df.reset_index()
basetime = w_df.timestamp.min().floor(freq='H')
diftime = lambda x: (x['timestamp'] - basetime).total_seconds()
w_df['rel_time'] = w_df.apply(diftime, axis=1)
with warnings.catch_warnings():
warnings.filterwarnings('ignore')
log_hist = np.histogram(w_df[w_df.source == 'log'].rel_time, density=True)
sim_hist = np.histogram(w_df[w_df.source == 'sim'].rel_time, density=True)
if np.isnan(np.sum(log_hist[0])) or np.isnan(np.sum(sim_hist[0])):
similarity.append({'window': key, 'sim_score': 1})
else:
similarity.append({'window': key, 'sim_score': wasserstein_distance(log_hist[0], sim_hist[0])})
return similarity
# =============================================================================
# Support methods
# =============================================================================
def create_task_alias(self, data, features):
"""
Create string alias for tasks names or tuples of tasks-roles names
Parameters
----------
features : list
Returns
-------
alias : alias dictionary
"""
data = data.to_dict('records')
subsec_set = set()
if isinstance(features, list):
task_list = [(x[features[0]], x[features[1]]) for x in data]
else:
task_list = [x[features] for x in data]
[subsec_set.add(x) for x in task_list]
variables = sorted(list(subsec_set))
characters = string.ascii_letters + string.digits
# characters = [chr(i) for i in range(0, len(variables))]
aliases = list(map(lambda i: characters[i], np.random.randint(0, len(characters), len(variables))))
alias = dict()
for i, _ in enumerate(variables):
alias[variables[i]] = aliases[i]
return alias
@staticmethod
def calculate_times(log):
"""Appends the indexes and relative time to the dataframe.
parms:
log: dataframe.
Returns:
Dataframe: The dataframe with the calculated features added.
"""
log['processing_time'] = 0
log['multitasking'] = 0
log = log.to_dict('records')
log = sorted(log, key=lambda x: (x['source'], x['caseid']))
for _, group in itertools.groupby(log, key=lambda x: (x['source'], x['caseid'])):
events = list(group)
events = sorted(events, key=itemgetter('start_timestamp'))
for i in range(0, len(events)):
# In the one-timestamp approach the first activity of the trace
# is taken as instantaneous, since there is no previous timestamp
# to derive a duration range from
dur = (events[i]['end_timestamp'] - events[i]['start_timestamp']).total_seconds()
if i == 0:
wit = 0
else:
wit = (events[i]['start_timestamp'] - events[i - 1]['end_timestamp']).total_seconds()
events[i]['waiting_time'] = wit if wit >= 0 else 0
events[i]['processing_time'] = dur
return pd.DataFrame.from_dict(log)
import numpy as np
import itertools
import time
import sys
import pandas as pd
from numpy.linalg import norm
from matplotlib import pyplot as plt
from matplotlib.colors import to_rgba
from matplotlib import rc
rc("font", **{"family": "serif", "serif": ["Computer Modern"], "size": 18})
rc("text", usetex=True)
from progress.bar import ShadyBar as Bar
from graphik.solvers.local_solver import LocalSolver
from graphik.solvers.solver_fabrik import solver_fabrik
from graphik.solvers.geometric_jacobian import jacobian_ik
from graphik.solvers.riemannian_solver import RiemannianSolver
from graphik.graphs.graph_base import RobotGraph
from graphik.robots.robot_base import RobotRevolute, RobotSpherical, RobotPlanar
from graphik.utils.dgp import (
adjacency_matrix_from_graph,
pos_from_graph,
graph_from_pos,
bound_smoothing,
)
from graphik.utils.geometry import trans_axis
from graphik.utils.utils import (
list_to_variable_dict,
list_to_variable_dict_spherical,
variable_dict_to_list,
best_fit_transform,
safe_arccos,
wraptopi,
bernoulli_confidence_jeffreys,
# bernoulli_confidence_normal_approximation
)
# Colours and styles used throughout
linestyles = ["-", "--", "-."]
line_colors = ["#000000", "#990000", "#294772", "#F6AA1C"] # , '#cccccc']
line_markers = ["s", "o", "d", "*"]
def bounds_to_spherical_bounds(bounds, max_val=np.inf):
new_bounds = []
for b in bounds:
new_bounds.append(max_val)
new_bounds.append(b)
return new_bounds
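# Illustrative check (not part of the original file): the helper above interleaves
# max_val before every original bound, doubling the list length; presumably max_val
# bounds the otherwise unconstrained angle of each spherical joint pair.
def _demo_bounds_to_spherical_bounds():
    out = bounds_to_spherical_bounds([0.5, 1.0], max_val=np.pi)
    assert out == [np.pi, 0.5, np.pi, 1.0]
    return out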
def run_multiple_experiments(
graph,
n_goals: int,
n_init: int,
zero_init: bool,
local_solver_params: dict,
riemann_params,
jacobian_params,
use_limits,
verbosity,
do_bound_smoothing,
local_algorithms,
riemannian_algorithms,
fabrik_max_iter,
trigsimp=False,
do_fabrik=True,
do_jacobian=True,
fabrik_tol=1e-6,
use_symbolic=True,
use_hess=True,
local_graph=None,
local_graph_map=None,
pose_goals=False,
):
results_list = []
bar = Bar("", max=n_goals, check_tty=False, hide_cursor=False)
for idx in range(n_goals):
q_goal = graph.robot.random_configuration()
if zero_init:
if graph.robot.spherical:
init = n_init * [graph.robot.n * 2 * [0.0]]
else:
init = n_init * [graph.robot.n * [0.0]]
else:
init = [graph.robot.random_configuration() for _ in range(n_init)]
init = variable_dict_to_list(init)
res = run_full_experiment(
graph,
local_solver_params,
riemann_params,
jacobian_params,
q_goal,
init,
use_limits,
verbosity,
do_bound_smoothing,
local_algorithms,
riemannian_algorithms,
trigsimp,
fabrik_max_iter,
fabrik_tol=fabrik_tol,
do_fabrik=do_fabrik,
do_jacobian=do_jacobian,
use_symbolic=use_symbolic,
use_hess=use_hess,
local_graph=local_graph,
local_graph_map=local_graph_map,
pose_goals=pose_goals,
)
results_list.append(res)
bar.next()
bar.finish()
return pd.concat(results_list, sort=True)
def run_full_experiment(
graph: RobotGraph,
solver_params: dict,
riemannian_params: dict,
jacobian_params: dict,
q_goal: dict,
init: list,
use_limits: bool = False,
verbosity: int = 2,
do_bound_smoothing: bool = False,
local_algorithms: list = None,
riemannian_algorithms: list = None,
trigsimp: bool = False,
fabrik_max_iter: int = 200,
fabrik_tol: float = 1e-6,
do_fabrik: bool = True,
do_jacobian: bool = True,
use_symbolic: bool = True,
use_hess: bool = True,
local_graph=None,
local_graph_map=None,
pose_goals=False,
) -> pd.DataFrame:
"""
Run an experiment with a variety of solvers for a single goal specified by ee_goals.
:param graph: instance of RobotGraph describing our robot
:param solver_params: dictionary with local solver parameters
:param riemannian_params: dictionary with Riemannian solver parameters
:param jacobian_params: dictionary with Jacobian solver parameters
:param q_goal: angular configuration specifying end-effector goals in dictionary form
:param init: a list of angular configurations to try as initial points
:param use_limits: boolean indicating whether to use the angular limits in graph.robot
:param verbosity: integer representing solver verbosity (0 to 2)
:param bound_smoothing: boolean indicating whether to initialize the Riemannian solver with bound smoothing (only
makes sense for a comparison with a single init)
:param local_algorithms: list of local algorithms to use with local_solver (e.g., "L-BFGS-B", "TNC")
:param riemannian_algorithms: list of algorithms to use with the riemannian solver
(e.g., "TrustRegions", "ConjugateGradient")
:param trigsimp: boolean indicating whether to use sympy.trigsimp on the local solver's cost function.
:param fabrik_max_iter: number of iterations for the FABRIK solver
:param fabrik_tol: tolerance for fabrik
:param do_fabrik: whether FABRIK should be run
:param do_jacobian: whether the Jacobian-based method should be run
:return: pd.DataFrame with all results
"""
if local_algorithms is None:
if use_limits:
local_algorithms = ["L-BFGS-B", "TNC", "SLSQP", "trust-constr"]
else:
local_algorithms = ["BFGS", "CG", "Newton-CG", "trust-exact"]
if riemannian_algorithms is None:
riemannian_algorithms = ["TrustRegions", "ConjugateGradient"]
results_list = [] # Stores all data frames to be eventually merged
# is_revolute3d = type(graph.robot) in (Revolute3dChain, Revolute3dTree)
is_revolute3d = type(graph.robot) is RobotRevolute
is_spherical = type(graph.robot) is RobotSpherical
is_planar = not (is_revolute3d or is_spherical)
# Set end effector goals from q_goal
ee_goals = {}
for ee in graph.robot.end_effectors:
if is_revolute3d:
ee_p = ee[0] if "p" in ee[0] else ee[1]
ee_goals[ee_p] = graph.robot.get_pose(q_goal, ee_p)
else:
ee_goals[ee[0]] = graph.robot.get_pose(q_goal, ee[0])
# Used by FABRIK
ee_goals_points = {}
for ee in graph.robot.end_effectors:
ee_goals_points[ee[0]] = graph.robot.get_pose(q_goal, ee[0]).trans
if pose_goals and not is_revolute3d:
ee_goals_points[ee[1]] = graph.robot.get_pose(q_goal, ee[1]).trans
elif pose_goals and is_revolute3d:
ee_goals_points[ee[1]] = (
graph.robot.get_pose(q_goal, ee[0])
.dot(trans_axis(graph.robot.axis_length, "z"))
.trans
)
# Deal with the local graph object (for spherical case)
if local_graph is None:
local_graph = graph
if pose_goals and is_planar:
ee_goals_local = {
key: graph.robot.get_pose(q_goal, key) for key in ee_goals_points
}
else:
ee_goals_local = ee_goals
ee_goals_local_eval = ee_goals
spherical_to_revolute_case = False
else:
if pose_goals:
ee_goals_local = {
local_graph_map[key]: graph.robot.get_pose(q_goal, key)
for key in ee_goals_points
}
ee_goals_local_eval = {
local_graph_map[ee[0]]: graph.robot.get_pose(q_goal, ee[0])
for ee in graph.robot.end_effectors
}
spherical_to_revolute_case = True
if len(local_algorithms) != 0:
use_q_in_cost = pose_goals and not spherical_to_revolute_case
local_solver = LocalSolver(solver_params)
if is_revolute3d or spherical_to_revolute_case:
local_solver.set_revolute_cost_function(
local_graph.robot,
ee_goals_local,
local_graph.robot.lb.keys(),
pose_cost=use_q_in_cost,
)
elif use_symbolic:
local_solver.set_symbolic_cost_function(
local_graph.robot,
ee_goals_local,
local_graph.robot.lb.keys(),
use_trigsimp=trigsimp,
use_hess=use_hess,
pose_cost=use_q_in_cost,
)
elif is_planar:
local_solver.set_procedural_cost_function(
local_graph.robot,
ee_goals_local,
pose_cost=False,
do_grad_and_hess=True,
)
else:
local_solver.set_procedural_cost_function(
local_graph.robot, ee_goals_local, pose_cost=use_q_in_cost
)
for algorithm in local_algorithms:
if is_revolute3d or spherical_to_revolute_case:
solve_fn = run_local_revolute_experiment
elif is_planar:
solve_fn = run_local_planar_experiment
local_solver.params["solver"] = algorithm
res_df = solve_fn(
local_graph,
ee_goals_local_eval,
local_solver,
-1,
init[0],
use_limits=use_limits,
use_hess=use_hess,
pose_goals=use_q_in_cost,
)
res_df["Solver"] = algorithm
results_list.append(res_df)
# Set up Riemannian solver sweep inputs
if len(riemannian_algorithms) != 0:
G_goal = graph.realization(q_goal)
X_goal = pos_from_graph(G_goal)
D_goal = graph.distance_matrix_from_joints(q_goal)
if pose_goals:
T_goal = ee_goals
else:
T_goal = graph.robot.get_pose(q_goal, f"p{graph.robot.n}")
for algorithm in riemannian_algorithms:
if is_revolute3d:
solve_fn = run_riemannian_revolute_experiment
elif is_spherical:
solve_fn = run_riemannian_spherical_experiment
elif is_planar:
solve_fn = run_riemannian_planar_experiment
# print("Running Riemannian {:} solver...".format(algorithm))
riemannian_params["solver"] = algorithm
riemannian_solver = RiemannianSolver(graph, riemannian_params)
res_df = solve_fn(
graph,
riemannian_solver,
-1,
D_goal,
X_goal,
ee_goals,
init[0],
T_goal=T_goal,
use_limits=use_limits,
verbosity=verbosity,
do_bound_smoothing=False,
)
res_df["Solver"] = "Riemannian " + algorithm
results_list.append(res_df)
if do_bound_smoothing:
# print("Running Riemannian {:} solver with BS...".format(algorithm))
riemannian_params["solver"] = algorithm
riemannian_solver = RiemannianSolver(graph, riemannian_params)
res_df_bs = solve_fn(
graph,
riemannian_solver,
-1,
D_goal,
X_goal,
ee_goals,
init[0],
T_goal=T_goal,
use_limits=use_limits,
verbosity=verbosity,
do_bound_smoothing=True,
pose_goals=use_q_in_cost,
)
res_df_bs["Solver"] = "Riemannian " + algorithm + " + BS"
results_list.append(res_df_bs)
# Run FABRIK solver
if do_fabrik:
goals_fabrik, goals_index_fabrik = retrieve_goals(ee_goals_points)
res_fabrik = run_full_fabrik_sweep_experiment(
graph,
goals_fabrik,
goals_index_fabrik,
ee_goals=ee_goals,
initial=init,
use_limits=use_limits,
max_iteration=fabrik_max_iter,
verbosity=0,
tol=fabrik_tol,
)
res_fabrik["Solver"] = "FABRIK"
results_list.append(res_fabrik)
# Jacobian-based solver (dls-inverse)
if do_jacobian:
if is_revolute3d:
solve_fn = run_jacobian_revolute_experiment
elif is_spherical:
solve_fn = run_jacobian_spherical_experiment
elif is_planar:
solve_fn = run_jacobian_planar_experiment
local_solver_jac = LocalSolverJac(graph.robot)
res_jacobian = solve_fn(
graph, local_solver_jac, init[0], q_goal, use_limits=use_limits
)
# if not spherical_to_revolute_case: # Use the revolute formulation if a revolute equivalent is provided
# res_jacobian = run_full_jacobian_sweep_experiment(
# graph, ee_goals, init, q_goal, params=jacobian_params, use_limits=use_limits
# )
# else:
# q_goal_revolute = list_to_variable_dict(flatten(q_goal.values()))
# res_jacobian = run_full_jacobian_sweep_experiment(
# local_graph, ee_goals_local, init, q_goal_revolute, params=jacobian_params, use_limits=use_limits
# )
res_jacobian["Solver"] = "Jacobian"
results_list.append(res_jacobian)
# Join it all together
results = pd.concat(results_list, sort=True)
results["Goal"] = str(q_goal) # TODO: is there a better way to store the goal?
return results
def process_experiment(data: pd.DataFrame, pos_threshold=0.01, rot_threshold=0.01):
# Summarize angular constraint violation and squared end-effector error
for algorithm in data["Solver"].unique():
data_alg = data[data["Solver"] == algorithm]
successes = (
(data_alg["Pos Error"] < pos_threshold)
& (data_alg["Rot Error"] < rot_threshold)
& (data_alg["Limits Violated"] == False)
)
print("Solver: {:}".format(algorithm))
print(data_alg[successes]["Pos Error"].describe())
print(data_alg[successes]["Rot Error"].describe())
print("Success rate over {:} runs: ".format(data_alg["Pos Error"].count()))
print(
100
* data_alg[successes]["Pos Error"].count()
/ data_alg["Pos Error"].count()
)
print(data_alg[successes]["Runtime"].mean())
def run_riemannian_revolute_experiment(
graph: RobotGraph,
solver: RiemannianSolver,
n_per_dim: int,
D_goal,
Y_goal,
ee_goals: dict,
Y_init: dict,
T_goal=None,
use_limits: bool = False,
verbosity=2,
do_bound_smoothing: bool = False,
pose_goals: bool = False,
) -> pd.DataFrame:
"""
:param graph:
:param solver:
:param n_per_dim:
:param D_goal:
:param X_goal:
:param ee_goals:
:param T_goal:
:param init:
:param use_limits:
:param verbosity:
:param bound_smoothing:
:return:
"""
# axis length determines q node distances
axis_len = graph.robot.axis_length
# Determine align indices
align_ind = list(np.arange(graph.dim + 1))
for name in ee_goals.keys():
align_ind.append(graph.node_ids.index(name))
# Set known positions
goals = {}
for key in ee_goals:
goals[key] = ee_goals[key].trans
goals["q" + key[1:]] = ee_goals[key].dot(trans_axis(axis_len, "z")).trans
G = graph.complete_from_pos(goals)
# Adjacency matrix
omega = adjacency_matrix_from_graph(G)
init_angles = list_to_variable_dict(Y_init)
G_init = graph.realization(init_angles)
Y_init = pos_from_graph(G_init)
# Set bounds if using bound smoothing
bounds = None
if do_bound_smoothing:
lb, ub = bound_smoothing(G) # will take goals and jli
bounds = (lb, ub)
# Solve problem
Y_opt, optlog = solver.solve_experiment_wrapper(
D_goal,
omega,
bounds=bounds,
X=Y_init,
use_limits=use_limits,
verbosity=verbosity,
)
f_x = optlog["final_values"]["f(x)"]
grad_norm = optlog["final_values"]["gradnorm"]
runtime = optlog["final_values"]["time"]
num_iters = optlog["final_values"]["iterations"]
# Check for linear/planar solutions in the 3D case, pad with zeros to fix
if Y_opt.shape[1] < graph.dim:
Y_opt = np.hstack(
[Y_opt, np.zeros((Y_opt.shape[0], graph.dim - Y_opt.shape[1]))]
)
# Get solution config
R, t = best_fit_transform(Y_opt[align_ind, :], Y_goal[align_ind, :])
P_e = (R @ Y_opt.T + t.reshape(graph.dim, 1)).T
G_e = graph_from_pos(P_e, graph.node_ids)
q_sol = graph.robot.joint_variables(G_e, T_goal)
# If limits are used check for angle violations
limit_violations = list_to_variable_dict(graph.robot.n * [0])
limits_violated = False
if use_limits:
for key in graph.robot.limited_joints:
limit_violations[key] = max(
graph.robot.lb[key] - q_sol[key], q_sol[key] - graph.robot.ub[key]
)
if limit_violations[key] > 0.01 * graph.robot.ub[key]:
limits_violated = True
# if limits_violated:
# print("--------------------------")
# print("Method: Riemannian")
# print(
# "Angle violated! \n Lower bounds: {:} \n Upper bounds: {:}".format(
# graph.robot.lb, graph.robot.ub
# )
# )
# print("q_sol: {:}".format(q_sol))
# print("--------------------------")
# Calculate final error
D_sol = graph.distance_matrix_from_joints(q_sol)
e_D = omega * (np.sqrt(D_sol) - np.sqrt(D_goal))
max_dist_error = abs(max(e_D.min(), e_D.max(), key=abs))
err_pos = 0.0
err_rot = 0.0
for key in ee_goals:
T_sol = graph.robot.get_pose(list_to_variable_dict(q_sol), key)
T_sol.rot.as_matrix()[0:3, 0:2] = ee_goals[key].rot.as_matrix()[0:3, 0:2]
err_pos += norm(ee_goals[key].trans - T_sol.trans)
# err_rot += norm((ee_goals[key].rot.dot(T_sol.rot.inv())).log())
z1 = ee_goals[key].rot.as_matrix()[0:3, -1]
z2 = T_sol.rot.as_matrix()[0:3, -1]
err_rot += safe_arccos(z1.dot(z2))
columns = [
"Init.",
"Goals",
"f(x)",
"Gradient Norm",
"Iterations",
"Runtime",
"Solution",
"Solution Config",
"Pos Error",
"Rot Error",
"Limit Violations",
"Limits Violated",
"Max Dist Error",
]
data = dict(
zip(
columns,
[
[Y_init],
[ee_goals],
[f_x],
[grad_norm],
[num_iters],
[runtime],
[Y_opt],
[q_sol],
[err_pos],
[err_rot],
[limit_violations],
[limits_violated],
[max_dist_error],
],
)
)
results = pd.DataFrame(data)
results["Bound Smoothing"] = do_bound_smoothing
return results
def run_local_revolute_experiment(
graph: RobotGraph,
ee_goals: dict,
solver: LocalSolver,
n_per_dim: int,
init: list,
use_limits=False,
use_hess=True,
pose_goals=False,
) -> pd.DataFrame:
"""
:param graph:
:param solver: LocalSolver object with cost-function pre-set
:param n_per_dim:
:param init: list of specific angle combinations to try
:param angle_tol: tolerance on angle
:return:
"""
problem_params = {}
if use_limits:
problem_params["angular_limits"] = graph.robot.ub # Assumes symmetrical limits
problem_params["initial_guess"] = list_to_variable_dict(init)
results = solver.solve(graph, problem_params)
solutions = results.x
# Wrap to within [-pi, pi]
solutions = [wraptopi(val) for val in solutions]
q_sol = list_to_variable_dict(solutions)
# If limits are used check for angle violations
limit_violations = list_to_variable_dict(graph.robot.n * [0])
limits_violated = False
if use_limits:
for key in graph.robot.limited_joints:
limit_violations[key] = max(
graph.robot.lb[key] - q_sol[key], q_sol[key] - graph.robot.ub[key]
)
if limit_violations[key] > 0.01 * graph.robot.ub[key]:
limits_violated = True
# if limits_violated:
# print("--------------------------")
# print("Method: Local")
# print(
# "Angle violated! \n Lower bounds: {:} \n Upper bounds: {:}".format(
# graph.robot.lb, graph.robot.ub
# )
# )
# print("q_sol: {:}".format(q_sol))
# print("--------------------------")
err_pos = 0.0
err_rot = 0.0
for key in ee_goals:
T_sol = graph.robot.get_pose(list_to_variable_dict(q_sol), key)
err_pos += norm(ee_goals[key].trans - T_sol.trans)
z1 = ee_goals[key].rot.as_matrix()[0:3, -1]
z2 = T_sol.rot.as_matrix()[0:3, -1]
err_rot += safe_arccos(z1.dot(z2))
# T_sol.rot.as_matrix()[0:3,0:2] = ee_goals[key].rot.as_matrix()[0:3,0:2]
# err_rot += norm((ee_goals[key].rot.dot(T_sol.rot.inv())).log())
# print(ee_goals)
# print(T_sol)
# print("\n")
grad_norm = norm(solver.grad(results.x))
if use_hess:
smallest_eig = min(np.linalg.eigvalsh(solver.hess(results.x).astype(float)))
runtime = results.runtime
num_iters = results.nit
columns = [
"Init.",
"Goals",
"Iterations",
"Runtime",
"Solution Config",
"Pos Error",
"Rot Error",
"Limit Violations",
"Limits Violated",
]
data = dict(
zip(
columns,
[
[init],
[ee_goals],
[num_iters],
[runtime],
[q_sol],
[err_pos],
[err_rot],
[limit_violations],
[limits_violated],
],
)
)
return pd.DataFrame(data)
def run_riemannian_planar_experiment(
graph: RobotGraph,
solver: RiemannianSolver,
n_per_dim: int,
D_goal,
Y_goal,
ee_goals: dict,
q_init: dict,
T_goal=None,
use_limits: bool = False,
verbosity=2,
do_bound_smoothing: bool = False,
pose_goals: bool = False,
) -> pd.DataFrame:
"""
:param graph:
:param solver:
:param n_per_dim:
:param D_goal:
:param X_goal:
:param ee_goals:
:param T_goal:
:param init:
:param use_limits:
:param verbosity:
:param bound_smoothing:
:return:
"""
# Determine align indices
align_ind = list(np.arange(graph.dim + 1))
for name in ee_goals.keys():
align_ind.append(graph.node_ids.index(name))
# Set known positions
# G = graph.complete_from_pos(ee_goals)
G = graph.complete_from_pose_goal(ee_goals)
# Adjacency matrix
omega = adjacency_matrix_from_graph(G)
q_init = list_to_variable_dict(q_init)
G_init = graph.realization(q_init)
Y_init = pos_from_graph(G_init)
# Set bounds if using bound smoothing
bounds = None
if do_bound_smoothing:
lb, ub = bound_smoothing(G) # will take goals and jli
bounds = (lb, ub)
# Solve problem
Y_opt, optlog = solver.solve_experiment_wrapper(
D_goal,
omega,
bounds=bounds,
X=Y_init,
use_limits=use_limits,
verbosity=verbosity,
)
f_x = optlog["final_values"]["f(x)"]
grad_norm = optlog["final_values"]["gradnorm"]
runtime = optlog["final_values"]["time"]
num_iters = optlog["final_values"]["iterations"]
# Check for linear/planar solutions in the 3D case, pad with zeros to fix
if Y_opt.shape[1] < graph.dim:
Y_opt = np.hstack(
[Y_opt, np.zeros((Y_opt.shape[0], graph.dim - Y_opt.shape[1]))]
)
# Get solution config
R, t = best_fit_transform(Y_opt[align_ind, :], Y_goal[align_ind, :])
P_e = (R @ Y_opt.T + t.reshape(graph.dim, 1)).T
G_e = graph_from_pos(P_e, graph.node_ids)
q_sol = graph.robot.joint_variables(G_e)
# If limits are used check for angle violations
limit_violations = list_to_variable_dict(graph.robot.n * [0])
limits_violated = False
if use_limits:
for key in q_sol:
limit_violations[key] = max(
graph.robot.lb[key] - q_sol[key], q_sol[key] - graph.robot.ub[key]
)
if limit_violations[key] > 0.01 * graph.robot.ub[key]:
limits_violated = True
# Calculate final error
D_sol = graph.distance_matrix_from_joints(q_sol)
e_D = omega * (np.sqrt(D_sol) - np.sqrt(D_goal))
max_dist_error = abs(max(e_D.min(), e_D.max(), key=abs))
err_pos = 0
err_rot = 0
for key in ee_goals:
T_sol = graph.robot.get_pose(list_to_variable_dict(q_sol), key)
err_pos += norm(ee_goals[key].trans - T_sol.trans)
err_rot += safe_arccos(
(ee_goals[key].rot.dot(T_sol.rot.inv())).as_matrix()[0, 0]
)
data = dict(
[
("Init.", [Y_init]),
("Goals", [ee_goals]),
("f(x)", [f_x]),
("Gradient Norm", [grad_norm]),
("Iterations", [num_iters]),
("Runtime", [runtime]),
("Solution", [Y_opt]),
("Solution Config", [q_sol]),
("Pos Error", [err_pos]),
("Rot Error", [err_rot]),
("Limit Violations", [limit_violations]),
("Limits Violated", [limits_violated]),
("Max Dist Error", [max_dist_error]),
]
)
results = pd.DataFrame(data)
results["Bound Smoothing"] = do_bound_smoothing
return results
def run_riemannian_spherical_experiment(
graph: RobotGraph,
solver: RiemannianSolver,
n_per_dim: int,
D_goal,
Y_goal,
ee_goals: dict,
q_init: dict,
T_goal=None,
use_limits: bool = False,
verbosity=2,
do_bound_smoothing: bool = False,
pose_goals: bool = False,
) -> pd.DataFrame:
"""
:param graph:
:param solver:
:param n_per_dim:
:param D_goal:
:param X_goal:
:param ee_goals:
:param T_goal:
:param init:
:param use_limits:
:param verbosity:
:param bound_smoothing:
:return:
"""
# Determine align indices
align_ind = list(np.arange(graph.dim + 1))
for name in ee_goals.keys():
align_ind.append(graph.node_ids.index(name))
# Set known positions
# G = graph.complete_from_pos(ee_goals)
G = graph.complete_from_pose_goal(ee_goals)
# Adjacency matrix
omega = adjacency_matrix_from_graph(G)
q_init = list_to_variable_dict_spherical(q_init, in_pairs=True)
G_init = graph.realization(q_init)
Y_init = pos_from_graph(G_init)
# Set bounds if using bound smoothing
bounds = None
if do_bound_smoothing:
lb, ub = bound_smoothing(G) # will take goals and jli
bounds = (lb, ub)
# Solve problem
Y_opt, optlog = solver.solve_experiment_wrapper(
D_goal,
omega,
bounds=bounds,
X=Y_init,
use_limits=use_limits,
verbosity=verbosity,
)
f_x = optlog["final_values"]["f(x)"]
grad_norm = optlog["final_values"]["gradnorm"]
runtime = optlog["final_values"]["time"]
num_iters = optlog["final_values"]["iterations"]
# Check for linear/planar solutions in the 3D case, pad with zeros to fix
if Y_opt.shape[1] < graph.dim:
Y_opt = np.hstack(
[Y_opt, np.zeros((Y_opt.shape[0], graph.dim - Y_opt.shape[1]))]
)
# Get solution config
R, t = best_fit_transform(Y_opt[align_ind, :], Y_goal[align_ind, :])
P_e = (R @ Y_opt.T + t.reshape(graph.dim, 1)).T
G_e = graph_from_pos(P_e, graph.node_ids)
q_sol = graph.robot.joint_variables(G_e)
# If limits are used check for angle violations
limit_violations = list_to_variable_dict(graph.robot.n * [0])
limits_violated = False
if use_limits:
for key in q_sol:
limit_violations[key] = max(
graph.robot.lb[key] - q_sol[key][1], q_sol[key][1] - graph.robot.ub[key]
)
if limit_violations[key] > 0.01 * graph.robot.ub[key]:
limits_violated = True
# Calculate final error
D_sol = graph.distance_matrix_from_joints(q_sol)
e_D = omega * (np.sqrt(D_sol) - np.sqrt(D_goal))
max_dist_error = abs(max(e_D.min(), e_D.max(), key=abs))
err_pos = 0
err_rot = 0
for key in ee_goals:
T_sol = graph.robot.get_pose(q_sol, key)
err_pos += norm(ee_goals[key].trans - T_sol.trans)
z1 = ee_goals[key].rot.as_matrix()[0:3, -1]
z2 = graph.robot.get_pose(q_sol, key).rot.as_matrix()[0:3, -1]
err_rot += safe_arccos(z1.dot(z2))
data = dict(
[
("Init.", [Y_init]),
("Goals", [ee_goals]),
("f(x)", [f_x]),
("Gradient Norm", [grad_norm]),
("Iterations", [num_iters]),
("Runtime", [runtime]),
("Solution", [Y_opt]),
("Solution Config", [q_sol]),
("Pos Error", [err_pos]),
("Rot Error", [err_rot]),
("Limit Violations", [limit_violations]),
("Limits Violated", [limits_violated]),
("Max Dist Error", [max_dist_error]),
]
)
results = pd.DataFrame(data)
import sklearn
import numpy as np
import pandas as pd
from sklearn.model_selection import StratifiedShuffleSplit
#set path to necessary data
acid_path = r'/Users/matthewholland/OneDrive/Oxford/Amide Bond Formation/Features/acid_features.csv'
amine_path = r'/Users/matthewholland/OneDrive/Oxford/Amide Bond Formation/Features/amine_features.csv'
catalyst_path = r'/Users/matthewholland/OneDrive/Oxford/Amide Bond Formation/Features/catalyst_features.csv'
data_path = r'/Users/matthewholland/OneDrive/Oxford/Amide Bond Formation/Data/Boronic Acid Database.xlsx'
partial_charge_path = r'/Users/matthewholland/OneDrive/Oxford/Amide Bond Formation/Data/boron_partial_charges_xtb.csv'
#Import Data
acid_features = pd.read_csv(acid_path)
amine_features = pd.read_csv(amine_path)
catalyst_features = pd.read_csv(catalyst_path)
partial_charges = pd.read_csv(partial_charge_path, header=None)
database = pd.read_excel(data_path)
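# Hypothetical sketch only: StratifiedShuffleSplit is imported above but not used in the
# code shown here. The helper below shows one way the database could be split while
# preserving class balance; the label column passed as label_col is a placeholder, not a
# column known to exist in the spreadsheet.
def _demo_stratified_split(df, label_col):
    splitter = StratifiedShuffleSplit(n_splits=1, test_size=0.2, random_state=42)
    train_idx, test_idx = next(splitter.split(df, df[label_col]))
    return df.iloc[train_idx], df.iloc[test_idx]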
"""
"""
__version__='192.168.3.11.dev1'
import sys
import os
import logging
import pandas as pd
import re
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
logger = logging.getLogger('PT3S')
try:
from PT3S import Rm
except ImportError:
logger.debug("{0:s}{1:s}".format('ImportError: ','from PT3S import Rm - trying import Rm instead ... maybe pip install -e . is active ...'))
import Rm
try:
from PT3S import Lx
except ImportError:
logger.debug("{0:s}{1:s}".format('ImportError: ','from PT3S import Lx - trying import Lx instead ... maybe pip install -e . is active ...'))
import Lx
def addResVecToDfAlarmEreignisse(
dfAlarmEreignisse
,TCsLDSRes1=pd.DataFrame()
,TCsLDSRes2=pd.DataFrame()
):
"""
dfAlarmEreignisse:
Nr: sequential number (built according to NrBy and NrAsc)
tA: start time
tE: end time
tD: duration of the alarm
ZHKNR: ZHKNR (the chronologically first one if the alarm spans several ZHKNRs)
tD_ZHKNR: lifetime of the ZHKNR; x-annotations at start/end if the ZHK starts at the beginning of Res12 / is still active at the end of Res12; '-1' if the lifetime could not be determined
ZHKNRn: sorted list of the alarm's ZHKNRs; one of them is ZHKNR, typically the first in the list
LDSResBaseType: SEG or Druck (pressure)
OrteIDs: location IDs of the alarm
Orte: short form of the alarm's OrteIDs
Ort: the first location in Orte
SEGName: segment to which the first location of the alarm belongs
DIVPipelineName:
Voralarm: pre-alarm determined for this alarm; -1 if no pre-alarm could be found in Res12
Type: type of the control volume; e.g. p-p for complete flow balances; '' if no type could be determined
Name: name of the balancing area
NrSD: running alarm number per BaseType
NrName: running alarm number per Name
NrSEGName: running alarm number per SEGName
AlarmEvent: AlarmEvent object
BZKat: operating-state category of the alarm
Returns:
dfAlarmEreignisse with 2 Cols added:
resIDBase: the first OrtID from OrteIDs
dfResVec: the resVec (result vector) of the alarm
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
dfAlarmEreignisse['resIDBase']=dfAlarmEreignisse['OrteIDs'].apply(lambda x: x[0])
### determine the result vector for all locations
dfResVecs={}
dfResVecsLst=[]
for indexAlarm, rowAlarm in dfAlarmEreignisse.iterrows():
resIDBase=rowAlarm['resIDBase']
if resIDBase in dfResVecs.keys():
# resIDBase already processed
dfResVecsLst.append(dfResVecs[resIDBase])
continue
# determine columns based on resIDBase
ErgIDs=[resIDBase+ext for ext in Rm.ResChannelTypesAll]
IMDIErgIDs=['IMDI.'+ID for ID in ErgIDs] # each column could be present as 'IMDI.' instead of the "normal" form
ErgIDsAll=[*ErgIDs,*IMDIErgIDs]
# result columns
if rowAlarm['LDSResBaseType']=='SEG':
dfFiltered=TCsLDSRes1.filter(items=ErgIDsAll,axis=1)
else:
dfFiltered=TCsLDSRes2.filter(items=ErgIDsAll,axis=1)
# rename the result columns
colDct={}
for col in dfFiltered.columns:
m=re.search(Lx.pID,col)
colDct[col]=m.group('E')
dfFiltered.name=resIDBase
dfResVec=dfFiltered.rename(columns=colDct)
# store the result vector
dfResVecs[resIDBase]=dfResVec
dfResVecsLst.append(dfResVec)
logger.debug("{:s}resIDBase: {:50s} Anzahl gefundener Spalten in TCsLDSRes: {:d}".format(logStr, resIDBase, len(dfResVec.columns.to_list())))
dfAlarmEreignisse['dfResVec']=dfResVecsLst
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise e
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return dfAlarmEreignisse
def fGenAlarmVisTimeSpan(
tA
,tE
# all of the following values should have the same unit and be integer-valued in that unit
,timeSpan=pd.Timedelta('25 Minutes')
,timeRoundStr='1T'
,timeBoundaryMin=pd.Timedelta('3 Minutes')
,timeRef='A' # for alarms that are longer: start or end is displayed with timeSpan
):
"""
generates a time window in which an alarm is displayed for analysis purposes
tA, tE are the start and end of the alarm
these are rounded down (tA) and up (tE) using timeRoundStr
at least timeBoundaryMin should lie between the rounded times and tA/tE
if not, timeBoundaryMin is applied to tA/tE first and the result is then rounded
timeSpan is the desired minimum time span
alarms that are shorter are displayed with timeSpan
alarms that are longer: the start or the end is displayed with timeSpan
"""
# round the times down and up
timeStart=tA.floor(freq=timeRoundStr)
timeEnd=tE.ceil(freq=timeRoundStr)
# check that the rounded times keep the minimum margin
if tA-timeStart < timeBoundaryMin:
timeStart=tA-timeBoundaryMin
timeStart= timeStart.floor(freq=timeRoundStr)
if timeEnd-tE < timeBoundaryMin:
timeEnd=tE+timeBoundaryMin
timeEnd= timeEnd.ceil(freq=timeRoundStr)
# check the rounded times against the desired time span
timeLeft=timeSpan-(timeEnd-timeStart)
if timeLeft > pd.Timedelta('0 Seconds'): # the rounded alarm duration is shorter than timeSpan; timeSpan is displayed
timeStart=timeStart-timeLeft/2
timeStart= timeStart.floor(freq=timeRoundStr)
timeEnd=timeEnd+timeLeft/2
timeEnd= timeEnd.ceil(freq=timeRoundStr)
else:
# the rounded alarm duration is longer than timeSpan; A or E is displayed with timeSpan
if timeRef=='A':
timeM=tA.floor(freq=timeRoundStr)
else:
timeM=tE.ceil(freq=timeRoundStr)
timeStart=timeM-timeSpan/2
timeEnd=timeM+timeSpan/2
if timeEnd-timeStart > timeSpan:
timeEnd=timeStart+timeSpan
ZeitbereichSel=timeEnd-timeStart
if ZeitbereichSel <= pd.Timedelta('1 Minutes'):
bysecond=list(np.arange(0,60,1))
byminute=None
elif ZeitbereichSel <= pd.Timedelta('3 Minutes'):
bysecond=list(np.arange(0,60,5))
byminute=None
elif ZeitbereichSel > pd.Timedelta('3 Minutes') and ZeitbereichSel <= pd.Timedelta('5 Minutes'):
bysecond=list(np.arange(0,60,15))
byminute=None
elif ZeitbereichSel > pd.Timedelta('5 Minutes') and ZeitbereichSel <= pd.Timedelta('20 Minutes'):
bysecond=list(np.arange(0,60,30))
byminute=None
elif ZeitbereichSel > pd.Timedelta('20 Minutes') and ZeitbereichSel <= pd.Timedelta('30 Minutes'):
bysecond=None
byminute=list(np.arange(0,60,1))
else:
bysecond=None
byminute=list(np.arange(0,60,3))
return timeStart, timeEnd, byminute, bysecond
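# Illustrative sketch (not part of the original module): how the helper above might be
# called for a short alarm; the timestamps are made up. The byminute/bysecond lists look
# like tick-locator settings for plotting the alarm window, but that use is an assumption.
def _demoFGenAlarmVisTimeSpan():
    tA = pd.Timestamp('2021-01-01 10:07:13')
    tE = pd.Timestamp('2021-01-01 10:09:42')
    # a ~2.5 minute alarm is padded out to the default 25 minute span
    timeStart, timeEnd, byminute, bysecond = fGenAlarmVisTimeSpan(tA, tE)
    return timeStart, timeEnd, byminute, bysecond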
def rptAlarms(
pdfErgFile='rptAlarms.pdf'
,figsize=Rm.DINA2q
,dpi=Rm.dpiSize
,dfAlarmStatistik=pd.DataFrame() # one row per SEG; columns with alarm information for the SEG
,dfAlarmEreignisse=pd.DataFrame() # one row per alarm
import random
from collections import defaultdict
import numpy as np
import pandas as pd
import pyarrow as pa
import pytest
import ray
from ray.tests.conftest import * # noqa
from ray.data.block import BlockAccessor
from ray.data.tests.conftest import * # noqa
from ray.data._internal.push_based_shuffle import PushBasedShufflePlan
@pytest.mark.parametrize("use_push_based_shuffle", [False, True])
def test_sort_simple(ray_start_regular, use_push_based_shuffle):
ctx = ray.data.context.DatasetContext.get_current()
try:
original = ctx.use_push_based_shuffle
ctx.use_push_based_shuffle = use_push_based_shuffle
num_items = 100
parallelism = 4
xs = list(range(num_items))
random.shuffle(xs)
ds = ray.data.from_items(xs, parallelism=parallelism)
assert ds.sort().take(num_items) == list(range(num_items))
# Make sure we have rows in each block.
assert len([n for n in ds.sort()._block_num_rows() if n > 0]) == parallelism
assert ds.sort(descending=True).take(num_items) == list(
reversed(range(num_items))
)
assert ds.sort(key=lambda x: -x).take(num_items) == list(
reversed(range(num_items))
)
# Test empty dataset.
ds = ray.data.from_items([])
s1 = ds.sort()
assert s1.count() == 0
assert s1.take() == ds.take()
ds = ray.data.range(10).filter(lambda r: r > 10).sort()
assert ds.count() == 0
finally:
ctx.use_push_based_shuffle = original
@pytest.mark.parametrize("use_push_based_shuffle", [False, True])
def test_sort_partition_same_key_to_same_block(
ray_start_regular, use_push_based_shuffle
):
ctx = ray.data.context.DatasetContext.get_current()
try:
original = ctx.use_push_based_shuffle
ctx.use_push_based_shuffle = use_push_based_shuffle
num_items = 100
xs = [1] * num_items
ds = ray.data.from_items(xs)
sorted_ds = ds.repartition(num_items).sort()
# We still have 100 blocks
assert len(sorted_ds._block_num_rows()) == num_items
# Only one of them is non-empty
count = sum(1 for x in sorted_ds._block_num_rows() if x > 0)
assert count == 1
# That non-empty block contains all rows
total = sum(x for x in sorted_ds._block_num_rows() if x > 0)
assert total == num_items
finally:
ctx.use_push_based_shuffle = original
@pytest.mark.parametrize("num_items,parallelism", [(100, 1), (1000, 4)])
@pytest.mark.parametrize("use_push_based_shuffle", [False, True])
def test_sort_arrow(ray_start_regular, num_items, parallelism, use_push_based_shuffle):
ctx = ray.data.context.DatasetContext.get_current()
try:
original = ctx.use_push_based_shuffle
ctx.use_push_based_shuffle = use_push_based_shuffle
a = list(reversed(range(num_items)))
b = [f"{x:03}" for x in range(num_items)]
shard = int(np.ceil(num_items / parallelism))
offset = 0
dfs = []
while offset < num_items:
dfs.append(
pd.DataFrame(
{"a": a[offset : offset + shard], "b": b[offset : offset + shard]}
)
)
offset += shard
if offset < num_items:
dfs.append(pd.DataFrame({"a": a[offset:], "b": b[offset:]}))
# <NAME>
# python 3.7
"""
To calculate the extremes of the carbon fluxes based on carbon flux anomalies in gC.
The code is fairly flexible to pass multiple filters to the code.
Output:
* Saving the binaries of extremes
* Saving the TCE binaries at multiple lags [0-4 months)
"""
import os
import netCDF4 as nc4
import numpy as np
import pandas as pd
import datetime as dt
import seaborn as sns
import argparse
from scipy import stats
from functions import time_dim_dates, index_and_dates_slicing, norm, geo_idx, patch_with_gaps_and_eventsize
""" Arguments to input while running the python file
--percentile (-per) : percentile under consideration
looking at the negative/positive tail of gpp events: e.g. 1, 5, 10, 90, 95, 99
--th_type : Thresholds can be computed at each tail i.e. 'ind' or 'common'.
'common' means the threshold is computed on the modulus of the anomalies so that the events exceeding it represent the 'per' percentile
--sources (-src) : the models that you want to analyze, separated by hyphens or 'all' for all the models
--variable (-var) : the variable to analyze gpp/nep/npp/nbp
--window (wsize) : time window size in years
# Running: run calc_extremes.py -src cesm -var gpp
"""
print ("Last edit on May 08, 2020")
# The abriviation of the models that will be analyzed:
source_code = { 'cesm' : 'CESM2',
'can' : 'CanESM5',
'ipsl' : 'IPSL-CM6A-LR',
'bcc' : 'BCC-CSM2-MR',
'cnrn-e': 'CNRM-ESM2-1',
'cnrn-c': 'CNRM-CM6-1' }
parser = argparse.ArgumentParser()
parser.add_argument('--percentile' ,'-per' , help = "Threshold Percentile?" , type= int, default= 5 )
parser.add_argument('--th_type' ,'-th' , help = "Threshold Percentile?" , type= str, default= 'common' )
parser.add_argument('--sources' ,'-src' , help = "Which model(s) to analyse?" , type= str, default= 'all' )
parser.add_argument('--variable' ,'-var' , help = "variable? gpp/npp/nep/nbp,,,," , type= str, default= 'gpp' )
parser.add_argument('--window' ,'-wsize' , help = "window size (25 years)?" , type= int, default= 25 )
args = parser.parse_args()
# The inputs:
per = int (args.percentile)
th_type = str (args.th_type)
src = str (args.sources)
variable_run= str (args.variable)
window = int (args.window)
# Model(s) to analyze:
# --------------------
source_selected = []
if len(src.split('-')) >1:
source_selected = src.split('-')
elif src in ['all', 'a']:
source_selected = list(source_code.values() )
elif len(src.split('-')) == 1:
if src in source_code.keys():
source_selected = [source_code[src]]
else:
print (" Enter a valid source id")
#running : run calc_extremes.py -per 5 -var nbp -src cesm
# Reading the dataframe of the selected files
# -------------------------------------------
cori_scratch = '/global/cscratch1/sd/bharat/' # where the anomalies per slave rank are saved
in_path = '/global/homes/b/bharat/results/data_processing/' # to read the filters
#cmip6_filepath_head = '/global/homes/b/bharat/cmip6_data/CMIP6/'
cmip6_filepath_head = '/global/cfs/cdirs/m3522/cmip6/CMIP6/'
#web_path = '/project/projectdirs/m2467/www/bharat/'
web_path = '/global/homes/b/bharat/results/web/'
# exp is actually 'historical + ssp585' but saved as 'ssp585'
exp = 'ssp585'
# Common members per model
# ------------------------
common_members = {}
for source_run in source_selected:
common_members [source_run] = pd.read_csv (cori_scratch + 'add_cmip6_data/common_members/%s_%s_common_members.csv'%(source_run,exp),
header=None).iloc[:,0]
# The spreadsheet with all the available data of cmip 6
# -----------------------------------------------------
df_files = pd.read_csv(in_path + 'df_data_selected.csv')
temp = df_files.copy(deep = True)
# Saving the path of area and lf
filepath_areacella = {}
filepath_sftlf = {}
for s_idx, source_run in enumerate(source_selected):
filters = (temp['source_id'] == source_run) & (temp['variable_id'] == variable_run) # original Variable
filters_area = (temp['source_id'] == source_run) & (temp['variable_id'] == 'areacella') # areacella
filters_lf = (temp['source_id'] == source_run) & (temp['variable_id'] == 'sftlf') # land fraction
#passing the filters to the dataframe
df_tmp = temp[filters]
df_tmp_area = temp[filters_area]
df_tmp_lf = temp[filters_lf]
for member_run in common_members [source_run]:
if source_run == 'BCC-CSM2-MR':
filepath_area = "/global/homes/b/bharat/extra_cmip6_data/areacella_fx_BCC-CSM2-MR_hist-resIPO_r1i1p1f1_gn.nc"
filepath_lf = "/global/homes/b/bharat/extra_cmip6_data/sftlf_fx_BCC-CSM2-MR_hist-resIPO_r1i1p1f1_gn.nc"
else:
filters_area = (temp['variable_id'] == 'areacella') & (temp['source_id'] == source_run)
filters_lf = (temp['variable_id'] == 'sftlf') & (temp['source_id'] == source_run)
filepath_area = cmip6_filepath_head + "/".join(np.array(temp[filters_area].iloc[-1]))
filepath_lf = cmip6_filepath_head + "/".join(np.array(temp[filters_lf].iloc[-1]))
filepath_areacella [source_run] = filepath_area
filepath_sftlf [source_run] = filepath_lf
# Extracting the area and land fractions of different models
# ==========================================================
data_area = {}
data_lf = {}
for source_run in source_selected:
data_area [source_run] = nc4.Dataset (filepath_areacella[source_run]) . variables['areacella']
data_lf [source_run] = nc4.Dataset (filepath_sftlf [source_run]) . variables['sftlf']
# Saving the paths of anomalies
# hier. : source_id > member_id
# ------------------------------------
paths = {}
for source_run in source_selected:
paths[source_run] = {}
for source_run in source_selected:
for member_run in common_members [source_run]:
saved_ano = cori_scratch + 'add_cmip6_data/%s/%s/%s/%s/'%(source_run,exp,member_run,variable_run)
paths[source_run][member_run] = saved_ano
del saved_ano
# Reading and saving the data:
# ----------------------------
nc_ano = {}
for source_run in source_selected:
nc_ano[source_run] = {}
for source_run in source_selected:
for member_run in common_members [source_run]:
nc_ano[source_run][member_run] = nc4.Dataset(paths[source_run][member_run] + '%s_%s_%s_%s_anomalies_gC.nc'%(source_run,exp,member_run,variable_run))
# Arranging Time Array for plotting and calling
# --------------------------------------------
win_len = 12 * window #number of months in window years
total_years = 251 #years from 1850 to 2100
total_months= total_years * 12
dates_ar = time_dim_dates( base_date = dt.date(1850,1,1),
total_timestamps = 3012 )
start_dates = np.array( [dates_ar[i*win_len] for i in range(int(total_months/win_len))]) #list of start dates of 25 year window
end_dates = np.array( [dates_ar[i*win_len+win_len -1] for i in range(int(total_months/win_len))]) #list of end dates of the 25 year window
idx_yr_2100 = 3012 # upper open index 2100 from the year 1850 if the data is monthly i.e. for complete TS write ts[:3012]
idx_yr_2014 = 1980 # upper open index 2014 from the year 1850 if the data is monthly i.e. for complete TS write ts[:1980]
idx_yr_2099 = 3000 # upper open index 2099 from the year 1850 if the data is monthly i.e. for complete TS write ts[:3000]
# Initiation:
# -----------
def TS_Dates_and_Index (dates_ar = dates_ar,start_dates = start_dates, end_dates=end_dates ):
"""
Returns the TS of the dates and index of consecutive windows of len 25 years
Parameters:
-----------
dates_ar : an array of dates in datetime.date format
the dates are chosen from this array
start_dates: an array of start dates, the start date will decide the dates and index of the first entry for final time series for that window
end_dates: similar to start_dates but for end date
Returns:
--------
dates_win: a 2-d array with len of start dates/ total windows and each row containing the dates between start and end date
idx_dates_win : a 2-d array with len of start dates/ total windows and each row containing the index of dates between start and end date
"""
idx_dates_win = [] #indicies of time in 25yr windows
dates_win = [] #sel dates from time variables in win_len windows
for i in range(len(start_dates)):
idx_loc, dates_loc = index_and_dates_slicing(dates_ar,start_dates[i],end_dates[i]) # see functions.py
idx_dates_win . append (idx_loc)
dates_win . append (dates_loc)
return np.array(dates_win), np.array(idx_dates_win)
# Calling the function "ts_dates_and_index"; Universal for rest of the code
dates_win, idx_dates_win = TS_Dates_and_Index ()
# The saving the results in a dictionary
# --------------------------------------
Results = {}
for source_run in source_selected:
Results[source_run] = {}
for member_run in common_members [source_run]:
Results[source_run][member_run] = {}
# Calculation of thresholds (rth percentile at each tail):
# ------------------------------------------------------------
def Threshold_and_Binary_Ar(data = nc_ano[source_run][member_run].variables[variable_run][...], per = per):
"""
    In this method the rth percentile threshold is calculated at both tails of the pdf of anomalies...
    i.e. the same number of values is selected on each tail.
returns the global percentile based thresholds and binary arrays of consecutive windows
Parameters:
-----------
data : The anomalies whose threshold you want to calculate
Universal:
---------
start_dates, idx_dates_win, per
Returns:
--------
threshold_neg: the threshold for negative extremes; size = # windows
threshold_pos: the threshold for positive extremes; size = # windows
bin_ext_neg: the binary array 1's are extremes based on the threshold_neg; shape = same as data
bin_ext_pos: the binary array 1's are extremes based on the threshold_pos; shape = same as data
"""
thresholds_1= [] #thresholds for consecutive windows of defined size for a 'per' percentile
thresholds_2= [] #thresholds for consecutive windows of defined size for a '100-per' percentile
    bin_ext_neg = np.ma.zeros((data.shape)) #3d array to capture the binary extremes (1s) w.r.t. carbon loss events
    bin_ext_pos = np.ma.zeros((data.shape)) #3d array to capture the binary extremes (1s) w.r.t. carbon gain events
for i in range(len(start_dates)):
ano_loc = data[idx_dates_win[i][0]:idx_dates_win[i][-1]+1,:,:]
threshold_loc_1 = np.percentile(ano_loc[ano_loc.mask == False],per) # calculation of threshold for the local anomalies
thresholds_1 . append(threshold_loc_1)
threshold_loc_2 = np.percentile(ano_loc[ano_loc.mask == False],(100-per))
thresholds_2 . append(threshold_loc_2)
# Binary arrays:
if per <=50:
bin_ext_neg[idx_dates_win[i][0]:idx_dates_win[i][-1]+1,:,:] = ano_loc < threshold_loc_1
bin_ext_pos[idx_dates_win[i][0]:idx_dates_win[i][-1]+1,:,:] = ano_loc > threshold_loc_2
else:
bin_ext_pos[idx_dates_win[i][0]:idx_dates_win[i][-1]+1,:,:] = ano_loc > threshold_loc_1
bin_ext_neg[idx_dates_win[i][0]:idx_dates_win[i][-1]+1,:,:] = ano_loc < threshold_loc_2
# Thresholds for consecutive windows:
    if per <= 50:
        threshold_neg = np.ma.array(thresholds_1)
        threshold_pos = np.ma.array(thresholds_2)
    else:
        threshold_neg = np.ma.array(thresholds_2)
        threshold_pos = np.ma.array(thresholds_1)
return threshold_neg, threshold_pos, bin_ext_neg, bin_ext_pos
# Calculation of thresholds (rth percentile combines for both tails):
# ------------------------------------------------------------
def Threshold_and_Binary_Ar_Common(data = nc_ano[source_run][member_run].variables[variable_run][...], per = per ):
"""
    In this method the rth percentile threshold is calculated over both tails of the pdf of anomalies combined...
    i.e. the total number of elements on the left and right tails together makes up the rth percentile (Jakob 2014, Annex A2)...
    This is done by taking the modulus of the anomalies and then calculating the (100-r)th percentile threshold q
Negative extremes: anomalies < -q
Positive extremes: anomalies > q
Returns the global percentile based thresholds and binary arrays of consecutive windows
Parameters:
-----------
data : The anomalies whose threshold you want to calculate
Universal:
---------
start_dates, idx_dates_win, per
Returns:
--------
threshold_neg: the threshold for negative extremes; size = # windows
threshold_pos: the threshold for positive extremes; size = # windows
bin_ext_neg: the binary array 1's are extremes based on the threshold_neg; shape = same as data
bin_ext_pos: the binary array 1's are extremes based on the threshold_pos; shape = same as data
"""
thresholds_p= [] #thresholds for consecutive windows of defined size for a 'per' percentile
thresholds_n= [] #thresholds for consecutive windows of defined size for a '100-per' percentile
    bin_ext_neg = np.ma.zeros((data.shape)) #3d array to capture the binary extremes (1s) w.r.t. carbon loss events
    bin_ext_pos = np.ma.zeros((data.shape)) #3d array to capture the binary extremes (1s) w.r.t. carbon gain events
assert per <50, "Percentile must be less than 50"
for i in range(len(start_dates)):
ano_loc = data[idx_dates_win[i][0]:idx_dates_win[i][-1]+1,:,:]
threshold_loc = np.percentile(np.abs(ano_loc[ano_loc.mask == False]), (100-per) ) # calculation of threshold for the local anomalies
        # The (100-per)th percentile is used because, after taking the modulus, the negative extremes fall on the right-hand tail together with the positive ones
thresholds_p . append(threshold_loc)
thresholds_n . append(-threshold_loc)
# Binary arrays:
# --------------
bin_ext_neg[idx_dates_win[i][0]:idx_dates_win[i][-1]+1,:,:] = ano_loc < -threshold_loc
bin_ext_pos[idx_dates_win[i][0]:idx_dates_win[i][-1]+1,:,:] = ano_loc > threshold_loc
# Thresholds for consecutive windows:
# -----------------------------------
threshold_neg = np.ma.array(thresholds_n)
threshold_pos = np.ma.array(thresholds_p)
return threshold_neg, threshold_pos, bin_ext_neg, bin_ext_pos
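# Illustrative sketch (defined only, never called in this script): shows on a
# small synthetic masked array how the common-tail threshold above works, i.e.
# the (100-per)th percentile of |anomalies|, so that both tails together hold
# roughly `per` percent of the values. All numbers and names below are
# assumptions for demonstration only, not part of the analysis.
def _demo_common_tail_threshold(per=5):
    rng = np.random.RandomState(0)
    data = rng.normal(0, 1, size=(120, 4, 4))        # fake monthly anomalies
    data[:, 0, 0] = np.nan                           # pretend an ocean cell
    ano = np.ma.masked_invalid(data)
    q = np.percentile(np.abs(ano[ano.mask == False]), 100 - per)
    bin_neg = ano < -q                               # negative extremes
    bin_pos = ano > q                                # positive extremes
    frac = (bin_neg.sum() + bin_pos.sum()) / float(ano.count())
    return q, frac                                   # frac is ~ per/100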
limits = {}
limits ['min'] = {}
limits ['max'] = {}
limits ['min']['th_pos'] = 0
limits ['max']['th_pos'] = 0
limits ['min']['th_neg'] = 0
limits ['max']['th_neg'] = 0
p =0
for source_run in source_selected:
for member_run in common_members [source_run]:
p = p+1
# threshold at each tail
if th_type == 'ind':
A,B,C,D = Threshold_and_Binary_Ar(data = nc_ano[source_run][member_run].variables[variable_run][...], per = per )
if th_type == 'common':
A,B,C,D = Threshold_and_Binary_Ar_Common(data = nc_ano[source_run][member_run].variables[variable_run][...], per = per )
Results[source_run][member_run]['th_neg'] = A
Results[source_run][member_run]['th_pos'] = B
Results[source_run][member_run]['bin_ext_neg'] = C
Results[source_run][member_run]['bin_ext_pos'] = D
Results[source_run][member_run]['ts_th_neg'] = np.array([np.array([A[i]]*win_len) for i in range(len(A))]).flatten()
Results[source_run][member_run]['ts_th_pos'] = np.array([np.array([B[i]]*win_len) for i in range(len(B))]).flatten()
# Checking
if p%3 == 0: print ("Calculating Thresholds ......")
elif p%3 == 1: print ("Calculating Thresholds ....")
else: print ("Calculating Thresholds ..")
del A,B,C,D
# Saving the binary data
# ----------------------
save_binary_common = 'n'
if save_binary_common in ['y','yy','Y','yes']:
"""
    To save the binary matrix of the extremes so that the location and duration of the extremes can be identified.
    If you want to save the binary matrix of extremes as nc files, set the flag above to 'y';
    this was done so that it could be used as input to the attribution analysis.
"""
for source_run in source_selected:
for member_run in common_members [source_run]:
path_TCE = cori_scratch + 'add_cmip6_data/%s/%s/%s/%s_TCE/'%(source_run,exp,member_run,variable_run)
# Check if the directory 'path_TCE' already exists? If not, then create one:
if os.path.isdir(path_TCE) == False:
os.makedirs(path_TCE)
for ext_type in ['neg','pos']:
print("Saving the binary matrix for %s,%s,%s"%(source_run,member_run,ext_type))
with nc4.Dataset( path_TCE + '%s_%s_bin_%s.nc'%(source_run,member_run,ext_type), mode = 'w') as dset:
dset .createDimension( "time" ,size = nc_ano[source_run][member_run].variables['time'].size)
dset .createDimension( "lat" ,size = nc_ano[source_run][member_run].variables['lat'].size)
dset .createDimension( "lon" ,size = nc_ano[source_run][member_run].variables['lon'].size)
t = dset.createVariable(varname = "time" ,datatype = float, dimensions = ("time"), fill_value = 1e+36)
x = dset.createVariable(varname = "lon" ,datatype = float, dimensions = ("lon") , fill_value = 1e+36)
y = dset.createVariable(varname = "lat" ,datatype = float, dimensions = ("lat") , fill_value = 1e+36)
z = dset.createVariable(varname = variable_run +'_bin' ,datatype = float, dimensions = ("time","lat","lon"),fill_value = 1e+36) #varible = gpp_bin_ext
t.axis = "T"
x.axis = "X"
y.axis = "Y"
t[...] = nc_ano[source_run][member_run].variables['time'] [...]
x[...] = nc_ano[source_run][member_run].variables['lon'][...]
y[...] = nc_ano[source_run][member_run].variables['lat'][...]
z[...] = Results[source_run][member_run]['bin_ext_%s'%ext_type]
z.missing_value = 1e+36
                    z.standard_name = variable_run+" binaries for %s extremes based on %dth percentile"%(ext_type,per)
z.units = "0,1"
x.units = nc_ano[source_run][member_run].variables['lon'].units
x.missing_value = 1e+36
x.setncattr ("standard_name",nc_ano[source_run][member_run].variables['lon'].standard_name)
y.units = nc_ano[source_run][member_run].variables['lat'].units
y.missing_value = 1e+36
y.setncattr ("standard_name",nc_ano[source_run][member_run].variables['lat'].standard_name)
t.units = nc_ano[source_run][member_run].variables['time'].units
t.setncattr ("calendar", nc_ano[source_run][member_run].variables['time'].calendar)
t.setncattr ("standard_name", nc_ano[source_run][member_run].variables['time'].standard_name)
t.missing_value = 1e+36
# TCE: Calculations:
# ------------------
lags_TCE = np.asarray([0,1,2,3,4], dtype = int)
def Binary_Mat_TCE_Win (bin_ar, win_start_year=2000,lags = lags_TCE, land_frac= data_lf [source_run]):
"""
Aim:
----
    To save the binary matrix of the Time Continuous Extremes (TCEs) so that the location and duration of the extremes can be identified.
    Returns:
    --------
    bin_TCE_01s : binary values of the extremes within a TCE at qualified locations, with gap months kept as 0 [highlights the extreme months only]
    bin_TCE_1s  : as above but with the gap months also set to 1 [selects the full TCE span with only 1s]
    bin_TCE_len : the length of each TCE event, stored at the trigger (start) location
    shape : these matrices are of shape (lags, time, lat, lon), e.g. (5, 300, 192, 288) i.e. lags (0-4 months), time (300 months or 25 years, e.g. 2000-24), lat (192) and lon (288).
"""
from functions import create_seq_mat
for i,date in enumerate(start_dates):
if date.year in [win_start_year]:
start_yr_idx = i
data = bin_ar[start_yr_idx*win_len: (start_yr_idx+1)*win_len]
del bin_ar
bin_TCE_1s = np.ma.zeros((len(lags), data.shape[0],data.shape[1],data.shape[2]))
bin_TCE_01s = np.ma.zeros((len(lags), data.shape[0],data.shape[1],data.shape[2]))
bin_TCE_len = np.ma.zeros((len(lags), data.shape[0],data.shape[1],data.shape[2]))
for lag in lags:
for lat_i in range( data.shape[1] ):
for lon_i in range( data.shape[2] ):
if land_frac[...][lat_i,lon_i] != 0:
#print lag, lat_i, lon_i
try:
tmp = patch_with_gaps_and_eventsize (data[:,lat_i,lon_i], max_gap =2, min_cont_event_size=3, lag=lag)
for idx, trig in enumerate (tmp[1]):
bin_TCE_01s [lag, trig:trig+len(tmp[0][idx]), lat_i, lon_i] = tmp[0][idx]
bin_TCE_1s [lag, trig:trig+len(tmp[0][idx]), lat_i, lon_i] = np.ones(tmp[0][idx].shape)
bin_TCE_len [lag, trig, lat_i, lon_i] = np.sum(np.ones(tmp[0][idx].shape))
except:
bin_TCE_01s[lag, :, lat_i, lon_i] = np.ma.masked_all(data.shape[0])
bin_TCE_1s [lag, :, lat_i, lon_i] = np.ma.masked_all(data.shape[0])
bin_TCE_len[lag, :, lat_i, lon_i] = np.ma.masked_all(data.shape[0])
else:
bin_TCE_01s[lag, :, lat_i, lon_i] = np.ma.masked_all(data.shape[0])
bin_TCE_1s [lag, :, lat_i, lon_i] = np.ma.masked_all(data.shape[0])
bin_TCE_len[lag, :, lat_i, lon_i] = np.ma.masked_all(data.shape[0])
return bin_TCE_01s, bin_TCE_1s, bin_TCE_len
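# Illustrative sketch of the TCE definition used above (defined only, never
# called): extreme months separated by gaps of at most `max_gap` months are
# merged into one event, and only merged events containing at least `min_size`
# extreme months qualify as a Time Continuous Extreme. This is a simplified
# stand-in for demonstration; the actual behaviour is whatever
# patch_with_gaps_and_eventsize (functions.py) implements.
def _demo_tce_1d(binary_ts, max_gap=2, min_size=3):
    binary_ts = np.asarray(binary_ts)
    idx = np.flatnonzero(binary_ts)                  # months flagged as extreme
    events = []                                      # (start, end) index pairs
    if idx.size == 0:
        return events
    start = prev = idx[0]
    for i in idx[1:]:
        if i - prev - 1 > max_gap:                   # gap too long: close the event
            events.append((start, prev))
            start = i
        prev = i
    events.append((start, prev))
    # keep only events with enough extreme months inside them
    return [(s, e) for s, e in events if binary_ts[s:e + 1].sum() >= min_size]
# e.g. _demo_tce_1d([0, 1, 1, 0, 0, 1, 0]) -> [(1, 5)]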
all_win_start_years = np.arange(1850,2100,25)
# To do TCE analysis for all windows
win_start_years = np.arange(1850,2100,25)
# To check only for win starting at 2000
#win_start_years = [2000] # Testing with the year 2000-24 dataset first
save_TCE_binary = 'n'
if save_TCE_binary in ['y','yy','Y','yes']:
"""
    To save the binary matrix of the Time Continuous Extremes (TCEs) so that the location and duration of the extremes can be identified.
    If you want to save the binary matrix of extremes as nc files, set the flag above to 'y';
    this was done so that it could be used as input to the attribution analysis.
"""
for start_yr in win_start_years:
win_idx = np.where( all_win_start_years == start_yr)[0][0]
for source_run in source_selected:
for member_run in common_members [source_run]:
Binary_Data_TCE = {} # Dictionary to save negative and positive Binary TCEs
Binary_Data_TCE ['neg'] = {}
Binary_Data_TCE ['pos'] = {}
bin_neg = Results[source_run][member_run]['bin_ext_neg']
bin_pos = Results[source_run][member_run]['bin_ext_pos']
# Starting with Negative TCEs first
# ---------------------------------
Binary_Data_TCE ['neg']['bin_TCE_01s'], Binary_Data_TCE ['neg']['bin_TCE_1s'], Binary_Data_TCE ['neg']['bin_TCE_len'] = Binary_Mat_TCE_Win (bin_ar = bin_neg, win_start_year = start_yr,lags = lags_TCE, land_frac= data_lf [source_run])
Binary_Data_TCE ['pos']['bin_TCE_01s'], Binary_Data_TCE ['pos']['bin_TCE_1s'], Binary_Data_TCE ['pos']['bin_TCE_len'] = Binary_Mat_TCE_Win (bin_ar = bin_pos, win_start_year = start_yr,lags = lags_TCE, land_frac= data_lf [source_run])
path_TCE = cori_scratch + 'add_cmip6_data/%s/%s/%s/%s_TCE/'%(source_run,exp,member_run,variable_run)
# Check if the directory 'path_TCE' already exists? If not, then create one:
if os.path.isdir(path_TCE) == False:
os.makedirs(path_TCE)
for ext_type in ['neg','pos']:
print("Saving the 01 TCE for %s,%s,%d,%s"%(source_run,member_run,start_yr,ext_type))
with nc4.Dataset( path_TCE + 'bin_TCE_01s_'+ext_type+'_%d.nc'%start_yr, mode = 'w') as dset:
dset .createDimension( "lag",size = lags_TCE.size)
dset .createDimension( "time",size = win_len)
dset .createDimension( "lat" ,size = nc_ano[source_run][member_run].variables['lat'].size)
dset .createDimension( "lon" ,size = nc_ano[source_run][member_run].variables['lon'].size)
w = dset.createVariable(varname = "lag" ,datatype = float, dimensions = ("lag") , fill_value = 1e+36)
t = dset.createVariable(varname = "time" ,datatype = float, dimensions = ("time"), fill_value = 1e+36)
x = dset.createVariable(varname = "lon" ,datatype = float, dimensions = ("lon") , fill_value = 1e+36)
y = dset.createVariable(varname = "lat" ,datatype = float, dimensions = ("lat") , fill_value = 1e+36)
z = dset.createVariable(varname = variable_run +'_TCE_01s' ,datatype = float, dimensions = ("lag","time","lat","lon"),fill_value = 1e+36) #varible = gpp_bin_ext
w.axis = "T"
t.axis = "T"
x.axis = "X"
y.axis = "Y"
w[...] = lags_TCE
t[...] = nc_ano[source_run][member_run].variables['time'] [...][win_idx * win_len : (win_idx+1)*win_len]
x[...] = nc_ano[source_run][member_run].variables['lon'][...]
y[...] = nc_ano[source_run][member_run].variables['lat'][...]
z[...] = Binary_Data_TCE [ext_type]['bin_TCE_01s']
z.missing_value = 1e+36
                        z.standard_name = variable_run+" binary TCE (01s) matrix for 25 years starting at the year %d"%start_yr
z.units = "0,1"
x.units = nc_ano[source_run][member_run].variables['lon'].units
x.missing_value = 1e+36
x.setncattr ("standard_name",nc_ano[source_run][member_run].variables['lon'].standard_name)
y.units = nc_ano[source_run][member_run].variables['lat'].units
y.missing_value = 1e+36
y.setncattr ("standard_name",nc_ano[source_run][member_run].variables['lat'].standard_name)
t.units = nc_ano[source_run][member_run].variables['time'].units
t.setncattr ("calendar", nc_ano[source_run][member_run].variables['time'].calendar)
t.setncattr ("standard_name", nc_ano[source_run][member_run].variables['time'].standard_name)
t.missing_value = 1e+36
w.units = "month"
w.setncattr ("standard_name","lags in months")
w.missing_value = 1e+36
print("Saving the 1s TCE for %s,%s,%d,%s"%(source_run,member_run,start_yr,ext_type))
with nc4.Dataset( path_TCE + 'bin_TCE_1s_'+ext_type+'_%d.nc'%start_yr, mode = 'w') as dset:
dset .createDimension( "lag",size = lags_TCE.size)
dset .createDimension( "time",size = win_len)
dset .createDimension( "lat" ,size = nc_ano[source_run][member_run].variables['lat'].size)
dset .createDimension( "lon" ,size = nc_ano[source_run][member_run].variables['lon'].size)
w = dset.createVariable(varname = "lag" ,datatype = float, dimensions = ("lag") , fill_value = 1e+36)
t = dset.createVariable(varname = "time" ,datatype = float, dimensions = ("time"), fill_value = 1e+36)
x = dset.createVariable(varname = "lon" ,datatype = float, dimensions = ("lon") , fill_value = 1e+36)
y = dset.createVariable(varname = "lat" ,datatype = float, dimensions = ("lat") , fill_value = 1e+36)
z = dset.createVariable(varname = variable_run+'_TCE_1s' ,datatype = float, dimensions = ("lag","time","lat","lon"),fill_value = 1e+36) #varible = gpp_bin_ext
w.axis = "T"
t.axis = "T"
x.axis = "X"
y.axis = "Y"
w[...] = lags_TCE
t[...] = nc_ano[source_run][member_run].variables['time'] [...][win_idx * win_len : (win_idx+1)*win_len]
x[...] = nc_ano[source_run][member_run].variables['lon'][...]
y[...] = nc_ano[source_run][member_run].variables['lat'][...]
z[...] = Binary_Data_TCE [ext_type]['bin_TCE_1s']
z.missing_value = 1e+36
                        z.standard_name = variable_run +" binary TCE (1s) matrix for 25 years starting at the year %d"%start_yr
z.units = "0,1"
x.units = nc_ano[source_run][member_run].variables['lon'].units
x.missing_value = 1e+36
x.setncattr ("standard_name",nc_ano[source_run][member_run].variables['lon'].standard_name)
y.units = nc_ano[source_run][member_run].variables['lat'].units
y.missing_value = 1e+36
y.setncattr ("standard_name",nc_ano[source_run][member_run].variables['lat'].standard_name)
t.units = nc_ano[source_run][member_run].variables['time'].units
t.setncattr ("calendar", nc_ano[source_run][member_run].variables['time'].calendar)
t.setncattr ("standard_name", nc_ano[source_run][member_run].variables['time'].standard_name)
t.missing_value = 1e+36
w.units = "month"
w.setncattr ("standard_name","lags in months")
w.missing_value = 1e+36
# Calculation of TS of gain or loss of carbon uptake
# --------------------------------------------------
def Global_TS_of_Extremes(bin_ar, ano_gC, area = 0, lf = 0):
"""
Returns the global TS of :
1. total carbon loss/gain associated neg/pos extremes
2. total freq of extremes
3. total area affected by extremes
Parameters:
-----------
bin_ar : the binary array of extremes (pos/neg)
ano_gC : the array which will use the mask or binary arrays to calc the carbon loss/gain
Universal:
----------
2-d area array (nlat, nlon), dates_win (# wins, win_size)
Returns:
--------
1d array of length # wins x win_size for all : ext_gC_ts, ext_freq_ts, ext_area_ts
"""
print (" Calculating Extremes ... " )
ext_ar = bin_ar * ano_gC # extremes array
    if np.isscalar(area) or np.isscalar(lf):
        print ("The area under extremes will not be calculated... \nGrid-cell area and/or land fraction were not provided ... \nThe returned area TS is 0 (zeros)")
        ext_area_ar = np.ma.zeros(bin_ar.shape) # no area/land-fraction information available
    else:
        ext_area_ar = bin_ar * area[...] * lf[...] # area array of extremes
ext_gC_ts = []
ext_freq_ts = []
ext_area_ts = []
for i in range(dates_win.flatten().size):
ext_gC_ts . append(np.ma.sum(ext_ar[i]))
ext_freq_ts . append(np.ma.sum(bin_ar[i]))
ext_area_ts . append(np.ma.sum(ext_area_ar[i]))
return np.ma.array(ext_gC_ts), np.ma.array(ext_freq_ts),np.ma.array(ext_area_ts)
# Calculating the slopes of GPP extremes
# --------------------------------------
def Slope_Intercept_Pv_Trend_Increase ( time, ts, until_idx1=2100, until_idx2=None):
"""
    Returns the slope, intercept, p value, trend-line points and percent increase for the first period (suffix '_1', e.g. 1850-2099) and, if 'until_idx2' is given, for a second later period (suffix '_2')
    Parameters:
    -----------
    time : the time axis of the netCDF file; ts : a one-dimensional monthly time series of the same length
    Returns:
    --------
    single values for slope, intercept, p value, increase percentage**
    1d array of the same length as the fitted period for 'trend'
    ** the percent increase is the change of the trend line relative to its value at the start of the period (e.g. 1850),..
"""
until_idx1 = int (until_idx1)
if until_idx2 != None:
until_idx2 = int (until_idx2)
# calculation of the magnitudes of global gpp loss and trend from 1850- until idx-1
slope_1, intercept_1,rv_1,pv_1,std_e1 = stats.linregress(time[...][:until_idx1],ts[:until_idx1])
trend_1 = slope_1*time[...][:until_idx1]+intercept_1
increase_1 = (trend_1[-1]-trend_1[0])*100/trend_1[0]
# calculation of the magnitudes of global gpp loss and trend from index-1 to until-idx2
if until_idx2 != None:
        slope_2, intercept_2,rv_2,pv_2,std_e2 = stats.linregress(time[...][until_idx1:until_idx2],ts[until_idx1:until_idx2])
        trend_2 = slope_2*time[...][until_idx1:until_idx2]+intercept_2
        increase_2 = (trend_2[-1]-trend_2[0])*100/trend_2[0]
        increase_2_r1850 = (trend_2[-1]-trend_1[0])*100/trend_1[0]
        return slope_1,intercept_1,pv_1,trend_1,increase_1,slope_2,intercept_2,pv_2,trend_2,increase_2,increase_2_r1850
else:
return slope_1,intercept_1,pv_1,trend_1,increase_1
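# Quick self-check of the trend logic above on a synthetic series with a known
# slope (defined only, never called). The numbers are arbitrary assumptions
# used purely for demonstration.
def _demo_trend_increase():
    from scipy import stats
    t = np.arange(1200.)                             # 100 years of monthly steps
    ts = 50. + 0.01 * t                              # linear signal, slope 0.01
    slope, intercept, rv, pv, std_e = stats.linregress(t, ts)
    trend = slope * t + intercept
    increase = (trend[-1] - trend[0]) * 100 / trend[0]
    return slope, pv, increase                       # ~0.01, ~0.0, ~24 per cent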
# Saving the results of TS carbon loss/gain
for source_run in source_selected:
for member_run in common_members [source_run]:
Results[source_run][member_run]['ts_global_gC'] = {}
Results[source_run][member_run]['ts_global_area'] = {}
Results[source_run][member_run]['ts_global_freq'] = {}
Results[source_run][member_run]['ts_global_gC']['neg_ext'] = {}
Results[source_run][member_run]['ts_global_gC']['pos_ext'] = {}
Results[source_run][member_run]['ts_global_area']['neg_ext']= {}
Results[source_run][member_run]['ts_global_area']['pos_ext']= {}
Results[source_run][member_run]['ts_global_freq']['neg_ext']= {}
Results[source_run][member_run]['ts_global_freq']['pos_ext']= {}
for source_run in source_selected:
print ("Calculating the global TS of Extremes for %s"%source_run)
for member_run in common_members [source_run]:
# Negative Extremes:
# ------------------
ts_ext , ts_freq, ts_area = Global_TS_of_Extremes(bin_ar = Results[source_run][member_run]['bin_ext_neg'],
ano_gC = nc_ano[source_run][member_run].variables[variable_run][...],
area = data_area [source_run],
lf = data_lf [source_run])
Results[source_run][member_run]['ts_global_gC' ]['neg_ext']['ts'] = ts_ext
Results[source_run][member_run]['ts_global_area']['neg_ext']['ts'] = ts_area
Results[source_run][member_run]['ts_global_freq']['neg_ext']['ts'] = ts_freq
del ts_ext , ts_freq, ts_area
# Positive Extremes:
# -----------------
ts_ext , ts_freq, ts_area = Global_TS_of_Extremes(bin_ar = Results[source_run][member_run]['bin_ext_pos'],
ano_gC = nc_ano[source_run][member_run].variables[variable_run][...],
area = data_area [source_run],
lf = data_lf [source_run])
Results[source_run][member_run]['ts_global_gC' ]['pos_ext']['ts'] = ts_ext
Results[source_run][member_run]['ts_global_area']['pos_ext']['ts'] = ts_area
Results[source_run][member_run]['ts_global_freq']['pos_ext']['ts'] = ts_freq
del ts_ext , ts_freq, ts_area
# -----------------
for source_run in source_selected:
for member_run in common_members [source_run]:
# Negative Extremes gC:
# ---------------------
slope,intercept,pv,trend,increase = Slope_Intercept_Pv_Trend_Increase (
time = nc_ano[source_run][member_run].variables['time'],
ts = Results[source_run][member_run]['ts_global_gC']['neg_ext']['ts'],
until_idx1 = idx_yr_2099)
Results[source_run][member_run]['ts_global_gC']['neg_ext']['s21' ] = slope
Results[source_run][member_run]['ts_global_gC']['neg_ext']['pv21' ] = pv
Results[source_run][member_run]['ts_global_gC']['neg_ext']['trend_21'] = trend
Results[source_run][member_run]['ts_global_gC']['neg_ext']['inc_21' ] = increase
del slope,intercept,pv,trend,increase
# Positive Extremes gC:
# ---------------------
slope,intercept,pv,trend,increase = Slope_Intercept_Pv_Trend_Increase (
time = nc_ano[source_run][member_run].variables['time'],
ts = Results[source_run][member_run]['ts_global_gC']['pos_ext']['ts'],
until_idx1 = idx_yr_2099)
Results[source_run][member_run]['ts_global_gC']['pos_ext']['s21' ] = slope
Results[source_run][member_run]['ts_global_gC']['pos_ext']['pv21' ] = pv
Results[source_run][member_run]['ts_global_gC']['pos_ext']['trend_21'] = trend
Results[source_run][member_run]['ts_global_gC']['pos_ext']['inc_21' ] = increase
del slope,intercept,pv,trend,increase
# -----------------------------------
# -----------------------------------
# Negative Extremes freq:
# -----------------------
slope,intercept,pv,trend,increase = Slope_Intercept_Pv_Trend_Increase (
time = nc_ano[source_run][member_run].variables['time'],
ts = Results[source_run][member_run]['ts_global_freq']['neg_ext']['ts'],
until_idx1 = idx_yr_2099)
Results[source_run][member_run]['ts_global_freq']['neg_ext']['s21' ] = slope
Results[source_run][member_run]['ts_global_freq']['neg_ext']['pv21' ] = pv
Results[source_run][member_run]['ts_global_freq']['neg_ext']['trend_21']= trend
Results[source_run][member_run]['ts_global_freq']['neg_ext']['inc_21' ]= increase
del slope,intercept,pv,trend,increase
# Positive Extremes freq:
# -----------------------
slope,intercept,pv,trend,increase = Slope_Intercept_Pv_Trend_Increase (
time = nc_ano[source_run][member_run].variables['time'],
ts = Results[source_run][member_run]['ts_global_freq']['pos_ext']['ts'],
until_idx1 = idx_yr_2099)
Results[source_run][member_run]['ts_global_freq']['pos_ext']['s21' ] = slope
Results[source_run][member_run]['ts_global_freq']['pos_ext']['pv21' ] = pv
Results[source_run][member_run]['ts_global_freq']['pos_ext']['trend_21']= trend
Results[source_run][member_run]['ts_global_freq']['pos_ext']['inc_21' ]= increase
del slope,intercept,pv,trend,increase
# -----------------------------------
# -----------------------------------
# Negative Extremes area:
# -----------------------
slope,intercept,pv,trend,increase = Slope_Intercept_Pv_Trend_Increase (
time = nc_ano[source_run][member_run].variables['time'],
ts = Results[source_run][member_run]['ts_global_area']['neg_ext']['ts'],
until_idx1 = idx_yr_2099)
Results[source_run][member_run]['ts_global_area']['neg_ext']['s21' ] = slope
Results[source_run][member_run]['ts_global_area']['neg_ext']['pv21' ] = pv
Results[source_run][member_run]['ts_global_area']['neg_ext']['trend_21']= trend
Results[source_run][member_run]['ts_global_area']['neg_ext']['inc_21' ]= increase
del slope,intercept,pv,trend,increase
# Positive Extremes area:
# -----------------------
slope,intercept,pv,trend,increase = Slope_Intercept_Pv_Trend_Increase (
time = nc_ano[source_run][member_run].variables['time'],
ts = Results[source_run][member_run]['ts_global_area']['pos_ext']['ts'],
until_idx1 = idx_yr_2099)
Results[source_run][member_run]['ts_global_area']['pos_ext']['s21' ] = slope
Results[source_run][member_run]['ts_global_area']['pos_ext']['pv21' ] = pv
Results[source_run][member_run]['ts_global_area']['pos_ext']['trend_21']= trend
Results[source_run][member_run]['ts_global_area']['pos_ext']['inc_21' ]= increase
del slope,intercept,pv,trend,increase
# -----------------------------------
def Sum_and_Diff_of_Fluxes_perWin(ano_gC, bin_ar = None, data_type = 'ext', diff_ref_yr = 1850):
"""
    returns the per-window sum of fluxes and the difference of each window's sum relative to the window starting at diff_ref_yr
Parameters:
----------
bin_ar: the binary array of extremes (pos/neg)
ano_gC : the array which will use the mask or binary arrays to calc the carbon loss/gain
diff_ref_yr : the starting year of the reference time window for differencing
    data_type : 'ext' computes the sum and difference of extremes, i.e. the fluxes are masked by 'bin_ar' in the calculation;
                any other value ignores 'bin_ar' and the sum/difference of the original fluxes is calculated.
Universal:
----------
start_dates : the start_dates of every 25 year window, size = # wins
Returns:
--------
sum_flux : shape (# wins, nlat,nlon), sum of fluxes per window
diff_flux : shape (# wins, nlat,nlon), difference of sum of fluxes per window and reference window
"""
if data_type != 'ext': bin_ar = np.ma.ones(ano_gC.shape)
sum_ext = []
for i in range(len(start_dates)):
ext_gC = bin_ar[idx_dates_win[i][0] : idx_dates_win [i][-1]+1,:,:] * ano_gC[idx_dates_win[i][0] : idx_dates_win [i][-1]+1,:,:]
sum_ext . append (np.ma.sum(ext_gC, axis = 0))
sum_ext = np.ma.asarray(sum_ext)
#to calculate the index of the reference year starting window:
for i,date in enumerate(start_dates):
if date.year in [diff_ref_yr]:
diff_yr_idx = i
diff_ext = []
for i in range(len(start_dates)):
diff = sum_ext[i] - sum_ext[diff_yr_idx]
diff_ext . append (diff)
diff_ext = np.ma.asarray(diff_ext)
return sum_ext , diff_ext
# ----------------------------------------------------------
# Preparing the storage
# ----------------------------------------------------------
for source_run in source_selected:
for member_run in common_members [source_run]:
# Negative Extremes:
sum_neg_ext , diff_neg_ext = Sum_and_Diff_of_Fluxes_perWin ( bin_ar = Results[source_run][member_run]['bin_ext_neg'],
ano_gC = nc_ano[source_run][member_run].variables[variable_run][...],
data_type = 'ext',
diff_ref_yr = 1850)
Results[source_run][member_run]['sum_neg_ext'] = sum_neg_ext
Results[source_run][member_run]['diff_neg_ext'] = diff_neg_ext
# Positive extremes:
sum_pos_ext , diff_pos_ext = Sum_and_Diff_of_Fluxes_perWin ( bin_ar = Results[source_run][member_run]['bin_ext_pos'],
ano_gC = nc_ano[source_run][member_run].variables[variable_run][...],
data_type = 'ext',
diff_ref_yr = 1850)
Results[source_run][member_run]['sum_pos_ext'] = sum_pos_ext
Results[source_run][member_run]['diff_pos_ext'] = diff_pos_ext
del sum_neg_ext , diff_neg_ext, sum_pos_ext , diff_pos_ext
#Negative Flux/Ori
#sum_neg_ori , diff_neg_ori = Sum_and_Diff_of_Fluxes_perWin ( bin_ar = None,
# ano_gC = nc_ano[source_run][member_run].variables[variable_run][...],
# data_type = 'ori',
# diff_ref_yr = 1850)
# Results[source_run][member_run]['sum_neg_ori'] = sum_neg_ori
# Results[source_run][member_run]['diff_neg_ori'] = diff_neg_ori
# Results[source_run][member_run]['sum_pos_ext'] = {}
# Results[source_run][member_run]['diff_neg_ext'] = {}
# Results[source_run][member_run]['diff_pos_ext'] = {}
# Regional analysis
# -----------------
import regionmask
# Selection the member_run manually
member_run = common_members[source_run] [0]
lon = nc_ano[source_run][member_run].variables ['lon']
lat = nc_ano[source_run][member_run].variables ['lat']
# for the plotting
lon_bounds = nc_ano[source_run][member_run].variables [lon.bounds]
lat_bounds = nc_ano[source_run][member_run].variables [lat.bounds]
lon_edges = np.hstack (( lon_bounds[:,0], lon_bounds[-1,-1]))
lat_edges = np.hstack (( lat_bounds[:,0], lat_bounds[-1,-1]))
# Creating mask of the regions based on the resolution of the model
mask = regionmask.defined_regions.srex.mask(lon[...], lat[...]).values
# important information:
srex_abr = regionmask.defined_regions.srex.abbrevs
srex_names = regionmask.defined_regions.srex.names
srex_nums = regionmask.defined_regions.srex.numbers
srex_centroids = regionmask.defined_regions.srex.centroids
srex_polygons = regionmask.defined_regions.srex.polygons
mask_ma = np.ma.masked_invalid(mask)
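# Illustrative sketch (defined only, never called): the SREX mask built above
# can restrict any (lat, lon) field to a single region, e.g. 'AMZ'. The field
# shape is assumed to match the model grid; names and shapes here are
# assumptions for demonstration only.
def _demo_region_subset(field_2d, region_abr='AMZ'):
    region_num = srex_nums[srex_abr.index(region_abr)]      # SREX number for the abbreviation
    outside = np.ma.filled(mask_ma, -1) != region_num       # True outside the region
    return np.ma.masked_array(field_2d, mask=outside)       # field restricted to the region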
import matplotlib.pyplot as plt
import os
"""
Basemaps not working anymore
===========================
#1- Hack to fix missing PROJ4 env var
import os
import conda
conda_file_dir = conda.__file__
conda_dir = conda_file_dir.split('lib')[0]
proj_lib = os.path.join(os.path.join(conda_dir, 'share'), 'proj')
os.environ["PROJ_LIB"] = proj_lib
#-1 Hack end
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import Basemap
"""
"""
Regional Plots
--------------
#fig = plt.figure()
#ax = plt.subplot(111, projection=ccrs.PlateCarree())
fig,ax = plt.subplots(tight_layout = True, figsize = (9,5), dpi = 400)
bmap = Basemap( projection = 'eck4',
lon_0 = 0.,
resolution = 'c')
LON,LAT = np.meshgrid(lon_edges,lat_edges)
ax = bmap.pcolormesh(LON,LAT, mask_ma, cmap ='viridis')
bmap .drawparallels(np.arange(-90., 90., 30.),fontsize=14, linewidth = .2)
bmap .drawmeridians(np.arange(0., 360., 60.),fontsize=14, linewidth = .2)
bmap .drawcoastlines(linewidth = .25,color='lightgrey')
plt.colorbar(ax, orientation='horizontal', pad=0.04)
fig.savefig (web_path + "SREX_regions.pdf")
# Cartopy Plotting
# ----------------
import cartopy.crs as ccrs
from shapely.geometry.polygon import Polygon
import cartopy.feature as cfeature
# Fixing the error {'GeoAxesSubplot' object has no attribute '_hold'}
from matplotlib.axes import Axes
from cartopy.mpl.geoaxes import GeoAxes
GeoAxes._pcolormesh_patched = Axes.pcolormesh
proj_trans = ccrs.PlateCarree()
fig = plt.figure(figsize = (9,5))
ax = fig.add_subplot(111, projection=ccrs.PlateCarree())
mask_ma = np.ma.masked_invalid(mask)
h = ax.pcolormesh(lon_edges[...], lat_edges[...], mask_ma, transform = proj_trans)#, cmap='viridis')
ax.coastlines()
plt.colorbar(h, orientation='horizontal', pad=0.04)
# Plot the abs at the centroids
for idx, abr in enumerate(srex_abr):
plt.text ( srex_centroids[idx][0], srex_centroids[idx][-1], srex_abr[idx],
horizontalalignment='center',
transform = proj_trans)
ax.add_geometries([srex_polygons[idx]], crs = proj_trans, facecolor='none', edgecolor='red', alpha=0.8)
fig.savefig (web_path + "SREX_regions_cpy.pdf")
plt.close(fig)
"""
# =================================================================================================
# =================================================================================================
## # ## ########
# # # ## ## ##
## # # # ##
# # ## ## ##
# ##### ## ##
# =================================================================================================
# =================================================================================================
# Creating a lis to Unique colors for multiple models:
# ---------------------------------------------------
NUM_COLORS = len(source_selected)
LINE_STYLES = ['solid', 'dashed', 'dashdot', 'dotted']
NUM_STYLES = len(LINE_STYLES)
sns.reset_orig() # get default matplotlib styles back
clrs = sns.color_palette('husl', n_colors=NUM_COLORS)
# Creating the ticks for x axis (every 25 years):
# ----------------------------------------------
tmp_idx = np.arange(0, 3013, 300) #for x ticks
tmp_idx[-1]=tmp_idx[-1]-1
dates_ticks = []
years_ticks = []
for i in tmp_idx:
a = dates_win.flatten()[i]
dates_ticks.append(a)
years_ticks.append(a.year)
# Creating the x-axis years (Monthly)
# -----------------------------------
x_years = [d.year for d in dates_win.flatten()]
# Caption (optional): This dictionary could be used to save the captions of the figures
# -------------------------------------------------------------------------------------
Captions = {}
# PLOTTING THE THRESHOLD FOR QUALIFICATION OF EXTREME EVENTS: fig[1-9]
# ===================================================================
if th_type == 'ind':
fig1,ax2 = plt.subplots(tight_layout = True, figsize = (9,5), dpi = 400)
ymin = 400
ymax = 8000
for s_idx, source_run in enumerate(source_selected):
for m_idx, member_run in enumerate(common_members [source_run]):
# ax2.plot( dates_win.flatten(), abs(Results[source_run][member_run]['ts_th_neg'])/10**9,
# 'r', label = "Th$-$ %s"%source_run, alpha = .7)
# ax2.plot( dates_win.flatten(), abs(Results[source_run][member_run]['ts_th_neg'])/10**9,
# clrs[s_idx], ls='--', label = "Th$-$ %s"%source_run, alpha = .7)
ax2.plot( dates_win.flatten(), abs(Results[source_run][member_run]['ts_th_neg'])/10**9,
'r', ls='--', label = "Th$-$ %s"%source_run, alpha = .3)
ax2.set_ylabel("Negative Extremes (GgC)", {'color': 'r'},fontsize =14)
ax2.set_xlabel("Time", fontsize = 14)
ax2.set_ylim([ymin,ymax])
#ax2.set_yticks(np.arange(int(np.floor(ymin/100)*100),int(np.ceil(ymax/100)*100),25))
#ax2.set_yticklabels(-np.arange(int(np.floor(ymin/100)*100),int(np.ceil(ymax/100)*100),25))
#ax2.tick_params(axis='y', colors='red')
# ax2.set_xticks(dates_ticks)
ax2.grid(which='major', linestyle=':', linewidth='0.3', color='gray')
ax1=ax2.twinx()
# ax1.plot( dates_win.flatten(), abs(Results[source_run][member_run]['ts_th_pos'])/10**9,
# 'g', label = "Th+ %s"%source_run, alpha = .7)
ax1.plot( dates_win.flatten(), abs(Results[source_run][member_run]['ts_th_pos'])/10**9,
'g', label = "Th+ %s"%source_run, alpha = .3)
ax1.set_ylabel("Positive Extremes (GgC)", {'color': 'g'},fontsize =14)
ax1.set_ylim([ymin,ymax])
#ax1.set_yticks(np.arange(int(np.floor(ymin/100)*100),int(np.ceil(ymax/100)*100),25))
#ax1.tick_params(axis='y', colors='green')
# ax1.set_xticks(dates_ticks)
# ax1.grid(which='major', linestyle=':', linewidth='0.3', color='gray')
lines, labels = ax1.get_legend_handles_labels()
lines2, labels2 = ax2.get_legend_handles_labels()
labels, ids = np.unique(labels, return_index=True)
labels2, ids2 = np.unique(labels2, return_index=True)
lines = [lines[i] for i in ids]
lines2 = [lines2[i] for i in ids2]
# ax2.legend(lines + lines2, labels + labels2, loc= 'best',fontsize =12)
#continue
fig1.savefig(web_path + 'Threshold/ts_threshold_all_scenario_%s_per_%s.pdf'%(variable_run,int(per)))
plt.close(fig1)
del fig1
# Threshold per model for the 'th_type' == 'ind' and per = 1.0
# -------------------------------------------------------------
for source_run in source_selected:
fig2,ax2 = plt.subplots(tight_layout = True, figsize = (9,5), dpi = 400)
pd.plotting.deregister_matplotlib_converters()
if source_run == 'CESM2' : ymin = 400 ; ymax = 700
if source_run == 'CanESM5' : ymin = 2000 ; ymax = 8000
if source_run == 'IPSL-CM6A-LR' : ymin = 1700 ; ymax = 2900
if source_run == 'BCC-CSM2-MR' : ymin = 400 ; ymax = 1000
if source_run == 'CNRM-ESM2-1' : ymin = 1000 ; ymax = 1500
if source_run == 'CNRM-CM6-1' : ymin = 1000 ; ymax = 1800
for m_idx, member_run in enumerate(common_members [source_run]):
L1= ax2.plot( dates_win.flatten(), abs(Results[source_run][member_run]['ts_th_neg'])/10**9,
'r', label = "Th$-$ %s"%member_run, linewidth = 0.3, alpha = .7)
L1[0].set_linestyle(LINE_STYLES[m_idx%NUM_STYLES])
ax2.set_ylabel("Negative Extremes (GgC)", {'color': 'r'},fontsize =14)
ax2.set_xlabel("Time", fontsize = 14)
#ax2.set_xlim([dates_ticks[0],dates_ticks[-1]])
#ax2.set_yticks(np.arange(int(np.floor(ymin/100)*100),int(np.ceil(ymax/100)*100),25))
#ax2.set_yticklabels(-np.arange(int(np.floor(ymin/100)*100),int(np.ceil(ymax/100)*100),25))
#ax2.tick_params(axis='y', colors='red')
ax2.grid(which='major', linestyle='--', linewidth='0.3', color='gray')
ax1=ax2.twinx()
for m_idx, member_run in enumerate(common_members [source_run]):
L2= ax1.plot( dates_win.flatten(), abs(Results[source_run][member_run]['ts_th_pos'])/10**9,
'g', label = "Th+ %s"%member_run, linewidth = 0.3, alpha = .7)
L2[0].set_linestyle(LINE_STYLES[m_idx%NUM_STYLES])
ax1.set_ylabel("Positive Extremes (GgC)", {'color': 'g'},fontsize =14)
#ax1.set_yticklabels([])
#ax1.set_yticks(np.arange(int(np.floor(ymin/100)*100),int(np.ceil(ymax/100)*100),25))
#ax1.tick_params(axis='y', colors='green')
# ax1.grid(which='major', linestyle='--', linewidth='0.3', color='gray')
ax2.set_ylabel("Negative Extremes (GgC)", {'color': 'r'},fontsize =14)
ax2.set_xlabel("Time", fontsize = 14)
ax1.set_ylabel("Positive Extremes (GgC)", {'color': 'g'},fontsize =14)
ax2.set_ylim([ymin,ymax])
ax1.set_ylim([ymin,ymax])
ax1.set_xticks(dates_ticks)
ax1.set_xticklabels(years_ticks)
lines, labels = ax1.get_legend_handles_labels()
lines2, labels2 = ax2.get_legend_handles_labels()
ax2.legend(lines + lines2, labels + labels2, loc=0,fontsize =8)
fig2.savefig(web_path + 'Threshold/ts_threshold_%s_source_%s_per_%s.pdf'%(source_run,variable_run,int(per)))
# Saving the plots
path_save = cori_scratch + "add_cmip6_data/%s/ssp585/%s/%s/Global_Extremes/non-TCE/Threshold/"%(source_run,member_run, variable_run)
if os.path.isdir(path_save) == False:
os.makedirs(path_save)
fig2.savefig(path_save + 'ts_threshold_%s_source_%s_per_%s.pdf'%(source_run,variable_run,int(per)))
plt.close(fig2)
del fig2,ax2
# Plotting thresholds when 'th_type' == 'common':
# -----------------------------------------------
if th_type == 'common':
fig3 = plt.figure(tight_layout = True, figsize = (9,5), dpi = 400)
plt.title("TS of Thresholds for CMIP6 models for percentile = %d"%int(per))
pd.plotting.deregister_matplotlib_converters()
for s_idx, source_run in enumerate(source_selected):
for m_idx, member_run in enumerate(common_members [source_run]):
plt.plot( dates_win.flatten(), abs(Results[source_run][member_run]['ts_th_neg'])/10**9,
color=clrs[s_idx], ls='-', label = "$q$ %s"%source_run, alpha = .8, linewidth = .7)
plt.ylabel("Thresholds (GgC)", {'color': 'k'},fontsize =14)
plt.xlabel("Time", fontsize = 14)
plt.grid(which='major', linestyle=':', linewidth='0.3', color='gray')
plt.legend()
break #Plotting only the first ensemble member
fig3.savefig(web_path + 'Threshold/ts_thresholdc_all_models_%s_per_%s.pdf'%(variable_run,int(per)))
plt.close(fig3)
del fig3
# Threshold per model for the 'th_type' == 'common' and per = 5.0
# ---------------------------------------------------------------
for s_idx, source_run in enumerate(source_selected):
fig4 = plt.figure(tight_layout = True, figsize = (9,5), dpi = 400)
plt.title("TS of %d percentile Thresholds of %s for the model %s"%(per, variable_run.upper(), source_run))
pd.plotting.deregister_matplotlib_converters()
if variable_run == 'gpp':
if source_run == 'CESM2' : ymin = 250 ; ymax = 400
if source_run == 'CanESM5' : ymin = 1500 ; ymax = 4500
if source_run == 'IPSL-CM6A-LR' : ymin = 1200 ; ymax = 2100
if source_run == 'BCC-CSM2-MR' : ymin = 300 ; ymax = 600
if source_run == 'CNRM-ESM2-1' : ymin = 700 ; ymax = 900
if source_run == 'CNRM-CM6-1' : ymin = 600 ; ymax = 1100
if variable_run == 'nbp':
if source_run == 'CESM2' : ymin = 130 ; ymax = 230
if variable_run == 'ra':
if source_run == 'CESM2' : ymin = 180 ; ymax = 240
if variable_run == 'rh':
if source_run == 'CESM2' : ymin = 100 ; ymax = 170
for m_idx, member_run in enumerate(common_members [source_run]):
plt.plot( dates_win.flatten(), abs(Results[source_run][member_run]['ts_th_neg'])/10**9,
color=clrs[s_idx], ls='-', label = "$q$ %s"%source_run, alpha = 1, linewidth = 1)
break #Plotting only the first ensemble member
plt.ylim ((ymin,ymax))
plt.ylabel("Thresholds (GgC)", {'color': 'k'},fontsize =14)
plt.xlabel("Time", fontsize = 14)
plt.grid(which='major', linestyle=':', linewidth='0.4', color='gray')
plt.legend()
fig4.savefig(web_path + 'Threshold/ts_thresholdc_%s_source_%s_per_%s.pdf'%(source_run,variable_run,int(per)))
# Saving the plots
path_save = cori_scratch + "add_cmip6_data/%s/ssp585/%s/%s/Global_Extremes/non-TCE/Threshold/"%(source_run,member_run, variable_run)
if os.path.isdir(path_save) == False:
os.makedirs(path_save)
fig4.savefig(path_save + 'ts_thresholdc_%s_source_%s_per_%s.pdf'%(source_run,variable_run,int(per)))
plt.close(fig4)
del fig4
# PLOTTING THE GLOBAL TIMESERIES OF THE EXTREME EVENTS : fig[11-19]
# ======================================================================================
for s_idx, source_run in enumerate(source_selected):
fig11 = plt.figure(tight_layout = True, figsize = (9,5), dpi = 400)
plt.style.use("classic")
plt.title ("TS global %s extremes for %s when percentile is %d"%(variable_run.upper(), source_run, per))
| pd.plotting.deregister_matplotlib_converters() | pandas.plotting.deregister_matplotlib_converters |
import numpy as np
import pandas as pd
import argparse
import math
import csv
from collections import OrderedDict
def get_args():
parser = argparse.ArgumentParser(description='Call peaks over CRISPRi screen windows.')
parser.add_argument('guide_data',help='Input flow-fish guide count data.')
parser.add_argument('output_file',help='Output file to print TSV to.')
parser.add_argument('--meta_data','-m',type=str,default=None,help='Tab-delimited metadata table. '+\
'Must contain header with `Short_name` '+\
'and `cutting_specificity_score` fields.')
parser.add_argument('--window_size','-w',type=int,default=100,help='Window size over which to subset.')
parser.add_argument('--subset_frac','-f',type=float,default=0.5,help='Maximum fraction of guides to remove by subsetting.')
parser.add_argument('--negate','-n',action='store_true',help='Negate `cutting_spcificity_score` to flip sort order.')
parser.add_argument('--random_seed','-r',type=int,default=None,help='Set random seed for '+\
                        'consistent sampling. '+\
'Use current date (e.g., 20200901). '+\
'Required if meta_data is None')
args = parser.parse_args()
args.step_size = args.window_size
return args
def check_args(args):
    assert args.subset_frac > 0, "subset_frac must be greater than 0."
    assert args.subset_frac < 1, "subset_frac must be less than 1."
assert args.window_size > 0, "Windows must take up space."
return True
def check_overlap_bed(interval, array):
height = array.shape[0]
intervals = np.stack([np.tile(interval,(height,1)), array],axis=0)
swaghook = (intervals[0,:,1] < intervals[1,:,1]).astype(int)
    chrom = (intervals[0,:,0] == intervals[1,:,0]) # require the query and each row to be on the same chromosome
overlap = intervals[1-swaghook,np.arange(height),2] > intervals[swaghook,np.arange(height),1]
return overlap & chrom
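# Minimal usage sketch for check_overlap_bed (defined only, never called):
# rows are (chrom, start, end) with chromosomes encoded numerically so the
# stacked array stays numeric; BED-style half-open intervals are assumed.
def _demo_check_overlap_bed():
    query = np.array([1, 100, 200])                   # chrom 1, interval 100-200
    windows = np.array([[1, 150, 250],                # overlaps
                        [1, 200, 300],                # only touches: no overlap
                        [2, 100, 200]])               # different chromosome
    return check_overlap_bed(query, windows)          # -> [True, False, False]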
def main(args):
#####################
## Import data ##
#####################
guide_data = | pd.read_table(args.guide_data, sep='\t', header=0, index_col=False) | pandas.read_table |
import operator
from enum import Enum
from typing import Union, Any, Optional, Hashable
import numpy as np
import pandas as pd
import pandas_flavor as pf
from pandas.core.construction import extract_array
from pandas.core.dtypes.common import (
is_categorical_dtype,
is_datetime64_dtype,
is_dtype_equal,
is_extension_array_dtype,
is_numeric_dtype,
is_string_dtype,
)
from pandas.core.reshape.merge import _MergeOperation
from janitor.utils import check, check_column
@pf.register_dataframe_method
def conditional_join(
df: pd.DataFrame,
right: Union[pd.DataFrame, pd.Series],
*conditions,
how: str = "inner",
sort_by_appearance: bool = False,
df_columns: Optional[Any] = None,
right_columns: Optional[Any] = None,
) -> pd.DataFrame:
"""
This is a convenience function that operates similarly to `pd.merge`,
but allows joins on inequality operators,
or a combination of equi and non-equi joins.
    Joins solely on equality are not supported.
If the join is solely on equality, `pd.merge` function
covers that; if you are interested in nearest joins, or rolling joins,
or the first match (lowest or highest) - `pd.merge_asof` covers that.
There is also the IntervalIndex, which is usually more efficient
for range joins, especially if the intervals do not overlap.
Column selection in `df_columns` and `right_columns` is possible using the
[`select_columns`][janitor.functions.select_columns.select_columns] syntax.
This function returns rows, if any, where values from `df` meet the
condition(s) for values from `right`. The conditions are passed in
as a variable argument of tuples, where the tuple is of
the form `(left_on, right_on, op)`; `left_on` is the column
label from `df`, `right_on` is the column label from `right`,
while `op` is the operator. For multiple conditions, the and(`&`)
operator is used to combine the results of the individual conditions.
The operator can be any of `==`, `!=`, `<=`, `<`, `>=`, `>`.
A binary search is used to get the relevant rows for non-equi joins;
this avoids a cartesian join, and makes the process less memory intensive.
For equi-joins, Pandas internal merge function is used.
The join is done only on the columns.
MultiIndex columns are not supported.
For non-equi joins, only numeric and date columns are supported.
Only `inner`, `left`, and `right` joins are supported.
If the columns from `df` and `right` have nothing in common,
a single index column is returned; else, a MultiIndex column
is returned.
Example:
>>> import pandas as pd
>>> import janitor
>>> df1 = pd.DataFrame({"value_1": [2, 5, 7, 1, 3, 4]})
>>> df2 = pd.DataFrame({"value_2A": [0, 3, 7, 12, 0, 2, 3, 1],
... "value_2B": [1, 5, 9, 15, 1, 4, 6, 3],
... })
>>> df1
value_1
0 2
1 5
2 7
3 1
4 3
5 4
>>> df2
value_2A value_2B
0 0 1
1 3 5
2 7 9
3 12 15
4 0 1
5 2 4
6 3 6
7 1 3
>>> df1.conditional_join(
... df2,
... ("value_1", "value_2A", ">="),
... ("value_1", "value_2B", "<=")
... )
value_1 value_2A value_2B
0 2 1 3
1 2 2 4
2 5 3 5
3 5 3 6
4 7 7 9
5 1 0 1
6 1 0 1
7 1 1 3
8 3 1 3
9 3 2 4
10 3 3 5
11 3 3 6
12 4 2 4
13 4 3 5
14 4 3 6
:param df: A pandas DataFrame.
:param right: Named Series or DataFrame to join to.
:param conditions: Variable argument of tuple(s) of the form
`(left_on, right_on, op)`, where `left_on` is the column
label from `df`, `right_on` is the column label from `right`,
while `op` is the operator. The operator can be any of
`==`, `!=`, `<=`, `<`, `>=`, `>`. For multiple conditions,
the and(`&`) operator is used to combine the results
of the individual conditions.
:param how: Indicates the type of join to be performed.
It can be one of `inner`, `left`, `right`.
Full join is not supported. Defaults to `inner`.
:param sort_by_appearance: Default is `False`.
This is useful for strictly non-equi joins,
where the user wants the original order maintained.
If True, values from `df` and `right`
that meet the join condition will be returned
in the final dataframe in the same order
that they were before the join.
:param df_columns: Columns to select from `df`.
It can be a single column or a list of columns.
It is also possible to rename the output columns via a dictionary.
:param right_columns: Columns to select from `right`.
It can be a single column or a list of columns.
It is also possible to rename the output columns via a dictionary.
:returns: A pandas DataFrame of the two merged Pandas objects.
"""
return _conditional_join_compute(
df,
right,
conditions,
how,
sort_by_appearance,
df_columns,
right_columns,
)
class _JoinOperator(Enum):
"""
List of operators used in conditional_join.
"""
GREATER_THAN = ">"
LESS_THAN = "<"
GREATER_THAN_OR_EQUAL = ">="
LESS_THAN_OR_EQUAL = "<="
STRICTLY_EQUAL = "=="
NOT_EQUAL = "!="
class _JoinTypes(Enum):
"""
List of join types for conditional_join.
"""
INNER = "inner"
LEFT = "left"
RIGHT = "right"
operator_map = {
_JoinOperator.STRICTLY_EQUAL.value: operator.eq,
_JoinOperator.LESS_THAN.value: operator.lt,
_JoinOperator.LESS_THAN_OR_EQUAL.value: operator.le,
_JoinOperator.GREATER_THAN.value: operator.gt,
_JoinOperator.GREATER_THAN_OR_EQUAL.value: operator.ge,
_JoinOperator.NOT_EQUAL.value: operator.ne,
}
less_than_join_types = {
_JoinOperator.LESS_THAN.value,
_JoinOperator.LESS_THAN_OR_EQUAL.value,
}
greater_than_join_types = {
_JoinOperator.GREATER_THAN.value,
_JoinOperator.GREATER_THAN_OR_EQUAL.value,
}
def _check_operator(op: str):
"""
Check that operator is one of
`>`, `>=`, `==`, `!=`, `<`, `<=`.
Used in `conditional_join`.
"""
sequence_of_operators = {op.value for op in _JoinOperator}
if op not in sequence_of_operators:
raise ValueError(
"The conditional join operator "
f"should be one of {sequence_of_operators}"
)
def _conditional_join_preliminary_checks(
df: pd.DataFrame,
right: Union[pd.DataFrame, pd.Series],
conditions: tuple,
how: str,
sort_by_appearance: bool,
df_columns: Any,
right_columns: Any,
) -> tuple:
"""
Preliminary checks for conditional_join are conducted here.
Checks include differences in number of column levels,
length of conditions, existence of columns in dataframe, etc.
"""
check("right", right, [pd.DataFrame, pd.Series])
df = df.copy()
right = right.copy()
if isinstance(right, pd.Series):
if not right.name:
raise ValueError(
"Unnamed Series are not supported for conditional_join."
)
right = right.to_frame()
if df.columns.nlevels != right.columns.nlevels:
raise ValueError(
"The number of column levels "
"from the left and right frames must match. "
"The number of column levels from the left dataframe "
f"is {df.columns.nlevels}, while the number of column levels "
f"from the right dataframe is {right.columns.nlevels}."
)
if not conditions:
raise ValueError("Kindly provide at least one join condition.")
for condition in conditions:
check("condition", condition, [tuple])
len_condition = len(condition)
if len_condition != 3:
raise ValueError(
"condition should have only three elements; "
f"{condition} however is of length {len_condition}."
)
for left_on, right_on, op in conditions:
check("left_on", left_on, [Hashable])
check("right_on", right_on, [Hashable])
check("operator", op, [str])
check_column(df, [left_on])
check_column(right, [right_on])
_check_operator(op)
if all(
(op == _JoinOperator.STRICTLY_EQUAL.value for *_, op in conditions)
):
raise ValueError("Equality only joins are not supported.")
check("how", how, [str])
checker = {jointype.value for jointype in _JoinTypes}
if how not in checker:
raise ValueError(f"'how' should be one of {checker}.")
check("sort_by_appearance", sort_by_appearance, [bool])
if (df.columns.nlevels > 1) and (
isinstance(df_columns, dict) or isinstance(right_columns, dict)
):
raise ValueError(
"Column renaming with a dictionary is not supported "
"for MultiIndex columns."
)
return (
df,
right,
conditions,
how,
sort_by_appearance,
df_columns,
right_columns,
)
def _conditional_join_type_check(
left_column: pd.Series, right_column: pd.Series, op: str
) -> None:
"""
Raise error if column type is not any of numeric or datetime or string.
"""
permitted_types = {
is_datetime64_dtype,
is_numeric_dtype,
is_string_dtype,
is_categorical_dtype,
}
for func in permitted_types:
if func(left_column):
break
else:
raise ValueError(
"conditional_join only supports "
"string, category, numeric, or date dtypes (without timezone) - "
f"'{left_column.name} is of type {left_column.dtype}."
)
lk_is_cat = is_categorical_dtype(left_column)
rk_is_cat = is_categorical_dtype(right_column)
if lk_is_cat & rk_is_cat:
if not left_column.array._categories_match_up_to_permutation(
right_column.array
):
raise ValueError(
f"'{left_column.name}' and '{right_column.name}' "
"should have the same categories, and the same order."
)
elif not is_dtype_equal(left_column, right_column):
raise ValueError(
f"Both columns should have the same type - "
f"'{left_column.name}' has {left_column.dtype} type;"
f"'{right_column.name}' has {right_column.dtype} type."
)
if (op in less_than_join_types.union(greater_than_join_types)) & (
(is_string_dtype(left_column) | is_categorical_dtype(left_column))
):
raise ValueError(
"non-equi joins are supported "
"only for datetime and numeric dtypes. "
f"{left_column.name} in condition "
f"({left_column.name}, {right_column.name}, {op}) "
f"has a dtype {left_column.dtype}."
)
return None
def _conditional_join_compute(
df: pd.DataFrame,
right: pd.DataFrame,
conditions: list,
how: str,
sort_by_appearance: bool,
df_columns: Any,
right_columns: Any,
) -> pd.DataFrame:
"""
This is where the actual computation
for the conditional join takes place.
A pandas DataFrame is returned.
"""
(
df,
right,
conditions,
how,
sort_by_appearance,
df_columns,
right_columns,
) = _conditional_join_preliminary_checks(
df,
right,
conditions,
how,
sort_by_appearance,
df_columns,
right_columns,
)
eq_check = False
le_lt_check = False
for condition in conditions:
left_on, right_on, op = condition
_conditional_join_type_check(df[left_on], right[right_on], op)
if op == _JoinOperator.STRICTLY_EQUAL.value:
eq_check = True
elif op in less_than_join_types.union(greater_than_join_types):
le_lt_check = True
df.index = range(len(df))
right.index = range(len(right))
multiple_conditions = len(conditions) > 1
if not multiple_conditions:
left_on, right_on, op = conditions[0]
result = _generic_func_cond_join(
df[left_on], right[right_on], op, multiple_conditions
)
if result is None:
return _create_conditional_join_empty_frame(
df, right, how, df_columns, right_columns
)
return _create_conditional_join_frame(
df,
right,
*result,
how,
sort_by_appearance,
df_columns,
right_columns,
)
if eq_check:
result = _multiple_conditional_join_eq(df, right, conditions)
elif le_lt_check:
result = _multiple_conditional_join_le_lt(df, right, conditions)
else:
result = _multiple_conditional_join_ne(df, right, conditions)
if result is None:
return _create_conditional_join_empty_frame(
df, right, how, df_columns, right_columns
)
return _create_conditional_join_frame(
df, right, *result, how, sort_by_appearance, df_columns, right_columns
)
def _less_than_indices(
left_c: pd.Series,
right_c: pd.Series,
strict: bool,
) -> tuple:
"""
Use binary search to get indices where left_c
is less than or equal to right_c.
If strict is True, then only indices
where `left_c` is less than
(but not equal to) `right_c` are returned.
A tuple of integer indexes
for left_c and right_c is returned.
"""
# no point going through all the hassle
if left_c.min() > right_c.max():
return None
any_nulls = pd.isna(right_c)
if any_nulls.any():
right_c = right_c[~any_nulls]
if right_c.empty:
return None
any_nulls = pd.isna(left_c)
if any_nulls.any():
left_c = left_c[~any_nulls]
if left_c.empty:
return None
any_nulls = None
if not right_c.is_monotonic_increasing:
right_c = right_c.sort_values(kind="stable")
left_index = left_c.index.to_numpy(dtype=int, copy=False)
left_c = extract_array(left_c, extract_numpy=True)
right_index = right_c.index.to_numpy(dtype=int, copy=False)
right_c = extract_array(right_c, extract_numpy=True)
search_indices = right_c.searchsorted(left_c, side="left")
    # if any of the positions in `search_indices`
    # is equal to the length of `right_c`,
    # the respective value in `left_c` has no value in `right_c`
    # that is greater than or equal to it,
    # and should therefore be discarded
len_right = right_c.size
rows_equal = search_indices == len_right
if rows_equal.any():
left_c = left_c[~rows_equal]
left_index = left_index[~rows_equal]
search_indices = search_indices[~rows_equal]
# the idea here is that if there are any equal values
# shift to the right to the immediate next position
# that is not equal
if strict:
rows_equal = right_c[search_indices]
rows_equal = left_c == rows_equal
# replace positions where rows are equal
# with positions from searchsorted('right')
# positions from searchsorted('right') will never
# be equal and will be the furthermost in terms of position
# example : right_c -> [2, 2, 2, 3], and we need
# positions where values are not equal for 2;
# the furthermost will be 3, and searchsorted('right')
# will return position 3.
if rows_equal.any():
replacements = right_c.searchsorted(left_c, side="right")
# now we can safely replace values
# with strictly less than positions
search_indices = np.where(rows_equal, replacements, search_indices)
# check again if any of the values
# have become equal to length of right_c
# and get rid of them
rows_equal = search_indices == len_right
if rows_equal.any():
left_c = left_c[~rows_equal]
left_index = left_index[~rows_equal]
search_indices = search_indices[~rows_equal]
if not search_indices.size:
return None
right_c = [right_index[ind:len_right] for ind in search_indices]
right_c = np.concatenate(right_c)
left_c = np.repeat(left_index, len_right - search_indices)
return left_c, right_c
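# A minimal, self-contained sketch of the searchsorted pairing performed in
# `_less_than_indices` above. The helper below is illustrative only (it is
# not part of the original module) and uses made-up values.
def _demo_less_than_pairing():
    left = np.array([2, 5])
    right = np.array([1, 3, 4])  # assumed already sorted
    # first position in `right` whose value is >= each left value
    pos = right.searchsorted(left, side="left")  # -> array([1, 3])
    # a position equal to len(right) means no right value qualifies
    keep = pos < right.size
    pos = pos[keep]
    left_idx = np.repeat(np.flatnonzero(keep), right.size - pos)
    right_idx = np.concatenate([np.arange(p, right.size) for p in pos])
    return left_idx, right_idx  # ([0, 0], [1, 2]): 2 <= 3 and 2 <= 4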
def _greater_than_indices(
left_c: pd.Series,
right_c: pd.Series,
strict: bool,
multiple_conditions: bool,
) -> tuple:
"""
Use binary search to get indices where left_c
is greater than or equal to right_c.
If strict is True, then only indices
where `left_c` is greater than
(but not equal to) `right_c` are returned.
if multiple_conditions is False, a tuple of integer indexes
for left_c and right_c is returned;
else a tuple of the index for left_c, right_c, as well
as the positions of left_c in right_c is returned.
"""
# quick break, avoiding the hassle
if left_c.max() < right_c.min():
return None
any_nulls = pd.isna(right_c)
if any_nulls.any():
right_c = right_c[~any_nulls]
if right_c.empty:
return None
any_nulls = pd.isna(left_c)
if any_nulls.any():
left_c = left_c[~any_nulls]
if left_c.empty:
return None
any_nulls = None
if not right_c.is_monotonic_increasing:
right_c = right_c.sort_values(kind="stable")
left_index = left_c.index.to_numpy(dtype=int, copy=False)
left_c = extract_array(left_c, extract_numpy=True)
right_index = right_c.index.to_numpy(dtype=int, copy=False)
right_c = extract_array(right_c, extract_numpy=True)
search_indices = right_c.searchsorted(left_c, side="right")
# if any of the positions in `search_indices`
# is equal to 0 (less than 1), it implies that
# left_c[position] is not greater than any value
# in right_c
rows_equal = search_indices < 1
if rows_equal.any():
left_c = left_c[~rows_equal]
left_index = left_index[~rows_equal]
search_indices = search_indices[~rows_equal]
# the idea here is that if there are any equal values
# shift downwards to the immediate next position
# that is not equal
if strict:
rows_equal = right_c[search_indices - 1]
rows_equal = left_c == rows_equal
# replace positions where rows are equal with
# searchsorted('left');
# however there can be scenarios where positions
# from searchsorted('left') would still be equal;
# in that case, we shift down by 1
if rows_equal.any():
replacements = right_c.searchsorted(left_c, side="left")
# `left` might result in values equal to len right_c
replacements = np.where(
replacements == right_c.size, replacements - 1, replacements
)
# now we can safely replace values
# with strictly greater than positions
search_indices = np.where(rows_equal, replacements, search_indices)
# any value less than 1 should be discarded
# since the lowest value for binary search
# with side='right' should be 1
rows_equal = search_indices < 1
if rows_equal.any():
left_c = left_c[~rows_equal]
left_index = left_index[~rows_equal]
search_indices = search_indices[~rows_equal]
if not search_indices.size:
return None
if multiple_conditions:
return left_index, right_index, search_indices
right_c = [right_index[:ind] for ind in search_indices]
right_c = np.concatenate(right_c)
left_c = np.repeat(left_index, search_indices)
return left_c, right_c
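# Companion sketch for `_greater_than_indices` (illustrative only, not part
# of the original module): with side="right", the searchsorted position
# counts how many sorted right values are <= each left value, so each left
# row is paired with the right rows before that position.
def _demo_greater_than_pairing():
    left = np.array([3, 0])
    right = np.array([1, 2, 4])  # assumed already sorted
    pos = right.searchsorted(left, side="right")  # -> array([2, 0])
    keep = pos > 0  # position 0 means nothing in `right` is <= the left value
    pos = pos[keep]
    left_idx = np.repeat(np.flatnonzero(keep), pos)
    right_idx = np.concatenate([np.arange(p) for p in pos])
    return left_idx, right_idx  # ([0, 0], [0, 1]): 3 >= 1 and 3 >= 2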
def _not_equal_indices(left_c: pd.Series, right_c: pd.Series) -> tuple:
"""
Use binary search to get indices where
    `left_c` is not equal to `right_c`.
It is a combination of strictly less than
and strictly greater than indices.
A tuple of integer indexes for left_c and right_c
is returned.
"""
dummy = np.array([], dtype=int)
# deal with nulls
l1_nulls = dummy
r1_nulls = dummy
l2_nulls = dummy
r2_nulls = dummy
any_left_nulls = left_c.isna()
any_right_nulls = right_c.isna()
if any_left_nulls.any():
l1_nulls = left_c.index[any_left_nulls.array]
l1_nulls = l1_nulls.to_numpy(copy=False)
r1_nulls = right_c.index
# avoid NAN duplicates
if any_right_nulls.any():
r1_nulls = r1_nulls[~any_right_nulls.array]
r1_nulls = r1_nulls.to_numpy(copy=False)
nulls_count = l1_nulls.size
# blow up nulls to match length of right
l1_nulls = np.tile(l1_nulls, r1_nulls.size)
# ensure length of right matches left
if nulls_count > 1:
r1_nulls = np.repeat(r1_nulls, nulls_count)
if any_right_nulls.any():
r2_nulls = right_c.index[any_right_nulls.array]
r2_nulls = r2_nulls.to_numpy(copy=False)
l2_nulls = left_c.index
nulls_count = r2_nulls.size
# blow up nulls to match length of left
r2_nulls = np.tile(r2_nulls, l2_nulls.size)
# ensure length of left matches right
if nulls_count > 1:
l2_nulls = np.repeat(l2_nulls, nulls_count)
l1_nulls = np.concatenate([l1_nulls, l2_nulls])
r1_nulls = np.concatenate([r1_nulls, r2_nulls])
outcome = _less_than_indices(left_c, right_c, strict=True)
if outcome is None:
lt_left = dummy
lt_right = dummy
else:
lt_left, lt_right = outcome
outcome = _greater_than_indices(
left_c, right_c, strict=True, multiple_conditions=False
)
if outcome is None:
gt_left = dummy
gt_right = dummy
else:
gt_left, gt_right = outcome
left_c = np.concatenate([lt_left, gt_left, l1_nulls])
right_c = np.concatenate([lt_right, gt_right, r1_nulls])
if (not left_c.size) & (not right_c.size):
return None
return left_c, right_c
def _eq_indices(
left_c: pd.Series,
right_c: pd.Series,
) -> tuple:
"""
Use binary search to get indices where left_c
is equal to right_c.
Returns a tuple of the left_index, right_index,
lower_boundary and upper_boundary.
"""
# no point going through all the hassle
if left_c.min() > right_c.max():
return None
if left_c.max() < right_c.min():
return None
any_nulls = pd.isna(right_c)
if any_nulls.any():
right_c = right_c[~any_nulls]
if right_c.empty:
return None
any_nulls = pd.isna(left_c)
if any_nulls.any():
left_c = left_c[~any_nulls]
if left_c.empty:
return None
any_nulls = None
if not right_c.is_monotonic_increasing:
right_c = right_c.sort_values(kind="stable")
left_index = left_c.index.to_numpy(dtype=int, copy=False)
left_c = extract_array(left_c, extract_numpy=True)
right_index = right_c.index.to_numpy(dtype=int, copy=False)
right_c = extract_array(right_c, extract_numpy=True)
lower_boundary = right_c.searchsorted(left_c, side="left")
upper_boundary = right_c.searchsorted(left_c, side="right")
keep_rows = lower_boundary < upper_boundary
if not keep_rows.any():
return None
if not keep_rows.all():
left_index = left_index[keep_rows]
lower_boundary = lower_boundary[keep_rows]
upper_boundary = upper_boundary[keep_rows]
return left_index, right_index, lower_boundary, upper_boundary
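# Sketch of the boundary idea in `_eq_indices` (illustrative helper only,
# with made-up values): for each left value, searchsorted with side="left"
# and side="right" brackets the run of equal values in the sorted right column.
def _demo_eq_boundaries():
    left = np.array([2, 7])
    right = np.array([1, 2, 2, 5])  # assumed already sorted
    lower = right.searchsorted(left, side="left")   # -> array([1, 4])
    upper = right.searchsorted(left, side="right")  # -> array([3, 4])
    keep = lower < upper  # keep rows that have at least one equal match
    return lower[keep], upper[keep]  # ([1], [3]): right[1:3] are all == 2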
def _generic_func_cond_join(
left_c: pd.Series,
right_c: pd.Series,
op: str,
multiple_conditions: bool,
) -> tuple:
"""
Generic function to call any of the individual functions
(_less_than_indices, _greater_than_indices,
or _not_equal_indices).
"""
strict = False
if op in {
_JoinOperator.GREATER_THAN.value,
_JoinOperator.LESS_THAN.value,
_JoinOperator.NOT_EQUAL.value,
}:
strict = True
if op in less_than_join_types:
return _less_than_indices(left_c, right_c, strict)
elif op in greater_than_join_types:
return _greater_than_indices(
left_c, right_c, strict, multiple_conditions
)
elif op == _JoinOperator.NOT_EQUAL.value:
return _not_equal_indices(left_c, right_c)
def _generate_indices(
left_index: np.ndarray, right_index: np.ndarray, conditions: list
) -> tuple:
"""
Run a for loop to get the final indices.
This iteratively goes through each condition,
builds a boolean array,
and gets indices for rows that meet the condition requirements.
`conditions` is a list of tuples, where a tuple is of the form:
`(Series from df, Series from right, operator)`.
"""
for condition in conditions:
left_c, right_c, op = condition
left_c = extract_array(left_c, extract_numpy=True)[left_index]
right_c = extract_array(right_c, extract_numpy=True)[right_index]
op = operator_map[op]
mask = op(left_c, right_c)
if not mask.any():
return None
if is_extension_array_dtype(mask):
mask = mask.to_numpy(dtype=bool, na_value=False)
if not mask.all():
left_index = left_index[mask]
right_index = right_index[mask]
return left_index, right_index
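# Small sketch of the per-condition filtering done in `_generate_indices`
# (illustrative values only): once rows are aligned, each remaining
# condition produces a boolean mask that trims both index arrays.
def _demo_mask_filtering():
    import operator
    left_index = np.array([0, 0, 1])
    right_index = np.array([1, 2, 0])
    left_vals = np.array([10, 10, 7])   # left values at the aligned rows
    right_vals = np.array([8, 12, 7])   # right values at the aligned rows
    mask = operator.lt(left_vals, right_vals)  # one extra "<" condition
    return left_index[mask], right_index[mask]  # ([0], [2])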
def _multiple_conditional_join_ne(
df: pd.DataFrame, right: pd.DataFrame, conditions: list
) -> tuple:
"""
Get indices for multiple conditions,
where all the operators are `!=`.
Returns a tuple of (left_index, right_index)
"""
# currently, there is no optimization option here
# not equal typically combines less than
# and greater than, so a lot more rows are returned
# than just less than or greater than
# here we get indices for the first condition in conditions
# then use those indices to get the final indices,
# using _generate_indices
first, *rest = conditions
left_on, right_on, op = first
# get indices from the first condition
result = _generic_func_cond_join(
df[left_on], right[right_on], op, multiple_conditions=False
)
if result is None:
return None
rest = (
(df[left_on], right[right_on], op) for left_on, right_on, op in rest
)
return _generate_indices(*result, rest)
def _multiple_conditional_join_eq(
df: pd.DataFrame, right: pd.DataFrame, conditions: list
) -> tuple:
"""
Get indices for multiple conditions,
if any of the conditions has an `==` operator.
Returns a tuple of (df_index, right_index)
"""
# TODO
# this uses the idea in the `_range_indices` function
# for less than and greater than;
# I'd like to believe there is a smarter/more efficient way of doing this
# where the filter occurs within the join, and avoids a blow-up
# the current implementation uses
# a list comprehension to find first matches
# in a bid to reduce the blow up size ...
    # this applies only to integers/dates
    # and only offers advantages in scenarios
    # where the right keys are duplicated (one-to-many joins);
    # for one-to-one joins, or for string/categorical keys,
    # use merge, as it is significantly faster
    # than a binary search
eqs = [
(left_on, right_on)
for left_on, right_on, op in conditions
if op == _JoinOperator.STRICTLY_EQUAL.value
]
left_on, right_on = zip(*eqs)
left_on = [*left_on]
right_on = [*right_on]
strings_or_category = any(
col
for col in left_on
if (is_string_dtype(df[col]) | is_categorical_dtype(df[col]))
)
if (
strings_or_category
| (not right.duplicated(subset=right_on).any(axis=None))
| (not df.duplicated(subset=left_on).any(axis=None))
):
rest = (
(df[left_on], right[right_on], op)
for left_on, right_on, op in conditions
if op != _JoinOperator.STRICTLY_EQUAL.value
)
left_index, right_index = _MergeOperation(
df,
right,
left_on=left_on,
right_on=right_on,
sort=False,
copy=False,
)._get_join_indexers()
if not left_index.size:
return None
return _generate_indices(left_index, right_index, rest)
left_on, right_on = eqs[0]
outcome = _eq_indices(df[left_on], right[right_on])
if not outcome:
return None
left_index, right_index, lower_boundary, upper_boundary = outcome
eq_check = [condition for condition in conditions if condition != eqs[0]]
rest = [
(df.loc[left_index, left_on], right.loc[right_index, right_on], op)
for left_on, right_on, op in eq_check
]
rest = [
(
extract_array(left_c, extract_numpy=True),
| extract_array(right_c, extract_numpy=True) | pandas.core.construction.extract_array |
import datetime
import json
import pathlib
import numpy as np
import pandas as pd
def downsample(df, offset):
"""Reduce dataframe by resampling according to frequency offset/rule
Parameters
----------
df : pandas.core.frame.DataFrame
A pandas dataframe where the index is the date.
offset : str
offset rule to apply for downsampling
see https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#dateoffset-objects
Returns
-------
pandas.core.frame.DataFrame
A pandas dataframe that is downsampled
"""
    # convert the index to a DatetimeIndex
df.index = pd.to_datetime(df.index)
# downsample based on offset
resampled = df.resample(offset).asfreq()
# remove dates for which no data is available
resampled.dropna(how='all', inplace=True)
# add last date if it is not present
if df.iloc[-1].name not in resampled.index:
resampled = pd.concat([resampled, df.iloc[[-1]]])
    # convert the DatetimeIndex back to strings, as plotly is expecting string values
resampled.index = resampled.index.strftime('%Y-%m-%d')
return resampled
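# Hedged usage sketch for `downsample`; the frame below is made up and the
# helper is for illustration only (it is not called by the app).
def _demo_downsample():
    demo_df = pd.DataFrame(
        {'cases': range(10)},
        index=pd.date_range('2021-01-01', periods=10).strftime('%Y-%m-%d'),
    )
    # keeps one row per 7 days plus the last available date (2021-01-10)
    return downsample(demo_df, '7d')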
def prepare_vaccination_by_age_data(data):
# calculate population that hasn't received any dose yet
data['0d'] = data['pop 2021'] - (data['1d'])
# 1d refers to "at least 1 dose" (also contains 2nd doses)
# calculate actual 1 dose
data['1d_plus'] = data['1d']
data['1d'] = data['1d_plus'] - data['2d']
data['total'] = data[['0d', '1d', '2d']].sum(axis=1)
# create mapping of age group to its population
total = dict(data['total'])
# reverse rows so that it will be by ascending age in the figure
data = data.iloc[::-1]
# convert wide to long format
data_long = data.melt(
ignore_index=False,
value_vars=[
'2d',
'1d',
'0d',
]
)
# calculate the percentage of each data point
data_long['perc'] = data_long.apply(
lambda x: x['value'] / total[x.name] * 100, axis=1
)
return data_long
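# Sketch of the input expected by `prepare_vaccination_by_age_data`: an
# age-indexed frame with 'pop 2021', '1d' (at least one dose) and '2d'
# columns. The numbers below are invented for illustration; the helper is
# not called by the app.
def _demo_vaccination_by_age():
    demo = pd.DataFrame(
        {'pop 2021': [100, 200], '1d': [60, 150], '2d': [40, 100]},
        index=['10-19', '20-29'],
    )
    return prepare_vaccination_by_age_data(demo)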
# get relative data folder
PATH = pathlib.Path(__file__).parent
DATA_PATH = PATH.joinpath('data').resolve()
# load data #####
# Montreal geojson
with open(DATA_PATH.joinpath('montreal_shapefile.geojson'), encoding='utf-8') as shapefile:
mtl_geojson = json.load(shapefile)
# Montreal cases per borough
mtl_boroughs_csv = DATA_PATH.joinpath('processed', 'data_mtl_boroughs.csv')
mtl_boroughs_df = pd.read_csv(
mtl_boroughs_csv,
encoding='utf-8',
na_values='na',
index_col=0,
header=[0, 1]
).dropna(axis=1, how='all')
# downsample
mtl_boroughs = downsample(mtl_boroughs_df, '7d')
# prepare to use in figure
# unstack multi index so that boroughs are its own column as well
mtl_boroughs = mtl_boroughs.unstack().unstack(level=1).reset_index()
mtl_boroughs.sort_values('date', inplace=True)
# Montreal data
data_mtl = pd.read_csv(DATA_PATH.joinpath('processed', 'data_mtl.csv'), encoding='utf-8', na_values='na')
data_mtl_by_age = pd.read_csv(
DATA_PATH.joinpath('processed', 'data_mtl_age.csv'),
encoding='utf-8',
index_col=0,
na_values='na'
)
data_mtl_vaccination = pd.read_csv(DATA_PATH.joinpath('processed', 'data_mtl_vaccination.csv'))
data_mtl_vaccination_age = pd.read_csv(DATA_PATH.joinpath('processed', 'data_mtl_vaccination_age.csv'), index_col=0)
# QC data
data_qc = pd.read_csv(DATA_PATH.joinpath('processed', 'data_qc.csv'), encoding='utf-8', na_values='na')
data_qc_hosp = pd.read_csv(DATA_PATH.joinpath('processed', 'data_qc_hospitalisations.csv'), encoding='utf-8')
# Vaccination_data
data_vaccines = pd.read_csv(DATA_PATH.joinpath('processed', 'data_vaccines.csv'), encoding='utf-8', na_values='na')
data_qc_vaccination = pd.read_csv(DATA_PATH.joinpath('processed', 'data_qc_vaccination.csv'))
data_qc_vaccination_age = pd.read_csv(DATA_PATH.joinpath('processed', 'data_qc_vaccination_age.csv'), index_col=0)
# Variants
data_variants = pd.read_csv(DATA_PATH.joinpath('processed', 'data_variants.csv'), index_col=0, na_values='na')
# MTL deaths by location data
data_mtl_death_loc = pd.read_csv(
DATA_PATH.joinpath('processed', 'data_mtl_death_loc.csv'),
encoding='utf-8',
na_values='na'
)
# Last update date
# Display 1 day after the latest data as data from the previous day are posted
latest_mtl_date = datetime.date.fromisoformat(data_mtl['date'].iloc[-1]) + datetime.timedelta(days=1)
latest_update_date = latest_mtl_date.isoformat()
# Mini info boxes
data_mtl_totals = pd.read_csv(DATA_PATH.joinpath('processed', 'data_mtl_totals.csv'), index_col=0, na_values='na')
data_qc_totals = pd.read_csv(DATA_PATH.joinpath('processed', 'data_qc_totals.csv'), index_col=0, na_values='na')
# Source for 2021 pop estimates: https://publications.msss.gouv.qc.ca/msss/document-001617/
mtl_pop = 2078464 # Région sociosanitaire 06 - Montreal, 2021 projection
qc_pop = 8591866 # QC Total, 2021 projection
# MTL
latest_cases_mtl = str(int(data_mtl_totals['cases'].dropna().iloc[-1]))
new_cases_mtl = int(data_mtl_totals['cases'].diff().iloc[-1])
latest_deaths_mtl = str(int(data_mtl_totals['deaths'].dropna().iloc[-1]))
new_deaths_mtl = int(data_mtl_totals['deaths'].diff().iloc[-1])
new_hosp_mtl = int(data_mtl_totals['hos_cum_reg_n'].diff().iloc[-1])
new_icu_mtl = int(data_mtl_totals['hos_cum_si_n'].diff().iloc[-1])
pos_rate_mtl = float(data_mtl['psi_quo_pos_t'].dropna().iloc[-1])
pos_rate_change_mtl = float(data_mtl['psi_quo_pos_t'].dropna().iloc[-1] - data_mtl['psi_quo_pos_t'].dropna().iloc[-2])
latest_recovered_mtl = str(int(data_mtl_totals['recovered'].dropna().iloc[-1]))
new_recovered_mtl = int(data_mtl_totals['recovered'].diff().iloc[-1])
if pos_rate_mtl < 5:
pos_rate_mtl_colour = '#83AF9B'
else:
pos_rate_mtl_colour = '#D33505'
# 7-days incidence per 100k (and % change vs previous 7 days)
incid_per100k_7d_mtl = float(data_mtl['new_cases'].dropna().iloc[-7:].sum()) / (mtl_pop / 100000)
incid_per100k_last7d_mtl = float(data_mtl['new_cases'].dropna().iloc[-14:-7].sum()) / (mtl_pop / 100000)
incid_per100K_perc_change_mtl = ((incid_per100k_7d_mtl - incid_per100k_last7d_mtl) / incid_per100k_last7d_mtl) * 100
if incid_per100k_7d_mtl < 10:
incid_per100k_7d_mtl_colour = '#7ea47c'
elif incid_per100k_7d_mtl < 25:
incid_per100k_7d_mtl_colour = '#ecd93b'
elif incid_per100k_7d_mtl < 50:
incid_per100k_7d_mtl_colour = '#dfae5a'
elif incid_per100k_7d_mtl < 100:
incid_per100k_7d_mtl_colour = '#df825a'
elif incid_per100k_7d_mtl < 200:
incid_per100k_7d_mtl_colour = '#CC0101'
elif incid_per100k_7d_mtl < 300:
incid_per100k_7d_mtl_colour = '#A80101'
elif incid_per100k_7d_mtl < 500:
incid_per100k_7d_mtl_colour = '#800000'
else:
incid_per100k_7d_mtl_colour = '#600000'
# QC
latest_cases_qc = str(int(data_qc_totals['cases'].dropna().iloc[-1]))
new_cases_qc = int(data_qc_totals['cases'].diff().iloc[-1])
latest_deaths_qc = str(int(data_qc_totals['deaths'].dropna().iloc[-1]))
new_deaths_qc = int(data_qc_totals['deaths'].diff().iloc[-1])
new_hosp_qc = int(data_qc_totals['hos_cum_reg_n'].diff().iloc[-1])
new_icu_qc = int(data_qc_totals['hos_cum_si_n'].diff().iloc[-1])
pos_rate_qc = float(data_qc['psi_quo_pos_t'].dropna().round(2).iloc[-1])
pos_rate_change_qc = float(data_qc['psi_quo_pos_t'].dropna().iloc[-1] - data_qc['psi_quo_pos_t'].dropna().iloc[-2])
latest_recovered_qc = str(int(data_qc_totals['recovered'].dropna().iloc[-1]))
new_recovered_qc = int(data_qc_totals['recovered'].diff().iloc[-1])
if pos_rate_qc < 5:
pos_rate_qc_colour = '#83AF9B'
else:
pos_rate_qc_colour = '#D33505'
# 7-days incidence per 100k (and % change vs previous 7 days)
incid_per100k_7d_qc = float(data_qc['new_cases'].dropna().iloc[-7:].sum()) / (qc_pop / 100000)
incid_per100k_last7d_qc = float(data_qc['new_cases'].dropna().iloc[-14:-7].sum()) / (qc_pop / 100000)
incid_per100K_perc_change_qc = ((incid_per100k_7d_qc - incid_per100k_last7d_qc) / incid_per100k_last7d_qc) * 100
if incid_per100k_7d_qc < 10:
incid_per100k_7d_qc_colour = '#7ea47c'
elif incid_per100k_7d_qc < 25:
incid_per100k_7d_qc_colour = '#ecd93b'
elif incid_per100k_7d_qc < 50:
incid_per100k_7d_qc_colour = '#dfae5a'
elif incid_per100k_7d_qc < 100:
incid_per100k_7d_qc_colour = '#df825a'
elif incid_per100k_7d_qc < 200:
incid_per100k_7d_qc_colour = '#CC0101'
elif incid_per100k_7d_qc < 300:
incid_per100k_7d_qc_colour = '#A80101'
elif incid_per100k_7d_qc < 500:
incid_per100k_7d_qc_colour = '#800000'
else:
incid_per100k_7d_qc_colour = '#600000'
# Vaccination info boxes
# Display 1 day after the latest data as data from the previous day are posted
latest_vaccination_update_date = (
datetime.date.fromisoformat(data_qc_vaccination['date'].iloc[-1])
+ datetime.timedelta(days=1)
)
new_doses_mtl_1d = data_mtl_vaccination['new_doses_1d'].iloc[-1]
new_doses_mtl_2d = data_mtl_vaccination['new_doses_2d'].iloc[-1]
total_doses_mtl_1d = data_mtl_vaccination['total_doses_1d'].iloc[-1]
total_doses_mtl_2d = data_mtl_vaccination['total_doses_2d'].iloc[-1]
perc_vacc_mtl_1d = total_doses_mtl_1d / mtl_pop * 100
perc_vacc_mtl_2d = total_doses_mtl_2d / mtl_pop * 100
new_doses_qc_1d = data_qc_vaccination['new_doses_1d'].iloc[-1]
new_doses_qc_2d = data_qc_vaccination['new_doses_2d'].iloc[-1]
total_doses_qc_1d = data_qc_vaccination['total_doses_1d'].iloc[-1]
total_doses_qc_2d = data_qc_vaccination['total_doses_2d'].iloc[-1]
perc_vacc_qc_1d = total_doses_qc_1d / qc_pop * 100
perc_vacc_qc_2d = total_doses_qc_2d / qc_pop * 100
# Make MTL histogram data tidy
# downsample then reset_index to have date column
# mtl_age_data = downsample(data_mtl_by_age, '7d').reset_index().melt(
# id_vars='date', value_vars=[
# 'cases_mtl_0-4_norm', 'cases_mtl_5-9_norm',
# 'cases_mtl_10-19_norm', 'cases_mtl_20-29_norm',
# 'cases_mtl_30-39_norm', 'cases_mtl_40-49_norm',
# 'cases_mtl_50-59_norm', 'cases_mtl_60-69_norm',
# 'cases_mtl_70-79_norm', 'cases_mtl_80+_norm',
# 'cases_mtl_0-4_per100000_norm', 'cases_mtl_5-9_per100000_norm',
# 'cases_mtl_10-19_per100000_norm', 'cases_mtl_20-29_per100000_norm',
# 'cases_mtl_30-39_per100000_norm', 'cases_mtl_40-49_per100000_norm',
# 'cases_mtl_50-59_per100000_norm', 'cases_mtl_60-69_per100000_norm',
# 'cases_mtl_70-79_per100000_norm', 'cases_mtl_80+_per100000_norm'
# ],
# var_name='age_group', value_name='percent'
# ).dropna()
# set up MTL age data using 7-day mov avg and resample by calendar week
mtl_age = data_mtl_by_age[[
'cases_mtl_0-4', 'cases_mtl_5-9', 'cases_mtl_10-19', 'cases_mtl_20-29',
'cases_mtl_30-39', 'cases_mtl_40-49', 'cases_mtl_50-59',
'cases_mtl_60-69', 'cases_mtl_70-79', 'cases_mtl_80+'
]]
# clean columns
mtl_age.columns = mtl_age.columns.str.split('_').str[-1]
# determine daily new cases and its 7-day rolling avg
mtl_age = mtl_age.diff().dropna()
mtl_age = mtl_age.rolling(7).mean().dropna().round()
# drop numbers < 0 when corrections happened
mtl_age = mtl_age[mtl_age >= 0]
# calculate % of each age group among sum of cases
mtl_age = mtl_age.div(mtl_age.sum(axis=1), axis=0) * 100
# resample weekly
mtl_age.index = | pd.to_datetime(mtl_age.index) | pandas.to_datetime |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Special thanks to @KY-Ng for visualisation code!
'''
import numpy as np # pip3 install numpy
from scipy.integrate import ode, solve_ivp # pip3 install scipy
import pandas as pd # pip3 install pandas
import matplotlib.pyplot as plt # pip3 install matplotlib
# { -- CHANGE PARAMETERS HERE
b = 0.1
g = 0.05
I_0 = 0.01
steps = 150
# -- }
def sir(times, y_0, b, g):
S, I, R = y_0
dS = -b * S * I # S
dI = b * S * I - g * I # I
dR = g * I # R
return [dS, dI, dR]
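# Sanity note on the parameters above (illustrative reasoning, not output of
# the script): the basic reproduction number is R0 = b / g = 0.1 / 0.05 = 2,
# so with S(0) close to 1 the infection initially grows because S * R0 > 1.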
y_0 = [1.0 - I_0, I_0, 0.0]
results = solve_ivp(
    fun=lambda t, y: sir(t, y, b, g),
    t_span=[0, steps],
    y0=y_0,
    t_eval=range(steps + 1),
)
zipped_list = list(zip(results.y[0], results.y[1], results.y[2]))
for l in zipped_list: print(l)
df = | pd.DataFrame({"S": results.y[0], "I": results.y[1], "R": results.y[2]}) | pandas.DataFrame |
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
import pandas as pd
def get_reddit_data(subreddit, date):
"""
    Gets top 26 frontpage titles from 'subreddit' on 'date'
:param subreddit: ex: 'r/bitcoin'
:param date: in 'YYYYMMDD'
:return titles: a list of strings of titles
"""
titles = []
url = "https://web.archive.org/web/" + date + "/reddit.com/" + subreddit
print(url)
driver.get(url)
try:
sitetable = driver.find_element_by_id("siteTable")
posts = sitetable.find_elements_by_tag_name("div")
for post in posts:
if len(post.find_elements_by_class_name("title")) > 0:
title = post.find_element_by_class_name("title").text
titles.append(title)
        titles = list(set(titles))  # de-duplicate once all posts are processed
        return titles
except NoSuchElementException:
return ['0'] * 26
def format_date(date): # for way-way-back machine urls
"""
Reformats date so that wayback machine will like it
:param date: in datetime64
:return:
"""
year = str(date.year)
month = str(date.month)
if len(month) < 2:
month = "0" + month
day = str(date.day)
if len(day) < 2:
day = "0" + day
return year + month + day
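# Hedged usage sketch tying the two helpers above together; it is
# illustrative only and assumes a selenium `driver` has been created
# elsewhere in this script (e.g. driver = webdriver.Firefox()), with
# made-up subreddit/date values.
def _demo_scrape_one_day():
    day = pd.Timestamp('2018-01-15')
    return get_reddit_data('r/bitcoin', format_date(day))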
def get_reddit_dataframe(begin, fin, subreddit, writefile):
"""
    Makes a big dataframe indexed by a DatetimeIndex for every day from begin
    to fin. Values are top reddit posts; columns separate titles on a given
    day/row.
    :param begin: starting date of dataframe
    :param fin: ending date of dataframe
    :param subreddit: subreddit to scrape
    :param writefile: output file the scraped data is written to
    :return: none
"""
    timeindex = pd.date_range(start=begin, end=fin, freq='d')  # one row per day
data = | pd.DataFrame() | pandas.DataFrame |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for `gffpandas` package."""
import gffpandas.gffpandas as gff3pd
import pandas as pd
import os
written_df = pd.DataFrame([
['NC_016810.1', 'RefSeq', 'region', 1, 4000, '.', '+', '.',
'Dbxref=taxon:216597;ID=id0;gbkey=Src;genome=genomic;mol_type='
'genomic DNA;serovar=Typhimurium;strain=SL1344'],
['NC_016810.1', 'RefSeq', 'gene', 1, 20, '.', '+', '.',
'ID=gene1;Name=thrL;gbkey=Gene;gene=thrL;locus_tag=SL1344_0001'],
['NC_016810.1', 'RefSeq', 'CDS', 13, 235, '.', '+', '0',
'Dbxref=UniProtKB%252FTrEMBL:E1W7M4%2CGenbank:YP_005179941.1;ID='
'cds0;Name=YP_005179941.1;Parent=gene1;gbkey=CDS;product=thr operon'
' leader peptide;protein_id=YP_005179941.1;transl_table=11'],
['NC_016810.1', 'RefSeq', 'gene', 1, 20, '.', '+', '.',
'ID=gene2;Name=thrA;gbkey=Gene;gene=thrA;locus_tag=SL1344_0002'],
['NC_016810.1', 'RefSeq', 'CDS', 341, 523, '.', '+', '0',
'Dbxref=UniProtKB%252FTrEMBL:E1W7M4%2CGenbank:YP_005179941.1;ID='
'cds0;Name=YP_005179941.1;Parent=gene2;gbkey=CDS;product=thr operon'
' leader peptide;protein_id=YP_005179941.1;transl_table=11'],
['NC_016810.1', 'RefSeq', 'gene', 1, 600, '.', '-', '.',
'ID=gene3;Name=thrX;gbkey=Gene;gene=thrX;locus_tag=SL1344_0003'],
['NC_016810.1', 'RefSeq', 'CDS', 21, 345, '.', '-', '0',
'Dbxref=UniProtKB%252FTrEMBL:E1W7M4%2CGenbank:YP_005179941.1;ID='
'cds0;Name=YP_005179941.1;Parent=gene3;gbkey=CDS;product=thr operon'
' leader peptide;protein_id=YP_005179941.1;transl_table=11'],
['NC_016810.1', 'RefSeq', 'gene', 41, 255, '.', '+', '.',
'ID=gene4;Name=thrB;gbkey=Gene;gene=thrB;locus_tag=SL1344_0004'],
['NC_016810.1', 'RefSeq', 'CDS', 61, 195, '.', '+', '0',
'Dbxref=UniProtKB%252FTrEMBL:E1W7M4%2CGenbank:YP_005179941.1;ID='
'cds0;Name=YP_005179941.1;Parent=gene4;gbkey=CDS;product=thr operon'
' leader peptide;protein_id=YP_005179941.1;transl_table=11'],
['NC_016810.1', 'RefSeq', 'gene', 170, 546, '.', '+', '.',
'ID=gene5;Name=thrC;gbkey=Gene;gene=thrC;locus_tag=SL1344_0005'],
['NC_016810.1', 'RefSeq', 'CDS', 34, 335, '.', '+', '0',
'Dbxref=UniProtKB%252FTrEMBL:E1W7M4%2CGenbank:YP_005179941.1;ID='
'cds0;Name=YP_005179941.1;Parent=gene5;gbkey=CDS;product=thr operon'
' leader peptide;protein_id=YP_005179941.1;transl_table=11'],
], columns=["seq_id", "source", "type", "start", "end",
"score", "strand", "phase", "attributes"])
written_header = ('##gff-version 3\n'
'##sequence-region NC_016810.1 1 20\n')
written_csv = ('seq_id,source,type,start,end,score,strand,phase,attributes\n'
'NC_016810.1,RefSeq,region,1,4000,.,+,.,Dbxref=taxon:216597;ID='
'id0;gbkey=Src;genome=genomic;mol_type=genomic DNA;serovar='
'Typhimurium;strain=SL1344\n'
'NC_016810.1,RefSeq,gene,1,20,.,+,.,ID=gene1;Name=thrL;gbkey='
'Gene;gene=thrL;locus_tag=SL1344_0001\n'
'NC_016810.1,RefSeq,CDS,13,235,.,+,0,Dbxref=UniProtKB%252FTr'
'EMBL:E1W7M4%2CGenbank:YP_005179941.1;ID=cds0;Name=YP_0051799'
'41.1;Parent=gene1;gbkey=CDS;product=thr operon leader peptide;'
'protein_id=YP_005179941.1;transl_table=11\n'
'NC_016810.1,RefSeq,gene,1,20,.,+,.,ID=gene2;Name=thrA;gbkey='
'Gene;gene=thrA;locus_tag=SL1344_0002\n'
'NC_016810.1,RefSeq,CDS,341,523,.,+,0,Dbxref=UniProtKB%252FTr'
'EMBL:E1W7M4%2CGenbank:YP_005179941.1;ID=cds0;Name=YP_0051799'
'41.1;Parent=gene2;gbkey=CDS;product=thr operon leader peptide;'
'protein_id=YP_005179941.1;transl_table=11\n'
'NC_016810.1,RefSeq,gene,1,600,.,-,.,ID=gene3;Name=thrX;gbkey='
'Gene;gene=thrX;locus_tag=SL1344_0003\n'
'NC_016810.1,RefSeq,CDS,21,345,.,-,0,Dbxref=UniProtKB%252FTr'
'EMBL:E1W7M4%2CGenbank:YP_005179941.1;ID=cds0;Name=YP_0051799'
'41.1;Parent=gene3;gbkey=CDS;product=thr operon leader peptide;'
'protein_id=YP_005179941.1;transl_table=11\n'
'NC_016810.1,RefSeq,gene,41,255,.,+,.,ID=gene4;Name=thrB;gbkey='
'Gene;gene=thrB;locus_tag=SL1344_0004\n'
'NC_016810.1,RefSeq,CDS,61,195,.,+,0,Dbxref=UniProtKB%252FTr'
'EMBL:E1W7M4%2CGenbank:YP_005179941.1;ID=cds0;Name=YP_0051799'
'41.1;Parent=gene4;gbkey=CDS;product=thr operon leader peptide;'
'protein_id=YP_005179941.1;transl_table=11\n'
'NC_016810.1,RefSeq,gene,170,546,.,+,.,ID=gene5;Name=thrC;gbkey'
'=Gene;gene=thrC;locus_tag=SL1344_0005\n'
'NC_016810.1,RefSeq,CDS,34,335,.,+,0,Dbxref=UniProtKB%252FTr'
'EMBL:E1W7M4%2CGenbank:YP_005179941.1;ID=cds0;Name=YP_0051799'
'41.1;Parent=gene5;gbkey=CDS;product=thr operon leader peptide;'
'protein_id=YP_005179941.1;transl_table=11\n')
written_tsv = ('seq_id\tsource\ttype\tstart\tend\tscore\tstrand\tphase\t'
'attributes\n'
'NC_016810.1\tRefSeq\tregion\t1\t4000\t.\t+\t.\tDbxref=taxon:21'
'6597;ID=id0;gbkey=Src;genome=genomic;mol_type=genomic DNA;'
'serovar=Typhimurium;strain=SL1344\n'
'NC_016810.1\tRefSeq\tgene\t1\t20\t.\t+\t.\tID=gene1;Name=thrL;'
'gbkey=Gene;gene=thrL;locus_tag=SL1344_0001\n'
'NC_016810.1\tRefSeq\tCDS\t13\t235\t.\t+\t0\tDbxref=UniProtKB%2'
'52FTrEMBL:E1W7M4%2CGenbank:YP_005179941.1;ID=cds0;Name=YP_0051'
'79941.1;Parent=gene1;gbkey=CDS;product=thr operon leader '
'peptide;protein_id=YP_005179941.1;transl_table=11\n'
'NC_016810.1\tRefSeq\tgene\t1\t20\t.\t+\t.\tID=gene2;Name=thrA;'
'gbkey=Gene;gene=thrA;locus_tag=SL1344_0002\n'
'NC_016810.1\tRefSeq\tCDS\t341\t523\t.\t+\t0\tDbxref=UniProtKB%'
'252FTrEMBL:E1W7M4%2CGenbank:YP_005179941.1;ID=cds0;Name=YP_005'
'179941.1;Parent=gene2;gbkey=CDS;product=thr operon leader '
'peptide;protein_id=YP_005179941.1;transl_table=11\n'
'NC_016810.1\tRefSeq\tgene\t1\t600\t.\t-\t.\tID=gene3;Name=thrX'
';gbkey=Gene;gene=thrX;locus_tag=SL1344_0003\n'
'NC_016810.1\tRefSeq\tCDS\t21\t345\t.\t-\t0\tDbxref=UniProtKB%2'
'52FTrEMBL:E1W7M4%2CGenbank:YP_005179941.1;ID=cds0;Name=YP_0051'
'79941.1;Parent=gene3;gbkey=CDS;product=thr operon leader '
'peptide;protein_id=YP_005179941.1;transl_table=11\n'
'NC_016810.1\tRefSeq\tgene\t41\t255\t.\t+\t.\tID=gene4;Name='
'thrB;gbkey=Gene;gene=thrB;locus_tag=SL1344_0004\n'
'NC_016810.1\tRefSeq\tCDS\t61\t195\t.\t+\t0\tDbxref=UniProtKB%2'
'52FTrEMBL:E1W7M4%2CGenbank:YP_005179941.1;ID=cds0;Name=YP_0051'
'79941.1;Parent=gene4;gbkey=CDS;product=thr operon leader '
'peptide;protein_id=YP_005179941.1;transl_table=11\n'
'NC_016810.1\tRefSeq\tgene\t170\t546\t.\t+\t.\tID=gene5;Name='
'thrC;gbkey=Gene;gene=thrC;locus_tag=SL1344_0005\n'
'NC_016810.1\tRefSeq\tCDS\t34\t335\t.\t+\t0\tDbxref=UniProt'
'KB%252FTrEMBL:E1W7M4%2CGenbank:YP_005179941.1;ID=cds0;Name='
'YP_005179941.1;Parent=gene5;gbkey=CDS;product=thr operon '
'leader peptide;protein_id=YP_005179941.1;transl_table=11\n')
written_gff = ('##gff-version 3\n'
'##sequence-region NC_016810.1 1 20\n'
'NC_016810.1 RefSeq region 1 4000 . +'
' . Dbxref=taxon:216597;ID=id0;gbkey=Src;genome=ge'
'nomic;mol_type=genomic DNA;serovar=Typhimurium;strain=SL1344\n'
'NC_016810.1 RefSeq gene 1 20 . +'
' . ID=gene1;Name=thrL;gbkey=Gene;gene=thrL;locus_'
'tag=SL1344_0001\n'
'NC_016810.1 RefSeq CDS 13 235 . +'
' 0 Dbxref=UniProtKB%252FTrEMBL:E1W7M4%2CGenbank:Y'
'P_005179941.1;ID=cds0;Name=YP_005179941.1;Parent=gene1;gbkey=C'
'DS;product=thr operon leader peptide;protein_id=YP_005179941.1'
';transl_table=11\n'
'NC_016810.1 RefSeq gene 1 20 . +'
' . ID=gene2;Name=thrA;gbkey=Gene;gene=thrA;locus_'
'tag=SL1344_0002\n'
'NC_016810.1 RefSeq CDS 341 523 . +'
' 0 Dbxref=UniProtKB%252FTrEMBL:E1W7M4%2CGenbank:Y'
'P_005179941.1;ID=cds0;Name=YP_005179941.1;Parent=gene2;gbkey=C'
'DS;product=thr operon leader peptide;protein_id=YP_005179941.1'
';transl_table=11\n'
'NC_016810.1 RefSeq gene 1 600 . -'
' . ID=gene3;Name=thrX;gbkey=Gene;gene=thrX;locus_'
'tag=SL1344_0003\n'
'NC_016810.1 RefSeq CDS 21 345 . -'
' 0 Dbxref=UniProtKB%252FTrEMBL:E1W7M4%2CGenbank:Y'
'P_005179941.1;ID=cds0;Name=YP_005179941.1;Parent=gene3;gbkey=C'
'DS;product=thr operon leader peptide;protein_id=YP_005179941.1'
';transl_table=11\n'
'NC_016810.1 RefSeq gene 41 255 . +'
' . ID=gene4;Name=thrB;gbkey=Gene;gene=thrB;locus_'
'tag=SL1344_0004\n'
'NC_016810.1 RefSeq CDS 61 195 . +'
' 0 Dbxref=UniProtKB%252FTrEMBL:E1W7M4%2CGenbank:Y'
'P_005179941.1;ID=cds0;Name=YP_005179941.1;Parent=gene4;gbkey=C'
'DS;product=thr operon leader peptide;protein_id=YP_005179941.1'
';transl_table=11\n'
'NC_016810.1 RefSeq gene 170 546 . +'
' . ID=gene5;Name=thrC;gbkey=Gene;gene=thrC;locus_'
'tag=SL1344_0005\n'
'NC_016810.1 RefSeq CDS 34 335 . +'
' 0 Dbxref=UniProtKB%252FTrEMBL:E1W7M4%2CGenbank:Y'
'P_005179941.1;ID=cds0;Name=YP_005179941.1;Parent=gene5;gbkey=C'
'DS;product=thr operon leader peptide;protein_id=YP_005179941.1'
';transl_table=11\n')
written_filtered_length = pd.DataFrame([
['NC_016810.1', 'RefSeq', 'gene', 1, 20, '.', '+', '.',
'ID=gene1;Name=thrL;gbkey=Gene;gene=thrL;locus_tag=SL1344_0001'],
['NC_016810.1', 'RefSeq', 'CDS', 13, 235, '.', '+', '0',
'Dbxref=UniProtKB%252FTrEMBL:E1W7M4%2CGenbank:YP_005179941.1;ID=cds0;Name'
'=YP_005179941.1;Parent=gene1;gbkey=CDS;product=thr operon leader peptide'
';protein_id=YP_005179941.1;transl_table=11'],
['NC_016810.1', 'RefSeq', 'gene', 1, 20, '.', '+', '.',
'ID=gene2;Name=thrA;gbkey=Gene;gene=thrA;locus_tag=SL1344_0002'],
['NC_016810.1', 'RefSeq', 'CDS', 341, 523, '.', '+', '0',
'Dbxref=UniProtKB%252FTrEMBL:E1W7M4%2CGenbank:YP_005179941.1;ID=cds0;Name'
'=YP_005179941.1;Parent=gene2;gbkey=CDS;product=thr operon leader peptide'
';protein_id=YP_005179941.1;transl_table=11'],
['NC_016810.1', 'RefSeq', 'gene', 41, 255, '.', '+', '.',
'ID=gene4;Name=thrB;gbkey=Gene;gene=thrB;locus_tag=SL1344_0004'],
['NC_016810.1', 'RefSeq', 'CDS', 61, 195, '.', '+', '0',
'Dbxref=UniProtKB%252FTrEMBL:E1W7M4%2CGenbank:YP_005179941.1;ID=cds0;Name'
'=YP_005179941.1;Parent=gene4;gbkey=CDS;product=thr operon leader peptide'
';protein_id=YP_005179941.1;transl_table=11'],
], columns=["seq_id", "source", "type", "start", "end",
"score", "strand", "phase", "attributes"],
index=[1, 2, 3, 4, 7, 8])
compare_get_feature_by_attribute = pd.DataFrame([
['NC_016810.1', 'RefSeq', 'gene', 1, 20, '.', '+', '.',
'ID=gene1;Name=thrL;gbkey=Gene;gene=thrL;locus_tag=SL1344_0001'],
['NC_016810.1', 'RefSeq', 'gene', 1, 20, '.', '+', '.',
'ID=gene2;Name=thrA;gbkey=Gene;gene=thrA;locus_tag=SL1344_0002'],
['NC_016810.1', 'RefSeq', 'gene', 1, 600, '.', '-', '.',
'ID=gene3;Name=thrX;gbkey=Gene;gene=thrX;locus_tag=SL1344_0003'],
['NC_016810.1', 'RefSeq', 'gene', 41, 255, '.', '+', '.',
'ID=gene4;Name=thrB;gbkey=Gene;gene=thrB;locus_tag=SL1344_0004'],
['NC_016810.1', 'RefSeq', 'gene', 170, 546, '.', '+', '.',
'ID=gene5;Name=thrC;gbkey=Gene;gene=thrC;locus_tag=SL1344_0005'],
], columns=["seq_id", "source", "type", "start", "end",
"score", "strand", "phase", "attributes"],
index=[1, 3, 5, 7, 9])
compare_get_feature_by_attribute2 = pd.DataFrame([
['NC_016810.1', 'RefSeq', 'CDS', 341, 523, '.', '+', '0',
'Dbxref=UniProtKB%252FTrEMBL:E1W7M4%2CGenbank:YP_005179941.1;ID=cds0;Name'
'=YP_005179941.1;Parent=gene2;gbkey=CDS;product=thr operon leader peptide'
';protein_id=YP_005179941.1;transl_table=11'],
['NC_016810.1', 'RefSeq', 'CDS', 21, 345, '.', '-', '0',
'Dbxref=UniProtKB%252FTrEMBL:E1W7M4%2CGenbank:YP_005179941.1;ID='
'cds0;Name=YP_005179941.1;Parent=gene3;gbkey=CDS;product=thr operon'
' leader peptide;protein_id=YP_005179941.1;transl_table=11'],
['NC_016810.1', 'RefSeq', 'CDS', 61, 195, '.', '+', '0',
'Dbxref=UniProtKB%252FTrEMBL:E1W7M4%2CGenbank:YP_005179941.1;ID='
'cds0;Name=YP_005179941.1;Parent=gene4;gbkey=CDS;product=thr operon'
' leader peptide;protein_id=YP_005179941.1;transl_table=11'],
], columns=["seq_id", "source", "type", "start", "end",
"score", "strand", "phase", "attributes"],
index=[4, 6, 8])
written_attribute_df = pd.DataFrame([
['NC_016810.1', 'RefSeq', 'region', 1, 4000, '.', '+', '.',
'Dbxref=taxon:216597;ID=id0;gbkey=Src;genome=genomic;mol_type=genomic'
' DNA;serovar=Typhimurium;strain=SL1344',
'taxon:216597', 'id0', None, None, 'Src', None, 'genomic',
None, 'genomic DNA', None, None, 'Typhimurium', 'SL1344',
None],
['NC_016810.1', 'RefSeq', 'gene', 1, 20, '.', '+', '.',
'ID=gene1;Name=thrL;gbkey=Gene;gene=thrL;locus_tag=SL1344_0001',
None, 'gene1', 'thrL', None, 'Gene', 'thrL', None,
'SL1344_0001', None, None, None, None, None, None],
['NC_016810.1', 'RefSeq', 'CDS', 13, 235, '.', '+', '0',
'Dbxref=UniProtKB%252FTrEMBL:E1W7M4%2CGenbank:YP_005179941.1;ID=cds0;'
'Name=YP_005179941.1;Parent=gene1;gbkey=CDS;product=thr operon leader'
' peptide;protein_id=YP_005179941.1;transl_table=11',
'UniProtKB%252FTrEMBL:E1W7M4%2CGenbank:YP_005179941.1',
'cds0', 'YP_005179941.1', 'gene1', 'CDS', None, None,
None, None, 'thr operon leader peptide',
'YP_005179941.1', None, None, '11'],
['NC_016810.1', 'RefSeq', 'gene', 1, 20, '.', '+', '.',
'ID=gene2;Name=thrA;gbkey=Gene;gene=thrA;locus_tag=SL1344_0002',
None, 'gene2', 'thrA', None, 'Gene', 'thrA', None,
'SL1344_0002', None, None, None, None, None, None],
['NC_016810.1', 'RefSeq', 'CDS', 341, 523, '.', '+', '0',
'Dbxref=UniProtKB%252FTrEMBL:E1W7M4%2CGenbank:YP_005179941.1;ID=cds0;'
'Name=YP_005179941.1;Parent=gene2;gbkey=CDS;product=thr operon leader'
' peptide;protein_id=YP_005179941.1;transl_table=11',
'UniProtKB%252FTrEMBL:E1W7M4%2CGenbank:YP_005179941.1', 'cds0',
'YP_005179941.1', 'gene2', 'CDS', None, None, None, None,
'thr operon leader peptide',
'YP_005179941.1', None, None, '11'],
['NC_016810.1', 'RefSeq', 'gene', 1, 600, '.', '-', '.',
'ID=gene3;Name=thrX;gbkey=Gene;gene=thrX;locus_tag=SL1344_0003',
None, 'gene3', 'thrX', None, 'Gene', 'thrX', None,
'SL1344_0003', None, None, None, None, None, None],
['NC_016810.1', 'RefSeq', 'CDS', 21, 345, '.', '-', '0',
'Dbxref=UniProtKB%252FTrEMBL:E1W7M4%2CGenbank:YP_005179941.1;ID=cds0;'
'Name=YP_005179941.1;Parent=gene3;gbkey=CDS;product=thr operon leader'
' peptide;protein_id=YP_005179941.1;transl_table=11',
'UniProtKB%252FTrEMBL:E1W7M4%2CGenbank:YP_005179941.1', 'cds0',
'YP_005179941.1', 'gene3', 'CDS', None, None, None, None,
'thr operon leader peptide',
'YP_005179941.1', None, None, '11'],
['NC_016810.1', 'RefSeq', 'gene', 41, 255, '.', '+', '.',
'ID=gene4;Name=thrB;gbkey=Gene;gene=thrB;locus_tag=SL1344_0004',
None, 'gene4', 'thrB', None, 'Gene', 'thrB', None,
'SL1344_0004', None, None, None, None, None, None],
['NC_016810.1', 'RefSeq', 'CDS', 61, 195, '.', '+', '0',
'Dbxref=UniProtKB%252FTrEMBL:E1W7M4%2CGenbank:YP_005179941.1;ID=cds0;'
'Name=YP_005179941.1;Parent=gene4;gbkey=CDS;product=thr operon leader'
' peptide;protein_id=YP_005179941.1;transl_table=11',
'UniProtKB%252FTrEMBL:E1W7M4%2CGenbank:YP_005179941.1', 'cds0',
'YP_005179941.1', 'gene4', 'CDS', None, None, None, None,
'thr operon leader peptide',
'YP_005179941.1', None, None, '11'],
['NC_016810.1', 'RefSeq', 'gene', 170, 546, '.', '+', '.',
'ID=gene5;Name=thrC;gbkey=Gene;gene=thrC;locus_tag=SL1344_0005',
None, 'gene5', 'thrC', None, 'Gene', 'thrC', None,
'SL1344_0005', None, None, None, None, None, None],
['NC_016810.1', 'RefSeq', 'CDS', 34, 335, '.', '+', '0',
'Dbxref=UniProtKB%252FTrEMBL:E1W7M4%2CGenbank:YP_005179941.1;ID=cds0;'
'Name=YP_005179941.1;Parent=gene5;gbkey=CDS;product=thr operon leader'
' peptide;protein_id=YP_005179941.1;transl_table=11',
'UniProtKB%252FTrEMBL:E1W7M4%2CGenbank:YP_005179941.1', 'cds0',
'YP_005179941.1', 'gene5', 'CDS', None, None, None, None,
'thr operon leader peptide',
'YP_005179941.1', None, None, '11'],
], columns=["seq_id", "source", "type", "start", "end",
"score", "strand", "phase", "attributes", "Dbxref",
"ID", "Name", "Parent", "gbkey", "gene", "genome",
"locus_tag", "mol_type", "product", "protein_id",
"serovar", "strain", "transl_table"])
strand_counts = | pd.value_counts(written_df['strand']) | pandas.value_counts |
from numpy import *
from numpy.random import *
import pandas as pd
import sqlite3
from os import remove
from os.path import exists
from itertools import combinations
db_path = 'db.sqlite3'
force = 1
nb_client = 1e1
nb_guarantee = 1e1
nb_fund_price = 1e1
nb_address_N = 5
nb_address_p = 0.1
nb_purchase_mu = 5
nb_purchase_sigma = 2
cities = ['ottawa', 'edmonton', 'victoria', 'winnipeg', 'fredericton', 'st_john', 'halifax', 'toronto', 'charlottetown', 'quebec', 'regina', 'yellowknife', 'iqaluit', 'whitehorse']
guarantees = ['gmdb', 'gmwb']
genders = ['female', 'male']
funds = ['vanguard', 'fidelity', 'rowe', 'merril', 'morgan', 'barclays', 'sachs', 'paribas', 'fargo', 'suisse', 'citi', 'mizuho', 'lazard', 'evercore', 'nomura', 'jefferies']
seed(0)
if not exists(db_path) or force:
print('making db ... ', end='')
if exists(db_path):
remove(db_path)
db = sqlite3.connect(db_path)
nb_client = int(nb_client)
nb_guarantee = int(nb_guarantee)
nb_fund_price = int(nb_fund_price)
# client
g = guarantees + list(','.join(g) for g in combinations(guarantees,2))
client = pd.DataFrame(dict(
cid = arange(nb_client)+1,
city = choice(cities, nb_client),
phone = randint(1e2, 1e3, nb_client),
guarantee = choice(g, nb_client),
gender = choice(genders, nb_client),
date_of_birth = choice(pd.date_range('1970-01-01', '2000-01-01'), nb_client),
))
client.to_sql('client', db, index=False)
# address
address = pd.DataFrame()
for cid in range(1, nb_client+1):
n = binomial(nb_address_N, nb_address_p)
x = pd.DataFrame(dict(
cid = [cid]*n,
date = choice(pd.date_range('2016-01-01', '2018-01-01'), n),
city = choice(cities, n),
phone = randint(1e2, 1e3, n),
))
address = pd.concat([x, address])
address = address.sample(frac=1)
address.to_sql('address', db, index=False)
# purchase
purchase = | pd.DataFrame() | pandas.DataFrame |
#Copyright 2020 <NAME>, <NAME>
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
import pickle
import pandas as pd
import numpy as np
import os
from pandas.api.types import is_numeric_dtype
from collections import Counter,defaultdict
from sklearn.feature_extraction import DictVectorizer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn import metrics
import re
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
from autogluon import TabularPrediction as task
import copy
import tensorflow_data_validation as tfdv
# print(tfdv.version.__version__)
from tensorflow_data_validation.utils.schema_util import get_feature,get_categorical_features,get_categorical_numeric_features,is_categorical_feature,get_multivalent_features
rf_Filename = "RandForest.pkl"
with open(rf_Filename, 'rb') as file:
    Pickled_LR_Model = pickle.load(file)
del_pattern = r'([^,;\|]+[,;\|]{1}[^,;\|]+){1,}'
del_reg = re.compile(del_pattern)
delimeters = r"(,|;|\|)"
delimeters = re.compile(delimeters)
url_pat = r"(http|ftp|https):\/\/([\w_-]+(?:(?:\.[\w_-]+)+))([\w.,@?^=%&:/~+#-]*[\w@?^=%&/~+#-])?"
url_reg = re.compile(url_pat)
email_pat = r"\b[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\.[A-Za-z]{2,6}\b"
email_reg = re.compile(email_pat)
stop_words = set(stopwords.words('english'))
def summary_stats(dat, key_s):
b_data = []
for col in key_s:
nans = np.count_nonzero(pd.isnull(dat[col]))
dist_val = len(pd.unique(dat[col].dropna()))
Total_val = len(dat[col])
mean = 0
std_dev = 0
var = 0
min_val = 0
max_val = 0
if is_numeric_dtype(dat[col]):
mean = np.mean(dat[col])
if pd.isnull(mean):
mean = 0
std_dev = 0
#var = 0
min_val = 0
max_val = 0
else:
std_dev = np.std(dat[col])
var = np.var(dat[col])
min_val = float(np.min(dat[col]))
max_val = float(np.max(dat[col]))
b_data.append([Total_val, nans, dist_val, mean, std_dev, min_val, max_val])
return b_data
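# Illustrative check of `summary_stats` (made-up frame, not used elsewhere):
# each row of the result is [total, nans, distinct, mean, std, min, max] for
# one column, with the non-numeric column falling back to zeros.
def _demo_summary_stats():
    demo = pd.DataFrame({'a': [1.0, 2.0, np.nan], 'b': ['x', 'y', 'y']})
    return summary_stats(demo, ['a', 'b'])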
def castability_feature(dat, column_names):
castability_list = []
    # make sure the value you are evaluating is not nan
for keys in column_names:
i = 0
while pd.isnull(dat[keys][i]):
i += 1
if i > len(dat[keys]) - 2:
break
#if type is string try casting
if dat[keys][i].__class__.__name__ == 'str':
try:
castability = str(type(eval(dat[keys][i])))
castability_list.append(1)
except:
castability_list.append(0)
else:
castability_list.append(0)
return castability_list
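# Illustrative check of `castability_feature` (invented frame, not used
# elsewhere): a string column whose first non-null value can be eval()'d
# is flagged with 1.
def _demo_castability():
    demo = pd.DataFrame({'num_str': ['1', '2'], 'word': ['abc', 'def']})
    return castability_feature(demo, ['num_str', 'word'])  # -> [1, 0]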
def numeric_extraction(dat,column_names):
#0 no , 1 yes
numeric_extraction_list = []
    # make sure the value you are evaluating is not nan
for keys in column_names:
i = 0
while | pd.isnull(dat[keys][i]) | pandas.isnull |
import unittest
from io import StringIO
import pandas as pd
import numpy as np
from connector.stateful import StatefulConnector
from data import DataManager
from proto.aiengine.v1 import aiengine_pb2
original_csv = "time,baz\n10,1.0\n20,2.0\n30,3.0\n40,4.0\n50,5.0"
original_data = pd.read_csv(StringIO(original_csv))
original_data["time"] = | pd.to_datetime(original_data["time"], unit="s") | pandas.to_datetime |
import warnings
import pandas as pd
import numpy as np
import pytorch_lightning as pl
from pytorch_lightning.callbacks import EarlyStopping, LearningRateMonitor
from pytorch_lightning.loggers import TensorBoardLogger
import torch
from pytorch_forecasting import Baseline, TemporalFusionTransformer, TimeSeriesDataSet
from pytorch_forecasting.data import GroupNormalizer
from pytorch_forecasting.metrics import SMAPE, PoissonLoss, QuantileLoss
from config import load_config
warnings.filterwarnings("ignore")
spec = load_config("config.yaml")
BATCH_SIZE = spec["model_local"]["batch_size"]
MAX_EPOCHS = spec["model_local"]["max_epochs"]
GPUS = spec["model_local"]["gpus"]
LEARNING_RATE = spec["model_local"]["learning_rate"]
HIDDEN_SIZE = spec["model_local"]["hidden_size"]
DROPOUT = spec["model_local"]["dropout"]
HIDDEN_CONTINUOUS_SIZE = spec["model_local"]["hidden_continuous_size"]
GRADIENT_CLIP_VAL = spec["model_local"]["gradient_clip_val"]
data = pd.read_csv("data/poc.csv")
data = data[[
"MERCHANT_1_NUMBER_OF_TRX",
"MERCHANT_2_NUMBER_OF_TRX",
"USER_1_NUMBER_OF_TRX",
"USER_2_NUMBER_OF_TRX",
"TIME"
]]
data = data.rename(columns={'TIME': 'date'})
data = data.set_index("date").stack().reset_index()
data = data.rename(
columns={
'level_1': 'id',
0: 'trx'
}
)
# add time index
data["time_idx"] = pd.to_datetime(data.date).astype(int)
data["time_idx"] -= data["time_idx"].min()
data["time_idx"] = (data.time_idx / 3600000000000) + 1
data["time_idx"] = data["time_idx"].astype(int)
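# Quick illustration of the time_idx construction above (reasoning only, not
# produced by this script): .astype(int) yields nanoseconds since the epoch,
# and 3600000000000 ns is one hour, so after subtracting the minimum the
# first hourly timestamp maps to 1, the next hour to 2, and so on.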
# add datetime variables
data["month"] = pd.to_datetime(data.date).dt.month\
.astype(str)\
.astype("category")
data["day_of_week"] = pd.to_datetime(data.date).dt.dayofweek\
.astype(str)\
.astype("category")
data["hour"] = | pd.to_datetime(data.date) | pandas.to_datetime |
# -*- coding: utf-8 -*-
# pylint: disable-msg=W0612,E1101
import itertools
import warnings
from warnings import catch_warnings
from datetime import datetime
from pandas.types.common import (is_integer_dtype,
is_float_dtype,
is_scalar)
from pandas.compat import range, lrange, lzip, StringIO, lmap
from pandas.tslib import NaT
from numpy import nan
from numpy.random import randn
import numpy as np
import pandas as pd
from pandas import option_context
from pandas.core.indexing import _non_reducing_slice, _maybe_numeric_slice
from pandas.core.api import (DataFrame, Index, Series, Panel, isnull,
MultiIndex, Timestamp, Timedelta, UInt64Index)
from pandas.formats.printing import pprint_thing
from pandas import concat
from pandas.core.common import PerformanceWarning
from pandas.tests.indexing.common import _mklbl
import pandas.util.testing as tm
from pandas import date_range
_verbose = False
# ------------------------------------------------------------------------
# Indexing test cases
def _generate_indices(f, values=False):
    """ generate the indices
    if values is True, use the axis values
    if False, use the range
"""
axes = f.axes
if values:
axes = [lrange(len(a)) for a in axes]
return itertools.product(*axes)
def _get_value(f, i, values=False):
""" return the value for the location i """
    # check against values
if values:
return f.values[i]
# this is equiv of f[col][row].....
# v = f
# for a in reversed(i):
# v = v.__getitem__(a)
# return v
with catch_warnings(record=True):
return f.ix[i]
def _get_result(obj, method, key, axis):
""" return the result for this obj with this key and this axis """
if isinstance(key, dict):
key = key[axis]
    # use an artificial conversion to map the key as integers to the labels
    # so ix can work for comparisons
if method == 'indexer':
method = 'ix'
key = obj._get_axis(axis)[key]
# in case we actually want 0 index slicing
try:
xp = getattr(obj, method).__getitem__(_axify(obj, key, axis))
except:
xp = getattr(obj, method).__getitem__(key)
return xp
def _axify(obj, key, axis):
# create a tuple accessor
axes = [slice(None)] * obj.ndim
axes[axis] = key
return tuple(axes)
class TestIndexing(tm.TestCase):
_objs = set(['series', 'frame', 'panel'])
_typs = set(['ints', 'uints', 'labels', 'mixed',
'ts', 'floats', 'empty', 'ts_rev'])
def setUp(self):
self.series_ints = Series(np.random.rand(4), index=lrange(0, 8, 2))
self.frame_ints = DataFrame(np.random.randn(4, 4),
index=lrange(0, 8, 2),
columns=lrange(0, 12, 3))
self.panel_ints = Panel(np.random.rand(4, 4, 4),
items=lrange(0, 8, 2),
major_axis=lrange(0, 12, 3),
minor_axis=lrange(0, 16, 4))
self.series_uints = Series(np.random.rand(4),
index=UInt64Index(lrange(0, 8, 2)))
self.frame_uints = DataFrame(np.random.randn(4, 4),
index=UInt64Index(lrange(0, 8, 2)),
columns=UInt64Index(lrange(0, 12, 3)))
self.panel_uints = Panel(np.random.rand(4, 4, 4),
items=UInt64Index(lrange(0, 8, 2)),
major_axis=UInt64Index(lrange(0, 12, 3)),
minor_axis=UInt64Index(lrange(0, 16, 4)))
self.series_labels = Series(np.random.randn(4), index=list('abcd'))
self.frame_labels = DataFrame(np.random.randn(4, 4),
index=list('abcd'), columns=list('ABCD'))
self.panel_labels = Panel(np.random.randn(4, 4, 4),
items=list('abcd'),
major_axis=list('ABCD'),
minor_axis=list('ZYXW'))
self.series_mixed = Series(np.random.randn(4), index=[2, 4, 'null', 8])
self.frame_mixed = DataFrame(np.random.randn(4, 4),
index=[2, 4, 'null', 8])
self.panel_mixed = Panel(np.random.randn(4, 4, 4),
items=[2, 4, 'null', 8])
self.series_ts = Series(np.random.randn(4),
index=date_range('20130101', periods=4))
self.frame_ts = DataFrame(np.random.randn(4, 4),
index=date_range('20130101', periods=4))
self.panel_ts = Panel(np.random.randn(4, 4, 4),
items=date_range('20130101', periods=4))
dates_rev = (date_range('20130101', periods=4)
.sort_values(ascending=False))
self.series_ts_rev = Series(np.random.randn(4),
index=dates_rev)
self.frame_ts_rev = DataFrame(np.random.randn(4, 4),
index=dates_rev)
self.panel_ts_rev = Panel(np.random.randn(4, 4, 4),
items=dates_rev)
self.frame_empty = DataFrame({})
self.series_empty = Series({})
self.panel_empty = Panel({})
# form agglomerates
for o in self._objs:
d = dict()
for t in self._typs:
d[t] = getattr(self, '%s_%s' % (o, t), None)
setattr(self, o, d)
def check_values(self, f, func, values=False):
if f is None:
return
axes = f.axes
        indices = itertools.product(*axes)
        for i in indices:
            result = getattr(f, func)[i]
            # check against values
if values:
expected = f.values[i]
else:
expected = f
for a in reversed(i):
expected = expected.__getitem__(a)
tm.assert_almost_equal(result, expected)
def check_result(self, name, method1, key1, method2, key2, typs=None,
objs=None, axes=None, fails=None):
def _eq(t, o, a, obj, k1, k2):
""" compare equal for these 2 keys """
if a is not None and a > obj.ndim - 1:
return
def _print(result, error=None):
if error is not None:
error = str(error)
v = ("%-16.16s [%-16.16s]: [typ->%-8.8s,obj->%-8.8s,"
"key1->(%-4.4s),key2->(%-4.4s),axis->%s] %s" %
(name, result, t, o, method1, method2, a, error or ''))
if _verbose:
pprint_thing(v)
try:
rs = getattr(obj, method1).__getitem__(_axify(obj, k1, a))
try:
xp = _get_result(obj, method2, k2, a)
except:
result = 'no comp'
_print(result)
return
detail = None
try:
if is_scalar(rs) and is_scalar(xp):
self.assertEqual(rs, xp)
elif xp.ndim == 1:
tm.assert_series_equal(rs, xp)
elif xp.ndim == 2:
tm.assert_frame_equal(rs, xp)
elif xp.ndim == 3:
tm.assert_panel_equal(rs, xp)
result = 'ok'
except AssertionError as e:
detail = str(e)
result = 'fail'
# reverse the checks
if fails is True:
if result == 'fail':
result = 'ok (fail)'
_print(result)
if not result.startswith('ok'):
raise AssertionError(detail)
except AssertionError:
raise
except Exception as detail:
                # if we are in fails, then it's ok; otherwise raise it
if fails is not None:
if isinstance(detail, fails):
result = 'ok (%s)' % type(detail).__name__
_print(result)
return
result = type(detail).__name__
raise AssertionError(_print(result, error=detail))
if typs is None:
typs = self._typs
if objs is None:
objs = self._objs
if axes is not None:
if not isinstance(axes, (tuple, list)):
axes = [axes]
else:
axes = list(axes)
else:
axes = [0, 1, 2]
# check
for o in objs:
if o not in self._objs:
continue
d = getattr(self, o)
for a in axes:
for t in typs:
if t not in self._typs:
continue
obj = d[t]
if obj is not None:
obj = obj.copy()
k2 = key2
_eq(t, o, a, obj, key1, k2)
def test_ix_deprecation(self):
# GH 15114
df = DataFrame({'A': [1, 2, 3]})
with tm.assert_produces_warning(DeprecationWarning,
check_stacklevel=False):
df.ix[1, 'A']
def test_indexer_caching(self):
# GH5727
# make sure that indexers are in the _internal_names_set
n = 1000001
arrays = [lrange(n), lrange(n)]
index = MultiIndex.from_tuples(lzip(*arrays))
s = Series(np.zeros(n), index=index)
str(s)
# setitem
expected = Series(np.ones(n), index=index)
s = Series(np.zeros(n), index=index)
s[s == 0] = 1
tm.assert_series_equal(s, expected)
def test_at_and_iat_get(self):
def _check(f, func, values=False):
if f is not None:
indicies = _generate_indices(f, values)
for i in indicies:
result = getattr(f, func)[i]
expected = _get_value(f, i, values)
tm.assert_almost_equal(result, expected)
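# .iat is purely positional, so only the integer-indexed fixtures are
# checked with values=True; .at is label-based and is checked for all of
# the index flavours below.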
for o in self._objs:
d = getattr(self, o)
# iat
for f in [d['ints'], d['uints']]:
_check(f, 'iat', values=True)
for f in [d['labels'], d['ts'], d['floats']]:
if f is not None:
self.assertRaises(ValueError, self.check_values, f, 'iat')
# at
for f in [d['ints'], d['uints'], d['labels'],
d['ts'], d['floats']]:
_check(f, 'at')
def test_at_and_iat_set(self):
def _check(f, func, values=False):
if f is not None:
indicies = _generate_indices(f, values)
for i in indicies:
getattr(f, func)[i] = 1
expected = _get_value(f, i, values)
tm.assert_almost_equal(expected, 1)
for t in self._objs:
d = getattr(self, t)
# iat
for f in [d['ints'], d['uints']]:
_check(f, 'iat', values=True)
for f in [d['labels'], d['ts'], d['floats']]:
if f is not None:
self.assertRaises(ValueError, _check, f, 'iat')
# at
for f in [d['ints'], d['uints'], d['labels'],
d['ts'], d['floats']]:
_check(f, 'at')
def test_at_iat_coercion(self):
# as timestamp is not a tuple!
dates = date_range('1/1/2000', periods=8)
df = DataFrame(randn(8, 4), index=dates, columns=['A', 'B', 'C', 'D'])
s = df['A']
result = s.at[dates[5]]
xp = s.values[5]
self.assertEqual(result, xp)
# GH 7729
# make sure we are boxing the returns
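# scalar access on datetime64/timedelta64 data should return boxed
# Timestamp/Timedelta objects rather than raw numpy scalars.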
s = Series(['2014-01-01', '2014-02-02'], dtype='datetime64[ns]')
expected = Timestamp('2014-02-02')
for r in [lambda: s.iat[1], lambda: s.iloc[1]]:
result = r()
self.assertEqual(result, expected)
s = Series(['1 days', '2 days'], dtype='timedelta64[ns]')
expected = Timedelta('2 days')
for r in [lambda: s.iat[1], lambda: s.iloc[1]]:
result = r()
self.assertEqual(result, expected)
def test_iat_invalid_args(self):
pass
def test_imethods_with_dups(self):
# GH6493
# iat/iloc with dups
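# positional access ignores duplicate labels: .iloc/.iat select purely by
# position, and out-of-range positions raise IndexError.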
s = Series(range(5), index=[1, 1, 2, 2, 3], dtype='int64')
result = s.iloc[2]
self.assertEqual(result, 2)
result = s.iat[2]
self.assertEqual(result, 2)
self.assertRaises(IndexError, lambda: s.iat[10])
self.assertRaises(IndexError, lambda: s.iat[-10])
result = s.iloc[[2, 3]]
expected = Series([2, 3], [2, 2], dtype='int64')
tm.assert_series_equal(result, expected)
df = s.to_frame()
result = df.iloc[2]
expected = Series(2, index=[0], name=2)
tm.assert_series_equal(result, expected)
result = df.iat[2, 0]
expected = 2
self.assertEqual(result, 2)
def test_repeated_getitem_dups(self):
# GH 5678
# repeated getitems on a dup index returning an ndarray
df = DataFrame(
np.random.random_sample((20, 5)),
index=['ABCDE'[x % 5] for x in range(20)])
expected = df.loc['A', 0]
result = df.loc[:, 0].loc['A']
tm.assert_series_equal(result, expected)
def test_iloc_exceeds_bounds(self):
# GH6296
# iloc should allow indexers that exceed the bounds
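# more precisely: out-of-bounds slices are silently truncated, while
# out-of-bounds list or scalar positional indexers raise IndexError.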
df = DataFrame(np.random.random_sample((20, 5)), columns=list('ABCDE'))
expected = df
# lists of positions should raise IndexError!
with tm.assertRaisesRegexp(IndexError,
'positional indexers are out-of-bounds'):
df.iloc[:, [0, 1, 2, 3, 4, 5]]
self.assertRaises(IndexError, lambda: df.iloc[[1, 30]])
self.assertRaises(IndexError, lambda: df.iloc[[1, -30]])
self.assertRaises(IndexError, lambda: df.iloc[[100]])
s = df['A']
self.assertRaises(IndexError, lambda: s.iloc[[100]])
self.assertRaises(IndexError, lambda: s.iloc[[-100]])
# still raise on a single indexer
msg = 'single positional indexer is out-of-bounds'
with tm.assertRaisesRegexp(IndexError, msg):
df.iloc[30]
self.assertRaises(IndexError, lambda: df.iloc[-30])
# GH10779
# single positive/negative indexer exceeding Series bounds should raise
# an IndexError
with tm.assertRaisesRegexp(IndexError, msg):
s.iloc[30]
self.assertRaises(IndexError, lambda: s.iloc[-30])
# slices are ok
result = df.iloc[:, 4:10] # 0 < start < len < stop
expected = df.iloc[:, 4:]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, -4:-10] # stop < 0 < start < len
expected = df.iloc[:, :0]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 10:4:-1] # 0 < stop < len < start (down)
expected = df.iloc[:, :4:-1]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 4:-10:-1] # stop < 0 < start < len (down)
expected = df.iloc[:, 4::-1]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, -10:4] # start < 0 < stop < len
expected = df.iloc[:, :4]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 10:4] # 0 < stop < len < start
expected = df.iloc[:, :0]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, -10:-11:-1] # stop < start < 0 < len (down)
expected = df.iloc[:, :0]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 10:11] # 0 < len < start < stop
expected = df.iloc[:, :0]
tm.assert_frame_equal(result, expected)
# slices whose bounds exceed the axis length are ok
result = s.iloc[18:30]
expected = s.iloc[18:]
tm.assert_series_equal(result, expected)
result = s.iloc[30:]
expected = s.iloc[:0]
tm.assert_series_equal(result, expected)
result = s.iloc[30::-1]
expected = s.iloc[::-1]
tm.assert_series_equal(result, expected)
# doc example
def check(result, expected):
str(result)
result.dtypes
tm.assert_frame_equal(result, expected)
dfl = DataFrame(np.random.randn(5, 2), columns=list('AB'))
check(dfl.iloc[:, 2:3], DataFrame(index=dfl.index))
check(dfl.iloc[:, 1:3], dfl.iloc[:, [1]])
check(dfl.iloc[4:6], dfl.iloc[[4]])
self.assertRaises(IndexError, lambda: dfl.iloc[[4, 5, 6]])
self.assertRaises(IndexError, lambda: dfl.iloc[:, 4])
def test_iloc_getitem_int(self):
# integer
self.check_result('integer', 'iloc', 2, 'ix',
{0: 4, 1: 6, 2: 8}, typs=['ints', 'uints'])
self.check_result('integer', 'iloc', 2, 'indexer', 2,
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_neg_int(self):
# neg integer
self.check_result('neg int', 'iloc', -1, 'ix',
{0: 6, 1: 9, 2: 12}, typs=['ints', 'uints'])
self.check_result('neg int', 'iloc', -1, 'indexer', -1,
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_list_int(self):
# list of ints
self.check_result('list int', 'iloc', [0, 1, 2], 'ix',
{0: [0, 2, 4], 1: [0, 3, 6], 2: [0, 4, 8]},
typs=['ints', 'uints'])
self.check_result('list int', 'iloc', [2], 'ix',
{0: [4], 1: [6], 2: [8]}, typs=['ints', 'uints'])
self.check_result('list int', 'iloc', [0, 1, 2], 'indexer', [0, 1, 2],
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
# array of ints (GH5006), make sure that a single indexer is returning
# the correct type
self.check_result('array int', 'iloc', np.array([0, 1, 2]), 'ix',
{0: [0, 2, 4],
1: [0, 3, 6],
2: [0, 4, 8]}, typs=['ints', 'uints'])
self.check_result('array int', 'iloc', np.array([2]), 'ix',
{0: [4], 1: [6], 2: [8]}, typs=['ints', 'uints'])
self.check_result('array int', 'iloc', np.array([0, 1, 2]), 'indexer',
[0, 1, 2],
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_neg_int_can_reach_first_index(self):
# GH10547 and GH10779
# negative integers should be able to reach index 0
df = DataFrame({'A': [2, 3, 5], 'B': [7, 11, 13]})
s = df['A']
expected = df.iloc[0]
result = df.iloc[-3]
tm.assert_series_equal(result, expected)
expected = df.iloc[[0]]
result = df.iloc[[-3]]
tm.assert_frame_equal(result, expected)
expected = s.iloc[0]
result = s.iloc[-3]
self.assertEqual(result, expected)
expected = s.iloc[[0]]
result = s.iloc[[-3]]
tm.assert_series_equal(result, expected)
# check the length 1 Series case highlighted in GH10547
expected = pd.Series(['a'], index=['A'])
result = expected.iloc[[-1]]
tm.assert_series_equal(result, expected)
def test_iloc_getitem_dups(self):
# no dups in panel (bug?)
self.check_result('list int (dups)', 'iloc', [0, 1, 1, 3], 'ix',
{0: [0, 2, 2, 6], 1: [0, 3, 3, 9]},
objs=['series', 'frame'], typs=['ints', 'uints'])
# GH 6766
df1 = DataFrame([{'A': None, 'B': 1}, {'A': 2, 'B': 2}])
df2 = DataFrame([{'A': 3, 'B': 3}, {'A': 4, 'B': 4}])
df = concat([df1, df2], axis=1)
# cross-sectional indexing
result = df.iloc[0, 0]
self.assertTrue(isnull(result))
result = df.iloc[0, :]
expected = Series([np.nan, 1, 3, 3], index=['A', 'B', 'A', 'B'],
name=0)
tm.assert_series_equal(result, expected)
def test_iloc_getitem_array(self):
# array like
s = Series(index=lrange(1, 4))
self.check_result('array like', 'iloc', s.index, 'ix',
{0: [2, 4, 6], 1: [3, 6, 9], 2: [4, 8, 12]},
typs=['ints', 'uints'])
def test_iloc_getitem_bool(self):
# boolean indexers
b = [True, False, True, False, ]
self.check_result('bool', 'iloc', b, 'ix', b, typs=['ints', 'uints'])
self.check_result('bool', 'iloc', b, 'ix', b,
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_slice(self):
# slices
self.check_result('slice', 'iloc', slice(1, 3), 'ix',
{0: [2, 4], 1: [3, 6], 2: [4, 8]},
typs=['ints', 'uints'])
self.check_result('slice', 'iloc', slice(1, 3), 'indexer',
slice(1, 3),
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_slice_dups(self):
df1 = DataFrame(np.random.randn(10, 4), columns=['A', 'A', 'B', 'B'])
df2 = DataFrame(np.random.randint(0, 10, size=20).reshape(10, 2),
columns=['A', 'C'])
# axis=1
df = concat([df1, df2], axis=1)
tm.assert_frame_equal(df.iloc[:, :4], df1)
tm.assert_frame_equal(df.iloc[:, 4:], df2)
df = concat([df2, df1], axis=1)
tm.assert_frame_equal(df.iloc[:, :2], df2)
tm.assert_frame_equal(df.iloc[:, 2:], df1)
exp = concat([df2, df1.iloc[:, [0]]], axis=1)
tm.assert_frame_equal(df.iloc[:, 0:3], exp)
# axis=0
df = concat([df, df], axis=0)
tm.assert_frame_equal(df.iloc[0:10, :2], df2)
tm.assert_frame_equal(df.iloc[0:10, 2:], df1)
tm.assert_frame_equal(df.iloc[10:, :2], df2)
tm.assert_frame_equal(df.iloc[10:, 2:], df1)
def test_iloc_setitem(self):
df = self.frame_ints
df.iloc[1, 1] = 1
result = df.iloc[1, 1]
self.assertEqual(result, 1)
df.iloc[:, 2:3] = 0
expected = df.iloc[:, 2:3]
result = df.iloc[:, 2:3]
tm.assert_frame_equal(result, expected)
# GH5771
s = Series(0, index=[4, 5, 6])
s.iloc[1:2] += 1
expected = Series([0, 1, 0], index=[4, 5, 6])
tm.assert_series_equal(s, expected)
def test_loc_setitem_slice(self):
# GH10503
# assigning the same type should not change the type
df1 = DataFrame({'a': [0, 1, 1],
'b': Series([100, 200, 300], dtype='uint32')})
ix = df1['a'] == 1
newb1 = df1.loc[ix, 'b'] + 1
df1.loc[ix, 'b'] = newb1
expected = DataFrame({'a': [0, 1, 1],
'b': Series([100, 201, 301], dtype='uint32')})
tm.assert_frame_equal(df1, expected)
# assigning a new type should get the inferred type
df2 = DataFrame({'a': [0, 1, 1], 'b': [100, 200, 300]},
dtype='uint64')
ix = df1['a'] == 1
newb2 = df2.loc[ix, 'b']
df1.loc[ix, 'b'] = newb2
expected = DataFrame({'a': [0, 1, 1], 'b': [100, 200, 300]},
dtype='uint64')
tm.assert_frame_equal(df2, expected)
def test_ix_loc_setitem_consistency(self):
# GH 5771
# loc with slice and series
s = Series(0, index=[4, 5, 6])
s.loc[4:5] += 1
expected = Series([1, 1, 0], index=[4, 5, 6])
tm.assert_series_equal(s, expected)
# GH 5928
# chained indexing assignment
df = DataFrame({'a': [0, 1, 2]})
expected = df.copy()
with catch_warnings(record=True):
expected.ix[[0, 1, 2], 'a'] = -expected.ix[[0, 1, 2], 'a']
with catch_warnings(record=True):
df['a'].ix[[0, 1, 2]] = -df['a'].ix[[0, 1, 2]]
tm.assert_frame_equal(df, expected)
df = DataFrame({'a': [0, 1, 2], 'b': [0, 1, 2]})
with catch_warnings(record=True):
df['a'].ix[[0, 1, 2]] = -df['a'].ix[[0, 1, 2]].astype(
'float64') + 0.5
expected = DataFrame({'a': [0.5, -0.5, -1.5], 'b': [0, 1, 2]})
tm.assert_frame_equal(df, expected)
# GH 8607
# ix setitem consistency
df = DataFrame({'timestamp': [1413840976, 1413842580, 1413760580],
'delta': [1174, 904, 161],
'elapsed': [7673, 9277, 1470]})
expected = DataFrame({'timestamp': pd.to_datetime(
[1413840976, 1413842580, 1413760580], unit='s'),
'delta': [1174, 904, 161],
'elapsed': [7673, 9277, 1470]})
df2 = df.copy()
df2['timestamp'] = pd.to_datetime(df['timestamp'], unit='s')
tm.assert_frame_equal(df2, expected)
df2 = df.copy()
df2.loc[:, 'timestamp'] = pd.to_datetime(df['timestamp'], unit='s')
tm.assert_frame_equal(df2, expected)
df2 = df.copy()
with catch_warnings(record=True):
df2.ix[:, 2] = pd.to_datetime(df['timestamp'], unit='s')
tm.assert_frame_equal(df2, expected)
def test_ix_loc_consistency(self):
# GH 8613
# some edge cases where ix/loc should return the same
# this is not an exhaustive case
def compare(result, expected):
if is_scalar(expected):
self.assertEqual(result, expected)
else:
self.assertTrue(expected.equals(result))
# failure cases for .loc, but these work for .ix
df = pd.DataFrame(np.random.randn(5, 4), columns=list('ABCD'))
for key in [slice(1, 3), tuple([slice(0, 2), slice(0, 2)]),
tuple([slice(0, 2), df.columns[0:2]])]:
for index in [tm.makeStringIndex, tm.makeUnicodeIndex,
tm.makeDateIndex, tm.makePeriodIndex,
tm.makeTimedeltaIndex]:
df.index = index(len(df.index))
with catch_warnings(record=True):
df.ix[key]
self.assertRaises(TypeError, lambda: df.loc[key])
df = pd.DataFrame(np.random.randn(5, 4), columns=list('ABCD'),
index=pd.date_range('2012-01-01', periods=5))
for key in ['2012-01-03',
'2012-01-31',
slice('2012-01-03', '2012-01-03'),
slice('2012-01-03', '2012-01-04'),
slice('2012-01-03', '2012-01-06', 2),
slice('2012-01-03', '2012-01-31'),
tuple([[True, True, True, False, True]]), ]:
# getitem
# if the expected raises, then compare the exceptions
try:
with catch_warnings(record=True):
expected = df.ix[key]
except KeyError:
self.assertRaises(KeyError, lambda: df.loc[key])
continue
result = df.loc[key]
compare(result, expected)
# setitem
df1 = df.copy()
df2 = df.copy()
with catch_warnings(record=True):
df1.ix[key] = 10
df2.loc[key] = 10
compare(df2, df1)
# edge cases
s = Series([1, 2, 3, 4], index=list('abde'))
result1 = s['a':'c']
with catch_warnings(record=True):
result2 = s.ix['a':'c']
result3 = s.loc['a':'c']
tm.assert_series_equal(result1, result2)
tm.assert_series_equal(result1, result3)
# now work rather than raising KeyError
s = Series(range(5), [-2, -1, 1, 2, 3])
with catch_warnings(record=True):
result1 = s.ix[-10:3]
result2 = s.loc[-10:3]
tm.assert_series_equal(result1, result2)
with catch_warnings(record=True):
result1 = s.ix[0:3]
result2 = s.loc[0:3]
tm.assert_series_equal(result1, result2)
def test_loc_setitem_dups(self):
# GH 6541
df_orig = DataFrame(
{'me': list('rttti'),
'foo': list('aaade'),
'bar': np.arange(5, dtype='float64') * 1.34 + 2,
'bar2': np.arange(5, dtype='float64') * -.34 + 2}).set_index('me')
indexer = tuple(['r', ['bar', 'bar2']])
df = df_orig.copy()
df.loc[indexer] *= 2.0
tm.assert_series_equal(df.loc[indexer], 2.0 * df_orig.loc[indexer])
indexer = tuple(['r', 'bar'])
df = df_orig.copy()
df.loc[indexer] *= 2.0
self.assertEqual(df.loc[indexer], 2.0 * df_orig.loc[indexer])
indexer = tuple(['t', ['bar', 'bar2']])
df = df_orig.copy()
df.loc[indexer] *= 2.0
tm.assert_frame_equal(df.loc[indexer], 2.0 * df_orig.loc[indexer])
def test_iloc_setitem_dups(self):
# GH 6766
# iloc with a mask aligning from another iloc
df1 = DataFrame([{'A': None, 'B': 1}, {'A': 2, 'B': 2}])
df2 = DataFrame([{'A': 3, 'B': 3}, {'A': 4, 'B': 4}])
df = concat([df1, df2], axis=1)
expected = df.fillna(3)
expected['A'] = expected['A'].astype('float64')
inds = np.isnan(df.iloc[:, 0])
mask = inds[inds].index
df.iloc[mask, 0] = df.iloc[mask, 2]
tm.assert_frame_equal(df, expected)
# del a dup column across blocks
expected = DataFrame({0: [1, 2], 1: [3, 4]})
expected.columns = ['B', 'B']
del df['A']
tm.assert_frame_equal(df, expected)
# assign back to self
df.iloc[[0, 1], [0, 1]] = df.iloc[[0, 1], [0, 1]]
tm.assert_frame_equal(df, expected)
# reversed x 2
df.iloc[[1, 0], [0, 1]] = df.iloc[[1, 0], [0, 1]].reset_index(
drop=True)
df.iloc[[1, 0], [0, 1]] = df.iloc[[1, 0], [0, 1]].reset_index(
drop=True)
tm.assert_frame_equal(df, expected)
def test_chained_getitem_with_lists(self):
# GH6394
# Regression in chained getitem indexing with embedded list-like from
# 0.12
def check(result, expected):
tm.assert_numpy_array_equal(result, expected)
tm.assertIsInstance(result, np.ndarray)
df = DataFrame({'A': 5 * [np.zeros(3)], 'B': 5 * [np.ones(3)]})
expected = df['A'].iloc[2]
result = df.loc[2, 'A']
check(result, expected)
result2 = df.iloc[2]['A']
check(result2, expected)
result3 = df['A'].loc[2]
check(result3, expected)
result4 = df['A'].iloc[2]
check(result4, expected)
def test_loc_getitem_int(self):
# int label
self.check_result('int label', 'loc', 2, 'ix', 2,
typs=['ints', 'uints'], axes=0)
self.check_result('int label', 'loc', 3, 'ix', 3,
typs=['ints', 'uints'], axes=1)
self.check_result('int label', 'loc', 4, 'ix', 4,
typs=['ints', 'uints'], axes=2)
self.check_result('int label', 'loc', 2, 'ix', 2,
typs=['label'], fails=KeyError)
def test_loc_getitem_label(self):
# label
self.check_result('label', 'loc', 'c', 'ix', 'c', typs=['labels'],
axes=0)
self.check_result('label', 'loc', 'null', 'ix', 'null', typs=['mixed'],
axes=0)
self.check_result('label', 'loc', 8, 'ix', 8, typs=['mixed'], axes=0)
self.check_result('label', 'loc', Timestamp('20130102'), 'ix', 1,
typs=['ts'], axes=0)
self.check_result('label', 'loc', 'c', 'ix', 'c', typs=['empty'],
fails=KeyError)
def test_loc_getitem_label_out_of_range(self):
# out of range label
self.check_result('label range', 'loc', 'f', 'ix', 'f',
typs=['ints', 'uints', 'labels', 'mixed', 'ts'],
fails=KeyError)
self.check_result('label range', 'loc', 'f', 'ix', 'f',
typs=['floats'], fails=TypeError)
self.check_result('label range', 'loc', 20, 'ix', 20,
typs=['ints', 'uints', 'mixed'], fails=KeyError)
self.check_result('label range', 'loc', 20, 'ix', 20,
typs=['labels'], fails=TypeError)
self.check_result('label range', 'loc', 20, 'ix', 20, typs=['ts'],
axes=0, fails=TypeError)
self.check_result('label range', 'loc', 20, 'ix', 20, typs=['floats'],
axes=0, fails=TypeError)
def test_loc_getitem_label_list(self):
# list of labels
self.check_result('list lbl', 'loc', [0, 2, 4], 'ix', [0, 2, 4],
typs=['ints', 'uints'], axes=0)
self.check_result('list lbl', 'loc', [3, 6, 9], 'ix', [3, 6, 9],
typs=['ints', 'uints'], axes=1)
self.check_result('list lbl', 'loc', [4, 8, 12], 'ix', [4, 8, 12],
typs=['ints', 'uints'], axes=2)
self.check_result('list lbl', 'loc', ['a', 'b', 'd'], 'ix',
['a', 'b', 'd'], typs=['labels'], axes=0)
self.check_result('list lbl', 'loc', ['A', 'B', 'C'], 'ix',
['A', 'B', 'C'], typs=['labels'], axes=1)
self.check_result('list lbl', 'loc', ['Z', 'Y', 'W'], 'ix',
['Z', 'Y', 'W'], typs=['labels'], axes=2)
self.check_result('list lbl', 'loc', [2, 8, 'null'], 'ix',
[2, 8, 'null'], typs=['mixed'], axes=0)
self.check_result('list lbl', 'loc',
[Timestamp('20130102'), Timestamp('20130103')], 'ix',
[Timestamp('20130102'), Timestamp('20130103')],
typs=['ts'], axes=0)
self.check_result('list lbl', 'loc', [0, 1, 2], 'indexer', [0, 1, 2],
typs=['empty'], fails=KeyError)
self.check_result('list lbl', 'loc', [0, 2, 3], 'ix', [0, 2, 3],
typs=['ints', 'uints'], axes=0, fails=KeyError)
self.check_result('list lbl', 'loc', [3, 6, 7], 'ix', [3, 6, 7],
typs=['ints', 'uints'], axes=1, fails=KeyError)
self.check_result('list lbl', 'loc', [4, 8, 10], 'ix', [4, 8, 10],
typs=['ints', 'uints'], axes=2, fails=KeyError)
def test_loc_getitem_label_list_fails(self):
# fails
self.check_result('list lbl', 'loc', [20, 30, 40], 'ix', [20, 30, 40],
typs=['ints', 'uints'], axes=1, fails=KeyError)
self.check_result('list lbl', 'loc', [20, 30, 40], 'ix', [20, 30, 40],
typs=['ints', 'uints'], axes=2, fails=KeyError)
def test_loc_getitem_label_array_like(self):
# array like
self.check_result('array like', 'loc', Series(index=[0, 2, 4]).index,
'ix', [0, 2, 4], typs=['ints', 'uints'], axes=0)
self.check_result('array like', 'loc', Series(index=[3, 6, 9]).index,
'ix', [3, 6, 9], typs=['ints', 'uints'], axes=1)
self.check_result('array like', 'loc', Series(index=[4, 8, 12]).index,
'ix', [4, 8, 12], typs=['ints', 'uints'], axes=2)
def test_loc_getitem_bool(self):
# boolean indexers
b = [True, False, True, False]
self.check_result('bool', 'loc', b, 'ix', b,
typs=['ints', 'uints', 'labels',
'mixed', 'ts', 'floats'])
self.check_result('bool', 'loc', b, 'ix', b, typs=['empty'],
fails=KeyError)
def test_loc_getitem_int_slice(self):
# ok
self.check_result('int slice2', 'loc', slice(2, 4), 'ix', [2, 4],
typs=['ints', 'uints'], axes=0)
self.check_result('int slice2', 'loc', slice(3, 6), 'ix', [3, 6],
typs=['ints', 'uints'], axes=1)
self.check_result('int slice2', 'loc', slice(4, 8), 'ix', [4, 8],
typs=['ints', 'uints'], axes=2)
# GH 3053
# loc should treat integer slices like label slices
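# i.e. on an integer-valued index, df.loc[6:8] selects the labels 6
# through 8 inclusive rather than positions 6:8.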
from itertools import product
index = MultiIndex.from_tuples([t for t in product(
[6, 7, 8], ['a', 'b'])])
df = DataFrame(np.random.randn(6, 6), index, index)
result = df.loc[6:8, :]
with catch_warnings(record=True):
expected = df.ix[6:8, :]
tm.assert_frame_equal(result, expected)
index = MultiIndex.from_tuples([t
for t in product(
[10, 20, 30], ['a', 'b'])])
df = DataFrame(np.random.randn(6, 6), index, index)
result = df.loc[20:30, :]
with catch_warnings(record=True):
expected = df.ix[20:30, :]
tm.assert_frame_equal(result, expected)
# doc examples
result = df.loc[10, :]
with catch_warnings(record=True):
expected = df.ix[10, :]
tm.assert_frame_equal(result, expected)
result = df.loc[:, 10]
# expected = df.ix[:,10] (this fails)
expected = df[10]
tm.assert_frame_equal(result, expected)
def test_loc_to_fail(self):
# GH3449
df = DataFrame(np.random.random((3, 3)),
index=['a', 'b', 'c'],
columns=['e', 'f', 'g'])
# raise a KeyError?
self.assertRaises(KeyError, df.loc.__getitem__,
tuple([[1, 2], [1, 2]]))
# GH 7496
# loc should not fallback
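# unlike .ix, a missing label under .loc getitem raises KeyError instead
# of being reinterpreted as a position.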
s = Series()
s.loc[1] = 1
s.loc['a'] = 2
self.assertRaises(KeyError, lambda: s.loc[-1])
self.assertRaises(KeyError, lambda: s.loc[[-1, -2]])
self.assertRaises(KeyError, lambda: s.loc[['4']])
s.loc[-1] = 3
result = s.loc[[-1, -2]]
expected = Series([3, np.nan], index=[-1, -2])
tm.assert_series_equal(result, expected)
s['a'] = 2
self.assertRaises(KeyError, lambda: s.loc[[-2]])
del s['a']
def f():
s.loc[[-2]] = 0
self.assertRaises(KeyError, f)
# inconsistency between .loc[values] and .loc[values,:]
# GH 7999
df = DataFrame([['a'], ['b']], index=[1, 2], columns=['value'])
def f():
df.loc[[3], :]
self.assertRaises(KeyError, f)
def f():
df.loc[[3]]
self.assertRaises(KeyError, f)
def test_at_to_fail(self):
# at should not fallback
# GH 7814
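# .at is strictly label-based: positional keys on a labelled axis (and
# label keys on an integer axis) raise ValueError.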
s = Series([1, 2, 3], index=list('abc'))
result = s.at['a']
self.assertEqual(result, 1)
self.assertRaises(ValueError, lambda: s.at[0])
df = DataFrame({'A': [1, 2, 3]}, index=list('abc'))
result = df.at['a', 'A']
self.assertEqual(result, 1)
self.assertRaises(ValueError, lambda: df.at['a', 0])
s = Series([1, 2, 3], index=[3, 2, 1])
result = s.at[1]
self.assertEqual(result, 3)
self.assertRaises(ValueError, lambda: s.at['a'])
df = DataFrame({0: [1, 2, 3]}, index=[3, 2, 1])
result = df.at[1, 0]
self.assertEqual(result, 3)
self.assertRaises(ValueError, lambda: df.at['a', 0])
# GH 13822, incorrect error string with non-unique columns when missing
# column is accessed
df = DataFrame({'x': [1.], 'y': [2.], 'z': [3.]})
df.columns = ['x', 'x', 'z']
# Check that we get the correct value in the KeyError
self.assertRaisesRegexp(KeyError, r"\['y'\] not in index",
lambda: df[['x', 'y', 'z']])
def test_loc_getitem_label_slice(self):
# label slices (with ints)
self.check_result('lab slice', 'loc', slice(1, 3),
'ix', slice(1, 3),
typs=['labels', 'mixed', 'empty', 'ts', 'floats'],
fails=TypeError)
# real label slices
self.check_result('lab slice', 'loc', slice('a', 'c'),
'ix', slice('a', 'c'), typs=['labels'], axes=0)
self.check_result('lab slice', 'loc', slice('A', 'C'),
'ix', slice('A', 'C'), typs=['labels'], axes=1)
self.check_result('lab slice', 'loc', slice('W', 'Z'),
'ix', slice('W', 'Z'), typs=['labels'], axes=2)
self.check_result('ts slice', 'loc', slice('20130102', '20130104'),
'ix', slice('20130102', '20130104'),
typs=['ts'], axes=0)
self.check_result('ts slice', 'loc', slice('20130102', '20130104'),
'ix', slice('20130102', '20130104'),
typs=['ts'], axes=1, fails=TypeError)
self.check_result('ts slice', 'loc', slice('20130102', '20130104'),
'ix', slice('20130102', '20130104'),
typs=['ts'], axes=2, fails=TypeError)
# GH 14316
self.check_result('ts slice rev', 'loc', slice('20130104', '20130102'),
'indexer', [0, 1, 2], typs=['ts_rev'], axes=0)
self.check_result('mixed slice', 'loc', slice(2, 8), 'ix', slice(2, 8),
typs=['mixed'], axes=0, fails=TypeError)
self.check_result('mixed slice', 'loc', slice(2, 8), 'ix', slice(2, 8),
typs=['mixed'], axes=1, fails=KeyError)
self.check_result('mixed slice', 'loc', slice(2, 8), 'ix', slice(2, 8),
typs=['mixed'], axes=2, fails=KeyError)
self.check_result('mixed slice', 'loc', slice(2, 4, 2), 'ix', slice(
2, 4, 2), typs=['mixed'], axes=0, fails=TypeError)
def test_loc_general(self):
df = DataFrame(
np.random.rand(4, 4), columns=['A', 'B', 'C', 'D'],
index=['A', 'B', 'C', 'D'])
# want this to work
result = df.loc[:, "A":"B"].iloc[0:2, :]
self.assertTrue((result.columns == ['A', 'B']).all())
self.assertTrue((result.index == ['A', 'B']).all())
# mixed type
result = DataFrame({'a': [Timestamp('20130101')], 'b': [1]}).iloc[0]
expected = Series([Timestamp('20130101'), 1], index=['a', 'b'], name=0)
tm.assert_series_equal(result, expected)
self.assertEqual(result.dtype, object)
def test_loc_setitem_consistency(self):
# GH 6149
# coerce similarly for setitem and loc when rows have a null-slice
expected = DataFrame({'date': Series(0, index=range(5),
dtype=np.int64),
'val': Series(range(5), dtype=np.int64)})
df = DataFrame({'date': date_range('2000-01-01', '2000-01-5'),
'val': Series(
range(5), dtype=np.int64)})
df.loc[:, 'date'] = 0
tm.assert_frame_equal(df, expected)
df = DataFrame({'date': date_range('2000-01-01', '2000-01-5'),
'val': Series(range(5), dtype=np.int64)})
df.loc[:, 'date'] = np.array(0, dtype=np.int64)
tm.assert_frame_equal(df, expected)
df = DataFrame({'date': date_range('2000-01-01', '2000-01-5'),
'val': Series(range(5), dtype=np.int64)})
df.loc[:, 'date'] = np.array([0, 0, 0, 0, 0], dtype=np.int64)
tm.assert_frame_equal(df, expected)
expected = DataFrame({'date': Series('foo', index=range(5)),
'val': Series(range(5), dtype=np.int64)})
df = DataFrame({'date': date_range('2000-01-01', '2000-01-5'),
'val': Series(range(5), dtype=np.int64)})
df.loc[:, 'date'] = 'foo'
tm.assert_frame_equal(df, expected)
expected = DataFrame({'date': Series(1.0, index=range(5)),
'val': Series(range(5), dtype=np.int64)})
df = DataFrame({'date': date_range('2000-01-01', '2000-01-5'),
'val': Series(range(5), dtype=np.int64)})
df.loc[:, 'date'] = 1.0
tm.assert_frame_equal(df, expected)
def test_loc_setitem_consistency_empty(self):
# empty (essentially noops)
expected = DataFrame(columns=['x', 'y'])
expected['x'] = expected['x'].astype(np.int64)
df = DataFrame(columns=['x', 'y'])
df.loc[:, 'x'] = 1
tm.assert_frame_equal(df, expected)
df = DataFrame(columns=['x', 'y'])
df['x'] = 1
tm.assert_frame_equal(df, expected)
def test_loc_setitem_consistency_slice_column_len(self):
# .loc[:,column] setting with slice == len of the column
# GH10408
data = """Level_0,,,Respondent,Respondent,Respondent,OtherCat,OtherCat
Level_1,,,Something,StartDate,EndDate,Yes/No,SomethingElse
Region,Site,RespondentID,,,,,
Region_1,Site_1,3987227376,A,5/25/2015 10:59,5/25/2015 11:22,Yes,
Region_1,Site_1,3980680971,A,5/21/2015 9:40,5/21/2015 9:52,Yes,Yes
Region_1,Site_2,3977723249,A,5/20/2015 8:27,5/20/2015 8:41,Yes,
Region_1,Site_2,3977723089,A,5/20/2015 8:33,5/20/2015 9:09,Yes,No"""
df = pd.read_csv(StringIO(data), header=[0, 1], index_col=[0, 1, 2])
df.loc[:, ('Respondent', 'StartDate')] = pd.to_datetime(df.loc[:, (
'Respondent', 'StartDate')])
df.loc[:, ('Respondent', 'EndDate')] = pd.to_datetime(df.loc[:, (
'Respondent', 'EndDate')])
df.loc[:, ('Respondent', 'Duration')] = df.loc[:, (
'Respondent', 'EndDate')] - df.loc[:, ('Respondent', 'StartDate')]
df.loc[:, ('Respondent', 'Duration')] = df.loc[:, (
'Respondent', 'Duration')].astype('timedelta64[s]')
expected = Series([1380, 720, 840, 2160.], index=df.index,
name=('Respondent', 'Duration'))
tm.assert_series_equal(df[('Respondent', 'Duration')], expected)
def test_loc_setitem_frame(self):
df = self.frame_labels
result = df.iloc[0, 0]
df.loc['a', 'A'] = 1
result = df.loc['a', 'A']
self.assertEqual(result, 1)
result = df.iloc[0, 0]
self.assertEqual(result, 1)
df.loc[:, 'B':'D'] = 0
expected = df.loc[:, 'B':'D']
with catch_warnings(record=True):
result = df.ix[:, 1:]
tm.assert_frame_equal(result, expected)
# GH 6254
# setting issue
df = DataFrame(index=[3, 5, 4], columns=['A'])
df.loc[[4, 3, 5], 'A'] = np.array([1, 2, 3], dtype='int64')
expected = DataFrame(dict(A=Series(
[1, 2, 3], index=[4, 3, 5]))).reindex(index=[3, 5, 4])
tm.assert_frame_equal(df, expected)
# GH 6252
# setting with an empty frame
keys1 = ['@' + str(i) for i in range(5)]
val1 = np.arange(5, dtype='int64')
keys2 = ['@' + str(i) for i in range(4)]
val2 = np.arange(4, dtype='int64')
index = list(set(keys1).union(keys2))
df = DataFrame(index=index)
df['A'] = nan
df.loc[keys1, 'A'] = val1
df['B'] = nan
df.loc[keys2, 'B'] = val2
expected = DataFrame(dict(A=Series(val1, index=keys1), B=Series(
val2, index=keys2))).reindex(index=index)
tm.assert_frame_equal(df, expected)
# GH 8669
# invalid coercion of nan -> int
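# df.B is all-NaN, so the boolean mask is all False and the frame must be
# returned unchanged (no coercion of NaN to int).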
df = DataFrame({'A': [1, 2, 3], 'B': np.nan})
df.loc[df.B > df.A, 'B'] = df.A
expected = DataFrame({'A': [1, 2, 3], 'B': np.nan})
tm.assert_frame_equal(df, expected)
# GH 6546
# setting with mixed labels
df = DataFrame({1: [1, 2], 2: [3, 4], 'a': ['a', 'b']})
result = df.loc[0, [1, 2]]
expected = Series([1, 3], index=[1, 2], dtype=object, name=0)
tm.assert_series_equal(result, expected)
expected = DataFrame({1: [5, 2], 2: [6, 4], 'a': ['a', 'b']})
df.loc[0, [1, 2]] = [5, 6]
tm.assert_frame_equal(df, expected)
def test_loc_setitem_frame_multiples(self):
# multiple setting
df = DataFrame({'A': ['foo', 'bar', 'baz'],
'B': Series(
range(3), dtype=np.int64)})
rhs = df.loc[1:2]
rhs.index = df.index[0:2]
df.loc[0:1] = rhs
expected = DataFrame({'A': ['bar', 'baz', 'baz'],
'B': Series(
[1, 2, 2], dtype=np.int64)})
tm.assert_frame_equal(df, expected)
# multiple setting with frame on rhs (with M8)
df = DataFrame({'date': date_range('2000-01-01', '2000-01-5'),
'val': Series(
range(5), dtype=np.int64)})
expected = DataFrame({'date': [Timestamp('20000101'), Timestamp(
'20000102'), Timestamp('20000101'), Timestamp('20000102'),
Timestamp('20000103')],
'val': Series(
[0, 1, 0, 1, 2], dtype=np.int64)})
rhs = df.loc[0:2]
rhs.index = df.index[2:5]
df.loc[2:4] = rhs
tm.assert_frame_equal(df, expected)
def test_iloc_getitem_frame(self):
df = DataFrame(np.random.randn(10, 4), index=lrange(0, 20, 2),
columns=lrange(0, 8, 2))
result = df.iloc[2]
with catch_warnings(record=True):
exp = df.ix[4]
tm.assert_series_equal(result, exp)
result = df.iloc[2, 2]
with catch_warnings(record=True):
exp = df.ix[4, 4]
self.assertEqual(result, exp)
# slice
result = df.iloc[4:8]
with catch_warnings(record=True):
expected = df.ix[8:14]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 2:3]
with catch_warnings(record=True):
expected = df.ix[:, 4:5]
tm.assert_frame_equal(result, expected)
# list of integers
result = df.iloc[[0, 1, 3]]
with catch_warnings(record=True):
expected = df.ix[[0, 2, 6]]
tm.assert_frame_equal(result, expected)
result = df.iloc[[0, 1, 3], [0, 1]]
with catch_warnings(record=True):
expected = df.ix[[0, 2, 6], [0, 2]]
tm.assert_frame_equal(result, expected)
# negative indices
result = df.iloc[[-1, 1, 3], [-1, 1]]
with catch_warnings(record=True):
expected = df.ix[[18, 2, 6], [6, 2]]
tm.assert_frame_equal(result, expected)
# duplicate indices
result = df.iloc[[-1, -1, 1, 3], [-1, 1]]
with catch_warnings(record=True):
expected = df.ix[[18, 18, 2, 6], [6, 2]]
tm.assert_frame_equal(result, expected)
# with index-like
s = Series(index=lrange(1, 5))
result = df.iloc[s.index]
with catch_warnings(record=True):
expected = df.ix[[2, 4, 6, 8]]
tm.assert_frame_equal(result, expected)
def test_iloc_getitem_labelled_frame(self):
# try with labelled frame
df = DataFrame(np.random.randn(10, 4),
index=list('abcdefghij'), columns=list('ABCD'))
result = df.iloc[1, 1]
exp = df.loc['b', 'B']
self.assertEqual(result, exp)
result = df.iloc[:, 2:3]
expected = df.loc[:, ['C']]
tm.assert_frame_equal(result, expected)
# negative indexing
result = df.iloc[-1, -1]
exp = df.loc['j', 'D']
self.assertEqual(result, exp)
# out-of-bounds exception
self.assertRaises(IndexError, df.iloc.__getitem__, tuple([10, 5]))
# trying to use a label
self.assertRaises(ValueError, df.iloc.__getitem__, tuple(['j', 'D']))
def test_iloc_getitem_doc_issue(self):
# multi axis slicing issue with single block
# surfaced in GH 6059
arr = np.random.randn(6, 4)
index = date_range('20130101', periods=6)
columns = list('ABCD')
df = DataFrame(arr, index=index, columns=columns)
# defines ref_locs
df.describe()
result = df.iloc[3:5, 0:2]
str(result)
result.dtypes
expected = DataFrame(arr[3:5, 0:2], index=index[3:5],
columns=columns[0:2])
tm.assert_frame_equal(result, expected)
# for dups
df.columns = list('aaaa')
result = df.iloc[3:5, 0:2]
str(result)
result.dtypes
expected = DataFrame(arr[3:5, 0:2], index=index[3:5],
columns=list('aa'))
tm.assert_frame_equal(result, expected)
# related
arr = np.random.randn(6, 4)
index = list(range(0, 12, 2))
columns = list(range(0, 8, 2))
df = DataFrame(arr, index=index, columns=columns)
df._data.blocks[0].mgr_locs
result = df.iloc[1:5, 2:4]
str(result)
result.dtypes
expected = DataFrame(arr[1:5, 2:4], index=index[1:5],
columns=columns[2:4])
tm.assert_frame_equal(result, expected)
def test_setitem_ndarray_1d(self):
# GH5508
# len of indexer vs length of the 1d ndarray
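# assigning a 1d ndarray whose length does not match the selected rows
# should raise ValueError; an exact-length match is accepted below.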
df = DataFrame(index=Index(lrange(1, 11)))
df['foo'] = np.zeros(10, dtype=np.float64)
df['bar'] = np.zeros(10, dtype=np.complex)
# invalid
def f():
with catch_warnings(record=True):
df.ix[2:5, 'bar'] = np.array([2.33j, 1.23 + 0.1j, 2.2])
self.assertRaises(ValueError, f)
def f():
df.loc[df.index[2:5], 'bar'] = np.array([2.33j, 1.23 + 0.1j,
2.2, 1.0])
self.assertRaises(ValueError, f)
# valid
df.loc[df.index[2:6], 'bar'] = np.array([2.33j, 1.23 + 0.1j,
2.2, 1.0])
result = df.loc[df.index[2:6], 'bar']
expected = Series([2.33j, 1.23 + 0.1j, 2.2, 1.0], index=[3, 4, 5, 6],
name='bar')
tm.assert_series_equal(result, expected)
# dtype getting changed?
df = DataFrame(index=Index(lrange(1, 11)))
df['foo'] = np.zeros(10, dtype=np.float64)
df['bar'] = np.zeros(10, dtype=np.complex)
def f():
df[2:5] = np.arange(1, 4) * 1j
self.assertRaises(ValueError, f)
def test_iloc_setitem_series(self):
df = DataFrame(np.random.randn(10, 4), index=list('abcdefghij'),
columns=list('ABCD'))
df.iloc[1, 1] = 1
result = df.iloc[1, 1]
self.assertEqual(result, 1)
df.iloc[:, 2:3] = 0
expected = df.iloc[:, 2:3]
result = df.iloc[:, 2:3]
tm.assert_frame_equal(result, expected)
s = Series(np.random.randn(10), index=lrange(0, 20, 2))
s.iloc[1] = 1
result = s.iloc[1]
self.assertEqual(result, 1)
s.iloc[:4] = 0
expected = s.iloc[:4]
result = s.iloc[:4]
tm.assert_series_equal(result, expected)
s = Series([-1] * 6)
s.iloc[0::2] = [0, 2, 4]
s.iloc[1::2] = [1, 3, 5]
result = s
expected = Series([0, 1, 2, 3, 4, 5])
tm.assert_series_equal(result, expected)
def test_iloc_setitem_list_of_lists(self):
# GH 7551
# list-of-list is set incorrectly in mixed vs. single dtyped frames
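# iloc slice assignment from a list of row-lists should behave the same
# for a homogeneous int64 frame and a mixed object/int64 frame.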
df = DataFrame(dict(A=np.arange(5, dtype='int64'),
B=np.arange(5, 10, dtype='int64')))
df.iloc[2:4] = [[10, 11], [12, 13]]
expected = DataFrame(dict(A=[0, 1, 10, 12, 4], B=[5, 6, 11, 13, 9]))
tm.assert_frame_equal(df, expected)
df = DataFrame(
dict(A=list('abcde'), B=np.arange(5, 10, dtype='int64')))
df.iloc[2:4] = [['x', 11], ['y', 13]]
expected = DataFrame(dict(A=['a', 'b', 'x', 'y', 'e'],
B=[5, 6, 11, 13, 9]))
tm.assert_frame_equal(df, expected)
def test_ix_general(self):
# ix general issues
# GH 2817
data = {'amount': {0: 700, 1: 600, 2: 222, 3: 333, 4: 444},
'col': {0: 3.5, 1: 3.5, 2: 4.0, 3: 4.0, 4: 4.0},
'year': {0: 2012, 1: 2011, 2: 2012, 3: 2012, 4: 2012}}
df = DataFrame(data).set_index(keys=['col', 'year'])
key = 4.0, 2012
# emits a PerformanceWarning, ok
with self.assert_produces_warning(PerformanceWarning):
tm.assert_frame_equal(df.loc[key], df.iloc[2:])
# this is ok
df.sort_index(inplace=True)
res = df.loc[key]
# col has float dtype, result should be Float64Index
index = MultiIndex.from_arrays([[4.] * 3, [2012] * 3],
names=['col', 'year'])
expected = DataFrame({'amount': [222, 333, 444]}, index=index)
tm.assert_frame_equal(res, expected)
def test_ix_weird_slicing(self):
# http://stackoverflow.com/q/17056560/1240268
df = DataFrame({'one': [1, 2, 3, np.nan, np.nan],
'two': [1, 2, 3, 4, 5]})
df.loc[df['one'] > 1, 'two'] = -df['two']
expected = DataFrame({'one': {0: 1.0,
1: 2.0,
2: 3.0,
3: nan,
4: nan},
'two': {0: 1,
1: -2,
2: -3,
3: 4,
4: 5}})
tm.assert_frame_equal(df, expected)
def test_loc_coerceion(self):
# 12411
df = DataFrame({'date': [pd.Timestamp('20130101').tz_localize('UTC'),
pd.NaT]})
expected = df.dtypes
result = df.iloc[[0]]
tm.assert_series_equal(result.dtypes, expected)
result = df.iloc[[1]]
tm.assert_series_equal(result.dtypes, expected)
# 12045
import datetime
df = DataFrame({'date': [datetime.datetime(2012, 1, 1),
datetime.datetime(1012, 1, 2)]})
expected = df.dtypes
result = df.iloc[[0]]
tm.assert_series_equal(result.dtypes, expected)
result = df.iloc[[1]]
tm.assert_series_equal(result.dtypes, expected)
# 11594
df = DataFrame({'text': ['some words'] + [None] * 9})
expected = df.dtypes
result = df.iloc[0:2]
tm.assert_series_equal(result.dtypes, expected)
result = df.iloc[3:]
tm.assert_series_equal(result.dtypes, expected)
def test_setitem_dtype_upcast(self):
# GH3216
df = DataFrame([{"a": 1}, {"a": 3, "b": 2}])
df['c'] = np.nan
self.assertEqual(df['c'].dtype, np.float64)
df.loc[0, 'c'] = 'foo'
expected = DataFrame([{"a": 1, "c": 'foo'},
{"a": 3, "b": 2, "c": np.nan}])
tm.assert_frame_equal(df, expected)
# GH10280
df = DataFrame(np.arange(6, dtype='int64').reshape(2, 3),
index=list('ab'),
columns=['foo', 'bar', 'baz'])
for val in [3.14, 'wxyz']:
left = df.copy()
left.loc['a', 'bar'] = val
right = DataFrame([[0, val, 2], [3, 4, 5]], index=list('ab'),
columns=['foo', 'bar', 'baz'])
tm.assert_frame_equal(left, right)
self.assertTrue(is_integer_dtype(left['foo']))
self.assertTrue(is_integer_dtype(left['baz']))
left = DataFrame(np.arange(6, dtype='int64').reshape(2, 3) / 10.0,
index=list('ab'),
columns=['foo', 'bar', 'baz'])
left.loc['a', 'bar'] = 'wxyz'
right = DataFrame([[0, 'wxyz', .2], [.3, .4, .5]], index=list('ab'),
columns=['foo', 'bar', 'baz'])
tm.assert_frame_equal(left, right)
self.assertTrue(is_float_dtype(left['foo']))
self.assertTrue(is_float_dtype(left['baz']))
def test_setitem_iloc(self):
# setitem with an iloc list
df = DataFrame(np.arange(9).reshape((3, 3)), index=["A", "B", "C"],
columns=["A", "B", "C"])
df.iloc[[0, 1], [1, 2]]
df.iloc[[0, 1], [1, 2]] += 100
expected = DataFrame(
np.array([0, 101, 102, 3, 104, 105, 6, 7, 8]).reshape((3, 3)),
index=["A", "B", "C"], columns=["A", "B", "C"])
tm.assert_frame_equal(df, expected)
def test_dups_fancy_indexing(self):
# GH 3455
from pandas.util.testing import makeCustomDataframe as mkdf
df = mkdf(10, 3)
df.columns = ['a', 'a', 'b']
result = df[['b', 'a']].columns
expected = Index(['b', 'a', 'a'])
self.assert_index_equal(result, expected)
# across dtypes
df = DataFrame([[1, 2, 1., 2., 3., 'foo', 'bar']],
columns=list('aaaaaaa'))
df.head()
str(df)
result = DataFrame([[1, 2, 1., 2., 3., 'foo', 'bar']])
result.columns = list('aaaaaaa')
# TODO(wesm): unused?
df_v = df.iloc[:, 4] # noqa
res_v = result.iloc[:, 4] # noqa
tm.assert_frame_equal(df, result)
# GH 3561, dups not in selected order
df = DataFrame(
{'test': [5, 7, 9, 11],
'test1': [4., 5, 6, 7],
'other': list('abcd')}, index=['A', 'A', 'B', 'C'])
rows = ['C', 'B']
expected = DataFrame(
{'test': [11, 9],
'test1': [7., 6],
'other': ['d', 'c']}, index=rows)
result = df.loc[rows]
tm.assert_frame_equal(result, expected)
result = df.loc[Index(rows)]
tm.assert_frame_equal(result, expected)
rows = ['C', 'B', 'E']
expected = DataFrame(
{'test': [11, 9, np.nan],
'test1': [7., 6, np.nan],
'other': ['d', 'c', np.nan]}, index=rows)
result = df.loc[rows]
tm.assert_frame_equal(result, expected)
# see GH5553, make sure we use the right indexer
rows = ['F', 'G', 'H', 'C', 'B', 'E']
expected = DataFrame({'test': [np.nan, np.nan, np.nan, 11, 9, np.nan],
'test1': [np.nan, np.nan, np.nan, 7., 6, np.nan],
'other': [np.nan, np.nan, np.nan,
'd', 'c', np.nan]},
index=rows)
result = df.loc[rows]
tm.assert_frame_equal(result, expected)
# inconsistent returns for unique/duplicate indices when values are
# missing
df = DataFrame(randn(4, 3), index=list('ABCD'))
expected = df.ix[['E']]
dfnu = DataFrame(randn(5, 3), index=list('AABCD'))
result = dfnu.ix[['E']]
tm.assert_frame_equal(result, expected)
# TODO: check_index_type can be True after GH 11497
# GH 4619; duplicate indexer with missing label
df = DataFrame({"A": [0, 1, 2]})
result = df.ix[[0, 8, 0]]
expected = DataFrame({"A": [0, np.nan, 0]}, index=[0, 8, 0])
tm.assert_frame_equal(result, expected, check_index_type=False)
df = DataFrame({"A": list('abc')})
result = df.ix[[0, 8, 0]]
expected = DataFrame({"A": ['a', np.nan, 'a']}, index=[0, 8, 0])
tm.assert_frame_equal(result, expected, check_index_type=False)
# non-unique index with a non-unique selector
df = DataFrame({'test': [5, 7, 9, 11]}, index=['A', 'A', 'B', 'C'])
expected = DataFrame(
{'test': [5, 7, 5, 7, np.nan]}, index=['A', 'A', 'A', 'A', 'E'])
result = df.ix[['A', 'A', 'E']]
tm.assert_frame_equal(result, expected)
# GH 5835
# dups on index and missing values
df = DataFrame(
np.random.randn(5, 5), columns=['A', 'B', 'B', 'B', 'A'])
expected = pd.concat(
[df.ix[:, ['A', 'B']], DataFrame(np.nan, columns=['C'],
index=df.index)], axis=1)
result = df.ix[:, ['A', 'B', 'C']]
tm.assert_frame_equal(result, expected)
# GH 6504, multi-axis indexing
df = DataFrame(np.random.randn(9, 2),
index=[1, 1, 1, 2, 2, 2, 3, 3, 3], columns=['a', 'b'])
expected = df.iloc[0:6]
result = df.loc[[1, 2]]
tm.assert_frame_equal(result, expected)
expected = df
result = df.loc[:, ['a', 'b']]
tm.assert_frame_equal(result, expected)
expected = df.iloc[0:6, :]
result = df.loc[[1, 2], ['a', 'b']]
tm.assert_frame_equal(result, expected)
def test_indexing_mixed_frame_bug(self):
# GH3492
df = DataFrame({'a': {1: 'aaa', 2: 'bbb', 3: 'ccc'},
'b': {1: 111, 2: 222, 3: 333}})
# this works, new column is created correctly
df['test'] = df['a'].apply(lambda x: '_' if x == 'aaa' else x)
# this does not work, ie column test is not changed
idx = df['test'] == '_'
temp = df.ix[idx, 'a'].apply(lambda x: '-----' if x == 'aaa' else x)
df.ix[idx, 'test'] = temp
self.assertEqual(df.iloc[0, 2], '-----')
# original report: inspecting df showed element [0, 2] as '_', while
# df.ix[idx, 'test'] returned '-----' and df.iloc[0, 2] returned '_'.
def test_multitype_list_index_access(self):
# GH 10610
df = pd.DataFrame(np.random.random((10, 5)),
columns=["a"] + [20, 21, 22, 23])
with self.assertRaises(KeyError):
df[[22, 26, -8]]
self.assertEqual(df[21].shape[0], df.shape[0])
def test_set_index_nan(self):
# GH 3586
df = DataFrame({'PRuid': {17: 'nonQC',
18: 'nonQC',
19: 'nonQC',
20: '10',
21: '11',
22: '12',
23: '13',
24: '24',
25: '35',
26: '46',
27: '47',
28: '48',
29: '59',
30: '10'},
'QC': {17: 0.0,
18: 0.0,
19: 0.0,
20: nan,
21: nan,
22: nan,
23: nan,
24: 1.0,
25: nan,
26: nan,
27: nan,
28: nan,
29: nan,
30: nan},
'data': {17: 7.9544899999999998,
18: 8.0142609999999994,
19: 7.8591520000000008,
20: 0.86140349999999999,
21: 0.87853110000000001,
22: 0.8427041999999999,
23: 0.78587700000000005,
24: 0.73062459999999996,
25: 0.81668560000000001,
26: 0.81927080000000008,
27: 0.80705009999999999,
28: 0.81440240000000008,
29: 0.80140849999999997,
30: 0.81307740000000006},
'year': {17: 2006,
18: 2007,
19: 2008,
20: 1985,
21: 1985,
22: 1985,
23: 1985,
24: 1985,
25: 1985,
26: 1985,
27: 1985,
28: 1985,
29: 1985,
30: 1986}}).reset_index()
result = df.set_index(['year', 'PRuid', 'QC']).reset_index().reindex(
columns=df.columns)
tm.assert_frame_equal(result, df)
def test_multi_nan_indexing(self):
# GH 3588
df = DataFrame({"a": ['R1', 'R2', np.nan, 'R4'],
'b': ["C1", "C2", "C3", "C4"],
"c": [10, 15, np.nan, 20]})
result = df.set_index(['a', 'b'], drop=False)
expected = DataFrame({"a": ['R1', 'R2', np.nan, 'R4'],
'b': ["C1", "C2", "C3", "C4"],
"c": [10, 15, np.nan, 20]},
index=[Index(['R1', 'R2', np.nan, 'R4'],
name='a'),
Index(['C1', 'C2', 'C3', 'C4'], name='b')])
tm.assert_frame_equal(result, expected)
def test_multi_assign(self):
# GH 3626, an assignment of a sub-df to a df
df = DataFrame({'FC': ['a', 'b', 'a', 'b', 'a', 'b'],
'PF': [0, 0, 0, 0, 1, 1],
'col1': lrange(6),
'col2': lrange(6, 12)})
df.ix[1, 0] = np.nan
df2 = df.copy()
mask = ~df2.FC.isnull()
cols = ['col1', 'col2']
dft = df2 * 2
dft.ix[3, 3] = np.nan
expected = DataFrame({'FC': ['a', np.nan, 'a', 'b', 'a', 'b'],
'PF': [0, 0, 0, 0, 1, 1],
'col1': Series([0, 1, 4, 6, 8, 10]),
'col2': [12, 7, 16, np.nan, 20, 22]})
# frame on rhs
df2.ix[mask, cols] = dft.ix[mask, cols]
tm.assert_frame_equal(df2, expected)
df2.ix[mask, cols] = dft.ix[mask, cols]
tm.assert_frame_equal(df2, expected)
# with an ndarray on rhs
df2 = df.copy()
df2.ix[mask, cols] = dft.ix[mask, cols].values
tm.assert_frame_equal(df2, expected)
df2.ix[mask, cols] = dft.ix[mask, cols].values
tm.assert_frame_equal(df2, expected)
# broadcasting on the rhs is required
df = DataFrame(dict(A=[1, 2, 0, 0, 0], B=[0, 0, 0, 10, 11], C=[
0, 0, 0, 10, 11], D=[3, 4, 5, 6, 7]))
expected = df.copy()
mask = expected['A'] == 0
for col in ['A', 'B']:
expected.loc[mask, col] = df['D']
df.loc[df['A'] == 0, ['A', 'B']] = df['D']
tm.assert_frame_equal(df, expected)
def test_ix_assign_column_mixed(self):
# GH #1142
df = DataFrame(tm.getSeriesData())
df['foo'] = 'bar'
orig = df.ix[:, 'B'].copy()
df.ix[:, 'B'] = df.ix[:, 'B'] + 1
tm.assert_series_equal(df.B, orig + 1)
# GH 3668, mixed frame with series value
df = DataFrame({'x': lrange(10), 'y': lrange(10, 20), 'z': 'bar'})
expected = df.copy()
for i in range(5):
indexer = i * 2
v = 1000 + i * 200
expected.ix[indexer, 'y'] = v
self.assertEqual(expected.ix[indexer, 'y'], v)
df.ix[df.x % 2 == 0, 'y'] = df.ix[df.x % 2 == 0, 'y'] * 100
tm.assert_frame_equal(df, expected)
# GH 4508, making sure consistency of assignments
df = DataFrame({'a': [1, 2, 3], 'b': [0, 1, 2]})
df.ix[[0, 2, ], 'b'] = [100, -100]
expected = DataFrame({'a': [1, 2, 3], 'b': [100, 1, -100]})
tm.assert_frame_equal(df, expected)
df = pd.DataFrame({'a': lrange(4)})
df['b'] = np.nan
df.ix[[1, 3], 'b'] = [100, -100]
expected = DataFrame({'a': [0, 1, 2, 3],
'b': [np.nan, 100, np.nan, -100]})
tm.assert_frame_equal(df, expected)
# ok, but chained assignments are dangerous
# if we turn off chained assignment it will work
with option_context('chained_assignment', None):
df = pd.DataFrame({'a': lrange(4)})
df['b'] = np.nan
df['b'].ix[[1, 3]] = [100, -100]
tm.assert_frame_equal(df, expected)
def test_ix_get_set_consistency(self):
# GH 4544
# ix/loc get/set not consistent when
# a mixed int/string index
df = DataFrame(np.arange(16).reshape((4, 4)),
columns=['a', 'b', 8, 'c'],
index=['e', 7, 'f', 'g'])
self.assertEqual(df.ix['e', 8], 2)
self.assertEqual(df.loc['e', 8], 2)
df.ix['e', 8] = 42
self.assertEqual(df.ix['e', 8], 42)
self.assertEqual(df.loc['e', 8], 42)
df.loc['e', 8] = 45
self.assertEqual(df.ix['e', 8], 45)
self.assertEqual(df.loc['e', 8], 45)
def test_setitem_list(self):
# GH 6043
# ix with a list
df = DataFrame(index=[0, 1], columns=[0])
df.ix[1, 0] = [1, 2, 3]
df.ix[1, 0] = [1, 2]
result = DataFrame(index=[0, 1], columns=[0])
result.ix[1, 0] = [1, 2]
tm.assert_frame_equal(result, df)
# ix with an object
class TO(object):
def __init__(self, value):
self.value = value
def __str__(self):
return "[{0}]".format(self.value)
__repr__ = __str__
def __eq__(self, other):
return self.value == other.value
def view(self):
return self
df = DataFrame(index=[0, 1], columns=[0])
df.ix[1, 0] = TO(1)
df.ix[1, 0] = TO(2)
result = DataFrame(index=[0, 1], columns=[0])
result.ix[1, 0] = TO(2)
tm.assert_frame_equal(result, df)
# remains object dtype even after setting it back
df = DataFrame(index=[0, 1], columns=[0])
df.ix[1, 0] = TO(1)
df.ix[1, 0] = np.nan
result = DataFrame(index=[0, 1], columns=[0])
tm.assert_frame_equal(result, df)
def test_iloc_mask(self):
# GH 3631, iloc with a mask (of a series) should raise
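# a boolean Series is label-aligned, which has no meaning for positional
# .iloc, so it is rejected; a plain boolean ndarray of matching length is
# accepted.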
df = DataFrame(lrange(5), list('ABCDE'), columns=['a'])
mask = (df.a % 2 == 0)
self.assertRaises(ValueError, df.iloc.__getitem__, tuple([mask]))
mask.index = lrange(len(mask))
self.assertRaises(NotImplementedError, df.iloc.__getitem__,
tuple([mask]))
# ndarray ok
result = df.iloc[np.array([True] * len(mask), dtype=bool)]
tm.assert_frame_equal(result, df)
# the possibilities
locs = np.arange(4)
nums = 2 ** locs
reps = lmap(bin, nums)
df = DataFrame({'locs': locs, 'nums': nums}, reps)
expected = {
(None, ''): '0b1100',
(None, '.loc'): '0b1100',
(None, '.iloc'): '0b1100',
('index', ''): '0b11',
('index', '.loc'): '0b11',
('index', '.iloc'): ('iLocation based boolean indexing '
'cannot use an indexable as a mask'),
('locs', ''): 'Unalignable boolean Series provided as indexer '
'(index of the boolean Series and of the indexed '
'object do not match',
('locs', '.loc'): 'Unalignable boolean Series provided as indexer '
'(index of the boolean Series and of the '
'indexed object do not match',
('locs', '.iloc'): ('iLocation based boolean indexing on an '
'integer type is not available'),
}
# UserWarnings from reindex of a boolean mask
with warnings.catch_warnings(record=True):
result = dict()
for idx in [None, 'index', 'locs']:
mask = (df.nums > 2).values
if idx:
mask = Series(mask, list(reversed(getattr(df, idx))))
for method in ['', '.loc', '.iloc']:
try:
if method:
accessor = getattr(df, method[1:])
else:
accessor = df
ans = str(bin(accessor[mask]['nums'].sum()))
except Exception as e:
ans = str(e)
key = tuple([idx, method])
r = expected.get(key)
if r != ans:
raise AssertionError(
"[%s] does not match [%s], received [%s]"
% (key, ans, r))
def test_ix_slicing_strings(self):
# GH3836
data = {'Classification':
['SA EQUITY CFD', 'bbb', 'SA EQUITY', 'SA SSF', 'aaa'],
'Random': [1, 2, 3, 4, 5],
'X': ['correct', 'wrong', 'correct', 'correct', 'wrong']}
df = DataFrame(data)
x = df[~df.Classification.isin(['SA EQUITY CFD', 'SA EQUITY', 'SA SSF'
])]
df.ix[x.index, 'X'] = df['Classification']
expected = DataFrame({'Classification': {0: 'SA EQUITY CFD',
1: 'bbb',
2: 'SA EQUITY',
3: 'SA SSF',
4: 'aaa'},
'Random': {0: 1,
1: 2,
2: 3,
3: 4,
4: 5},
'X': {0: 'correct',
1: 'bbb',
2: 'correct',
3: 'correct',
4: 'aaa'}}) # bug was 4: 'bbb'
tm.assert_frame_equal(df, expected)
def test_non_unique_loc(self):
# GH3659
# non-unique indexer with loc slice
# https://groups.google.com/forum/?fromgroups#!topic/pydata/zTm2No0crYs
# these are going to raise because the index is non-monotonic
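# label slicing on a non-unique index is only defined when the index is
# monotonic; the unsorted frame raises, the sorted one below works.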
df = DataFrame({'A': [1, 2, 3, 4, 5, 6],
'B': [3, 4, 5, 6, 7, 8]}, index=[0, 1, 0, 1, 2, 3])
self.assertRaises(KeyError, df.loc.__getitem__,
tuple([slice(1, None)]))
self.assertRaises(KeyError, df.loc.__getitem__,
tuple([slice(0, None)]))
self.assertRaises(KeyError, df.loc.__getitem__, tuple([slice(1, 2)]))
# monotonic are ok
df = DataFrame({'A': [1, 2, 3, 4, 5, 6],
'B': [3, 4, 5, 6, 7, 8]},
index=[0, 1, 0, 1, 2, 3]).sort_index(axis=0)
result = df.loc[1:]
expected = DataFrame({'A': [2, 4, 5, 6], 'B': [4, 6, 7, 8]},
index=[1, 1, 2, 3])
tm.assert_frame_equal(result, expected)
result = df.loc[0:]
tm.assert_frame_equal(result, df)
result = df.loc[1:2]
expected = DataFrame({'A': [2, 4, 5], 'B': [4, 6, 7]},
index=[1, 1, 2])
tm.assert_frame_equal(result, expected)
def test_loc_name(self):
# GH 3880
df = DataFrame([[1, 1], [1, 1]])
df.index.name = 'index_name'
result = df.iloc[[0, 1]].index.name
self.assertEqual(result, 'index_name')
result = df.ix[[0, 1]].index.name
self.assertEqual(result, 'index_name')
result = df.loc[[0, 1]].index.name
self.assertEqual(result, 'index_name')
def test_iloc_non_unique_indexing(self):
# GH 4017, non-unique indexing (on the axis)
df = DataFrame({'A': [0.1] * 3000, 'B': [1] * 3000})
idx = np.array(lrange(30)) * 99
expected = df.iloc[idx]
df3 = pd.concat([df, 2 * df, 3 * df])
result = df3.iloc[idx]
tm.assert_frame_equal(result, expected)
df2 = DataFrame({'A': [0.1] * 1000, 'B': [1] * 1000})
df2 = pd.concat([df2, 2 * df2, 3 * df2])
sidx = df2.index.to_series()
expected = df2.iloc[idx[idx <= sidx.max()]]
new_list = []
for r, s in expected.iterrows():
new_list.append(s)
new_list.append(s * 2)
new_list.append(s * 3)
expected = DataFrame(new_list)
expected = pd.concat([expected, DataFrame(index=idx[idx > sidx.max()])
])
result = df2.loc[idx]
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_string_slice(self):
# GH 14424
# string indexing against datetimelike with object
# dtype should properly raise KeyError
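# partial string selection such as df['2011'] relies on a DatetimeIndex;
# with an object-dtype index of Timestamps (or an empty frame) it should
# raise KeyError.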
df = pd.DataFrame([1], pd.Index([pd.Timestamp('2011-01-01')],
dtype=object))
self.assertTrue(df.index.is_all_dates)
with tm.assertRaises(KeyError):
df['2011']
with tm.assertRaises(KeyError):
df.loc['2011', 0]
df = pd.DataFrame()
self.assertFalse(df.index.is_all_dates)
with tm.assertRaises(KeyError):
df['2011']
with tm.assertRaises(KeyError):
df.loc['2011', 0]
def test_mi_access(self):
# GH 4145
data = """h1 main h3 sub h5
0 a A 1 A1 1
1 b B 2 B1 2
2 c B 3 A1 3
3 d A 4 B2 4
4 e A 5 B2 5
5 f B 6 A2 6
"""
df = pd.read_csv(StringIO(data), sep=r'\s+', index_col=0)
df2 = df.set_index(['main', 'sub']).T.sort_index(1)
index = Index(['h1', 'h3', 'h5'])
columns = MultiIndex.from_tuples([('A', 'A1')], names=['main', 'sub'])
expected = DataFrame([['a', 1, 1]], index=columns, columns=index).T
result = df2.loc[:, ('A', 'A1')]
tm.assert_frame_equal(result, expected)
result = df2[('A', 'A1')]
tm.assert_frame_equal(result, expected)
# GH 4146, not returning a block manager when selecting a unique index
# from a duplicate index
# as of 4879, this returns a Series (which is similar to what happens
# with a non-unique)
expected = Series(['a', 1, 1], index=['h1', 'h3', 'h5'], name='A1')
result = df2['A']['A1']
tm.assert_series_equal(result, expected)
# selecting a non_unique from the 2nd level
expected = DataFrame([['d', 4, 4], ['e', 5, 5]],
index=Index(['B2', 'B2'], name='sub'),
columns=['h1', 'h3', 'h5'], ).T
result = df2['A']['B2']
tm.assert_frame_equal(result, expected)
def test_non_unique_loc_memory_error(self):
# GH 4280
# a large selection on a non-unique index used to trigger a memory error
columns = list('ABCDEFG')
def gen_test(l, l2):
return pd.concat([DataFrame(randn(l, len(columns)),
index=lrange(l), columns=columns),
DataFrame(np.ones((l2, len(columns))),
index=[0] * l2, columns=columns)])
def gen_expected(df, mask):
l = len(mask)
return pd.concat([df.take([0], convert=False),
DataFrame(np.ones((l, len(columns))),
index=[0] * l,
columns=columns),
df.take(mask[1:], convert=False)])
df = gen_test(900, 100)
self.assertFalse(df.index.is_unique)
mask = np.arange(100)
result = df.loc[mask]
expected = gen_expected(df, mask)
tm.assert_frame_equal(result, expected)
df = gen_test(900000, 100000)
self.assertFalse(df.index.is_unique)
mask = np.arange(100000)
result = df.loc[mask]
expected = gen_expected(df, mask)
tm.assert_frame_equal(result, expected)
def test_astype_assignment(self):
# GH4312 (iloc)
df_orig = DataFrame([['1', '2', '3', '.4', 5, 6., 'foo']],
columns=list('ABCDEFG'))
df = df_orig.copy()
df.iloc[:, 0:2] = df.iloc[:, 0:2].astype(np.int64)
expected = DataFrame([[1, 2, '3', '.4', 5, 6., 'foo']],
columns=list('ABCDEFG'))
tm.assert_frame_equal(df, expected)
df = df_orig.copy()
df.iloc[:, 0:2] = df.iloc[:, 0:2]._convert(datetime=True, numeric=True)
expected = DataFrame([[1, 2, '3', '.4', 5, 6., 'foo']],
columns=list('ABCDEFG'))
tm.assert_frame_equal(df, expected)
# GH5702 (loc)
df = df_orig.copy()
df.loc[:, 'A'] = df.loc[:, 'A'].astype(np.int64)
expected = DataFrame([[1, '2', '3', '.4', 5, 6., 'foo']],
columns=list('ABCDEFG'))
tm.assert_frame_equal(df, expected)
df = df_orig.copy()
df.loc[:, ['B', 'C']] = df.loc[:, ['B', 'C']].astype(np.int64)
expected = DataFrame([['1', 2, 3, '.4', 5, 6., 'foo']],
columns=list('ABCDEFG'))
tm.assert_frame_equal(df, expected)
# full replacements / no nans
df = DataFrame({'A': [1., 2., 3., 4.]})
df.iloc[:, 0] = df['A'].astype(np.int64)
expected = DataFrame({'A': [1, 2, 3, 4]})
tm.assert_frame_equal(df, expected)
df = DataFrame({'A': [1., 2., 3., 4.]})
df.loc[:, 'A'] = df['A'].astype(np.int64)
expected = DataFrame({'A': [1, 2, 3, 4]})
tm.assert_frame_equal(df, expected)
def test_astype_assignment_with_dups(self):
# GH 4686
# assignment with dups that has a dtype change
cols = pd.MultiIndex.from_tuples([('A', '1'), ('B', '1'), ('A', '2')])
df = DataFrame(np.arange(3).reshape((1, 3)),
columns=cols, dtype=object)
index = df.index.copy()
df['A'] = df['A'].astype(np.float64)
self.assert_index_equal(df.index, index)
# TODO(wesm): unused variables
# result = df.get_dtype_counts().sort_index()
# expected = Series({'float64': 2, 'object': 1}).sort_index()
def test_dups_loc(self):
# GH4726
# dup indexing with iloc/loc
df = DataFrame([[1, 2, 'foo', 'bar', Timestamp('20130101')]],
columns=['a', 'a', 'a', 'a', 'a'], index=[1])
expected = Series([1, 2, 'foo', 'bar', Timestamp('20130101')],
index=['a', 'a', 'a', 'a', 'a'], name=1)
result = df.iloc[0]
tm.assert_series_equal(result, expected)
result = df.loc[1]
tm.assert_series_equal(result, expected)
def test_partial_setting(self):
# GH2578, allow ix and friends to partially set
# series
s_orig = Series([1, 2, 3])
s = s_orig.copy()
s[5] = 5
expected = Series([1, 2, 3, 5], index=[0, 1, 2, 5])
tm.assert_series_equal(s, expected)
s = s_orig.copy()
s.loc[5] = 5
expected = Series([1, 2, 3, 5], index=[0, 1, 2, 5])
tm.assert_series_equal(s, expected)
s = s_orig.copy()
s[5] = 5.
expected = Series([1, 2, 3, 5.], index=[0, 1, 2, 5])
tm.assert_series_equal(s, expected)
s = s_orig.copy()
s.loc[5] = 5.
expected = Series([1, 2, 3, 5.], index=[0, 1, 2, 5])
tm.assert_series_equal(s, expected)
# iloc/iat raise
s = s_orig.copy()
def f():
s.iloc[3] = 5.
self.assertRaises(IndexError, f)
def f():
s.iat[3] = 5.
self.assertRaises(IndexError, f)
# ## frame ##
df_orig = DataFrame(
np.arange(6).reshape(3, 2), columns=['A', 'B'], dtype='int64')
# iloc/iat raise
df = df_orig.copy()
def f():
df.iloc[4, 2] = 5.
self.assertRaises(IndexError, f)
def f():
df.iat[4, 2] = 5.
self.assertRaises(IndexError, f)
# row setting where it exists
expected = DataFrame(dict({'A': [0, 4, 4], 'B': [1, 5, 5]}))
df = df_orig.copy()
df.iloc[1] = df.iloc[2]
tm.assert_frame_equal(df, expected)
expected = DataFrame(dict({'A': [0, 4, 4], 'B': [1, 5, 5]}))
df = df_orig.copy()
df.loc[1] = df.loc[2]
tm.assert_frame_equal(df, expected)
# like 2578, partial setting with dtype preservation
expected = DataFrame(dict({'A': [0, 2, 4, 4], 'B': [1, 3, 5, 5]}))
df = df_orig.copy()
df.loc[3] = df.loc[2]
tm.assert_frame_equal(df, expected)
# single dtype frame, overwrite
expected = DataFrame(dict({'A': [0, 2, 4], 'B': [0, 2, 4]}))
df = df_orig.copy()
df.ix[:, 'B'] = df.ix[:, 'A']
tm.assert_frame_equal(df, expected)
# mixed dtype frame, overwrite
expected = DataFrame(dict({'A': [0, 2, 4], 'B': Series([0, 2, 4])}))
df = df_orig.copy()
df['B'] = df['B'].astype(np.float64)
df.ix[:, 'B'] = df.ix[:, 'A']
tm.assert_frame_equal(df, expected)
# single dtype frame, partial setting
expected = df_orig.copy()
expected['C'] = df['A']
df = df_orig.copy()
df.ix[:, 'C'] = df.ix[:, 'A']
tm.assert_frame_equal(df, expected)
# mixed frame, partial setting
expected = df_orig.copy()
expected['C'] = df['A']
df = df_orig.copy()
df.ix[:, 'C'] = df.ix[:, 'A']
tm.assert_frame_equal(df, expected)
# ## panel ##
p_orig = Panel(np.arange(16).reshape(2, 4, 2),
items=['Item1', 'Item2'],
major_axis=pd.date_range('2001/1/12', periods=4),
minor_axis=['A', 'B'], dtype='float64')
# panel setting via item
p_orig = Panel(np.arange(16).reshape(2, 4, 2),
items=['Item1', 'Item2'],
major_axis=pd.date_range('2001/1/12', periods=4),
minor_axis=['A', 'B'], dtype='float64')
expected = p_orig.copy()
expected['Item3'] = expected['Item1']
p = p_orig.copy()
p.loc['Item3'] = p['Item1']
tm.assert_panel_equal(p, expected)
# panel with aligned series
expected = p_orig.copy()
expected = expected.transpose(2, 1, 0)
expected['C'] = DataFrame({'Item1': [30, 30, 30, 30],
'Item2': [32, 32, 32, 32]},
index=p_orig.major_axis)
expected = expected.transpose(2, 1, 0)
p = p_orig.copy()
p.loc[:, :, 'C'] = Series([30, 32], index=p_orig.items)
tm.assert_panel_equal(p, expected)
# GH 8473
dates = date_range('1/1/2000', periods=8)
df_orig = DataFrame(np.random.randn(8, 4), index=dates,
columns=['A', 'B', 'C', 'D'])
expected = pd.concat([df_orig, DataFrame(
{'A': 7}, index=[dates[-1] + 1])])
df = df_orig.copy()
df.loc[dates[-1] + 1, 'A'] = 7
tm.assert_frame_equal(df, expected)
df = df_orig.copy()
df.at[dates[-1] + 1, 'A'] = 7
tm.assert_frame_equal(df, expected)
exp_other = DataFrame({0: 7}, index=[dates[-1] + 1])
expected = pd.concat([df_orig, exp_other], axis=1)
df = df_orig.copy()
df.loc[dates[-1] + 1, 0] = 7
tm.assert_frame_equal(df, expected)
df = df_orig.copy()
df.at[dates[-1] + 1, 0] = 7
tm.assert_frame_equal(df, expected)
def test_partial_setting_mixed_dtype(self):
# in a mixed dtype environment, try to preserve dtypes
# by appending
df = DataFrame([[True, 1], [False, 2]], columns=["female", "fitness"])
s = df.loc[1].copy()
s.name = 2
expected = df.append(s)
df.loc[2] = df.loc[1]
tm.assert_frame_equal(df, expected)
# columns will align
df = DataFrame(columns=['A', 'B'])
df.loc[0] = Series(1, index=range(4))
tm.assert_frame_equal(df, DataFrame(columns=['A', 'B'], index=[0]))
# columns will align
df = DataFrame(columns=['A', 'B'])
df.loc[0] = Series(1, index=['B'])
exp = DataFrame([[np.nan, 1]], columns=['A', 'B'],
index=[0], dtype='float64')
tm.assert_frame_equal(df, exp)
# list-like must conform
df = DataFrame(columns=['A', 'B'])
def f():
df.loc[0] = [1, 2, 3]
self.assertRaises(ValueError, f)
# these are coerced to float unavoidably (as it's a list-like to begin with)
df = DataFrame(columns=['A', 'B'])
df.loc[3] = [6, 7]
exp = DataFrame([[6, 7]], index=[3], columns=['A', 'B'],
dtype='float64')
tm.assert_frame_equal(df, exp)
def test_series_partial_set(self):
# partial set with new index
# Regression from GH4825
ser = Series([0.1, 0.2], index=[1, 2])
# loc
expected = Series([np.nan, 0.2, np.nan], index=[3, 2, 3])
result = ser.loc[[3, 2, 3]]
tm.assert_series_equal(result, expected, check_index_type=True)
expected = Series([np.nan, 0.2, np.nan, np.nan], index=[3, 2, 3, 'x'])
result = ser.loc[[3, 2, 3, 'x']]
tm.assert_series_equal(result, expected, check_index_type=True)
expected = Series([0.2, 0.2, 0.1], index=[2, 2, 1])
result = ser.loc[[2, 2, 1]]
tm.assert_series_equal(result, expected, check_index_type=True)
expected = Series([0.2, 0.2, np.nan, 0.1], index=[2, 2, 'x', 1])
result = ser.loc[[2, 2, 'x', 1]]
tm.assert_series_equal(result, expected, check_index_type=True)
# raises as nothing is in the index
self.assertRaises(KeyError, lambda: ser.loc[[3, 3, 3]])
expected = Series([0.2, 0.2, np.nan], index=[2, 2, 3])
result = ser.loc[[2, 2, 3]]
tm.assert_series_equal(result, expected, check_index_type=True)
expected = Series([0.3, np.nan, np.nan], index=[3, 4, 4])
result = Series([0.1, 0.2, 0.3], index=[1, 2, 3]).loc[[3, 4, 4]]
tm.assert_series_equal(result, expected, check_index_type=True)
expected = Series([np.nan, 0.3, 0.3], index=[5, 3, 3])
result = Series([0.1, 0.2, 0.3, 0.4],
index=[1, 2, 3, 4]).loc[[5, 3, 3]]
tm.assert_series_equal(result, expected, check_index_type=True)
expected = Series([np.nan, 0.4, 0.4], index=[5, 4, 4])
result = Series([0.1, 0.2, 0.3, 0.4],
index=[1, 2, 3, 4]).loc[[5, 4, 4]]
tm.assert_series_equal(result, expected, check_index_type=True)
expected = Series([0.4, np.nan, np.nan], index=[7, 2, 2])
result = Series([0.1, 0.2, 0.3, 0.4],
index=[4, 5, 6, 7]).loc[[7, 2, 2]]
tm.assert_series_equal(result, expected, check_index_type=True)
expected = Series([0.4, np.nan, np.nan], index=[4, 5, 5])
result = Series([0.1, 0.2, 0.3, 0.4],
index=[1, 2, 3, 4]).loc[[4, 5, 5]]
tm.assert_series_equal(result, expected, check_index_type=True)
# iloc
expected = Series([0.2, 0.2, 0.1, 0.1], index=[2, 2, 1, 1])
result = ser.iloc[[1, 1, 0, 0]]
tm.assert_series_equal(result, expected, check_index_type=True)
def test_series_partial_set_with_name(self):
# GH 11497
idx = Index([1, 2], dtype='int64', name='idx')
ser = Series([0.1, 0.2], index=idx, name='s')
# loc
exp_idx = Index([3, 2, 3], dtype='int64', name='idx')
expected = Series([np.nan, 0.2, np.nan], index=exp_idx, name='s')
result = ser.loc[[3, 2, 3]]
tm.assert_series_equal(result, expected, check_index_type=True)
exp_idx = Index([3, 2, 3, 'x'], dtype='object', name='idx')
expected = Series([np.nan, 0.2, np.nan, np.nan], index=exp_idx,
name='s')
result = ser.loc[[3, 2, 3, 'x']]
tm.assert_series_equal(result, expected, check_index_type=True)
exp_idx = Index([2, 2, 1], dtype='int64', name='idx')
expected = Series([0.2, 0.2, 0.1], index=exp_idx, name='s')
result = ser.loc[[2, 2, 1]]
tm.assert_series_equal(result, expected, check_index_type=True)
exp_idx = Index([2, 2, 'x', 1], dtype='object', name='idx')
expected = Series([0.2, 0.2, np.nan, 0.1], index=exp_idx, name='s')
result = ser.loc[[2, 2, 'x', 1]]
tm.assert_series_equal(result, expected, check_index_type=True)
# raises as nothing is in the index
self.assertRaises(KeyError, lambda: ser.loc[[3, 3, 3]])
exp_idx = Index([2, 2, 3], dtype='int64', name='idx')
expected = Series([0.2, 0.2, np.nan], index=exp_idx, name='s')
result = ser.loc[[2, 2, 3]]
tm.assert_series_equal(result, expected, check_index_type=True)
exp_idx = Index([3, 4, 4], dtype='int64', name='idx')
expected = Series([0.3, np.nan, np.nan], index=exp_idx, name='s')
idx = Index([1, 2, 3], dtype='int64', name='idx')
result = Series([0.1, 0.2, 0.3], index=idx, name='s').loc[[3, 4, 4]]
| tm.assert_series_equal(result, expected, check_index_type=True) | pandas.util.testing.assert_series_equal |
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import PolynomialFeatures
import numpy as np
from pylab import rcParams
##########################################################################################
# Designed and developed by <NAME>
# Date : 11 Oct 2018
# Function: batsman4s
# This function plots the number of 4s vs the runs scored in the innings by the batsman
#
###########################################################################################
def batsman4s(file, name="A Hookshot"):
'''
Plot the numbers of 4s against the runs scored by batsman
Description
This function plots the number of 4s against the total runs scored by batsman. A 2nd order polynomial regression curve is also plotted. The predicted number of 4s for 50 runs and 100 runs scored is also plotted
Usage
batsman4s(file, name="A Hookshot")
Arguments
file
This is the <batsman>.csv file obtained with an initial getPlayerData()
name
Name of the batsman
Note
Maintainer: <NAME> <<EMAIL>>
Author(s)
<NAME>
References
http://www.espncricinfo.com/ci/content/stats/index.html
https://gigadom.wordpress.com/
See Also
batsman6s
Examples
# Get or use the <batsman>.csv obtained with getPlayerData()
tendulkar = getPlayerData(35320,dir="../",file="tendulkar.csv",type="batting", homeOrAway=[1,2],result=[1,2,4])
'''
# Clean the batsman file and create a complete data frame
df = clean(file)
# Set figure size
rcParams['figure.figsize'] = 10,6
# Get number of 4s and runs scored
x4s = pd.to_numeric(df['4s'])
runs = pd.to_numeric(df['Runs'])
atitle = name + "-" + "Runs scored vs No of 4s"
# Plot no of 4s and a 2nd order curve fit
plt.scatter(runs, x4s, alpha=0.5)
plt.xlabel('Runs')
plt.ylabel('4s')
plt.title(atitle)
# Create a polynomial of degree 2
poly = PolynomialFeatures(degree=2)
runsPoly = poly.fit_transform(runs.values.reshape(-1,1))
linreg = LinearRegression().fit(runsPoly,x4s)
plt.plot(runs,linreg.predict(runsPoly),'-r')
# Predict the number of 4s for 50 runs
b=poly.fit_transform(np.array([[50]]))    # 2-D input: one sample, one feature
c=linreg.predict(b)
plt.axhline(y=c, color='b', linestyle=':')
plt.axvline(x=50, color='b', linestyle=':')
# Predict the number of 4s for 100 runs
b=poly.fit_transform(np.array([[100]]))   # 2-D input: one sample, one feature
c=linreg.predict(b)
plt.axhline(y=c, color='b', linestyle=':')
plt.axvline(x=100, color='b', linestyle=':')
plt.text(180, 0.5,'Data source-Courtesy:ESPN Cricinfo',
horizontalalignment='center',
verticalalignment='center',
)
plt.show()
plt.gcf().clear()
return
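# ----------------------------------------------------------------------------
# Illustrative sketch, not part of the cricketr/cricpy API: a self-contained
# version of the degree-2 polynomial fit used in batsman4s(), on made-up
# numbers. It shows the 2-D (n_samples, 1) input shape that PolynomialFeatures
# and LinearRegression expect, both when fitting and when predicting the
# expected number of 4s for a single score such as 50.
def _demo_poly_fit_sketch():
    import numpy as np
    from sklearn.linear_model import LinearRegression
    from sklearn.preprocessing import PolynomialFeatures
    runs = np.array([10, 25, 40, 55, 70, 100, 120])   # hypothetical runs
    fours = np.array([1, 2, 4, 5, 7, 11, 13])         # hypothetical 4s
    poly = PolynomialFeatures(degree=2)
    X = poly.fit_transform(runs.reshape(-1, 1))       # columns: 1, x, x**2
    linreg = LinearRegression().fit(X, fours)
    # Expected 4s for an innings of 50 runs
    return linreg.predict(poly.transform(np.array([[50]])))[0]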
import matplotlib.pyplot as plt
import seaborn as sns
from pylab import rcParams
##########################################################################################
# Designed and developed by <NAME>
# Date : 13 Oct 2018
# Function: batsman6s
# This function plots the number of 6s vs the runs scored in the innings by the batsman
#
###########################################################################################
def batsman6s(file, name="A Hookshot") :
'''
Description
Compute and plot the number of 6s in the total runs scored by batsman
Usage
batsman6s(file, name="A Hookshot")
Arguments
file
This is the <batsman>.csv file obtained with an initial getPlayerData()
name
Name of the batsman
Examples
# Get or use the <batsman>.csv obtained with getPlayerData()
# tendulkar = getPlayerData(35320,file="tendulkar.csv",type="batting", homeOrAway=c(1,2),result=c(1,2,4))
'''
x6s = []
# Set figure size
rcParams['figure.figsize'] = 10,6
# Clean the batsman file and create a complete data frame
df = clean (file)
# Remove all rows where 6s are 0
a= df['6s'] !=0
b= df[a]
x6s=b['6s'].astype(int)
runs=pd.to_numeric(b['Runs'])
# Plot the 6s as a boxplot
atitle =name + "-" + "Runs scored vs No of 6s"
df1=pd.concat([runs,x6s],axis=1)
fig = sns.boxplot(x="6s", y="Runs", data=df1)
plt.title(atitle)
plt.text(2.2, 10,'Data source-Courtesy:ESPN Cricinfo',
horizontalalignment='center',
verticalalignment='center',
)
plt.show()
plt.gcf().clear()
return
import matplotlib.pyplot as plt
import seaborn as sns
from pylab import rcParams
##########################################################################################
# Designed and developed by <NAME>
# Date : 14 Oct 2018
# Function: batsmanAvgRunsGround
# This function plots the average runs scored by batsman at the ground. The xlabels indicate
# the number of innings at ground
#
###########################################################################################
def batsmanAvgRunsGround(file, name="A Latecut"):
'''
Description
This function computes the Average Runs scored on different pitches and also indicates the number of innings played at these venues
Usage
batsmanAvgRunsGround(file, name = "A Latecut")
Arguments
file
This is the <batsman>.csv file obtained with an initial getPlayerData()
name
Name of the batsman
Details
More details can be found in my short video tutorial in Youtube https://www.youtube.com/watch?v=q9uMPFVsXsI
Value
None
Note
Maintainer: <NAME> <<EMAIL>>
Author(s)
<NAME>
References
http://www.espncricinfo.com/ci/content/stats/index.html
https://gigadom.wordpress.com/
See Also
batsmanDismissals, batsmanMovingAverage, batsmanPerfBoxHist
Examples
# Get or use the <batsman>.csv obtained with getPlayerData()
##tendulkar = getPlayerData(35320,file="tendulkar.csv",type="batting", homeOrAway=[1,2],result=[1,2,4])
'''
batsman = clean(file)
rcParams['figure.figsize'] = 10,6
batsman['Runs']=pd.to_numeric(batsman['Runs'])
# Aggregate as sum, mean and count
df=batsman[['Runs','Ground']].groupby('Ground').agg(['sum','mean','count'])
#Flatten multi-levels to column names
df.columns= ['_'.join(col).strip() for col in df.columns.values]
# Reset index
df1=df.reset_index(inplace=False)
atitle = name + "'s Average Runs at Ground"
plt.xticks(rotation='vertical')
plt.axhline(y=50, color='b', linestyle=':')
plt.axhline(y=100, color='r', linestyle=':')
ax=sns.barplot(x='Ground', y="Runs_mean", data=df1)
plt.title(atitle)
plt.text(30, 180,'Data source-Courtesy:ESPN Cricinfo',\
horizontalalignment='center',\
verticalalignment='center',\
)
plt.show()
plt.gcf().clear()
return
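# ----------------------------------------------------------------------------
# Illustrative sketch, not part of the cricketr/cricpy API: the groupby/agg
# pattern used in batsmanAvgRunsGround() on made-up data. agg(['sum','mean',
# 'count']) yields a MultiIndex on the columns; joining the levels with '_'
# gives flat names such as 'Runs_mean' that seaborn can plot directly.
def _demo_groupby_flatten_sketch():
    import pandas as pd
    df = pd.DataFrame({'Ground': ['Eden Gardens', 'Eden Gardens', 'Lords', 'MCG'],
                       'Runs': [120, 30, 45, 80]})
    agg = df[['Runs', 'Ground']].groupby('Ground').agg(['sum', 'mean', 'count'])
    agg.columns = ['_'.join(col).strip() for col in agg.columns.values]
    return agg.reset_index()   # Ground, Runs_sum, Runs_mean, Runs_count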
import matplotlib.pyplot as plt
import seaborn as sns
from pylab import rcParams
##########################################################################################
# Designed and developed by <NAME>
# Date : 14 Oct 2018
# Function: batsmanAvgRunsOpposition
# This function plots the average runs scored by batsman versus the opposition. The xlabels indicate
# the Opposition and the number of innings at ground
#
###########################################################################################
def batsmanAvgRunsOpposition(file, name="A Latecut"):
'''
This function computes and plots the Average runs against different opposition played by batsman
Description
This function computes the mean runs scored by batsman against different opposition
Usage
batsmanAvgRunsOpposition(file, name = "A Latecut")
Arguments
file
This is the <batsman>.csv file obtained with an initial getPlayerData()
name
Name of the batsman
Details
More details can be found in my short video tutorial in Youtube https://www.youtube.com/watch?v=q9uMPFVsXsI
Value
None
Note
Maintainer: <NAME> <<EMAIL>>
Author(s)
<NAME>
References
http://www.espncricinfo.com/ci/content/stats/index.html
https://gigadom.wordpress.com/
See Also
batsmanDismissals, batsmanMovingAverage, batsmanPerfBoxHist batsmanAvgRunsGround
Examples
# Get or use the <batsman>.csv obtained with getPlayerData()
#tendulkar = getPlayerData(35320,file="tendulkar.csv",type="batting", homeOrAway=[1,2],result=[1,2,4])
'''
batsman = clean(file)
# Set figure size
rcParams['figure.figsize'] = 10,6
batsman['Runs']=pd.to_numeric(batsman['Runs'])
# Aggregate as sum, mean and count
df=batsman[['Runs','Opposition']].groupby('Opposition').agg(['sum','mean','count'])
#Flatten multi-levels to column names
df.columns= ['_'.join(col).strip() for col in df.columns.values]
# Reset index
df1=df.reset_index(inplace=False)
atitle = name + "'s Average Runs vs Opposition"
plt.xticks(rotation='vertical')
ax=sns.barplot(x='Opposition', y="Runs_mean", data=df1)
plt.axhline(y=50, color='b', linestyle=':')
plt.title(atitle)
plt.text(5, 50, 'Data source-Courtesy:ESPN Cricinfo',\
horizontalalignment='center',\
verticalalignment='center',\
)
plt.show()
plt.gcf().clear()
return
import matplotlib.pyplot as plt
import seaborn as sns
from pylab import rcParams
##########################################################################################
# Designed and developed by <NAME>
# Date : 19 Oct 2018
# Function: batsmanContributionWonLost
# This plots the batsman's contribution to won and lost matches
#
###########################################################################################
def batsmanContributionWonLost(file,name="A Hitter"):
'''
Display the batsman's contribution in matches that were won and those that were lost
Description
Plot the comparative contribution of the batsman in matches that were won and lost as box plots
Usage
batsmanContributionWonLost(file, name = "A Hitter")
Arguments
file
CSV file of batsman from ESPN Cricinfo obtained with getPlayerDataSp()
name
Name of the batsman
Details
More details can be found in my short video tutorial in Youtube https://www.youtube.com/watch?v=q9uMPFVsXsI
Value
None
Note
Maintainer: <NAME> <<EMAIL>>
Author(s)
<NAME>
References
http://www.espncricinfo.com/ci/content/stats/index.html
https://gigadom.wordpress.com/
See Also
batsmanMovingAverage batsmanRunsPredict batsmanPerfBoxHist
Examples
# Get or use the <batsman>.csv obtained with getPlayerData()
#tendulkarsp = getPlayerDataSp(35320,".","tendulkarsp.csv","batting")
batsmanContributionWonLost(tendulkarsp,"<NAME>")
'''
playersp = clean(file)
# Set figure size
rcParams['figure.figsize'] = 10,6
# Create a column based on result
won = playersp[playersp['result'] == 1].copy()   # copy to avoid SettingWithCopyWarning
lost = playersp[(playersp['result']==2) | (playersp['result']==4)].copy()
won['status']="won"
lost['status']="lost"
# Stack dataframes
df= pd.concat([won,lost])
df['Runs']= pd.to_numeric(df['Runs'])
ax = sns.boxplot(x='status',y='Runs',data=df)
atitle = name + " - Runs in games won/lost-drawn"
plt.title(atitle)
plt.text(0.5, 200,'Data source-Courtesy:ESPN Cricinfo',
horizontalalignment='center',
verticalalignment='center',
)
plt.show()
plt.gcf().clear()
return
import matplotlib.pyplot as plt
from pylab import rcParams
##########################################################################################
# Designed and developed by <NAME>
# Date : 17 Oct 2018
# Function: batsmanCumulativeAverageRuns
# This function computes and plots the cumulative average runs by a batsman
#
###########################################################################################
def batsmanCumulativeAverageRuns(file,name="A Leg Glance"):
'''
Batsman's cumulative average runs
Description
This function computes and plots the cumulative average runs of a batsman
Usage
batsmanCumulativeAverageRuns(file,name= "A Leg Glance")
Arguments
file
Data frame
name
Name of batsman
Value
None
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://www.espncricinfo.com/ci/content/stats/index.html
https://gigadom.wordpress.com/
See Also
batsmanCumulativeStrikeRate bowlerCumulativeAvgEconRate bowlerCumulativeAvgWickets
Examples
## Not run:
# retrieve the file path of a data file installed with cricketr
batsmanCumulativeAverageRuns(pathToFile, "<NAME>")
'''
batsman= clean(file)
# Set figure size
rcParams['figure.figsize'] = 10,6
runs=pd.to_numeric(batsman['Runs'])
# Compute cumulative average
cumAvg = runs.cumsum()/pd.Series(np.arange(1, len(runs)+1), runs.index)
atitle = name + "- Cumulative Average vs No of innings"
plt.plot(cumAvg)
plt.xlabel('Innings')
plt.ylabel('Cumulative average')
plt.title(atitle)
plt.text(200,20,'Data source-Courtesy:ESPN Cricinfo',
horizontalalignment='center',
verticalalignment='center',
)
plt.show()
plt.gcf().clear()
return
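# ----------------------------------------------------------------------------
# Illustrative sketch, not part of the cricketr/cricpy API: the cumulative
# average used in batsmanCumulativeAverageRuns() is just the running sum of
# runs divided by the innings number. On the made-up scores below the result
# is [10.0, 15.0, 20.0, 25.0].
def _demo_cumulative_average_sketch():
    import numpy as np
    import pandas as pd
    runs = pd.Series([10, 20, 30, 40])
    return runs.cumsum() / pd.Series(np.arange(1, len(runs) + 1), runs.index)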
import matplotlib.pyplot as plt
from pylab import rcParams
##########################################################################################
# Designed and developed by <NAME>
# Date : 17 Oct 2018
# Function: batsmanCumulativeStrikeRate
# This function computes and plots the cumulative average strike rate of a batsman
#
###########################################################################################
def batsmanCumulativeStrikeRate(file,name="A Leg Glance"):
'''
Batsman's cumulative average strike rate
Description
This function computes and plots the cumulative average strike rate of a batsman
Usage
batsmanCumulativeStrikeRate(file,name= "A Leg Glance")
Arguments
file
Data frame
name
Name of batsman
Value
None
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://www.espncricinfo.com/ci/content/stats/index.html
https://gigadom.wordpress.com/
See Also
batsmanCumulativeAverageRuns bowlerCumulativeAvgEconRate bowlerCumulativeAvgWickets
Examples
## Not run:
batsmanCumulativeStrikeRate(pathToFile, "<NAME>")
'''
batsman= clean(file)
# Set figure size
rcParams['figure.figsize'] = 10,6
strikeRate=pd.to_numeric(batsman['SR'])
# Compute cumulative strike rate
cumStrikeRate = strikeRate.cumsum()/pd.Series(np.arange(1, len(strikeRate)+1), strikeRate.index)
atitle = name + "- Cumulative Strike rate vs No of innings"
plt.xlabel('Innings')
plt.ylabel('Cumulative Strike Rate')
plt.title(atitle)
plt.plot(cumStrikeRate)
plt.text(200,60,'Data source-Courtesy:ESPN Cricinfo',
horizontalalignment='center',
verticalalignment='center',
)
plt.show()
plt.gcf().clear()
return
import matplotlib.pyplot as plt
import seaborn as sns
from pylab import rcParams
##########################################################################################
# Designed and developed by <NAME>
# Date : 13 Oct 2018
# Function: batsmanDismissals
# This function plots the batsman dismissals
#
###########################################################################################
def batsmanDismissals(file, name="A Squarecut"):
'''
Display a 3D Pie Chart of the dismissals of the batsman
Description
Display the dismissals of the batsman (caught, bowled, hit wicket etc) as percentages
Usage
batsmanDismissals(file, name="A Squarecut")
Arguments
file
This is the <batsman>.csv file obtained with an initial getPlayerData()
name
Name of the batsman
Details
More details can be found in my short video tutorial in Youtube https://www.youtube.com/watch?v=q9uMPFVsXsI
Value
None
Note
Maintainer: <NAME> <<EMAIL>>
Author(s)
<NAME>
References
http://www.espncricinfo.com/ci/content/stats/index.html
https://gigadom.wordpress.com/
See Also
batsmanMeanStrikeRate, batsmanMovingAverage, batsmanPerfBoxHist
Examples
# Get or use the <batsman>.csv obtained with getPlayerData()
#tendulkar= getPlayerData(35320,file="tendulkar.csv",type="batting", homeOrAway=c(1,2),result=c(1,2,4))
batsmanDismissals(pathToFile,"<NAME>")
'''
batsman = clean(file)
# Set figure size
rcParams['figure.figsize'] = 10,6
d = batsman['Dismissal']
# Convert to data frame
df = pd.DataFrame(d)
df1=df['Dismissal'].groupby(df['Dismissal']).count()
df2 = pd.DataFrame(df1)
df2.columns=['Count']
df3=df2.reset_index(inplace=False)
# Plot a pie chart
plt.pie(df3['Count'], labels=df3['Dismissal'],autopct='%.1f%%')
atitle = name + "-Pie chart of dismissals"
plt.suptitle(atitle, fontsize=16)
plt.show()
plt.gcf().clear()
return
import numpy as np
import matplotlib.pyplot as plt
from pylab import rcParams
##########################################################################################
# Designed and developed by <NAME>
# Date : 13 Oct 2018
# Function: batsmanMeanStrikeRate
# This function plots the Mean Strike Rate of the batsman against Runs scored as a continuous graph
#
###########################################################################################
def batsmanMeanStrikeRate(file, name="A Hitter"):
'''
batsmanMeanStrikeRate {cricketr} R Documentation
Calculate and plot the Mean Strike Rate of the batsman on total runs scored
Description
This function calculates the Mean Strike Rate of the batsman for each interval of runs scored
Usage
batsmanMeanStrikeRate(file, name = "A Hitter")
Arguments
file
This is the <batsman>.csv file obtained with an initial getPlayerData()
name
Name of the batsman
Details
More details can be found in my short video tutorial in Youtube https://www.youtube.com/watch?v=q9uMPFVsXsI
Value
None
Note
Maintainer: <NAME> <<EMAIL>>
Author(s)
<NAME>
References
http://www.espncricinfo.com/ci/content/stats/index.html
https://gigadom.wordpress.com/
See Also
batsmanDismissals, batsmanMovingAverage, batsmanPerfBoxHist batsmanPerfBoxHist
Examples
# Get or use the <batsman>.csv obtained with getPlayerData()
#tendulkar <- getPlayerData(35320,file="tendulkar.csv",type="batting", homeOrAway=c(1,2),result=c(1,2,4))
batsmanMeanStrikeRate(pathToFile,"<NAME>")
'''
batsman = clean(file)
# Set figure size
rcParams['figure.figsize'] = 10,6
runs= pd.to_numeric(batsman['Runs'])
# Create the histogram
hist, bins = np.histogram(runs, bins = 20)
midBin=[]
SR=[]
# Loop through
for i in range(1,len(bins)):
# Find the mean of the bins (Runs)
midBin.append(np.mean([bins[i-1],bins[i]]))
# Filter runs that are between 2 bins
batsman['Runs']=pd.to_numeric(batsman['Runs'])
a=(batsman['Runs'] > bins[i-1]) & (batsman['Runs'] <= bins[i])
df=batsman[a]
SR.append(np.mean(df['SR']))
atitle = name + "-" + "Strike rate in run ranges"
# Plot no of 4s and a 2nd order curve fit
plt.scatter(midBin, SR, alpha=0.5)
plt.plot(midBin, SR,color="r", alpha=0.5)
plt.xlabel('Runs')
plt.ylabel('Strike Rate')
plt.title(atitle)
plt.text(180, 50,'Data source-Courtesy:ESPN Cricinfo',
horizontalalignment='center',
verticalalignment='center',
)
plt.show()
plt.gcf().clear()
return
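# ----------------------------------------------------------------------------
# Illustrative sketch, not part of the cricketr/cricpy API: the same "mean
# strike rate per run range" idea as batsmanMeanStrikeRate(), written with
# pd.cut/groupby instead of an explicit loop over histogram bins. Numbers are
# made up.
def _demo_strike_rate_by_run_range_sketch():
    import numpy as np
    import pandas as pd
    df = pd.DataFrame({'Runs': [5, 12, 18, 35, 42, 60, 75, 90],
                       'SR':   [40, 55, 50, 62, 70, 65, 80, 72]})
    bins = np.arange(0, 101, 20)              # 0-20, 20-40, ..., 80-100
    df['range'] = pd.cut(df['Runs'], bins)
    return df.groupby('range')['SR'].mean()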
import matplotlib.pyplot as plt
import numpy as np
from pylab import rcParams
##########################################################################################
# Designed and developed by <NAME>
# Date : 17 Oct 2018
# Function: batsmanMovingAverage
# This function computes and plots the Moving Average of the batsman across his career
#
###########################################################################################
# Compute a moving average
def movingaverage(interval, window_size):
window= np.ones(int(window_size))/float(window_size)
return np.convolve(interval, window, 'same')
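# ----------------------------------------------------------------------------
# Illustrative sketch, not part of the cricketr/cricpy API: np.convolve with a
# box window of ones/window_size gives a simple moving average. mode='same'
# keeps the output the same length as the input, so the first and last few
# points are averaged over a partially zero-padded window and are damped.
def _demo_movingaverage_sketch():
    import numpy as np
    values = np.array([1., 2., 3., 4., 5., 6., 7.])
    window = np.ones(3) / 3.0
    # -> [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 4.33...]
    return np.convolve(values, window, 'same')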
def batsmanMovingAverage(file,name="A Squarecut") :
'''
Calculate and plot the Moving Average of the batsman in his career
Description
This function calculates and plots the Moving Average of the batsman in his career
Usage
batsmanMovingAverage(file,name="A Squarecut")
Arguments
file
This is the <batsman>.csv file obtained with an initial getPlayerData()
name
Name of the batsman
Details
More details can be found in my short video tutorial in Youtube https://www.youtube.com/watch?v=q9uMPFVsXsI
Value
None
Note
Maintainer: <NAME> <<EMAIL>>
Author(s)
<NAME>
References
http://www.espncricinfo.com/ci/content/stats/index.html
https://gigadom.wordpress.com/
See Also
batsmanDismissals, batsmanMeanStrikeRate, batsmanPerfBoxHist
Examples
# Get or use the <batsman>.csv obtained with getPlayerData()
#tendulkar <- getPlayerData(35320,file="tendulkar.csv",type="batting", homeOrAway=c(1,2),result=c(1,2,4))
batsmanMovingAverage(pathToFile,"<NAME>")
'''
# Compute the moving average of the time series
batsman = clean(file)
# Set figure size
rcParams['figure.figsize'] = 10,6
runs=pd.to_numeric(batsman['Runs'])
date= pd.to_datetime(batsman['Start Date'])
atitle = name + "'s Moving average (Runs)"
# Plot the runs in grey color
plt.plot(date,runs,"-",color = '0.75')
# Compute and plot moving average
y_av = movingaverage(runs, 50)
plt.xlabel('Date')
plt.ylabel('Runs')
plt.plot(date, y_av,"b")
plt.title(atitle)
plt.text('2002-01-03',150,'Data source-Courtesy:ESPN Cricinfo',
horizontalalignment='center',
verticalalignment='center',
)
plt.show()
plt.gcf().clear()
return
import matplotlib.pyplot as plt
import seaborn as sns
from pylab import rcParams
##########################################################################################
# Designed and developed by <NAME>
# Date : 14 Oct 2018
# Function: batsmanPerfBoxHist
# This function makes a box plot showing the mean, median and the 25th & 75th percentile runs. The
# histogram shows the frequency of scoring runs in different run ranges
#
###########################################################################################
# Plot the batting performance as a combined box plot and histogram
def batsmanPerfBoxHist(file, name="A Hitter"):
'''
Make a boxplot and a histogram of the runs scored by the batsman
Description
Make a boxplot and histogram of the runs scored by the batsman. Plot the Mean, Median, 25th and 75th quantile
Usage
batsmanPerfBoxHist(file, name="A Hitter")
Arguments
file
This is the <batsman>.csv file obtained with an initial getPlayerData()
name
Name of the batsman
Details
More details can be found in my short video tutorial in Youtube https://www.youtube.com/watch?v=q9uMPFVsXsI
Value
None
Note
Maintainer: <NAME> <<EMAIL>>
Author(s)
<NAME>
References
http://www.espncricinfo.com/ci/content/stats/index.html
https://gigadom.wordpress.com/
See Also
batsmanDismissals, batsmanMeanStrikeRate, batsmanMovingAverage, batsmanPerfBoxHist
Examples
# Get or use the <batsman>.csv obtained with getPlayerData()
#tendulkar = getPlayerData(35320,file="tendulkar.csv",type="batting", homeOrAway=c(1,2),result=c(1,2,4))
batsman4s(pathToFile,"<NAME>")
'''
batsman = clean(file)
# Set figure size
rcParams['figure.figsize'] = 10,6
batsman['Runs']=pd.to_numeric(batsman['Runs'])
plt.subplot(2,1,1)
sns.boxplot(batsman['Runs'])
plt.subplot(2,1,2);
atitle = name + "'s" + " - Runs Frequency vs Runs"
plt.hist(batsman['Runs'],bins=20, edgecolor='black')
plt.xlabel('Runs')
plt.ylabel('Frequency')
plt.title(atitle,size=16)
plt.text(180, 70,'Data source-Courtesy:ESPN Cricinfo',
horizontalalignment='center',
verticalalignment='center',
)
plt.show()
plt.gcf().clear()
return
from statsmodels.tsa.arima_model import ARIMA
import pandas as pd
import numpy as np
from statsmodels.tsa.seasonal import seasonal_decompose
from pylab import rcParams
##########################################################################################
# Designed and developed by <NAME>
# Date : 20 Oct 2018
# Function: batsmanPerfForecast
# This function forecasts the batsmans performance based on past performance -
# To update
###########################################################################################
def batsmanPerfForecast(file, name="A Squarecut"):
'''
# To do: Currently ARIMA is used.
Forecast the batting performance based on past performances using an ARIMA model
Description
This function forecasts the performance of the batsman based on past performances using an ARIMA time series model
Usage
batsmanPerfForecast(file, name="A Squarecut")
Arguments
file
This is the <batsman>.csv file obtained with an initial getPlayerData()
name
Name of the batsman
Details
More details can be found in my short video tutorial in Youtube https://www.youtube.com/watch?v=q9uMPFVsXsI
Value
None
Note
Maintainer: <NAME> <<EMAIL>>
Author(s)
<NAME>
References
http://www.espncricinfo.com/ci/content/stats/index.html
https://gigadom.wordpress.com/
See Also
batsmanDismissals, batsmanMeanStrikeRate, batsmanMovingAverage, batsmanPerfBoxHist
Examples
# Get or use the <batsman>.csv obtained with getPlayerData()
#tendulkar = getPlayerData(35320,file="tendulkar.csv",type="batting", homeOrAway=c(1,2),result=c(1,2,4))
batsmanPerfForecast(pathToFile,"Sachin Tendulkar")
# Note: The above example uses the file tendulkar.csv from the /data directory. However
# you can use any directory as long as the data file exists in that directory.
# The general format is pkg-function(pathToFile,par1,...)
'''
batsman= clean(file)
# Set figure size
rcParams['figure.figsize'] = 10,6
runs=batsman['Runs'].astype('float')
# Fit a ARIMA model
date= pd.to_datetime(batsman['Start Date'])
df=pd.DataFrame({'date':date,'runs':runs})
df1=df.set_index('date')
model = ARIMA(df1, order=(5,1,0))
model_fit = model.fit(disp=0)
print(model_fit.summary())
# plot residual errors
residuals = pd.DataFrame(model_fit.resid)
residuals.plot()
plt.show()
residuals.plot(kind='kde')
plt.show()
plt.gcf().clear()
print(residuals.describe())
import matplotlib.pyplot as plt
import seaborn as sns
from pylab import rcParams
##########################################################################################
# Designed and developed by <NAME>
# Date : 19 Oct 2018
# Function: batsmanPerfHomeAway
# This plots the batsman's performance in home versus abroad
#
###########################################################################################
def batsmanPerfHomeAway(file,name="A Hitter"):
'''
This function analyses the performance of the batsman at home and overseas
Description
This function plots the runs scored by the batsman at home and overseas
Usage
batsmanPerfHomeAway(file, name = "A Hitter")
Arguments
file
CSV file of batsman from ESPN Cricinfo obtained with getPlayerDataSp()
name
Name of the batsman
Details
More details can be found in my short video tutorial in Youtube https://www.youtube.com/watch?v=q9uMPFVsXsI
Value
None
Author(s)
<NAME>
References
http://www.espncricinfo.com/ci/content/stats/index.html
https://gigadom.wordpress.com/
See Also
batsmanMovingAverage batsmanRunsPredict batsmanPerfBoxHist bowlerContributionWonLost
Examples
# Get or use the <batsman>.csv obtained with getPlayerData()
#tendulkarSp <-getPlayerDataSp(35320,".","tendulkarsp.csv","batting")
batsmanPerfHomeAway(pathToFile,"<NAME>")
'''
playersp = clean(file)
# Set figure size
rcParams['figure.figsize'] = 10,6
# Create separate DFs for home and away
home = playersp[playersp['ha'] == 1].copy()      # copy to avoid SettingWithCopyWarning
away = playersp[playersp['ha']==2].copy()
home['venue']="Home"
away['venue']="Overseas"
df= pd.concat([home,away])
df['Runs']= pd.to_numeric(df['Runs'])
atitle = name + " - Runs at Home & Overseas"
ax = sns.boxplot(x='venue',y='Runs',data=df)
plt.title(atitle)
plt.text(0.5, 200,'Data source-Courtesy:ESPN Cricinfo',
horizontalalignment='center',
verticalalignment='center',
)
plt.show()
plt.gcf().clear()
return
import numpy as np
import matplotlib.pyplot as plt
from pylab import rcParams
##########################################################################################
# Designed and developed by <NAME>
# Date : 30 Jun 2015
# Function: batsmanRunsFreqPerf
# This function computes and plots the frequency of runs scored by the batsman across his career
#
###########################################################################################
# Plot the performance of the batsman as a continuous graph
# Create a performance plot between Runs and RunsFrequency
def batsmanRunsFreqPerf(file, name="A Hookshot"):
'''
Calculate and run frequencies in ranges of 10 runs and plot versus Runs the performance of the batsman
Description
This function calculates frequencies of runs in 10 run buckets and plots this percentage
Usage
batsmanRunsFreqPerf(file, name="A Hookshot")
Arguments
file
This is the <batsman>.csv file obtained with an initial getPlayerData()
name
Name of the batsman
Details
More details can be found in my short video tutorial in Youtube https://www.youtube.com/watch?v=q9uMPFVsXsI
Value
None
Note
Maintainer: <NAME> <<EMAIL>>
Author(s)
<NAME>
References
http://www.espncricinfo.com/ci/content/stats/index.html
https://gigadom.wordpress.com/
See Also
batsmanDismissals, batsmanMovingAverage, batsmanPerfBoxHist
Examples
# Get or use the <batsman>.csv obtained with getPlayerData()
#tendulkar <- getPlayerData(35320,file="tendulkar.csv",type="batting", homeOrAway=c(1,2),result=c(1,2,4))
batsmanRunsFreqPerf(pathToFile,"Sachin Tendulkar")
'''
df = clean(file)
# Set figure size
rcParams['figure.figsize'] = 10,6
runs=pd.to_numeric(df['Runs'])
# Plot histogram
runs.plot.hist(grid=True, bins=20, rwidth=0.9, color='#607c8e')
atitle = name + "'s" + " Runs histogram"
plt.title(atitle)
plt.xlabel('Runs')
plt.grid(axis='y', alpha=0.75)
plt.text(180, 90,'Data source-Courtesy:ESPN Cricinfo',
horizontalalignment='center',
verticalalignment='center',
)
plt.show()
plt.gcf().clear()
return
from sklearn.cluster import KMeans
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from pylab import rcParams
##########################################################################################
# Designed and developed by <NAME>
# Date : 14 Oct 2018
# Function: batsmanRunsLikelihood
# This function used K-Means to compute and plot the runs likelihood for the batsman
# To do - Include scatterplot
###########################################################################################
def batsmanRunsLikelihood(file, name="A Squarecut") :
'''
This function uses K-Means to determine the likelihood of the batsman to get runs
Description
This function uses K-Means clustering to estimate the likelihood of getting runs, based on clusters of runs the batsman made in the past.
Usage
batsmanRunsLikelihood(file, name = "A Squarecut")
Arguments
file
This is the <batsman>.csv file obtained with an initial getPlayerData()
name
Name of the batsman
Details
More details can be found in my short video tutorial in Youtube https://www.youtube.com/watch?v=q9uMPFVsXsI
Value
None
Note
Maintainer: <NAME> <<EMAIL>>
Author(s)
<NAME>
References
http://www.espncricinfo.com/ci/content/stats/index.html
https://gigadom.wordpress.com/
See Also
batsmanMovingAverage batsmanRunsPredict battingPerf3d batsmanContributionWonLost
Examples
# Get or use the <batsman>.csv obtained with getPlayerData()
# tendulkar= getPlayerData(35320,file="tendulkar.csv",type="batting", homeOrAway=c(1,2),result=c(1,2,4))
batsmanRunsLikelihood(pathToFile,"<NAME>")
'''
batsman =clean(file)
# Set figure size
rcParams['figure.figsize'] = 10,6
data = batsman[['Runs','BF','Mins']]
# Create 3 different clusters
kmeans = KMeans(n_clusters=3,max_iter=300)
# Compute the clusters
kmeans.fit(data)
y_kmeans = kmeans.predict(data)
# Get the cluster centroids
centers = kmeans.cluster_centers_
centers
# Add a title
atitle= name + '-' + "Runs Likelihood"
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
# Draw vertical line 1st centroid
x=[centers[0][0],centers[0][0]]
y=[centers[0][1],centers[0][1]]
z=[0,centers[0][2]]
ax.plot(x,y,z,'k-',color='r',alpha=0.8, linewidth=2)
# Draw vertical line 2nd centroid
x=[centers[1][0],centers[1][0]]
y=[centers[1][1],centers[1][1]]
z=[0,centers[1][2]]
ax.plot(x,y,z,'k-',color='b',alpha=0.8, linewidth=2)
# Draw vertical line 2nd centroid
x=[centers[2][0],centers[2][0]]
y=[centers[2][1],centers[2][1]]
z=[0,centers[2][2]]
ax.plot(x,y,z,'k-',color='k',alpha=0.8, linewidth=2)
ax.set_xlabel('BallsFaced')
ax.set_ylabel('Minutes')
ax.set_zlabel('Runs');
plt.title(atitle)
plt.show()
plt.gcf().clear()
return
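# ----------------------------------------------------------------------------
# Illustrative sketch, not part of the cricketr/cricpy API: clustering made-up
# (Runs, BF, Mins) rows into 3 groups with KMeans, as batsmanRunsLikelihood()
# does, and reading off the cluster centroids that the 3D plot marks with
# vertical lines.
def _demo_runs_likelihood_sketch():
    import numpy as np
    from sklearn.cluster import KMeans
    data = np.array([[5, 10, 15], [12, 20, 30], [45, 60, 90],
                     [50, 70, 100], [110, 150, 220], [120, 160, 240]])
    km = KMeans(n_clusters=3, max_iter=300, n_init=10, random_state=0).fit(data)
    return km.cluster_centers_   # one (Runs, BF, Mins) centroid per cluster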
from sklearn.linear_model import LinearRegression
##########################################################################################
# Designed and developed by <NAME>
# Date : 19 Oct 2018
# Function: batsmanRunsPredict
# This function predicts the runs that will be scored by the batsman for a given numbers
# of balls faced and minutes at crease
#
###########################################################################################
def batsmanRunsPredict(file, newDF, name="A Coverdrive"):
'''
Predict the runs for the batsman given the Balls Faced and Minutes in crease
Description
Fit a linear regression plane between Runs scored and Minutes in Crease and Balls Faced. This will be used to predict the batsman runs for time in crease and balls faced
Usage
batsmanRunsPredict(file, newDF, name="A Coverdrive")
Arguments
file
This is the <batsman>.csv file obtained with an initial getPlayerData()
name
Name of the batsman
newDF
This is a data frame with 2 columns BF(Balls Faced) and Mins(Minutes)
Details
More details can be found in my short video tutorial in Youtube https://www.youtube.com/watch?v=q9uMPFVsXsI
Value
Returns a data frame with the predicted runs for the Balls Faced and Minutes at crease
Note
Maintainer: <NAME> <<EMAIL>>
Author(s)
<NAME>
References
http://www.espncricinfo.com/ci/content/stats/index.html
https://gigadom.wordpress.com/
See Also
batsmanMovingAverage battingPerf3d batsmanContributionWonLost
Examples
# Get or use the <batsman>.csv obtained with getPlayerData()
# tendulkar <- getPlayerData(35320,file="tendulkar.csv",type="batting",
# homeOrAway=c(1,2), result=c(1,2,4))
# Use a single value for BF and Mins
BF = 30
Mins = 20
# pathToFile points to the <batsman>.csv obtained with getPlayerData()
batsmanRunsPredict(pathToFile, pd.DataFrame({'BF': [BF], 'Mins': [Mins]}), "<NAME>")
# or give a data frame
BF = np.linspace(10, 400, 15)
Mins = np.linspace(30, 220, 15)
newDF = pd.DataFrame({'BF': BF, 'Mins': Mins})
# values = batsmanRunsPredict("../cricketr/data/tendulkar.csv", newDF, "<NAME>")
# print(values)
'''
batsman = clean(file)
df=batsman[['BF','Mins','Runs']]
df['BF']=pd.to_numeric(df['BF'])
df['Runs']=pd.to_numeric(df['Runs'])
xtrain=df.iloc[:,0:2]
ytrain=df.iloc[:,2]
linreg = LinearRegression().fit(xtrain, ytrain)
newDF['Runs']=linreg.predict(newDF)
return(newDF)
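# ----------------------------------------------------------------------------
# Illustrative sketch, not part of the cricketr/cricpy API: fitting the same
# kind of linear model on made-up (BF, Mins) -> Runs data and predicting for a
# new frame, mirroring what batsmanRunsPredict() does with a real player file.
def _demo_runs_predict_sketch():
    import numpy as np
    import pandas as pd
    from sklearn.linear_model import LinearRegression
    train = pd.DataFrame({'BF':   [10, 30, 60, 90, 120],
                          'Mins': [15, 40, 80, 130, 170],
                          'Runs': [6, 18, 40, 65, 85]})
    linreg = LinearRegression().fit(train[['BF', 'Mins']], train['Runs'])
    new_df = pd.DataFrame({'BF': np.linspace(10, 400, 5),
                           'Mins': np.linspace(30, 220, 5)})
    new_df['Runs'] = linreg.predict(new_df[['BF', 'Mins']])
    return new_df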
import matplotlib.pyplot as plt
from pylab import rcParams
##########################################################################################
# Designed and developed by <NAME>
# Date : 13 Oct 2018
# Function: batsmanRunsRanges
# This plots the percentage runs in different run ranges
#
###########################################################################################
def batsmanRunsRanges(file, name= "A Hookshot") :
'''
Compute and plot a histogram of the runs scored in ranges of 10
Description
Compute and plot a histogram of the runs scored in ranges of 10
Usage
batsmanRunsRanges(file, name="A Hookshot")
Arguments
file
This is the <batsman>.csv file obtained with an initial getPlayerData()
name
Name of the batsman
Details
More details can be found in my short video tutorial in Youtube https://www.youtube.com/watch?v=q9uMPFVsXsI
Value
None
Note
Maintainer: <NAME> <<EMAIL>>
Author(s)
<NAME>
References
http://www.espncricinfo.com/ci/content/stats/index.html
https://gigadom.wordpress.com/
See Also
batsmanDismissals, batsmanMovingAverage, batsmanPerfBoxHist
Examples
# Get or use the <batsman>.csv obtained with getPlayerData()
#tendulkar= getPlayerData(35320,file="tendulkar.csv",type="batting", homeOrAway=c(1,2),result=c(1,2,4))
batsmanRunsRanges(pathToFile,"<NAME>")
'''
# Clean file
batsman = clean(file)
runs= pd.to_numeric(batsman['Runs'])
hist, bins = np.histogram(runs, bins = 20)
midBin=[]
# Loop through
for i in range(1,len(bins)):
# Find the mean of the bins (Runs)
midBin.append(np.mean([bins[i-1],bins[i]]))
# Compute binWidth. Subtract '2' to separate the bars
binWidth=bins[1]-bins[0]-2
# Plot a barplot
plt.bar(midBin, hist, width=binWidth, color="blue")
plt.xlabel('Run ranges')
plt.ylabel('Frequency')
# Add a title
atitle= name + '-' + "Runs % vs Run frequencies"
plt.title(atitle)
plt.text(180, 70,'Data source-Courtesy:ESPN Cricinfo',
horizontalalignment='center',
verticalalignment='center',
)
plt.show()
plt.gcf().clear()
return
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn.linear_model import LinearRegression
import numpy as np
from pylab import rcParams
##########################################################################################
# Designed and developed by <NAME>
# Date : 20 Oct 2018
# Function: battingPerf3d
# This function creates a 3D scatter plot of Runs scored vs Balls Faced and Minutes in crease.
# A regression plane is fitted to this.
#
###########################################################################################
def battingPerf3d(file, name="A Hookshot") :
'''
Make a 3D scatter plot of the Runs scored versus the Balls Faced and Minutes at Crease.
Description
Make a 3D plot of the Runs scored by batsman vs Minutes in crease and Balls faced. Fit a linear regression plane
Usage
battingPerf3d(file, name="A Hookshot")
Arguments
file
This is the <batsman>.csv file obtained with an initial getPlayerData()
name
Name of the batsman
Details
More details can be found in my short video tutorial in Youtube https://www.youtube.com/watch?v=q9uMPFVsXsI
Value
None
Note
Maintainer: <NAME> <<EMAIL>>
Author(s)
<NAME>
References
http://www.espncricinfo.com/ci/content/stats/index.html
https://gigadom.wordpress.com/
See Also
batsmanDismissals, batsmanMeanStrikeRate, batsmanMovingAverage, batsmanPerfBoxHist
Examples
# Get or use the <batsman>.csv obtained with getPlayerData()
# tendulkar<- getPlayerData(35320,file="tendulkar.csv",type="batting",
#homeOrAway=[1,2],result=[1,2,4])
battingPerf3d(pathToFile,"Sachin Tendulkar")
'''
# Set figure size
rcParams['figure.figsize'] = 10,6
# Clean the batsman file and create a complete data frame
batsman = clean(file)
# Make a 3 D plot and fit a regression plane
atitle = name + "- Runs vs BallsFaced & Minutes"
df2=batsman[['BF','Mins','Runs']]
df2['BF']=pd.to_numeric(df2['BF'])
df2['Mins']=pd.to_numeric(df2['Mins'])
df2['Runs']=pd.to_numeric(df2['Runs'])
X=df2.iloc[:,0:2]
Y=df2.iloc[:,2]
# Fit a regression plane
linreg = LinearRegression().fit(X,Y)
bf= np.linspace(0,400,20)
mins=np.linspace(0,620,20)
xx, yy = np.meshgrid(bf,mins)
xx1=xx.reshape(-1)
yy1=yy.reshape(-1)
test=pd.DataFrame({"BallsFaced": xx1, "Minutes":yy1})
predictedRuns=linreg.predict(test).reshape(20,20)
plt3d = plt.figure().gca(projection='3d')
plt3d.scatter(df2['BF'],df2['Mins'],df2['Runs'])
plt3d.plot_surface(xx.reshape(20,20),yy,predictedRuns, alpha=0.2)
plt3d.set_xlabel('BallsFaced')
plt3d.set_ylabel('Minutes')
plt3d.set_zlabel('Runs');
plt.title(atitle)
plt.show()
plt.gcf().clear()
return
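# ----------------------------------------------------------------------------
# Illustrative sketch, not part of the cricketr/cricpy API: the meshgrid ->
# flatten -> predict -> reshape pattern used by battingPerf3d() to evaluate a
# fitted regression plane on a regular (BF, Mins) grid for plot_surface().
# All numbers are synthetic.
def _demo_regression_plane_grid_sketch():
    import numpy as np
    import pandas as pd
    from sklearn.linear_model import LinearRegression
    rng = np.random.RandomState(0)
    train = pd.DataFrame({'BF': rng.uniform(0, 200, 50),
                          'Mins': rng.uniform(0, 300, 50)})
    runs = 0.6 * train['BF'] + 0.1 * train['Mins'] + rng.normal(0, 5, 50)
    linreg = LinearRegression().fit(train, runs)
    gx, gy = np.meshgrid(np.linspace(0, 200, 20), np.linspace(0, 300, 20))
    grid = pd.DataFrame({'BF': gx.ravel(), 'Mins': gy.ravel()})
    z = linreg.predict(grid).reshape(gx.shape)   # 20 x 20 surface heights
    return gx, gy, z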
import matplotlib.pyplot as plt
from pylab import rcParams
##########################################################################################
# Designed and developed by <NAME>
# Date : 19 Oct 2018
# Function: bowlerAvgWktsGround
# This function plots the average wickets taken by the bowler at each ground. The xlabels indicate
# the number of innings at ground
# To do - Append number of matches to Ground
###########################################################################################
def bowlerAvgWktsGround(file, name="A Chinaman"):
'''
This function computes and plots the average wickets taken at different grounds
Description
This function computes the average wickets taken by the bowler at different grounds. It also shows the number of innings at each venue
Usage
bowlerAvgWktsGround(file, name = "A Chinaman")
Arguments
file
This is the <bowler>.csv file obtained with an initial getPlayerData()
name
Name of the bowler
Details
More details can be found in my short video tutorial in Youtube https://www.youtube.com/watch?v=q9uMPFVsXsI
Value
None
Note
Maintainer: <NAME> <<EMAIL>>
Author(s)
<NAME>
References
http://www.espncricinfo.com/ci/content/stats/index.html
https://gigadom.wordpress.com/
See Also
bowlerWktsFreqPercent relativeBowlingER relativeBowlingPerf
Examples
# Get or use the <bowler>.csv obtained with getPlayerData()
# a <- getPlayerData(30176,file="kumble.csv",type="batting", homeOrAway=c(1,2),result=c(1,2,4))
bowlerAvgWktsGround(pathToFile,"<NAME>")
'''
bowler = cleanBowlerData(file)
# Set figure size
rcParams['figure.figsize'] = 10,6
bowler['Wkts']=pd.to_numeric(bowler['Wkts'])
# Aggregate as sum, mean and count
df=bowler[['Wkts','Ground']].groupby('Ground').agg(['sum','mean','count'])
#Flatten multi-levels to column names
df.columns= ['_'.join(col).strip() for col in df.columns.values]
# Reset index
df1=df.reset_index(inplace=False)
atitle = name + "'s Average Wickets at Ground"
plt.xticks(rotation='vertical')
plt.axhline(y=4, color='r', linestyle=':')
plt.title(atitle)
ax=sns.barplot(x='Ground', y="Wkts_mean", data=df1)
#plt.bar(df1['Ground'],df1['Wkts_mean'])
plt.text(15, 4,'Data source-Courtesy:ESPN Cricinfo',
horizontalalignment='center',
verticalalignment='center',
)
plt.show()
plt.gcf().clear()
return
import matplotlib.pyplot as plt
from pylab import rcParams
##########################################################################################
# Designed and developed by <NAME>
# Date : 19 Oct 2018
# Function: bowlerAvgWktsOpposition
# This function plots the average wickets taken by the bowler against each opposition. The xlabels indicate
# the number of innings against each opposition
# To do - Append no of matches in Opposition
###########################################################################################
def bowlerAvgWktsOpposition(file, name="A Chinaman"):
'''
This function computes and plots the average wickets taken against different opposition
Description
This function computes the average wickets taken by the bowler against different opposition. It also shows the number of innings against each opposition
Usage
bowlerAvgWktsOpposition(file, name = "A Chinaman")
Arguments
file
This is the <bowler>.csv file obtained with an initial getPlayerData()
name
Name of the bowler
Details
More details can be found in my short video tutorial in Youtube https://www.youtube.com/watch?v=q9uMPFVsXsI
Value
None
Note
Maintainer: <NAME> <<EMAIL>>
Author(s)
<NAME>
References
http://www.espncricinfo.com/ci/content/stats/index.html
https://gigadom.wordpress.com/
See Also
bowlerWktsFreqPercent relativeBowlingER relativeBowlingPerf bowlerAvgWktsGround
Examples
# Get or use the <bowler>.csv obtained with getPlayerData()
# a <- getPlayerData(30176,file="kumble.csv",type="batting", homeOrAway=c(1,2),result=c(1,2,4))
bowlerAvgWktsOpposition(pathToFile,"<NAME>")
'''
bowler = cleanBowlerData(file)
# Set figure size
rcParams['figure.figsize'] = 10,6
bowler['Wkts']=pd.to_numeric(bowler['Wkts'])
# Aggregate as sum, mean and count
df=bowler[['Opposition','Wkts']].groupby('Opposition').agg(['sum','mean','count'])
#Flatten multi-levels to column names
df.columns= ['_'.join(col).strip() for col in df.columns.values]
# Reset index
df1=df.reset_index(inplace=False)
atitle = name + "'s Average Wickets vs Opposition"
plt.xticks(rotation='vertical')
plt.axhline(y=3, color='r', linestyle=':')
ax=sns.barplot(x='Opposition', y="Wkts_mean", data=df1)
plt.title(atitle)
plt.text(2, 3,'Data source-Courtesy:ESPN Cricinfo',
horizontalalignment='center',
verticalalignment='center',
)
plt.show()
plt.gcf().clear()
return
import matplotlib.pyplot as plt
from pylab import rcParams
##########################################################################################
# Designed and developed by <NAME>
# Date : 19 Oct 2018
# Function: bowlerContributionWonLost
# This plots the bowler's contribution to won and lost matches
#
###########################################################################################
def bowlerContributionWonLost(file,name="A Doosra"):
'''
Display the bowler's contribution in matches that were won and those that were lost
Description
Plot the comparative contribution of the bowler in matches that were won and lost as box plots
Usage
bowlerContributionWonLost(file, name = "A Doosra")
Arguments
file
CSV file of bowler from ESPN Cricinfo obtained with getPlayerDataSp()
name
Name of the bowler
Details
More details can be found in my short video tutorial in Youtube https://www.youtube.com/watch?v=q9uMPFVsXsI
Value
None
Note
Maintainer: <NAME> <<EMAIL>>
Author(s)
<NAME>
References
http://www.espncricinfo.com/ci/content/stats/index.html
https://gigadom.wordpress.com/
See Also
bowlerMovingAverage bowlerPerfForecast checkBowlerInForm
Examples
# Get or use the <bowler>.csv obtained with getPlayerDataSp()
#kumbleSp <-getPlayerDataSp(30176,".","kumblesp.csv","bowling")
bowlerContributionWonLost(pathToFile,"<NAME>")
'''
playersp = cleanBowlerData(file)
# Set figure size
rcParams['figure.figsize'] = 10,6
# Create DFs for won and lost/drawn
won = playersp[playersp['result'] == 1].copy()   # copy to avoid SettingWithCopyWarning
lost = playersp[(playersp['result']==2) | (playersp['result']==4)].copy()
won['status']="won"
lost['status']="lost"
# Stack DFs
df= pd.concat([won,lost])
df['Wkts']= pd.to_numeric(df['Wkts'])
ax = sns.boxplot(x='status',y='Wkts',data=df)
atitle = name + " - Wickets in games won/lost-drawn"
plt.xlabel('Status')
plt.ylabel('Wickets')
plt.title(atitle)
plt.text(0.5, 200,'Data source-Courtesy:ESPN Cricinfo',
horizontalalignment='center',
verticalalignment='center',
)
plt.show()
plt.gcf().clear()
return
import matplotlib.pyplot as plt
from pylab import rcParams
##########################################################################################
# Designed and developed by <NAME>
# Date : 19 Oct 2018
# Function: bowlerCumulativeAvgEconRate
# This function computes and plots the cumulative average economy rate of a bowler
#
###########################################################################################
def bowlerCumulativeAvgEconRate(file,name="A Googly"):
'''
Bowler's cumulative average economy rate
Description
This function computes and plots the cumulative average economy rate of a bowler
Usage
bowlerCumulativeAvgEconRate(file,name)
Arguments
file
Data frame
name
Name of batsman
Value
None
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://www.espncricinfo.com/ci/content/stats/index.html
https://gigadom.wordpress.com/
See Also
batsmanCumulativeAverageRuns bowlerCumulativeAvgWickets batsmanCumulativeStrikeRate
Examples
bowlerCumulativeAvgEconRate(pathToFile,"<NAME>")
'''
bowler=cleanBowlerData(file)
# Set figure size
rcParams['figure.figsize'] = 10,6
economyRate=pd.to_numeric(bowler['Econ'])
cumEconomyRate = economyRate.cumsum()/pd.Series(np.arange(1, len(economyRate)+1), economyRate.index)
atitle = name + "- Cumulative Economy Rate vs No of innings"
plt.xlabel('Innings')
plt.ylabel('Cumulative Economy Rate')
plt.title(atitle)
plt.plot(cumEconomyRate)
plt.text(150,3,'Data source-Courtesy:ESPN Cricinfo',
horizontalalignment='center',
verticalalignment='center',
)
plt.show()
plt.gcf().clear()
return
import matplotlib.pyplot as plt
from pylab import rcParams
##########################################################################################
# Designed and developed by <NAME>
# Date : 19 Oct 2018
# Function: bowlerCumulativeAvgWickets
# This function computes and plots the cumulative average wickets of a bowler
#
###########################################################################################
def bowlerCumulativeAvgWickets(file,name="A Googly"):
'''
Bowler's cumulative average wickets
Description
This function computes and plots the cumulative average wickets of a bowler
Usage
bowlerCumulativeAvgWickets(file,name)
Arguments
file
Data frame
name
Name of batsman
Value
None
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://www.espncricinfo.com/ci/content/stats/index.html
https://gigadom.wordpress.com/
See Also
batsmanCumulativeAverageRuns bowlerCumulativeAvgEconRate batsmanCumulativeStrikeRate
Examples
bowlerCumulativeAvgWickets(pathToFile,"<NAME>")
## End(Not run)
'''
bowler=cleanBowlerData(file)
# Set figure size
rcParams['figure.figsize'] = 10,6
wktRate=pd.to_numeric(bowler['Wkts'])
cumWktRate = wktRate.cumsum()/pd.Series(np.arange(1, len(wktRate)+1), wktRate.index)
atitle = name + "- Cumulative Mean Wicket Rate vs No of innings"
plt.xlabel('Innings')
plt.ylabel('Cumulative Mean Wickets')
plt.title(atitle)
plt.plot(cumWktRate)
plt.text(150,3,'Data source-Courtesy:ESPN Cricinfo',
horizontalalignment='center',
verticalalignment='center',
)
plt.show()
plt.gcf().clear()
return
import matplotlib.pyplot as plt
from pylab import rcParams
##########################################################################################
# Designed and developed by <NAME>
# Date : 19 Oct 2018
# Function: bowlerEconRate
# This function plots the Frequency percentage of wickets taken for the bowler
#
###########################################################################################
def bowlerEconRate(file, name="A Bowler") :
'''
Compute and plot the Mean Economy Rate versus wickets taken
Description
This function computes the mean economy rate for the wickets taken and plot this
Usage
bowlerEconRate(file, name = "A Bowler")
Arguments
file
This is the <bowler>.csv file obtained with an initial getPlayerData()
name
Name of the bowler
Details
More details can be found in my short video tutorial in Youtube https://www.youtube.com/watch?v=q9uMPFVsXsI
Value
None
Note
Maintainer: <NAME> <<EMAIL>>
Author(s)
<NAME>
References
http://www.espncricinfo.com/ci/content/stats/index.html
https://gigadom.wordpress.com/
See Also
bowlerWktsFreqPercent relativeBowlingER relativeBowlingPerf
Examples
# Get or use the <bowler>.csv obtained with getPlayerData()
# kumble <- getPlayerData(30176,dir=".", file="kumble.csv",type="batting",
# homeOrAway=[1,2],result=[1,2,4])
bowlerEconRate(pathToFile,"<NAME>")
'''
bowler = cleanBowlerData(file)
# Set figure size
rcParams['figure.figsize'] = 10,6
bowler['Wkts']=pd.to_numeric(bowler['Wkts'])
bowler['Econ']=pd.to_numeric(bowler['Econ'])
    atitle = name + " - Mean economy rate vs Wickets"
df=bowler[['Wkts','Econ']].groupby('Wkts').mean()
df = df.reset_index(inplace=False)
ax=plt.plot('Wkts','Econ',data=df)
plt.xlabel('Wickets')
plt.ylabel('Economy Rate')
plt.title(atitle)
plt.text(6, 3,'Data source-Courtesy:ESPN Cricinfo',
horizontalalignment='center',
verticalalignment='center',
)
plt.show()
plt.gcf().clear()
return
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from pylab import rcParams
##########################################################################################
# Designed and developed by <NAME>
# Date : 19 Oct 2018
# Function: bowlerMovingAverage
# This function computes and plots the Moving Average of the Wickets taken for a bowler
# across his career
#
###########################################################################################
# Compute a moving average
def movingaverage(interval, window_size):
window= np.ones(int(window_size))/float(window_size)
return np.convolve(interval, window, 'same')
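# Illustrative sketch (hypothetical values, not part of the original module): the helper
# averages over a uniform window, e.g.
#   movingaverage([1, 2, 3, 4, 5], 3)   # -> array([1., 2., 3., 4., 3.])
# Because np.convolve is called with mode='same', the first and last points are averaged
# against implicit zero-padding, which is why the smoothed curve dips at the career ends.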
def bowlerMovingAverage(file,name="A Doosra") :
'''
Compute and plot the moving average of the wickets taken for a bowler
Description
This function plots the wickets taken by a bowler as a time series and plots the moving average over the career
Usage
bowlerMovingAverage(file, name = "A Doosra")
Arguments
file
This is the <bowler>.csv file obtained with an initial getPlayerData()
name
Name of the bowler
Details
More details can be found in my short video tutorial in Youtube https://www.youtube.com/watch?v=q9uMPFVsXsI
Value
None
Note
Maintainer: <NAME> <<EMAIL>>
Author(s)
<NAME>
References
http://www.espncricinfo.com/ci/content/stats/index.html
https://gigadom.wordpress.com/
See Also
bowlerWktsFreqPercent relativeBowlingER relativeBowlingPerf
Examples
# Get or use the <bowler>.csv obtained with getPlayerData()
# a <- getPlayerData(30176,file="kumble.csv",type="batting", homeOrAway=c(1,2),result=c(1,2,4))
bowlerMovingAverage(pathToFile,"<NAME>")
'''
bowler = cleanBowlerData(file)
# Set figure size
rcParams['figure.figsize'] = 10,6
wkts=pd.to_numeric(bowler['Wkts'])
date= pd.to_datetime(bowler['Start Date'])
atitle = name + "'s Moving average (Runs)"
# Plot the runs in grey colo
plt.plot(date,wkts,"-",color = '0.75')
y_av = movingaverage(wkts, 50)
plt.xlabel('Date')
plt.ylabel('Wickets')
plt.plot(date, y_av,"b")
plt.title(atitle)
plt.text('2002-01-03',150,'Data source-Courtesy:ESPN Cricinfo',
horizontalalignment='center',
verticalalignment='center',
)
plt.show()
plt.gcf().clear()
return
import pandas as pd
import matplotlib.pyplot as plt
from statsmodels.tsa.arima_model import ARIMA
from pylab import rcParams
##########################################################################################
# Designed and developed by <NAME>
# Date : 20 Oct 2018
# Function: bowlerPerfForecast
# This function forecasts the bowler's performance based on past performance
#
###########################################################################################
def bowlerPerfForecast(file, name="A Googly"):
'''
    # To do - this currently uses ARIMA rather than Holt-Winters
    Forecast the bowler performance based on past performances using an ARIMA model
    Description
    This function forecasts the performance of the bowler based on past performances using an ARIMA time series model
Usage
bowlerPerfForecast(file, name = "A Googly")
Arguments
file
This is the <bowler>.csv file obtained with an initial getPlayerData()
name
Name of the bowler
Details
More details can be found in my short video tutorial in Youtube https://www.youtube.com/watch?v=q9uMPFVsXsI
Value
None
Note
Maintainer: <NAME> <<EMAIL>>
Author(s)
<NAME>
References
http://www.espncricinfo.com/ci/content/stats/index.html
https://gigadom.wordpress.com/
See Also
bowlerEconRate, bowlerMovingAverage, bowlerContributionWonLost
Examples
# Get or use the <bowler>.csv obtained with getPlayerData()
# a <- getPlayerData(30176,file="kumble.csv",type="batting", homeOrAway=c(1,2),result=c(1,2,4))
bowlerPerfForecast(pathToFile,"Anil Kumble")
'''
bowler= cleanBowlerData(file)
# Set figure size
rcParams['figure.figsize'] = 10,6
wkts=bowler['Wkts'].astype('float')
date= pd.to_datetime(bowler['Start Date'])
df=pd.DataFrame({'date':date,'Wickets':wkts})
df1=df.set_index('date')
model = ARIMA(df1, order=(5,1,0))
model_fit = model.fit(disp=0)
print(model_fit.summary())
# plot residual errors
residuals = pd.DataFrame(model_fit.resid)
residuals.plot()
atitle=name+"-ARIMA plot"
plt.title(atitle)
plt.show()
residuals.plot(kind='kde')
atitle=name+"-ARIMA plot"
plt.title(atitle)
plt.show()
plt.gcf().clear()
print(residuals.describe())
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from pylab import rcParams
##########################################################################################
# Designed and developed by <NAME>
# Date : 19 Oct 2018
# Function: bowlerPerfHomeAway
# This plots the bowler's performance home and abroad
#
###########################################################################################
def bowlerPerfHomeAway(file,name="A Googly") :
'''
This function analyses the performance of the bowler at home and overseas
Description
This function plots the Wickets taken by the batsman at home and overseas
Usage
bowlerPerfHomeAway(file, name = "A Googly")
Arguments
file
CSV file of the bowler from ESPN Cricinfo (for e.g. Kumble's profile no:30176)
name
Name of bowler
Details
More details can be found in my short video tutorial in Youtube https://www.youtube.com/watch?v=q9uMPFVsXsI
Value
None
Note
Maintainer: <NAME> <<EMAIL>>
Author(s)
<NAME>
References
http://www.espncricinfo.com/ci/content/stats/index.html
https://gigadom.wordpress.com/
See Also
bowlerMovingAverage bowlerPerfForecast checkBowlerInForm bowlerContributionWonLost
Examples
# Get or use the <bowler>.csv obtained with getPlayerDataSp()
#kumbleSp <-getPlayerDataSp(30176,".","kumblesp.csv","bowling")
bowlerPerfHomeAway(path,"<NAME>")
'''
playersp = cleanBowlerData(file)
# Set figure size
rcParams['figure.figsize'] = 10,6
#
    home = playersp[playersp['ha'] == 1].copy()
    away = playersp[playersp['ha'] == 2].copy()
    home['venue'] = "Home"
    away['venue'] = "Overseas"
df= pd.concat([home,away])
df['Wkts']= pd.to_numeric(df['Wkts'])
    atitle = name + " - Wickets at home & overseas"
ax = sns.boxplot(x='venue',y='Wkts',data=df)
plt.xlabel('Venue')
plt.ylabel('Wickets')
plt.title(atitle)
plt.text(0.5, 200,'Data source-Courtesy:ESPN Cricinfo',
horizontalalignment='center',
verticalalignment='center',
)
plt.show()
plt.gcf().clear()
return
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from pylab import rcParams
##########################################################################################
# Designed and developed by <NAME>
# Date : 19 Oct 2018
# Function: bowlerWktsFreqPercent
# This function plots the Frequency percentage of wickets taken for the bowler
#
###########################################################################################
def bowlerWktsFreqPercent(file, name="A Bowler"):
'''
Plot the Wickets Frequency as a percentage against wickets taken
Description
    This function calculates the wickets frequency as a percentage of total wickets taken and plots this against the wickets taken.
Usage
bowlerWktsFreqPercent(file, name="A Bowler")
Arguments
file
This is the <bowler>.csv file obtained with an initial getPlayerData()
name
Name of the bowler
Details
More details can be found in my short video tutorial in Youtube https://www.youtube.com/watch?v=q9uMPFVsXsI
Value
None
Note
Maintainer: <NAME> <<EMAIL>>
Author(s)
<NAME>
References
http://www.espncricinfo.com/ci/content/stats/index.html
https://gigadom.wordpress.com/
See Also
bowlerWktsFreqPercent relativeBowlingER relativeBowlingPerf
Examples
# Get or use the <bowler>.csv obtained with getPlayerData()
# a <- getPlayerData(30176,file="kumble.csv",type="batting", homeOrAway=c(1,2),result=c(1,2,4))
bowlerWktsFreqPercent(pathToFile,"Anil Kumble")
'''
bowler = cleanBowlerData(file)
# Set figure size
rcParams['figure.figsize'] = 10,6
# Create a table of wickets
wkts = pd.to_numeric(bowler['Wkts'])
wkts.plot.hist(grid=True, bins=20, rwidth=0.9, color='#607c8e')
atitle = name + "'s" + " Wickets histogram"
plt.title(atitle)
plt.xlabel('Wickets')
plt.grid(axis='y', alpha=0.75)
plt.text(5,10,'Data source-Courtesy:ESPN Cricinfo',
horizontalalignment='center',
verticalalignment='center',
)
plt.show()
plt.gcf().clear()
return
import seaborn as sns
from pylab import rcParams
##########################################################################################
# Designed and developed by <NAME>
# Date : 19 Oct 2018
# Function: bowlerWktsRunsPlot
# This function makes a boxplot of Wickets versus Runs conceded
###########################################################################################
def bowlerWktsRunsPlot(file, name="A Googly"):
'''
Compute and plot the runs conceded versus the wickets taken
Description
This function creates boxplots on the runs conceded for wickets taken for the bowler
Usage
bowlerWktsRunsPlot(file, name = "A Googly")
Arguments
file
This is the <bowler>.csv file obtained with an initial getPlayerData()
name
Name of the bowler
Details
More details can be found in my short video tutorial in Youtube https://www.youtube.com/watch?v=q9uMPFVsXsI
Value
None
Note
Maintainer: <NAME> <<EMAIL>>
Author(s)
<NAME>
References
http://www.espncricinfo.com/ci/content/stats/index.html
https://gigadom.wordpress.com/
See Also
bowlerWktsFreqPercent relativeBowlingER relativeBowlingPerf bowlerHistWickets
Examples
# Get or use the <bowler>.csv obtained with getPlayerData()
# a <- getPlayerData(30176,file="kumble.csv",type="batting", homeOrAway=c(1,2),result=c(1,2,4))
bowlerWktsRunsPlot(pathToFile,"Anil Kumble")
'''
bowler = cleanBowlerData(file)
# Set figure size
rcParams['figure.figsize'] = 10,6
atitle = name + "- Wickets vs Runs conceded"
ax = sns.boxplot(x='Wkts', y='Runs', data=bowler)
plt.title(atitle)
plt.xlabel('Wickets')
plt.show()
plt.gcf().clear()
return
import pandas as pd
##########################################################################################
# Designed and developed by <NAME>
# Date : 11 Oct 2018
# Function : clean
# This function cleans the batsman's data file and returns the cleaned data frame for use in
# other functions
##########################################################################################
def clean(batsmanCSV):
'''
Create a batsman data frame given the batsman's CSV file
Description
    The function removes rows from the batsman dataframe where the batsman did not bat (DNB) or the team did not bat (TDNB). Converts not-out scores such as 97* or 128* to 97 and 128 by stripping the '*' character. It keeps all the complete cases and returns the data frame
Usage
clean(file)
Arguments
file
CSV file with the batsman data obtained with getPlayerData
Details
More details can be found in my short video tutorial in Youtube https://www.youtube.com/watch?v=q9uMPFVsXsI
Value
Returns the cleaned batsman dataframe
Note
Maintainer: <NAME> <<EMAIL>>
Author(s)
<NAME>
References
http://www.espncricinfo.com/ci/content/stats/index.html https://gigadom.wordpress.com/
See Also
cleanBowlerData getPlayerData batsman4s batsmanMovingAverage
Examples
# Get or use the <batsman>.csv obtained with getPlayerData()
#tendulkar <- getPlayerData(35320,file="tendulkar.csv",type="batting", homeOrAway=c(1,2),result=c(1,2,4))
clean(pathToFile)
'''
df = pd.read_csv(batsmanCSV,na_values=['-'])
a = df['Runs'] != "DNB"
batsman = df[a]
# Remove rows with 'TDNB'
c =batsman['Runs'] != "TDNB"
batsman = batsman[c]
# Remove rows with absent
d = batsman['Runs'] != "absent"
batsman = batsman[d]
# Remove the "* indicating not out
batsman['Runs']= batsman['Runs'].str.replace(r"[*]","")
# Drop rows which have NA
batsman = batsman.dropna()
#Return the data frame
return(batsman)
##########################################################################################
# Designed and developed by <NAME>
# Date : 19 Oct 2018
# Function : cleanBowlerData
# This function cleans the bowler's data file and returns the cleaned data frame for use in
# other functions
##########################################################################################
def cleanBowlerData(file):
'''
Clean the bowlers data frame
Description
    Clean the bowler's CSV file and remove rows with DNB (did not bowl) & TDNB (team did not bowl). Also normalize 8-ball overs to 6-ball overs for earlier bowlers
Usage
cleanBowlerData(file)
Arguments
file
The <bowler>.csv file
Details
More details can be found in my short video tutorial in Youtube https://www.youtube.com/watch?v=q9uMPFVsXsI
Value
A cleaned bowler data frame with complete cases
Note
Maintainer: <NAME> <<EMAIL>>
Author(s)
<NAME>
References
http://www.espncricinfo.com/ci/content/stats/index.html
https://gigadom.wordpress.com/
See Also
clean
Examples
# Get bowling data and store in file for future
# kumble <- getPlayerData(30176,dir="./mytest", file="kumble.csv",type="bowling",
# homeOrAway=[1],result=[1,2])
cleanBowlerData(pathToFile)
'''
# Read the <bowler>.csv file
df = pd.read_csv(file,na_values=['-'])
# Remove rows with did not bowl
a = df['Overs']!= "DNB"
df = df[a]
# Remove rows with 'TDNB' - team did not bowl
c =df['Overs'] != "TDNB"
df = df[c]
# Get all complete cases
bowlerComplete = df.dropna(axis=1)
    # Normalize overs for early bowlers who bowled 8 balls per over (BPO) to the equivalent number of 6-ball overs
    if bowlerComplete.columns[2] == "BPO":
bowlerComplete['Overs'] = pd.to_numeric(bowlerComplete['Overs']) *8/6
return(bowlerComplete)
import pandas as pd
import os
##########################################################################################
# Designed and developed by <NAME>
# Date : 11 Oct 2018
# Function : getPlayerData
# This function gets the data of batsman/bowler and returns the data frame. This data frame can
# stored for use in other functions
##########################################################################################
def getPlayerData(profile,opposition="",host="",dir="./data",file="player001.csv",type="batting",
homeOrAway=[1,2],result=[1,2,4],create=True) :
'''
Get the player data from ESPN Cricinfo based on specific inputs and store in a file in a given directory
Description
Get the player data given the profile of the batsman. The allowed inputs are home,away or both and won,lost or draw of matches. The data is stored in a <player>.csv file in a directory specified. This function also returns a data frame of the player
Usage
getPlayerData(profile,opposition="",host="",dir="./data",file="player001.csv",
type="batting", homeOrAway=c(1,2),result=c(1,2,4))
Arguments
profile
This is the profile number of the player to get data. This can be obtained from http://www.espncricinfo.com/ci/content/player/index.html. Type the name of the player and click search. This will display the details of the player. Make a note of the profile ID. For e.g For Sachin Tendulkar this turns out to be http://www.espncricinfo.com/india/content/player/35320.html. Hence the profile for Sachin is 35320
opposition
The numerical value of the opposition country e.g.Australia,India, England etc. The values are Australia:2,Bangladesh:25,England:1,India:6,New Zealand:5,Pakistan:7,South Africa:3,Sri Lanka:8, West Indies:4, Zimbabwe:9
host
The numerical value of the host country e.g.Australia,India, England etc. The values are Australia:2,Bangladesh:25,England:1,India:6,New Zealand:5,Pakistan:7,South Africa:3,Sri Lanka:8, West Indies:4, Zimbabwe:9
dir
Name of the directory to store the player data into. If not specified the data is stored in a default directory "./data". Default="./data"
file
Name of the file to store the data into for e.g. tendulkar.csv. This can be used for subsequent functions. Default="player001.csv"
type
type of data required. This can be "batting" or "bowling"
homeOrAway
    This is a vector with either 1, 2 or both. 1 is for home, 2 is for away
result
This is a vector that can take values 1,2,4. 1 - won match 2- lost match 4- draw
Details
More details can be found in my short video tutorial in Youtube https://www.youtube.com/watch?v=q9uMPFVsXsI
Value
Returns the player's dataframe
Note
Maintainer: <NAME> <<EMAIL>>
Author(s)
<NAME>
References
http://www.espncricinfo.com/ci/content/stats/index.html
https://gigadom.wordpress.com/
See Also
getPlayerDataSp
Examples
## Not run:
# Both home and away. Result = won,lost and drawn
tendulkar <-getPlayerData(35320,dir="../cricketr/data", file="tendulkar1.csv",
type="batting", homeOrAway=c(1,2),result=c(1,2,4))
# Only away. Get data only for won and lost innings
tendulkar <-getPlayerData(35320,dir="../cricketr/data", file="tendulkar2.csv",
type="batting",homeOrAway=c(2),result=c(1,2))
# Get bowling data and store in file for future
kumble <- getPlayerData(30176,dir="../cricketr/data",file="kumble1.csv",
type="bowling",homeOrAway=c(1),result=c(1,2))
#Get the Tendulkar's Performance against Australia in Australia
tendulkar <-getPlayerData(35320, opposition = 2,host=2,dir=".",
file="tendulkarVsAusInAus.csv",type="batting")
'''
# Initial url to ""
url =""
suburl1 = "http://stats.espncricinfo.com/ci/engine/player/"
suburl2 ="?class=1;"
suburl3 = "template=results;"
suburl4 = "view=innings"
#Set opposition
theOpposition = "opposition=" + opposition + ";"
# Set host country
hostCountry = "host=" + host + ";"
# Create a profile.html with the profile number
player = str(profile) + ".html"
# Set the home or away
str1=str2=""
#print(len(homeOrAway))
for i in homeOrAway:
if i == 1:
str1 = str1 + "home_or_away=1;"
elif i == 2:
str1 = str1 + "home_or_away=2;"
HA= str1
# Set the type batting or bowling
t = "type=" + type + ";"
# Set the result based on input
str2=""
for i in result:
if i == 1:
str2 = str2+ "result=1;"
elif i == 2:
str2 = str2 + "result=2;"
elif i == 4:
str2 = str2 + "result=4;"
result = str2
# Create composite URL
url = suburl1 + player + suburl2 + hostCountry + theOpposition + HA + result + suburl3 + t + suburl4
#print(url)
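    # Illustrative sketch of the composite URL built above (assumed inputs: profile=35320,
    # type="batting", homeOrAway=[1,2], result=[1,2,4], opposition="" and host=""):
    #   http://stats.espncricinfo.com/ci/engine/player/35320.html?class=1;host=;opposition=;home_or_away=1;home_or_away=2;result=1;result=2;result=4;template=results;type=batting;view=innings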
# Read the data from ESPN Cricinfo
dfList= pd.read_html(url)
# Choose appropriate table from list of returned tables
df=dfList[3]
colnames= df.columns
    # Select columns based on batting or bowling
if type=="batting" :
# Select columns [1:9,11,12,13]
cols = list(range(0,9))
cols.extend([10,11,12])
elif type=="bowling":
# Check if there are the older version of 8 balls per over (BPO) column
# [1:8,10,11,12]
# Select BPO column for older bowlers
if colnames[1] =="BPO":
# [1:8,10,11,12]
cols = list(range(0,9))
cols.extend([10,11,12])
else:
# Select columns [1:7,9,10,11]
cols = list(range(0,8))
cols.extend([8,9,10])
#Subset the necessary columns
df1 = df.iloc[:, cols]
if not os.path.exists(dir):
os.mkdir(dir)
#print("Directory " , dir , " Created ")
else:
pass
#print("Directory " , dir , " already exists, writing to this folder")
# Create path
path= os.path.join(dir,file)
if create:
# Write to file
df1.to_csv(path)
# Return the data frame
return(df1)
##########################################################################################
# Designed and developed by <NAME>
# Date : 19 Oct 2018
# Function: getPlayerDataSp
# This function is a specialized version of getPlayer Data. This function gets the players data
# along with details on matches' venue( home/abroad) and the result (won,lost,drawn) as
# 2 separate columns
#
###########################################################################################
def getPlayerDataSp(profileNo,tdir="./data",tfile="player001.csv",ttype="batting"):
'''
Get the player data along with venue and result status
Description
    This function is a specialized version of getPlayerData(). It gets the player's data along with details on each match's venue (home/abroad) and result (won, lost, drawn) as 2 separate columns (ha & result). The column ha has 1: home and 2: overseas. The column result has values 1: won, 2: lost and 4: drawn match
Usage
getPlayerDataSp(profileNo, tdir = "./data", tfile = "player001.csv",
ttype = "batting")
Arguments
profileNo
This is the profile number of the player to get data. This can be obtained from http://www.espncricinfo.com/ci/content/player/index.html. Type the name of the player and click search. This will display the details of the player. Make a note of the profile ID. For e.g For Sachin Tendulkar this turns out to be http://www.espncricinfo.com/india/content/player/35320.html. Hence the profile for Sachin is 35320
tdir
    Name of the directory to store the player data into. If not specified the data is stored in a default directory "./data". Default="./data"
tfile
Name of the file to store the data into for e.g. tendulkar.csv. This can be used for subsequent functions. Default="player001.csv"
ttype
type of data required. This can be "batting" or "bowling"
Details
More details can be found in my short video tutorial in Youtube https://www.youtube.com/watch?v=q9uMPFVsXsI
Value
Returns the player's dataframe along with the homeAway and the result columns
Note
Maintainer: <NAME> <<EMAIL>>
Author(s)
<NAME>
References
http://www.espncricinfo.com/ci/content/stats/index.html
https://gigadom.wordpress.com/
See Also
getPlayerData
Examples
## Not run:
# Only away. Get data only for won and lost innings
tendulkar <-getPlayerDataSp(35320,tdir="../cricketr/data", tfile="tendulkarsp.csv",ttype="batting")
# Get bowling data and store in file for future
kumble <- getPlayerDataSp(30176,tdir="../cricketr/data",tfile="kumblesp.csv",ttype="bowling")
## End(Not run)
'''
    # Get the data for the player
# Home & won
hw = getPlayerData(profile=profileNo,dir=tdir,file=tfile,homeOrAway=[1],result=[1],type=ttype,create=False)
# Home & lost
hl = getPlayerData(profile=profileNo,dir=tdir,file=tfile,homeOrAway=[1],result=[2],type=ttype,create=False)
# Home & drawn
hd = getPlayerData(profile=profileNo,dir=tdir,file=tfile,homeOrAway=[1],result=[4],type=ttype,create=False)
# Away and won
aw = getPlayerData(profile=profileNo,dir=tdir,file=tfile,homeOrAway=[2],result=[1],type=ttype,create=False)
#Away and lost
al = getPlayerData(profile=profileNo,dir=tdir,file=tfile,homeOrAway=[2],result=[2],type=ttype,create=False)
# Away and drawn
ad = getPlayerData(profile=profileNo,dir=tdir,file=tfile,homeOrAway=[2],result=[4],type=ttype,create=False)
# Set the values as follows
# ha := home = 1, away =2
# result= won = 1, lost = 2, drawn=4
hw['ha'] = 1
hw['result'] = 1
hl['ha'] = 1
hl['result'] = 2
hd['ha'] = 1
hd['result'] = 4
aw['ha'] = 2
aw['result'] = 1
al['ha'] = 2
al['result'] = 2
ad['ha'] = 2
ad['result'] = 4
    if not os.path.exists(tdir):
        os.mkdir(tdir)
        #print("Directory " , tdir , " Created ")
    else:
        pass
        #print("Directory " , tdir , " already exists, writing to this folder")
# Create path
path= os.path.join(tdir,tfile)
df= pd.concat([hw,hl,hd,aw,al,ad])
# Write to file
df.to_csv(path,index=False)
return(df)
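# Illustrative usage sketch (assumed file names, not part of the original module): the extra
# 'ha' and 'result' columns written by getPlayerDataSp() are what bowlerContributionWonLost()
# and bowlerPerfHomeAway() filter on, e.g.
#   kumblesp = getPlayerDataSp(30176, tdir=".", tfile="kumblesp.csv", ttype="bowling")
#   bowlerContributionWonLost("./kumblesp.csv", "Anil Kumble")
#   bowlerPerfHomeAway("./kumblesp.csv", "Anil Kumble")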
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from pylab import rcParams
##########################################################################################
# Designed and developed by <NAME>
# Date : 19 Oct 2018
# Function: relativeBatsmanCumulativeAvgRuns
# This function computes and plots the relative cumulative average runs of batsmen
#
###########################################################################################
def relativeBatsmanCumulativeAvgRuns(filelist, names):
'''
Relative batsman's cumulative average runs
Description
This function computes and plots the relative cumulative average runs of batsmen
Usage
relativeBatsmanCumulativeAvgRuns(frames, names)
Arguments
frames
This is a list of <batsman>.csv files obtained with an initial getPlayerData()
names
A list of batsmen names who need to be compared
Value
None
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://www.espncricinfo.com/ci/content/stats/index.html
https://gigadom.wordpress.com/
See Also
relativeBatsmanCumulativeStrikeRate relativeBowlerCumulativeAvgEconRate relativeBowlerCumulativeAvgWickets
Examples
batsmen=["tendulkar.csv","dravid.csv","ganguly.csv"]
names = ["Tendulkar","Dravid","Ganguly"]
relativeBatsmanCumulativeAvgRuns(batsmen,names)
'''
df1=pd.DataFrame()
# Set figure size
rcParams['figure.figsize'] = 10,6
for idx,file in enumerate(filelist):
df=clean(file)
runs=pd.to_numeric(df['Runs'])
df1[names[idx]] = runs.cumsum()/pd.Series(np.arange(1, len(runs)+1), runs.index)
df1.plot()
plt.xlabel('Innings')
plt.ylabel('Cumulative Average Runs')
plt.title('Relative batsmen cumulative average runs')
plt.text(180, 50,'Data source-Courtesy:ESPN Cricinfo',
horizontalalignment='center',
verticalalignment='center',
)
plt.show()
plt.gcf().clear()
return
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from pylab import rcParams
##########################################################################################
# Designed and developed by <NAME>
# Date : 19 Oct 2018
# Function: relativeBatsmanCumulativeStrikeRate
# This function computes and plots the relative cumulative average strike rate of batsmen
#
###########################################################################################
def relativeBatsmanCumulativeStrikeRate(filelist, names):
'''
Relative batsmen cumulative average strike rate
Description
This function computes and plots the cumulative average strike rate of batsmen
Usage
relativeBatsmanCumulativeStrikeRate(frames, names)
Arguments
frames
This is a list of <batsman>.csv files obtained with an initial getPlayerData()
names
A list of batsmen names who need to be compared
Value
None
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://www.espncricinfo.com/ci/content/stats/index.html
https://gigadom.wordpress.com/
See Also
relativeBatsmanCumulativeAvgRuns relativeBowlerCumulativeAvgEconRate relativeBowlerCumulativeAvgWickets
Examples
batsmen=["tendulkar.csv","dravid.csv","ganguly.csv"]
names = ["Tendulkar","Dravid","Ganguly"]
    relativeBatsmanCumulativeStrikeRate(batsmen,names)
'''
df1= | pd.DataFrame() | pandas.DataFrame |
#!/usr/bin/env python3
import argparse
from functools import lru_cache
import gzip
import hashlib
# import inspect
import lzma
import math
import os.path
import pickle
import sys
import time
import xattr
from collections import namedtuple
from itertools import chain
from functools import wraps
# from functools import partial
import matplotlib.ticker as ticker
import matplotlib.pyplot as plt
from pathlib import Path
from typing import Iterable
# import scipy.signal
import numpy as np
import pandas as pd
from astropy import units as u
from astropy import constants as const
import artistools
PYDIR = os.path.dirname(os.path.abspath(__file__))
plt.style.use('file://' + PYDIR + '/matplotlibrc')
elsymbols = ['n'] + list(pd.read_csv(os.path.join(artistools.config['path_datadir'], 'elements.csv'))['symbol'].values)
roman_numerals = ('', 'I', 'II', 'III', 'IV', 'V', 'VI', 'VII', 'VIII', 'IX',
'X', 'XI', 'XII', 'XIII', 'XIV', 'XV', 'XVI', 'XVII', 'XVIII', 'XIX', 'XX')
def diskcache(ignoreargs=[], ignorekwargs=[], saveonly=False, quiet=False, savegzipped=False,
funcdepends=None, funcversion=None):
def printopt(*args, **kwargs):
if not quiet:
print(*args, **kwargs)
@wraps(diskcache)
def diskcacheinner(func):
@wraps(func)
def wrapper(*args, **kwargs):
# save cached files in the folder of the first file/folder specified in the arguments
modelpath = Path()
if 'modelpath' in kwargs:
modelpath = kwargs['modelpath']
else:
for arg in args:
if os.path.isdir(arg):
modelpath = arg
break
if os.path.isfile(arg):
modelpath = Path(arg).parent
break
cachefolder = Path(modelpath, '__artistoolscache__.nosync')
if cachefolder.is_dir():
try:
xattr.setxattr(cachefolder, "com.dropbox.ignored", b'1')
except OSError:
pass
namearghash = hashlib.sha1()
namearghash.update(func.__module__.encode('utf-8'))
namearghash.update(func.__qualname__.encode('utf-8'))
namearghash.update(
str(tuple(arg for argindex, arg in enumerate(args) if argindex not in ignoreargs)).encode('utf-8'))
namearghash.update(str({k: v for k, v in kwargs.items() if k not in ignorekwargs}).encode('utf-8'))
namearghash_strhex = namearghash.hexdigest()
filename_nogz = Path(cachefolder, f'cached-{func.__module__}.{func.__qualname__}-{namearghash_strhex}.tmp')
filename_gz = Path(cachefolder, f'cached-{func.__module__}.{func.__qualname__}-{namearghash_strhex}.tmp.gz')
execfunc = True
saveresult = False
functime = -1
if (filename_nogz.exists() or filename_gz.exists()) and not saveonly:
# found a candidate file, so load it
filename = filename_nogz if filename_nogz.exists() else filename_gz
filesize = Path(filename).stat().st_size / 1024 / 1024
try:
printopt(f"diskcache: Loading '{filename}' ({filesize:.1f} MiB)...")
with zopen(filename, 'rb') as f:
result, version_filein = pickle.load(f)
if version_filein == str_funcversion:
execfunc = False
elif (not funcversion) and (not version_filein.startswith('funcversion_')):
execfunc = False
# elif version_filein == sourcehash_strhex:
# execfunc = False
else:
printopt(f"diskcache: Overwriting '{filename}' (function version mismatch)")
except Exception as ex:
# ex = sys.exc_info()[0]
printopt(f"diskcache: Overwriting '{filename}' (Error: {ex})")
pass
if execfunc:
timestart = time.time()
result = func(*args, **kwargs)
functime = time.time() - timestart
if functime > 1:
# slow functions are worth saving to disk
saveresult = True
else:
# check if we need to replace the gzipped or non-gzipped file with the correct one
                # if so, we need to save the new file even though functime is unknown, since we read
                # the result from the disk version instead of executing the function
if savegzipped and filename_nogz.exists():
saveresult = True
elif not savegzipped and filename_gz.exists():
saveresult = True
if saveresult:
# if the cache folder doesn't exist, create it
if not cachefolder.is_dir():
cachefolder.mkdir(parents=True, exist_ok=True)
try:
xattr.setxattr(cachefolder, "com.dropbox.ignored", b'1')
except OSError:
pass
if filename_nogz.exists():
filename_nogz.unlink()
if filename_gz.exists():
filename_gz.unlink()
fopen, filename = (gzip.open, filename_gz) if savegzipped else (open, filename_nogz)
with fopen(filename, 'wb') as f:
pickle.dump((result, str_funcversion), f, protocol=pickle.HIGHEST_PROTOCOL)
filesize = Path(filename).stat().st_size / 1024 / 1024
printopt(f"diskcache: Saved '{filename}' ({filesize:.1f} MiB, functime {functime:.1f}s)")
return result
# sourcehash = hashlib.sha1()
# sourcehash.update(inspect.getsource(func).encode('utf-8'))
# if funcdepends:
# try:
# for f in funcdepends:
# sourcehash.update(inspect.getsource(f).encode('utf-8'))
# except TypeError:
# sourcehash.update(inspect.getsource(funcdepends).encode('utf-8'))
#
# sourcehash_strhex = sourcehash.hexdigest()
str_funcversion = f'funcversion_{funcversion}' if funcversion else 'funcversion_none'
return wrapper if artistools.enable_diskcache else func
return diskcacheinner
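# Illustrative usage sketch (hypothetical function, not part of the original module):
#
#   @diskcache(savegzipped=True, funcversion='1')
#   def read_big_output(modelpath):
#       ...  # slow parsing of files under modelpath
#       return result
#
# The first call pickles the result into <modelpath>/__artistoolscache__.nosync/ (only when the
# call took more than one second, per the functime check above); later calls with the same
# arguments load the pickle instead of re-running the function, unless funcversion changes.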
class AppendPath(argparse.Action):
def __call__(self, parser, args, values, option_string=None):
# if getattr(args, self.dest) is None:
# setattr(args, self.dest, [])
if isinstance(values, Iterable):
pathlist = getattr(args, self.dest)
# not pathlist avoids repeated appending of the same items when called from Python
# instead of from the command line
if not pathlist:
for pathstr in values:
# print(f"pathstr {pathstr}")
# if Path(pathstr) not in pathlist:
pathlist.append(Path(pathstr))
else:
setattr(args, self.dest, Path(values))
class ExponentLabelFormatter(ticker.ScalarFormatter):
"""Formatter to move the 'x10^x' offset text into the axis label."""
def __init__(self, labeltemplate, useMathText=True, decimalplaces=None):
self.set_labeltemplate(labeltemplate)
self.decimalplaces = decimalplaces
super().__init__(useOffset=True, useMathText=useMathText)
# ticker.ScalarFormatter.__init__(self, useOffset=useOffset, useMathText=useMathText)
def _set_formatted_label_text(self):
# or use self.orderOfMagnitude
stroffset = self.get_offset().replace(r'$\times', '$') + ' '
strnewlabel = self.labeltemplate.format(stroffset)
self.axis.set_label_text(strnewlabel)
assert(self.offset == 0)
self.axis.offsetText.set_visible(False)
def set_labeltemplate(self, labeltemplate):
assert '{' in labeltemplate
self.labeltemplate = labeltemplate
def set_locs(self, locs):
if self.decimalplaces is not None:
self.format = '%1.' + str(self.decimalplaces) + 'f'
if self._usetex:
self.format = '$%s$' % self.format
elif self._useMathText:
self.format = '$%s$' % ('\\mathdefault{%s}' % self.format)
super().set_locs(locs)
if self.decimalplaces is not None:
# rounding the tick labels will make the locations incorrect unless we round these too
newlocs = [float(('%1.' + str(self.decimalplaces) + 'f') % (x / (10 ** self.orderOfMagnitude)))
* (10 ** self.orderOfMagnitude) for x in self.locs]
super().set_locs(newlocs)
self._set_formatted_label_text()
def set_axis(self, axis):
super().set_axis(axis)
self._set_formatted_label_text()
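# Illustrative usage sketch (hypothetical figure, not part of the original module):
#
#   fig, ax = plt.subplots()
#   ax.plot(np.linspace(0, 10, 50), np.linspace(0, 3e40, 50))
#   ax.yaxis.set_major_formatter(
#       ExponentLabelFormatter(labeltemplate='Luminosity {}[erg/s]', decimalplaces=1))
#
# Instead of printing the 'x10^n' offset text above the axis, the formatter substitutes it
# into the axis label via the '{}' placeholder in labeltemplate.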
def make_namedtuple(typename, **fields):
"""Make a namedtuple from a dictionary of attributes and values.
Example: make_namedtuple('mytuple', x=2, y=3)"""
return namedtuple(typename, fields)(*fields.values())
def showtimesteptimes(modelpath=None, numberofcolumns=5, args=None):
"""Print a table showing the timesteps and their corresponding times."""
if modelpath is None:
modelpath = Path()
print('Timesteps and midpoint times in days:\n')
times = get_timestep_times_float(modelpath, loc='mid')
indexendofcolumnone = math.ceil((len(times) - 1) / numberofcolumns)
for rownum in range(0, indexendofcolumnone):
strline = ""
for colnum in range(numberofcolumns):
if colnum > 0:
strline += '\t'
newindex = rownum + colnum * indexendofcolumnone
if newindex + 1 < len(times):
strline += f'{newindex:4d}: {float(times[newindex + 1]):.3f}d'
print(strline)
@lru_cache(maxsize=8)
def get_composition_data(filename):
"""Return a pandas DataFrame containing details of included elements and ions."""
if os.path.isdir(Path(filename)):
filename = os.path.join(filename, 'compositiondata.txt')
columns = ('Z,nions,lowermost_ionstage,uppermost_ionstage,nlevelsmax_readin,'
'abundance,mass,startindex').split(',')
compdf = pd.DataFrame()
with open(filename, 'r') as fcompdata:
nelements = int(fcompdata.readline())
fcompdata.readline() # T_preset
fcompdata.readline() # homogeneous_abundances
startindex = 0
for _ in range(nelements):
line = fcompdata.readline()
linesplit = line.split()
row_list = list(map(int, linesplit[:5])) + list(map(float, linesplit[5:])) + [startindex]
rowdf = pd.DataFrame([row_list], columns=columns)
compdf = compdf.append(rowdf, ignore_index=True)
startindex += int(rowdf['nions'])
return compdf
def get_composition_data_from_outputfile(modelpath):
"""Read ion list from output file"""
atomic_composition = {}
output = open(modelpath / "output_0-0.txt", 'r').read().splitlines()
ioncount = 0
for row in output:
if row.split()[0] == '[input.c]':
split_row = row.split()
if split_row[1] == 'element':
Z = int(split_row[4])
ioncount = 0
elif split_row[1] == 'ion':
ioncount += 1
atomic_composition[Z] = ioncount
composition_df = pd.DataFrame(
[(Z, atomic_composition[Z]) for Z in atomic_composition.keys()], columns=['Z', 'nions'])
composition_df['lowermost_ionstage'] = [1] * composition_df.shape[0]
composition_df['uppermost_ionstage'] = composition_df['nions']
return composition_df
def gather_res_data(res_df, index_of_repeated_value=1):
"""res files repeat output for each angle.
index_of_repeated_value is the value to look for repeating eg. time of ts 0.
In spec_res files it's 1, but in lc_res file it's 0"""
index_to_split = res_df.index[res_df.iloc[:, index_of_repeated_value]
== res_df.iloc[0, index_of_repeated_value]]
res_data = []
for i, index_value in enumerate(index_to_split):
if index_value != index_to_split[-1]:
chunk = res_df.iloc[index_to_split[i]:index_to_split[i + 1], :]
else:
chunk = res_df.iloc[index_to_split[i]:, :]
res_data.append(chunk)
return res_data
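# Illustrative usage sketch (assumed read pattern, not part of the original module):
#
#   res_df = pd.read_csv(Path(modelpath, 'spec_res.out'), delim_whitespace=True, header=None)
#   res_data = gather_res_data(res_df, index_of_repeated_value=1)
#
# res_data is then a list with one DataFrame per viewing angle, split wherever the value in
# the given column repeats the value seen in the first row.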
def match_closest_time(reftime, searchtimes):
"""Get time closest to reftime in list of times (searchtimes)"""
return str("{}".format(min([float(x) for x in searchtimes], key=lambda x: abs(x - reftime))))
def get_vpkt_config(modelpath):
filename = Path(modelpath, 'vpkt.txt')
vpkt_config = {}
with open(filename, 'r') as vpkt_txt:
vpkt_config['nobsdirections'] = int(vpkt_txt.readline())
vpkt_config['cos_theta'] = [float(x) for x in vpkt_txt.readline().split()]
vpkt_config['phi'] = [float(x) for x in vpkt_txt.readline().split()]
nspecflag = int(vpkt_txt.readline())
if nspecflag == 1:
vpkt_config['nspectraperobs'] = int(vpkt_txt.readline())
for i in range(vpkt_config['nspectraperobs']):
vpkt_txt.readline()
else:
vpkt_config['nspectraperobs'] = 1
vpkt_config['time_limits_enabled'], vpkt_config['initial_time'], vpkt_config['final_time'] = [
int(x) for x in vpkt_txt.readline().split()]
return vpkt_config
@lru_cache(maxsize=8)
def get_grid_mapping(modelpath):
"""Return dict with the associated propagation cells for each model grid cell and
a dict with the associated model grid cell of each propagration cell."""
if os.path.isdir(modelpath):
filename = firstexisting(['grid.out.xz', 'grid.out.gz', 'grid.out'], path=modelpath)
else:
filename = modelpath
assoc_cells = {}
mgi_of_propcells = {}
with open(filename, 'r') as fgrid:
for line in fgrid:
row = line.split()
propcellid, mgi = int(row[0]), int(row[1])
if mgi not in assoc_cells:
assoc_cells[mgi] = []
assoc_cells[mgi].append(propcellid)
mgi_of_propcells[propcellid] = mgi
return assoc_cells, mgi_of_propcells
def get_wid_init_at_tmin(modelpath):
# cell width in cm at time tmin
tmin = get_timestep_times_float(modelpath, loc='start')[0] * u.day.to('s')
_, _, vmax = artistools.inputmodel.get_modeldata(modelpath)
rmax = vmax * tmin
coordmax0 = rmax
ncoordgrid0 = 50
wid_init = 2 * coordmax0 / ncoordgrid0
return wid_init
def get_wid_init_at_tmodel(modelpath, ngridpoints=None, t_model=None, xmax=None):
if ngridpoints is None or t_model is None or xmax is None:
# Luke: ngridpoint only equals the number of model cells if the model is 3D
dfmodel, t_model, vmax = artistools.inputmodel.get_modeldata(modelpath)
ngridpoints = len(dfmodel)
xmax = vmax * t_model
ncoordgridx = round(ngridpoints ** (1. / 3.))
wid_init = 2 * xmax / ncoordgridx
print(xmax, t_model, wid_init)
return wid_init
@lru_cache(maxsize=16)
def get_nu_grid(modelpath):
"""Get an array of frequencies at which the ARTIS spectra are binned by exspec."""
specfilename = firstexisting(['spec.out.gz', 'spec.out', 'specpol.out'], path=modelpath)
specdata = pd.read_csv(specfilename, delim_whitespace=True)
return specdata.loc[:, '0'].values
def get_deposition(modelpath):
times = get_timestep_times_float(modelpath)
depdata = pd.read_csv(Path(modelpath, 'deposition.out'), delim_whitespace=True, header=None, names=[
'time', 'gammadep_over_Lsun', 'posdep_over_Lsun', 'total_dep_over_Lsun'])
depdata.index.name = 'timestep'
# no timesteps are given in deposition.out, so ensure that
# the times in days match up with the times of our assumed timesteps
for timestep, row in depdata.iterrows():
assert(abs(times[timestep] / row['time'] - 1) < 0.01)
return depdata
@lru_cache(maxsize=16)
def get_timestep_times(modelpath):
"""Return a list of the mid time in days of each timestep from a spec.out file."""
try:
specfilename = firstexisting(['spec.out.gz', 'spec.out', 'specpol.out'], path=modelpath)
time_columns = | pd.read_csv(specfilename, delim_whitespace=True, nrows=0) | pandas.read_csv |
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 7 19:14:22 2022
@author: aoust
"""
import pandas
import matplotlib.pyplot as plt
import scipy.stats
import numpy as np
d_3_RH = pandas.read_csv("../output/output_heur3_1.500000.csv",sep = ";",header=2)
d_3_RH["Instance name "] = d_3_RH["Instance name "] + "_3channels"
d_6_RH = pandas.read_csv("../output/output_heur6_1.500000.csv",sep = ";",header=2)
d_6_RH["Instance name "] = d_6_RH["Instance name "] + "_6channels"
d_RH = pandas.concat([d_3_RH,d_6_RH])
d_3_GH = | pandas.read_csv("../output/output_greedy3.csv",sep = ";",header=1) | pandas.read_csv |
#!/usr/bin/env python
# v1.0
import pysam
import os
import subprocess
import numpy as np
import pandas as pd
from celescope.tools.step import Step, s_common
import celescope.tools.utils as utils
class Conversion(Step):
"""
Features
- Get conversion pos in each read.
- Get snp info.
Output
- `{sample}.PosTag.bam` Bam file with conversion info.
- `{sample}.PosTag.csv` SNP info in csv format.
"""
def __init__(self, args, step_name):
Step.__init__(self, args, step_name)
# input files
self.ifile = os.path.join(args.outdir, args.sample+'.bam')
self.sample = args.sample
self.strandednessfile = args.strand
self.inbam = args.bam
self.bcfile = args.cell
self.outdir = args.outdir
self.thread = args.thread
# output files
self.outfile_bam = os.path.join(args.outdir, args.sample+'.PosTag.bam')
self.outfile_csv = os.path.join(args.outdir, args.sample+'.PosTag.csv')
@utils.add_log
def run(self):
##Filter and sort
self.fltSort(self.inbam,self.ifile,self.bcfile,self.thread)
cmd=['samtools index',self.ifile]
self.run_cmd(cmd)
##Adding tags
self.addTags(self.ifile,self.outfile_bam,self.strandednessfile)
cmd=['samtools index',self.outfile_bam]
self.run_cmd(cmd)
#Obtaining conversion positions
bam = pysam.AlignmentFile(self.outfile_bam, 'rb')
ContigLocs, AnnoteLocs=self.CountConvperPos(bam)
#Obtaining coverage over conversion position
ConvsPerPos,CoverofPosWithConvs = self.CountReadConverPerConvPos(bam,ContigLocs)
A=self.ExportasVcf(ConvsPerPos,CoverofPosWithConvs,AnnoteLocs)
A['sample'] = self.sample
#Saving result
A.to_csv(self.outfile_csv)
bam.close()
cmd=['rm', self.ifile]
self.run_cmd(cmd)
cmd=['rm', self.ifile+'.bai']
self.run_cmd(cmd)
def run_cmd(self,cmd):
subprocess.call(' '.join(cmd),shell=True)
@utils.add_log
def CountConvperPos(self,bamfile):
ContigLocs={}
AnnoteLocs={}
for read in bamfile.fetch():
try:
if read.get_tag('ST')=='+':
locs=read.get_tag('TL')
else:
locs=read.get_tag('AL')
if locs[0]!=0:
if read.reference_name in ContigLocs:
ContigLocs[read.reference_name].extend(locs)
else:
ContigLocs[read.reference_name] = list(locs)
if read.reference_name not in AnnoteLocs:
for i,each in enumerate(locs):
if i == 0:
AnnoteLocs[read.reference_name] = { each :read.get_tag('XT')}
else:
AnnoteLocs[read.reference_name][each] = read.get_tag('XT')
else:
for i,each in enumerate(locs):
if each not in AnnoteLocs[read.reference_name]:
AnnoteLocs[read.reference_name][each] = read.get_tag('XT')
except (ValueError,KeyError):
continue
return ContigLocs, AnnoteLocs
@utils.add_log
def CountReadConverPerConvPos(self,bam,ContigLocs):
ConvsPerPos={}
CoverofPosWithConvs={}
for key in ContigLocs.keys():
ContigLocs[key]=sorted(ContigLocs[key])
ConvsPerPos[key]={}
k=0
current=ContigLocs[key][k]
k+=1
nextone=ContigLocs[key][k]
while k < len(ContigLocs[key])-1:
ConvsPerPos[key][current]=1
while current == nextone and k < len(ContigLocs[key])-1:
k+=1
nextone=ContigLocs[key][k]
ConvsPerPos[key][current]+=1
current = nextone
if k < len(ContigLocs[key])-1:
k+=1
nextone=ContigLocs[key][k]
CoverofPosWithConvs[key]={}
for key2 in ConvsPerPos[key].keys():
try:
CoverofPosWithConvs[key][key2]=bam.count(key,key2,key2+1)
except ValueError:
continue
return ConvsPerPos,CoverofPosWithConvs
@utils.add_log
def ExportasVcf(self,ConvsPerPos,CoverofPosWithConvs, AnnoteLocs):
#Chrom, Pos , ConvsPerPs, CoverofPosWithConvs
Outputdf =pd.DataFrame(columns=['pos2','convs','covers','chrom','posratio'])
for key in ConvsPerPos.keys():
df=pd.DataFrame.from_dict(ConvsPerPos[key], orient='index')
df1=pd.DataFrame.from_dict(CoverofPosWithConvs[key], orient='index')
df.index.name='pos'
df1.index.name='pos'
df.columns = ['convs']
df1.columns = ['covers']
df2=df.join(df1)
df2['pos2'] = df2.index
df2.index = np.arange(df2.shape[0])
df2['chrom']=np.repeat(key,df2.shape[0])
df2['posratio']=df2['convs']/df2['covers']
df3=pd.DataFrame.from_dict(AnnoteLocs[key], orient='index')
df3.columns = ['gene_id']
df2=df2.join(df3, on='pos2')
Outputdf=Outputdf.append(df2)
return Outputdf.reset_index(drop=True)
def createTag(self,d):
return ''.join([''.join(key) + str(d[key]) + ';' for key in d.keys()])[:-1]
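    # Illustrative sketch (hypothetical counts): createTag({('t', 'C'): 2, ('a', 'G'): 0})
    # returns 'tC2;aG0' -- each key is joined to its count and entries are ';'-separated,
    # which is the string format convInRead() below returns as SC_tag and TC_tag.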
def convInRead(self, read, qual = 20):
specific_conversions = {}
total_content = {'a' : 0, 'c' : 0, 'g' : 0, 't' : 0}
specific_conversions[('c', 'A')] = 0
specific_conversions[('g', 'A')] = 0
specific_conversions[('t', 'A')] = 0
specific_conversions[('a', 'C')] = 0
specific_conversions[('g', 'C')] = 0
specific_conversions[('t', 'C')] = 0
specific_conversions[('a', 'G')] = 0
specific_conversions[('c', 'G')] = 0
specific_conversions[('t', 'G')] = 0
specific_conversions[('a', 'T')] = 0
specific_conversions[('c', 'T')] = 0
specific_conversions[('g', 'T')] = 0
specific_conversions[('a', 'N')] = 0
specific_conversions[('c', 'N')] = 0
specific_conversions[('g', 'N')] = 0
specific_conversions[('t', 'N')] = 0
tC_loc = []
aG_loc = []
try:
refseq = read.get_reference_sequence().lower()
except (UnicodeDecodeError):
refseq=''
for base in total_content.keys():
total_content[base] += refseq.count(base)
for pair in read.get_aligned_pairs(with_seq=True):
try:
if pair[0] is not None and pair[1] is not None and pair[2] is not None:
if str(pair[2]).islower() and not read.query_qualities[pair[0]] < qual:
specific_conversions[(pair[2],read.seq[pair[0]])] += 1
if (pair[2],read.seq[pair[0]]) == ('t', 'C'):
tC_loc.append(pair[1])
if (pair[2],read.seq[pair[0]]) == ('a', 'G'):
aG_loc.append(pair[1])
except (UnicodeDecodeError, KeyError):
continue
SC_tag = self.createTag(specific_conversions)
TC_tag = self.createTag(total_content)
if len(tC_loc) == 0:
tC_loc.append(0)
if len(aG_loc) == 0:
aG_loc.append(0)
return SC_tag, TC_tag, tC_loc, aG_loc
@utils.add_log
def addTags(self,bamfilename, outputname,strandednessfile):
bamfile = pysam.AlignmentFile(bamfilename, 'rb')
mod_bamfile = pysam.AlignmentFile(outputname, mode='wb',template=bamfile)
strandedness = | pd.read_csv(strandednessfile, header=None, index_col=0) | pandas.read_csv |
"""
Generate static website for simple data visualization
"""
import gspread
import pandas as pd
from numpy import nan
import os
import requests
import pytz
from datetime import datetime, timedelta
import json
import math
import dateutil
from pdb import set_trace
import logging
from time import sleep
import io
import re
import pickle
import numpy as np
import argparse
from pkg_resources import resource_filename
from mvpb_util import get_client, read_sheet
# constants
BUOY_NUM = '44020' # Buoy in Nantucket Sound
DATA_DIR = 'data'
# TODO: don't bother with the pickle file
BUOY_HISTORICAL = os.path.join(DATA_DIR, 'historical.pkl')
LOG_LEVEL = logging.INFO
GOOGLE_WAIT_SEC = 60
INKWELL_LAT = 41.452463 # degrees N
INKWELL_LON = -70.553526 # degrees E
US_EASTERN = pytz.timezone('US/Eastern')
UTC = pytz.timezone('UTC')
# init logging
logger = logging.getLogger('mv-polar-bears')
def is_google_quota_error(err):
"""Return True if input 'err' is a Google quota error, else False"""
tf = False
if isinstance(err, gspread.exceptions.APIError):
msg = err.response.json()['error']
code = msg['code']
status = msg['status']
if code == 429 and status == 'RESOURCE_EXHAUSTED':
tf = True
return tf
def is_darksky_quota_error(err):
"""Return Tre if input 'err' is a DarkSky quota error, else False"""
tf = False
if isinstance(err, requests.exceptions.HTTPError):
resp = err.response
if resp.status_code == 403 and 'darksky' in resp.url:
tf = True
return tf
def api(func):
"""
Wrap input function to handle known API issues (e.g., rate limits, errors)
Arguments:
func: callable, to be wrapped and returned
Returns: callable, wrapped version of func that handles API issues
"""
def wrapper(*args, **kwargs):
while True:
# retry until some condition breaks the loop
try:
# attempt to run function
func(*args, **kwargs)
except Exception as err:
# handle known exceptions
if is_google_quota_error(err):
# handle google rate error, retry after delay
msg = 'Google quota exhausted, waiting {}s'.format(GOOGLE_WAIT_SEC)
logger.warning(msg)
sleep(GOOGLE_WAIT_SEC)
continue
if is_darksky_quota_error(err):
# DarkSky API quota exceeded, log error and exit
msg = 'DarkSky API quota exceeded, terminating function'
logger.error(msg)
break
else:
# some other error, fail
raise err
# completed without error
break
return wrapper
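# Illustrative usage sketch (hypothetical function, not part of the original module):
#
#   @api
#   def refresh(sheet):
#       ...  # gspread / Dark Sky calls that may hit an API quota
#
# A Google quota error makes the wrapper sleep GOOGLE_WAIT_SEC seconds and retry, a Dark Sky
# quota error logs an error and aborts the call, and any other exception is re-raised.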
def get_column_indices(sheet, base=0):
"""
Return lookup table {column name: column index}
Arguments:
sheet: gspread sheet, connected
base: 0 or 1, index of first column
Return: lookup table
"""
hdr = sheet.row_values(1)
return {name: ii+base for ii, name in enumerate(hdr)}
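# Illustrative sketch (hypothetical header row): if the first row of the sheet is
# ['DATE', 'TIME', 'DAY-OF-WEEK'], get_column_indices(sheet) returns
# {'DATE': 0, 'TIME': 1, 'DAY-OF-WEEK': 2}, or the same mapping shifted by one with base=1.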
@api
def add_missing_days(sheet):
"""
Add (empty) rows in sheet for missing days
Arguments:
sheet: gspread sheet, connected
"""
logger.info('Adding rows for missing days')
# get current content
content = read_sheet(sheet)
col2ind = get_column_indices(sheet, base=0)
def new_row(dt):
"""Return list of cells for a new row at datetime 'dt'"""
row = [None] * len(content.columns)
row[col2ind['DATE']] = dt.strftime('%Y-%m-%d')
row[col2ind['TIME']] = dt.strftime('%H:%M %p')
return row
# ensure minimum daily frequency by adding empty rows as needed
rid = 3 # 1-based index to google sheet row
prev_dt = content.iloc[1].name
for ii in range(1, len(content)): # index to local copy
row = content.iloc[ii]
        # delete empty rows, they are sometimes created by API errors
if all(pd.isnull(row)):
sheet.delete_row(rid)
logger.warning('Deleted empty row, index {}'.format(rid))
continue
# check for gap and fill it
curr_dt = row.name
missing_days = (curr_dt - prev_dt).days - 1
for jj in range(missing_days):
day = prev_dt + timedelta(days=jj + 1)
sheet.insert_row(new_row(day), rid, 'USER_ENTERED')
logger.info('Added row for {} at index {}'.format(day, rid))
rid += 1
# proceed to next row
prev_dt = curr_dt
rid += 1
# add rows up to current day if needed
latest = curr_dt
today = datetime.now(tz=US_EASTERN).replace(hour=7, minute=30, second=0, microsecond=0)
while curr_dt < today:
curr_dt += timedelta(days=1)
sheet.append_row(new_row(curr_dt), 'USER_ENTERED')
logger.info('Appended row for {}'.format(curr_dt))
@api
def add_missing_dows(sheet):
"""
Populate missing day-of-week cells
Arguments:
sheet: gspread sheet, connected
"""
logger.info('Adding missing day-of-week data')
# get current content
content = read_sheet(sheet)
# find col to update
sheet_col_idx = get_column_indices(sheet, base=1)['DAY-OF-WEEK']
# compile list of cells to update at-once
to_update = []
for ii in range(len(content)):
row = content.iloc[ii]
if pd.isnull(row['DAY-OF-WEEK']):
dt = row.name
dow = dt.strftime('%A')
sheet_row_idx = ii + 2 # index in sheet, 1-based with header
cell = gspread.models.Cell(sheet_row_idx, sheet_col_idx, dow)
to_update.append(cell)
logger.info('Queue add day-of-week {} -> {}'.format(dt, dow))
# update all at once
if to_update:
sheet.update_cells(to_update, 'USER_ENTERED')
logger.info('Updated day-of-week for {} rows'.format(len(to_update)))
@api
def add_missing_weather(sheet, darksky_key):
"""
Populate missing weather cells
Arguments:
sheet: gspread sheet, connected
darksky_key: path to DarkSky API key file
"""
logger.info('Adding missing weather conditions data')
# constants
batch_size = 25
weather_col_names = [
'CLOUD-COVER-PERCENT', 'HUMIDITY-PERCENT', 'PRECIP-RATE-INCHES-PER-HOUR',
'PRECIP-PROBABILITY', 'WEATHER-SUMMARY', 'AIR-TEMPERATURE-DEGREES-F',
'WIND-BEARING-CW-DEGREES-FROM-N', 'WIND-GUST-SPEED-MPH',
'WIND-SPEED-MPH']
col_idxs = get_column_indices(sheet, base=1)
with open(darksky_key, 'r') as fp:
key = json.load(fp)['secret_key']
# get current content
content = read_sheet(sheet)
# compile list of cells to update at-once
to_update = []
for ii in range(len(content)):
row = content.iloc[ii]
if all(pd.isnull(row[weather_col_names])):
# get weather
dt = row.name
weather_data = get_weather_conditions(key, dt=dt)
# queue update for all missing cells
sheet_row_idx = ii + 2 # index in sheet, 1-based with header
for col_name in weather_col_names:
sheet_col_idx = col_idxs[col_name]
new_value = weather_data[col_name]
cell = gspread.models.Cell(sheet_row_idx, sheet_col_idx, new_value)
to_update.append(cell)
logger.info('Queue {} -> {} for row {}'.format(col_name, new_value, sheet_row_idx))
# update batch
batch_full = len(to_update) >= batch_size
last_batch = (ii == len(content)-1) and to_update
if batch_full or last_batch:
sheet.update_cells(to_update, 'USER_ENTERED')
logger.info('Updated weather conditions data in {} cells'.format(len(to_update)))
to_update = []
@api
def add_missing_water(sheet):
"""
Populate missing water conditions cells
Arguments:
sheet: gspread sheet, connected
"""
logger.info('Adding missing water conditions data')
# constants
batch_size = 50
water_col_names = [
'WAVE-HEIGHT-METERS',
'DOMINANT-WAVE-PERIOD-SECONDS',
'AVERAGE-WAVE-PERIOD-SECONDS',
'DOMINANT-WAVE-DIRECTION-DEGREES-CW-FROM-N',
'WATER-TEMPERATURE-DEGREES-C']
col_idxs = get_column_indices(sheet, base=1)
# get current content
content = read_sheet(sheet)
# compile list of cells to update at-once
historical = get_historical_water_conditions()
to_update = []
for ii in range(len(content)):
row = content.iloc[ii]
if all(pd.isnull(row[water_col_names])):
# get water conditions
dt = row.name
water_data = get_water_conditions(dt, historical)
# queue update for all missing cells
sheet_row_idx = ii + 2 # index in sheet, 1-based with header
for col_name in water_col_names:
sheet_col_idx = col_idxs[col_name]
new_value = water_data[col_name]
cell = gspread.models.Cell(sheet_row_idx, sheet_col_idx, new_value)
to_update.append(cell)
logger.info('Queue {} -> {} for row {}'.format(col_name, new_value, sheet_row_idx))
# update batch
batch_full = len(to_update) >= batch_size
last_batch = (ii == len(content)-1) and to_update
if batch_full or last_batch:
sheet.update_cells(to_update, 'USER_ENTERED')
logger.info('Updated water conditions data in {} cells'.format(len(to_update)))
to_update = []
def get_weather_conditions(key, lon=INKWELL_LON, lat=INKWELL_LAT, dt=None):
"""
Retrieve forecast or observed weather conditions
Note: using the forecast.io Dark Sky API, documented here:
https://darksky.net/dev/docs
Arguments:
key: string, Dark Sky API key
lon: longitude of a location (in decimal degrees). Positive is east,
negative is west, default is Inkwell beach, Oak Bluffs
lat: latitude of a location (in decimal degrees). Positive is north,
negative is south, default is Inkwell beach, Oak Bluffs
dt: datetime, timezone-aware, time for observation, default is now in
US/Eastern timezone
Returns: Dict with the following fields (renamed from forecast.io):
CLOUD-COVER-PERCENT: The percentage of sky occluded by clouds, between
0 and 1, inclusive.
HUMIDITY-PERCENT: The relative humidity, between 0 and 1, inclusive.
PRECIP-RATE-INCHES-PER-HOUR: The intensity (in inches of liquid water per
hour) of precipitation occurring at the given time. This value is
conditional on probability (that is, assuming any precipitation
occurs at all) for minutely data points, and unconditional
otherwise.
PRECIP-PROBABILITY: The probability of precipitation occurring, between
0 and 1, inclusive.
WEATHER-SUMMARY: A human-readable text summary of this data point.
AIR-TEMPERATURE-DEGREES-F: The air temperature in degrees Fahrenheit.
WIND-BEARING-CW-DEGREES-FROM-N: The direction that the wind is coming
from in degrees, with true north at 0 deg and progressing clockwise.
(If windSpeed is zero, then this value will not be defined.)
WIND-GUST-SPEED-MPH: The wind gust speed in miles per hour.
WIND-SPEED-MPH: The wind speed in miles per hour.
"""
# set defaults
if not dt:
dt = datetime.now(tz=US_EASTERN)
# request data from Dark Sky API (e.g. forecast.io)
stamp = math.floor(dt.timestamp())
url = 'https://api.darksky.net/forecast/{}/{:.10f},{:.10f},{}'.format(
key, lat, lon, stamp)
params = {'units': 'us'}
resp = requests.get(url, params)
resp.raise_for_status()
data = resp.json()
# reformat resulting data
fields = { # forecast.io names -> local names
'cloudCover': 'CLOUD-COVER-PERCENT',
'humidity': 'HUMIDITY-PERCENT',
'precipIntensity': 'PRECIP-RATE-INCHES-PER-HOUR',
'precipProbability': 'PRECIP-PROBABILITY',
'summary': 'WEATHER-SUMMARY',
'temperature': 'AIR-TEMPERATURE-DEGREES-F',
'windBearing': 'WIND-BEARING-CW-DEGREES-FROM-N',
'windGust': 'WIND-GUST-SPEED-MPH',
'windSpeed': 'WIND-SPEED-MPH',
}
return {v: data['currently'].get(n, None) for n, v in fields.items()}
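# A minimal usage sketch (hypothetical API key and date; INKWELL_LON/INKWELL_LAT and
# US_EASTERN are assumed to be defined earlier in this module):
# conditions = get_weather_conditions(key='YOUR_DARKSKY_KEY',
# dt=datetime(2021, 7, 4, 12, 0, tzinfo=US_EASTERN))
# conditions['AIR-TEMPERATURE-DEGREES-F'] # e.g. 78.3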
def _water_to_datetime(row):
dt = datetime(
year=round(row['YY']), month=round(row['MM']), day=round(row['DD']),
hour=round(row['hh']), minute=round(row['mm']), tzinfo=UTC)
return dt
def _water_to_dataframe(txt):
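    """Parse whitespace-delimited buoy observation text into a DataFrame.
    Assumes the first row holds the column names (leading '#' stripped) and the
    second row holds units, which is skipped.
    """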
stream = io.StringIO(re.sub(r' +', ' ', txt).replace('#', ''))
    data = pd.read_csv(stream, sep=' ', skiprows=[1])
# -*- coding: utf-8 -*-
"""
Created on Sun Apr 25 13:55:59 2021
@author: tatia
"""
from dataproc.cohort import query_esbl_pts, remove_dups, observation_window
from dataproc.sampling import generate_samples
from dataproc.sampling import stratify_set
from dataproc.roc_auc_curves import plt_roc_auc_curve, plt_precision_recall_curve
from dataproc.create_dataset import dataset_creation
from dataproc.create_dataset import prescriptions
from dataproc.create_dataset import previous_admissions
from dataproc.create_dataset import open_wounds_diags, intubation_cpt, noteevents
from dataproc.embeddings import loinc_values
from hyper_params import HyperParams
import numpy as np
import pandas as pd
from sklearn.impute import SimpleImputer
import re
# load hyperparams instance
params = HyperParams()
def cohort_creation(observation_window_hours):
# Select esbl microbiology test
esbl_admits = query_esbl_pts()
# Remove dups
esbl_admits = remove_dups(esbl_admits)
# Create observation window
esbl_admits_window = observation_window(esbl_admits, window_size=observation_window_hours)
# Subset columns
pts_labels = esbl_admits_window[['hadm_id', 'index_date','RESISTANT_YN']]
return pts_labels
def loinc_values_proc(loinc_codes):
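    """
    Load lab values for the given LOINC codes and clean the free-text 'value'
    column: drop missing values, strip textual qualifiers such as
    'LESS THAN'/'GREATER THAN', and remove entries that cannot be analyzed.
    """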
loinc_vals = loinc_values(loinc_codes)
loinc_vals.dropna(subset=['value'], inplace=True)
loinc_vals = loinc_vals.astype({'value': 'string', 'loinc_code': 'category'})
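    # Strip textual qualifiers from the values. Note that str.lstrip/rstrip remove
    # any of the listed characters from the string ends, not the literal phrase.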
loinc_vals['value'] = loinc_vals['value'].map(lambda x: x.lstrip('LESS THAN '))
loinc_vals['value'] = loinc_vals['value'].map(lambda x: x.lstrip('GREATER THAN '))
loinc_vals['value'] = loinc_vals['value'].map(lambda x: x.lstrip('>GREATER THAN '))
loinc_vals['value'] = loinc_vals['value'].map(lambda x: x.lstrip('<LESS THAN '))
loinc_vals['value'] = loinc_vals['value'].map(lambda x: x.rstrip(' NG/ML'))
loinc_vals['value'] = loinc_vals['value'].map(lambda x: x.lstrip('<>'))
loinc_vals['value'] = loinc_vals['value'].map(lambda x: x.replace(',', '.'))
loinc_vals.drop(list(loinc_vals.loc[loinc_vals['value'] == 'UNABLE TO ANALYZE'].index), inplace=True)
loinc_vals.drop(list(loinc_vals.loc[loinc_vals['value'] == 'MOLYSIS FALSELY DECREASES THIS RESULT'].index), inplace=True)
loinc_vals.drop(list(loinc_vals.loc[loinc_vals['value'] == 'COMPUTER NETWORK FAILURE. TEST NOT RESULTED.'].index), inplace=True)
loinc_vals.drop(list(loinc_vals.loc[loinc_vals['value'] == 'UNABLE TO DETERMINE'].index), inplace=True)
loinc_vals.drop(list(loinc_vals.loc[loinc_vals['value'] == ':UNABLE TO DETERMINE'].index), inplace=True)
loinc_vals.drop(list(loinc_vals.loc[loinc_vals['value'] == 'UNABLE TO QUANTITATE'].index), inplace=True)
loinc_vals.drop(list(loinc_vals.loc[loinc_vals['value'] == 'UNABLE TO REPORT'].index), inplace=True)
return loinc_vals
def lab_records_categories(loinc_vals):
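    """
    Split LOINC codes into numeric, categorical and 'weird' groups, based on the
    share of values that parse as numbers and on the number of unique values.
    """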
numeric = []
categorical = []
weird = []
for code in loinc_codes:
size = len(loinc_vals.loc[loinc_vals['loinc_code'] == str(code), 'value'])
size_unique = len(loinc_vals.loc[loinc_vals['loinc_code'] == str(code), 'value'].unique())
sum_na = pd.to_numeric(loinc_vals.loc[loinc_vals['loinc_code'] == str(code), 'value'], errors='coerce').isna().sum()
if sum_na / size < 0.05:
numeric.append(code)
elif sum_na / size > 0.05 and size_unique < 100:
categorical.append(code)
else:
weird.append(code)
# Remove columns that are not useful:
# remove lab column that contains only 'inf' and 'Nan'
numeric.remove('26498-6')
# remove lab column that only contains phrase 'See comments'
categorical.remove('33914-3')
# remove lab column that contains phrase 'Random'
categorical.remove('13362-9')
return numeric, categorical, weird
def sum_stats_numeric_labs(loinc_vals, numeric):
numeric_stats = []
for code in numeric:
a = pd.to_numeric(loinc_vals.loc[loinc_vals['loinc_code'] == str(code), 'value'], errors='coerce').describe()
numeric_stats.append(a)
numeric_stats_df = pd.concat(numeric_stats, axis=1, keys=numeric)
return numeric_stats_df
def stanardize_numeric_values(df, list_of_clms, ref_df):
"""
Use the median and interquartile range to
standardize the numeric variables
value = (value – median) / (p75 – p25)
"""
for code in list_of_clms:
median = ref_df[code]['50%']
p25 = ref_df[code]['25%']
p75 = ref_df[code]['75%']
df[code] = (df[code] - median) / (p75 - p25)
# Subset relevant columns
columns = ['hadm_id'] + list_of_clms
df = df[columns].copy()
return df
def replace_missing_val(df, list_of_clms, how='median'):
"""
Imputation of missing values using median
"""
imp = SimpleImputer(strategy=how)
df_prc = imp.fit_transform(df[list_of_clms])
df_prc = pd.DataFrame(df_prc, columns=list_of_clms)
# Concat hadm_id and df_prc
out = pd.concat([df['hadm_id'], df_prc], axis = 1)
return out
def clean_categoric_lab_records(df, categorical):
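    """
    Collapse near-duplicate spellings and out-of-range codes in the categorical
    lab columns into canonical categories, then subset to hadm_id + categorical.
    """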
df['30089-7'] = np.where(df['30089-7'].isin(['<1','1','2']), '0-2',
np.where(df['30089-7'].isin(['3','4']),'3-5', df['30089-7']))
df['5767-9'] = np.where(df['5767-9'].isin(['CLEAR']), 'Clear',
np.where(df['5767-9'].isin(['SLHAZY']), 'SlHazy',
np.where(df['5767-9'].isin(['HAZY']), 'Hazy',
np.where(df['5767-9'].isin(['SlCloudy']),'SlCldy',
np.where(df['5767-9'].isin(['CLOUDY']),'Cloudy',df['5767-9'])))))
df['5769-5'] = np.where(df['5769-5'].isin(['0']), 'NEG',
np.where(df['5769-5'].isin(['NOTDONE']), 'NONE',
np.where(df['5769-5'].isin(['LRG']), 'MANY', df['5769-5'])))
df['5778-6'] = np.where(df['5778-6'].isin(['YELLOW','YEL']), 'Yellow',
np.where(df['5778-6'].isin(['STRAW']), 'Straw',
np.where(df['5778-6'].isin(['AMBER','AMB']), 'Amber',
np.where(df['5778-6'].isin(['RED']), 'Red',
np.where(df['5778-6'].isin(['ORANGE']), 'Orange',
np.where(df['5778-6'].isin(['DKAMB','DKAMBER']), 'DkAmb',
np.where(df['5778-6'].isin([' ']), np.nan, df['5778-6'])))))))
df['5797-6'] = np.where(df['5797-6'].isin(['>80']), '80',df['5797-6'])
df['5804-0'] = np.where(df['5804-0'].isin(['>300']), '300',
np.where(df['5804-0'].isin([' ']), np.nan, df['5804-0']))
df['5818-0'] = np.where(df['5818-0'].isin(['.2']), '0.2',
np.where(df['5818-0'].isin(['>8','>8.0']), '8',
np.where(df['5818-0'].isin(['>12']), '12',
np.where(df['5818-0'].isin(['NotDone']), np.nan, df['5818-0']))))
df['5822-2'] = np.where(df['5822-2'].isin(['0', 'N']), 'NONE',
np.where(df['5822-2'].isin(['NOTDONE']), np.nan, df['5822-2']))
df['778-1'] = np.where(df['778-1'].isin(['UNABLE TO ESTIMATE DUE TO PLATELET CLUMPS']), 'NOTDETECTED', df['778-1'])
# Subset columns
columns = ['hadm_id'] + categorical
df = df[columns].copy()
return df
def clean_static_demog_vars(df, staticvars):
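    """
    Consolidate sparse admission_location, language, religion and ethnicity
    categories into broader groups, then subset to hadm_id + staticvars.
    """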
df['admission_location'] = \
np.where(df['admission_location'].isin(['** INFO NOT AVAILABLE **']), 'EMERGENCY ROOM ADMIT',
np.where(df['admission_location'].isin(['TRANSFER FROM SKILLED NUR','TRANSFER FROM OTHER HEALT',
'TRANSFER FROM HOSP/EXTRAM']), 'TRANSFER FROM MED FACILITY',df['admission_location']))
df['language'] = \
np.where(~df['language'].isin(['ENGL','SPAN']),'OTHER',df['language'])
df['religion'] = \
np.where(~df['religion'].isin(['CATHOLIC','NOT SPECIFIED','UNOBTAINABLE','PROTESTANT QUAKER','JEWISH']),'OTHER',
np.where(df['religion'].isin(['UNOBTAINABLE']),'NOT SPECIFIED', df['religion'] ))
df['ethnicity'] = \
np.where(df['ethnicity'].isin(['ASIAN - CHINESE',
'ASIAN - ASIAN INDIAN',
'ASIAN - VIETNAMESE',
'ASIAN - OTHER',
'ASIAN - FILIPINO',
'ASIAN - CAMBODIAN']), 'ASIAN',
np.where(df['ethnicity'].isin(['WHITE - RUSSIAN',
'WHITE - BRAZILIAN',
'WHITE - OTHER EUROPEAN']),'WHITE',
np.where(df['ethnicity'].isin(['BLACK/CAPE VERDEAN',
'BLACK/HAITIAN',
'BLACK/AFRICAN']), 'BLACK/AFRICAN AMERICAN',
np.where(df['ethnicity'].isin(['HISPANIC/LATINO - PUERTO RICAN',
'HISPANIC/LATINO - DOMINICAN',
'HISPANIC/LATINO - SALVADORAN',
'HISPANIC/LATINO - CUBAN',
'HISPANIC/LATINO - MEXICAN']), 'HISPANIC OR LATINO',
np.where(df['ethnicity'].isin(['MULTI RACE ETHNICITY',
'MIDDLE EASTERN',
'PORTUGUESE',
'AMERICAN INDIAN/ALASKA NATIVE',
'NATIVE HAWAIIAN OR OTHER PACIFIC ISLANDER',
'AMERICAN INDIAN/ALASKA NATIVE FEDERALLY RECOGNIZED TRIBE']), 'OTHER',
np.where(df['ethnicity'].isin(['UNABLE TO OBTAIN',
'PATIENT DECLINED TO ANSWER']), 'UNKNOWN/NOT SPECIFIED',
df['ethnicity']))))))
# Subset columns
columns = ['hadm_id'] + staticvars
df = df[columns].copy()
return df
def clean_text(df, text_field='text'):
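    """
    Lower-case the note text and strip mentions, punctuation, URLs and digits
    into a new 'clean_tx' column.
    """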
    df['clean_tx'] = df[text_field].str.lower()
df['clean_tx'] = df['clean_tx'].apply(lambda elem: re.sub(r"(@[A-Za-z0-9]+)|([^0-9A-Za-z \t])|(\w+:\/\/\S+)|^rt|http.+?", "", elem))
df['clean_tx'] = df['clean_tx'].apply(lambda elem: re.sub(r"\d+", "", elem))
return df
if __name__ == '__main__':
# ESBL Cohort:
pts_labels = cohort_creation(params.observation_window_hours)
print('ESBL COHORT:')
print(pts_labels['hadm_id'].nunique())
print(pts_labels['RESISTANT_YN'].value_counts())
print('--------------------------------------------------------------')
# Antibiotics prescriptions:
rx = prescriptions(pts_labels['hadm_id'], params.observation_window_hours)
rx['value'] = 1
rx = rx[['hadm_id','drug','value']]
# Drop duplicated records
rx = rx.drop_duplicates(subset=['hadm_id','drug'], keep='first')
# One-hot encoder for prescriptions
onehotrx_df = rx.pivot_table(index='hadm_id', columns='drug', values='value', fill_value=0).reset_index()
print('RX records: ', onehotrx_df.shape)
print('--------------------------------------------------------------')
# Previous admissions:
admits = previous_admissions(pts_labels['hadm_id'])
admits_df = admits.groupby('hadm_id').agg({'prev_hadm_id':'nunique'}).reset_index()
admits_df = admits_df.rename(columns={'prev_hadm_id':'n_admits'})
print('Previous admits: ', admits_df.shape)
print('--------------------------------------------------------------')
# Open Wounds Diagnosis:
wounds = open_wounds_diags(pts_labels['hadm_id'])
wounds['wounds'] = 1 # wounds indicator column
# Group on hand_id & drop icd9 code column
wounds_df = wounds.drop_duplicates(subset=['hadm_id'], keep = 'first')
wounds_df = wounds_df.drop(columns='icd9_code')
print('Open wounds: ', wounds_df.shape)
print('--------------------------------------------------------------')
# Intubation procedures:
intubation = intubation_cpt(pts_labels['hadm_id'])
intubation['intubation'] = 1 # intubation indicator column
# Group on hand_id & drop cpt code and date columns
intubation = intubation.drop_duplicates(subset=['hadm_id'], keep = 'first')
intubation = intubation.drop(columns=['chartdate', 'cpt_cd'])
print('Intubation records: ', intubation.shape)
print('--------------------------------------------------------------')
# Note Events:
notes = noteevents(pts_labels['hadm_id'], params.observation_window_hours)
# Clean notes
notes = clean_text(df=notes, text_field='text')
# List antibiotics
antibitics_list = ['antibiotic', 'antibiotics','amikacin', 'ampicillin', 'sulbactam',
'cefazolin', 'cefepime', 'cefpodoxime', 'ceftazidime',
'ceftriaxone', 'cefuroxime', 'chloramphenicol', 'ciprofloxacin',
'clindamycin', 'daptomycin', 'erythromycin', 'gentamicin', 'imipenem',
'levofloxacin', 'linezolid', 'meropenem', 'nitrofurantoin', 'oxacillin',
'penicillin', 'pen<NAME>', 'piperacillin', 'tazobactam',
'rifampin', 'tetracycline', 'tobramycin', 'trimethoprim', 'vancomycin']
notes_check = pd.DataFrame()
for n, df in notes.groupby('row_id'):
# using list comprehension to check if string contains list element
res = [ele for ele in antibitics_list if(ele in df['clean_tx'].values[0])]
if len(res) >=1:
# print(len(res))
# print(df['clean_tx'].values[0])
            data = pd.DataFrame({'row_id': [n], 'hadm_id': [df['hadm_id'].values[0]], 'antibiotic_yn': [1]})
#!/usr/bin/env python3
# Simple utility for plotting CSV outputs from YCSB-runner
import os
import sys
import matplotlib
matplotlib.use('Agg')
matplotlib.rc('font', family='sans-serif')
from pandas import read_csv, concat
from matplotlib import pyplot as plt
# TODO: Make these configurable using command-line args
PLOT_FIELDS = ["anomaly_score"]
INDEX_FIELD = "mpl"
GROUP_FIELD = "trial"
PLOT_STYLE = "o-"
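# The input CSV is expected to contain at least the GROUP_FIELD ('trial'),
# INDEX_FIELD ('mpl') and PLOT_FIELDS columns; data is split per trial before plotting.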
def plot(fname):
g = read_csv(fname).groupby(GROUP_FIELD)
dfs = [g.get_group(x) for x in g.groups]
for i, f in enumerate(dfs):
# Drop fields which we don't want to plot (i.e. anything other than
# PLOT_FIELDS)
        dfs[i] = f.set_index(INDEX_FIELD).drop([k for k,v in f.items() if
k not in PLOT_FIELDS and k != INDEX_FIELD], axis=1)
# Create a final DF, renaming columns, and calculating the mean
    final_df = concat(dfs, axis=1)
# -*- coding: utf-8 -*-
"""
Project : PyCoA
Date : april 2020 - march 2021
Authors : <NAME>, <NAME>, <NAME>
Copyright ©pycoa.fr
License: See joint LICENSE file
Module : coa.display
About :
-------
An interface module to easily plot pycoa data with bokeh
"""
from coa.tools import kwargs_test, extract_dates, verb, get_db_list_dict
from coa.error import *
import math
import pandas as pd
import geopandas as gpd
import numpy as np
from collections import defaultdict
import itertools
import json
import io
from io import BytesIO
import base64
from IPython import display
import copy
import locale
from bokeh.models import ColumnDataSource, TableColumn, DataTable, ColorBar, LogTicker,\
HoverTool, CrosshairTool, BasicTicker, GeoJSONDataSource, LinearColorMapper, LogColorMapper,Label, \
PrintfTickFormatter, BasicTickFormatter, NumeralTickFormatter, CustomJS, CustomJSHover, Select, \
Range1d, DatetimeTickFormatter, Legend, LegendItem, Text
from bokeh.models.widgets import Tabs, Panel
from bokeh.plotting import figure
from bokeh.layouts import row, column, gridplot
from bokeh.palettes import Category10, Category20, Viridis256
from bokeh.models import Title
from bokeh.io import export_png
from bokeh import events
from bokeh.models.widgets import DateSlider
from bokeh.models import LabelSet, WMTSTileSource
from bokeh.transform import transform, cumsum
import shapely.geometry as sg
import branca.colormap
from branca.colormap import LinearColormap
from branca.element import Element, Figure
import folium
from PIL import Image
import coa.geo as coge
import matplotlib.pyplot as plt
import datetime as dt
import bisect
from functools import wraps
from IPython.core.display import display, HTML
width_height_default = [500, 380]
MAXCOUNTRIESDISPLAYED = 27
class CocoDisplay:
def __init__(self, db=None, geo = None):
verb("Init of CocoDisplay() with db=" + str(db))
self.database_name = db
self.dbld = get_db_list_dict()
self.lcolors = Category20[20]
self.scolors = Category10[5]
self.ax_type = ['linear', 'log']
self.geom = []
self.geopan = gpd.GeoDataFrame()
self.location_geometry = None
self.boundary_metropole = None
self.listfigs = []
self.options_stats = ['when','input','input_field']
self.options_charts = [ 'bins']
self.options_front = ['where','option','which','what','visu']
self.available_tiles = ['openstreet','esri','stamen']
self.available_modes = ['mouse','vline','hline']
self.uptitle, self.subtitle = ' ',' '
self.dfigure_default = {'plot_height':width_height_default[1] ,'plot_width':width_height_default[0],'title':None,'textcopyright':'default'}
self.dvisu_default = {'mode':'mouse','tile':self.available_tiles[0],'orientation':'horizontal','cursor_date':None,'maplabel':None,'guideline':False}
self.when_beg = dt.date(1, 1, 1)
self.when_end = dt.date(1, 1, 1)
self.alloptions = self.options_stats + self.options_charts + self.options_front + list(self.dfigure_default.keys()) +\
list(self.dvisu_default.keys()) + ['resumetype']
self.iso3country = self.dbld[self.database_name][0]
self.granularity = self.dbld[self.database_name][1]
self.namecountry = self.dbld[self.database_name][2]
try:
if self.granularity != 'nation':
self.geo = coge.GeoCountry(self.iso3country)
if self.granularity == 'region':
self.location_geometry = self.geo.get_region_list()[['code_region', 'name_region', 'geometry']]
self.location_geometry = self.location_geometry.rename(columns={'name_region': 'location'})
if self.iso3country == 'PRT':
tmp=self.location_geometry.rename(columns={'name_region': 'location'})
tmp = tmp.loc[tmp.code_region=='PT.99']
self.boundary_metropole =tmp['geometry'].total_bounds
if self.iso3country == 'FRA':
tmp=self.location_geometry.rename(columns={'name_region': 'location'})
tmp = tmp.loc[tmp.code_region=='999']
self.boundary_metropole =tmp['geometry'].total_bounds
elif self.granularity == 'subregion':
list_dep_metro = None
self.location_geometry = self.geo.get_subregion_list()[['code_subregion', 'name_subregion', 'geometry']]
self.location_geometry = self.location_geometry.rename(columns={'name_subregion': 'location'})
#if country == 'FRA':
# list_dep_metro = geo.get_subregions_from_region(name='Métropole')
#elif country == 'ESP':
# list_dep_metro = geo.get_subregions_from_region(name='España peninsular')
#if list_dep_metro:
# self.boundary_metropole = self.location_geometry.loc[self.location_geometry.code_subregion.isin(list_dep_metro)]['geometry'].total_bounds
else:
self.geo=coge.GeoManager('name')
geopan = gpd.GeoDataFrame()#crs="EPSG:4326")
info = coge.GeoInfo()
allcountries = self.geo.get_GeoRegion().get_countries_from_region('world')
geopan['location'] = [self.geo.to_standard(c)[0] for c in allcountries]
geopan = info.add_field(field=['geometry'],input=geopan ,geofield='location')
geopan = gpd.GeoDataFrame(geopan, geometry=geopan.geometry, crs="EPSG:4326")
geopan = geopan[geopan.location != 'Antarctica']
geopan = geopan.dropna().reset_index(drop=True)
self.location_geometry = geopan
except:
            raise CoaTypeError('What database are you looking for?')
    ''' COMMON FIGURE FOR ALL '''
def standardfig(self, **kwargs):
"""
Create a standard Bokeh figure, with pycoa.fr copyright, used in all the bokeh charts
"""
plot_width = kwargs.get('plot_width', self.dfigure_default['plot_width'])
plot_height = kwargs.get('plot_height', self.dfigure_default['plot_height'])
textcopyright = kwargs.get('textcopyright', self.dfigure_default['textcopyright'])
if textcopyright == 'default':
textcopyright = '©pycoa.fr (data from: {})'.format(self.database_name)
else:
textcopyright = '©pycoa.fr ' + textcopyright
citation = Label(x=0.65 * plot_width - len(textcopyright), y=0.01 * plot_height,
x_units='screen', y_units='screen',
text_font_size='1.5vh', background_fill_color='white', background_fill_alpha=.75,
text=textcopyright)
for i in list(self.dvisu_default.keys()) + self.options_front + self.options_charts + ['textcopyright'] + self.options_stats + ['date_slider']:
if i in kwargs.keys():
kwargs.pop(i)
kwargs.pop('title')
fig = figure(**kwargs, tools=['save', 'box_zoom,reset'], toolbar_location="right")
#fig.add_layout(citation)
fig.add_layout(Title(text=self.uptitle, text_font_size="10pt"), 'above')
fig.add_layout(Title(text=self.subtitle, text_font_size="8pt", text_font_style="italic"), 'below')
return fig
def get_listfigures(self):
return self.listfigs
def set_listfigures(self,fig):
if not isinstance(fig,list):
fig = [fig]
self.listfigs = fig
    ''' COMMON WRAPPER FOR ALL '''
def decowrapper(func):
'''
Main decorator it mainly deals with arg testings
'''
@wraps(func)
def wrapper(self, input = None, input_field = None, **kwargs):
"""
Parse a standard input, return :
- pandas: with location keyword (eventually force a column named 'where' to 'location')
- kwargs:
                * keys = [plot_width, plot_height, title, when, title_temporal, bins, what, which]
            Note that each method uses only the variables it needs; the others are ignored.
"""
if not isinstance(input, pd.DataFrame):
                raise CoaTypeError(str(input) + ' must be a pandas DataFrame with a pycoa structure!')
kwargs_test(kwargs, self.alloptions, 'Bad args used in the display function.')
when = kwargs.get('when', None)
which = kwargs.get('which', input.columns[2])
if input_field and 'cur_' in input_field:
what = which
else:
# cumul is the default
what = kwargs.get('what', which)
if input_field is None:
input_field = which
if isinstance(input_field,list):
test = input_field[0]
else:
test = input_field
if input[[test,'date']].isnull().values.all():
                raise CoaKeyError('All values for ' + which + ' are NaN or empty')
option = kwargs.get('option', None)
bins = kwargs.get('bins', 10)
title = kwargs.get('title', None)
#textcopyright = kwargs.get('textcopyright', 'default')
kwargs['plot_width'] = kwargs.get('plot_width', self.dfigure_default['plot_width'])
kwargs['plot_height'] = kwargs.get('plot_height', self.dfigure_default['plot_height'])
if 'where' in input.columns:
input = input.rename(columns={'where': 'location'})
if 'codelocation' and 'clustername' not in input.columns:
input['codelocation'] = input['location']
input['clustername'] = input['location']
input['rolloverdisplay'] = input['location']
input['permanentdisplay'] = input['location']
else:
if self.granularity == 'nation' :
#input['codelocation'] = input['codelocation'].apply(lambda x: str(x).replace('[', '').replace(']', '') if len(x)< 10 else x[0]+'...'+x[-1] )
input['permanentdisplay'] = input.apply(lambda x: x.clustername if self.geo.get_GeoRegion().is_region(x.clustername) else str(x.codelocation), axis = 1)
else:
if self.granularity == 'subregion' :
input = input.reset_index(drop=True)
if isinstance(input['codelocation'][0],list):
input['codelocation'] = input['codelocation'].apply(lambda x: str(x).replace("'", '')\
if len(x)<5 else '['+str(x[0]).replace("'", '')+',...,'+str(x[-1]).replace("'", '')+']')
trad={}
cluster = input.clustername.unique()
if isinstance(input.location[0],list):
cluster = [i for i in cluster]
for i in cluster:
if i == self.namecountry:
input['permanentdisplay'] = input.clustername #[self.dbld[self.database_name][2]]*len(input)
else:
if self.geo.is_region(i):
trad[i] = self.geo.is_region(i)
elif self.geo.is_subregion(i):
trad[i] = self.geo.is_subregion(i)#input.loc[input.clustername==i]['codelocation'].iloc[0]
else:
trad[i] = i
trad={k:(v[:3]+'...'+v[-3:] if len(v)>8 else v) for k,v in trad.items()}
if ',' in input.codelocation[0]:
input['permanentdisplay'] = input.clustername
else:
input['permanentdisplay'] = input.codelocation#input.clustername.map(trad)
elif self.granularity == 'region' :
if all(i == self.namecountry for i in input.clustername.unique()):
input['permanentdisplay'] = [self.namecountry]*len(input)
else:
input['permanentdisplay'] = input.codelocation
input['rolloverdisplay'] = input['location']
maplabel = kwargs.get('maplabel', None)
if maplabel and 'unsorted' in maplabel:
pass
else:
input = input.sort_values(by=input_field, ascending = False).reset_index(drop=True)
uniqloc = input.clustername.unique()
if len(uniqloc) < 5:
colors = self.scolors
else:
colors = self.lcolors
colors = itertools.cycle(colors)
dico_colors = {i: next(colors) for i in uniqloc}
input = input.copy()
if not 'colors' in input.columns:
input.loc[:,'colors'] = input['clustername'].map(dico_colors)#(pd.merge(input, country_col, on='location'))
if not isinstance(input_field, list):
input_field = [input_field]
else:
input_field = input_field
col2=which
when_beg = input[[col2,'date']].date.min()
when_end = input[[col2,'date']].date.max()
if when:
when_beg, when_end = extract_dates(when)
if when_end > input[[col2,'date']].date.max():
when_end = input[[col2,'date']].date.max()
if when_beg == dt.date(1, 1, 1):
when_beg = input[[col2,'date']].date.min()
if not isinstance(when_beg, dt.date):
raise CoaNoData("With your current cuts, there are no data to plot.")
if when_end <= when_beg:
print('Requested date below available one, take', when_beg)
when_end = when_beg
if when_beg > input[[col2,'date']].date.max() or when_end > input[[col2,'date']].date.max():
raise CoaNoData("No available data after "+str(input[[input_field[0],'date']].date.max()))
when_end_change = when_end
for i in input_field:
if input[i].isnull().all():
raise CoaTypeError("Sorry all data are NaN for " + i)
else:
when_end_change = min(when_end_change,CocoDisplay.changeto_nonull_date(input, when_end, i))
if func.__name__ not in ['pycoa_date_plot', 'pycoa_plot', 'pycoa_scrollingmenu', 'pycoa_spiral_plot','pycoa_yearly_plot']:
if len(input_field) > 1:
print(str(input_field) + ' is dim = ' + str(len(input_field)) + '. No effect with ' + func.__name__ + '! Take the first input: ' + input_field[0])
input_field = input_field[0]
if when_end_change != when_end:
when_end = when_end_change
self.when_beg = when_beg
self.when_end = when_end
input = input.loc[(input['date'] >= self.when_beg) & (input['date'] <= self.when_end)]
title_temporal = ' (' + 'between ' + when_beg.strftime('%d/%m/%Y') + ' and ' + when_end.strftime('%d/%m/%Y') + ')'
if func.__name__ not in ['pycoa_date_plot', 'pycoa_plot', 'pycoa_scrollingmenu', 'pycoa_spiral_plot','pycoa_yearly_plot']:
title_temporal = ' (' + when_end.strftime('%d/%m/%Y') + ')'
title_option=''
if option:
if 'sumallandsmooth7' in option:
option.remove('sumallandsmooth7')
option += ['sumall','smooth7']
title_option = ' (option: ' + str(option)+')'
input_field_tostring = str(input_field).replace('[', '').replace(']', '').replace('\'', '')
whichtitle = which
if 'pop' in input_field_tostring:
whichtitle = input_field_tostring.replace('weekly ','').replace('daily ','')
if 'daily' in input_field_tostring:
titlefig = whichtitle + ', ' + 'day to day difference' + title_option
elif 'weekly' in input_field_tostring:
titlefig = whichtitle + ', ' + 'week to week difference' + title_option
else:
if 'cur_' in which or 'idx_' in which:
#titlefig = which + ', ' + 'current ' + which.replace('cur_','').replace('idx_','')+ title_option
titlefig = whichtitle + ', current value' + title_option
else:
titlefig = whichtitle + ', cumulative'+ title_option
if title:
title = title
else:
title = titlefig
self.uptitle = title
textcopyright = kwargs.get('textcopyright', None)
if textcopyright:
textcopyright = '©pycoa.fr ' + textcopyright + title_temporal
kwargs.pop('textcopyright')
else:
textcopyright = '©pycoa.fr data from: {}'.format(self.database_name)+' '+title_temporal
self.subtitle = textcopyright
kwargs['title'] = title+title_temporal
return func(self, input, input_field, **kwargs)
return wrapper
@decowrapper
def pycoa_resume_data(self, input, input_field, **kwargs):
loc=list(input['clustername'].unique())
input['cases'] = input[input_field]
resumetype = kwargs.get('resumetype','spiral')
if resumetype == 'spiral':
dspiral={i:CocoDisplay.spiral(input.loc[ (input.clustername==i) &
(input.date >= self.when_beg) &
(input.date <= self.when_end)].sort_values(by='date')) for i in loc}
input['resume']=input['clustername'].map(dspiral)
elif resumetype == 'spark':
spark={i:CocoDisplay.sparkline(input.loc[ (input.clustername==i) &
(input.date >= self.when_beg) &
(input.date <= self.when_end)].sort_values(by='date')) for i in loc}
input['resume']=input['clustername'].map(spark)
else:
            raise CoaError("pycoa_resume_data: resumetype must be 'spiral' or 'spark'")
input = input.loc[input.date==input.date.max()].reset_index(drop=True)
def path_to_image_html(path):
return '<img src="'+ path + '" width="60" >'
input=input.drop(columns=['permanentdisplay','rolloverdisplay','colors','cases'])
input=input.apply(lambda x: x.round(2) if x.name in [input_field,'daily','weekly'] else x)
if isinstance(input['location'][0], list):
col=[i for i in list(input.columns) if i not in ['clustername','location','codelocation']]
col.insert(0,'clustername')
input = input[col]
input=input.set_index('clustername')
else:
input = input.drop(columns='clustername')
input=input.set_index('location')
return input.to_html(escape=False,formatters=dict(resume=path_to_image_html))
''' DECORATORS FOR PLOT: DATE, VERSUS, SCROLLINGMENU '''
def decoplot(func):
"""
decorator for plot purpose
"""
@wraps(func)
def inner_plot(self, input = None, input_field = None, **kwargs):
mode = kwargs.get('mode', None)
if mode:
mode = mode
else:
mode = self.dvisu_default['mode']
if mode not in self.available_modes:
                raise CoaTypeError('Unknown mode. Available modes: ' + str(self.available_modes))
kwargs['mode'] = mode
if 'location' in input.columns:
location_ordered_byvalues = list(
input.loc[input.date == self.when_end].sort_values(by=input_field, ascending=False)['clustername'].unique())
input = input.copy() # needed to avoid warning
input.loc[:,'clustername'] = pd.Categorical(input.clustername,
categories=location_ordered_byvalues, ordered=True)
input = input.sort_values(by=['clustername', 'date']).reset_index(drop = True)
if func.__name__ != 'pycoa_scrollingmenu' :
if len(location_ordered_byvalues) >= MAXCOUNTRIESDISPLAYED:
input = input.loc[input.clustername.isin(location_ordered_byvalues[:MAXCOUNTRIESDISPLAYED])]
list_max = []
for i in input_field:
list_max.append(max(input.loc[input.clustername.isin(location_ordered_byvalues)][i]))
if len([x for x in list_max if not np.isnan(x)]) > 0:
amplitude = (np.nanmax(list_max) - np.nanmin(list_max))
if amplitude > 10 ** 4:
self.ax_type.reverse()
if func.__name__ == 'pycoa_scrollingmenu' :
if isinstance(input_field,list):
if len(input_field) > 1:
print(str(input_field) + ' is dim = ' + str(len(input_field)) + '. No effect with ' + func.__name__ + '! Take the first input: ' + input_field[0])
input_field = input_field[0]
if self.dbld[self.database_name][1] == 'nation' and self.dbld[self.database_name][0] != 'WW':
func.__name__ = 'pycoa_date_plot'
return func(self, input, input_field, **kwargs)
return inner_plot
''' PLOT VERSUS '''
@decowrapper
@decoplot
def pycoa_plot(self, input = None, input_field = None ,**kwargs):
'''
-----------------
Create a versus plot according to arguments.
See help(pycoa_plot).
Keyword arguments
-----------------
        - input = None : if None take first element. A DataFrame with a Pycoa structure is mandatory
|location|date|Variable desired|daily|cumul|weekly|codelocation|clustername|permanentdisplay|rolloverdisplay|
- input_field = if None take second element. It should be a list dim=2. Moreover the 2 variables must be present
in the DataFrame considered.
        - plot_height = width_height_default[1]
- plot_width = width_height_default[0]
- title = None
- textcopyright = default
- mode = mouse
- cursor_date = None if True
- orientation = horizontal
        - when : default min and max according to the input DataFrame.
Dates are given under the format dd/mm/yyyy.
when format [dd/mm/yyyy : dd/mm/yyyy]
if [:dd/mm/yyyy] min date up to
if [dd/mm/yyyy:] up to max date
'''
if len(input_field) != 2:
raise CoaTypeError('Two variables are needed to plot a versus chart ... ')
panels = []
cases_custom = CocoDisplay.rollerJS()
if self.get_listfigures():
self.set_listfigures([])
listfigs=[]
for axis_type in self.ax_type:
standardfig = self.standardfig( x_axis_label = input_field[0], y_axis_label = input_field[1],
y_axis_type = axis_type, **kwargs )
standardfig.add_tools(HoverTool(
tooltips=[('Location', '@rolloverdisplay'), ('date', '@date{%F}'),
(input_field[0], '@{casesx}' + '{custom}'),
(input_field[1], '@{casesy}' + '{custom}')],
formatters={'location': 'printf', '@{casesx}': cases_custom, '@{casesy}': cases_custom,
'@date': 'datetime'}, mode = kwargs['mode'],
point_policy="snap_to_data")) # ,PanTool())
for loc in input.clustername.unique():
pandaloc = input.loc[input.clustername == loc].sort_values(by='date', ascending='True')
pandaloc.rename(columns={input_field[0]: 'casesx', input_field[1]: 'casesy'}, inplace=True)
standardfig.line(x='casesx', y='casesy',
source=ColumnDataSource(pandaloc), legend_label=pandaloc.clustername.iloc[0],
color=pandaloc.colors.iloc[0], line_width=3, hover_line_width=4)
standardfig.legend.label_text_font_size = "12px"
panel = Panel(child=standardfig, title=axis_type)
panels.append(panel)
standardfig.legend.background_fill_alpha = 0.6
standardfig.legend.location = "top_left"
listfigs.append(standardfig)
CocoDisplay.bokeh_legend(standardfig)
self.set_listfigures(listfigs)
tabs = Tabs(tabs=panels)
return tabs
''' DATE PLOT '''
@decowrapper
@decoplot
def pycoa_date_plot(self, input = None, input_field = None, **kwargs):
'''
-----------------
Create a date plot according to arguments. See help(pycoa_date_plot).
Keyword arguments
-----------------
        - input = None : if None take first element. A DataFrame with a Pycoa structure is mandatory
|location|date|Variable desired|daily|cumul|weekly|codelocation|clustername|permanentdisplay|rolloverdisplay|
- input_field = if None take second element could be a list
        - plot_height = width_height_default[1]
- plot_width = width_height_default[0]
- title = None
- textcopyright = default
- mode = mouse
- guideline = False
- cursor_date = None if True
- orientation = horizontal
        - when : default min and max according to the input DataFrame.
Dates are given under the format dd/mm/yyyy.
when format [dd/mm/yyyy : dd/mm/yyyy]
if [:dd/mm/yyyy] min date up to
if [dd/mm/yyyy:] up to max date
'''
guideline = kwargs.get('guideline',self.dvisu_default['guideline'])
panels = []
listfigs = []
cases_custom = CocoDisplay.rollerJS()
if isinstance(input['rolloverdisplay'][0],list):
input['rolloverdisplay'] = input['clustername']
for axis_type in self.ax_type:
standardfig = self.standardfig( y_axis_type = axis_type, x_axis_type = 'datetime',**kwargs)
i = 0
r_list=[]
maxou=-1000
lcolors = iter(self.lcolors)
line_style = ['solid', 'dashed', 'dotted', 'dotdash','dashdot']
for val in input_field:
for loc in list(input.clustername.unique()):
input_filter = input.loc[input.clustername == loc].reset_index(drop = True)
src = ColumnDataSource(input_filter)
leg = input_filter.clustername[0]
#leg = input_filter.permanentdisplay[0]
if len(input_field)>1:
leg = input_filter.permanentdisplay[0] + ', ' + val
if len(list(input.clustername.unique())) == 1:
color = next(lcolors)
else:
color = input_filter.colors[0]
r = standardfig.line(x = 'date', y = val, source = src,
color = color, line_width = 3,
legend_label = leg,
hover_line_width = 4, name = val, line_dash=line_style[i%4])
r_list.append(r)
maxou=max(maxou,np.nanmax(input_filter[val].values))
i += 1
for r in r_list:
label = r.name
tooltips = [('Location', '@rolloverdisplay'), ('date', '@date{%F}'), (r.name, '@$name{0,0.0}')]
formatters = {'location': 'printf', '@date': 'datetime', '@name': 'printf'}
hover=HoverTool(tooltips = tooltips, formatters = formatters, point_policy = "snap_to_data", mode = kwargs['mode'], renderers=[r]) # ,PanTool())
standardfig.add_tools(hover)
if guideline:
cross= CrosshairTool()
standardfig.add_tools(cross)
if axis_type == 'linear':
if maxou < 1e4 :
standardfig.yaxis.formatter = BasicTickFormatter(use_scientific=False)
standardfig.legend.label_text_font_size = "12px"
panel = Panel(child=standardfig, title = axis_type)
panels.append(panel)
standardfig.legend.background_fill_alpha = 0.6
standardfig.legend.location = "top_left"
standardfig.legend.click_policy="hide"
standardfig.legend.label_text_font_size = '8pt'
if len(input_field) > 1 and len(input_field)*len(input.clustername.unique())>16:
standardfig.legend.visible=False
standardfig.xaxis.formatter = DatetimeTickFormatter(
days = ["%d/%m/%y"], months = ["%d/%m/%y"], years = ["%b %Y"])
CocoDisplay.bokeh_legend(standardfig)
listfigs.append(standardfig)
self.set_listfigures(listfigs)
tabs = Tabs(tabs = panels)
return tabs
''' SPIRAL PLOT '''
@decowrapper
@decoplot
def pycoa_spiral_plot(self, input = None, input_field = None, **kwargs):
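        '''
        -----------------
        Create a spiral plot (one revolution per year, band width proportional to
        the selected variable) for a single location. Accepts the same keyword
        arguments as pycoa_date_plot (see help(pycoa_date_plot)).
        '''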
guideline = kwargs.get('guideline',self.dvisu_default['guideline'])
panels = []
listfigs = []
if isinstance(input['rolloverdisplay'][0],list):
input['rolloverdisplay'] = input['clustername']
borne=300
kwargs.pop('plot_width')
standardfig = self.standardfig(y_axis_type = None, x_axis_type = None,
width=kwargs['plot_height'], x_range=[-borne, borne], y_range=[-borne, borne], match_aspect=True,**kwargs)
if len(input.clustername.unique()) > 1 :
print('Can only display spiral for ONE location. I took the first one:', input.clustername[0])
input = input.loc[input.clustername == input.clustername[0]].copy()
input['date']=pd.to_datetime(input["date"])
input["dayofyear"]=input.date.dt.dayofyear
input['year']=input.date.dt.year
input['cases'] = input[input_field]
K = 2*input[input_field].max()
        # drop leap days (Feb 29); finer tuning may be needed in the future
input = input.loc[~(input['date'].dt.month.eq(2) & input['date'].dt.day.eq(29))].reset_index(drop=True)
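        # Spiral geometry: the angle advances by 2*pi per year of data and the band
        # drawn around the baseline radius has a width proportional to the case count.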
input["dayofyear_angle"] = input["dayofyear"]*2 * np.pi/365
input["r_baseline"] = input.apply(lambda x : ((x["year"]-2020)*2 * np.pi + x["dayofyear_angle"])*K,axis=1)
size_factor = 16
input["r_cas_sup"] = input.apply(lambda x : x["r_baseline"] + 0.5*x[input_field]*size_factor,axis=1)
input["r_cas_inf"] = input.apply(lambda x : x["r_baseline"] - 0.5*x[input_field]*size_factor,axis=1)
radius = 200
def polar(theta,r,norm=radius/input["r_baseline"].max()):
x = norm*r*np.cos(theta)
y = norm*r*np.sin(theta)
return x,y
x_base,y_base=polar(input["dayofyear_angle"],input["r_baseline"])
x_cas_sup,y_cas_sup=polar(input["dayofyear_angle"],input["r_cas_sup"])
x_cas_inf,y_cas_inf=polar(input["dayofyear_angle"],input["r_cas_inf"])
xcol,ycol=[],[]
[ xcol.append([i,j]) for i,j in zip(x_cas_inf,x_cas_sup)]
[ ycol.append([i,j]) for i,j in zip(y_cas_inf,y_cas_sup)]
standardfig.patches(xcol,ycol,color='blue',fill_alpha = 0.5)
src = ColumnDataSource(data=dict(
x=x_base,
y=y_base,
date=input['date'],
cases=input['cases']
))
standardfig.line( x = 'x', y = 'y', source = src, legend_label = input.clustername[0],
line_width = 3, line_color = 'blue')
circle = standardfig.circle('x', 'y', size=2, source=src)
cases_custom = CocoDisplay.rollerJS()
hover_tool = HoverTool(tooltips=[('Cases', '@cases{0,0.0}'), ('date', '@date{%F}')],
formatters={'Cases': 'printf', '@{cases}': cases_custom, '@date': 'datetime'},
renderers=[circle],
point_policy="snap_to_data")
standardfig.add_tools(hover_tool)
outer_radius=250
[standardfig.annular_wedge(
x=0, y=0, inner_radius=0, outer_radius=outer_radius, start_angle=i*np.pi/6,\
end_angle=(i+1)*np.pi/6,fill_color=None,line_color='black',line_dash='dotted')
for i in range(12)]
label = ['January','February','March','April','May','June','July','August','September','October','November','December']
xr,yr = polar(np.linspace(0, 2 * np.pi, 13),outer_radius,1)
standardfig.text(xr[:-1], yr[:-1], label,text_font_size="9pt", text_align="center", text_baseline="middle")
standardfig.legend.background_fill_alpha = 0.6
standardfig.legend.location = "top_left"
standardfig.legend.click_policy="hide"
return standardfig
''' SCROLLINGMENU PLOT '''
@decowrapper
@decoplot
def pycoa_scrollingmenu(self, input = None, input_field = None, **kwargs):
'''
-----------------
Create a date plot, with a scrolling menu location, according to arguments.
See help(pycoa_scrollingmenu).
Keyword arguments
-----------------
        len(location) >= 2
        - input = None : if None take first element. A DataFrame with a Pycoa structure is mandatory
|location|date|Variable desired|daily|cumul|weekly|codelocation|clustername|permanentdisplay|rolloverdisplay|
- input_field = if None take second element could be a list
        - plot_height = width_height_default[1]
- plot_width = width_height_default[0]
- title = None
- textcopyright = default
- mode = mouse
        - guideline = False
- cursor_date = None if True
- orientation = horizontal
        - when : default min and max according to the input DataFrame.
Dates are given under the format dd/mm/yyyy.
when format [dd/mm/yyyy : dd/mm/yyyy]
if [:dd/mm/yyyy] min date up to
if [dd/mm/yyyy:] up to max date
'''
mode = kwargs.get('mode',self.dvisu_default['mode'])
guideline = kwargs.get('guideline',self.dvisu_default['guideline'])
        uniqloc = list(input.clustername.unique())
uniqloc.sort()
if 'location' in input.columns:
if len(uniqloc) < 2:
                raise CoaTypeError('What do you want me to do? You have selected only one location. '
                                   'There is no sense in using this method. See help.')
input = input[['date', 'clustername', input_field]]
input = input.sort_values(by='clustername', ascending = True).reset_index(drop=True)
mypivot = pd.pivot_table(input, index='date', columns='clustername', values=input_field)
column_order = uniqloc
mypivot = mypivot.reindex(column_order, axis=1)
source = ColumnDataSource(mypivot)
filter_data1 = mypivot[[uniqloc[0]]].rename(columns={uniqloc[0]: 'cases'})
src1 = ColumnDataSource(filter_data1)
filter_data2 = mypivot[[uniqloc[1]]].rename(columns={uniqloc[1]: 'cases'})
src2 = ColumnDataSource(filter_data2)
cases_custom = CocoDisplay.rollerJS()
hover_tool = HoverTool(tooltips=[('Cases', '@cases{0,0.0}'), ('date', '@date{%F}')],
formatters={'Cases': 'printf', '@{cases}': cases_custom, '@date': 'datetime'}, mode = mode,
point_policy="snap_to_data") # ,PanTool())
panels = []
for axis_type in self.ax_type:
standardfig = self.standardfig( y_axis_type = axis_type, x_axis_type = 'datetime', **kwargs)
standardfig.yaxis[0].formatter = PrintfTickFormatter(format = "%4.2e")
standardfig.add_tools(hover_tool)
if guideline:
cross= CrosshairTool()
standardfig.add_tools(cross)
def add_line(src, options, init, color):
s = Select(options = options, value = init)
r = standardfig.line(x = 'date', y = 'cases', source = src, line_width = 3, line_color = color)
li = LegendItem(label = init, renderers = [r])
s.js_on_change('value', CustomJS(args=dict(s0=source, s1=src, li=li),
code="""
var c = cb_obj.value;
var y = s0.data[c];
s1.data['cases'] = y;
li.label = {value: cb_obj.value};
s1.change.emit();
"""))
return s, li
s1, li1 = add_line(src1, uniqloc, uniqloc[0], self.scolors[0])
s2, li2 = add_line(src2, uniqloc, uniqloc[1], self.scolors[1])
standardfig.add_layout(Legend(items = [li1, li2]))
standardfig.legend.location = 'top_left'
layout = row(column(row(s1, s2), row(standardfig)))
panel = Panel(child = layout, title = axis_type)
panels.append(panel)
tabs = Tabs(tabs = panels)
label = standardfig.title
return tabs
''' YEARLY PLOT '''
@decowrapper
@decoplot
def pycoa_yearly_plot(self, input = None, input_field = None, **kwargs):
'''
-----------------
        Create a yearly plot according to arguments. See help(pycoa_yearly_plot).
Keyword arguments
-----------------
        - input = None : if None take first element. A DataFrame with a Pycoa structure is mandatory
|location|date|Variable desired|daily|cumul|weekly|codelocation|clustername|permanentdisplay|rolloverdisplay|
- input_field = if None take second element could be a list
        - plot_height = width_height_default[1]
- plot_width = width_height_default[0]
- title = None
- textcopyright = default
- mode = mouse
- guideline = False
- cursor_date = None if True
- orientation = horizontal
        - when : default min and max according to the input DataFrame.
Dates are given under the format dd/mm/yyyy.
when format [dd/mm/yyyy : dd/mm/yyyy]
if [:dd/mm/yyyy] min date up to
if [dd/mm/yyyy:] up to max date
'''
guideline = kwargs.get('guideline',self.dvisu_default['guideline'])
if len(input.clustername.unique()) > 1 :
print('Can only display yearly plot for ONE location. I took the first one:', input.clustername[0])
input = input.loc[input.clustername == input.clustername[0]].copy()
panels = []
listfigs = []
cases_custom = CocoDisplay.rollerJS()
input['date']=pd.to_datetime(input["date"])
        # drop leap days (Feb 29); finer tuning may be needed in the future
input = input.loc[~(input['date'].dt.month.eq(2) & input['date'].dt.day.eq(29))].reset_index(drop=True)
input = input.copy()
input.loc[:,'allyears']=input['date'].apply(lambda x : x.year)
input['allyears'] = input['allyears'].astype(int)
input.loc[:,'dayofyear']= input['date'].apply(lambda x : x.dayofyear)
allyears = list(input.allyears.unique())
if isinstance(input['rolloverdisplay'][0],list):
input['rolloverdisplay'] = input['clustername']
if len(input_field)>1:
            raise CoaError('Only one variable can be displayed')
else:
input_field=input_field[0]
for axis_type in self.ax_type:
standardfig = self.standardfig( y_axis_type = axis_type,**kwargs)
i = 0
r_list=[]
maxou=-1000
input['cases']=input[input_field]
line_style = ['solid', 'dashed', 'dotted', 'dotdash']
colors = itertools.cycle(self.lcolors)
for loc in list(input.clustername.unique()):
for year in allyears:
input_filter = input.loc[(input.clustername == loc) & (input['date'].dt.year.eq(year))].reset_index(drop = True)
src = ColumnDataSource(input_filter)
leg = loc + ' ' + str(year)
r = standardfig.line(x = 'dayofyear', y = input_field, source = src,
color = next(colors), line_width = 3,
legend_label = leg,
hover_line_width = 4, name = input_field)
maxou=max(maxou,np.nanmax(input_filter[input_field].values))
label = input_field
tooltips = [('Location', '@rolloverdisplay'), ('date', '@date{%F}'), ('Cases', '@cases{0,0.0}')]
formatters = {'location': 'printf', '@date': 'datetime', '@name': 'printf'}
hover=HoverTool(tooltips = tooltips, formatters = formatters, point_policy = "snap_to_data", mode = kwargs['mode']) # ,PanTool())
standardfig.add_tools(hover)
if guideline:
cross= CrosshairTool()
standardfig.add_tools(cross)
if axis_type == 'linear':
if maxou < 1e4 :
standardfig.yaxis.formatter = BasicTickFormatter(use_scientific=False)
standardfig.legend.label_text_font_size = "12px"
panel = Panel(child=standardfig, title = axis_type)
panels.append(panel)
standardfig.legend.background_fill_alpha = 0.6
standardfig.legend.location = "top_left"
standardfig.legend.click_policy="hide"
labelspd=input.loc[(input.allyears.eq(2021)) & (input.date.dt.day.eq(1))]
standardfig.xaxis.ticker = list(labelspd['dayofyear'].astype(int))
replacelabelspd = labelspd['date'].apply(lambda x: str(x.strftime("%b")))
#label_dict = dict(zip(input.loc[input.allyears.eq(2020)]['daymonth'],input.loc[input.allyears.eq(2020)]['date'].apply(lambda x: str(x.day)+'/'+str(x.month))))
standardfig.xaxis.major_label_overrides = dict(zip(list(labelspd['dayofyear'].astype(int)),list(replacelabelspd)))
CocoDisplay.bokeh_legend(standardfig)
listfigs.append(standardfig)
tooltips = [('Location', '@rolloverdisplay'), ('date', '@date{%F}'), (r.name, '@$name{0,0.0}')]
formatters = {'location': 'printf', '@date': 'datetime', '@name': 'printf'}
hover=HoverTool(tooltips = tooltips, formatters = formatters, point_policy = "snap_to_data", mode = kwargs['mode'], renderers=[r]) # ,PanTool())
standardfig.add_tools(hover)
if guideline:
cross= CrosshairTool()
standardfig.add_tools(cross)
self.set_listfigures(listfigs)
tabs = Tabs(tabs = panels)
return tabs
''' DECORATORS FOR HISTO VERTICAL, HISTO HORIZONTAL, PIE & MAP'''
def decohistomap(func):
"""
Decorator function used for histogram and map
"""
@wraps(func)
def inner_hm(self, input = None, input_field = None, **kwargs):
tile = kwargs.get('tile', self.dvisu_default['tile'])
maplabel = kwargs.get('maplabel', None)
if not isinstance(maplabel,list):
maplabel=[maplabel]
#if maplabel:
# maplabel = maplabel
if 'map' in func.__name__:
kwargs['maplabel'] = maplabel
orientation = kwargs.get('orientation', self.dvisu_default['orientation'])
cursor_date = kwargs.get('cursor_date', None)
#if orientation:
# kwargs['orientation'] = orientation
#kwargs['cursor_date'] = kwargs.get('cursor_date', self.dvisu_default['cursor_date'])
if isinstance(input['location'].iloc[0],list):
input['rolloverdisplay'] = input['clustername']
input = input.explode('location')
else:
input['rolloverdisplay'] = input['location']
uniqloc = input.clustername.unique()
geopdwd = input
if maplabel and 'unsorted' in maplabel:
pass
else:
geopdwd = geopdwd.sort_values(by=input_field, ascending = False).reset_index(drop=True)
started = geopdwd.date.min()
ended = geopdwd.date.max()
if cursor_date:
date_slider = DateSlider(title = "Date: ", start = started, end = ended,
value = ended, step=24 * 60 * 60 * 1000, orientation = orientation)
#wanted_date = date_slider.value_as_datetime.date()
#if func.__name__ == 'pycoa_mapfolium' or func.__name__ == 'pycoa_map' or func.__name__ == 'innerdecomap' or func.__name__ == 'innerdecopycoageo':
if func.__name__ in ['pycoa_mapfolium','pycoa_map','pycoageo' ,'pycoa_pimpmap']:
if isinstance(input.location.to_list()[0],list):
geom = self.location_geometry
geodic={loc:geom.loc[geom.location==loc]['geometry'].values[0] for loc in geopdwd.location.unique()}
geopdwd['geometry'] = geopdwd['location'].map(geodic)
else:
geopdwd = pd.merge(geopdwd, self.location_geometry, on='location')
kwargs['tile'] = tile
if self.iso3country in ['USA']:#['FRA','USA']
geo = copy.deepcopy(self.geo)
d = geo._list_translation
if func.__name__ != 'pycoa_mapfolium':
if any(i in list(geopdwd.codelocation.unique()) for i in d.keys()) \
or any(True for i in d.keys() if ''.join(list(geopdwd.codelocation.unique())).find(i)!=-1):
geo.set_dense_geometry()
kwargs.pop('tile')
else:
geo.set_main_geometry()
d = {}
new_geo = geo.get_data()[['name_'+self.granularity,'geometry']]
new_geo = new_geo.rename(columns={'name_'+self.granularity:'location'})
new_geo = new_geo.set_index('location')['geometry'].to_dict()
geopdwd['geometry'] = geopdwd['location'].map(new_geo)
geopdwd = gpd.GeoDataFrame(geopdwd, geometry=geopdwd.geometry, crs="EPSG:4326")
if func.__name__ == 'pycoa_histo':
pos = {}
                new = pd.DataFrame()
#!/usr/bin/env python
"""Tests for `qnorm` package."""
import unittest
import numpy as np
import pandas as pd
import qnorm
import tracemalloc
tracemalloc.start()
df1 = pd.DataFrame(
{
"C1": {"A": 5.0, "B": 2.0, "C": 3.0, "D": 4.0},
"C2": {"A": 4.0, "B": 1.0, "C": 4.0, "D": 2.0},
"C3": {"A": 3.0, "B": 4.0, "C": 6.0, "D": 8.0},
}
)
df1.to_csv("test.csv")
df1.to_hdf("test.hdf", key="qnorm", format="table", data_columns=True, mode="w")
df1.to_parquet("test.parquet")
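# Module-level fixtures: the same small frame is written to CSV, HDF and parquet
# so the incremental_quantile_normalize tests can read it back in chunks.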
class TestQnorm(unittest.TestCase):
def test_000_numpy(self):
"""
test numpy support
"""
arr = np.random.normal(size=(20, 2))
qnorm.quantile_normalize(arr)
def test_001_pandas(self):
"""
test pandas support
"""
df = pd.DataFrame(
{
"C1": {"A": 5.0, "B": 2.0, "C": 3.0, "D": 4.0},
"C2": {"A": 4.0, "B": 1.0, "C": 4.0, "D": 2.0},
"C3": {"A": 3.0, "B": 4.0, "C": 6.0, "D": 8.0},
}
)
qnorm.quantile_normalize(df)
def test_002_wiki(self):
"""
test the wiki example
https://en.wikipedia.org/wiki/Quantile_normalization
"""
df = pd.DataFrame(
{
"C1": {"A": 5.0, "B": 2.0, "C": 3.0, "D": 4.0},
"C2": {"A": 4.0, "B": 1.0, "C": 4.0, "D": 2.0},
"C3": {"A": 3.0, "B": 4.0, "C": 6.0, "D": 8.0},
}
)
result = np.array(
[
[5.66666667, 5.16666667, 2.0],
[2.0, 2.0, 3.0],
[3.0, 5.16666667, 4.66666667],
[4.66666667, 3.0, 5.66666667],
]
)
np.testing.assert_array_almost_equal(
qnorm.quantile_normalize(df).values, result
)
def test_003_no_change(self):
"""
no sorting should happen here
"""
arr = np.empty(shape=(20, 3))
for col in range(arr.shape[1]):
vals = np.arange(arr.shape[0])
np.random.shuffle(vals)
arr[:, col] = vals
qnorm_arr = qnorm.quantile_normalize(arr)
np.testing.assert_array_almost_equal(arr, qnorm_arr)
def test_004_double(self):
"""
if dtype is double, return double
"""
arr = np.random.normal(0, 1, size=(20, 3))
arr = arr.astype(np.float64)
qnorm_arr = qnorm.quantile_normalize(arr)
assert qnorm_arr.dtype == np.float64
def test_005_single(self):
"""
if dtype is single, return single
"""
arr = np.random.normal(0, 1, size=(20, 3))
arr = arr.astype(np.float32)
qnorm_arr = qnorm.quantile_normalize(arr)
assert qnorm_arr.dtype == np.float32
def test_006_target(self):
"""
test if the target is used instead of the qnorm values
"""
arr = np.array([np.arange(0, 10), np.arange(0, 10)]).T
np.random.shuffle(arr)
target = np.arange(10, 20)
qnorm_arr = qnorm.quantile_normalize(arr, target=target)
for val in target:
assert (
val in qnorm_arr[:, 0] and val in qnorm_arr[:, 1]
), f"value {val} not in qnorm array"
def test_007_target_notsorted(self):
"""
make sure an unsorted target gets sorted first
"""
arr = np.array([np.arange(0, 10), np.arange(0, 10)]).T
np.random.shuffle(arr)
# take the reverse, which should be sorted by qnorm
target = np.arange(10, 20)[::-1]
qnorm_arr = qnorm.quantile_normalize(arr, target=target)
for val in target:
assert (
val in qnorm_arr[:, 0] and val in qnorm_arr[:, 1]
), f"value {val} not in qnorm array"
def test_008_short_target(self):
"""
test if an error is raised with a invalid sized target
"""
arr = np.array([np.arange(0, 10), np.arange(0, 10)]).T
target = np.arange(10, 15)
self.assertRaises(ValueError, qnorm.quantile_normalize, arr, target)
def test_009_wiki_ncpus(self):
"""
        test the wiki example with multiple cpus (ncpus=10)
"""
df = pd.DataFrame(
{
"C1": {"A": 5.0, "B": 2.0, "C": 3.0, "D": 4.0},
"C2": {"A": 4.0, "B": 1.0, "C": 4.0, "D": 2.0},
"C3": {"A": 3.0, "B": 4.0, "C": 6.0, "D": 8.0},
}
)
result = np.array(
[
[5.66666667, 5.16666667, 2.0],
[2.0, 2.0, 3.0],
[3.0, 5.16666667, 4.66666667],
[4.66666667, 3.0, 5.66666667],
]
)
np.testing.assert_array_almost_equal(
qnorm.quantile_normalize(df, ncpus=10).values, result
)
def test_010_axis_numpy(self):
"""
test numpy axis support
"""
arr = np.random.normal(size=(50, 4))
np.testing.assert_array_almost_equal(
qnorm.quantile_normalize(arr.T, axis=0).T,
qnorm.quantile_normalize(arr, axis=1),
)
np.testing.assert_array_almost_equal(
qnorm.quantile_normalize(arr, axis=1),
qnorm.quantile_normalize(arr.T, axis=0).T,
)
def test_011_axis_pandas(self):
"""
        test pandas axis support
"""
df = pd.DataFrame(
{
"C1": {"A": 5.0, "B": 2.0, "C": 3.0, "D": 4.0},
"C2": {"A": 4.0, "B": 1.0, "C": 4.0, "D": 2.0},
"C3": {"A": 3.0, "B": 4.0, "C": 6.0, "D": 8.0},
}
)
np.testing.assert_array_almost_equal(
qnorm.quantile_normalize(df.T, axis=0).T,
qnorm.quantile_normalize(df, axis=1),
)
np.testing.assert_array_almost_equal(
qnorm.quantile_normalize(df, axis=1),
qnorm.quantile_normalize(df.T, axis=0).T,
)
def test_012_from_csv(self):
"""
test the basic incremental_quantile_normalize functionality
"""
qnorm.incremental_quantile_normalize("test.csv", "test_out.csv")
df1 = pd.read_csv("test.csv", index_col=0, header=0)
df2 = pd.read_csv("test_out.csv", index_col=0, header=0)
np.testing.assert_almost_equal(
qnorm.quantile_normalize(df1), df2.values, decimal=5
)
def test_013_from_csv_rowchunk(self):
"""
test the incremental_quantile_normalize with rowchunks functionality
"""
df1 = pd.read_csv("test.csv", index_col=0, header=0)
for rowchunksize in range(1, 10):
qnorm.incremental_quantile_normalize(
"test.csv", "test_out.csv", rowchunksize=rowchunksize
)
df2 = pd.read_csv("test_out.csv", index_col=0, header=0)
np.testing.assert_almost_equal(
qnorm.quantile_normalize(df1), df2.values, decimal=5
)
def test_014_from_csv_colchunk(self):
"""
test the incremental_quantile_normalize with colchunks functionality
"""
df1 = pd.read_csv("test.csv", index_col=0, header=0)
for colchunksize in range(1, 10):
qnorm.incremental_quantile_normalize(
"test.csv", "test_out.csv", colchunksize=colchunksize
)
df2 = pd.read_csv("test_out.csv", index_col=0, header=0)
np.testing.assert_almost_equal(
qnorm.quantile_normalize(df1), df2.values, decimal=5
)
def test_015_from_csv_colrowchunk(self):
"""
test the incremental_quantile_normalize with both row and colchunks
"""
df1 = pd.read_csv("test.csv", index_col=0, header=0)
for colchunksize in range(1, 10):
for rowchunksize in range(1, 10):
qnorm.incremental_quantile_normalize(
"test.csv",
"test_out.csv",
rowchunksize=rowchunksize,
colchunksize=colchunksize,
)
df2 = pd.read_csv("test_out.csv", index_col=0, header=0)
np.testing.assert_almost_equal(
qnorm.quantile_normalize(df1), df2.values, decimal=5
)
def test_016_from_csv_largefile(self):
"""
test whether or not incremental_quantile_normalize works with a larger
random file
"""
np.random.seed(42)
df1 = pd.DataFrame(index=range(5000), columns=range(100))
df1[:] = np.random.randint(0, 100, size=df1.shape)
df1.to_csv("test_large.csv")
qnorm.incremental_quantile_normalize(
"test_large.csv",
"test_large_out.csv",
rowchunksize=11,
colchunksize=11,
)
df2 = pd.read_csv("test_large_out.csv", index_col=0, header=0)
np.testing.assert_almost_equal(
qnorm.quantile_normalize(df1), df2.values, decimal=4
)
def test_017_from_hdf(self):
"""
test the basic incremental_quantile_normalize functionality
"""
qnorm.incremental_quantile_normalize("test.hdf", "test_out.hdf")
df1 = pd.read_hdf("test.hdf", index_col=0, header=0)
df2 = pd.read_hdf("test_out.hdf", index_col=0, header=0)
np.testing.assert_almost_equal(
qnorm.quantile_normalize(df1), df2.values, decimal=5
)
def test_018_from_hdf_rowchunk(self):
"""
test the incremental_quantile_normalize with rowchunks functionality
"""
df1 = pd.read_hdf("test.hdf", index_col=0, header=0)
for rowchunksize in range(1, 10):
qnorm.incremental_quantile_normalize(
"test.hdf", "test_out.hdf", rowchunksize=rowchunksize
)
df2 = pd.read_hdf("test_out.hdf", index_col=0, header=0)
np.testing.assert_almost_equal(
qnorm.quantile_normalize(df1), df2.values, decimal=5
)
def test_019_from_hdf_colchunk(self):
"""
test the incremental_quantile_normalize with colchunks functionality
"""
df1 = pd.read_hdf("test.hdf", index_col=0, header=0)
for colchunksize in range(1, 10):
qnorm.incremental_quantile_normalize(
"test.hdf", "test_out.hdf", colchunksize=colchunksize
)
df2 = pd.read_hdf("test_out.hdf", index_col=0, header=0)
np.testing.assert_almost_equal(
qnorm.quantile_normalize(df1), df2.values, decimal=5
)
def test_020_from_hdf_colrowchunk(self):
"""
test the incremental_quantile_normalize with both row and colchunks
"""
df1 = pd.read_hdf("test.hdf", index_col=0, header=0)
for colchunksize in range(1, 10):
for rowchunksize in range(1, 10):
qnorm.incremental_quantile_normalize(
"test.hdf",
"test_out.hdf",
rowchunksize=rowchunksize,
colchunksize=colchunksize,
)
df2 = pd.read_hdf("test_out.hdf", index_col=0, header=0)
np.testing.assert_almost_equal(
qnorm.quantile_normalize(df1), df2.values, decimal=5
)
def test_021_from_hdf_largefile(self):
"""
test whether or not incremental_quantile_normalize works with a larger
random file
"""
np.random.seed(42)
df1 = pd.DataFrame(
index=range(5000),
columns=["sample" + str(col) for col in range(100)],
dtype=int,
)
df1[:] = np.random.randint(0, 100, size=df1.shape)
df1.to_hdf(
"test_large.hdf", key="qnorm", format="table", data_columns=True
)
qnorm.incremental_quantile_normalize(
"test_large.hdf",
"test_large_out.hdf",
rowchunksize=11,
colchunksize=11,
)
df2 = pd.read_hdf("test_large_out.hdf", index_col=0, header=0)
np.testing.assert_almost_equal(
qnorm.quantile_normalize(df1), df2.values, decimal=4
)
def test_022(self):
"""
Test another array, not just wiki example.
"""
df = pd.DataFrame(
{
"C1": {
"A": 2.0,
"B": 2.0,
"C": 2.0,
"D": 2.0,
"E": 6.0,
"F": 1.0,
},
"C2": {
"A": 2.0,
"B": 2.0,
"C": 1.0,
"D": 3.5,
"E": 5.0,
"F": 1.0,
},
}
)
np.testing.assert_almost_equal(
qnorm.quantile_normalize(df).values,
np.array(
[
[2.0625, 2.0],
[2.0625, 2.0],
[2.0625, 1.25],
[2.0625, 2.75],
[5.5, 5.5],
[1.0, 1.25],
]
),
)
def test_023_from_parquet(self):
"""
test the basic incremental_quantile_normalize functionality
"""
qnorm.incremental_quantile_normalize("test.parquet", "test_out.parquet")
df1 = pd.read_parquet("test.parquet")
df2 = pd.read_parquet("test_out.parquet")
np.testing.assert_almost_equal(
qnorm.quantile_normalize(df1), df2.values, decimal=5
)
def test_024_from_parquet_rowchunk(self):
"""
test the incremental_quantile_normalize with rowchunks functionality
"""
df1 = pd.read_parquet("test.parquet")
for rowchunksize in range(1, 10):
qnorm.incremental_quantile_normalize(
"test.parquet", "test_out.parquet", rowchunksize=rowchunksize
)
df2 = pd.read_parquet("test_out.parquet")
np.testing.assert_almost_equal(
qnorm.quantile_normalize(df1), df2.values, decimal=5
)
def test_025_from_parquet_colchunk(self):
"""
test the incremental_quantile_normalize with colchunks functionality
"""
df1 = pd.read_parquet("test.parquet")
for colchunksize in range(1, 10):
qnorm.incremental_quantile_normalize(
"test.parquet", "test_out.parquet", colchunksize=colchunksize
)
df2 = pd.read_parquet("test_out.parquet")
np.testing.assert_almost_equal(
qnorm.quantile_normalize(df1), df2.values, decimal=5
)
def test_026_from_parquet_colrowchunk(self):
"""
test the incremental_quantile_normalize with both row and colchunks
"""
df1 = pd.read_parquet("test.parquet")
for colchunksize in range(1, 10):
for rowchunksize in range(1, 10):
qnorm.incremental_quantile_normalize(
"test.parquet",
"test_out.parquet",
rowchunksize=rowchunksize,
colchunksize=colchunksize,
)
                df2 = pd.read_parquet("test_out.parquet")
                np.testing.assert_almost_equal(
                    qnorm.quantile_normalize(df1), df2.values, decimal=5
                )
import numpy as np
import pandas as pd
from sklearn import preprocessing
from skimage.io import imread
from skimage.transform import resize
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.callbacks import EarlyStopping
with tf.device('/gpu:1'):
################################## Load Robot data ##################################################################
Arm2_CS_State = pd.read_csv('/home/kiyanoushs/KiyanoushCodes/NeedleInsertion/Data/Arm2_CS_new.csv', header=None)
Arm2_NS_State = pd.read_csv('/home/kiyanoushs/KiyanoushCodes/NeedleInsertion/Data/Arm2_NS_new.csv', header=None)
robot_state_train_input = Arm2_CS_State[0:50244][:]
print("Robot state input trainingset size: {}".format(robot_state_train_input.shape))
robot_state_train_label = Arm2_NS_State[0:50244][:]
print("Robot state label trainingset size: {}".format(robot_state_train_label.shape))
robot_state_test_input = Arm2_CS_State[50244:][:]
print("Robot state input testset size: {}".format(robot_state_test_input.shape))
robot_state_test_label = Arm2_NS_State[50244:][:]
print("Robot state label testset size: {}".format(robot_state_test_label.shape))
################################## Standardization ###################################################################
CS_train_names = robot_state_train_input.columns
NS_train_names = robot_state_train_label.columns
CS_test_names = robot_state_test_input.columns
NS_test_names = robot_state_test_label.columns
    # Fit separate scalers for inputs and labels: StandardScaler.fit() returns the
    # scaler itself, so re-using a single instance would leave both names bound to
    # the label-fitted statistics.
    input_Scaler = preprocessing.StandardScaler().fit(robot_state_train_input)
    output_Scaler = preprocessing.StandardScaler().fit(robot_state_train_label)
robot_state_train_input = input_Scaler.transform(robot_state_train_input)
robot_state_train_label = output_Scaler.transform(robot_state_train_label)
robot_state_test_input = input_Scaler.transform(robot_state_test_input)
robot_state_test_label = output_Scaler.transform(robot_state_test_label)
robot_state_train_input = pd.DataFrame(robot_state_train_input, columns=CS_train_names)
robot_state_train_label = pd.DataFrame(robot_state_train_label, columns=NS_train_names)
robot_state_test_input = pd.DataFrame(robot_state_test_input, columns=CS_test_names)
robot_state_test_label = pd.DataFrame(robot_state_test_label, columns=NS_test_names)
robot_state_train_input = np.array(robot_state_train_input)
robot_state_train_label = np.array(robot_state_train_label)
robot_state_test_input = np.array(robot_state_test_input)
robot_state_test_label = np.array(robot_state_test_label)
############################################### Load image data #####################################################
    X_train_filenames = pd.read_csv('/home/kiyanoushs/KiyanoushCodes/NeedleInsertion/Data/trainImageName.csv', header=None)
import warnings
import numpy as np
import pytest
import pandas.util._test_decorators as td
import pandas as pd
from pandas import (
DataFrame,
Series,
isna,
)
import pandas._testing as tm
class TestDataFrameCov:
def test_cov(self, float_frame, float_string_frame):
# min_periods no NAs (corner case)
expected = float_frame.cov()
result = float_frame.cov(min_periods=len(float_frame))
tm.assert_frame_equal(expected, result)
result = float_frame.cov(min_periods=len(float_frame) + 1)
assert isna(result.values).all()
# with NAs
frame = float_frame.copy()
frame["A"][:5] = np.nan
frame["B"][5:10] = np.nan
result = frame.cov(min_periods=len(frame) - 8)
expected = frame.cov()
expected.loc["A", "B"] = np.nan
expected.loc["B", "A"] = np.nan
        tm.assert_frame_equal(result, expected)
# -*- coding: utf-8 -*-
'''
Trains the example run of the OOS Markov-Switching Autoregression forecasting on Federal Reserve data.
Note that the t/t+1 prediction timing conventions are *not* the exact same between statsmodels' Autoregression,
Markov Autoregression, and Exponential Smoother, so walk-forward validation is handled in separate methods for the 3.
'''
import io
import logging
import pickle
from pathlib import Path
import boto3
import matplotlib.lines as mlines
import statsmodels.api as sm
from statsmodels.graphics.tsaplots import plot_acf
from statsmodels.tsa.holtwinters import ExponentialSmoothing
from statsmodels.tsa.stattools import adfuller
from pandas.plotting import register_matplotlib_converters
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import mean_squared_error
import pandas as pd
import numpy as np
import seaborn as sns
from matplotlib import pyplot as plt
# custom built class for OOS t+1 prediction
from MarkovExtension import MSARExtension
BUCKET = 'macro-forecasting1301' # s3 bucket name
DATA_SAMPLE_PERCENT = 0.825 # % of sample to use
VALIDATION_SAMPLE_PERCENT = 0.7 # fraction used for initial training; the remaining 30% is walk-forward validation
VAR = 'CLAIMSx' # unemployment claims
RECESSION_START = pd.Timestamp(2007, 1, 10) # NBER defined onset of recession period
class ClassicalModels():
'''Loads up series, standardizes.
Fits AR model, then a 2 regime Markov Switching model, then Exp Smoother,
saves validation set forecasts, models, and error metrics'''
def __init__(self, logger):
self.logger = logger
sns.set(style="white")
register_matplotlib_converters()
self.s3_client = boto3.client('s3')
self.graphics_path = Path(__file__).resolve().parents[2].joinpath('reports').resolve().joinpath('figures').resolve()
self.data_path = Path(__file__).resolve().parents[2].joinpath('data').resolve().joinpath('processed').resolve()
self.models_path = Path(__file__).resolve().parents[2].joinpath('models').resolve()
self.error_metrics_exp = {}
self.forecasts_exp = {}
self.EXP_models = {}
self.error_metrics_AR = {}
self.forecasts_AR = {}
self.AR_models = {}
self.error_metrics_Markov = {}
self.forecasts_Markov = {}
self.MKV_models = {}
self.scaler = StandardScaler()
def get_data(self):
'''Reads in csv from s3'''
obj = self.s3_client.get_object(Bucket=BUCKET, Key='features.csv')
self.features_df = pd.read_csv(io.BytesIO(obj['Body'].read()))
self.logger.info('loaded data...')
def save_model(self, pkl_name, item):
'''Helper function for saving the latest model after train/val'''
pth = Path(self.models_path, pkl_name).with_suffix('.pkl')
with open(pth, 'wb') as handle:
pickle.dump(item, handle)
def filter_data(self):
'''Removes the post-08 data up front.'''
nobs = len(self.features_df)
n_init_training_val = int(nobs * DATA_SAMPLE_PERCENT)
self.train_val_df = self.features_df.iloc[0:n_init_training_val, :]
self.endog = self.train_val_df[VAR]
# Get the number of initial training observations
self.nobs = len(self.endog)
self.n_init_training = int(self.nobs * VALIDATION_SAMPLE_PERCENT)
def examine_autocorr_stationary(self):
        '''The stationarity procedures applied by the authors of this dataset should
        already have made the series stationary. Visually examine the autocorrelation
        and confirm with an augmented Dickey-Fuller test.'''
fig = plot_acf(self.endog, lags=36)
pth = Path(self.graphics_path, 'acf_plot').with_suffix('.png')
fig.savefig(pth)
self.logger.info(
'plotted and saved png file in /reports/figures of autocorrelation plot of variable'
)
result = adfuller(self.endog)
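        # ADF null hypothesis: the series has a unit root (is non-stationary);
        # a p-value below 0.05 rejects the null.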
print('ADF Statistic: %f' % result[0])
print('p-value: %f' % result[1])
print('Critical Values:')
for key, value in result[4].items():
print('\t%s: %.3f' % (key, value))
if result[1] < 0.05:
print('series is stationary')
else:
print('series is still not stationary')
self.logger.info('calculated augmented Dickey-Fuller test for stationary')
def train_AR(self):
'''Trains state space AR process already integrated as baseline.
Optimizes lag number by calculating MSE on validation during walk-forward training.'''
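        # Walk-forward scheme: fit on the initial training window, forecast one step
        # ahead, append the next observation, re-fit, and repeat to the end of the
        # train/validation sample.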
def __train_one_lag(ll):
forecasts = {}
# Create model for initial training sample, fit parameters
training_endog = self.endog.iloc[:self.n_init_training]
training_endog_preprocessed = pd.DataFrame(self.scaler.fit_transform(training_endog.values.reshape(-1, 1)))
mod = sm.tsa.SARIMAX(training_endog_preprocessed, order=(ll, 0, 0), trend='c') # 1 lag, already stationary
res = mod.fit(disp=0)
# Save initial forecast
forecasts[self.train_val_df.iloc[self.n_init_training, 1]] = self.scaler.inverse_transform(
res.predict(
start=len(training_endog_preprocessed),
end=len(training_endog_preprocessed)
)
)[0]
# Step through the rest of the sample
for t in range(self.n_init_training, self.nobs-1):
# Update the results by appending the next observation
endog_preprocessed = pd.DataFrame(self.scaler.fit_transform(self.endog.iloc[0:t+1].values.reshape(-1, 1))) # re fit
mod = sm.tsa.SARIMAX(endog_preprocessed, order=(ll, 0, 0), trend='c')
res = mod.fit(disp=0) # re-fit
# Save the new set of forecasts, inverse the scaler
forecasts[self.train_val_df.iloc[t+1, 1]] = self.scaler.inverse_transform(res.predict(start=len(endog_preprocessed), end=len(endog_preprocessed)))[0]
# save the model at end of time series
if t == self.nobs-2:
self.AR_models['lag_'+str(ll)] = res
# Combine all forecasts into a dataframe
forecasts = pd.DataFrame(forecasts.items(), columns=['sasdate', 't_forecast'])
actuals = self.train_val_df.tail(forecasts.shape[0])[['sasdate', VAR]]
actuals.columns = ['sasdate', 't_actual']
self.SS_AR_forecasts = pd.merge(forecasts, actuals, on='sasdate', how='inner')
self.SS_AR_forecasts['sasdate'] = pd.to_datetime(self.SS_AR_forecasts['sasdate'])
# error storage
self.error_metrics_AR[ll] = mean_squared_error(self.SS_AR_forecasts['t_actual'], self.SS_AR_forecasts['t_forecast'])
# forecast storage
self.forecasts_AR['lag_'+str(ll)] = {
'df': self.SS_AR_forecasts
}
self.logger.info('completed training for AR model with lag: '+str(ll))
[__train_one_lag(lag_value) for lag_value in range(1, 13)]
self.save_model('AR_models', self.AR_models)
def train_MarkovSwitch_AR(self):
'''Trains Markov Switching autoregression on univariate series.
Allows for time varying covariance. Uses walk forward validation to tune lag order similar to AR.'''
def __train_one_lag(ll):
forecasts = {}
# Create model for initial training sample, fit parameters
training_endog = self.endog.iloc[:self.n_init_training]
training_endog_preprocessed = pd.DataFrame(self.scaler.fit_transform(training_endog.values.reshape(-1, 1)))
mod = sm.tsa.MarkovAutoregression(training_endog_preprocessed,
k_regimes=2,
order=ll,
switching_variance=False,
)
res = mod.fit()
res_extended = MSARExtension(res) # pass the trained model to OOS forecaster
yhat = res_extended.predict_out_of_sample()
# Save initial forecast
forecasts[self.train_val_df.iloc[self.n_init_training, 1]] = self.scaler.inverse_transform(yhat.ravel())[0]
# Step through the rest of the sample
for t in range(self.n_init_training, self.nobs-1):
# Update the results by appending the next observation
endog_preprocessed = pd.DataFrame(self.scaler.fit_transform(self.endog.iloc[0:t+1].values.reshape(-1, 1))) # re fit
mod = sm.tsa.MarkovAutoregression(endog_preprocessed,
k_regimes=2,
order=ll,
switching_variance=False
)
res = mod.fit()
res_extended = MSARExtension(res)
yhat = res_extended.predict_out_of_sample()
# Save the new set of forecasts, inverse the scaler
forecasts[self.train_val_df.iloc[t+1, 1]] = self.scaler.inverse_transform(yhat.ravel())[0]
# save the model at end of time series
if t == self.nobs-2:
self.MKV_models['lag_'+str(ll)] = res
# Combine all forecasts into a dataframe
forecasts = pd.DataFrame(forecasts.items(), columns=['sasdate', 't_forecast'])
actuals = self.train_val_df.tail(forecasts.shape[0])[['sasdate', VAR]]
actuals.columns = ['sasdate', 't_actual']
self.Markov_fcasts = pd.merge(forecasts, actuals, on='sasdate', how='inner').dropna()
self.Markov_fcasts['sasdate'] = pd.to_datetime(self.Markov_fcasts['sasdate'])
# error storage
self.error_metrics_Markov[ll] = mean_squared_error(self.Markov_fcasts['t_actual'], self.Markov_fcasts['t_forecast'])
# forecast storage
self.forecasts_Markov['lag_'+str(ll)] = {
'df': self.Markov_fcasts
}
self.logger.info('completed training for Markov Switching model with lag: '+str(ll))
[__train_one_lag(lag_value) for lag_value in range(1, 13)]
self.save_model('MKV_models', self.MKV_models)
def train_exponential_smoother(self):
'''Trains Holt's Exponential Smoothing model. Allows for dampened trend, seasonality.'''
forecasts = {}
# Create model for initial training sample, fit parameters
training_endog = self.endog.iloc[:self.n_init_training]
training_endog_preprocessed = pd.DataFrame(self.scaler.fit_transform(training_endog.values.reshape(-1, 1)))
mod = ExponentialSmoothing(training_endog_preprocessed,
trend='add',
seasonal='add',
seasonal_periods=12
)
res = mod.fit()
# Save initial forecast
forecasts[self.train_val_df.iloc[self.n_init_training+1, 1]] = self.scaler.inverse_transform(
res.predict()
)[len(res.predict())-1]
# Step through the rest of the sample
for t in range(self.n_init_training, self.nobs-1):
# Update the results by appending the next observation
endog_preprocessed = pd.DataFrame(self.scaler.fit_transform(self.endog.iloc[0:t+1].values.reshape(-1, 1)))
dates = pd.DataFrame(self.train_val_df.iloc[0:t+1, 1].values.reshape(-1, 1))
mod = ExponentialSmoothing(endog_preprocessed,
trend='add',
seasonal='add',
seasonal_periods=12
)
res = mod.fit()
# Save the new set of forecasts, inverse the scaler
forecasts[self.train_val_df.iloc[t+1, 1]] = self.scaler.inverse_transform(res.predict())[len(res.predict())-1]
# save the model at end of time series
            if t == self.nobs-2:
self.EXP_models['exp_weigh_lag_struct'] = res
# Combine all forecasts into a dataframe
forecasts = pd.DataFrame(forecasts.items(), columns=['sasdate', 't_forecast'])
actuals = pd.concat([self.endog.tail(forecasts.shape[0]), dates.tail(forecasts.shape[0])], axis=1)
actuals.columns = ['t_actual', 'sasdate']
self.Expsmooth_fcasts = pd.merge(forecasts, actuals, on='sasdate', how='inner').dropna()
        self.Expsmooth_fcasts['sasdate'] = pd.to_datetime(self.Expsmooth_fcasts['sasdate'])
        # error and forecast storage (mirrors train_AR / train_MarkovSwitch_AR)
        self.error_metrics_exp['exp_weigh_lag_struct'] = mean_squared_error(
            self.Expsmooth_fcasts['t_actual'], self.Expsmooth_fcasts['t_forecast'])
        self.forecasts_exp['exp_weigh_lag_struct'] = {'df': self.Expsmooth_fcasts}
        self.save_model('EXP_models', self.EXP_models)
import numpy as np
import pandas as pd
import cupy as cp
from numpy.lib import index_tricks
from cupyx.scipy.special import ndtr
from cupy.core import internal
from cupy.linalg import inv
from numpy.linalg import inv
from scipy.stats import truncnorm
from scipy.stats import norm
from scipy.stats import invgamma
from scipy.stats import gamma
from numpy.random import random
from scipy.stats import multivariate_normal
import copy
from data import Data
from initialize import Initialize
from PMCMC import PMCMC
import timeit
start = timeit.default_timer()
# model
### A: without determinants
### B: determinants on mean
### C: determinants on variance
### D: determinants on mean and variance
model = 'D'
#method
### PMCMC: Particle Metropolis Within Gibbs sampler (choose gpu parallel computation)
### TK: Two_parametrization method (Tsiona and Kunmbhaker(2014))
### DA: data_augmentation
method = 'PMCMC'
H = 10000 #number of particles
S = 11000 # simulation length
N = 100 # number of individual
T = 10 # time period
data_name ='group6'
transient_determinants = ['ROA']
persistent_determinants = ['E/A']
#data
if data_name =="":
y,x,w,pi = Data().simulate(N=100,T=10)
else:
y,x,w,pi = Data().preprcessing(data_name, persistent_determinants, transient_determinants)
#run
if method == 'PMCMC':
estimator = PMCMC(y,x,w,pi,H,gpu=True)
elif method == 'TK':
estimator = TK(y,x,w,pi)
elif method == 'DA':
estimator = DA(y,x,w,pi)
s_beta, s_xi, s_delta, s_z, s_gamma, s_sigma_alpha_sqr, s_sigma_v_sqr, s_alpha,s_eta, s_u = estimator.run(S,N,T,model)
#store results
name = 'sf2_with_panel' +'_'+ data_name +'_'+ 'model' + model + '_' + 'gpuH' + str(H) + '_12y_4_EAineta_ROAinu'
# the estimator returns draws named s_*; discard the first 1000 draws as burn-in
pd.DataFrame(s_beta[1000:,:]).to_csv(name +'_'+'beta')
pd.DataFrame(s_xi[1000:,:]).to_csv(name +'_'+'xi')
pd.DataFrame(s_delta[1000:,:]).to_csv(name +'_'+'delta')
pd.DataFrame(s_gamma[1000:,:]).to_csv(name +'_'+'gamma')
pd.DataFrame(s_z[1000:,:]).to_csv(name +'_'+'z')
pd.DataFrame(s_sigma_v_sqr[1000:]).to_csv(name +'_'+'sigma_v_sqr')
pd.DataFrame(s_sigma_alpha_sqr[1000:]).to_csv(name +'_'+'sigma_alpha_sqr')
pd.DataFrame(s_alpha[1000:,:]).to_csv(name +'_'+'alpha')
pd.DataFrame(s_eta[1000:,:]).to_csv(name +'_'+'eta')
# ----------------------------------------------------------------------------
# Copyright (c) 2017-2018, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
from sklearn.metrics import mean_squared_error, confusion_matrix
import pandas as pd
import numpy as np
import seaborn as sns
from scipy.stats import linregress, ttest_ind
import matplotlib.pyplot as plt
from statsmodels.formula.api import ols
from statsmodels.stats.anova import anova_lm
from itertools import combinations
from statsmodels.sandbox.stats.multicomp import multipletests
def _custom_palettes():
return {
'YellowOrangeBrown': 'YlOrBr',
'YellowOrangeRed': 'YlOrRd',
'OrangeRed': 'OrRd',
'PurpleRed': 'PuRd',
'RedPurple': 'RdPu',
'BluePurple': 'BuPu',
'GreenBlue': 'GnBu',
'PurpleBlue': 'PuBu',
'YellowGreen': 'YlGn',
'summer': 'summer_r',
'copper': 'copper_r',
'viridis': 'viridis_r',
'plasma': 'plasma_r',
'inferno': 'inferno_r',
'magma': 'magma_r',
'sirocco': sns.cubehelix_palette(
dark=0.15, light=0.95, as_cmap=True),
'drifting': sns.cubehelix_palette(
start=5, rot=0.4, hue=0.8, as_cmap=True),
'melancholy': sns.cubehelix_palette(
start=25, rot=0.4, hue=0.8, as_cmap=True),
'enigma': sns.cubehelix_palette(
start=2, rot=0.6, gamma=2.0, hue=0.7, dark=0.45, as_cmap=True),
'eros': sns.cubehelix_palette(start=0, rot=0.4, gamma=2.0, hue=2,
light=0.95, dark=0.5, as_cmap=True),
'spectre': sns.cubehelix_palette(
start=1.2, rot=0.4, gamma=2.0, hue=1, dark=0.4, as_cmap=True),
'ambition': sns.cubehelix_palette(start=2, rot=0.9, gamma=3.0, hue=2,
light=0.9, dark=0.5, as_cmap=True),
'mysteriousstains': sns.light_palette(
'baby shit green', input='xkcd', as_cmap=True),
'daydream': sns.blend_palette(
['egg shell', 'dandelion'], input='xkcd', as_cmap=True),
'solano': sns.blend_palette(
['pale gold', 'burnt umber'], input='xkcd', as_cmap=True),
'navarro': sns.blend_palette(
['pale gold', 'sienna', 'pine green'], input='xkcd', as_cmap=True),
'dandelions': sns.blend_palette(
['sage', 'dandelion'], input='xkcd', as_cmap=True),
'deepblue': sns.blend_palette(
['really light blue', 'petrol'], input='xkcd', as_cmap=True),
'verve': sns.cubehelix_palette(
start=1.4, rot=0.8, gamma=2.0, hue=1.5, dark=0.4, as_cmap=True),
'greyscale': sns.blend_palette(
['light grey', 'dark grey'], input='xkcd', as_cmap=True)}
def _regplot_from_dataframe(x, y, plot_style="whitegrid", arb=True,
color="grey"):
'''Seaborn regplot with true 1:1 ratio set by arb (bool).'''
sns.set_style(plot_style)
reg = sns.regplot(x, y, color=color)
plt.xlabel('True value')
plt.ylabel('Predicted value')
if arb is True:
x0, x1 = reg.axes.get_xlim()
y0, y1 = reg.axes.get_ylim()
lims = [min(x0, y0), max(x1, y1)]
reg.axes.plot(lims, lims, ':k')
return reg
def _lmplot_from_dataframe(metadata, column, predicted_column, group_by,
plot_style="whitegrid"):
sns.set_style(plot_style)
g = sns.lmplot(column, predicted_column, data=metadata,
hue=group_by, fit_reg=False,
scatter_kws={"marker": ".", "s": 100}, legend=False)
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
return g
def _boxplot_from_dataframe(metadata, column, dep, group_by,
plot_style="whitegrid"):
sns.set_style(plot_style)
ax = sns.boxplot(x=column, y=dep, hue=group_by, data=metadata)
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
plt.ylim(metadata[dep].min(), metadata[dep].max())
return ax
def _clustermap_from_dataframe(table, metadata, group_by, column,
metric='correlation', method='weighted',
plot_style="whitegrid"):
sns.set_style(plot_style)
table = metadata[[group_by, column]].merge(
table, left_index=True, right_index=True)
table = table.groupby(by=[group_by, column]).median()
# remove any empty columns
table = table.loc[:, (table != 0).any(axis=0)]
# generate cluster map
g = sns.clustermap(table, metric=metric, method=method, z_score=1,
row_cluster=False)
plt.setp(g.ax_heatmap.yaxis.get_majorticklabels(), rotation=0)
plt.setp(g.ax_heatmap.xaxis.get_majorticklabels(), rotation=90)
return g
def _filter_metadata_to_table_ids(table, metadata, dep, time, group_by):
table = metadata[[dep, time, group_by]].merge(
table, left_index=True, right_index=True)
table = table[[dep, time, group_by]].dropna()
return table
def _two_way_anova(table, metadata, dep, time, group_by):
'''pd.DataFrame -> pd.DataFrame of AOV and OLS summary'''
# Prep data
table = _filter_metadata_to_table_ids(table, metadata, dep, time, group_by)
# remove whitespace from column names
table = table.rename(columns=lambda x: x.replace(' ', '_'))
dep = dep.replace(' ', '_')
time = time.replace(' ', '_')
group_by = group_by.replace(' ', '_')
# AOV
mod = ols(formula='{0} ~ {1} * {2}'.format(dep, time, group_by),
data=table).fit()
aov_table = anova_lm(mod, typ=2)
return aov_table, mod.summary2()
def _pairwise_stats(table, metadata, dep, time, group_by):
'''pd.DataFrame -> pd.DataFrame
Perform pairwise t-tests on all groups in group_by and time columns.
'''
# Prep data
table = _filter_metadata_to_table_ids(table, metadata, dep, time, group_by)
# find and store all valid subgroups' distributions of dependent var dep
distributions = []
for tp in table[time].unique():
tab_tp = table[table[time] == tp]
for group in tab_tp[group_by].unique():
tab_group = tab_tp[tab_tp[group_by] == group][dep]
distributions.append((tp, group, tab_group))
# compare all distributions
p_vals = []
for combo in combinations(distributions, 2):
try:
t, p = ttest_ind(combo[0][2], combo[1][2], nan_policy='raise')
p_vals.append(
((combo[0][0], combo[0][1]), (combo[1][0], combo[1][1]), t, p))
except ValueError:
pass
    result = pd.DataFrame(p_vals, columns=["Group A", "Group B", "t", "P"])
    # adjust for multiple comparisons with the imported multipletests helper
    # (Benjamini-Hochberg FDR)
    result['FDR P-value'] = multipletests(result['P'], method='fdr_bh')[1]
    return result
import networkx as nx
import community
import demon
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import pickle
import random
import re
from collections import Counter
def prune_pages(links, categories):
'''Remove pages dedicated to numbers and identifiers.
Parameters:
dict links: dictionary of the form str -> set(str), containing links
of Wikipedia pages from key to pages in the set.
dict categories: dictionary of the form str -> set(str), containing
Wikipedia categories for pages.
Return:
dict new_links: same as links, but all pages with "(number)" and "(identifier)"
in the title have been removed both as a key and inside any set.
dict categories: same as input categories, but with "(number)" and "(identifier)"
pages removed. NOTE: not the copy of input but the same thing, ie
original 'categories' gets modified.
'''
# New links to return
new_links = dict()
# Loop over links
for key in links:
if "(identifier)" in key or "(number)" in key:
# Remove page from categories also
del categories[key]
else:
new_set = set()
for link in links[key]:
# Skip unnecessary pages
if "(identifier)" in link or "(number)" in link: continue
else: new_set.add(link)
new_links[key] = new_set
return new_links, categories
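# Example (hypothetical toy dicts, for illustration only):
#   links = {"Pi (number)": {"Mathematics"},
#            "Mathematics": {"Pi (number)", "Algebra"}}
#   categories = {"Pi (number)": {"Numbers"}, "Mathematics": {"Maths"}, "Algebra": {"Maths"}}
#   links, categories = prune_pages(links, categories)
#   # -> links == {"Mathematics": {"Algebra"}} and "Pi (number)" is removed from categories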
def prune_categories(categories, prune_dates=False):
''' Remove wikipedia meta-categories from pages' categories.
Parameters:
dict categories: dictionary of the form str -> set(str) containing
            wikipedia categories for each page (pages are keys)
        bool prune_dates: if True, also prune categories related to dates.
(default False)
Return:
dict of the form str -> set(str), similar to input but with categories pruned
'''
to_return = dict() # New dict for pruned categories
# Regexes needed to prune
prune = re.compile(r"[Aa]rticle|[Pp]ages|Wiki|Use \w* dates|Use .*English|[Tt]emplate")
dates = re.compile(r"\d\d\d\d|century")
# Iterate over data
for page, cats in categories.items():
new_cats = set()
for cat in cats:
            # Skip category if matched by pruning regexes
if prune.search(cat): continue
if prune_dates and dates.search(cat): continue
else: new_cats.add(cat)
# Save pruned categories
to_return[page] = new_cats
return to_return
def make_random_as(G):
''' Create Erdos-Renyi and Barabasi-Albert random graph using given graph G.
Parameters:
nx.Graph G: Graph whose number of edges and nodes will be used for generation.
Return:
nx.Graph ER_graph, BA_graph: generated graphs.
'''
n = len(G.nodes) # number of nodes
l = len(G.edges) # number of edges
    m = int(l/n) # number of edges to add in preferential attachment
    p = l/(n*(n-1)) # probability for an edge to exist
ER_graph = nx.erdos_renyi_graph(n, p, seed=42, directed=True)
BA_graph = nx.barabasi_albert_graph(n, m, seed=42)
return ER_graph, BA_graph
def community_discovery(D):
''' Run several community discovery methods.
Parameters:
nx.DiGraph D: Graph to be studied.
Returns:
        dict results: results organized hierarchically -
            keys in results are 'k-clique', 'louvain' and 'demon', corresponding
to the methods used, in each there is a dictionary with keys:
'size_distribution': dict from sizes of communities to their counts
'communities': container of sets corresponding to communities
            'node_participation': dict from nodes to the number of communities they
            are in (except Louvain, which produces a disjoint partition)
'modularity': value of the modularity of partition (Louvain only)
            'maximal_community': the biggest community discovered
'''
G = D.to_undirected(as_view=True)
results = dict()
## K-clique
print("Starting K-clique...")
comm_kclique = nx.algorithms.community.k_clique_communities(G, k=15)
comm_kclique = list(comm_kclique)
print("K-cliques found.")
dist_kclique = Counter()
nums_kclique = Counter()
max_size = 0
max_kclique = []
for comm in comm_kclique:
size = len(comm)
dist_kclique[size] += 1
if size > max_size:
max_size = size
max_kclique = [comm]
elif size == max_size:
max_kclique.append(comm)
for node in G.nodes():
if node in comm: nums_kclique[node] += 1
print("K-clique results calculated")
results["k-clique"] = dict(size_distribution=dist_kclique, communities=comm_kclique,
node_participation=nums_kclique,
maximal_community=max_kclique)
## Louvain
print("Starting Louvain...")
partition = community.best_partition(G)
print("Louvain partition found.")
modularity_louvain = community.modularity(partition, G)
print("Louvain modularity calculated")
comm_louvain = dict()
for node, comm_id in partition.items():
if comm_id not in comm_louvain:
comm_louvain[comm_id] = set(node)
else:
comm_louvain[comm_id].add(node)
comm_louvain = list(comm_louvain.values())
dist_louvain = Counter()
max_size = 0
max_louvain = []
for comm in comm_louvain:
size = len(comm)
dist_louvain[size] += 1
if size > max_size:
max_size = size
max_louvain = [comm]
elif size == max_size:
max_louvain.append(comm)
print("Louvain results calculated.")
results["louvain"] = dict(size_distribution=dist_louvain, communities=comm_louvain,
modularity=modularity_louvain, maximal_community=max_louvain)
## Demon
print("Starting Demon...")
results_demon = dict()
for epsilon in [0.25, 0.5, 0.75]:
print("Calculating Demon for epsilon =", epsilon)
dm = demon.Demon(graph=G, epsilon=epsilon, min_community_size=4)
comm_demon = dm.execute()
print("Demon communities found.")
dist_demon = Counter()
nums_demon = Counter()
max_size = 0
max_demon = []
for comm in comm_demon:
size = len(comm)
dist_demon[size] += 1
if size > max_size:
max_size = size
max_demon = [comm]
elif size == max_size:
max_demon.append(comm)
for node in G.nodes():
if node in comm: nums_demon[node] += 1
results_demon[epsilon] =dict(size_distribution=dist_demon, communities=comm_demon,
node_participation=nums_demon,
maximal_community=max_demon)
print("Demon results calculated.")
results["demon"] = results_demon
return results
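# Example (hypothetical usage; `D` is assumed to be the Wikipedia nx.DiGraph):
#   results = community_discovery(D)
#   louvain_sizes = results["louvain"]["size_distribution"]   # Counter: size -> count
#   louvain_q = results["louvain"]["modularity"]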
def spreading(G, beta=0.05, gamma = 0.1, SIR=False, t_max=1000, patient_zero='Mathematics'):
''' Simulate SIS/SIR model on a graph.
Parameters:
nx.DiGraph G: graph to be used in simulation,
float beta: infection rate, between 0.0 and 1.0,
float gamma: recovery rate, between 0.0 and 1.0,
bool SIR: if True, simulate SIR process, else SIS process,
int t_max: run simulation until this timestamp,
object patient_zero: node to be used as the first infected
Returns:
        Counter infection_progress_s: number of nodes that became susceptible at time t
        Counter infection_progress_i: number of nodes that became infected at time t
        Counter infection_progress_r: number of nodes that became recovered at time t
Raises:
ValueError: if patient_zero was not found in the nodes of the graph
'''
# Initialize all nodes, find patient_zero
infection_started = False
for node in G.nodes:
if node == patient_zero:
G.nodes[node]['Infected'] = 'I'
infection_started = True
else:
G.nodes[node]['Infected'] = 'S'
if not infection_started:
raise ValueError("Patient zero node was not found")
# Initialize s,i,r counts at time t=0
infection_progress_i = Counter()
infection_progress_s = Counter()
infection_progress_r = Counter()
infection_progress_r[0] = 0
infection_progress_i[0] = 0
infection_progress_s[0] = len(G.nodes)
t = 1
while t<t_max:
to_infect = set()
for node, adj in G.adjacency():
            # Infected node infects its neighbours...
if G.nodes[node]['Infected'] == 'I':
# ... if they are susceptible and the chance allows
infects = {n for n in adj if G.nodes[n]['Infected'] == 'S'
and random.random()<beta}
to_infect.update(infects)
infection_progress_i[t] = len(to_infect)
for node in G.nodes():
# Infect the nodes
if node in to_infect:
G.nodes[node]['Infected'] = 'I'
infection_progress_s[t] -= 1
# 'Recover' the nodes with probability gamma
elif G.nodes[node]['Infected'] == 'I' and random.random() < gamma:
if SIR:
# Node is recovered
G.nodes[node]['Infected'] = 'R'
infection_progress_r[t] += 1
else:
# Node is susceptible
G.nodes[node]['Infected'] = 'S'
infection_progress_s[t] += 1
infection_progress_i[t] -= 1
# This ensures that there are values in counters for all t's
# (otherwise the plots might be cut)
infection_progress_s[t] += 0
infection_progress_i[t] += 0
infection_progress_r[t] += 0
t += 1
return infection_progress_s,infection_progress_i,infection_progress_r
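# Example (hypothetical small graph; parameter values are arbitrary):
#   G_demo = nx.gnp_random_graph(100, 0.05, seed=1, directed=True)
#   s, i, r = spreading(G_demo, beta=0.05, gamma=0.1, SIR=True, t_max=200, patient_zero=0)
#   # s[t], i[t], r[t] give the number of nodes that switched to S/I/R at step t.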
def spreading_experiment(G, name, beta, gamma, SIR, t_max, n_max):
'''Carry out simulations of spreading processes and plot results.
Parameters:
nx.Graph G - graph to simulate the spreading process,
str name - name of the graph to be used in plots,
float beta - infection rate of the process,
float gamma - recovery rate of the process,
bool SIR - if True, simulate SIR process, otherwise SIS,
int t_max - maximum number of timesteps to simulate,
int n_max - number of simulations to perform for averaging
Returns:
None, figure of the average simulation process is saved to disk
'''
s_av = Counter()
i_av = Counter()
r_av = Counter()
# First infected node is Mathematics for wiki network and 1 for random
patient_zero = "Mathematics" if name=="wiki" else 1
# Carry out n_max experiments
for n in range(n_max):
s,i,r = spreading(G, beta=beta, gamma=gamma, SIR=SIR, t_max=t_max, patient_zero=patient_zero)
# Update values in s, i, r counters, looping is necessary
# since Python summation operator would ignore negative counts
for key in s:
s_av[key] += s[key]
for key in i:
i_av[key] += i[key]
for key in r:
r_av[key] += r[key]
# Normalize by population and by amount of experiments
norm_const = len(G.nodes)*n_max
for key in s_av:
s_av[key] /= norm_const
for key in i_av:
i_av[key] /= norm_const
if SIR:
for key in r_av:
r_av[key] /= norm_const
# Make a new figure
fig = plt.figure()
# Take cumulative sums of s,i,r and add to figure
s_av = pd.Series(s_av).sort_index()
s_av.cumsum().plot(logy=False, marker='.')
i_av = pd.Series(i_av).sort_index()
i_av.cumsum().plot(logy=False, marker='.')
if SIR:
        r_av = pd.Series(r_av).sort_index()
        r_av.cumsum().plot(logy=False, marker='.')
    # NOTE: the output filename below is an assumption; the original save path is not shown.
    fig.savefig('spreading_{}_SIR-{}.png'.format(name, SIR))
import argparse
from collections import defaultdict
import cv2
from enum import Enum
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
from pathlib import Path
import seaborn as sns
import scipy.interpolate
import scipy.stats
import tensorflow as tf
from tqdm import tqdm
import edl
import models
parser = argparse.ArgumentParser()
parser.add_argument("--load-pkl", action='store_true',
help="Load predictions for a cached pickle file or \
recompute from scratch by feeding the data through \
trained models")
args = parser.parse_args()
class Model(Enum):
GroundTruth = "GroundTruth"
Dropout = "Dropout"
Ensemble = "Ensemble"
Evidential = "Evidential"
save_dir = "pretrained_models"
trained_models = {
Model.Dropout: [
"dropout/trial1.h5",
"dropout/trial2.h5",
"dropout/trial3.h5",
],
Model.Ensemble: [
"ensemble/trial1_*.h5",
"ensemble/trial2_*.h5",
"ensemble/trial3_*.h5",
],
Model.Evidential: [
"evidence/trial1.h5",
"evidence/trial2.h5",
"evidence/trial3.h5",
],
}
output_dir = "figs/depth"
def compute_predictions(batch_size=50, n_adv=9):
(x_in, y_in), (x_ood, y_ood) = load_data()
datasets = [(x_in, y_in, False), (x_ood, y_ood, True)]
df_pred_image = pd.DataFrame(
columns=["Method", "Model Path", "Input",
"Target", "Mu", "Sigma", "Adv. Mask", "Epsilon", "OOD"])
adv_eps = np.linspace(0, 0.04, n_adv)
for method, model_path_list in trained_models.items():
for model_i, model_path in enumerate(model_path_list):
full_path = os.path.join(save_dir, model_path)
model = models.load_depth_model(full_path, compile=False)
model_log = defaultdict(list)
print(f"Running {model_path}")
for x, y, ood in datasets:
# max(10,x.shape[0]//500-1)
for start_i in tqdm(np.arange(0, 3*batch_size, batch_size)):
inds = np.arange(start_i, min(start_i+batch_size, x.shape[0]-1))
x_batch = x[inds]/np.float32(255.)
y_batch = y[inds]/np.float32(255.)
if ood:
### Compute predictions and save
summary_to_add = get_prediction_summary(
method, model_path, model, x_batch, y_batch, ood)
df_pred_image = df_pred_image.append(summary_to_add, ignore_index=True)
else:
### Compute adversarial mask
# mask_batch = create_adversarial_pattern(model, tf.convert_to_tensor(x_batch), tf.convert_to_tensor(y_batch))
mask_batch = create_adversarial_pattern(model, x_batch, y_batch)
mask_batch = mask_batch.numpy().astype(np.int8)
for eps in adv_eps:
### Apply adversarial noise
x_batch += (eps * mask_batch.astype(np.float32))
x_batch = np.clip(x_batch, 0, 1)
### Compute predictions and save
summary_to_add = get_prediction_summary(
method, model_path, model, x_batch, y_batch, ood, mask_batch, eps)
df_pred_image = df_pred_image.append(summary_to_add, ignore_index=True)
return df_pred_image
def get_prediction_summary(method, model_path, model, x_batch, y_batch, ood, mask_batch=None, eps=0.0):
if mask_batch is None:
mask_batch = np.zeros_like(x_batch)
### Collect the predictions
mu_batch, sigma_batch = predict(method, model, x_batch)
mu_batch = np.clip(mu_batch, 0, 1)
sigma_batch = sigma_batch.numpy()
### Save the predictions to some dataframes for later analysis
summary = [{"Method": method.value, "Model Path": model_path,
"Input": x, "Target": y, "Mu": mu, "Sigma": sigma,
"Adv. Mask": mask, "Epsilon": eps, "OOD": ood}
for x,y,mu,sigma,mask in zip(x_batch, y_batch, mu_batch, sigma_batch, mask_batch)]
return summary
def df_image_to_pixels(df, keys=["Target", "Mu", "Sigma"]):
required_keys = ["Method", "Model Path"]
keys = required_keys + keys
key_types = {key: type(df[key].iloc[0]) for key in keys}
max_shape = max([np.prod(np.shape(df[key].iloc[0])) for key in keys])
contents = {}
for key in keys:
if np.prod(np.shape(df[key].iloc[0])) == 1:
contents[key] = np.repeat(df[key], max_shape)
else:
contents[key] = np.stack(df[key], axis=0).flatten()
df_pixel = pd.DataFrame(contents)
return df_pixel
def gen_cutoff_plot(df_image, eps=0.0, ood=False, plot=True):
print(f"Generating cutoff plot with eps={eps}, ood={ood}")
df = df_image[(df_image["Epsilon"]==eps) & (df_image["OOD"]==ood)]
df_pixel = df_image_to_pixels(df, keys=["Target", "Mu", "Sigma"])
df_cutoff = pd.DataFrame(
columns=["Method", "Model Path", "Percentile", "Error"])
for method, model_path_list in trained_models.items():
for model_i, model_path in enumerate(tqdm(model_path_list)):
df_model = df_pixel[(df_pixel["Method"]==method.value) & (df_pixel["Model Path"]==model_path)]
df_model = df_model.sort_values("Sigma", ascending=False)
percentiles = np.arange(100)/100.
cutoff_inds = (percentiles * df_model.shape[0]).astype(int)
df_model["Error"] = np.abs(df_model["Mu"] - df_model["Target"])
mean_error = [df_model[cutoff:]["Error"].mean()
for cutoff in cutoff_inds]
df_single_cutoff = pd.DataFrame({'Method': method.value, 'Model Path': model_path,
'Percentile': percentiles, 'Error': mean_error})
df_cutoff = df_cutoff.append(df_single_cutoff)
df_cutoff["Epsilon"] = eps
if plot:
print("Plotting cutoffs")
sns.lineplot(x="Percentile", y="Error", hue="Method", data=df_cutoff)
plt.savefig(os.path.join(output_dir, f"cutoff_eps-{eps}_ood-{ood}.pdf"))
plt.show()
sns.lineplot(x="Percentile", y="Error", hue="Model Path", style="Method", data=df_cutoff)
plt.savefig(os.path.join(output_dir, f"cutoff_eps-{eps}_ood-{ood}_trial.pdf"))
plt.show()
g = sns.FacetGrid(df_cutoff, col="Method", legend_out=False)
g = g.map_dataframe(sns.lineplot, x="Percentile", y="Error", hue="Model Path")#.add_legend()
plt.savefig(os.path.join(output_dir, f"cutoff_eps-{eps}_ood-{ood}_trial_panel.pdf"))
plt.show()
return df_cutoff
def gen_calibration_plot(df_image, eps=0.0, ood=False, plot=True):
print(f"Generating calibration plot with eps={eps}, ood={ood}")
df = df_image[(df_image["Epsilon"]==eps) & (df_image["OOD"]==ood)]
# df = df.iloc[::10]
df_pixel = df_image_to_pixels(df, keys=["Target", "Mu", "Sigma"])
df_calibration = pd.DataFrame(
columns=["Method", "Model Path", "Expected Conf.", "Observed Conf."])
for method, model_path_list in trained_models.items():
for model_i, model_path in enumerate(tqdm(model_path_list)):
df_model = df_pixel[(df_pixel["Method"]==method.value) & (df_pixel["Model Path"]==model_path)]
expected_p = np.arange(41)/40.
observed_p = []
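            # For each nominal confidence level p, take the per-pixel p-quantile of
            # Normal(Mu, Sigma) and record the fraction of targets falling below it;
            # a perfectly calibrated model gives observed == expected.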
for p in expected_p:
ppf = scipy.stats.norm.ppf(p, loc=df_model["Mu"], scale=df_model["Sigma"])
obs_p = (df_model["Target"] < ppf).mean()
# obs_p = 1/2 * ((df_model["Target"] < ppf).mean() + (df_model["Target"] < (2*df_model["Mu"]-ppf)).mean())
observed_p.append(obs_p)
df_single = pd.DataFrame({'Method': method.value, 'Model Path': model_path,
'Expected Conf.': expected_p, 'Observed Conf.': observed_p})
df_calibration = df_calibration.append(df_single)
df_truth = pd.DataFrame({'Method': Model.GroundTruth.value, 'Model Path': "",
'Expected Conf.': expected_p, 'Observed Conf.': expected_p})
df_calibration = df_calibration.append(df_truth)
df_calibration['Calibration Error'] = np.abs(df_calibration['Expected Conf.'] - df_calibration['Observed Conf.'])
df_calibration["Epsilon"] = eps
table = df_calibration.groupby(["Method", "Model Path"])["Calibration Error"].mean().reset_index()
table = pd.pivot_table(table, values="Calibration Error", index="Method", aggfunc=[np.mean, np.std, scipy.stats.sem])
if plot:
print(table)
table.to_csv(os.path.join(output_dir, "calib_errors.csv"))
print("Plotting confidence plots")
sns.lineplot(x="Expected Conf.", y="Observed Conf.", hue="Method", data=df_calibration)
plt.savefig(os.path.join(output_dir, f"calib_eps-{eps}_ood-{ood}.pdf"))
plt.show()
g = sns.FacetGrid(df_calibration, col="Method", legend_out=False)
g = g.map_dataframe(sns.lineplot, x="Expected Conf.", y="Observed Conf.", hue="Model Path")#.add_legend()
plt.savefig(os.path.join(output_dir, f"calib_eps-{eps}_ood-{ood}_panel.pdf"))
plt.show()
return df_calibration, table
def gen_adv_plots(df_image, ood=False):
print(f"Generating calibration plot with ood={ood}")
df = df_image[df_image["OOD"]==ood]
# df = df.iloc[::10]
df_pixel = df_image_to_pixels(df, keys=["Target", "Mu", "Sigma", "Epsilon"])
df_pixel["Error"] = np.abs(df_pixel["Mu"] - df_pixel["Target"])
df_pixel["Entropy"] = 0.5*np.log(2*np.pi*np.exp(1.)*(df_pixel["Sigma"]**2))
### Plot epsilon vs error per method
df = df_pixel.groupby([df_pixel.index, "Method", "Model Path", "Epsilon"]).mean().reset_index()
df_by_method = df_pixel.groupby(["Method", "Model Path", "Epsilon"]).mean().reset_index()
sns.lineplot(x="Epsilon", y="Error", hue="Method", data=df_by_method)
plt.savefig(os.path.join(output_dir, f"adv_ood-{ood}_method_error.pdf"))
plt.show()
### Plot epsilon vs uncertainty per method
sns.lineplot(x="Epsilon", y="Sigma", hue="Method", data=df_by_method)
plt.savefig(os.path.join(output_dir, f"adv_ood-{ood}_method_sigma.pdf"))
plt.show()
# df_by_method["Entropy"] = 0.5*np.log(2*np.pi*np.exp(1.)*(df_by_method["Sigma"]**2))
# sns.lineplot(x="Epsilon", y="Entropy", hue="Method", data=df_by_method)
# plt.savefig(os.path.join(output_dir, f"adv_ood-{ood}_method_entropy.pdf"))
# plt.show()
### Plot entropy cdf for different epsilons
df_cumdf = pd.DataFrame(columns=["Method", "Model Path", "Epsilon", "Entropy", "CDF"])
unc_ = np.linspace(df["Entropy"].min(), df["Entropy"].max(), 100)
for method in df["Method"].unique():
for model_path in df["Model Path"].unique():
for eps in df["Epsilon"].unique():
df_subset = df[
(df["Method"]==method) &
(df["Model Path"]==model_path) &
(df["Epsilon"]==eps)]
if len(df_subset) == 0:
continue
unc = np.sort(df_subset["Entropy"])
prob = np.linspace(0,1,unc.shape[0])
f_cdf = scipy.interpolate.interp1d(unc, prob, fill_value=(0.,1.), bounds_error=False)
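                # Evaluate the empirical CDF on the shared grid `unc_` so curves for
                # different epsilon values are directly comparable.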
prob_ = f_cdf(unc_)
df_single = pd.DataFrame({'Method': method, 'Model Path': model_path,
'Epsilon': eps, "Entropy": unc_, 'CDF': prob_})
df_cumdf = df_cumdf.append(df_single)
g = sns.FacetGrid(df_cumdf, col="Method")
g = g.map_dataframe(sns.lineplot, x="Entropy", y="CDF", hue="Epsilon", ci=None).add_legend()
plt.savefig(os.path.join(output_dir, f"adv_ood-{ood}_cdf_method.pdf"))
plt.show()
# NOT USED FOR THE FINAL PAPER, BUT FEEL FREE TO UNCOMMENT AND RUN
# ### Plot calibration for different epsilons/methods
# print("Computing calibration plots per epsilon")
# calibrations = []
# tables = []
# for eps in tqdm(df["Epsilon"].unique()):
# df_calibration, table = gen_calibration_plot(df_image.copy(), eps, plot=False)
# calibrations.append(df_calibration)
# tables.append(table)
# df_calibration = pd.concat(calibrations, ignore_index=True)
# df_table = pd.concat(tables, ignore_index=True)
# df_table.to_csv(os.path.join(output_dir, f"adv_ood-{ood}_calib_error.csv"))
#
#
# sns.catplot(x="Method", y="Calibration Error", hue="Epsilon", data=df_calibration, kind="bar")
# plt.savefig(os.path.join(output_dir, f"adv_ood-{ood}_calib_error_method.pdf"))
# plt.show()
#
# sns.catplot(x="Epsilon", y="Calibration Error", hue="Method", data=df_calibration, kind="bar")
# plt.savefig(os.path.join(output_dir, f"adv_ood-{ood}_calib_error_epsilon.pdf"))
# plt.show()
#
# g = sns.FacetGrid(df_calibration, col="Method")
# g = g.map_dataframe(sns.lineplot, x="Expected Conf.", y="Observed Conf.", hue="Epsilon")
# g = g.add_legend()
# plt.savefig(os.path.join(output_dir, f"adv_ood-{ood}_calib_method.pdf"))
# plt.show()
def gen_ood_comparison(df_image, unc_key="Entropy"):
print(f"Generating OOD plots with unc_key={unc_key}")
df = df_image[df_image["Epsilon"]==0.0] # Remove adversarial noise experiments
# df = df.iloc[::5]
df_pixel = df_image_to_pixels(df, keys=["Target", "Mu", "Sigma", "OOD"])
df_pixel["Entropy"] = 0.5*np.log(2*np.pi*np.exp(1.)*(df_pixel["Sigma"]**2))
df_by_method = df_pixel.groupby(["Method","Model Path", "OOD"])
df_by_image = df_pixel.groupby([df_pixel.index, "Method","Model Path", "OOD"])
df_mean_unc = df_by_method[unc_key].mean().reset_index() #mean of all pixels per method
df_mean_unc_img = df_by_image[unc_key].mean().reset_index() #mean of all pixels in every method and image
sns.catplot(x="Method", y=unc_key, hue="OOD", data=df_mean_unc_img, kind="violin")
plt.savefig(os.path.join(output_dir, f"ood_{unc_key}_violin.pdf"))
plt.show()
sns.catplot(x="Method", y=unc_key, hue="OOD", data=df_mean_unc_img, kind="box", whis=0.5, showfliers=False)
plt.savefig(os.path.join(output_dir, f"ood_{unc_key}_box.pdf"))
plt.show()
### Plot PDF for each Method on both OOD and IN
g = sns.FacetGrid(df_mean_unc_img, col="Method", hue="OOD")
g.map(sns.distplot, "Entropy").add_legend()
plt.savefig(os.path.join(output_dir, f"ood_{unc_key}_pdf_per_method.pdf"))
plt.show()
### Grab some sample images of most and least uncertainty
for method in df_mean_unc_img["Method"].unique():
imgs_max = dict()
imgs_min = dict()
for ood in df_mean_unc_img["OOD"].unique():
df_subset = df_mean_unc_img[
(df_mean_unc_img["Method"]==method) &
(df_mean_unc_img["OOD"]==ood)]
if len(df_subset) == 0:
continue
def get_imgs_from_idx(idx):
i_img = df_subset.loc[idx]["level_0"]
img_data = df_image.loc[i_img]
sigma = np.array(img_data["Sigma"])
entropy = np.log(sigma**2)
ret = [img_data["Input"], img_data["Mu"], entropy]
return list(map(trim, ret))
def idxquantile(s, q=0.5, *args, **kwargs):
qv = s.quantile(q, *args, **kwargs)
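                # Return the index label of the observation sitting at the q-quantile
                # of s, used to pick a representative image at that uncertainty level.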
return (s.sort_values()[::-1] <= qv).idxmax()
imgs_max[ood] = get_imgs_from_idx(idx=idxquantile(df_subset["Entropy"], 0.95))
imgs_min[ood] = get_imgs_from_idx(idx=idxquantile(df_subset["Entropy"], 0.05))
all_entropy_imgs = np.array([ [d[ood][2] for ood in d.keys()] for d in (imgs_max, imgs_min)])
entropy_bounds = (all_entropy_imgs.min(), all_entropy_imgs.max())
Path(os.path.join(output_dir, "images")).mkdir(parents=True, exist_ok=True)
for d in (imgs_max, imgs_min):
for ood, (x, y, entropy) in d.items():
id = os.path.join(output_dir, f"images/method_{method}_ood_{ood}_entropy_{entropy.mean()}")
cv2.imwrite(f"{id}_0.png", 255*x)
cv2.imwrite(f"{id}_1.png", apply_cmap(y, cmap=cv2.COLORMAP_JET))
entropy = (entropy - entropy_bounds[0]) / (entropy_bounds[1]-entropy_bounds[0])
cv2.imwrite(f"{id}_2.png", apply_cmap(entropy))
### Plot CDFs for every method on both OOD and IN
df_cumdf = pd.DataFrame(columns=["Method", "Model Path", "OOD", unc_key, "CDF"])
unc_ = np.linspace(df_mean_unc_img[unc_key].min(), df_mean_unc_img[unc_key].max(), 200)
for method in df_mean_unc_img["Method"].unique():
for model_path in df_mean_unc_img["Model Path"].unique():
for ood in df_mean_unc_img["OOD"].unique():
df = df_mean_unc_img[
(df_mean_unc_img["Method"]==method) &
(df_mean_unc_img["Model Path"]==model_path) &
(df_mean_unc_img["OOD"]==ood)]
if len(df) == 0:
continue
unc = np.sort(df[unc_key])
prob = np.linspace(0,1,unc.shape[0])
f_cdf = scipy.interpolate.interp1d(unc, prob, fill_value=(0.,1.), bounds_error=False)
prob_ = f_cdf(unc_)
df_single = pd.DataFrame({'Method': method, 'Model Path': model_path,
'OOD': ood, unc_key: unc_, 'CDF': prob_})
df_cumdf = df_cumdf.append(df_single)
sns.lineplot(data=df_cumdf, x=unc_key, y="CDF", hue="Method", style="OOD")
plt.savefig(os.path.join(output_dir, f"ood_{unc_key}_cdfs.pdf"))
plt.show()
def load_data():
import data_loader
_, (x_test, y_test) = data_loader.load_depth()
_, (x_ood_test, y_ood_test) = data_loader.load_apollo()
print("Loaded data:", x_test.shape, x_ood_test.shape)
return (x_test, y_test), (x_ood_test, y_ood_test)
def predict(method, model, x, n_samples=10):
if method == Model.Dropout:
preds = tf.stack([model(x, training=True) for _ in range(n_samples)], axis=0) #forward pass
mu, var = tf.nn.moments(preds, axes=0)
return mu, tf.sqrt(var)
elif method == Model.Evidential:
outputs = model(x, training=False)
mu, v, alpha, beta = tf.split(outputs, 4, axis=-1)
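        # Evidential (Normal-Inverse-Gamma) head: beta/(v*(alpha-1)) is the variance
        # of the predicted mean (epistemic uncertainty); beta/(alpha-1) would be the
        # expected aleatoric variance.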
sigma = tf.sqrt(beta/(v*(alpha-1)))
return mu, sigma
elif method == Model.Ensemble:
# preds = tf.stack([f(x) for f in model], axis=0)
# y, _ = tf.split(preds, 2, axis=-1)
# mu = tf.reduce_mean(y, axis=0)
# sigma = tf.math.reduce_std(y, axis=0)
preds = tf.stack([f(x) for f in model], axis=0)
mu, var = tf.nn.moments(preds, 0)
return mu, tf.sqrt(var)
else:
raise ValueError("Unknown model")
def apply_cmap(gray, cmap=cv2.COLORMAP_MAGMA):
if gray.dtype == np.float32:
gray = np.clip(255*gray, 0, 255).astype(np.uint8)
im_color = cv2.applyColorMap(gray, cmap)
return im_color
def trim(img, k=10):
return img[k:-k, k:-k]
def normalize(x, t_min=0, t_max=1):
return ((x-x.min())/(x.max()-x.min())) * (t_max-t_min) + t_min
@tf.function
def create_adversarial_pattern(model, x, y):
x_ = tf.convert_to_tensor(x)
with tf.GradientTape() as tape:
tape.watch(x_)
if isinstance(model, list):
preds = tf.stack([model_(x_, training=False) for model_ in model], axis=0) #forward pass
pred, _ = tf.nn.moments(preds, axes=0)
else:
(pred) = model(x_, training=True)
if pred.shape[-1] == 4:
pred = tf.split(pred, 4, axis=-1)[0]
loss = edl.losses.MSE(y, pred)
# Get the gradients of the loss w.r.t to the input image.
gradient = tape.gradient(loss, x_)
# Get the sign of the gradients to create the perturbation
signed_grad = tf.sign(gradient)
return signed_grad
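# Note: create_adversarial_pattern implements the Fast Gradient Sign Method (FGSM);
# the caller scales the returned sign mask by eps and adds it to the input
# (see the adversarial loop in compute_predictions).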
if args.load_pkl:
print("Loading!")
    df_image = pd.read_pickle("cached_depth_results.pkl")
else:
    print("Recomputing predictions from scratch!")
    df_image = compute_predictions()
    # cache the predictions so later runs can pass --load-pkl
    df_image.to_pickle("cached_depth_results.pkl")
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras.layers import Dense, GlobalAveragePooling2D
from tensorflow.keras.applications import MobileNetV2
from tensorflow.keras.applications.resnet import preprocess_input
from tensorflow.keras.preprocessing import image
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam
from sklearn import preprocessing
from skimage.io import imread
from skimage.transform import resize
with tf.device('/gpu:0'):
################################## Load Robot data ##################################################################
Arm2_CS_State = pd.read_csv('/home/kiyanoushs/KiyanoushCodes/NeedleInsertion/Data/Arm2_CS_new.csv', header=None)
Arm2_NS_State = pd.read_csv('/home/kiyanoushs/KiyanoushCodes/NeedleInsertion/Data/Arm2_NS_new.csv', header=None)
robot_state_train_input = Arm2_CS_State[0:50244][:]
print("Robot state input trainingset size: {}".format(robot_state_train_input.shape))
robot_state_train_label = Arm2_NS_State[0:50244][:]
print("Robot state label trainingset size: {}".format(robot_state_train_label.shape))
robot_state_test_input = Arm2_CS_State[50244:][:]
print("Robot state input testset size: {}".format(robot_state_test_input.shape))
robot_state_test_label = Arm2_NS_State[50244:][:]
print("Robot state label testset size: {}".format(robot_state_test_label.shape))
################################## Standardization ###################################################################
CS_train_names = robot_state_train_input.columns
NS_train_names = robot_state_train_label.columns
CS_test_names = robot_state_test_input.columns
NS_test_names = robot_state_test_label.columns
    # Fit separate scalers for inputs and labels: StandardScaler.fit() returns the
    # scaler itself, so re-using a single instance would leave both names bound to
    # the label-fitted statistics.
    input_Scaler = preprocessing.StandardScaler().fit(robot_state_train_input)
    output_Scaler = preprocessing.StandardScaler().fit(robot_state_train_label)
robot_state_train_input = input_Scaler.transform(robot_state_train_input)
robot_state_train_label = output_Scaler.transform(robot_state_train_label)
robot_state_test_input = input_Scaler.transform(robot_state_test_input)
robot_state_test_label = output_Scaler.transform(robot_state_test_label)
robot_state_train_input = pd.DataFrame(robot_state_train_input, columns=CS_train_names)
    robot_state_train_label = pd.DataFrame(robot_state_train_label, columns=NS_train_names)
from geocode import geocoder
import pandas as pd
import time
import requests
import json
import us
def requests_per_county(mask_df, write_out_csv = True):
# Count the amount of requests per county
mask_df_counties=mask_df.groupby(['fips','county','State']).size().reset_index(name='counts')
# write out this data file to csv
if write_out_csv:
timestr = time.strftime("%Y%m%d")
path = 'findthemasks_data_processed_' + timestr + '.csv'
mask_df_counties.to_csv (path, index = False, header=True)
##### TODO
# Some of the data written out is corrupted and misaligned by row
# Not sure what the bug is right now
return mask_df_counties
def add_fips_county_info(mask_df, geocoder):
print ('Pulling geocodes from Lat+Lng. This will take awhile...')
mask_df['geocoder'] = mask_df.apply(
lambda x: geocoder.get_geocoder_info_from_rg(x['Lat'], x['Lng']), axis=1)
# Map the geocoder dict column to individual columns
mask_df['fips'] = mask_df.apply(
lambda x: x['geocoder']['fips'], axis=1)
mask_df['county'] = mask_df.apply(
lambda x: x['geocoder']['county'], axis=1)
mask_df.drop(columns=['geocoder'],inplace = True)
# Using DataFrame.drop to remove any fips code that could not be mapped
mask_df = mask_df.dropna(how='any', subset=['fips','county'])
return mask_df
def download_county_geojson_and_merge_df(geojson_url, mask_df_counties):
# Download the data
s=requests.get(geojson_url).text
# Extract the json format, and find column headers
counties = json.loads(s)
# Create counties_df from geojson counties object
counties_df = pd.DataFrame.from_dict(counties['features'])
counties_df['properties'][0]
# extract properties dict, then concatenate new clumsn and remove old properties column
counties_df = pd.concat(
[counties_df, pd.json_normalize(counties_df['properties'])], axis=1).drop(['properties'], axis=1)
# clean up the dataframe
counties_df.drop(['type','COUNTY','LSAD'], axis=1, inplace=True)
counties_df.rename(columns={'id':'fips','NAME':'county'}, inplace=True)
counties_df.head()
# join with the dataframe that has ppe requests: mask_df
merged_df = counties_df.join(
mask_df_counties[['fips','counts']].set_index('fips'),
on='fips', how='left', lsuffix='counties', rsuffix='mask_df')
# fill the NA in counts with 0s
merged_df['counts'].fillna(0, inplace=True)
# change name of column 'counts' to 'PPE_requests'
merged_df.rename(inplace=True,
columns={'counts':'PPE_requests'})
# Map fips state code to state name
merged_df['STATE'] = merged_df.apply(
lambda x: us.states.lookup(x['STATE']), axis=1)
merged_df['county_info_for_map'] = merged_df.apply(
lambda x: ('PPE Requests: %s, %s'%(x['county'],x['STATE'])), axis=1)
# Create text column for use in mapping
merged_df['ppe_text'] = 'PPE Requests: ' + merged_df['PPE_requests'].astype(int).astype(str) + '<br>'+ \
merged_df['county'].astype(str) + ', ' + merged_df['STATE'].astype(str)
# return a json object called counties for plotting, and a counties_df for joins+manipulation of other data
return counties, merged_df
def merge_covid_ppe_df(covid_df,merged_df):
merged_covid_ppe_df = merged_df.join(
covid_df[['fips','cases','deaths']].set_index('fips'),
on='fips', how='left', lsuffix='merged', rsuffix='covid_df')
# fill the NA in counts with 0s
merged_covid_ppe_df['cases'].fillna(0, inplace=True)
merged_covid_ppe_df['deaths'].fillna(0, inplace=True)
# Create text column for use in mapping
merged_covid_ppe_df['covid_text'] = merged_covid_ppe_df['county'].astype(str) + ', ' + \
merged_covid_ppe_df['STATE'].astype(str) + '<br><br>'+\
'Covid19: ' + '<br>'+ \
'Cases: ' + merged_covid_ppe_df['cases'].astype(int).astype(str) + '<br>'+ \
'Deaths: ' + merged_covid_ppe_df['deaths'].astype(int).astype(str) + '<br><br>'+ \
'PPE Requests: ' + merged_covid_ppe_df['PPE_requests'].astype(int).astype(str)
# TODO: Merge the counties geojson for all of new york
'''
print(counties['features'][0])
# possibly leverage this code to merge polygons
from shapely.geometry import Polygon
from shapely.ops import cascaded_union
polygon1 = Polygon([(0, 0), (5, 3), (5, 0)])
polygon2 = Polygon([(0, 0), (3, 10), (3, 0)])
polygons = [polygon1, polygon2]
u = cascaded_union(polygons)
'''
return merged_covid_ppe_df
def process_hospital_data(hospital_df, write_out_csv = True):
# Sum the amount of beds per county
hospital_df_counties = hospital_df.groupby(['fips','COUNTY'])['BEDS'].sum().reset_index()
# write out this data file to csv
if write_out_csv:
timestr = time.strftime("%Y%m%d")
path = 'hospital_data_county_data_' + timestr + '.csv'
hospital_df_counties.to_csv (path, index = False, header=True)
return hospital_df_counties
def merge_covid_ppe_hosp_df(hospital_df_counties,merged_covid_ppe_df):
merged_covid_ppe_hosp_df = merged_covid_ppe_df.join(
hospital_df_counties[['fips','BEDS']].set_index('fips'),
on='fips', how='left', lsuffix='merged', rsuffix='hospital')
# fill the NA in counts with 0s
merged_covid_ppe_hosp_df['BEDS'].fillna(0, inplace=True)
# Create text column for use in mapping
merged_covid_ppe_hosp_df['hosp_text'] = merged_covid_ppe_hosp_df['county'].astype(str) + ', ' + \
merged_covid_ppe_hosp_df['STATE'].astype(str) + '<br><br>'+\
'Hospital Beds: ' + merged_covid_ppe_hosp_df['BEDS'].astype(int).astype(str) + '<br>'+ \
'<br>'+ \
'Covid19: ' + '<br>'+ \
'Cases: ' + merged_covid_ppe_hosp_df['cases'].astype(int).astype(str) + '<br>'+ \
'Deaths: ' + merged_covid_ppe_hosp_df['deaths'].astype(int).astype(str) + '<br><br>'+ \
'PPE Requests: ' + merged_covid_ppe_hosp_df['PPE_requests'].astype(int).astype(str)
return merged_covid_ppe_hosp_df
# In order to avoid divide by zero problem in lambda function within calculate_covid_per_bed_available
def weird_division(n, d):
return n / d if d else 0
def calculate_covid_per_bed_available(merged_covid_ppe_hosp_df):
# calculate the covid patients per bed, adding the column that saves this info
merged_covid_ppe_hosp_df['Covid_cases_per_bed'] = merged_covid_ppe_hosp_df.apply(
lambda x: (weird_division(x['cases'], x['BEDS'])), axis=1)
# sort by highest normalized_covid_patients_per_bed
merged_covid_ppe_hosp_df.sort_values(by='Covid_cases_per_bed', ascending=False, inplace=True)
# Create text column for use in mapping
merged_covid_ppe_hosp_df['hosp_text'] = merged_covid_ppe_hosp_df['county'].astype(str) + ', ' + \
merged_covid_ppe_hosp_df['STATE'].astype(str) + '<br><br>'+ \
'HAZARD RATIO (Cases/Bed): ' + merged_covid_ppe_hosp_df['Covid_cases_per_bed'].astype(float).astype(str) + '<br><br>'+ \
'Hospital Beds: ' + merged_covid_ppe_hosp_df['BEDS'].astype(int).astype(str) + '<br>'+ \
'<br>'+ \
'Covid19: ' + '<br>'+ \
'Cases: ' + merged_covid_ppe_hosp_df['cases'].astype(int).astype(str) + '<br>'+ \
'Deaths: ' + merged_covid_ppe_hosp_df['deaths'].astype(int).astype(str) + '<br><br>'+ \
'PPE Requests: ' + merged_covid_ppe_hosp_df['PPE_requests'].astype(int).astype(str)
return merged_covid_ppe_hosp_df
def find_counties_with_covid19_and_no_ppe_request(covid_df, mask_df_counties):
# join the covid patients dataframe with the beds per county dataframe, on the fips index
covid_ppe_df = covid_df.join(
mask_df_counties.set_index('fips'), on='fips', how='left', lsuffix='_covid', rsuffix='_ppe')
# fill the NA in normalized_covid_patients_per_bedwith 0s
covid_ppe_df['counts'].fillna(0, inplace=True)
# sort by highest normalized_covid_patients_per_bed
covid_ppe_df.sort_values(by=['counts','cases'], ascending=(True, False), inplace=True)
# change name of column 'counts' to 'PPE_requests'
covid_ppe_df.rename(inplace=True,
columns={'counts':'PPE_requests', 'county_covid':'county'})
### TODO
# There may be a mismatch of the PPE requests lat/long and those of the hospital data
# since District of Columbia is appearing at the top, and that is unlikely
return covid_ppe_df
def add_all_ppe_requests_to_merged_df(mask_df,merged_df):
# get all fips code in the mask_df frame
unique_fips=mask_df.fips.unique()
for fip in unique_fips:
# get all entries that have the same fip code and convert to dict, then string
value = json.dumps(mask_df[mask_df['fips']==fip].to_dict())
# select rows where the fips code equals fip
merged_df.loc[merged_df['fips'] == str(fip),'all_ppe_requests']=value
# fill the NA in normalized_covid_patients_per_bedwith 0s
merged_df['all_ppe_requests'].fillna(0, inplace=True)
# How to pull array of dicts from 'all_ppe_requests' category
'''
all_ppe_locations_array= eval(str(merged_df.loc[
merged_df['fips'] == '01073', 'all_ppe_requests'].values))
'''
return merged_df
def add_fips_ppe_donors(ppe_donors_df, zip_fips_df):
# zfill the fips to make sure they are right
width=5
zip_fips_df["fips"]= zip_fips_df["fips"].astype(str)
zip_fips_df["fips"]= zip_fips_df["fips"].str.zfill(width)
zip_fips_df["zip"]= zip_fips_df["zip"].astype(str)
zip_fips_df["zip"]= zip_fips_df["zip"].str.zfill(width)
# join the ppe_donors with the zip code to add fips, state, county info
ppe_donors_with_zip_df = ppe_donors_df.join(zip_fips_df.set_index('zip'),
how='left',on='zip', lsuffix='donors', rsuffix='zip')
# Clean the data by dropping rows that are missing fips
ppe_donors_with_zip_df = ppe_donors_with_zip_df.dropna(how='any', subset=['fips'])
# join the ppe_donors with the zip code to add fips, state, county info
ppe_donors_with_zip_df = ppe_donors_with_zip_df.join(zip_lat_long_df.set_index('zip'),
how='left',on='zip', lsuffix='donors', rsuffix='lat_lon')
# Clean the data by dropping rows that are missing lat lon
ppe_donors_with_zip_df = ppe_donors_with_zip_df.dropna(how='any', subset=['lat'])
return ppe_donors_with_zip_df
def donors_per_county(ppe_donors_with_zip_df,
merged_covid_ppe_hosp_df, write_out_csv = True):
# Count the amount of requests per county
donors_df_counties=ppe_donors_with_zip_df.groupby(['fips']).size().reset_index(name='ppe_donors')
# merge the donors with the larger dataframe
merged_covid_ppe_hosp_donors_df = merged_covid_ppe_hosp_df.join(
donors_df_counties[['fips','ppe_donors']].set_index('fips'),
on='fips', how='left', lsuffix='merged', rsuffix='donors')
# fill the NA in counts with 0s
merged_covid_ppe_hosp_donors_df['ppe_donors'].fillna(0, inplace=True)
# Create text column for use in mapping
merged_covid_ppe_hosp_donors_df['ppe_donors_requests_text'] = merged_covid_ppe_hosp_donors_df['county'].astype(str) + ', ' + \
merged_covid_ppe_hosp_donors_df['STATE'].astype(str) + '<br><br>'+\
'PPE Donors:' + merged_covid_ppe_hosp_donors_df['ppe_donors'].astype(int).astype(str) + '<br>'+ \
'PPE Requests:' + merged_covid_ppe_hosp_donors_df['PPE_requests'].astype(int).astype(str) + '<br><br>'+ \
'Covid19: ' + '<br>'+ \
'Cases: ' + merged_covid_ppe_hosp_donors_df['cases'].astype(int).astype(str) + '<br>'+ \
'Deaths: ' + merged_covid_ppe_hosp_donors_df['deaths'].astype(int).astype(str) + '<br><br>' + \
'HAZARD RATIO (Cases/Bed): ' + merged_covid_ppe_hosp_donors_df['Covid_cases_per_bed'].astype(float).astype(str)
return merged_covid_ppe_hosp_donors_df
# In order to avoid divide by zero problem in lambda function within calculate_donor_per_requester
def weird_division_for_donor_per_requester(n, d):
if n ==0:
return 'NA'
return n / d if d else 0
def calculate_donor_per_requester(merged_covid_ppe_hosp_donors_df):
# calculate the covid patients per bed, adding the column that saves this info
merged_covid_ppe_hosp_donors_df['PPE_Donor_Per_Requester'] = merged_covid_ppe_hosp_donors_df.apply(
lambda x: (weird_division_for_donor_per_requester(x['ppe_donors'], x['PPE_requests'])), axis=1)
# Create text column for use in mapping
merged_covid_ppe_hosp_donors_df['ppe_donors_requests_ratio_text'] = merged_covid_ppe_hosp_donors_df['county'].astype(str) + ', ' + \
merged_covid_ppe_hosp_donors_df['STATE'].astype(str) + '<br><br>'+\
'GetUsPPE Donors per Requester: ' + merged_covid_ppe_hosp_donors_df['PPE_Donor_Per_Requester'].astype(str) + '<br>'+\
'PPE Donors: ' + merged_covid_ppe_hosp_donors_df['ppe_donors'].astype(int).astype(str) + '<br>'+ \
'PPE Requests: ' + merged_covid_ppe_hosp_donors_df['PPE_requests'].astype(int).astype(str) + '<br><br>'+ \
'Covid19: ' + '<br>'+ \
'Cases: ' + merged_covid_ppe_hosp_donors_df['cases'].astype(int).astype(str) + '<br>'+ \
'Deaths: ' + merged_covid_ppe_hosp_donors_df['deaths'].astype(int).astype(str) + '<br><br>' + \
'HAZARD RATIO (Cases/Bed): ' + merged_covid_ppe_hosp_donors_df['Covid_cases_per_bed'].astype(float).astype(str)
# TODO
# Make idempotent, can't be run twice currently
return merged_covid_ppe_hosp_donors_df
# Taking original mask_df from the findthemasks website, rename to current convention
def create_requestor_df_for_querying_requesters(mask_df, merged_covid_ppe_hosp_df):
requestor_info_df = mask_df.rename(columns={
'Lat':'lat',
'Lng':'lon',
'What is the name of the hospital or clinic?':'institution',
'Street address for dropoffs?':'address',
'City':'city',
"Write drop-off instructions below or paste a link to your organization's own page containing instructions. For written instructions, please include details such as curbside procedure, mailing address, email address, and/or phone number. Please note all information entered here will be made public.": 'instructions',
'What do you need?' : 'need',
'fips':'fips'
})
### Merge on the hazard index
requestor_info_df = pd.merge(requestor_info_df,
merged_covid_ppe_hosp_df[['fips','Covid_cases_per_bed']], on='fips', how='left')
requestor_info_df = requestor_info_df.rename(
columns={'Covid_cases_per_bed':'Hazard_Index_Covid_Cases_Per_Hosp_Bed'})
#df = pd.merge(df,df2[['Key_Column','Target_Column']],on='Key_Column', how='left')
# make sure lat long are float
requestor_info_df['lat'] = | pd.to_numeric(requestor_info_df['lat'], errors='coerce') | pandas.to_numeric |
#!/usr/bin/env python3
#encoding: utf-8
import pandas as pd
import numpy as np
DATAFILELOC = './data/'
OPFILELOC = './output/'
RSEGROUPS = 'rse_groups.csv'
UKRSE = 'association-members.csv'
UKRESEARCHERS = 'hesa_number_of_researchers_uk.csv'
JOBS = 'rse_like_jobs.csv'
RSPENDING = 'research_spending.csv'
SALARY = 'salary.csv'
GLOBALRESEARCHERS = 'global_researchers.csv'
POPULATION = 'population.csv'
COUNTRYCODES = 'oecd_country_codes.csv'
def import_csv_to_df(location, filename):
"""
Imports a csv file into a Pandas dataframe
:params: an xls file and a sheetname from that file
:return: a df
"""
return pd.read_csv(location + filename, low_memory=False)
def export_to_csv(df, location, filename, index_write):
"""
Exports a df to a csv file
:params: a df and a location in which to save it
:return: nothing, saves a csv
"""
return df.to_csv(location + filename + '.csv', index=index_write)
def rse_group_average(DATAFILELOC, RSEGROUPS,num_of_groups_uk):
"""
Takes the data collected from UK RSE Groups, calculates the median group size, uses that data to make up for
the missing groups (I got data from 25 of the 29 of them) and then calculates the total number of people in UK RSE
Groups.
:param DATAFILELOC: location of data files
:param RSEGROUPS: csv with data on size of RSE Groups
:param num_of_groups_uk: the number of RSE Groups in the UK
:return: the total number of RSEs in UK RSE Groups
"""
# Get data on RSE Groups
df_rse_groups = import_csv_to_df(DATAFILELOC, RSEGROUPS)
column_names = df_rse_groups.columns
# Median group size in data and number of groups in data
median_group_size = round(df_rse_groups['No. of RSEs Jan 2020'].median(),0)
num_groups_in_data = len(df_rse_groups)
# Find missing groups
missing_groups = num_of_groups_uk - num_groups_in_data
# Add dummy data to make up for RSE groups not in original data
df_extra = pd.DataFrame([[np.NaN, np.NaN, np.NaN, median_group_size, np.NaN]], columns=column_names)
for i in range(missing_groups):
df_rse_groups = df_rse_groups.append(df_extra, ignore_index=True)
rses_in_groups = df_rse_groups['No. of RSEs Jan 2020'].sum()
return rses_in_groups
def rses_in_association(DATAFILELOC, UKRSE):
"""
Takes all the post-@-part of the email addresses of people signed up to the UKRSE Association, drops all the
obviously non-UK email addresses, drops half of the .com and .org ones too. Then counts the people who are left to
say how many UK RSEs are in the UK RSE Association.
:param DATAFILELOC: location of data files
:param UKRSE: csv of last parts of email addresses of people signed up to UKRSE Association
:return: the total number of RSEs in the UKRSE Association
"""
# Get data on UKRSE Association
df_ukrse = import_csv_to_df(DATAFILELOC, UKRSE)
# Get last part of email address in new col
df_ukrse['endings'] = df_ukrse['Email'].str.rsplit('.', n=1).str[1]
# This was used in presentation, not needed for analysis
#list_uks = df_ukrse[df_ukrse['endings']=='uk']['Email'].tolist()
#print(set(list_uks))
# Find all the .uk and .scot
df_uks = df_ukrse[df_ukrse['endings'].str.contains('uk|scot')]
uks = len(df_uks)
# Find all the .com and .org
df_coms_orgs = df_ukrse[df_ukrse['endings'].str.contains('com|org')]
coms_orgs = len(df_coms_orgs)
# Calculate how many members were in the UK by keeping all the .uk and .scot, but only
# half of the .com and .org
uk_rses_in_ukrse = uks + (coms_orgs/2)
return uk_rses_in_ukrse
def researchers_in_uk(DATAFILELOC, UKRESEARCHERS):
"""
Takes data from HESA and does a load of cleaning to
:param DATAFILELOC: location of data files
:param UKRESEARCHERS: csv of researchers in UK from HESA website
:return: the total number of researchers in the UK
"""
# Get data on UK researchers
df_uk_research = import_csv_to_df(DATAFILELOC, UKRESEARCHERS)
# First 28 rows of the csv are metadata! No, no, it's fine HESA. I've got tons of free time, don't you worry.
# Tidydata, please. Tidydata!
df_uk_research.columns = df_uk_research.iloc[27]
df_uk_research = df_uk_research.iloc[28:]
df_uk_research.reset_index(drop=True, inplace=True)
# Cut to latest year
df_uk_research = df_uk_research[df_uk_research['Academic Year']=='2018/19']
# Cut to just the academics
# Working with HESA data is like working with angry sharks. Given any freedom, you would choose not to, but sometimes
# you're forced into it. They've encoded the data they need for filtering on the website into their datasets, so
# there's massive duplication which the following five lines of code are needed to remove. Sigh.
df_uk_research = df_uk_research[df_uk_research['Activity standard occupational classification'] == 'Total academic staff']
df_uk_research = df_uk_research[df_uk_research['Mode of employment'] == 'All']
df_uk_research = df_uk_research[df_uk_research['Contract marker'] == 'Academic']
df_uk_research = df_uk_research[df_uk_research['Country of HE provider'] == 'All']
df_uk_research = df_uk_research[df_uk_research['Region of HE provider'] == 'All']
df_uk_research = df_uk_research[df_uk_research['HE Provider'] != 'Total']
df_uk_research['Number'] = df_uk_research['Number'].astype(int)
num_uk_academics = df_uk_research['Number'].sum()
return num_uk_academics
def get_mean_rse_like_jobs(DATAFILELOC, JOBS):
"""
Very simple function to calculate the mean of a few numbers related to
RSE like jobs
:param DATAFILELOC: location of data files
:param JOBS: data on the mean fraction of jobs advertised on jobs.ac.uk that are RSE like
:return: the mean of a list of fractions
"""
# Get the annual mean data
df_annuals = import_csv_to_df(DATAFILELOC, JOBS)
mean_annuals = round(df_annuals['fraction rse-like'].mean(),2)
return mean_annuals
def we_are_not_that_big(DATAFILELOC, RSPENDING, SALARY, GLOBALRESEARCHERS, POPULATION, num_rses_uk, OPFILELOC, COUNTRYCODES):
"""
Calculates the number of RSEs worldwide. It calculates compares research spend and average salary to the UK, then
compares number of researchers employed in the country to the UK, calculates the fractional difference between the
UK and each country, then multiplies this by the (pretty well) understood number of RSEs in the UK.
:param DATAFILELOC: location of data files
:param RSPENDING: csv of research spending per country
:param SALARY: csv of average salary per country
:param GLOBALRESEARCHERS: csv of number of researchers per country (as percentage of total population)
:param POPULATION: csv of population per country
:param num_rses_uk: known number of RSEs in the UK
:param OPFILELOC: location of output files
:param COUNTRYCODES: csv of short country codes and full country name
:return: a dict containing two values, each the number of RSEs in the world as calculated by one of the two methods
"""
#Get data
df_spending = import_csv_to_df(DATAFILELOC, RSPENDING)
df_salary = import_csv_to_df(DATAFILELOC, SALARY)
df_researchers = import_csv_to_df(DATAFILELOC, GLOBALRESEARCHERS)
df_pop = import_csv_to_df(DATAFILELOC, POPULATION)
df_countries = import_csv_to_df(DATAFILELOC, COUNTRYCODES)
df_countries.columns = ['country', 'LOCATION']
#Cut data to 2017 (the most recent year with the most data) and drop OECD and EU28 rows
# Set the year of interest
year_int = 2017
df_spending = df_spending[df_spending['TIME']==year_int]
df_spending = df_spending[df_spending['MEASURE']=='MLN_USD']
df_spending = df_spending[df_spending['LOCATION']!='OECD']
df_spending = df_spending[df_spending['LOCATION'] != 'EU28']
df_salary = df_salary[df_salary['TIME']==year_int]
df_researchers = df_researchers[df_researchers['TIME'] == year_int]
df_researchers = df_researchers[df_researchers['SUBJECT'] == 'TOT']
df_researchers = df_researchers[df_researchers['MEASURE'] == '1000EMPLOYED']
df_researchers = df_researchers[df_researchers['LOCATION']!='OECD']
df_researchers = df_researchers[df_researchers['LOCATION'] != 'EU28']
df_pop = df_pop[df_pop['TIME']==year_int]
df_pop = df_pop[df_pop['SUBJECT']=='TOT']
df_pop = df_pop[df_pop['MEASURE'] == 'MLN_PER']
# No salary data for China in OECD data, so have to add it (pinch of salt needed here)
# Average salary in China in 2017 (https://www.statista.com/statistics/278349/average-annual-salary-of-an-employee-in-china/#:~:text=In%202018%2C%20an%20employee%20in,yuan%20on%20average%20in%202017.)
av_salary = 74318
# USD to CNY exchange rate on 31 December 2017 (https://www.xe.com/currencytables/?from=USD&date=2017-12-31)
exg_rate = 0.1537053666
av_salary = av_salary * exg_rate
# Create dataframe
salary_columns = df_salary.columns
df_china = pd.DataFrame(columns=salary_columns)
df_china.loc[0] = ['CHN','AVWAGE','TOT','USD',np.NaN,'2017',av_salary,np.NaN]
# Add China data
df_salary = df_salary.append(df_china, ignore_index=True)
# Assume we're only half right about the number of RSEs in the UK
num_rses_uk = num_rses_uk/2
# Keep only countries for which I have spending and salary data
df_spends = pd.merge(df_spending, df_salary, on='LOCATION', how='inner', suffixes=('_spend', '_salary'))
# Calculate scaling fraction
df_spends['spend/salary'] = df_spends['Value_spend'] / df_spends['Value_salary']
uk_spend = df_spends.loc[df_spends['LOCATION'] == 'GBR', 'spend/salary'].tolist()[0]
df_spends['fraction_spends'] = df_spends['spend/salary'] / uk_spend
df_spends['num rses_spends'] = df_spends['fraction_spends'] * num_rses_uk
# Keep only countries where I have percentage of researchers and population
df_people = pd.merge(df_researchers, df_pop, on='LOCATION', how='inner', suffixes=('_rschrs', '_pop'))
# Calculate scaling fraction
# Researcher data is per 1000 employed, population data is in millions of people, so...
df_people['Value_rschrs'] = df_people['Value_rschrs']*1000
df_people['MEASURE_rschrs'] = '1000000EMPLOYED'
df_people['tot_researchers'] = df_people['Value_rschrs'] * df_people['Value_pop']
uk_researchers = df_people.loc[df_people['LOCATION'] == 'GBR', 'tot_researchers'].tolist()[0]
df_people['fraction_researchers'] = df_people['tot_researchers'] / uk_researchers
df_people['num rses_researchers'] = df_people['fraction_researchers'] * num_rses_uk
# Stick into a df
df = | pd.merge(df_spends, df_people, on='LOCATION', how='outer') | pandas.merge |
def autoNewDirs():
import os, shutil
from fup.helpers.files import originalFilesPaths, getfileSizeMtime
from fup.utils.commun import generateID, current_date
from fup.utils.jsoninfo import configInfo
config = configInfo()
bindir = os.path.abspath(config["path_to_bin"])
filesinfodict = originalFilesPaths(infoDict={}, auto=True)
newdirsNames = list(filesinfodict.keys())
unassignedpath = os.path.abspath(config['path_to_batches_unassigned'])
unassigneddirli = os.listdir(unassignedpath)
unsdict = {}
for d in unassigneddirli:
commName = d.split('BID_')[0].strip()
unsdict[commName] = d
unassigneddirNames = list(unsdict.keys())
communliBatch = list(set(newdirsNames).intersection(unassigneddirNames))
auto = False
infoDictli = []
tobinli = []
for opac, vdict in filesinfodict.items():
#similar to uploadFilesCreateBatch, but without flask file object
batchID = generateID()
operator = opac.split(' ')[0]
aircraft = opac.split(' ')[1]
bindir_batch = os.path.join(bindir, batchID)
if opac not in communliBatch:
batchNameFolder = operator+' '+ aircraft +' BID_'+batchID
path = os.path.join(unassignedpath, batchNameFolder)
os.mkdir(path)
else:
auto = True
communOpAc = list(set([opac]).intersection(communliBatch))
batchNameFolder = unsdict[communOpAc[0]]
path = os.path.join(unassignedpath, batchNameFolder)
existingBatchID = batchNameFolder.split('BID_')[-1].replace('_', '')
bindir_batch = os.path.join(bindir, existingBatchID)
tobinli.append({'source': vdict['rootpath'], 'destination': bindir_batch})
filesnameli = []
fileIDli = []
for file in vdict['files']:
if auto:
#print("yuhuu file",file)
filepath = file
fileinfo = getfileSizeMtime(filepath)
fileinfo["FileName"] = file.split("\\")[-1]
responseFileInfo = checkFileInfo(fileinfo)
if responseFileInfo != True:
return responseFileInfo, auto, auto
filename = file.split('\\')[-1]
fileid = generateID()
newFileName = 'FID_'+fileid+' '+filename
save_path = os.path.join(path, newFileName)
filesnameli.append(filename)
fileIDli.append(fileid)
try:
shutil.copy2(file, save_path)
except Exception as e:
return str(e), str(e), str(e)
orgfilesname = ', '.join(filesnameli)
orgfilespath = path
filesId = ', '.join(fileIDli)
addedDate = current_date()
infoaddDict = {'BatchID': batchID,
'Aircraft': aircraft,
'Operator': operator,
'OriginalFilesName': orgfilesname,
'OriginalFilesPath': orgfilespath,
'FilesID': filesId,
'AddedDate': addedDate
}
infoDictli.append(infoaddDict)
#print(infoaddDict)
return infoDictli, auto, tobinli
def originalFilesPaths(infoDict, auto=False):
import os, re
from fup.utils.commun import getDirs
from fup.utils.jsoninfo import configInfo
config = configInfo()
newFilesPath = config["path_to_new_opfiles"]
newFilesPath = os.path.abspath(newFilesPath)
orgdirli = os.listdir(newFilesPath)
if auto:
orgdirs = [os.path.join(newFilesPath, adir) for adir in orgdirli]
orgdirs = getDirs(orgdirs)
dirsdict = {}
for path in orgdirs:
try:
op = path.split('\\')[-1].split(' ')[0].strip()
ac = str(path.split('\\')[-1].split(' ')[1].strip())
if not re.search('A', ac):
ac = 'A'+ac
opac = op+' '+ac
infoDict['Operator'] = op
infoDict['Aircraft'] = op
filespath = originalFilesPaths(infoDict, auto=False) #recursive
dirsdict[opac] = {'files': filespath, 'rootpath':path}
except:#in case there is no op or ac
pass
#print(dirsdict)
return dirsdict
else:
#Get original files paths to the new files added to batch
try:
orgdirli = [p for p in orgdirli if re.search(infoDict['Operator'], p)]
orgdirli = [p for p in orgdirli if re.search(infoDict['Aircraft'], p) or re.search(infoDict['Aircraft'][1:], p)]
except:
response = "Can't collect Operator and Aircraft info.."
return response
if len(orgdirli) == 1:
orgdir = orgdirli[0]
else:
response = "Operator '{}' with Aircraft '{}' was not found in NEW folder!".format(infoDict['Operator'], infoDict['Aircraft'])
return response
orgpath = os.path.join(newFilesPath, orgdir)
filespath = [os.path.join(orgpath, filepath) for filepath in os.listdir(orgpath)]
#print('asd',filespath)
return filespath
def matchOriginalinNew(orgfiles, newfiles):
#take 2 lists and see if original is found in new, return a dict
import re
fid_pattern = r"^FID_[a-zA-Z0-9]{6}\n*"
newfilesdict = {}
for file in newfiles:
if re.match(fid_pattern, file):
fid = str(re.search(fid_pattern, file).group()).replace('FID_', '')
fileName = str(file.replace(str('FID_' + fid), '')).strip()
#print("fid, file ", fid, fileName)
newfilesdict[fid] = fileName
return newfilesdict
def getFileId(filepath, matchedFilesdict):
from fup.utils.commun import delPunctuationMarks
file = filepath.split('\\')[-1]
#print('getFileIdfunc: ', file, matchedFilesdict)
for kid, vfname in matchedFilesdict.items():
if delPunctuationMarks(vfname) == delPunctuationMarks(file):
#print(kid, vfname)
return kid, vfname
def getfileSizeMtime(filepath):
import os, time
from time import mktime
from datetime import datetime
metadata = os.stat(filepath)
file_size = str(metadata.st_size) # bytes
filetime = time.localtime(metadata.st_mtime)
dt = datetime.fromtimestamp(mktime(filetime))
creation_date = dt.strftime('%d-%m-%Y')
fileinfodict = {'FileSizeBytes':file_size,
'ModificationDate': creation_date
}
return fileinfodict
def checkFileInfo(fileinfo):
import re
import pandas as pd
from fup.utils.dbwrap import sql2df
from fup.helpers.files import delDirsnotindb
from fup.utils.commun import delPunctuationMarks
#print("fileinfo ",fileinfo)
histdf = sql2df('fileshistory')
filedict = {}
for k, v in fileinfo.items():
filedict[k] = [v]
filedf = pd.DataFrame.from_dict(filedict)
#print("yuhuu filedict", filedict)
merged_name = filedf.merge(histdf, left_on=['FileName'], right_on=['FileName'], suffixes=('', '_y'))
colstodel = [col for col in merged_name.columns.tolist() if re.search('_y', col)]
for col in colstodel:
merged_name.drop(col, axis=1, inplace=True)
merged_size = filedf.merge(histdf, left_on=['FileSizeBytes'], right_on=['FileSizeBytes'], suffixes=('', '_y'))
colstodel = [col for col in merged_size.columns.tolist() if re.search('_y', col)]
for col in colstodel:
merged_size.drop(col, axis=1, inplace=True)
merged_mtime = filedf.merge(histdf, left_on=['ModificationDate'], right_on=['ModificationDate'], suffixes=('', '_y'))
colstodel = [col for col in merged_mtime.columns.tolist() if re.search('_y', col)]
for col in colstodel:
merged_mtime.drop(col, axis=1, inplace=True)
if (merged_name.shape[0] == 0):
return True
elif (merged_name.shape[0] == 0) and (merged_size.shape[0] == 0):
return True
elif (merged_name.shape[0] == 0) and (merged_size.shape[0] == 0) and (merged_mtime.shape[0] == 0):
return True
else:
try:
filename_merge = merged_name['FileName'].tolist()[0]
for fname in histdf['FileName']:
if delPunctuationMarks(fname) == delPunctuationMarks(filename_merge):
histdf_filtered = histdf[histdf['FileName'] == fname]
filename_hist = histdf_filtered['FileName'].tolist()
batchid_hist = histdf_filtered['AddedInBatch'].tolist()
fileid_hist = histdf_filtered['FileID'].tolist()
delDirsnotindb()
response = "File '{}' was probably added before! Check BID_{}, FID_{}!".format(filename_hist[0], batchid_hist[0], fileid_hist[0])
#print(response)
return response
except Exception as e:
return str("Probably files in NEW are already inserted. Got: {}".format(e))
def delDirsnotindb():
import os
from fup.utils.jsoninfo import configInfo
from fup.utils.commun import deletetree
from fup.helpers.batch import batchExists
config = configInfo()
unassignedpath = os.path.abspath(config['path_to_batches_unassigned'])
unassigneddirli = os.listdir(unassignedpath)
todelDirs = {}
for batchNameFolder in unassigneddirli:
bid = batchNameFolder.split('BID_')[-1].replace('_', '')
if batchNameFolder == '_info.txt':
continue
if not batchExists(bid):
todelDirs[bid] = batchNameFolder
for kbid, vdirName in todelDirs.items():
deldir = os.path.join(unassignedpath, vdirName)
deletetree(deldir)
def updateDBforNewFiles():
#Verify if new files were added to a existing batch if so, update db
import os, re
import pandas as pd
from fup.utils.dbwrap import sql_insertDict, sql_updateDict, get_dftable, sql_deleteRow
from fup.helpers.batch import batchInfo
from fup.helpers.files import getfileSizeMtime
from fup.utils.commun import list_duplicates
#Update followup with the new file added to the batch
followupdf = get_dftable('followup')
orgpaths = followupdf['OriginalFilesPath'].tolist()
orgpaths_nodups = list(set(orgpaths))
newtempbid = {}
for opath in orgpaths_nodups:
bid = opath.split("\\")[-1].split('BID_')[-1].strip()
followupdf_bid = followupdf[followupdf['OriginalFilesPath'].str.contains('|'.join([bid]), na=False)]
bids = followupdf_bid["BatchID"].tolist()
bidtodelli = [b for b in bids if b != bid]
tempd = {}
for biddel in bidtodelli:
infobatch_previous = batchInfo(biddel)
if infobatch_previous != False:
for k in list(infobatch_previous.keys()):
if k not in ['OriginalFilesName', 'FilesID', 'ChangesLog', 'BatchID']:
infobatch_previous.pop(k, None)
tempd["prevInfo"] = infobatch_previous
# else:
# response_notfound = "BatchID {} is not in database! Please delete from unassigned folder {}!".format(existingBatchID, existingBatchID)
# tempd["prevInfo"] = response_notfound
# #return response_notfound, response_notfound, response_notfound
newtempbid[bid] = tempd
orgpaths_dups = list_duplicates(orgpaths)
existingbid = {}
for opath in orgpaths_dups:
tempd = {}
bid = opath.split("\\")[-1].split('BID_')[-1].strip()
infobatch_previous = batchInfo(bid)
if infobatch_previous != False:
for k in list(infobatch_previous.keys()):
if k not in ['OriginalFilesName', 'FilesID', 'ChangesLog', 'BatchID']:
infobatch_previous.pop(k, None)
#print('OK ',infobatch_previous)
tempd["prevInfo"] = infobatch_previous
# else:
# response_notfound = "BatchID {} is not in database! Please delete from unassigned folder {}!".format(existingBatchID, existingBatchID)
# #print('NOK ',response_notfound)
# tempd["prevInfo"] = response_notfound
# #return response_notfound, response_notfound, response_notfound
existingbid[bid] = tempd
tempbidtodel = []
for bidorg, dorg in existingbid.items():
for bidtemp, dtemp in newtempbid.items():
if bidorg == bidtemp:
#make df from dict
dforg = pd.DataFrame.from_dict(dorg['prevInfo'])
dftemp = pd.DataFrame.from_dict(dtemp['prevInfo'])
todelli = dftemp['BatchID'].tolist()
for b in todelli:
tempbidtodel.append(b)
bidtodelli = list(set(tempbidtodel))
dfconcat = pd.concat([dforg, dftemp], axis=0)
dfdict = dfconcat.to_dict('list')
#Create dict to update followup
joineddict = {}
for kcol, vrow in dfdict.items():
if kcol == "BatchID":
vrow = list(set(vrow).difference(set(bidtodelli)))
try:
li = list(set(filter(None, vrow)))
vrow = ', '.join(li)
except:
pass
joineddict[kcol] = vrow
if sql_updateDict('followup', joineddict, 'BatchID') == False:
updatefup_failed = "Update in followup failed for BID_{} file {}..".format(joineddict['BatchID'], joineddict['OriginalFilesName'])
#print(updatefup_failed)
return updatefup_failed
#Delete new temp bid from db
for bid in bidtodelli:
if sql_deleteRow('followup', 'BatchID', bid):
pass
else:
#print("NOK")
return "Please delete from database {}".format(str(bidtodelli))
#Update fileshistory table in db
fileshistorydf = get_dftable('fileshistory')
fileInfoli = []
for fpath in orgpaths_nodups:
fileInfo = {}
bid = fpath.split("\\")[-1].split('BID_')[-1].strip()
fhistdf_filtered = fileshistorydf[fileshistorydf["AddedInBatch"] == bid]
fids = fhistdf_filtered["FileID"].tolist()
files = os.listdir(fpath)
fidorgli = []
for file in files:
fidorg = file.split(' ')[0].split('_')[-1]
fidorgli.append(fidorg)
newfid = list(set(fids).symmetric_difference(set(fidorgli))) # difference of/from 2 lists [1,2] and [1,2,3] => [3]
#print(newfid)
newfilepathli = []
for fid in newfid:
for file in files:
if fid == file.split(' ')[0].split('_')[-1]:
#print(fid, file)
newfilepath = os.path.join(fpath, file)
newfilepathli.append(newfilepath)
for newfilepath in newfilepathli:
fileSpec = getfileSizeMtime(newfilepath)
fileName = ' '.join(newfilepath.split('\\')[-1].split(' ')[1:])
fileInfo = {'FileID': newfid,
'AddedInBatch': [bid],
'ModificationDate': [fileSpec['ModificationDate']],
'FileName': [fileName],
'FileSizeBytes': [fileSpec['FileSizeBytes']]}
fileInfoli.append(fileInfo)
for finfodict in fileInfoli:
if sql_insertDict('fileshistory', finfodict) == False:
return "Please update manually in fileshistory {}".format(str(finfodict))
#print("update manually")
#print("return True")
return True
def saveFilesInfo(infoDict, auto):
import os
import pandas
from fup.helpers.files import getfileSizeMtime, matchOriginalinNew, getFileId, originalFilesPaths, checkFileInfo, updateDBforNewFiles
from fup.utils.dbwrap import sql_insertDict, sql2df
from fup.utils.commun import deletetree
path = infoDict['OriginalFilesPath']
#print("yuhuu ",path)
newfiles = os.listdir(path)
orgfilespath = originalFilesPaths(infoDict)
if isinstance(orgfilespath, str):
return orgfilespath #response
orgfiles = [path.split('\\')[-1] for path in orgfilespath]
matchedFiles = matchOriginalinNew(orgfiles, newfiles)
for filepath in orgfilespath:
fileinfo = getfileSizeMtime(filepath)
fileinfo['FileID'], fileinfo['FileName'] = getFileId(filepath, matchedFiles)
fileinfo['AddedInBatch'] = infoDict['BatchID']
responseFileInfo = checkFileInfo(fileinfo)
#print(filepath)
if responseFileInfo != True:
deletetree(path)
return responseFileInfo
else:
if auto:
pass
else:
if sql_insertDict('fileshistory', fileinfo) == False:
return False
return True
def unassignedtoPrepfiles():
#Copy batches from UNASSIGNED to PREPARED FILES
import os, shutil
from fup.utils.jsoninfo import configInfo
from fup.utils.commun import copytree
config = configInfo()
unassignedpath = os.path.abspath(config['path_to_batches_unassigned'])
prepfilespath = os.path.abspath(config['path_to_batches_prepfiles'])
unassigneddirli = os.listdir(unassignedpath)
for folder in unassigneddirli:
src = os.path.join(unassignedpath, folder)
dst = os.path.join(prepfilespath, folder)
try:
os.mkdir(dst)
copytree(src, dst)
except:#copy new files added to the batch
src_filesli = os.listdir(src)
dst_fileli = os.listdir(dst)
if len(src_filesli) > len(dst_fileli):
for srcFile in src_filesli:
s = os.path.join(src, srcFile)
d = os.path.join(dst, srcFile)
try:
shutil.copy2(s, d)
except:
pass
def dcsinfo(dcspath):
from lxml import etree
file = open(dcspath)
tree = etree.parse(file)
sumAll = tree.xpath('//sum')
totalRows = sum([int(s.text) for s in sumAll])
sumMpd = tree.xpath('//mpdTask//sum')
mpdtask = sum([int(s.text) for s in sumMpd])
sumOp = tree.xpath('//opeTask//sum')
optask = sum([int(s.text) for s in sumOp])
sumFindings = tree.xpath("//finding[@activated='true']//sum")
findings = sum([int(s.text) for s in sumFindings])
infodcs = {"TotalRowsNbr": totalRows,
"MPDTaskRowsNbr": mpdtask,
"OperatorRowsNbr": optask,
"FindingsRowsNbr": findings
}
return infodcs
def extendRowsFollowup():
import os
import pandas as pd
pd.options.mode.chained_assignment = None # default='warn'
from fup.utils.jsoninfo import configInfo
from fup.utils.commun import current_date, listifyString, xllook
config = configInfo()
xlpath = config['path_to_excels_exported_from_database']
xlfilepath = os.path.join(xlpath, 'followup.xlsx')
#xllook(xlfilepath, 'A1:W1', close=True)
fupdf = pd.read_excel(xlfilepath)
#Append to a list of dfs, bids that have more than one file
orgfilesdfsli = []
bidtodel = []
for i, cell in enumerate(fupdf["OriginalFilesName"].tolist()):
cellli = listifyString(str(cell))
if len(cellli) > 1:
bid = fupdf.loc[i, "BatchID"]
bidtodel.append(bid)
for j, orgfile in enumerate(cellli):
#print(orgfile, bid)
fup_bid = fupdf[fupdf['BatchID'] == bid]
fup_bid.loc[i, "OriginalFilesName"] = orgfile
fidli = listifyString(fup_bid.loc[i, "FilesID"])
fup_bid.loc[i, "FilesID"] = fidli[j]
orgfilesdfsli.append(fup_bid)
#Make one df from df list created up
orgfilesdf = pd.concat(orgfilesdfsli)
#Remove from df batches that have more than one file
fupdf = fupdf[~fupdf["BatchID"].str.contains('|'.join(bidtodel), na=False)]
extended_fup = | pd.concat([fupdf, orgfilesdf]) | pandas.concat |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import datetime
import logging
import warnings
import os
import pandas_datareader as pdr
from collections import Counter
from scipy import stats
from sklearn.metrics import r2_score, mean_squared_error, mean_absolute_percentage_error, mean_absolute_error
from statsmodels.tsa.stattools import adfuller
from statsmodels.graphics.tsaplots import plot_acf, plot_pacf
from statsmodels.tsa.seasonal import seasonal_decompose
logging.basicConfig(filename='warnings.log',level=logging.WARNING)
logging.captureWarnings(True)
warnings.simplefilter("ignore")
def mape(y,pred):
    return None if 0 in y else mean_absolute_percentage_error(y,pred) # MAPE is undefined when any actual value is 0, so return None in that case
def rmse(y,pred):
return mean_squared_error(y,pred)**.5
def mae(y,pred):
return mean_absolute_error(y,pred)
def r2(y,pred):
return r2_score(y,pred)
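# Quick sanity illustration of the metric helpers above (hypothetical numbers, kept as a
# comment so the module stays importable): with y = [1, 2, 3] and pred = [1, 2, 4],
#   rmse(y, pred) -> (1/3) ** 0.5 ~= 0.577
#   mae(y, pred)  -> 1/3
#   mape(y, pred) -> ~0.111, but None whenever any actual value is 0 (MAPE undefined there)
#   r2(y, pred)   -> 0.5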
_estimators_ = {'arima', 'mlr', 'mlp', 'gbt', 'xgboost', 'rf', 'prophet', 'hwes', 'elasticnet','svr','knn','combo'}
_metrics_ = {'r2','rmse','mape','mae'}
_determine_best_by_ = {'TestSetRMSE','TestSetMAPE','TestSetMAE','TestSetR2','InSampleRMSE','InSampleMAPE','InSampleMAE',
'InSampleR2','ValidationMetricValue','LevelTestSetRMSE','LevelTestSetMAPE','LevelTestSetMAE',
'LevelTestSetR2',None}
_colors_ = [
'#FFA500','#DC143C','#00FF7F','#808000','#BC8F8F','#A9A9A9',
'#8B008B','#FF1493','#FFDAB9','#20B2AA','#7FFFD4','#A52A2A',
'#DCDCDC','#E6E6FA','#BDB76B','#DEB887'
]*10
class ForecastError(Exception):
class CannotUndiff(Exception):
pass
class NoGrid(Exception):
pass
class PlottingError(Exception):
pass
class Forecaster:
def __init__(self,
y=pd.Series([]),
current_dates=pd.Series([]),
**kwargs):
self.y = y
self.current_dates = current_dates
self.future_dates = pd.Series([])
self.current_xreg = {} # values should be pandas series (to make differencing work more easily)
self.future_xreg = {} # values should be lists (to make iterative forecasting work more easily)
self.history = {}
self.test_length = 1
self.validation_length = 1
self.validation_metric = 'rmse'
self.integration = 0
for key, value in kwargs.items():
setattr(self,key,value)
self.typ_set() # ensures that the passed values are the right types
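    # Minimal construction sketch (illustrative only -- the values, dates, and 6-period test
    # length below are made-up assumptions, not defaults of this class):
    #
    #   dates = pd.Series(pd.date_range(start='2020-01-01', periods=36, freq='MS'))
    #   vals = pd.Series(range(36), dtype=float)
    #   f = Forecaster(y=vals, current_dates=dates, test_length=6)  # extra kwargs become attributes
    #   f.generate_future_dates(12)  # defined further down in this class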
def __str__(self):
models = self.history.keys()
if len(models) == 0:
first_prt = 'Forecaster object with no models evaluated.'
else:
first_prt = 'Forecaster object with the following models evaluated: {}.'.format(', '.join(models))
whole_thing = first_prt + ' Data starts at {}, ends at {}, loaded to forecast out {} periods, has {} regressors.'.format(self.current_dates.min(),self.current_dates.max(),len(self.future_dates),len(self.current_xreg.keys()))
return whole_thing
def __repr__(self):
if len(self.history.keys()) > 0:
return self.export('model_summaries')
return self.history
def _adder(self):
assert len(self.future_dates) > 0,'before adding regressors, please make sure you have generated future dates by calling generate_future_dates(), set_last_future_date(), or ingest_Xvars_df(use_future_dates=True)'
def _bank_history(self,**kwargs):
call_me = self.call_me
self.history[call_me] = {
'Estimator':self.estimator,
'Xvars':self.Xvars,
'HyperParams':{k:v for k,v in kwargs.items() if k not in ('Xvars','normalizer','auto')},
            'Scaler':kwargs['normalizer'] if 'normalizer' in kwargs.keys()
                     else None if self.estimator in ('prophet','combo') or hasattr(self,'univariate')
                     else 'minmax',
'Forecast':self.forecast[:],
'FittedVals':self.fitted_values[:],
'Tuned':kwargs['auto'],
'Integration':self.integration,
'TestSetLength':self.test_length,
'TestSetRMSE':self.rmse,
'TestSetMAPE':self.mape,
'TestSetMAE':self.mae,
'TestSetR2':self.r2,
'TestSetPredictions':self.test_set_pred[:],
'TestSetActuals':self.test_set_actuals[:],
'InSampleRMSE':rmse(self.y.values,self.fitted_values),
'InSampleMAPE':mape(self.y.values,self.fitted_values),
'InSampleMAE':mae(self.y.values,self.fitted_values),
'InSampleR2':r2(self.y.values,self.fitted_values),
}
if kwargs['auto']:
self.history[call_me]['ValidationSetLength'] = self.validation_length
self.history[call_me]['ValidationMetric'] = self.validation_metric
self.history[call_me]['ValidationMetricValue'] = self.validation_metric_value
for attr in ('univariate','first_obs','first_dates','grid_evaluated','models'):
if hasattr(self,attr):
self.history[call_me][attr] = getattr(self,attr)
if self.integration > 0:
first_obs = self.first_obs.copy()
fcst = self.forecast[::-1]
integration = self.integration
y = self.y.to_list()[::-1]
pred = self.history[call_me]['TestSetPredictions'][::-1]
if integration == 2:
first_ = first_obs[1] - first_obs[0]
y.append(first_)
y = list(np.cumsum(y[::-1]))[::-1]
y.append(first_obs[0])
y = list(np.cumsum(y[::-1]))
fcst.append(y[-1])
fcst = list(np.cumsum(fcst[::-1]))[1:]
            pred.append(y[-(len(pred) + 1)]) # seed with the level value observed just before the test set starts
pred = list(np.cumsum(pred[::-1]))[1:]
if integration == 2:
fcst.reverse()
fcst.append(self.y.values[-2] + self.y.values[-1])
fcst = list(np.cumsum(fcst[::-1]))[1:]
pred.reverse()
                pred.append(self.y.values[-(len(pred) + 2)] + self.y.values[-(len(pred) + 1)])
pred = list(np.cumsum(pred[::-1]))[1:]
self.history[call_me]['LevelForecast'] = fcst[:]
self.history[call_me]['LevelY'] = y[integration:]
self.history[call_me]['LevelTestSetPreds'] = pred
self.history[call_me]['LevelTestSetRMSE'] = rmse(y[-len(pred):],pred)
self.history[call_me]['LevelTestSetMAPE'] = mape(y[-len(pred):],pred)
self.history[call_me]['LevelTestSetMAE'] = mae(y[-len(pred):],pred)
self.history[call_me]['LevelTestSetR2'] = r2(y[-len(pred):],pred)
else: # better to have these attributes populated for all series
self.history[call_me]['LevelForecast'] = self.forecast[:]
self.history[call_me]['LevelY'] = self.y.to_list()
self.history[call_me]['LevelTestSetPreds'] = self.test_set_pred[:]
self.history[call_me]['LevelTestSetRMSE'] = self.rmse
self.history[call_me]['LevelTestSetMAPE'] = self.mape
self.history[call_me]['LevelTestSetMAE'] = self.mae
self.history[call_me]['LevelTestSetR2'] = self.r2
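    # Worked sketch of the undifferencing performed above (illustrative numbers): if the stored
    # series holds first differences d = [2, -1, 3] and the first observed level is 10, then
    # np.cumsum([10, 2, -1, 3]) = [10, 12, 11, 14] recovers the level series. Forecasts are put
    # back on the level scale the same way, seeded with the last known level, while test-set
    # predictions are seeded with the level observed just before the test window begins.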
def _set_summary_stats(self):
results_summary = self.regr.summary()
results_as_html = results_summary.tables[1].as_html()
self.summary_stats = pd.read_html(results_as_html, header=0, index_col=0)[0]
def _bank_fi_to_history(self):
call_me = self.call_me
self.history[call_me]['feature_importance'] = self.feature_importance
def _bank_summary_stats_to_history(self):
call_me = self.call_me
self.history[call_me]['summary_stats'] = self.summary_stats
def _parse_normalizer(self,X_train,normalizer):
if normalizer == 'minmax':
from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler()
scaler.fit(X_train)
elif normalizer == 'scale':
from sklearn.preprocessing import Normalizer
scaler = Normalizer()
scaler.fit(X_train)
else:
scaler = None
return scaler
def _train_test_split(self,X,y,test_size):
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X,y,test_size=test_size,shuffle=False)
return X_train, X_test, y_train, y_test
def _metrics(self,y,pred):
self.test_set_actuals = list(y)
self.test_set_pred = list(pred)
self.rmse = rmse(y,pred)
self.r2 = r2(y,pred)
self.mae = mae(y,pred)
self.mape = mape(y,pred)
def _tune(self):
metric = getattr(self,getattr(self,'validation_metric'))
for attr in ('r2','rmse','mape','mae','test_set_pred','test_set_actuals'):
delattr(self,attr)
return metric
def _scale(self,scaler,X):
if not scaler is None:
return scaler.transform(X)
else:
return X
def _clear_the_deck(self):
for attr in ('univariate','fitted_values','regr','X','feature_importance','summary_stats','models'):
try:
delattr(self,attr)
except AttributeError:
pass
def _prepare_sklearn(self,tune,Xvars):
if Xvars is None:
Xvars = list(self.current_xreg.keys())
if tune:
y = self.y.to_list()[:-self.test_length]
X = pd.DataFrame({k:v.to_list() for k, v in self.current_xreg.items()}).iloc[:-self.test_length,:]
test_size = self.validation_length
else:
y = self.y.to_list()
X = pd.DataFrame({k:v.to_list() for k, v in self.current_xreg.items()})
test_size = self.test_length
X = X[Xvars]
self.Xvars = Xvars
return Xvars, y, X, test_size
def _forecast_sklearn(self,scaler,regr,X,y,Xvars,future_dates,future_xreg,true_forecast=False):
if true_forecast:
self._clear_the_deck()
X = self._scale(scaler,X)
regr.fit(X,y)
if true_forecast:
self.regr = regr
self.X = X
self.fitted_values = list(regr.predict(X))
if len([x for x in self.current_xreg.keys() if x.startswith('AR')]) > 0:
fcst = []
for i, _ in enumerate(future_dates):
p = pd.DataFrame({k:[v[i]] for k,v in future_xreg.items() if k in Xvars})
p = self._scale(scaler,p)
fcst.append(regr.predict(p)[0])
if not i == len(future_dates) - 1:
for k, v in future_xreg.items():
if k.startswith('AR'):
ar = int(k[2:])
idx = i + 1 - ar
if idx > -1:
try:
future_xreg[k][i+1] = fcst[idx]
except IndexError:
future_xreg[k].append(fcst[idx])
else:
try:
future_xreg[k][i+1] = self.y.values[idx]
except IndexError:
future_xreg[k].append(self.y.values[idx])
else:
p = pd.DataFrame(future_xreg)
p = self._scale(scaler,p)
fcst = list(regr.predict(p))
return fcst
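    # How the loop above propagates autoregressive terms (illustrative): with an 'AR1' regressor,
    # the first future period uses the last observed y as its AR1 input; once that period is
    # predicted, the prediction itself becomes the AR1 input for the next period, and so on.
    # E.g., if y ends at 9.0 and the fitted model effectively adds 1.0 per step, the recursion
    # yields 10.0, 11.0, 12.0, ... because each forecast is fed back in as the next AR1 value.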
def _full_sklearn(self,fcster,tune,Xvars,normalizer,**kwargs):
assert len(self.current_xreg.keys()) > 0,f'need at least 1 Xvar to forecast with the {self.estimator} model'
Xvars, y, X, test_size = self._prepare_sklearn(tune,Xvars)
X_train, X_test, y_train, y_test = self._train_test_split(X,y,test_size)
scaler = self._parse_normalizer(X_train,normalizer)
X_train = self._scale(scaler,X_train)
X_test = self._scale(scaler,X_test)
regr = fcster(**kwargs)
regr.fit(X_train,y_train)
pred = self._forecast_sklearn(scaler,regr,X_train,y_train,Xvars,self.current_dates.values[-test_size:], {x:v.values[-test_size:] for x,v in self.current_xreg.items()})
self._metrics(y_test,pred)
if tune:
return self._tune()
else:
return self._forecast_sklearn(scaler,regr,X,y,Xvars,self.future_dates,self.future_xreg.copy(),true_forecast=True)
def _forecast_mlp(self,tune=False,Xvars=None,normalizer='minmax',**kwargs):
""" normalizer: {'scale','minmax',None}, default 'minmax'
"""
from sklearn.neural_network import MLPRegressor as fcster
return self._full_sklearn(fcster,tune,Xvars,normalizer,**kwargs)
def _forecast_mlr(self,tune=False,Xvars=None,normalizer='minmax',**kwargs):
from sklearn.linear_model import LinearRegression as fcster
return self._full_sklearn(fcster,tune,Xvars,normalizer,**kwargs)
def _forecast_xgboost(self,tune=False,Xvars=None,normalizer='minmax',**kwargs):
from xgboost import XGBRegressor as fcster
return self._full_sklearn(fcster,tune,Xvars,normalizer,**kwargs)
def _forecast_gbt(self,tune=False,Xvars=None,normalizer='minmax',**kwargs):
from sklearn.ensemble import GradientBoostingRegressor as fcster
return self._full_sklearn(fcster,tune,Xvars,normalizer,**kwargs)
def _forecast_rf(self,tune=False,Xvars=None,normalizer='minmax',**kwargs):
from sklearn.ensemble import RandomForestRegressor as fcster
return self._full_sklearn(fcster,tune,Xvars,normalizer,**kwargs)
def _forecast_elasticnet(self,tune=False,Xvars=None,normalizer='minmax',**kwargs):
from sklearn.linear_model import ElasticNet as fcster
return self._full_sklearn(fcster,tune,Xvars,normalizer,**kwargs)
def _forecast_svr(self,tune=False,Xvars=None,normalizer='minmax',**kwargs):
from sklearn.svm import SVR as fcster
return self._full_sklearn(fcster,tune,Xvars,normalizer,**kwargs)
def _forecast_knn(self,tune=False,Xvars=None,normalizer='minmax',**kwargs):
from sklearn.neighbors import KNeighborsRegressor as fcster
return self._full_sklearn(fcster,tune,Xvars,normalizer,**kwargs)
def _forecast_hwes(self,tune=False,**kwargs):
from statsmodels.tsa.holtwinters import ExponentialSmoothing as HWES
y = self.y.to_list()
if tune:
y_train = y[:-(self.validation_length + self.test_length)]
y_test = y[-(self.test_length + self.validation_length):-self.test_length]
else:
y_train = y[:-self.test_length]
y_test = y[-self.test_length:]
self.Xvars = None
        hwes_train = HWES(y_train,dates=self.current_dates.values[:len(y_train)],freq=self.freq,**kwargs).fit(optimized=True,use_brute=True) # slice dates to len(y_train) so they align when tuning (validation split) as well as testing
pred = hwes_train.predict(start=len(y_train),end=len(y_train) + len(y_test) - 1)
self._metrics(y_test,pred)
if tune:
return self._tune()
else: # forecast
self._clear_the_deck()
self.univariate = True
self.X = None
regr = HWES(self.y,dates=self.current_dates,freq=self.freq,**kwargs).fit(optimized=True,use_brute=True)
self.fitted_values = list(regr.fittedvalues)
self.regr = regr
self._set_summary_stats()
return list(regr.predict(start=len(y),end=len(y) + len(self.future_dates) - 1))
def _forecast_arima(self,tune=False,Xvars=None,**kwargs):
""" Xvars = 'all' will use all Xvars except any "AR" terms since they are special and incorporated in the model already anyway
"""
from statsmodels.tsa.arima.model import ARIMA
Xvars_orig = Xvars
Xvars = [x for x in self.current_xreg.keys() if not x.startswith('AR')] if Xvars == 'all' else Xvars
Xvars, y, X, test_size = self._prepare_sklearn(tune,Xvars)
if len(self.current_xreg.keys()) > 0:
X_train, X_test, y_train, y_test = self._train_test_split(X,y,test_size)
else:
            y_train = self.y.values[:-test_size] # train on everything except the held-out window
y_test = self.y.values[-test_size:]
if Xvars_orig is None:
X, X_train, X_test = None, None, None
self.Xvars = None
        arima_train = ARIMA(y_train,exog=X_train,dates=self.current_dates.values[:len(y_train)],freq=self.freq,**kwargs).fit() # slice dates to len(y_train) so they align when tuning as well as testing
pred = arima_train.predict(exog=X_test,start=len(y_train),end=len(y_train) + len(y_test) - 1,typ='levels')
self._metrics(y_test,pred)
if tune:
return self._tune()
else:
self._clear_the_deck()
if Xvars_orig is None: self.univariate = True
self.X = X
regr = ARIMA(self.y.values[:],exog=X,dates=self.current_dates,freq=self.freq,**kwargs).fit()
self.fitted_values = list(regr.fittedvalues)
self.regr = regr
self._set_summary_stats()
p = pd.DataFrame({k:v for k,v in self.future_xreg.items() if k in self.Xvars}) if self.Xvars is not None else None
fcst = regr.predict(exog=p,start=len(y),end=len(y) + len(self.future_dates) - 1, typ = 'levels', dynamic = True)
return list(fcst)
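    # Hypothetical call sketch (the wrapper methods named here are assumed to exist elsewhere in
    # this class and are not shown in this file): kwargs pass straight through to statsmodels'
    # ARIMA, so something like
    #   f.set_estimator('arima')
    #   f.manual_forecast(order=(1,1,1), Xvars=None)
    # would fit a univariate ARIMA(1,1,1), while Xvars='all' would add every non-AR regressor as exog.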
def _forecast_prophet(self,tune=False,Xvars=None,cap=None,floor=None,**kwargs):
""" Xvars = 'all' will use all Xvars except any "AR" terms since they are special and incorporated in the model already anyway
"""
from fbprophet import Prophet
X = pd.DataFrame({k:v for k,v in self.current_xreg.items() if not k.startswith('AR')})
p = pd.DataFrame({k:v for k,v in self.future_xreg.items() if not k.startswith('AR')})
Xvars = [x for x in self.current_xreg.keys() if not x.startswith('AR')] if Xvars == 'all' else Xvars if Xvars is not None else []
if cap is not None: X['cap'] = cap
        if floor is not None: X['floor'] = floor
X['y'] = self.y.to_list()
X['ds'] = self.current_dates.to_list()
p['ds'] = self.future_dates.to_list()
model = Prophet(**kwargs)
for x in Xvars:
model.add_regressor(x)
if tune:
X_train = X.iloc[:-(self.test_length + self.validation_length)]
X_test = X.iloc[-(self.test_length + self.validation_length):-self.test_length]
y_test = X['y'].values[-(self.test_length + self.validation_length):-self.test_length]
model.fit(X_train)
pred = model.predict(X_test)
self._metrics(y_test,pred['yhat'].to_list())
return self._tune()
else:
model.fit(X.iloc[:-self.test_length])
pred = model.predict(X.iloc[-self.test_length:])
self._metrics(X['y'].values[-self.test_length:],pred['yhat'].to_list())
self._clear_the_deck()
self.X = X[Xvars]
if len(Xvars) == 0:
self.univariate = True
self.X = None
self.Xvars = Xvars if Xvars != [] else None
regr = Prophet(**kwargs)
regr.fit(X)
self.fitted_values = regr.predict(X)['yhat'].to_list()
self.regr = regr
fcst = regr.predict(p)
return fcst['yhat'].to_list()
def _forecast_combo(self,how='simple',models='all',determine_best_by='ValidationMetricValue',rebalance_weights=.1,weights=None,splice_points=None):
""" how: one of {'simple','weighted','splice'}, default 'simple'
the type of combination
all test lengths must be the same for all combined models
models: 'all', starts with "top_", or list-like, default 'all'
which models to combine
must be at least 2 in length
if using list-like object, elements must match model nicknames specified in call_me when forecasting
        determine_best_by: one of {'TestSetRMSE','TestSetMAPE','TestSetMAE','TestSetR2','InSampleRMSE','InSampleMAPE','InSampleMAE','InSampleR2','ValidationMetricValue','LevelTestSetRMSE','LevelTestSetMAPE','LevelTestSetMAE','LevelTestSetR2',None}, default 'ValidationMetricValue'
            'TestSetRMSE','TestSetMAPE','TestSetMAE','TestSetR2','LevelTestSetRMSE','LevelTestSetMAPE','LevelTestSetMAE','LevelTestSetR2' will probably lead to overfitting (data leakage from the test set)
            'InSampleRMSE','InSampleMAPE','InSampleMAE','InSampleR2' will probably lead to overfitting since the in-sample period includes the test set and overfitted models are weighted more highly
'ValidationMetricValue' is the safest option to avoid overfitting, but only works if all combined models were tuned and the validation metric was the same for all models
rebalance_weights: float, default 0.1
a minmax/maxmin scaler is used to perform the weighted average, but this method means the worst performing model on the test set is always weighted 0
to correct that so that all models have some weight in the final combo, you can rebalance the weights but specifying this parameter
the higher this is, the closer to a simple average the weighted average becomes
must be at least 0 -- 0 means the worst model is not given any weight
weights: list-like or None
only applicable when how='weighted'
overwrites determine_best_by with None and applies those weights, automatically rebalances weights to add to one with a minmax scaler unless they already add to one
if weights already add to one, rebalance_weights is ignored
splice_points: list-like
only applicable when how='splice'
elements in array must be str in yyyy-mm-dd or datetime object
must be exactly one less in length than the number of models
models[0] --> :splice_points[0]
models[-1] --> splice_points[-1]:
"""
determine_best_by = determine_best_by if weights is None else None
models = self._parse_models(models,determine_best_by)
assert len(models) > 1,f'need at least two models to average, got {models}'
fcsts = pd.DataFrame({m:h['Forecast'] for m,h in self.history.items() if m in models})
preds = pd.DataFrame({m:h['TestSetPredictions'] for m,h in self.history.items() if m in models})
fvs = pd.DataFrame({m:h['FittedVals'] for m,h in self.history.items() if m in models})
actuals = self.y.values[-preds.shape[0]:]
if how == 'weighted':
scale = True
if weights is None:
                weights = pd.DataFrame({m:[h[determine_best_by]] for m,h in self.history.items() if m in models}) # pull the determine_best_by metric for each model; scaling direction (higher vs. lower is better) is handled below
else:
assert len(weights) == len(models),'must pass as many weights as models'
assert not isinstance(weights,str),f'weights argument not recognized: {weights}'
weights = pd.DataFrame(zip(models,weights)).set_index(0).transpose()
if weights.sum(axis=1).values[0] == 1:
scale = False
rebalance_weights=0
try:
assert rebalance_weights >= 0,'when using a weighted average, rebalance_weights must be numeric and at least 0 in value'
if scale:
                    if (weights is not None) or determine_best_by.endswith('R2') or ((determine_best_by == 'ValidationMetricValue') & (self.validation_metric.upper() == 'R2')):
weights = (weights - weights.min(axis=1).values[0])/(weights.max(axis=1).values[0] - weights.min(axis=1).values[0]) # minmax scaler
else:
weights = (weights - weights.max(axis=1).values[0])/(weights.min(axis=1).values[0] - weights.max(axis=1).values[0]) # maxmin scaler
weights+=rebalance_weights # by default, add .1 to every value here so that every model gets some weight instead of 0 for the worst one
weights = weights/weights.sum(axis=1).values[0]
pred = (preds * weights.values[0]).sum(axis=1).to_list()
fv = (fvs * weights.values[0]).sum(axis=1).to_list()
fcst = (fcsts * weights.values[0]).sum(axis=1).to_list()
except ZeroDivisionError:
how = 'simple' # all models have the same test set metric value so it's a simple average (never seen this, but jic)
if how in ('simple','splice'):
pred = preds.mean(axis=1).to_list()
fv = fvs.mean(axis=1).to_list()
if how == 'simple':
fcst = fcsts.mean(axis=1).to_list()
elif how == 'splice':
assert len(models) == len(splice_points) + 1,'must have exactly 1 more model passed to models as splice points passed to splice_points'
splice_points = pd.to_datetime(sorted(splice_points)).to_list()
future_dates = self.future_dates.to_list()
assert np.array([p in future_dates for p in splice_points]).all(), 'all elements in splice_points must be datetime objects or str in yyyy-mm-dd format and must be in future_dates attribute'
fcst = [None]*len(future_dates)
start = 0
for i, _ in enumerate(splice_points):
end = [idx for idx,v in enumerate(future_dates) if v == splice_points[i]][0]
fcst[start:end] = fcsts[models[i]].values[start:end]
start = end
fcst[start:] = fcsts[models[-1]].values[start:]
self._metrics(actuals,pred)
self._clear_the_deck()
self.models = models
self.fitted_values = fv
self.Xvars = None
self.X = None
self.regr = None
return fcst
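    # Example (sketch): this method is normally reached through set_estimator()/
    # manual_forecast(); the lines below assume `f` is an instance of this class,
    # that 'combo' is registered in _estimators_, and that at least three models
    # have already been evaluated and stored in f.history.
    #   f.set_estimator('combo')
    #   f.manual_forecast(how='weighted', models='top_3',
    #                     determine_best_by='ValidationMetricValue',
    #                     call_me='weighted_avg')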
def _parse_models(self,models,determine_best_by):
if determine_best_by is None:
if models[:4] == 'top_':
                raise ValueError('cannot use models that start with "top_" unless the determine_best_by or order_by argument is specified and not None')
elif models == 'all':
models = list(self.history.keys())
elif isinstance(models,str):
models = [models]
else:
models = list(models)
if len(models) == 0:
raise ValueError(f'models argument with determine_best_by={determine_best_by} returns no evaluated forecasts')
else:
all_models = [m for m,d in self.history.items() if determine_best_by in d.keys()]
all_models = self.order_fcsts(all_models,determine_best_by)
if models == 'all':
models = all_models[:]
elif models[:4] == 'top_':
models = all_models[:int(models.split('_')[1])]
elif isinstance(models,str):
models = [models]
else:
models = [m for m in all_models if m in models]
return models
def infer_freq(self):
if not hasattr(self,'freq'):
self.freq = pd.infer_freq(self.current_dates)
self.current_dates.freq = self.freq
def fillna_y(self,how='ffill'):
""" how: {'backfill', 'bfill', 'pad', 'ffill', None}
"""
self.y = pd.Series(self.y)
if how != 'midpoint': # only works if there aren't more than 2 na one after another
self.y = self.y.fillna(method=how)
else:
for i, val in enumerate(self.y.values):
                if pd.isnull(val):
self.y.values[i] = (self.y.values[i-1] + self.y.values[i+1]) / 2
def generate_future_dates(self,n):
""" way to specify future forecast dates by specifying a forecast period
"""
self.infer_freq()
self.future_dates = pd.Series(pd.date_range(start=self.current_dates.values[-1],periods=n+1,freq=self.freq).values[1:])
def set_last_future_date(self,date):
""" way to specify future forecast dates by specifying the last desired forecasted date and letting pandas infer the dates in between
"""
self.infer_freq()
if isinstance(date,str):
date = datetime.datetime.strptime(date,'%Y-%m-%d')
self.future_dates = pd.Series(pd.date_range(start=self.current_dates.values[-1],end=date,freq=self.freq).values[1:])
def typ_set(self):
self.y = pd.Series(self.y).dropna().astype(np.float64)
self.current_dates = pd.to_datetime(pd.Series(list(self.current_dates)[-len(self.y):]),infer_datetime_format=True)
assert len(self.y) == len(self.current_dates)
self.future_dates = pd.to_datetime(pd.Series(self.future_dates),infer_datetime_format=True)
for k,v in self.current_xreg.items():
self.current_xreg[k] = pd.Series(list(v)[-len(self.y):]).astype(np.float64)
assert len(self.current_xreg[k]) == len(self.y)
self.future_xreg[k] = [float(x) for x in self.future_xreg[k]]
def diff(self,i=1):
if hasattr(self,'first_obs'):
raise TypeError('series has already been differenced, if you want to difference again, use undiff() first, then diff(2)')
assert i in (1,2),f'only 1st and 2nd order integrations supported for now, got i={i}'
self.first_obs = self.y.values[:i] # np array
self.first_dates = self.current_dates.values[:i] # np array
self.integration = i
for _ in range(i):
self.y = self.y.diff()
for k, v in self.current_xreg.items():
if k.startswith('AR'):
ar = int(k[2:])
for _ in range(i):
                    self.current_xreg[k] = self.current_xreg[k].diff()
self.future_xreg[k] = [self.y.values[-ar]]
if hasattr(self,'adf_stationary'):
delattr(self,'adf_stationary')
def add_ar_terms(self,n):
self._adder()
assert isinstance(n,int),f'n must be an int, got {n}'
assert n > 0,f'n must be greater than 0, got {n}'
assert self.integration == 0,"AR terms must be added before differencing (don't worry, they will be differenced too)"
for i in range(1,n+1):
self.current_xreg[f'AR{i}'] = pd.Series(np.roll(self.y,i))
self.future_xreg[f'AR{i}'] = [self.y.values[-i]]
def add_AR_terms(self,N):
""" seasonal AR terms
N: tuple of len 2 (P,m)
"""
self._adder()
assert (len(N) == 2) & (not isinstance(N,str)),f'n must be an array-like of length 2 (P,m), got {N}'
assert self.integration == 0,"AR terms must be added before differencing (don't worry, they will be differenced too)"
for i in range(N[1],N[1]*N[0] + 1,N[1]):
self.current_xreg[f'AR{i}'] = pd.Series(np.roll(self.y,i))
self.future_xreg[f'AR{i}'] = [self.y.values[-i]]
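    # Example (sketch): assuming `f` is an instance of this class,
    #   f.add_ar_terms(3)        # adds AR1, AR2, AR3
    #   f.add_AR_terms((2, 12))  # adds AR12 and AR24 (P=2 seasonal lags, m=12)
    # both must be called before diff(), since self.integration must still be 0.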
def ingest_Xvars_df(self,df,date_col='Date',drop_first=False,use_future_dates=False):
assert df.shape[0] == len(df[date_col].unique()), 'each date supplied must be unique'
df[date_col] = pd.to_datetime(df[date_col]).to_list()
df = df.loc[df[date_col] >= self.current_dates.values[0]]
df = pd.get_dummies(df,drop_first=drop_first)
current_df = df.loc[df[date_col].isin(self.current_dates)]
future_df = df.loc[df[date_col] > self.current_dates.values[-1]]
assert current_df.shape[0] == len(self.y), 'something went wrong--make sure the dataframe spans the entire daterange as y and is at least one observation to the future and specify a date column in date_col parameter'
if not use_future_dates:
assert future_df.shape[0] >= len(self.future_dates),'the future dates in the dataframe should be at least the same length as the future dates in the Forecaster object. if you desire to use the dataframe to set the future dates for the object, use use_future_dates=True'
else:
self.infer_freq()
self.future_dates = future_df[date_col]
for c in [c for c in future_df if c != date_col]:
self.future_xreg[c] = future_df[c].to_list()[:len(self.future_dates)]
self.current_xreg[c] = current_df[c]
for x,v in self.future_xreg.items():
self.future_xreg[x] = v[:len(self.future_dates)]
if not len(v) == len(self.future_dates):
                warnings.warn(f'warning: {x} is not the correct length in the future_dates attribute and this can cause errors when forecasting. its length is {len(v)} and future_dates length is {len(self.future_dates)}')
def set_test_length(self,n=1):
assert isinstance(n,int),f'n must be an int, got {n}'
self.test_length=n
def set_validation_length(self,n=1):
assert isinstance(n,int),f'n must be an int, got {n}'
        assert n > 0,f'n must be greater than 0, got {n}'
if (self.validation_metric == 'r2') & (n == 1):
raise ValueError('can only set a validation_length of 1 if validation_metric is not r2. try set_validation_metric()')
self.validation_length=n
def adf_test(self,critical_pval=0.05,quiet=True,full_res=False,**kwargs):
res = adfuller(self.y.dropna(),**kwargs)
if not full_res:
if res[1] <= critical_pval:
if not quiet:
print('series appears to be stationary')
self.adf_stationary = True
return True
else:
if not quiet:
print('series might not be stationary')
self.adf_stationary = False
return False
else:
return res
def plot_acf(self,diffy=False,**kwargs):
""" https://www.statsmodels.org/dev/generated/statsmodels.graphics.tsaplots.plot_acf.html
"""
y = self.y.dropna() if not diffy else self.y.diff().dropna()
return plot_acf(y.values,**kwargs)
def plot_pacf(self,diffy=False,**kwargs):
""" https://www.statsmodels.org/dev/generated/statsmodels.graphics.tsaplots.plot_pacf.html
"""
y = self.y.dropna() if not diffy else self.y.diff().dropna()
return plot_pacf(y.values,**kwargs)
def plot_periodogram(self,diffy=False):
""" https://www.statsmodels.org/0.8.0/generated/statsmodels.tsa.stattools.periodogram.html
"""
from scipy.signal import periodogram
y = self.y.dropna() if not diffy else self.y.diff().dropna()
return periodogram(y.values)
def seasonal_decompose(self,diffy=False,**kwargs):
""" https://www.statsmodels.org/stable/generated/statsmodels.tsa.seasonal.seasonal_decompose.html
"""
self.infer_freq()
y = self.y if not diffy else self.y.diff()
X = pd.DataFrame({'y':y.values},index=self.current_dates)
X.index.freq = self.freq
return seasonal_decompose(X.dropna(),**kwargs)
def add_seasonal_regressors(self,*args,raw=True,sincos=False,dummy=False,drop_first=False):
self._adder()
if not (raw|sincos|dummy):
raise ValueError('at least one of raw, sincos, dummy must be True')
for s in args:
try:
if s in ('week','weekofyear'):
_raw = getattr(self.current_dates.dt.isocalendar(),s)
else:
_raw = getattr(self.current_dates.dt,s)
except AttributeError:
raise ValueError(f'cannot set "{s}". see possible values here: https://pandas.pydata.org/docs/reference/api/pandas.Series.dt.year.html')
try:
_raw.astype(int)
except ValueError:
                raise ValueError(f'{s} must return an int; use dummy = True to get dummies')
if s in ('week','weekofyear'):
_raw_fut = getattr(self.future_dates.dt.isocalendar(),s)
else:
_raw_fut = getattr(self.future_dates.dt,s)
if raw:
self.current_xreg[s] = _raw
self.future_xreg[s] = _raw_fut.to_list()
if sincos:
_cycles = _raw.max() # not the best way to do this but usually good enough
self.current_xreg[f'{s}sin'] = np.sin(np.pi*_raw/(_cycles/2))
self.current_xreg[f'{s}cos'] = np.cos(np.pi*_raw/(_cycles/2))
self.future_xreg[f'{s}sin'] = np.sin(np.pi*_raw_fut/(_cycles/2)).to_list()
self.future_xreg[f'{s}cos'] = np.cos(np.pi*_raw_fut/(_cycles/2)).to_list()
if dummy:
all_dummies = []
stg_df = pd.DataFrame({s:_raw.astype(str)})
stg_df_fut = pd.DataFrame({s:_raw_fut.astype(str)})
for c,v in pd.get_dummies(stg_df,drop_first=drop_first).to_dict(orient='series').items():
self.current_xreg[c] = v
all_dummies.append(c)
for c,v in pd.get_dummies(stg_df_fut,drop_first=drop_first).to_dict(orient='list').items():
if c in all_dummies:
self.future_xreg[c] = v
for c in all_dummies:
if c not in self.future_xreg.keys():
self.future_xreg[c] = [0]*len(self.future_dates)
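    # Example (sketch): assuming `f` is an instance of this class with monthly data,
    #   f.add_seasonal_regressors('month', raw=False, sincos=True)   # adds monthsin/monthcos
    #   f.add_seasonal_regressors('quarter', raw=False, dummy=True, drop_first=True)
    # 'week'/'weekofyear' are routed through dt.isocalendar(), as handled above.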
def add_time_trend(self,called='t'):
self._adder()
self.current_xreg[called] = pd.Series(range(len(self.y)))
        self.future_xreg[called] = list(range(len(self.y), len(self.y) + len(self.future_dates)))
assert len(self.future_xreg[called]) == len(self.future_dates)
def add_other_regressor(self,called,start,end):
self._adder()
if isinstance(start,str):
start = datetime.datetime.strptime(start,'%Y-%m-%d')
if isinstance(end,str):
end = datetime.datetime.strptime(end,'%Y-%m-%d')
self.current_xreg[called] = pd.Series([1 if (x >= start) & (x <= end) else 0 for x in self.current_dates])
self.future_xreg[called] = [1 if (x >= start) & (x <= end) else 0 for x in self.future_dates]
def add_covid19_regressor(self,called='COVID19',start=datetime.datetime(2020,3,15),end=datetime.datetime(2021,5,13)): # default is from when disney world closed to the end of the national (USA) mask mandate
self._adder()
self.add_other_regressor(called=called,start=start,end=end)
def add_combo_regressors(self,*args,sep='_'):
self._adder()
assert len(args) > 1,'need at least two variables to combine regressors'
for i,a in enumerate(args):
assert not a.startswith('AR'),'no combining AR terms at this time -- it confuses the forecasting mechanism'
if i == 0:
self.current_xreg[sep.join(args)] = self.current_xreg[a]
self.future_xreg[sep.join(args)] = self.future_xreg[a]
else:
self.current_xreg[sep.join(args)] = pd.Series([a*b for a, b in zip(self.current_xreg[sep.join(args)],self.current_xreg[a])])
self.future_xreg[sep.join(args)] = [a*b for a, b in zip(self.future_xreg[sep.join(args)],self.future_xreg[a])]
def add_poly_terms(self,*args,pwr=2,sep='^'):
self._adder()
for a in args:
assert not a.startswith('AR'),'no polynomial AR terms at this time -- it confuses the forecasting mechanism'
for i in range(2,pwr+1):
self.current_xreg[f'{a}{sep}{i}'] = self.current_xreg[a]**i
self.future_xreg[f'{a}{sep}{i}'] = [x**i for x in self.future_xreg[a]]
def undiff(self,suppress_error=False):
""" always drops all regressors (to make sure lengths are right) -- just re-add them if you still want them
if the series hasn't been differenced yet, will do nothing else except raise an error -- in this case, use suppress_error to control exceptions
"""
self.current_xreg = {}
self.future_xreg = {}
if self.integration == 0:
if suppress_error:
return
else:
raise ForecastError.CannotUndiff('cannot undiff a series that was never differenced')
first_obs = self.first_obs.copy()
first_dates = list(self.first_dates.copy())
integration = self.integration
for attr in ('first_obs','first_dates'):
delattr(self,attr)
y = self.y.to_list()[::-1]
current_dates = self.current_dates.to_list()[::-1]
if integration == 2:
first_ = first_obs[1] - first_obs[0]
y.append(first_)
y = list(np.cumsum(y[::-1]))[::-1]
y.append(first_obs[0])
y = np.cumsum(y[::-1])
current_dates += first_dates
self.current_dates = pd.Series(current_dates[::-1])
self.y = pd.Series(y)
assert (len(self.current_dates) == len(self.y))
self.integration = 0
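    # Example (sketch): a typical differencing workflow, assuming `f` is an
    # instance of this class:
    #   f.adf_test()   # check stationarity
    #   f.diff()       # first-order difference (use diff(2) for second order)
    #   ...            # evaluate forecasts at the differenced level
    #   f.undiff()     # restore the original level (drops all regressors)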
def set_estimator(self,which):
"""which: {arima, linear, logistic, boosted_tree, rf, mlp, nnetar, prophet, hwes}"""
assert which in _estimators_,f'which must be one of {_estimators_}'
self.typ_set()
if hasattr(self,'estimator'):
if which != self.estimator:
for attr in ('grid','grid_evaluated','best_params','validation_metric_value'):
if hasattr(self,attr):
delattr(self,attr)
self.estimator = which
else:
self.estimator = which
def ingest_grid(self,grid):
from itertools import product
expand_grid = lambda d: pd.DataFrame([row for row in product(*d.values())],columns=d.keys())
if isinstance(grid,str):
import Grids
grid = getattr(Grids,grid)
grid = expand_grid(grid)
self.grid = grid
def limit_grid_size(self,n):
if n >= 1:
self.grid = self.grid.sample(n=min(n,self.grid.shape[0])).reset_index(drop=True)
elif (n < 1) & (n > 0):
self.grid = self.grid.sample(frac=n).reset_index(drop=True)
else:
            raise ValueError(f'argument passed to n not usable: {n}')
def set_validation_metric(self,which='rmse'):
if (which == 'r2') & (self.validation_length < 2):
raise ValueError('can only validate with r2 if the validation length is at least 2, try set_validation_length()')
self.validation_metric = which
def tune(self):
if not hasattr(self,'grid'):
try:
self.ingest_grid(self.estimator)
except SyntaxError:
raise
except:
raise ForecastError.NoGrid(f'to tune, a grid must be loaded. we tried to load a grid called {self.estimator}, but either the Grids.py file could not be found in the current directory or there is no grid with that name. try ingest_grid() with a dictionary grid passed manually.')
if self.estimator == 'combo':
raise ForecastError('combo models cannot be tuned')
self.best_params = {}
return
metrics = []
for i, v in self.grid.iterrows():
try:
metrics.append(getattr(self,f'_forecast_{self.estimator}')(tune=True,**v))
except TypeError:
raise
except Exception as e:
self.grid.drop(i,axis=0,inplace=True)
                warnings.warn(f'could not evaluate the parameters: {dict(v)}. error: {e}')
if len(metrics) > 0:
self.grid_evaluated = self.grid.copy()
self.grid_evaluated['validation_length'] = self.validation_length
self.grid_evaluated['validation_metric'] = self.validation_metric
self.grid_evaluated['metric_value'] = metrics
if self.validation_metric == 'r2':
best_params_idx = self.grid.loc[self.grid_evaluated['metric_value'] == self.grid_evaluated['metric_value'].max()].index.to_list()[0]
self.best_params = dict(self.grid.loc[best_params_idx])
else:
best_params_idx = self.grid.loc[self.grid_evaluated['metric_value'] == self.grid_evaluated['metric_value'].min()].index.to_list()[0]
self.best_params = dict(self.grid.loc[best_params_idx])
self.validation_metric_value = self.grid_evaluated.loc[best_params_idx,'metric_value']
else:
warnings.warn(f'none of the keyword/value combos stored in the grid could be evaluated for the {self.estimator} model')
self.best_params = {}
def manual_forecast(self,call_me=None,**kwargs):
call_me = self.estimator if call_me is None else call_me
assert isinstance(call_me,str),'call_me must be a str type or None'
self.forecast = getattr(self,f'_forecast_{self.estimator}')(**kwargs)
self.call_me = call_me
self._bank_history(auto=False,**kwargs)
def auto_forecast(self,call_me=None):
call_me = self.estimator if call_me is None else call_me
assert isinstance(call_me,str),'call_me must be a str type or None'
if not hasattr(self,'best_params'):
warnings.warn(f'since tune() has not been called, {self.estimator} model will be run with default parameters')
self.best_params = {}
self.forecast = getattr(self,f'_forecast_{self.estimator}')(**self.best_params)
self.call_me = call_me
self._bank_history(auto=len(self.best_params.keys()) > 0,**self.best_params)
def save_feature_importance(self,quiet=True):
import eli5
from eli5.sklearn import PermutationImportance
try:
perm = PermutationImportance(self.regr).fit(self.X,self.y)
except TypeError:
if not quiet: print(f'cannot set feature importance on the {self.estimator} model')
return
self.feature_importance = eli5.explain_weights_df(perm,feature_names=self.history[self.call_me]['Xvars']).set_index('feature')
self._bank_fi_to_history()
def save_summary_stats(self,quiet=True):
if not hasattr(self,'summary_stats'):
if not quiet: print('last model run does not have summary stats')
return
self._bank_summary_stats_to_history()
def keep_smaller_history(self,n):
if isinstance(n,str):
n = datetime.datetime.strptime(n,'%Y-%m-%d')
if (type(n) is datetime.datetime) or (type(n) is pd.Timestamp):
n = len([i for i in self.current_dates if i >= n])
assert (isinstance(n,int)) & (n > 2), 'n must be an int, datetime object, or str in yyyy-mm-dd format and there must be more than 2 observations to keep'
self.y = self.y[-n:]
self.current_dates = self.current_dates[-n:]
for k, v in self.current_xreg.items():
self.current_xreg[k] = v[-n:]
def order_fcsts(self,models,determine_best_by='TestSetRMSE'):
assert determine_best_by in _determine_best_by_,f'determine_best_by must be one of {_determine_best_by_}, got {determine_best_by}'
models_metrics = {m:self.history[m][determine_best_by] for m in models}
x = [h[0] for h in Counter(models_metrics).most_common()]
return x if (determine_best_by.endswith('R2')) | ((determine_best_by == 'ValidationMetricValue') & (self.validation_metric.upper() == 'R2')) else x[::-1]
def get_regressor_names(self):
return [k for k in self.current_xreg.keys()]
def get_freq(self):
return self.freq
def validate_regressor_names(self):
try:
assert sorted(self.current_xreg.keys()) == sorted(self.future_xreg.keys())
except AssertionError:
case1 = [k for k in self.current_xreg.keys() if k not in self.future_xreg.keys()]
case2 = [k for k in self.future_xreg.keys() if k not in self.current_xreg.keys()]
raise ValueError(f'the following regressors are in current_xreg but not future_xreg: {case1}\nthe following regressors are in future_xreg but not current_xreg {case2}')
def plot(self,models='all',order_by=None,level=False,print_attr=[]):
try:
models = self._parse_models(models,order_by)
except ValueError:
sns.lineplot(x=self.current_dates.values,y=self.y.values,label='actuals')
plt.legend()
plt.xlabel('Date')
plt.ylabel('Values')
plt.title('Plot of y Vals')
plt.show()
return
integration = set([d['Integration'] for m,d in self.history.items() if m in models])
if len(integration) > 1:
level = True
y = self.y.copy()
if self.integration == 0:
for _ in range(max(integration)):
y = y.diff()
plot = {
'date':self.current_dates.to_list()[-len(y.dropna()):] if not level else self.current_dates.to_list()[-len(self.history[models[0]]['LevelY']):],
'actuals':y.dropna().to_list() if not level else self.history[models[0]]['LevelY'],
}
print_attr_map = {}
sns.lineplot(x=plot['date'],y=plot['actuals'],label='actuals')
for i, m in enumerate(models):
plot[m] = self.history[m]['Forecast'] if not level else self.history[m]['LevelForecast']
sns.lineplot(x=self.future_dates.to_list(),y=plot[m],color=_colors_[i],label=m)
print_attr_map[m] = {a:self.history[m][a] for a in print_attr if a in self.history[m].keys()}
for m, d in print_attr_map.items():
for k, v in d.items():
print(f'{m} {k}: {v}')
plt.legend()
plt.xlabel('Date')
plt.ylabel('Values')
plt.title('Forecast Results')
plt.show()
def plot_test_set(self,models='all',order_by=None,include_train=True,level=False):
models = self._parse_models(models,order_by)
integration = set([d['Integration'] for m,d in self.history.items() if m in models])
if len(integration) > 1:
level = True
y = self.y.copy()
if self.integration == 0:
for _ in range(max(integration)):
y = y.diff()
plot = {
'date':self.current_dates.to_list()[-len(y.dropna()):] if not level else self.current_dates.to_list()[-len(self.history[models[0]]['LevelY']):],
'actuals':y.dropna().to_list() if not level else self.history[models[0]]['LevelY'],
}
if str(include_train).isnumeric():
assert (include_train > 1) & isinstance(include_train,int), f'include_train must be a bool type or an int greater than 1, got {include_train}'
plot['actuals'] = plot['actuals'][-include_train:]
plot['date'] = plot['date'][-include_train:]
elif isinstance(include_train,bool):
if not include_train:
plot['actuals'] = plot['actuals'][-self.test_length:]
plot['date'] = plot['date'][-self.test_length:]
else:
raise ValueError(f'include_train argument not recognized: ({include_train})')
sns.lineplot(x=plot['date'],y=plot['actuals'],label='actuals')
for i, m in enumerate(models):
plot[m] = self.history[m]['TestSetPredictions'] if not level else self.history[m]['LevelTestSetPreds']
test_dates = self.current_dates.to_list()[-len(plot[m]):]
sns.lineplot(x=test_dates,y=plot[m],linestyle='--',color=_colors_[i],alpha=0.7,label=m)
plt.legend()
plt.xlabel('Date')
plt.ylabel('Values')
plt.title('Test Set Results')
plt.show()
def plot_fitted(self,models='all',order_by=None):
models = self._parse_models(models,order_by)
integration = set([d['Integration'] for m,d in self.history.items() if m in models])
if len(integration) > 1:
raise ForecastError.PlottingError('cannot plot fitted values when forecasts run at different levels')
y = self.y.copy()
if self.integration == 0:
for _ in range(max(integration)):
y = y.diff()
plot = {
'date':self.current_dates.to_list()[-len(y.dropna()):],
'actuals':y.dropna().to_list(),
}
sns.lineplot(x=plot['date'],y=plot['actuals'],label='actuals')
for i, m in enumerate(models):
plot[m] = self.history[m]['FittedVals']
sns.lineplot(x=plot['date'][-len(plot[m]):],y=plot[m][-len(plot['date']):],linestyle='--',color=_colors_[i],alpha=0.7,label=m)
plt.legend()
plt.xlabel('Date')
plt.ylabel('Values')
plt.title('Fitted Values')
plt.show()
def drop_regressors(self,*args):
for a in args:
self.current_xreg.pop(a)
self.future_xreg.pop(a)
def pop(self,*args):
for a in args:
self.history.pop(a)
def export(self,
dfs=['all_fcsts','model_summaries','best_fcst','test_set_predictions','lvl_fcsts'],
models='all',
best_model='auto',
determine_best_by='TestSetRMSE',
to_excel=False,
out_path='./',
excel_name='results.xlsx'):
if isinstance(dfs,str):
dfs = [dfs]
else:
dfs = list(dfs)
if len(dfs) == 0:
raise ValueError('no dfs passed to dfs')
determine_best_by = determine_best_by if best_model == 'auto' else None
models = self._parse_models(models,determine_best_by)
_dfs_ = ['all_fcsts','model_summaries','best_fcst','test_set_predictions','lvl_fcsts']
_bad_dfs_ = [i for i in dfs if i not in _dfs_]
if len(_bad_dfs_) > 0:
raise ValueError(f'the values passed to the dfs list not valid: {_bad_dfs_}')
best_fcst_name = self.order_fcsts(models,determine_best_by)[0] if best_model == 'auto' else best_model
output = {}
if 'all_fcsts' in dfs:
all_fcsts = pd.DataFrame({'DATE':self.future_dates.to_list()})
for m in self.history.keys():
all_fcsts[m] = self.history[m]['Forecast']
output['all_fcsts'] = all_fcsts
if 'model_summaries' in dfs:
cols = [
'ModelNickname',
'Estimator',
'Xvars',
'HyperParams',
'Scaler',
'Tuned',
'Integration',
'TestSetLength',
'TestSetRMSE',
'TestSetMAPE',
'TestSetMAE',
'TestSetR2',
'LastTestSetPrediction',
'LastTestSetActual',
'InSampleRMSE',
'InSampleMAPE',
'InSampleMAE',
'InSampleR2',
'ValidationSetLength',
'ValidationMetric',
'ValidationMetricValue',
'univariate',
'models',
'integration',
'LevelTestSetRMSE',
'LevelTestSetMAPE',
'LevelTestSetMAE',
'LevelTestSetR2',
'best_model'
]
model_summaries = pd.DataFrame()
for m in models:
model_summary_m = pd.DataFrame({'ModelNickname':[m]})
for c in cols:
if c not in ('ModelNickname','LastTestSetPrediction','LastTestSetActual','best_model'):
model_summary_m[c] = [self.history[m][c] if c in self.history[m].keys() else None]
elif c == 'LastTestSetPrediction':
model_summary_m[c] = [self.history[m]['TestSetPredictions'][-1]]
elif c == 'LastTestSetActual':
model_summary_m[c] = [self.history[m]['TestSetActuals'][-1]]
elif c == 'best_model':
model_summary_m[c] = (m == best_fcst_name)
model_summaries = | pd.concat([model_summaries,model_summary_m],ignore_index=True) | pandas.concat |
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim.lr_scheduler import StepLR
from torch.utils.data.dataset import random_split
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
import pandas as pd
#import dask.dataframe as pd
TRAIN_CSV = "/home/liuliang/deep_learning/PyTorch_mess-around/kaggle/train.csv"
TEST_CSV = "/home/liuliang/deep_learning/PyTorch_mess-around/kaggle/test.csv"
OUT_CSV = "/home/liuliang/deep_learning/PyTorch_mess-around/kaggle/sub-digit-recognizer.csv"
def build_trainval_loaders(datafield, batch_size=64, val_size=0.1):
data_n = np.array(datafield)
data_x_t = torch.tensor(data_n[:,1:]).float().reshape(-1, 1, 28, 28)
data_x_t = data_x_t.float() / 255 # normalize
data_y_t = torch.tensor(data_n[:,0])
dataset = torch.utils.data.TensorDataset(data_x_t, data_y_t)
# split for validation set
val_ds_size = int(len(dataset) * val_size)
sizes = [len(dataset) - val_ds_size, val_ds_size]
train_dataset, val_dataset = random_split(dataset, sizes)
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
val_loader = torch.utils.data.DataLoader(val_dataset, batch_size=batch_size, shuffle=True)
return train_loader, val_loader
def build_test_loader(datafield, batch_size=64):
data_n = np.array(datafield)
data_x_t = torch.tensor(data_n).float().reshape(-1, 1, 28, 28)
data_x_t = data_x_t.float() / 255 # normalize
dataset = torch.utils.data.TensorDataset(data_x_t)
return torch.utils.data.DataLoader(dataset, batch_size=batch_size, shuffle=False)
model = nn.Sequential(
nn.Conv2d(1, 32, (3, 3)),
nn.ReLU(),
nn.MaxPool2d((2, 2)),
nn.Conv2d(32, 64, (3, 3)),
nn.ReLU(),
nn.MaxPool2d((2, 2)),
nn.Dropout(0.2),
nn.Conv2d(64, 64, (3, 3)),
nn.Dropout(0.2),
nn.Flatten(1, -1),
nn.Linear(576, 256),
nn.ReLU(),
nn.Dropout(0.4),
nn.Linear(256, 10),
)
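# Note on the Linear(576, 256) input size (added comment): with 1x28x28 inputs,
# Conv2d(3x3) -> 26x26, MaxPool2d(2) -> 13x13, Conv2d(3x3) -> 11x11,
# MaxPool2d(2) -> 5x5, Conv2d(3x3) -> 3x3 over 64 channels,
# so Flatten produces 64 * 3 * 3 = 576 features per image.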
def train(model, loss_fn, optimizer, data_loader):
model.train()
losses = 0
losses_cnt = 0
correct_cnt = 0
total_cnt = 0
for x, y in data_loader:
out = model(x)
correct_cnt += int(sum(y == torch.argmax(out, dim=1)))
total_cnt += len(x)
loss = loss_fn(out, y)
optimizer.zero_grad()
loss.backward()
optimizer.step()
# detach the loss or graph doesn't get freed and memory keeps
# increasing
losses += loss.detach().item()
losses_cnt += 1
return losses / losses_cnt, correct_cnt / total_cnt
def evaluate(model, loss_fn, data_loader):
model.eval()
# validate
losses = 0
losses_cnt = 0
correct_cnt = 0
total_cnt = 0
with torch.no_grad():
for x, y in data_loader:
out = model(x)
loss = loss_fn(out, y)
correct_cnt += int(sum(y == torch.argmax(out, dim=1)))
total_cnt += len(x)
# detach the loss or graph doesn't get freed and memory keeps
# increasing
losses += loss.detach().item()
losses_cnt += 1
return losses / losses_cnt, correct_cnt / total_cnt
def create_submission(out_csv, model, data_loader):
model.eval()
offset = 0
# write header
output = pd.DataFrame({"ImageId": [], "Label": []})
output.to_csv(out_csv, index=False)
with torch.no_grad():
for x in data_loader:
out = model(x[0])
out = torch.argmax(out, dim=1)
# append entries
end = offset+len(x[0])
ids = range(offset+1, end+1)
offset = end
output = pd.DataFrame({"ImageId": ids, "Label": out})
output.to_csv(out_csv, mode='a', header=False, index=False)
def main():
n_epochs = 15
batch_size = 32
print("Prepare data")
    train_loader, val_loader = build_trainval_loaders(pd.read_csv(TRAIN_CSV), batch_size=batch_size)
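    # --- hedged continuation sketch: the original file is truncated here. ---
    # The rest of main() is an assumption based on the helpers defined above
    # (train, evaluate, build_test_loader, create_submission) and the imports
    # at the top (optim, StepLR); the learning rate, step size and gamma are
    # illustrative choices, not the original author's values.
    loss_fn = nn.CrossEntropyLoss()
    optimizer = optim.Adam(model.parameters(), lr=1e-3)
    scheduler = StepLR(optimizer, step_size=5, gamma=0.5)
    for epoch in range(n_epochs):
        train_loss, train_acc = train(model, loss_fn, optimizer, train_loader)
        val_loss, val_acc = evaluate(model, loss_fn, val_loader)
        scheduler.step()
        print(f"epoch {epoch + 1}: train loss {train_loss:.4f} acc {train_acc:.4f} | "
              f"val loss {val_loss:.4f} acc {val_acc:.4f}")
    test_loader = build_test_loader(pd.read_csv(TEST_CSV), batch_size=batch_size)
    create_submission(OUT_CSV, model, test_loader)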
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn.metrics import confusion_matrix
from sklearn.externals import joblib
from matplotlib import pyplot as plt
import numpy as np
import os
dir_path = os.path.dirname(os.path.realpath(__file__))
# Load Yelp Data
businesses = | pd.read_json(dir_path + '/yelp_data/yelp_business.json',lines=True) | pandas.read_json |
#!/usr/bin/python3
from sys import argv
import sys
#from PyQt5 import QtCore, QtGui, uic, QtWidgets
#from PyQt5.QtWebEngineWidgets import *
#from PyQt5.QtCore import QUrl
import numpy as np
from jupyter_dash import JupyterDash
import pandas as pd
from pandas.plotting import scatter_matrix
import matplotlib.pyplot as plt
import matplotlib
matplotlib.use('Agg')
from sklearn import linear_model
plt.rcParams["figure.figsize"] = (15,15)
import math
import seaborn as sns
import shap
#from datetime import datetime
import time
from ipywidgets.embed import embed_minimal_html
from umap import UMAP
from pandas_profiling import ProfileReport
from sklearn.neighbors import kneighbors_graph
from prophet import Prophet
import umap
from lightgbm import LGBMRegressor,LGBMClassifier
from sklearn.preprocessing import *
from sklearn.decomposition import *
from sklearn.manifold import *
from sklearn.pipeline import make_pipeline
from sklearn.utils import estimator_html_repr
import sklearn
from sklearn.pipeline import Pipeline
from sklearn.compose import ColumnTransformer
from sklearn.ensemble import *
from sklearn.linear_model import *
import networkx as nx
from prophet.plot import plot_plotly, plot_components_plotly
import calendar
from prophet.utilities import regressor_coefficients
import plotly.express as px
#from jupyter_dash import JupyterDash
import dash_core_components as dcc
import dash_bootstrap_components as dbc
import dash_html_components as html
from dash.dependencies import Input, Output
import base64
import numpy as np
import pandas as pd
from io import StringIO
import io
import dash
from dash.dependencies import Input, Output, State
import dash_html_components as html
import dash_core_components as dcc
import dash_table
import dash_cytoscape as cyto
from dash.exceptions import PreventUpdate
from keplergl import KeplerGl
import hdbscan
import datetime
from scipy.spatial import distance_matrix
from sklearn.metrics.pairwise import euclidean_distances
from scipy.stats import ttest_ind, ttest_1samp
from dash_table.Format import Format, Scheme, Trim
from sklearn.compose import make_column_transformer
from ipywidgets import AppLayout, Button, Layout, Accordion
from ipywidgets import Button, Layout, jslink, IntText, IntSlider, HBox, VBox
from ipywidgets import GridspecLayout
from sklearn.preprocessing import *
from sklearn.decomposition import *
from sklearn.manifold import *
from sklearn.pipeline import make_pipeline
from umap import UMAP
from sklearn.ensemble import *
from sklearn.linear_model import *
from joblib import Memory
from shutil import rmtree
import sklearn
from sklearn import svm, datasets
from sklearn.metrics import auc,confusion_matrix,plot_confusion_matrix,classification_report
from sklearn.metrics import plot_roc_curve
from sklearn.model_selection import StratifiedKFold, KFold
from sklearn.compose import ColumnTransformer
from lightgbm import LGBMClassifier, LGBMRegressor
from skopt import BayesSearchCV, gp_minimize, forest_minimize, gbrt_minimize
from skopt.searchcv import BayesSearchCV as BSCV
from skopt.space import Real, Categorical, Integer
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC
from sklearn import set_config
from sklearn.ensemble import RandomForestClassifier as rf
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.model_selection import cross_val_score, cross_validate, StratifiedKFold, KFold
from skopt.plots import plot_objective
from skopt.utils import use_named_args
from skopt.plots import plot_convergence
from sklearn.feature_selection import RFECV
from lightgbm import LGBMRegressor
from lightgbm import LGBMClassifier
from sklearn.preprocessing import StandardScaler
set_config(display='diagram')
import numpy as np
import pandas as pd
from pandas.plotting import scatter_matrix
import matplotlib.pyplot as plt
import matplotlib
#matplotlib.style.use('seaborn')
from sklearn import linear_model
import math
import seaborn as sns
import shap
#from datetime import datetime
import time
import ipywidgets as widget
import dash_html_components as html
from sklearn.base import clone
#JupyterDash.infer_jupyter_proxy_config() un comment for binder use
def _force_plot_htmlsm(*args):
force_plot = shap.force_plot(*args, matplotlib=False)
shap_html = f"<head>{shap.getjs()}</head><body>{force_plot.html()}</body>"
return html.Iframe(srcDoc=shap_html,style={"width": "100%", "height": "400px", "border": 0})
#### things to add : memory managment
#### add n_jobs = -1 for everything
#### add featureUnion
class pipemaker2:
def __init__(self, df,ipt_pipe, target ,*, height = 'auto', width = 'auto'):
self.pipe_list = []
###### Dataframe!
self.df = df
###### App
self.target = widget.Select(options = list(self.df.columns), description = 'Target',rows=1 ,layout=Layout(height='auto', width='33%'))
self.target.value = target
self.TG = target
self.classifier = widget.Select(options = ['LGBMClassifier', 'LGBMRegressor'] + sklearn.ensemble.__all__ + sklearn.linear_model.__all__, description = 'Classifier',rows=1, layout=Layout(height='auto', width='33%'))
#### add column buttons
self.nColumns = widget.BoundedIntText( value=1,min=1,step=1,description='Number of column transformers:' ,layout=Layout(height='auto', width='33%'))
self.nColumns.observe(self.maketab, "value")
self.top_box = HBox([self.nColumns, self.target, self.classifier],layout=Layout(height='auto', width='100%'))
self.acc_list = [self.makeacc()]
self.check = 0
self.tab = widget.Tab()
self.tab.set_title(0, '0')
self.tab.children = self.acc_list
self.widget = VBox([self.top_box, self.tab])
self.cached_pipe = 0
self.location = 0
self.memory = 0
self.optimized_pipe = (0, 0)
self.input_pipe = ipt_pipe
def makeacc(self):
accordion = widget.Accordion(children=[
widget.Text(str(self.nColumns.value)),
widget.SelectMultiple(options=self.df.columns.values, description='columns',rows=len(self.df.columns)),
widget.Text(''),
widget.ToggleButtons(options= ['None'] + [x for x in sklearn.preprocessing.__all__ if x[0].isupper() ] ),
widget.ToggleButtons(options= ['None'] + [x for x in sklearn.decomposition.__all__ if x[0].isupper() ] ),
widget.ToggleButtons(options= ['None', 'UMAP'] + [x for x in sklearn.manifold.__all__ if x[0].isupper() ] )
])
accordion.set_title(0, 'Name of transformer')
accordion.set_title(1, 'Column to be transformed')
accordion.set_title(2, 'Manual input')
accordion.set_title(3, 'Sklearn preprocessing')
accordion.set_title(4, 'Sklearn decomposition')
accordion.set_title(5, 'Sklearn manifold')
accordion.selected_index = None
return accordion
def accordion_to_tuple(self, acc):
if acc.children[-4].value == '': transformer_list = [eval(x.value + '()') for x in acc.children[-3:] if x.value !='None' ]
else: transformer_list = eval('[' + acc.children[-4].value+ ']')
if len(transformer_list) > 0: pipe = make_pipeline( *transformer_list)
else: pipe = Pipeline(steps = [('empty','passthrough')])
self.check = (acc.children[0].value, pipe, tuple(acc.children[1].value))
return (acc.children[0].value, pipe,tuple(acc.children[1].value))
def maketab(self, change):
if self.nColumns.value > len(self.acc_list):
self.acc_list += [self.makeacc() for i in range(self.nColumns.value - len(self.acc_list))]
elif self.nColumns.value < len(self.acc_list):
self.acc_list = self.acc_list[:self.nColumns.value]
self.tab.children = self.acc_list
for num, acc in enumerate(self.acc_list):
self.tab.set_title(num, str(acc.children[0].value))
self.widget = VBox([self.top_box, self.tab])
def Pipe(self):
return clone(self.input_pipe) #Pipeline(steps = [('preprocessing', self.ColumnTransform()), ('classifier', eval(self.classifier.value + '()') )])
def Cache_pipe(self):
self.location = 'cachedir'
self.memory = Memory(location=self.location, verbose=0)
self.cached_pipe = self.Pipe().set_params(memory = self.memory)
def release_cache(self):
self.memory.clear(warn=True)
rmtree(self.location)
del self.memory
def display_app(self):
display(self.widget)
def ColumnTransform(self):
return ColumnTransformer([self.accordion_to_tuple(aco) for aco in self.acc_list])
def export_kwards(self):
return self.Pipe().get_params()
def fit_transform(self):
return self.ColumnTransform().fit_transform(self.df)
def fit_predict(self):
return self.Pipe().fit_predict(self.df, self.df[self.TG])
def fit(self):
return self.Pipe().fit(self.df, self.df[self.TG])
def RFECV(self):
preprocessed_df = pd.DataFrame(self.Pipe()['preprocessing'].fit_transform(self.df))
if self.optimized_pipe[1] == 0:
selector = RFECV(self.Pipe()['classifier'], step=1, cv=KFold(10, shuffle= True)).fit(preprocessed_df, self.df[self.TG])
else:
selector = RFECV(self.optimized_pipe[0]['classifier'], step=1, cv=KFold(10, shuffle= True)).fit(preprocessed_df, self.df[self.TG])
hX = np.array( range(1, len(selector.grid_scores_) + 1))
hY= selector.grid_scores_
H = pd.DataFrame(np.array([hX, hY]).T, columns = ['Number of parameters', 'Cross Validation Score'])
plt.figure()
plt.xlabel("Number of features selected")
plt.ylabel("Cross validation score (nb of correct classifications)")
plt.plot(hX, hY)
plt.show()
return pd.DataFrame([selector.ranking_, selector.support_], columns = preprocessed_df.columns, index = ['Ranking', 'support'])
def make_skpot_var(self, param, temperature = 3, distribution = 'uniform', just_classifier = False): #'log-uniform'
value = self.export_kwards()[param]
if just_classifier == True: name = param.split('__')[1]
else: name = param
if value == 0 or value ==1: return
if type(value) == int:
if value == -1: return Integer(1, 200, name = name)
lower_bondary = int(value/temperature)
if lower_bondary < 2: lower_bondary = 2
upper_bondary = int(value*temperature) + lower_bondary
#if value <= 1: return Real(1e-3, 1, distribution ,name = name)
return Integer(lower_bondary, upper_bondary, distribution ,name = name)
if type(value) == float:
if value == -1: return Real(1, 200, name = name)
if value <= 1: return Real(1e-3, 1, distribution ,name = name)
lower_bondary = value/temperature
if lower_bondary < 2: lower_bondary = 2
upper_bondary = value*temperature + lower_bondary
return Real(lower_bondary, upper_bondary, distribution ,name = name)
def skopt_classifier_space(self, just_classifier = False):
dic = self.export_kwards()
classifier_params = [x for x in dic.keys()
if x.find('classifier__') != -1
and x.find('silent') == -1
and x.find('n_jobs') == -1
and x.find('bagging_fraction') == -1
and x != 'classifier__subsample'
and x != 'classifier__validation_fraction'] # and
SPACE = [self.make_skpot_var(i, just_classifier = just_classifier) for i in classifier_params]
SPACE = [x for x in SPACE if x if x != None ]
return SPACE
def objective(self, params):
classifier = self.Pipe().set_params(**{dim.name: val for dim, val in zip(self.skopt_classifier_space(), params)})
return -np.mean(cross_val_score(classifier, self.df, self.df[self.TG], cv = StratifiedKFold(n_splits = 5, shuffle=True)))
def objective_just_classifier(self, params, metric , cv_method ):
return -np.mean(cross_val_score(self.cached_pipe['classifier'].set_params(**{dim.name: val for dim, val in zip(self.skopt_classifier_space(just_classifier = 1), params)}),
self.transformed_opt,
self.target_opt,
scoring = metric,
cv = cv_method,
n_jobs = -1))
def objective_cached(self, params):
return -np.mean(cross_val_score(self.cached_pipe.set_params(**{dim.name: val for dim, val in zip(self.skopt_classifier_space(), params)}),
self.df,
self.df[self.TG],
cv = StratifiedKFold(n_splits = 5, shuffle=True)))
def optimize_classifier(self, n_calls = 50, cache = False):
if cache:
self.Cache_pipe()
result = gp_minimize(self.objective_cached, self.skopt_classifier_space() , n_calls=n_calls)
self.release_cache()
else: result = gp_minimize(self.objective, self.skopt_classifier_space() , n_calls=n_calls)
#plot_convergence(result)
#_ = plot_objective(result, n_points=n_calls)
#print(result.fun)
return {'result': result, 'best_params': self.get_params(result, self.skopt_classifier_space() )}
def fast_optimize_classifier(self, n_calls = 50, is_classifier = True):
self.Cache_pipe()
self.transformed_opt = self.cached_pipe['preprocessing'].fit_transform(self.df)
self.target_opt = self.df[self.TG]
if is_classifier:
cv_method = StratifiedKFold(n_splits = 5, shuffle=True)
metric = 'f1_weighted'
else:
cv_method = KFold(n_splits = 5, shuffle=True)
metric = 'r2'
result = gp_minimize(lambda x: self.objective_just_classifier(x, metric, cv_method), self.skopt_classifier_space(just_classifier = True) , n_calls=n_calls)
self.release_cache()
best_params = self.get_params(result, self.skopt_classifier_space(just_classifier = True))
best_params = {'classifier__'+ i[0]:i[1] for i in best_params.items()}
self.optimized_pipe = (self.Pipe().set_params(**best_params), 1)
return {'result': result, 'best_params':best_params}
def get_params(self, result_object, space):
try:
return { i.name: result_object.x[num] for num, i in enumerate(space) }
except:
raise
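    # Example (sketch): typical use of this class, assuming `df` is a pandas
    # DataFrame, `pipe` is a Pipeline with 'preprocessing' and 'classifier'
    # steps, and 'label' is the target column name:
    #   pm = pipemaker2(df, pipe, 'label')
    #   pm.display_app()                          # configure column transformers interactively
    #   res = pm.fast_optimize_classifier(n_calls=30)
    #   report, fig = pm.Evaluate_model()         # uses the optimized pipeline once available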
def Vis_Cluster(self, method):
transformed = self.Pipe()['preprocessing'].fit_transform(self.df)
classsification = method.fit_predict(transformed) #(*args, **kwds)
end_time = time.time()
palette = sns.color_palette('deep', np.unique(classsification).max() + 1)
colors = [palette[x] if x >= 0 else (0.0, 0.0, 0.0) for x in classsification]
plt.scatter(transformed.T[0], transformed.T[1], c=colors, s = MinMaxScaler(feature_range=(30, 300)).fit_transform(self.df[self.TG].values.reshape(-1, 1)) , **{'alpha' : 0.5, 'linewidths':0})
frame = plt.gca()
for num, spine in enumerate(frame.spines.values()):
if num == 1 or num == 3: spine.set_visible(False)
plt.title('Clusters found by {}'.format(str(method)), fontsize=24)
plt.show()
return
def Evaluate_model(self):
tprs = []
aucs = []
prd = []
tru = []
mean_fpr = np.linspace(0, 1, 100)
X = self.df.copy()
y = self.df[self.TG]
if self.optimized_pipe[1] == 0: clf = self.Pipe()
else: clf = self.optimized_pipe[0]
fig, ax = plt.subplots(1, 2, figsize = (20,10))
try:
for i, (train, test) in enumerate(StratifiedKFold(n_splits=5, shuffle=True).split(X, y)):
clf.fit(X.iloc[train], y.iloc[train])
viz = plot_roc_curve(clf, X.iloc[test], y.iloc[test],
name='ROC fold {}'.format(i),
alpha=0.3, lw=1, ax=ax[0])
interp_tpr = np.interp(mean_fpr, viz.fpr, viz.tpr)
interp_tpr[0] = 0.0
tprs.append(interp_tpr)
aucs.append(viz.roc_auc)
ax[0].plot([0, 1], [0, 1], linestyle='--', lw=2, color='r',
label='Chance', alpha=.8)
mean_tpr = np.mean(tprs, axis=0)
mean_tpr[-1] = 1.0
mean_auc = auc(mean_fpr, mean_tpr)
std_auc = np.std(aucs)
ax[0].plot(mean_fpr, mean_tpr, color='b',
label=r'Mean ROC (AUC = %0.2f $\pm$ %0.2f)' % (mean_auc, std_auc),
lw=2, alpha=.8)
std_tpr = np.std(tprs, axis=0)
tprs_upper = np.minimum(mean_tpr + std_tpr, 1)
tprs_lower = np.maximum(mean_tpr - std_tpr, 0)
ax[0].fill_between(mean_fpr, tprs_lower, tprs_upper, color='steelblue', alpha=.2,
label=r'$\pm$ 1 std. dev.')
ax[0].set(xlim=[-0.05, 1.05], ylim=[-0.05, 1.05])
# title="Receiver operating characteristic example")
ax[0].legend(loc="lower right")
except: print('non-binary classifier')
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.2)
try:
plot_confusion_matrix(clf.fit(X_train, y_train), X_test, y_test,
display_labels=['negative detection', 'positive detection'],
cmap=plt.cm.Blues, ax = ax[1])
ax[1].grid(False)
except: print('is it a regressor?')
fig.tight_layout()
try:
            report = classification_report(y_test, clf.predict(X_test), output_dict=True) # target_names=['Negative detection', 'Positive detection']
except: #### report for regression
if self.optimized_pipe[1] == 0: clf = self.Pipe()
else: clf = self.optimized_pipe[0]
report = cross_validate(clf, X, y, cv=5, scoring=('neg_mean_absolute_percentage_error','r2','explained_variance', 'max_error', 'neg_mean_absolute_error', 'neg_mean_squared_error'))
fig, ax = plt.subplots(1, 1, figsize = (1,1))
return report, fig
def named_preprocessor(self):
naming_features = []
for transformer in self.Pipe()['preprocessing'].transformers:
transformed = ColumnTransformer(transformers = [transformer]).fit_transform(self.df)
if transformed.shape[1] == len(transformer[2]):
naming_features += list(transformer[2])
else:
naming_features += [transformer[0] +'__'+ str(i) for i in range(transformed.shape[1]) ]
if self.optimized_pipe[1] == 0: clf = self.Pipe()
else: clf = self.optimized_pipe[0]
return pd.DataFrame(clf['preprocessing'].fit_transform(self.df), columns = naming_features)
def Shapley_feature_importance(self):
if self.optimized_pipe[1] == 0: clf = self.Pipe()
else: clf = self.optimized_pipe[0]
shap.initjs()
dat_trans = self.named_preprocessor()
explainer = shap.TreeExplainer(clf['classifier'].fit(dat_trans, self.df[self.TG])) #,feature_perturbation = "tree_path_dependent"
shap_values = explainer.shap_values(dat_trans)
#### force-plot
        a = [_force_plot_htmlsm(explainer.expected_value[i], shap_values[i], dat_trans) for i in range(len(shap_values))]
#### heatmap
#try: hmap = shap.TreeExplainer(clf['classifier'].fit(dat_trans, self.df[self.TG]), dat_trans) #redo check additivity
#except:
# print('Failed in heatmap, using LGBMC instead')
# hmap = shap.TreeExplainer(LGBMClassifier().fit(dat_trans, self.df[self.TG]), dat_trans)
#fig, ax = plt.subplots(1,1, figsize=(15, 15))
#shap.plots.heatmap(hmap(dat_trans)) ### figure is fig
### dependence matrix
ivalues = explainer.shap_interaction_values(dat_trans)
figdm, axdm = plt.subplots(len( dat_trans.columns), len(dat_trans.columns), figsize=(15, 15))
d = {i: name for i,name in enumerate(dat_trans.columns)}
for i in d.keys():
for j in d.keys():
shap.dependence_plot((d[i], d[j]), ivalues[1], dat_trans, ax = axdm[i,j], show = False)
### dependence plots
#figdp, axdp = plt.subplots( len(dat_trans.columns)//4+1, 4, figsize=(15, 15))
#for num, col in enumerate(dat_trans.columns):
# shap.dependence_plot(col, shap_values[1], dat_trans, ax = axdp[num//4,num%4], show= False)
return (a, figdm) #fig,
cyto.load_extra_layouts()
height, width = [500,500]
canvas_width = 500
canvas_height = round(height * canvas_width / width)
scale = canvas_width / width
def plotly_cyt(d):
edges = [{'data': {'weight': i['data']['weight'], 'source': str(i['data']['source']), 'target': str(i['data']['target'])}} for i in d['edges']]
nodes = [{'data': {k:i['data'][k] for k in ('id', 'value', 'name') }, 'position' : dict(zip(('x', 'y'),i['data']['data']))} for i in d['nodes']]
return nodes + edges
def plotly_cyt2(G):
d = nx.cytoscape_data(G)['elements']
pos = nx.spring_layout(G)
edges = [{'data': {'weight': i['data']['weight'], 'source': str(i['data']['source']), 'target': str(i['data']['target'])}} for i in d['edges']]
nodes = [{'data': {k:i['data'][k] for k in ('id', 'value', 'name') }, 'position' : dict(zip(('x', 'y'),j))} for i,j in zip(d['nodes'], list(pos.values()))]
return nodes + edges
def plotly_cyt3(G):
d = nx.cytoscape_data(G)['elements']
pos = nx.spring_layout(G)
edges = [{'data': {'weight': i['data']['weight'], 'source': str(i['data']['source']), 'target': str(i['data']['target'])}} for i in d['edges']]
nodes = [{'data': {**{k:i['data'][k] for k in ('id', 'value', 'name') }, **{'degree': degree[1]}} , 'position' : dict(zip(('x', 'y'),j))}
for i,j,degree in zip(d['nodes'], list(pos.values()), list(G.degree))]
return nodes + edges
def make_colormap_clustering(column, palette, continuous, data):
if not continuous:
lut = dict(zip(sorted(data[column].unique()), sns.color_palette(palette, len(data[column].unique()))))
else: lut = sns.color_palette(palette, as_cmap=True)
return data[column].map(lut)
def _force_plot_html(*args):
force_plot = shap.force_plot(*args, matplotlib=False, figsize=(18, 18))
shap_html = f"<head>{shap.getjs()}</head><body>{force_plot.html()}</body>"
return html.Iframe(srcDoc=shap_html, height='1800', width='1800',style={"border": 0})#
def mplfig2html(figure):
pic_IObytes2 = io.BytesIO()
figure.savefig(pic_IObytes2, format='png')
figure.clear()
pic_IObytes2.seek(0)
return html.Img(src ='data:image/png;base64,{}'.format(base64.b64encode(pic_IObytes2.read()).decode()))
def mpl2plotlyGraph(figure):
return dcc.Graph(ptools.mpl_to_plotly(figure)) #image_height: int=600,image_width: int=800
# Build App
app = JupyterDash(__name__, external_stylesheets=[dbc.themes.MINTY]) #FLATLY, LUMEN, SUPERHERO
#server = app.server add this for binder
def convert2cytoscapeJSON(G):
# load all nodes into nodes array
final = {}
final["nodes"] = []
final["edges"] = []
for node in G.nodes():
nx = {}
nx["data"] = {}
nx["data"]["id"] = node
nx["data"]["label"] = node
final["nodes"].append(nx.copy())
#load all edges to edges array
for edge in G.edges():
nx = {}
nx["data"]={}
nx["data"]["id"]=edge[0]+edge[1]
nx["data"]["source"]=edge[0]
nx["data"]["target"]=edge[1]
final["edges"].append(nx)
return json.dumps(final)
upload_tab = [
dbc.Row(dbc.Col(dbc.Jumbotron([
html.H1("qPCR files", className="display-3"),
html.P('We are expecting csv files from an export file from a cfx96',className="lead",),
html.Hr(className="my-2"),
dbc.Row([
dbc.Col(html.H4("Column of qPCR files to merge with habitat metadata:") , width = 4),
dbc.Col(dcc.Dropdown(options = [{"label": "Sample", "value": 'Sample'}] , value = 'Sample', id='qpcrdf', disabled = True), width = 3)]),
dcc.Upload(id='upload-qPCR2',children=html.Div(['Drag and Drop or ', html.A('Select Files')]),
style={'width': '100%',
'height': '120px',
'lineHeight': '120px',
'borderWidth': '2px',
'borderStyle': 'dashed',
'font-size': '20px',
'borderRadius': '5px',
'justify-content': 'center',
'textAlign': 'center',
'margin': '10px'}, multiple=True),
html.Div(id='qpcr-data-upload') ]), width = 12),justify="center",no_gutters=True),
dbc.Row(dbc.Col(dbc.Jumbotron([
html.H1("Habitat metadata", className="display-3"),
html.P('You probably have a separate file with Lat, Lon and other environmental parameters',className="lead",),
html.Hr(className="my-2"),
dbc.Row([
dbc.Col(html.H4("Column of Habitat metadata file to merge with qPCRs:") , width = 4),
dbc.Col(dcc.Dropdown(id='habitatdf'), width = 3)]),
dcc.Upload(id='upload-habitat',children=html.Div(['Drag and Drop or ', html.A('Select Files')]),
style={'width': '100%',
'height': '120px',
'lineHeight': '120px',
'borderWidth': '2px',
'borderStyle': 'dashed',
'borderRadius': '5px',
'font-size': '20px',
'justify-content': 'center',
'textAlign': 'center',
'margin': '10px'},multiple=True),
html.Div(id='habitat-data-upload') ]), width = 12),justify="center",no_gutters=True),
dbc.Row(dbc.Col(dbc.Jumbotron([
html.H1("Send complete dataset directly", className="display-3"),
dcc.Upload(id='upload_dataset_directly',children=html.Div(['Drag and Drop or ', html.A('Select Files')]),
style={'width': '100%', 'height': '120px', 'lineHeight': '120px', 'font-size': '20px', 'borderWidth': '2px', 'borderStyle': 'dashed', 'borderRadius': '5px', 'textAlign': 'center', 'margin': '10px'},multiple=False),
html.Div(id='direct_dataframe_upload_name')
]), width = 12),justify="center",no_gutters=True)
]
merge_tab = [
dbc.Jumbotron([
html.H1("Merged dataset overview ", className="display-3"),
html.P('Look for parameters that have unexpected behavior, dataset size and other possible concerns with data integrity',className="lead",),
html.Hr(className="my-2"),html.P(""),
dcc.Loading(id="loading-1",type="default", children=html.Div(id='Merged_df', style = {'justify-content': 'center', 'margin': '0 auto', 'width': '90%'} ) )
]),
]
VIS = [dbc.Row(dbc.Col(html.Div( id = 'keplermap', style = {'overflow': 'hidden'}), width="100%",style = {'overflow': 'clip'}), no_gutters=True,justify="center", style = {'overflow': 'hidden'}),]
kep_tab=[ dbc.Row([
dbc.Col(
[dbc.Row([
dbc.Jumbotron([
html.H4("what are the continous columns for the UMAP?", id = 'kep_tab_continuous_columns_target'),
dbc.Popover([ dbc.PopoverHeader("how we look at continuous data"),dbc.PopoverBody("https://umap-learn.readthedocs.io/en/latest/basic_usage.html")],target="kep_tab_continuous_columns_target",trigger="hover",),
dcc.Dropdown(options=[],value=[], multi=True, id = 'UMAP_cont'),
html.H4("what are the categorical columns for the UMAP?", id = 'kep_tab_cat_columns_target'),
dbc.Popover([ dbc.PopoverHeader("how we look at categorical data"),dbc.PopoverBody("see https://umap-learn.readthedocs.io/en/latest/composing_models.html#diamonds-dataset-example")],target="kep_tab_cat_columns_target",trigger="hover",),
dcc.Dropdown(options=[],value=[], multi=True, id = 'UMAP_cat'),
html.H4("Do you want to fit the UMAP to a feature?", id = 'keep_tab_metric_learn'), #https://umap-learn.readthedocs.io/en/latest/supervised.html
dbc.Popover([ dbc.PopoverHeader("fitting umap to feature"),dbc.PopoverBody("https://umap-learn.readthedocs.io/en/latest/supervised.html")],target="keep_tab_metric_learn",trigger="hover",),
dcc.Dropdown(options=[],value=[], multi=False, id = 'UMAP_y'),
html.H4("How many neighboors for the UMAP to use?", id = 'keep_tab_nneighboors'),
dbc.Popover([ dbc.PopoverHeader("n neighboors parameter"),dbc.PopoverBody("This parameter controls how UMAP balances local versus global structure in the data. It does this by \
constraining the size of the local neighborhood UMAP will look at when attempting to learn the manifold structure of the data. \
This means that low values of n_neighbors will force UMAP to concentrate on very local structure (potentially to the detriment of the big picture),\
while large values will push UMAP to look at larger neighborhoods of each point when estimating the manifold structure of the data, \
losing fine detail structure for the sake of getting the broader of the data. _ see https://umap-learn.readthedocs.io/en/latest/parameters.html#n-neighbors")],target="keep_tab_nneighboors",trigger="hover",),
dbc.Input(id="n_neighboors", type="number", value = 15, min = 10, max = 1000), #https://umap-learn.readthedocs.io/en/latest/parameters.html#n-neighbors
html.H4('Type of scaling to use:', id= 'kep_tab_scale'),
dbc.Popover([ dbc.PopoverHeader("Should I scale my data?"),dbc.PopoverBody("The default answer is yes, but, of course, the real answer is “it depends”. \
If your features have meaningful relationships with one another (say, latitude and longitude values) then normalising per feature is not a good idea. \
For features that are essentially independent it does make sense to get all the features on (relatively) the same scale. \
The best way to do this is to use pre-processing tools from scikit-learn. All the advice given there applies as sensible preprocessing for UMAP,\
and since UMAP is scikit-learn compatible you can put all of this together into a scikit-learn pipeline.")],target="kep_tab_scale",trigger="hover",),
dbc.RadioItems(id="UMAP_radio",
options=[
{"label": "No Standardization", "value": 1},
{"label": "Standard scaler", "value": 2},
{"label": "Pipeline from machine learning tab","value": 3}],value = 2,
labelCheckedStyle={"color": "#223c4f", 'font-size': '18px'},
labelStyle = {}, style = {'font-size': '18px', 'margin' : '10px', 'margin-left': '60px' ,'transform':'scale(1.2)'}, switch=True,
inputStyle = { }
),
dbc.Button("Generate UMAP", color="info", size = 'lg', className="mr-1", block=True, id='UMAP_start') ]),
dbc.Popover([ dbc.PopoverHeader("what is UMAP?"),dbc.PopoverBody("see https://umap-learn.readthedocs.io/en/latest/how_umap_works.html \nhttps://umap-learn.readthedocs.io/en/latest/scientific_papers.html\nhttps://umap-learn.readthedocs.io/en/latest/faq.html#what-is-the-difference-between-pca-umap-vaes")],target="UMAP_start",trigger="hover",),
])],width=2) ,
dbc.Col([dcc.Loading(id="loading-umap",type="default", children= dcc.Tabs([
dcc.Tab(label = 'umap-view', children = [html.Div(dcc.Graph(id='UMAP_view'), style = {'height': '1200px', 'width' : '1500px','margin-left':'30px'}),html.Div( id = 'umap_selected_stats', style = {'width': '98%'})] ),
dcc.Tab(label = 'heatmap/cytoscape', children = html.Div( id = 'cytoscape', style = {'justify-content': 'center'} )),
dcc.Tab(label = 'hdbscan clustering', children = html.Div(id='graph') ),
], style = {'justify-content': 'center', 'width': '100%','margin-left': '12px','overflow': 'clip'})) ], width=10, style = {'overflow': 'clip'})], no_gutters=True)] #
#className="nav nav-pills" , no_gutters=True autosize=False
time_series_tab = [
dbc.Row([
dbc.Col( dbc.Jumbotron([
html.H4("Target column"),
dcc.Dropdown(options=[],value=[], multi=False, id = 'prophet_y'),
html.H4("Datetime column"),
dcc.Dropdown(options=[],value=[], multi=False, id = 'prophet_ds'),
html.Hr(style= {'margin-bottom': '25px'}),
html.H4("Additional regressors"),
dcc.Dropdown(options=[],value=[], multi=True, id = 'prophet_regressors'),
html.Hr(style= {'margin-bottom': '25px'}),
html.H4('Rolling average'),
html.H5('number of days'),
dbc.Input(id="prophet_rolling_average", type="number", value = 0, min = 0, max = 366, step = 0.25),
html.Hr(style= {'margin-bottom': '25px'}),
html.H4("Growth"),
dcc.Dropdown(options=[
{"label": "logistic", "value": 'logistic'},
{"label": "flat", "value": 'flat'},
{"label": "linear", "value": 'linear'}
],value='linear', multi=False,id = 'prophet_growth'),
html.H4("Target maximum value"),
dbc.Input(id="prophet_cap", type="number", value = 1, step = .01),
html.H4("Target minimum value"),
dbc.Input(id="prophet_floor", type="number", value = 0, step = .01),
html.Hr(style= {'margin-bottom': '25px'}),
            html.H4('Seasonality'),
html.H5('frequency'),
dbc.Checklist( options = [
{"label": "Yearly", "value": 'yearly_seasonality'},
{"label": "Weekly", "value": 'weekly_seasonality'},
{"label": "Daily", "value": 'daily_seasonality'},
] ,value=['yearly_seasonality'], id = 'prophet_seasonality' ,
style = {'font-size': '18px', 'margin' : '10px', 'margin-left': '60px' ,'transform':'scale(1.2)'}, switch=True ),
html.H5('mode'),
dcc.Dropdown(options=[
{"label": "additive", "value": 'additive'},
{"label": "multiplicative", "value": 'multiplicative'}
], multi=False,id = 'seasonality_mode', value = 'additive'),
html.H5('scale'),
dbc.Input(id="season_prior", type="number", value = 10, min = 1, max = 100),
html.Hr(style= {'margin-bottom': '25px'}),
html.H4('Change points'),
html.H5('quantity'),
dbc.Input(id="prophet_n_change_points", type="number", value = 25, min = 0, max = 100,step =1),
html.H5('scale'),
dbc.Input(id="changepoint_prior", type="number", value = .05, min = 0, max = 10., step = 0.01),
html.H5('range'),
dbc.Input(id="changepoint_range", type="number", value = .8, min = 0.1, max = 1., step = 0.01),
]), width = 2),
dbc.Col(dcc.Loading(id="loading-prophet",type="default", children=html.Div(id='prophet_plots', style = {'justify-content': 'center', 'margin': '0 auto', 'width': '100%'} ), style= {'margin-top': '100px'})),
dbc.Col( dbc.Jumbotron([
html.H4('Forecast'),
html.H5('prediction range'),
dcc.DatePickerRange(id= 'prophet_future_dates', display_format='MMM DD YYYY'),
html.Hr(style= {'margin-bottom': '50px'}),
            html.H5('remove these months'),
            dcc.Dropdown(options=[ {"label": calendar.month_name[num], "value": num} for num in range(1,13)],value=[], multi=True,id = 'prophet_remove_months'),
html.H5('remove these days of the week'),
dcc.Dropdown(options=[ {"label": day_name, "value": num} for num,day_name in enumerate(['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun'])],
value=[], multi=True,id = 'prophet_remove_days_of_the_week'),
html.H5('remove these hours of the day'),
dcc.Dropdown(options=[ {"label": str(num)+':00-'+str(num+1)+':00', "value": num} for num in range(0,24)],value=[], multi=True,id = 'prophet_remove_hours'),
html.Hr(style= {'margin-bottom': '70px'}),
dbc.Button("Run forecast", color="info", size = 'lg', className="mr-1", block=True, id='run_prophet')
]) , width = 2)
], no_gutters=True, style={'margin-bottom': '10px'})
]
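
# --- Illustrative sketch (not wired into any callback): how the controls in the time-series tab
# --- above map onto a Prophet model. This is a minimal sketch assuming the `prophet` package
# --- (older installs ship it as `fbprophet`); the input frame must already have `ds`/`y` columns,
# --- and the helper name and default values below are hypothetical.
def _prophet_config_sketch(df, growth='linear', cap=1.0, floor=0.0,
                           seasonality_mode='additive', season_prior=10,
                           n_changepoints=25, changepoint_prior=0.05, changepoint_range=0.8):
    from prophet import Prophet  # assumed local import; use `from fbprophet import Prophet` on older installs
    m = Prophet(growth=growth, seasonality_mode=seasonality_mode,
                seasonality_prior_scale=season_prior,
                n_changepoints=n_changepoints,
                changepoint_prior_scale=changepoint_prior,
                changepoint_range=changepoint_range)
    if growth == 'logistic':
        df = df.assign(cap=cap, floor=floor)  # logistic growth needs saturating bounds
    m.fit(df)
    future = m.make_future_dataframe(periods=30)
    if growth == 'logistic':
        future = future.assign(cap=cap, floor=floor)
    return m.predict(future)
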
transformers = [x for x in sklearn.preprocessing.__all__ + ['UMAP'] + sklearn.decomposition.__all__ + sklearn.manifold.__all__ if x[0].isupper() and x != 'SparseCoder'] + ['passthrough']
transformer_options = [ {'label': x, 'value': x } for x in transformers] # eval(x+ '()')
ML_tab = [
dbc.Row([
dbc.Col(
[dbc.Jumbotron([
dbc.Row([
dbc.Col([ html.H4("number of transformers:")]),
dbc.Col([#dcc.Dropdown(options=[ {'label': str(x), 'value': str(x)} for x in range(10)],value='2', multi=False,clearable=False, id = 'n_tabs')
dbc.Input(id="n_tabs", type="number", value = 2, min = 1, max = 10)
]),
dbc.Col([html.H4("Target:")]),
dbc.Col([dcc.Dropdown(options=[],value=[], multi=False, id = 'ML_target',clearable=False)]),
dbc.Col([html.H4("Classifier:", id = 'ml_tab_classifier'), dbc.Popover([ dbc.PopoverHeader("chosing a classifier"),dbc.PopoverBody('see: \
https://scikit-learn.org/stable/supervised_learning.html#supervised-learning\n https://lightgbm.readthedocs.io/en/latest/Quick-Start.html ')],target="ml_tab_classifier",trigger="hover",)]),
dbc.Col([dcc.Dropdown(options=[ {'label': x, 'value': x} for x in ['LGBMClassifier', 'LGBMRegressor'] + sklearn.ensemble.__all__ + sklearn.linear_model.__all__]
,value = 'RandomForestClassifier', multi=False, id = 'clf_disp', clearable=False)]) ])]),
dbc.Jumbotron([dbc.Row([dbc.Col(
[html.H4("Columns to be transformed:")] +
[ dcc.Dropdown(options= ['0'], value = ['0'],multi=True,clearable=False, id = 'Columns_'+ str(i)) for i in range(3)], id = 'preprocessing_columns'),
dbc.Col(
[html.H4("Column transformers:", id = 'ml_tab_column_trans')] + #https://scikit-learn.org/stable/modules/preprocessing.html#
[ dcc.Dropdown(options= transformer_options, value = ['passthrough'], multi=True,clearable=False, id = 'ColumnTransformer_'+ str(i)) for i in range(3)], id = 'preprocessing_functions'),
dbc.Popover([ dbc.PopoverHeader("preprocessing the data"),dbc.PopoverBody("see:\n https://scikit-learn.org/stable/modules/preprocessing.html\n\
https://scikit-learn.org/stable/modules/decomposition.html#decompositions#\nhttps://scikit-learn.org/stable/modules/clustering.html#clustering")],target="ml_tab_column_trans",trigger="hover",)
])])
],width=6, id='ml_user_input'), ] + [dbc.Col([dbc.Button("Update Pipeline", color="info", size = 'lg', className="mr-1", block=True, id='submit_pipe'),
html.Div(id = 'show_pipeline', style ={'width': '50%','borderWidth': '0px' ,'border': 'white'})], width = 6)], no_gutters=True,justify="center"),
dbc.Row([dbc.Col(
dbc.Jumbotron([
dbc.Row([ html.H1("Testing the pipeline", style ={'margin': '20px'})]), #,justify="center"
dbc.Row([dbc.Col([html.H4("Number of runs for hyperparameter optimization:", id = 'ml_tab_tunning')], width = 3),
dbc.Popover([ dbc.PopoverHeader("Tunning the model"),dbc.PopoverBody("here we use scikit optimize's bayesian optimization to tune the hyperparameters\
https://scikit-optimize.github.io/stable/auto_examples/bayesian-optimization.html")],target="ml_tab_tunning",trigger="hover",),
dbc.Col([dbc.Input(id="slider_hyperopt", type="number", value = 50, min = 10, max = 1000)], width = 1)], no_gutters=True, style={'margin-bottom': '10px'}), #
dbc.Row([dbc.Button("Run pipeline", color="info", size = 'lg', className="mr-1", block=True, id='run_ML')]),
dcc.Loading(id="loading-ml",type="default", children=html.Div(id = 'ml_results', style = {'justify-content': 'center', 'margin': '0 auto', 'width': '2200', 'height' : '1400px'}),
style= {'margin-top': '-300px','justify-content': 'center'})])
, width = 12, style = {'justify-content': 'center', 'height' : '2000px'}) ], no_gutters=True)
]
# html.Iframe(srcDoc = ret_map._repr_html_().decode(), height='1280', width='2350') iframe for html representation of pipeline sklearn
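
# --- Illustrative sketch (not wired into any callback): the "Run pipeline" button hands the
# --- pipeline to pipemaker2's own optimizer, so this only shows the general shape of a Bayesian
# --- hyperparameter search with scikit-optimize; the parameter name and search range below are
# --- hypothetical examples, not what pipemaker2 actually tunes.
def _bayes_search_sketch(pipe, X, y, n_calls=50):
    from skopt import BayesSearchCV  # scikit-optimize
    search = BayesSearchCV(pipe,
                           search_spaces={'classifier__n_estimators': (50, 500)},
                           n_iter=n_calls, cv=3)
    search.fit(X, y)
    return search.best_estimator_, search.best_score_
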
tab_style = {
"background": "#223c4f",
'color': "#6cc3d5",
'text-transform': 'lowercase',
'border': '#223c4f',
'font-size': '12px',
'font-weight': 200,
'align-items': 'center',
'justify-content': 'center',
'border-radius': '0px',
#'padding':'6px'
}
tab_selected_style = {
"background": "#153751",
'color': 'white',
'text-transform': 'uppercase',
'font-size': '12px',
'font-weight': 200,
'align-items': 'center',
'justify-content': 'center',
#'box-shadow': '60px 0 #223c4f, -60px 0 solid #223c4f',
'border-style': 'solid #223c4f',
'border-color': '#223c4f',
'border-width': '0',
#'border-radius': '50px'
}
app.layout = html.Div([
    dbc.NavbarSimple([], brand = 'SARS-CoV-2 genome viewer', brand_style ={'color': "white",'font-size': '14px'} ,
style = { 'align-items': 'left','justify-content': 'left', 'font-size': '14px', 'height': '40px'},
color = "#223c4f"),
dcc.Store(id='all_qPCR_concat', storage_type='memory'), #storage_type='local'
dcc.Store(id='habitatcsv', storage_type='memory'), #df_with_umap
dcc.Store(id='df', storage_type='memory'),
dcc.Store(id='df_with_umap', storage_type='memory'),
dcc.Store(id='umap_select_columns', storage_type='memory'),
dcc.Store(id='selected_points_umap', storage_type='memory'), #html.Table(id='all_dfs') selected_points_umap
dcc.Tabs([
dcc.Tab(label = 'Dataset', children = upload_tab , style=tab_style, selected_style=tab_selected_style),
dcc.Tab(label = 'Quality Control', children = merge_tab , style=tab_style, selected_style=tab_selected_style),
dcc.Tab(label='Exploratory Data Analysis', children=kep_tab, style=tab_style, selected_style=tab_selected_style),
dcc.Tab(label='Geoposition', children=VIS, style=tab_style, selected_style=tab_selected_style),
dcc.Tab(label='Time Series', children=time_series_tab, style=tab_style, selected_style=tab_selected_style),
dcc.Tab(label='Machine Learning', children=ML_tab, style=tab_style, selected_style=tab_selected_style)],className="nav nav-pills") ,
])
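# Helper functions used when merging the qPCR results with the habitat table:
#   get_cq_list  -> all Cq values recorded for a sample (NaN treated as 60, i.e. no amplification)
#   get_det_list -> the corresponding detection calls encoded as 1 (positive) / 0 (negative)
#   FIND_Better  -> look up a column value for a row's SiteID in another dataframe (-1 if absent)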
def get_cq_list(x, df):
a = df[df.Sample == x].Cq.values
return [60 if np.isnan(x) else x for x in a]
def get_det_list(x, df):
a = df[df.Sample == x].Call.values
return [1 if x=='(+) Positive' else 0 for x in a]
def FIND_Better(row, column, df):
series = df[df.index == str(row['SiteID'])][column]
if series.shape[0] == 0: return -1
return series.iloc[0]
cyto.load_extra_layouts()
@app.callback(Output(component_id= 'Merged_df', component_property ='children'),
Output(component_id= 'df', component_property ='data'),
Output(component_id= 'direct_dataframe_upload_name', component_property = 'children'),
Input('habitatdf', 'value'),
Input('upload_dataset_directly', 'contents'),
State('upload_dataset_directly', 'filename'),
State('upload_dataset_directly', 'last_modified'),
State('all_qPCR_concat', 'data'),
State('habitatcsv', 'data'))
def merge_csv_update_spreadsheet(hab, up_content, up_filename, up_date , df_qpcr_json, df_hab_json): #qpcr,
ctx = dash.callback_context.triggered[0]['prop_id'].split('.')[0]
if hab != None and ctx == 'habitatdf': # and qpcr != None:
try : left_merge, right_merge = hab, 'Sample' #qpcr
except:
return html.Div(), html.Hr(className="my-2"), html.Div(),
try: df, df_hab = pd.read_json(df_qpcr_json), pd.read_json(df_hab_json)
except Exception as e:
return html.H4('no data'), html.Hr(className="my-2"), html.Div(),
describe = df.groupby('Sample').describe(include = 'all', percentiles = [])
frequencies = pd.DataFrame(describe['Cq']['count']/describe['Fluor']['count'], columns = ['eDNA_frq']) #### this is not working properly for some reason?!
frequencies = frequencies.fillna(-1)
frequencies['min_Cq'] = describe['Cq']['min']
frequencies['N_samples'] = describe['Fluor']['count']
frequencies = frequencies.reset_index()
frequencies['list_of_CQs'] = frequencies.Sample.apply(lambda x: get_cq_list(x, df))
frequencies['CQ_average'] = frequencies.list_of_CQs.apply(lambda x: np.array(x).mean())
frequencies['list_of_detections'] = frequencies.Sample.apply(lambda x: get_det_list(x, df))
frequencies['eDNA_detection_average'] = frequencies.list_of_detections.apply(lambda x: np.array(x).mean())
frequencies['eDNA_binary'] = frequencies['eDNA_detection_average'].apply(lambda x: 1 if x > .1 else 0)
frequencies = frequencies.fillna(60)
final = df_hab.merge(frequencies,left_on = left_merge, right_on = right_merge, how = 'inner' )
return dbc.Jumbotron([ html.H1("Overview of your dataset", className="display-3"),
html.Iframe(srcDoc = ProfileReport(final, correlations=None).to_html(), height='18000', width='2350')]), final.to_json(), html.Div() #interactions=None,
elif up_content != None and ctx == 'upload_dataset_directly':
content_type, content_string = up_content.split(',')
decoded = base64.b64decode(content_string)
if '.csv' in up_filename:
final = pd.read_csv(io.StringIO(decoded.decode('utf-8')))
elif '.tsv' in up_filename:
final = pd.read_csv(io.StringIO(decoded.decode('utf-8')), sep='\t')
else: return html.Div('file type not accepted, is it a csv or a tsv?'),html.Div(), html.Div([ html.H5(up_filename), html.Hr() ])
return [ html.Iframe(srcDoc = ProfileReport(final, correlations=None).to_html(), height='18000', width='2000')],final.to_json(), html.Div([html.H5(up_filename), html.Hr()]) #2350 interactions=None,
else: return html.Div(),html.Div(), html.Div()
def inpt_children_to_pipe(columns, funcs, classif):
C = [x['props']['value'] for x in columns[1:]]
F = [x['props']['value'] for x in funcs[1:]]
if classif == 'LGBMClassifier':
return Pipeline(steps = [('preprocessing', make_pipe(C, F)), ('classifier', eval(classif + "(boosting_type='gbdt', subsample=1.0)") )]) #boosting_type='gbdt', bagging_fraction = 0
return Pipeline(steps = [('preprocessing', make_pipe(C, F)), ('classifier', eval(classif + '()') )])
def make_pipe(columns_list, transformer_list):
simplfy = []
for num, (cols, trans) in enumerate(zip(columns_list, transformer_list) ):
sub_smp = []
for x in trans:
if x[0].isupper() == True:
if x == 'PCA': sub_smp += [PCA(n_components = 2)]
else: sub_smp += [eval(x+ '()')]
else: sub_smp += [x]
simplfy += [tuple([str(num), make_pipeline(*sub_smp), tuple(cols)])]
return ColumnTransformer(simplfy)
#simplfy =[ tuple([str(num), make_pipeline(*[eval(x+ '()') if x[0].isupper() == True else x for x in trans ]), tuple(cols)]) for num, (cols, trans) in enumerate(zip(columns_list, transformer_list) )]
#return ColumnTransformer(simplfy)
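# Example (illustrative only, the column names are made up): two column groups, the first scaled
# and reduced, the second passed through untouched.
#   make_pipe([['temp', 'salinity'], ['site_id']], [['StandardScaler', 'PCA'], ['passthrough']])
# returns a ColumnTransformer with one small Pipeline per column group, which is what
# inpt_children_to_pipe() plugs in as the 'preprocessing' step of the full Pipeline.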
@app.callback(Output(component_id= 'ml_results', component_property ='children'),
Input(component_id = 'run_ML', component_property = 'n_clicks'),
State(component_id= 'preprocessing_functions', component_property ='children'),
State(component_id= 'preprocessing_columns', component_property ='children'),
State(component_id = 'clf_disp', component_property = 'value'),
State(component_id = 'df', component_property = 'data'),
State(component_id = 'ML_target', component_property = 'value'),
State(component_id = 'slider_hyperopt', component_property = 'value'))
def run_ML(clicked, f_list, c_list, val, data, target, ncalls):
pipe = inpt_children_to_pipe(c_list,f_list, val)
try: df = pd.read_json(data,convert_dates = False)
except: return html.Div()
Maj = pipemaker2(df, pipe, target)
try:
opt_results = Maj.fast_optimize_classifier(n_calls= int(ncalls))
new_pipe2 = [html.Iframe(srcDoc = estimator_html_repr(Maj.optimized_pipe[0]), height='450', width='1150', hidden = 'hidden')]
except:
try:
opt_results = Maj.fast_optimize_classifier(n_calls= int(ncalls), is_classifier= False)
new_pipe2 = [html.Iframe(srcDoc = estimator_html_repr(Maj.optimized_pipe[0]), height='450', width='1150', hidden = 'hidden')]
except:
new_pipe2 = [html.Iframe(srcDoc = estimator_html_repr(Maj.Pipe()), height='450', width='1150', hidden = 'hidden')]
Maj = pipemaker2(pd.read_json(data,convert_dates = False), inpt_children_to_pipe(c_list,f_list, val), target)
try:
scores, fig = Maj.Evaluate_model()
rev_table = pd.DataFrame(scores).T.reset_index()
graph_part = mplfig2html(fig)
scoreshtml = [dash_table.DataTable( data=rev_table.to_dict('records'), columns=[{'name': str(i), 'id': str(i)} for i in rev_table.columns], style_table={'overflowX': 'auto'},
style_cell={'minWidth': '180px', 'width': '180px', 'maxWidth': '180px','overflow': 'hidden','textOverflow': 'ellipsis'}),graph_part]
    except: scoreshtml = [html.H3('Failed to evaluate scores: is it a regressor?', className="display-3") ]
#fplot, fig2 = Maj.Shapley_feature_importance() #fig1,
##### shapley graphs
if Maj.optimized_pipe[1] == 0: clf = Maj.Pipe()
else: clf = Maj.optimized_pipe[0]
new_pipe = html.Iframe(srcDoc = estimator_html_repr(clf), height='450', width='1150', hidden = True)
#fig, ax = plt.subplots(figsize=(15, 15))
shap.initjs()
dat_trans = Maj.named_preprocessor()
try:
explainer = shap.TreeExplainer(clf['classifier'].fit(dat_trans, Maj.df[Maj.TG]), dat_trans) ######## added dat_trans here ____________________remove if breaks!!!
shap_values = explainer.shap_values(dat_trans, check_additivity=False) #,feature_perturbation = "tree_path_dependent"
except:
explainer = shap.Explainer(clf['classifier'].fit(dat_trans, Maj.df[Maj.TG]), dat_trans)
shap_values = explainer.shap_values(dat_trans)
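    # Note: depending on the model, shap_values can be a list (one array per class) or a single
    # array (regressors / some binary models); the try/except around the force plots below
    # handles both shapes.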
#### summary plot
fig_summary, ax = plt.subplots(figsize=(15, 15))
shap.summary_plot(shap_values,dat_trans, plot_type='bar',plot_size=(10,10), max_display=20,show= False)
sumhtml = [mplfig2html(fig_summary)]
#### force-plot
try: a = [_force_plot_html(explainer.expected_value[i], shap_values[i], dat_trans) for i in range(len(shap_values))]
except: a = [_force_plot_html(explainer.expected_value, shap_values, dat_trans) ]
### dependence matrix
try:
ivalues = shap.TreeExplainer(clf['classifier'].fit(dat_trans, Maj.df[Maj.TG])).shap_interaction_values(dat_trans)
figdm, axdm = plt.subplots(len( dat_trans.columns), len(dat_trans.columns), figsize=(15, 15))
#d = {i: name for i,name in enumerate(dat_trans.columns)}
#for i in d.keys():
# for j in d.keys():
# shap.dependence_plot((d[i], d[j]), ivalues, dat_trans, ax = axdm[i,j], show = False)
#fig2html = mplfig2html(figdm)
shap.summary_plot(ivalues, dat_trans, show= False)
####erase here if necessary
figdm = plt.gcf()
figdm.set_figheight(15)
figdm.set_figwidth(15)
figdm.tight_layout()
fig2html = mplfig2html(figdm)
except:
fig2html = html.H5("Shapley interaction matrix only available for tree-based models")
#### heatmap
try:
try : shap.plots.heatmap(explainer(dat_trans), show= False)
except : shap.plots.heatmap(explainer(dat_trans), show= False, check_additivity=False)
fig1 = plt.gcf()
fig1.set_figheight(15)
fig1.set_figwidth(15)
fig1.tight_layout()
fig1html = mplfig2html(fig1)
#fig1_1, ax = plt.subplots(1,1, figsize=(15, 15))
#shap.plots.bar(hmap(dat_trans, check_additivity=False), show = False)
#fig1_1 = plt.gcf()
#fig1_1.set_figheight(10)
#fig1_1.set_figwidth(10)
#fig1_1html = mplfig2html(fig1_1)
heatmapfigs = [fig1html]
except:
heatmapfigs = [html.H5('heatmap is only available in binary classification')]
if val == "LGBMClassifier" or val == 'LGBMRegressor':
decision_tree, ax = plt.subplots(1,1, figsize=(15, 15))
plot_tree(clf['classifier'], ax=ax, show_info = ['leaf_count','data_percentage','internal_value', 'internal_weight', 'split_gain'])
lgbmfig = [mplfig2html(decision_tree)]
else:
lgbmfig = []
figure_names = ['scores', 'roc-auc & cm', 'feature importance'] + ['force-plot feat'+ str(i) for i in range(len(a))] + ['heatmap', 'feature interaction'] + ['decision_tree' for x in lgbmfig]
ml_all_figures = scoreshtml+ sumhtml +a +heatmapfigs + [fig2html] + lgbmfig
ml_result_tabs = dcc.Tabs([dcc.Tab(children = html.Div(content, style = {'justify-content': 'center', 'margin': '0 auto', 'width': '2200px', 'height' : '1400px'}), label = name) for name,content in zip(figure_names, ml_all_figures)], style = {'justify-content': 'center', 'margin': '0 auto', 'width': '100%'})
return [ml_result_tabs]+ new_pipe2
#return dbc.Jumbotron(scoreshtml+ sumhtml +a +heatmapfigs + [fig2html]+ new_pipe2)#+ new_pipe2 #fig1html,fig2html
#html.Div(id = 'ml_results', style = {'justify-content': 'center', 'margin': '0 auto', 'width': '2200px', 'height' : '1500px'}
@app.callback(Output(component_id= 'show_pipeline', component_property ='children'),
Input(component_id= 'preprocessing_functions', component_property ='children'),
Input(component_id= 'preprocessing_columns', component_property ='children'),
Input(component_id = 'clf_disp', component_property = 'value'),
Input(component_id= 'ml_results', component_property ='children'),
Input(component_id = 'submit_pipe', component_property = 'n_clicks') )
def html_pipe(f_list, c_list, val, ml_children, clicked):
ctx = dash.callback_context.triggered[0]['prop_id'].split('.')[0]
if ctx != 'ml_results':
pipe = inpt_children_to_pipe(c_list,f_list, val)
return html.Iframe(srcDoc = estimator_html_repr(pipe), height='450', width='1150',style = {'border-style':'none', 'frameborder':'none'})
else:
try: ret = ml_children[-1]['props']['srcDoc']#####['props']['children'][-1]['props']['srcDoc']
except: return html.Iframe(srcDoc = estimator_html_repr(inpt_children_to_pipe(c_list,f_list, val)), height='450', width='1150', style = {'border-style':'none', 'frameborder':'none'})
return html.Iframe(srcDoc = ret, height='450', width='1150', style = {'border-style':'none', 'frameborder':'none'}) #1150
@app.callback(Output(component_id= 'preprocessing_functions', component_property ='children'),
Output(component_id= 'preprocessing_columns', component_property ='children'),
Input('n_tabs', 'value'),
Input(component_id = 'df', component_property = 'data'),
State(component_id= 'preprocessing_functions', component_property ='children'),
State(component_id= 'preprocessing_columns', component_property ='children') )
def reajust_number_of_column_transformers_ML(val,data,oldf, oldc ):
if int(val) > len(oldf) - 1 :
new_func = oldf + [ dcc.Dropdown(options= transformer_options,value = ['passthrough'] ,multi=True,clearable=True, id = 'ColumnTransformer_'+ str(i)) for i in range(len(oldf)-1, int(val))]
elif int(val) < len(oldf) - 1:
new_func = oldf[:int(val)+1]
else:
new_func = oldf
try: df = pd.read_json(data,convert_dates = False)
except: df = pd.DataFrame([0], columns = ['0'])
col_cat = [x for x in df.columns if str(df[x].dtype) == 'int64']
col_num = [x for x in df.columns if str(df[x].dtype) == 'float64']
sorted_vals = [{'label': x, 'value': x} for x in col_num + col_cat] + [ {'label': x, 'value': x} for x in df.columns if x not in ['Unnamed: 0']+ col_num + col_cat ]
new_c = [oldc[0]]+[ dcc.Dropdown(options= sorted_vals, value = '0' , multi=True,clearable=True, id = 'ColumnSelector_'+ str(i)) for i in range(int(val))]
return new_func, new_c
@app.callback(Output(component_id= 'qpcr-data-upload', component_property ='children'),
Output(component_id= 'qpcrdf', component_property ='options'),
Output('all_qPCR_concat', 'data'),
Input('upload-qPCR2', 'contents'),
State('upload-qPCR2', 'filename'),
State('upload-qPCR2', 'last_modified'))
def update_output(list_of_contents, list_of_names, list_of_dates):
if list_of_contents is not None:
children = []
allqpcrs = []
for contents, filename, date in zip(list_of_contents, list_of_names, list_of_dates):
content_type, content_string = contents.split(',')
decoded = base64.b64decode(content_string)
try:
if 'csv' in filename:
df = pd.read_csv(io.StringIO(decoded.decode('utf-8')), skiprows=19)
ex_header = pd.read_csv(io.StringIO(decoded.decode('utf-8')), nrows=9, header=None).iloc[:, :2]
for i in range(9): df[ex_header.iloc[i,0]] = ex_header.iloc[i,1]
df['Bottle_replicate'] = df.Sample.apply(lambda x: x.split('.')[1] if '.' in x else -1)
df.Sample = df.Sample.apply(lambda x: x.split('.')[0])
allqpcrs += [df]
except Exception as e:
print(e)
children += [html.Div(['There was an error processing this file. Is it a CSV?' ])]
children += [html.Div([ html.H5(filename), html.Hr() ]) ]
qpcr_files_concat = pd.concat(allqpcrs).reset_index(drop = True)
vals = [ {'label': x, 'value': x} for x in qpcr_files_concat.columns]
#merger_id = dcc.Dropdown( options=vals , searchable=False, id = 'qpcr_dropdown' , value = 'Sample')
return children, vals, qpcr_files_concat.to_json()
return html.Div(),[{"label": "Sample", "value": 'Sample'}], pd.DataFrame(np.zeros([1,1])).to_json()
@app.callback(Output(component_id= 'habitat-data-upload', component_property ='children'),
Output(component_id= 'habitatdf', component_property ='options'),
Output('habitatcsv', 'data'),
Input('upload-habitat', 'contents'),
State('upload-habitat', 'filename'),
State('upload-habitat', 'last_modified'))
def update_output_hab(list_of_contents, list_of_names, list_of_dates):
if list_of_contents is not None:
children = []
for contents, filename, date in zip(list_of_contents, list_of_names, list_of_dates):
content_type, content_string = contents.split(',')
decoded = base64.b64decode(content_string)
try:
                if 'csv' in filename: allhabs = pd.read_csv(io.StringIO(decoded.decode('utf-8')))
                elif 'tsv' in filename: allhabs = pd.read_csv(io.StringIO(decoded.decode('utf-8')), sep= '\t')
                elif 'xls' in filename: allhabs = pd.read_excel(io.BytesIO(decoded))
except Exception as e:
print(e)
children += [html.Div(['There was an error processing this file.' ])]
children += [html.Div([ html.H5(filename), html.Hr()]) ]
vals = [ {'label': x, 'value': x} for x in allhabs.columns]
#merger_id = dcc.Dropdown( options=vals )#, id = 'hab_dropdown') , searchable=False
return children, vals, allhabs.to_json()
return html.Div(),[], pd.DataFrame(np.zeros([1,1])).to_json()
@app.callback(Output(component_id= 'UMAP_cat', component_property ='options'), Output(component_id= 'UMAP_cat', component_property ='value'),
Output(component_id= 'UMAP_y', component_property ='options'), Output(component_id= 'UMAP_y', component_property ='value'),
Output(component_id= 'UMAP_cont', component_property ='options'), Output(component_id= 'UMAP_cont', component_property ='value'),
Output(component_id= 'ML_target', component_property ='options'), Output(component_id= 'ML_target', component_property ='value'),
Output(component_id= 'prophet_y', component_property ='options'), Output(component_id= 'prophet_y', component_property ='value'),
Output(component_id= 'prophet_ds', component_property ='options'), Output(component_id= 'prophet_ds', component_property ='value'),
Output(component_id= 'prophet_regressors', component_property ='options'), Output(component_id= 'prophet_regressors', component_property ='value'),
Input(component_id= 'Merged_df', component_property ='children'), Input(component_id= 'df', component_property ='data'))
#State(component_id= 'preprocessing_columns', component_property ='children'))
def update_UMAP_and_ML_select_columns(inpt, data): #, columns_list_id
#if data != {'namespace': 'dash_html_components', 'props': {'children': None}, 'type': 'Div'} and data != None and inpt['type'] != 'Div':
try:
df = pd.read_json(data)
vals = [ {'label': x, 'value': x} for x in df.columns if x not in ['Unnamed: 0']]
col_cat = [x for x in df.columns if str(df[x].dtype) == 'int64']
col_num = [x for x in df.columns if str(df[x].dtype) == 'float64']
col_object = [x for x in df.columns if (str(df[x].dtype) in ['object', 'datetime64[ns]'] )]
sorted_vals = [{'label': x, 'value': x} for x in col_num + col_cat] + [ {'label': x, 'value': x} for x in df.columns if x not in ['Unnamed: 0']+ col_num + col_cat ]
if len(col_object) > 0:
if 'date' in col_object: col_object = 'date'
elif 'datetime' in col_object: col_object = 'datetime'
else: col_object = col_object[0]
vals_object = [ {'label': x, 'value': x} for x in df.columns if (str(df[x].dtype) in ['object', 'datetime64[ns]'] )]
vals_plus_umap = sorted_vals + [{'label': 'UMAP_'+str(x), 'value': 'UMAP_'+str(x)} for x in range(1,3)]
#prep_cols = [columns_list_id[0]]+[ dcc.Dropdown(options= [{'label': x, 'value': x} for x in df.columns], value = df.columns[0] , multi=True,clearable=True, id = 'ColumnSelector_'+ str(i)) for i in range(len(columns_list_id)+1)]
        return sorted_vals, col_object, sorted_vals, [], sorted_vals,col_num +col_cat, sorted_vals, ['eDNA_frq'], vals_plus_umap, [], vals_object,col_object, vals_plus_umap, []
except:
return [], [], [], [], [],[], [], [], [], [],[], [],[], [] #, columns_list_id str(fixed_dataset.date.dtype) == 'object'
@app.callback(Output(component_id= 'UMAP_view', component_property ='figure'),
Output(component_id= 'df_with_umap', component_property ='data'),
Output(component_id= 'graph', component_property ='children'),
Output(component_id= 'cytoscape', component_property ='children'),
Input('UMAP_start', 'n_clicks'),
State('UMAP_cat', 'value'),
State('UMAP_cont', 'value'),
State('UMAP_y', 'value'),
State('n_neighboors', 'value'),
State('UMAP_radio', 'value'),
State(component_id= 'preprocessing_columns', component_property ='children'),
State(component_id= 'preprocessing_functions', component_property ='children'),
State(component_id = 'clf_disp', component_property = 'value'),
State(component_id= 'df', component_property ='data'))
def generate_UMAP(clicked, cat_labels, cont_labels,y ,n_nb, radio_val,MLcolumns, MLfuncs, MLclassif, dataframe_json):
umap_list = []
if dataframe_json != None:
df = pd.read_json(dataframe_json).dropna()
if y == None or y == []:
if len(cont_labels) > 0:
if radio_val == 2: preprocessed_data = StandardScaler().fit_transform(df[cont_labels])
if radio_val == 1: preprocessed_data = df[cont_labels]
if radio_val == 3: preprocessed_data = inpt_children_to_pipe(MLcolumns, MLfuncs, MLclassif)['preprocessing'].fit_transform(df)
umap_list += [umap.UMAP(n_neighbors = n_nb).fit(preprocessed_data)]
if len(cat_labels) > 0:
try: umap_list += [umap.UMAP(metric="jaccard", n_neighbors=150).fit(make_pipeline(OneHotEncoder()).fit_transform(df[cat_labels]))]
except: umap_list += [umap.UMAP(metric="jaccard", n_neighbors=150).fit(make_pipeline(OrdinalEncoder(), MinMaxScaler()).fit_transform(df[cat_labels]))]
else:# len(y) > 0:#:
if len(cont_labels) > 0:
if radio_val == 2: preprocessed_data = StandardScaler().fit_transform(df[cont_labels])
if radio_val == 1: preprocessed_data = df[cont_labels]
if radio_val == 3: preprocessed_data = inpt_children_to_pipe(MLcolumns, MLfuncs, MLclassif)['preprocessing'].fit_transform(df)
umap_list +=[umap.UMAP(n_neighbors = n_nb).fit(preprocessed_data,y=df[y])]
if len(cat_labels) > 0:
try: umap_list += [umap.UMAP(metric="jaccard", n_neighbors=150).fit(make_pipeline(OneHotEncoder()).fit_transform(df[cat_labels]),y=df[y])]
except: umap_list += [umap.UMAP(metric="jaccard", n_neighbors=150).fit(make_pipeline(OrdinalEncoder(), MinMaxScaler()).fit_transform(df[cat_labels]),y=df[y])]
if len(umap_list) > 1: UMAP = umap_list[0] + umap_list[1]
elif len(umap_list) == 1: UMAP = umap_list[0]
        else: return px.scatter(x = [0], y = [0]), pd.DataFrame(np.zeros([1,1])).to_json(), html.Div(), html.Div()
umap_df = pd.DataFrame(UMAP.embedding_, index = df.index, columns = ['UMAP_1', 'UMAP_2'])
df = pd.concat([df, umap_df], axis = 1)
cluster = hdbscan.HDBSCAN(min_cluster_size=10, gen_min_span_tree=True)
df['hdbscan'] = cluster.fit_predict(df[['UMAP_1', 'UMAP_2']])
df.columns = [x.replace(' ', '_') for x in df.columns]
dfscatter = df.copy()
        dfscatter['hdbscan'] = dfscatter['hdbscan'].apply(str) #------- convert to str ------------
dfscatter = dfscatter.reset_index()
#------------------------------------------------- generate graph of distances! ----------------------
default_stylesheet_cyto = [
{'selector': '[degree < 15]',
'style': {
'background-color': '#223c4f',# '#223c4f',
'label': 'data(id)',
'width': "30%",
'height': "30%"
}},
{'selector': 'edge',
'style': {
'line-color': '#223c4f',#"mapData(weight, 0, 10, blue, red)",
"mid-target-arrow-color": "red",
"mid-target-arrow-shape": "vee"
# '#223c4f', line-color
}},
{'selector': '[degree >= 15]',
'style': {
'background-color': 'red',# '#223c4f', line-color
#'shape': 'rectangle',
'label': 'data(id)',
'width': "40%",
'height': "40%"
}}
]
if df.shape[0] < 200:
#cyt = nx.cytoscape_data(cluster.minimum_spanning_tree_.to_networkx())['elements']
cyt = nx.from_scipy_sparse_matrix(kneighbors_graph(umap_df, 2, mode = 'distance', include_self= False, n_jobs = -1), create_using=nx.DiGraph)
#cytodisplay1 = cyto.Cytoscape(id='cytoscape', layout={'name': 'cose'},style={'width': '80%', 'height': '300px'}, elements = plotly_cyt2(cyt)) #
cytodisplay2 = cyto.Cytoscape(id='cytoscape', layout={'name': 'cose'},style={'width': '1000px', 'height': '1000px'},
stylesheet = default_stylesheet_cyto,
elements = plotly_cyt3(cyt)) #{'width': '2000px', 'height': '1000px'}
else:
df_colors_sns = pd.DataFrame(MinMaxScaler(feature_range = (-2,2)).fit_transform(dfscatter[['UMAP_1','UMAP_2']]), columns = ['UMAP_1','UMAP_2'])
colors_sns = pd.concat([make_colormap_clustering('hdbscan', 'tab10',0, dfscatter),
make_colormap_clustering('UMAP_1', 'PiYG',1, df_colors_sns).apply(lambda x: x[:-1]),
make_colormap_clustering('UMAP_2', 'PiYG',1, df_colors_sns)], axis = 1)
sns.clustermap(dfscatter[[x.replace(' ', '_') for x in cont_labels]], figsize=(15,15),cmap = sns.diverging_palette(20, 220, as_cmap=True), z_score = 1, cbar_pos = None, vmax = 2, vmin = -2,
row_colors =colors_sns , dendrogram_ratio=(.2, .1)) #col_cluster=False
fig1 = plt.gcf()
fig1.tight_layout()
cytodisplay2 = mplfig2html(fig1) #mplfig2html(fig1) --------------- edited here---------------------------
#### image from hdbscan
pic_IObytes = io.BytesIO()
fig = plt.figure(figsize = [16,6], dpi = 100)
ax = fig.add_subplot(121)
ax = cluster.single_linkage_tree_.plot(cmap='viridis', colorbar=False)
ax2 = fig.add_subplot(122)
ax2 = cluster.minimum_spanning_tree_.plot(edge_cmap='viridis',edge_alpha=0.6, node_size=80, edge_linewidth=2)
sns.despine()
fig.savefig(pic_IObytes, format='png')
fig.clear()
#lpotlyfigured2 = mpl2plotlyGraph(fig)
pic_IObytes.seek(0)
graph_part = [ html.Img(src ='data:image/png;base64,{}'.format(base64.b64encode(pic_IObytes.read()).decode()))]#cytodisplay ,cytodisplay1
#graph_part = [lpotlyfigured2]
return px.scatter(dfscatter, x="UMAP_1", y="UMAP_2", color = 'hdbscan', hover_data=dfscatter.columns, template='plotly',height=1200, width=1500), df.to_json(), graph_part,cytodisplay2
#return dcc.Graph(figure= px.scatter(dfscatter, x="UMAP_1", y="UMAP_2", color = 'hdbscan', hover_data=dfscatter.columns, template='plotly',height=1200, width=1500), id = 'umap_plot_selectable'), df.to_json(), graph_part,cytodisplay2
return px.scatter(x = [0], y = [0]), pd.DataFrame(np.zeros([1,1])).to_json(), html.Div(), html.Div()
@app.callback(Output(component_id= 'selected_points_umap', component_property ='data'),
Output(component_id= 'umap_selected_stats', component_property ='children'),
Input(component_id= 'UMAP_view', component_property ='selectedData'),
State(component_id= 'df_with_umap', component_property ='data'))
def store_selected_umap_points(x, json_df):
if x and x['points']:
try:
            region = pd.concat([pd.DataFrame(i) for i in x['points']])
#Description: An inefficient script that scrubs unwanted streams and variables. Also reassigns node names to simplified names.
#Author: iblack
#Last updated: 2020-05-06
import os
import requests
import pandas as pd
import numpy as np
from pandas.io.json import json_normalize
os.chdir(r'')
master = pd.read_csv(r'')
user = '' #OOI API user for <EMAIL>
token = '' #OOI API token for <EMAIL>
base_url = 'https://ooinet.oceanobservatories.org/api/m2m/' # Base M2M URL.
deploy_url = '12587/events/deployment/inv/' # Deployment information.
sensor_url = '12576/sensor/inv/' # Sensor information.
anno_url = '12580/anno/' # Annotations information.
stream_url = '12575/stream/' # Streams information.
#Request available streams from the OOI API.
r = requests.get(base_url + stream_url,auth = (user,token)).json() #Request all OOI streams and throw it into a JSON object.
streams = json_normalize(r) #Put the JSON object into a normalized Pandas dataframe.
science_streams = streams.loc[streams['stream_type.value'].str.contains('Science')].reset_index(drop=True)
#Holder arrays
var_names = pd.DataFrame()
var_display = pd.DataFrame()
var_desc = pd.DataFrame()
var_standard = pd.DataFrame()
var_dpi = pd.DataFrame()
var_dl = pd.DataFrame()
var_dpt = pd.DataFrame()
var_units = pd.DataFrame()
var_id = pd.DataFrame()
#For each data variable in each stream.
for param in science_streams['parameters']:
d = pd.DataFrame(param).reset_index(drop=True)
#List of variables to drop.
var_drops = ['port_timestamp',
'driver_timestamp',
'internal_timestamp',
'preferred_timestamp',
'ingestion_timestamp',
                 'suspect_timestamp',
'date_time_string',
'oxy_calphase',
'input_voltage',
'voltage_out',
'date_of_sample',
'packet_type',
'serial_number',
'checksum',
'unique_id',
'firmware_version',
'record_length',
'sysconfig_frequency',
'sysconfig_beam_pattern',
'date_string',
'time_string',
'ext_volt0',
'meter_type',
'firmware_revision',
'instrument_id',
'record_type',
'record_time',
'voltage_battery',
'data_source_id',
'num_bytes',
'raw_signal_beta',
'raw_signal_chl',
'raw_signal_cdom',
'date_time_array',
'error_code',
'header_id',
'status',
                 'thermistor_raw',
'sysconfig_vertical_orientation',
'raw_time_seconds',
'raw_time_microseconds',
'suspect_timestamp',
'calibrated_phase',
'blue_phase',
'red_phase',
'temp_compensated_phase',
'blue_amplitude',
'red_amplitude',
'raw_temperature',
'error_vel_threshold',
'timer',
'thermistor_start',
'thermistor_end',
'reference_light_measurements',
'light_measurements',
'aux_fitting_1',
'aux_fitting_2',
'frame_header',
'frame_type',
'frame_counter',
'aux_fitting_3',
'rms_error',
'dcl_controller_timestamp',
'sample_time',
'temp_lamp',
'voltage_lamp',
'voltage_main',
'temp_interior',
'lamp_time',
'suspect_timestamp',
'thermistor_end',
'thermistor_start',
'time_of_sample',
'aux_fitting',
'date_of_sample',
'chl_volts',
'unique_id',
'record_time',
'light_measurements',
'thermistor_start',
'reference_light_measurements',
'battery_voltage',
'sensor_id',
'vin_sense',
'time_sync_flag',
'fixed_leader_id',
'sysconfig_sensor_config',
'num_data_types',
'va_sense',
'raw_internal_temp',
'phsen_battery_volts',
'humidity',
'sio_controller_timestamp',
'sysconfig_head_attached',
'sysconfig_vertical_orientation',
'data_flag',
'external_temp_raw',
'measurement_wavelength_beta',
'measurement_wavelength_chl',
'measurement_wavelength_cdom',
'raw_internal_temp',
'seawater_scattering_coefficient',
'total_volume_scattering_coefficient',
'port_number',
'product_number',
'internal_temperature',
'thermistor_raw',
'bit_result_demod_1',
'bit_result_demod_0',
'bit_result_timing',
'inductive_id',
'raw_internal_temp',
'start_dir',
'file_time',
'thermistor_raw',
'analog_input_2',
'analog_input_1',
'dosta_ln_optode_oxygen',
'oxy_temp_volts',
'voltage_analog',
'ref_channel_average',
'dosta_abcdjm_cspp_tc_oxygen',
'estimated_oxygen_concentration',
'ctd_tc_oxygen',
'par_val_v',
'analog1',
'absorbance_ratio',
'absolute_pressure',
'pressure_temp',
'water_velocity_east',
'ensemble_number',
'transducer_depth',
'error_seawater_velocity',
'corrected_echo',
'water_velocity_up',
'water_velocity_north',
'error_velocity',
'correlation_magnitude',
'echo_intensity',
'percent_good',
'percent_transforms_reject',
'percent_bad',
'non_zero_depth',
'depth_from_pressure',
'non_zero_pressure',
'bin_1_distance',
'cell_length',
'num_cells',
'ensemble_counter',
'amplitude_beam',
'correlation_beam',
'turbulent_velocity_east',
'turbulent_velocity_north',
'turbulent_velocity_vertical',
'abcdef_signal_intensity',
'internal_temp_raw',
'velocity_beam',
'temp_spectrometer',
'nutnr_nitrogen_in_nitrate',
'nutnr_absorbance_at',
'nutnr_bromide',
'nutnr_spectrum_average',
'spectral_channels',
'nutnr_dark_value_used',
'nutnr_integration',
'nutnr_voltage',
'nutnr_current',
'nutnr_fit',
'sample_delay',
'ref_channel_variance',
'sea_water_dark',
'spec_channel_average',
'phsen_thermistor_temperature',
'day_of_year',
'ctd_time_uint32',
'signal_intensity']
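    # Drop the unwanted variables listed above, then flatten the remaining per-stream metadata
    # (names, units, descriptions, ...) into pipe-delimited strings, one entry per stream.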
d = d.loc[~d['name'].str.contains('|'.join(var_drops))].reset_index(drop=True)
names = '|'.join(d['name'])
var_names = np.append(var_names,names)
display = '|'.join(d['display_name'])
var_display = np.append(var_display,display)
check = d.isna()
for i in range(len(check)):
if check['parameter_function_map'][i] == True:
d['parameter_function_map'][i] = 'NA'
if check['standard_name'][i] == True:
d['standard_name'][i] = 'NA'
if check['description'][i] == True:
d['description'][i] = 'NA'
if check['data_product_identifier'][i] == True:
d['data_product_identifier'][i] = 'NA'
if check['data_level'][i] == True:
d['data_level'][i] = 'NA'
if check['data_product_type'][i] == True:
d['data_product_type'][i] = 'NA'
desc = '|'.join(d['description'])
var_desc = np.append(var_desc,desc)
dpi = '|'.join(d['data_product_identifier'])
var_dpi = np.append(var_dpi,dpi)
dpt_df = pd.DataFrame()
for dpt in d['data_product_type']:
t = pd.DataFrame([dpt])
dpt_df = pd.concat([dpt_df,t])
try:
dpt = dpt_df['value'].to_numpy().astype(str)
except:
dpt = dpt_df[0].to_numpy().astype(str)
dpt = '|'.join(dpt)
var_dpt = np.append(var_dpt,dpt)
standard = '|'.join(d['standard_name'])
var_standard = np.append(var_standard,standard)
dl = '|'.join(d['data_level'].astype(str))
var_dl = np.append(var_dl,dl)
units_df = pd.DataFrame()
for unit in d['unit']:
u = pd.DataFrame([unit])
u = u['value']
units_df = pd.concat([units_df,u])
units_df = units_df.reset_index(drop=True)
units_df = '|'.join(units_df[0])
var_units = np.append(var_units,units_df)
param_id = '|'.join(d['id'].astype('str'))
var_id = np.append(var_id,param_id)
data = {'NCVARS':var_names,
'VARS_UNITS':var_units,
'VARS_ID':var_id,
'VARS_DISPLAY_NAME':var_display,
'VARS_STANDARD_NAME':var_standard,
'VARS_DESCRIPTION':var_desc,
'VARS_DATA_PRODUCT_ID':var_dpi,
'VARS_DATA_PRODUCT_TYPE':var_dpt,
'VARS_DATA_LEVEL':var_dl,}
info = pd.DataFrame(data = data)
stream_info = pd.concat([science_streams,info],axis = 1)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Download minute bars for futures contracts.

Because the data volume is very large, you can download only the key contracts, for example:
    the dominant and sub-dominant contracts
    the near-month and far-month contracts

Make sure to update the download range via update_first_last.py first;
otherwise everything will be downloaded and the previously saved data will be overwritten.
"""
import os
import sys
from datetime import datetime, timedelta
import pandas as pd
from WindPy import w
from demo_future.E01_download_daily import read_constituent_at_date, merge_constituent_date
from kquant_data.config import __CONFIG_TDAYS_SHFE_FILE__, __CONFIG_H5_FUT_SECTOR_DIR__
from kquant_data.utils.symbol import split_alpha_number
from kquant_data.utils.xdatetime import yyyyMMdd_2_datetime, datetime_2_yyyyMMddHHmm
from kquant_data.wind.tdays import read_tdays
from kquant_data.wind.wsi import download_min_ohlcv
from kquant_data.xio.csv import read_datetime_dataframe
# Work around pandas on Python 3.6 not supporting Chinese (non-ASCII) file paths
print(sys.getfilesystemencoding())  # encoding before the change
try:
    sys._enablelegacywindowsfsencoding()  # apply the workaround
    print(sys.getfilesystemencoding())  # encoding after the change
except:
pass
def download_constituent_min(w, dirpath, date, ipo_last_trade, first_last, wind_code_set, trading_days):
constituent = read_constituent_at_date(dirpath, date)
if constituent is None:
        # No sector file for this date, presumably because it is identical to the previous one.
        # That is fine; the data should already have been downloaded last time.
return wind_code_set
constituent_dt = merge_constituent_date(constituent, ipo_last_trade, first_last)
for i in range(constituent_dt.shape[0]):
row = constituent_dt.iloc[i]
wind_code = row['wind_code']
        # Already handled in the current session; do not download it again.
if wind_code in wind_code_set:
continue
wind_code_set.add(wind_code)
        # The contract's time range is already covered; do not download it again.
        # The comparison here is done down to the minute.
if datetime_2_yyyyMMddHHmm(row['start']) == datetime_2_yyyyMMddHHmm(row['end']):
continue
product, num = split_alpha_number(wind_code)
path_dir = os.path.join(root_path, product)
if not os.path.exists(path_dir):
os.mkdir(path_dir)
path_csv = os.path.join(path_dir, '%s.csv' % wind_code)
        # The span from start to end can be very long (for newly downloaded data it may exceed a year),
        # so download it month by month, which is faster.
        # Saving several times into a single file can cause problems.
trading_days['idx'] = range(len(trading_days))
start = row['start']
end = row['end']
trading_days_idx = trading_days[start._date_repr:end._date_repr]['idx']
rng = list(range(trading_days_idx[0], trading_days_idx[-1], 30))
        # make the interval right-closed (include the last index)
rng.insert(len(rng), trading_days_idx[-1])
for idx, r in enumerate(rng):
if idx == 0:
continue
start_ = trading_days.iloc[rng[idx - 1]]['date']
end_ = trading_days.iloc[r]['date']
if idx == 1:
                # The first chunk is special: it must start from the evening of the previous trading day.
start_ = trading_days.iloc[rng[idx - 1] - 1]['date']
                start_ += pd.Timedelta('20H')
import sqlite3, csv, re, pickle
import pandas as pd
import numpy as np
from sklearn.decomposition import NMF
from fuzzywuzzy import process
############################
# set up SQLite DB
############################
db =sqlite3.connect('MoviesData.db')
cur = db.cursor()
db
############################
# load rating data
############################
sql = """SELECT userId,movieId,rating
FROM ratings
"""
cur.execute(sql)
data = cur.fetchall()
############################
# load movie data with some aggregations
############################
sql2 = """with rating_sum as
(SELECT movieId,round(avg(rating),1) as rating, count(rating) as rating_cnt
FROM ratings
GROUP BY movieId),
tags_combi as
(SELECT t1.movieId, GROUP_CONCAT(t2.tag) as all_tags
FROM tags t1 LEFT JOIN tags t2
ON t1.movieId = t2.movieId
GROUP BY t1.movieId)
SELECT
m.movieId,
m.title,
m.genres,
r.rating as rating,
r.rating_cnt as rating_cnt,
t.all_tags,
l.imdbId,
l.tmdbId
FROM movies m
LEFT JOIN rating_sum r ON (m.movieId=r.movieId)
LEFT JOIN tags_combi t ON (m.movieId=t.movieId)
LEFT JOIN links l ON (m.movieId=l.movieId)
"""
cur.execute(sql2)
data2 = cur.fetchall()
def get_recommendations(mov1,mov2,mov3,getrating1,getrating2,getrating3):
############################
# making a panda dataframe
############################
df_mov = pd.DataFrame(data2 ,columns=['movie_Id', 'title', 'genres','rating','rating_cnt','tags','imdb_Id','tmdb_Id'])
def extract_year(title):
pattern='\(([0-9]{4})\)'
try:
year = re.findall(pattern, title)[0]
except:
year=1900
return int(year)
df_mov['year'] = df_mov['title'].apply(extract_year)
############################
# NMF model for ratings data
############################
df_rat = pd.DataFrame(data ,columns=['user_Id','movie_Id','rating'])
df_rat = df_rat.set_index(['user_Id','movie_Id'])
df_rat = df_rat.unstack(1)
df_rat = df_rat.fillna(df_rat.mean())
# working copy
df_org = df_rat.copy()
nmf = NMF(n_components=3)
nmf.fit(df_rat)
# Pickle
binary = pickle.dumps(nmf)
open('nmf_model.bin', 'wb').write(binary)
binary = open('nmf_model.bin', 'rb').read()
nmf = pickle.loads(binary)
# P & Q values
P = nmf.transform(df_rat)
Q = nmf.components_
nR = np.dot(P, Q)
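    # nR = P @ Q is the low-rank reconstruction of the (mean-filled) user x movie rating matrix;
    # the reconstructed scores are what the recommender uses as predicted ratings.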
df_out = pd.DataFrame(nR)
# add new row to ratings dataframe
df_out = df_out.append(pd.Series([np.nan]), ignore_index = True)
#lookup movie id from moview df
id_returned1 = df_mov.loc[df_mov['title'] == mov1, 'movie_Id']
id_returned2 = df_mov.loc[df_mov['title'] == mov2, 'movie_Id']
id_returned3 = df_mov.loc[df_mov['title'] == mov3, 'movie_Id']
# enter users rating
df_out.iloc[-1, df_out.columns.get_loc(int(id_returned1)-1)] = int(getrating1)
df_out.iloc[-1, df_out.columns.get_loc(int(id_returned2)-1)] = int(getrating2)
df_out.iloc[-1, df_out.columns.get_loc(int(id_returned3)-1)] = int(getrating3)
    # duplicate the user line and add the column averages to the prediction data
df_pred1 = df_out.tail(1)
df_out = df_out.fillna(df_out.mean())
df_pred2 = df_out.tail(1)
df_out.drop(df_out.tail(1).index,inplace=True)
# merge with movies dataframe
    df_pred = pd.concat([pd.DataFrame(df_pred1.T), pd.DataFrame(df_pred2.T)], axis=1)
from yelpgoogletool import __version__
from yelpgoogletool import yelpgoogletool
import pandas as pd
def test_version():
assert __version__ == '1.0.0'
def test_SearchRestaurant():
list_len = 30
search_result = yelpgoogletool.SearchRestaurant(list_len = list_len)
assert isinstance(search_result,pd.core.frame.DataFrame) & (search_result.shape[0]<=list_len)
def test_FindBestRestaurant():
# create a testing dataframe `test_df`
name = pd.Series(['a','b','c','d','e','f','g','h'])
distance = pd.Series([1,2,3,4,5,6,7,8])
rating = pd.Series([2.0,5.0,4.5,4.0,4.5,3.0,3.5,4.0])
review_count = pd.Series([40,60,30,20,70,80,15,9])
    test_df = pd.DataFrame({"name":name,"distance":distance,"rating":rating,"review_count":review_count})
import pandas as pd
import numpy as np
# This case is handled identically to a dict of arrays.
data = np.zeros(shape=(2,), dtype=[('A', 'i4'), ('B', 'f4'), ('C', 'a10')])
data[:] = [(1, 2, 'Hello'), (3., 4., "World")]
df = pd.DataFrame(data=data)
print("DataFrame from Structured array : ")
# we get third column output as byte
print(df)
df = pd.DataFrame(data=data, index=['first', 'second'])
print("DataFrame from Structured array with index : ")
print(df)
pd.DataFrame(data, index=['first', 'second'], columns=['C', 'A', 'B'])
"""
DBSCAN or HDBSCAN clustering. This class also performs a greedy search over 2 DBSCAN/HDBSCAN parameters passed as first-param-name
and second-param-name
author: <NAME>, <NAME>
date: 05/2020
Usage:
python3 ClusterScan.py
--root-path-MTCNN-results ~/DATASET_GOOGLE_IMGS/VIDEO_DB_MTCNN
--program-participants-folder ~/test/
--first-param-name eps
--first-param-lower 0.5
--first-param-upper 12
--first-param-step 0.5
--metric euclidean
--cluster-instance DBSCAN
--second-param-value [10]
--metric euclidean
--dataset Google
--output-dir ~/supervised_results/BASELINE_OFICIAL_LN24H_v2
--individual-clusters
--quality-metric silhouette
--with-previous-individual-clusters
--with-previous-common-clusters
Options:
--root-path-MTCNN-results: Root path to the results obained by MTCNN (embs, bbox, mtcnn_debub, bbox_summ)
--program-participants-folder: Path to the folder with the .csv that contain the names of the
participants per program (See /AQuery_launcher/Aqeury_launcher_bingEngine.py
--first-param-name: Name of the first param as it appears in the DBSCAN and HDBSCAN libraries.
-DBSCAN: https://scikit-learn.org/stable/modules/generated/sklearn.cluster.DBSCAN.html
-HDBSCAN: https://github.com/scikit-learn-contrib/hdbscan
[default: eps (DBSCAN param.)]
--first-param-lower: Lower value for first parameter [default: 0.5]. For DBSCAN this parameter is eps
    --first-param-upper: Upper value for first parameter [default: 12]. For DBSCAN this parameter is eps
--first-param-step: Step between 2 consecutive values of the first parameter. For DBSCAN this parameter is eps
--second-param-value: List with second param values [default: [10]]
--metric: Distance metric in DBSCAN [Options: euclidean, cosine, manhattan, l1, l2]
    --dataset: Dataset reference name to extract clustering & save results (e.g. Google, OCR, Google_OCR, Google_Program ...)
    --output-dir: Directory to save results in. By default, it will act as the root folder and results will be saved in the
path: output_dir/Cfacial_clustering_results/'dataset'/
|_cluster_models/'program'/
|_cluster_eps_x_minSamples_x.sav
|_cluster_graphics/parameters/'program'
|_(matrix/graphics...)
--individual-clusters: True if we want a cluster per participant, else it creates a common cluster for all participants
--quality-metric: Metric to use as quality and select the best cluster combination of parameters. Options ( cluster, v_meassure, silhouette, calinski_harabasz).
--with-previous-individual-clusters: True if we did a previous individual cluster per participant, else False
--with-previous-common-clusters: True if we did a previous common cluster with all the participants, else False.
-h, --help Display script additional help
"""
import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import src.utils.loader as loader
import src.utils.saver as saver
from sklearn.cluster import DBSCAN
from hdbscan import HDBSCAN
from sklearn.metrics import silhouette_score
from sklearn.metrics import calinski_harabasz_score
import pickle
import src.metrics.metrics_clustering as metrics_clustering
from sklearn import metrics as sklearn_metrics
from src.BaseArgs import ClusteringArgs
import src.utils.files_utils as tools
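
# Illustrative sketch (not used by the class below): the core idea of the parameter scan is to fit
# one clustering per parameter value and keep the configuration with the best quality score, e.g.
# the silhouette coefficient. Names below are hypothetical; ClusterScan runs the full grid over
# both parameters and also records the calinski_harabasz and v-measure scores.
def _select_best_eps_by_silhouette(embeddings, eps_values, min_samples=10, metric="euclidean"):
    best_eps, best_score = None, -1.0
    for eps in eps_values:
        labels = DBSCAN(eps=eps, min_samples=min_samples, metric=metric).fit_predict(embeddings)
        if len(set(labels)) <= 1:  # silhouette needs at least two distinct labels
            continue
        score = silhouette_score(embeddings, labels)
        if score > best_score:
            best_eps, best_score = eps, score
    return best_eps, best_score
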
class ClusterScan():
def __init__(self, root_path_MTCNN_results, program_name, first_param_lower, first_param_upper, first_param_step,
second_parameter_list, output_dir, program_participants, metric="euclidean",
first_param_name = "eps", second_param_name ="min_samples",cluster_instance="DBSCAN"):
#cluster parameters to scan:
        self.first_param_values = sorted(list(np.linspace(first_param_upper, first_param_lower, int((first_param_upper - first_param_lower) / first_param_step), endpoint=False)) + [float(first_param_lower)])
self.second_param_values = second_parameter_list
self.metric = metric
self.cluster_instance = None
self.first_param_name = first_param_name
self.second_param_name = second_param_name
if(cluster_instance=="HDBSCAN"):
self.cluster_instance = HDBSCAN()
else:
self.cluster_instance = DBSCAN()
#Input
self.root_input_path = root_path_MTCNN_results
self.program = program_name
self.program_participants = program_participants
#Output
self.output_dir = output_dir
#Best cluster data
self.best_cluster_quality = -1
def load_cluster(self, path_cluster_model,input_embs, cluster_parameters={}):
"""
        Load the DBSCAN/HDBSCAN model from path_cluster_model. If the cluster does not exist yet, train and save it.
:param path_cluster_model: Path to load/save cluster
:param input_embs: Input data to train cluster. In this case, faces embeddings
:param cluster_parameters: Dict with the cluster parameters to load:
e.g. for DBSCAN: {'eps': 1.5, 'min_samples':10,'metric':'euclidean'}
:return:
- clt: trained cluster (DBSCAN or HDBSCAN)
"""
if(type(self.cluster_instance) is type(HDBSCAN())):
if("min_cluster_size" in cluster_parameters):
cluster_parameters["min_cluster_size"] = int(cluster_parameters["min_cluster_size"])
try:
if (os.path.exists(path_cluster_model)):
clt = pickle.load(open(path_cluster_model, 'rb'))
else:
self.cluster_instance.set_params(**cluster_parameters)
clt = self.cluster_instance
clt.fit(input_embs)
pickle.dump(clt, open(path_cluster_model, 'wb'))
except ModuleNotFoundError:
self.cluster_instance.set_params(**cluster_parameters)
clt = self.cluster_instance
clt.fit(input_embs)
pickle.dump(clt, open(path_cluster_model, 'wb'))
return clt
def cluster_parameters_scan(self, output_path_models, output_path_graphics):
"""
        Scan the clustering parameters (for DBSCAN, eps and min_samples) and save the generated clusters for a
        posterior analysis of the configuration that best fits the expected number of clusters.
        The program to analyse is taken from self.program.
:param output_path_models (str): Path where the clusters will be saved
:param output_path_graphics (str): Path where the graphs and the other metadata will be saved
"""
input_path_embs = os.path.join(self.root_input_path, self.program, "embeddings_sum")
input_path_bbox = os.path.join(self.root_input_path, self.program, "boundingboxes_sum")
os.makedirs(output_path_graphics, exist_ok=True)
        # Create a list of trusted participants based on the mean number of faces in the downloaded images
# load embs & bboxes for participants of program
bbox_total_faces, bbox_total_labels, embs_total_faces, embs_total_labels = loader.load_bboxes_and_embs(
input_path_bbox, input_path_embs, self.program_participants)
embs_total_labels2 = [x.split("/")[0] for x in embs_total_labels]
print('\n' + str(len(self.program_participants)) + ' PARTICIPANTS in ' + self.program)
# If there are images for person, then create clustering
if(len(bbox_total_labels)>1):
os.makedirs(output_path_models, exist_ok=True)
# Scan eps and min_sample values
df_results = pd.DataFrame()
cluster_matrix = np.zeros(shape=(len(self.first_param_values), len(self.second_param_values))) # number of clusters created by DBSCAN
avg_silhouette_matrix = np.zeros(
shape=(len(self.first_param_values), len(self.second_param_values))) # value of avg_silhouette for each combination in the scan
calinshi_harabasz_matrix = np.zeros(shape=(
len(self.first_param_values), len(self.second_param_values))) # value of calinshi_harabasz score for each combination in the scan
v_measure_matrix = np.zeros(shape=(len(self.first_param_values), len(self.second_param_values)))
i, j = 0, 0
fig = plt.figure()
for snd_param_val in self.second_param_values:
clusters = []
i = 0
for first_param_val in self.first_param_values:
print("\n[INFO] Clustering for "+self.first_param_name+" = " + str(first_param_val) + " and "+self.second_param_name+" = " + str(snd_param_val))
name_cluster = "cluster_"+self.first_param_name+"_" + str(first_param_val) + "_"+self.second_param_name+"_" + str(snd_param_val) + ".sav"
path_cluster_model = os.path.join(output_path_models, name_cluster)
cluster_params = {self.first_param_name:first_param_val,
self.second_param_name: snd_param_val,
"metric":self.metric}
clt = self.load_cluster(path_cluster_model, embs_total_faces, cluster_params)
numUniqueFaces = len(np.where(np.unique(clt.labels_) > -1)[0])
clusters.append(numUniqueFaces)
df_results = df_results.append(
pd.DataFrame([[first_param_val, snd_param_val, numUniqueFaces]], columns=[self.first_param_name, self.second_param_name, "n_clusters"]))
print("> Clusters: {}".format(numUniqueFaces))
cluster_matrix[i, j] = numUniqueFaces
try:
y_pred = clt.labels_
avg_silhouette_matrix[i, j] = silhouette_score(embs_total_faces, y_pred)
calinshi_harabasz_matrix[i, j] = calinski_harabasz_score(embs_total_faces,y_pred)
homogeneity, completeness, v_measure = sklearn_metrics.homogeneity_completeness_v_measure(
embs_total_labels2, y_pred)
v_measure_matrix[i, j] = v_measure
except ValueError:
avg_silhouette_matrix[i, j] = -1
calinshi_harabasz_matrix[i, j] = -1
v_measure_matrix[i, j] = -1
i += 1
j += 1
plt.plot(self.first_param_values, clusters, label=self.second_param_name+' = ' + str(snd_param_val))
plt.xlabel(self.first_param_name)
plt.ylabel('# Number of clusters')
plt.legend(fontsize="small")
plt.grid(True, which='both')
plt.minorticks_on()
# save data
plt.savefig(os.path.join(output_path_graphics, 'cluster_graphic.png'))
plt.close(fig)
# Save graphics and matrixes generated for posterior choice of best cluster
self.save_clustering_result_graphics(output_path_graphics, df_results, cluster_matrix, avg_silhouette_matrix,
calinshi_harabasz_matrix, v_measure_matrix)
def save_clustering_result_graphics(self, output_path_graphics,df_results,cluster_matrix,avg_silhouette_matrix,
calinshi_harabasz_matrix, v_measure_matrix):
"""
Save information about cluster training
:param output_path_graphics: Path to save matrixes with metrics extracted from clusters
:param df_results: Dataframe with columns: [eps,min_sample,number_clusters] that contain results of that clustering configuration
:param cluster_matrix: Matrix with as many rows as epsilons tested and as many columns as min_samples. In each
cell it will contain the total number of clusters per (eps,min_sample) combination
:param avg_silhouette_matrix: Matrix with as many rows as epsilons tested and as many columns as min_samples. In each
cell it will contain the results obtained of sklearn silhouette_score metric.
:param calinshi_harabasz_matrix: Similar to previous but in this case the matrix contains the results of the calinski_harabasz_score
:param v_measure_matrix: Similar to previous but in this case the matrix contains the results of the homogeneity_completeness_v_measure
"""
out_path_csv = os.path.join(output_path_graphics, 'combination_parameters_Ncluster.csv')
df_results.to_csv(out_path_csv, index=False)
dict_matrixes = {'cluster_matrix':cluster_matrix,
'silhouette_matrix':avg_silhouette_matrix,
'calinski_harabasz_matrix':calinshi_harabasz_matrix,
'v_meassure_matrix':v_measure_matrix}
for matrix_name in dict_matrixes.keys():
            #Generate a csv from each matrix (n_clusters, silhouette, calinski_harabasz and v_measure)
            (pd.DataFrame(dict_matrixes[matrix_name])
             .to_csv(os.path.join(output_path_graphics, matrix_name + '.csv')))