import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import anchors
from poola import core as pool
from sklearn.metrics import auc
## Reformatting functions
##
def clean_Sanjana_data(df, guide_col='Guide', library = False):
'''
Input: 1. df: Reads dataframe with guide_col and data columns
2. guide_col: Formatted as 'library_guide_gene' (e.g. 'HGLibA_00001_A1BG')
Output: df_clean: Dataframe with columns 'Guide', 'Gene Symbol', 'Reads'
'''
df_clean = df.rename(columns={guide_col:'old_Guide'})
library_list = []
guide_list = []
gene_list = []
for i, row in enumerate(df_clean.loc[:,'old_Guide']):
split_row = row.split('_')
library = split_row[0]
library_list.append(library)
guide = split_row[1]
guide_list.append(guide)
gene = split_row[2]
gene_list.append(gene)
df_clean['Library'] = pd.Series(library_list)
df_clean['Guide#'] = pd.Series(guide_list)
df_clean['Guide'] = df_clean[['Library','Guide#']].apply(lambda x: '_'.join(x.dropna().values), axis=1)
df_clean['Gene Symbol'] = pd.Series(gene_list)
df_clean = df_clean.drop(['Library', 'Guide#','old_Guide'], axis = 1)
# Reorder columns so Guide, Gene Symbol then data columns
data_cols = [col for col in df.columns if col != guide_col]
col_order = ['Guide','Gene Symbol'] + data_cols
df_clean = df_clean[col_order]
return df_clean
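# Example usage (illustrative sketch; the toy reads table below is hypothetical):
#   raw = pd.DataFrame({'Guide': ['HGLibA_00001_A1BG', 'HGLibB_00002_A1CF'],
#                       'Reads': [523, 10]})
#   clean = clean_Sanjana_data(raw)
#   # -> columns ['Guide', 'Gene Symbol', 'Reads'], with Guide 'HGLibA_00001' /
#   #    'HGLibB_00002' and Gene Symbol 'A1BG' / 'A1CF'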
def merge_dict_dfs(dictionary, merge_col = 'Gene Symbol', merge_how = 'outer', suffixes = ['_x', '_y']):
'''
Input: 1. dictionary: dictionary containing dataframes
2. merge_col: name of column on which dataframes will be merged (default = 'Gene Symbol')
3. merge_how: type of merge (default = 'outer')
4. suffixes: suffixes if two columns have the same name in dataframes being merged (default = ['_x','_y'])
Output: merge1: merged dataframe
'''
merge1 = pd.DataFrame()
keys = []
for df_name in dictionary.keys():
keys.append(df_name)
for i, df_name in enumerate(keys):
current_df = dictionary[df_name]
if (i+1 < (len(keys))): #stop before last df
next_df_key = keys[i+1]
next_df = dictionary[next_df_key]
# merge dfs
if merge1.empty: # if merged df does not already exist
merge1 = pd.merge(current_df, next_df, on = merge_col, how = merge_how, suffixes = suffixes)
#print(merge1.columns)
else: #otherwise merge next_df with previous merged df
new_merge = pd.merge(merge1, next_df, on = merge_col, how = merge_how, suffixes = suffixes)
merge1 = new_merge
return merge1
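# Example usage (illustrative; the toy replicate frames below are hypothetical):
#   reps = {'rep1': pd.DataFrame({'Gene Symbol': ['A1BG'], 'rep1_lfc': [0.2]}),
#           'rep2': pd.DataFrame({'Gene Symbol': ['A1BG'], 'rep2_lfc': [-0.1]})}
#   merged = merge_dict_dfs(reps)
#   # -> one row per 'Gene Symbol' carrying the data columns of both frames (outer join)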
def convertdftofloat(df):
'''
Converts data columns (all columns after the first) to float
Input:
1. df: data frame
Output: df with data columns cast to float
'''
for col in df.columns[1:]:
df[col] = df[col].astype(float) #convert dtype to float
return df
## QC functions
##
def get_lognorm(df, cols = ['Reads'], new_col = ''):
'''
Inputs:
1. df: clean reads dataframe
2. cols: list of names of columns containing the data used to calculate lognorms (default = ['Reads'])
3. new_col: lognorm column name (optional)
Output: New dataframe in which each column in cols is replaced by a '[col]_lognorm' column (default = 'Reads_lognorm')
'''
df_lognorm = df.copy().drop(cols, axis = 1)
for c in cols:
df_lognorm[c+'_lognorm'] = pool.lognorm(df[c])
return df_lognorm
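# Example usage (sketch; assumes the poola package is installed and that the
# clean reads frame has hypothetical count columns named 'pDNA' and 'dropout'):
#   lognorms = get_lognorm(clean_reads, cols=['pDNA', 'dropout'])
#   # -> 'pDNA' and 'dropout' are replaced by 'pDNA_lognorm' and 'dropout_lognorm'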
def calculate_lfc(lognorm_df, target_cols, ref_col = 'pDNA_lognorm'):
'''
Inputs:
1. lognorm_df: Dataframe containing reference and target lognorm columns
2. target_cols: List containing target column name(s) (lognorm column(s) for which log-fold change should be calculated)
3. ref_col: Reference column name (lognorm column relative to which log-fold change should be calculated) (default = 'pDNA_lognorm')
Outputs:
1. lfc_df: Dataframe containing log-fold changes of target columns
'''
#input df with lognorms + pDNA_lognorm
lfc_df = pool.calculate_lfcs(lognorm_df=lognorm_df,ref_col=ref_col, target_cols=target_cols)
for col in target_cols: #rename log-fold change column so doesn't say "lognorm"
lfc_col_name = col.replace('lognorm', 'lfc')
lfc_df = lfc_df.rename(columns = {col:lfc_col_name})
return lfc_df
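# Example usage (sketch; column names are hypothetical and follow the
# '*_lognorm' convention produced by get_lognorm above):
#   lfcs = calculate_lfc(lognorms, target_cols=['dropout_lognorm'])
#   # -> returns a frame with a 'dropout_lfc' column holding log-fold changes
#   #    of 'dropout' relative to 'pDNA'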
def get_controls(df, control_name = ['NonTargeting'], separate = True):
'''
Inputs:
1. df: Dataframe with columns "Gene Symbol" and data
2. control_name: list containing substrings that identify controls
3. separate: determines whether to return non-targeting and intergenic controls separately (default = True)
Outputs:
1. control: Dataframe containing rows with Gene Symbols including control string specified in control_name
OR 2. control_dict: If separate and multiple control names, dictionary containing dataframes
OR 3. all_controls: If separate = False and multiple control names, concatenated dataframes in control_dict
'''
if len(control_name) == 1:
control = df[df['Gene Symbol'].str.contains(control_name[0], na=False)]
return control
else:
control_dict = {}
for i, ctrl in enumerate(control_name):
control_dict[ctrl] = df[df['Gene Symbol'].str.contains(ctrl, na=False)]
if separate:
return control_dict
else:
all_controls = pd.concat(list(control_dict.values()))
return all_controls
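# Example usage (illustrative; the annotated lfc frame is hypothetical):
#   ctrl = get_controls(lfc_df, control_name=['NonTargeting'])
#   # -> rows whose 'Gene Symbol' contains 'NonTargeting'
#   ctrl_dict = get_controls(lfc_df, control_name=['NonTargeting', 'Intergenic'])
#   # -> dictionary with one dataframe per control label (separate=True by default)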
def get_gene_sets():
'''
Outputs: essential and non-essential genes as defined by Hart et al.
'''
ess_genes = pd.read_csv('../../../Data/External/Gene_sets_Hart/essential-genes.txt', sep='\t', header=None)
ess_genes.columns = ['Gene Symbol']
ess_genes['ess-val'] = [1]*len(ess_genes)
non_ess = pd.read_csv('../../../Data/External/Gene_sets_Hart/non-essential-genes.txt', sep='\t', header=None)
non_ess.columns = ['Gene Symbol']
non_ess['non-ess-val'] = [1]*len(non_ess)
return ess_genes, non_ess
def merge_gene_sets(df):
'''
Input:
1. df: data frame from which ROC-AUC is being calculated
Output:
1. df: data frame with binary indicators for essential and non-essential genes
'''
ess_genes, non_ess = get_gene_sets()
df = pd.merge(df, ess_genes, on='Gene Symbol', how='left')
df['ess-val'] = df['ess-val'].fillna(0)
df = pd.merge(df, non_ess, on='Gene Symbol', how='left')
df['non-ess-val'] = df['non-ess-val'].fillna(0)
return df
def get_roc_auc(df, col):
'''
Inputs:
1. df: data frame from which ROC-AUC is being calculated
2. col: column with data for which ROC-AUC is being calculated
Outputs:
1. roc_auc: AUC value where true positives are essential genes and false positives are non-essential
2. roc_df: dataframe used to plot ROC-AUC curve
'''
df = df.sort_values(by=col)
df['ess_cumsum'] = np.cumsum(df['ess-val'])
df['non_ess_cumsum'] = np.cumsum(df['non-ess-val'])
df['fpr'] = df['non_ess_cumsum']/(df['non_ess_cumsum'].iloc[-1])
df['tpr'] = df['ess_cumsum']/(df['ess_cumsum'].iloc[-1])
df.head()
roc_auc = auc(df['fpr'],df['tpr'])
roc_df = pd.DataFrame({'False_Positive_Rate':list(df.fpr), 'True_Positive_Rate':list(df.tpr)})
return roc_auc, roc_df
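# Worked example (toy numbers, hypothetical): ranking four genes by log-fold
# change where the two essential genes deplete first gives perfect separation:
#   toy = pd.DataFrame({'Gene Symbol': ['E1', 'E2', 'N1', 'N2'],
#                       'lfc': [-3.0, -2.5, 0.1, 0.2],
#                       'ess-val': [1, 1, 0, 0],
#                       'non-ess-val': [0, 0, 1, 1]})
#   roc_auc, roc_df = get_roc_auc(toy, 'lfc')  # roc_auc == 1.0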
## Plotting functions
##
def pair_cols(df, initial_id, res_id, sep = '_', col_type = 'lfc'): #if more than one set of initial/resistant pop pairs, sharex = True, store pairs in list
'''
Inputs:
1. df: Dataframe containing log-fold change values and gene symbols
2. initial_id: substring identifying initial-population column names (e.g. 'control'); only used when there are multiple initial/resistant pairs
3. res_id: substring identifying resistant-population column names (e.g. 'MOI'); only used when there are multiple initial/resistant pairs
4. sep: character separating fields in column names (default: '_')
5. col_type: substring in the names of columns containing the data to be plotted (default: 'lfc')
Outputs:
1. sharex: True if there is more than one initial/resistant pair, indicating subplots should share an x-axis
2. pairs: pairs of initial and resistant population columns as a list of lists
'''
cols = [col for col in df.columns if col_type in col]
pairs = [] #list of lists: ini/res pop pairs
sharex = False
if len(cols) > 2: #if more than one set of initial/resistant pop pairs
for index, col in enumerate(cols):
pair = []
if initial_id in col: #find corresponding resistant pop
pair.append(col)
res_pop = [col for col in cols if res_id in col]
for col in res_pop:
pair.append(col)
pairs.append(pair) #add to list of pairs (list of lists)
if len(pairs) > 1:
sharex = True # set sharex parameter for subplot
return sharex, pairs
else: #if only one pair of initial/resistant pops
sharex = False
pairs.append(cols)
return sharex, pairs
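# Example usage (illustrative; column names are hypothetical):
#   lfcs = pd.DataFrame({'Gene Symbol': ['A1BG'],
#                        'control_lfc': [0.0], 'MOI1_lfc': [-1.2]})
#   sharex, pairs = pair_cols(lfcs, initial_id='control', res_id='MOI')
#   # -> sharex is False and pairs == [['control_lfc', 'MOI1_lfc']]
#   # With more than two lfc columns, each column matching initial_id is grouped
#   # with the columns matching res_id and sharex is set for shared-x subplots.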
def lfc_dist_plot(chip_lfc, initial_id=None, res_id=None, paired_cols=None, col_sep = '_', filename = '', figsize = (6,4)): #kde plots of population distribution (initial, resistant)
'''
Inputs:
1. chip_lfc: Dataframe containing log-fold change values and gene symbols
Option 1:
2. initial_id: substring in the names of columns containing log-fold changes of the uninfected (initial) population
3. res_id: substring in the names of columns containing log-fold changes of the infected (resistant) population
Option 2:
4. paired_cols: pre-computed (sharex, lfc_pairs) tuple (e.g. from a modified pair_cols function); used instead of initial_id and res_id
5. filename: string for file name when saving figure
6. figsize: default (6,4)
Outputs: kde plots of population distribution (initial, resistant)
'''
if not paired_cols:
sharex, lfc_pairs = pair_cols(chip_lfc, initial_id = initial_id, res_id = res_id, sep = col_sep)
else:
sharex, lfc_pairs = paired_cols
fig, ax = plt.subplots(nrows = len(lfc_pairs), ncols = 1, sharex = sharex, figsize = figsize)
i = 0 # ax index if have to plot multiple axes
for k,c in enumerate(lfc_pairs):
for l, c1 in enumerate(c):
#title ex. Calu-3 Calabrese A screen 1, (k+1 = screen #)
if not filename:
title = ' '.join(c1.split(' ')[:3]) + ' (populations)'
else:
title = filename
label1 = c1 # use the column name as the legend label for both populations
if sharex: #if multiple axes, ax = ax[i]
chip_lfc[c1].plot(kind='kde',c=sns.color_palette('Set2')[l],label=label1, ax=ax[i], legend=True)
t = ax[i].set_xlabel('Log-fold changes')
t = ax[i].set_title(title)
ax[i].legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
else:
chip_lfc[c1].plot(kind='kde',c=sns.color_palette('Set2')[l],label=label1, ax=ax, legend=True)
t = ax.set_xlabel('Log-fold changes')
t = ax.set_title(title)
ax.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
i+=1
sns.despine()
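# Example call (sketch; assumes an annotated lfc frame with 'control*_lfc' and
# 'MOI*_lfc' columns as in the pair_cols example above):
#   lfc_dist_plot(lfc_df, initial_id='control', res_id='MOI')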
#Control distributions
def control_dist_plot(chip_lfc, control_name, filename, gene_col = 'Gene Symbol', initial_id=None, res_id=None, paired_cols=None, col_sep = '_', figsize = (6,4)):
'''
Inputs:
1. chip_lfc: annotated lfc data frame
2. control_name: list containing strings identifying controls
3. initial_id: string identifying initial column names
4. res_id: string identifying resistant column names
5. filename: filename for saving figure
6. figsize: default (6, 4)
Outputs: kde plots of control distributions (initial, resistant)
'''
if not paired_cols:
sharex, lfc_pairs = pair_cols(chip_lfc, initial_id = initial_id, res_id = res_id, sep = col_sep)
else:
sharex, lfc_pairs = paired_cols
controls = get_controls(chip_lfc, control_name)
nrows = len(lfc_pairs)
fig, ax = plt.subplots(nrows = nrows, ncols = 1, sharex = sharex, figsize = figsize)
i = 0 # ax index if have to plot multiple axes
for k,c in enumerate(lfc_pairs): # k=screen, c=ini, res pair
for l, c1 in enumerate(c): # l = ini or res, c1 = pop label
title = c1 + ' (controls)'
pop_label = c1.split(' ')[0] #labels 'initial' or 'resistant'
#Plot same screen on same subplot
if sharex: #if multiple axes, ax = ax[i]
controls[c1].plot(kind='kde',c=sns.color_palette('Set2')[l],label=control_name[0] +' ('+pop_label+')', ax=ax[i], legend=True)
ax[i].legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
sns.despine()
t = ax[i].set_xlabel('Log-fold changes')
t = ax[i].set_title(title)
else:
controls[c1].plot(kind='kde',c=sns.color_palette('Set2')[l],label=control_name[0]+ ' ('+pop_label+')', ax=ax, legend=True)
ax.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
sns.despine()
t = ax.set_xlabel('Log-fold changes')
t = ax.set_title(title)
i+=1 #switch to next subplot for next screen
sns.despine()
## Residual functions
def run_guide_residuals(lfc_df, initial_id=None, res_id=None, paired_cols = None):
'''
Calls get_guide_residuals function from anchors package to calculate guide-level residual z-scores
Input:
1. lfc_df: data frame with log-fold changes (relative to pDNA)
'''
lfc_df = lfc_df.drop_duplicates()
if not paired_cols:
paired_lfc_cols = pair_cols(lfc_df, initial_id, res_id)[1] #get lfc pairs
else:
paired_lfc_cols = paired_cols
#reference_df: column1 = modifier condition, column2 = unperturbed column
ref_df = pd.DataFrame(columns=['modified', 'unperturbed'])
# Copyright (c) 2018-2021, NVIDIA CORPORATION.
import array as arr
import datetime
import io
import operator
import random
import re
import string
import textwrap
from copy import copy
import cupy
import numpy as np
import pandas as pd
import pyarrow as pa
import pytest
from numba import cuda
import cudf
from cudf.core._compat import PANDAS_GE_110, PANDAS_GE_120
from cudf.core.column import column
from cudf.tests import utils
from cudf.tests.utils import (
ALL_TYPES,
DATETIME_TYPES,
NUMERIC_TYPES,
assert_eq,
assert_exceptions_equal,
does_not_raise,
gen_rand,
)
def test_init_via_list_of_tuples():
data = [
(5, "cats", "jump", np.nan),
(2, "dogs", "dig", 7.5),
(3, "cows", "moo", -2.1, "occasionally"),
]
pdf = pd.DataFrame(data)
gdf = cudf.DataFrame(data)
assert_eq(pdf, gdf)
def _dataframe_na_data():
return [
pd.DataFrame(
{
"a": [0, 1, 2, np.nan, 4, None, 6],
"b": [np.nan, None, "u", "h", "d", "a", "m"],
},
index=["q", "w", "e", "r", "t", "y", "u"],
),
pd.DataFrame({"a": [0, 1, 2, 3, 4], "b": ["a", "b", "u", "h", "d"]}),
pd.DataFrame(
{
"a": [None, None, np.nan, None],
"b": [np.nan, None, np.nan, None],
}
),
pd.DataFrame({"a": []}),
pd.DataFrame({"a": [np.nan], "b": [None]}),
pd.DataFrame({"a": ["a", "b", "c", None, "e"]}),
pd.DataFrame({"a": ["a", "b", "c", "d", "e"]}),
]
@pytest.mark.parametrize("rows", [0, 1, 2, 100])
def test_init_via_list_of_empty_tuples(rows):
data = [()] * rows
pdf = pd.DataFrame(data)
gdf = cudf.DataFrame(data)
assert_eq(
pdf,
gdf,
check_like=True,
check_column_type=False,
check_index_type=False,
)
@pytest.mark.parametrize(
"dict_of_series",
[
{"a": pd.Series([1.0, 2.0, 3.0])},
{"a": pd.Series([1.0, 2.0, 3.0], index=[4, 5, 6])},
{
"a": pd.Series([1.0, 2.0, 3.0], index=[4, 5, 6]),
"b": pd.Series([1.0, 2.0, 4.0], index=[1, 2, 3]),
},
{"a": [1, 2, 3], "b": pd.Series([1.0, 2.0, 3.0], index=[4, 5, 6])},
{
"a": pd.Series([1.0, 2.0, 3.0], index=["a", "b", "c"]),
"b": pd.Series([1.0, 2.0, 4.0], index=["c", "d", "e"]),
},
{
"a": pd.Series(
["a", "b", "c"],
index=pd.MultiIndex.from_tuples([(1, 2), (1, 3), (2, 3)]),
),
"b": pd.Series(
["a", " b", "d"],
index=pd.MultiIndex.from_tuples([(1, 2), (1, 3), (2, 3)]),
),
},
],
)
def test_init_from_series_align(dict_of_series):
pdf = pd.DataFrame(dict_of_series)
gdf = cudf.DataFrame(dict_of_series)
assert_eq(pdf, gdf)
for key in dict_of_series:
if isinstance(dict_of_series[key], pd.Series):
dict_of_series[key] = cudf.Series(dict_of_series[key])
gdf = cudf.DataFrame(dict_of_series)
assert_eq(pdf, gdf)
@pytest.mark.parametrize(
("dict_of_series", "expectation"),
[
(
{
"a": pd.Series(["a", "b", "c"], index=[4, 4, 5]),
"b": pd.Series(["a", "b", "c"], index=[4, 5, 6]),
},
pytest.raises(
ValueError, match="Cannot align indices with non-unique values"
),
),
(
{
"a": pd.Series(["a", "b", "c"], index=[4, 4, 5]),
"b": pd.Series(["a", "b", "c"], index=[4, 4, 5]),
},
does_not_raise(),
),
],
)
def test_init_from_series_align_nonunique(dict_of_series, expectation):
with expectation:
gdf = cudf.DataFrame(dict_of_series)
if expectation == does_not_raise():
pdf = pd.DataFrame(dict_of_series)
assert_eq(pdf, gdf)
def test_init_unaligned_with_index():
pdf = pd.DataFrame(
{
"a": pd.Series([1.0, 2.0, 3.0], index=[4, 5, 6]),
"b": pd.Series([1.0, 2.0, 3.0], index=[1, 2, 3]),
},
index=[7, 8, 9],
)
gdf = cudf.DataFrame(
{
"a": cudf.Series([1.0, 2.0, 3.0], index=[4, 5, 6]),
"b": cudf.Series([1.0, 2.0, 3.0], index=[1, 2, 3]),
},
index=[7, 8, 9],
)
assert_eq(pdf, gdf, check_dtype=False)
def test_series_basic():
# Make series from buffer
a1 = np.arange(10, dtype=np.float64)
series = cudf.Series(a1)
assert len(series) == 10
np.testing.assert_equal(series.to_array(), np.hstack([a1]))
def test_series_from_cupy_scalars():
data = [0.1, 0.2, 0.3]
data_np = np.array(data)
data_cp = cupy.array(data)
s_np = cudf.Series([data_np[0], data_np[2]])
s_cp = cudf.Series([data_cp[0], data_cp[2]])
assert_eq(s_np, s_cp)
@pytest.mark.parametrize("a", [[1, 2, 3], [1, 10, 30]])
@pytest.mark.parametrize("b", [[4, 5, 6], [-11, -100, 30]])
def test_append_index(a, b):
df = pd.DataFrame()
df["a"] = a
df["b"] = b
gdf = cudf.DataFrame()
gdf["a"] = a
gdf["b"] = b
# Check the default index after appending two columns(Series)
expected = df.a.append(df.b)
actual = gdf.a.append(gdf.b)
assert len(expected) == len(actual)
assert_eq(expected.index, actual.index)
expected = df.a.append(df.b, ignore_index=True)
actual = gdf.a.append(gdf.b, ignore_index=True)
assert len(expected) == len(actual)
assert_eq(expected.index, actual.index)
def test_series_init_none():
# test for creating empty series
# 1: without initializing
sr1 = cudf.Series()
got = sr1.to_string()
expect = "Series([], dtype: float64)"
# values should match despite whitespace difference
assert got.split() == expect.split()
# 2: Using `None` as an initializer
sr2 = cudf.Series(None)
got = sr2.to_string()
expect = "Series([], dtype: float64)"
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_basic():
np.random.seed(0)
df = cudf.DataFrame()
# Populate with cuda memory
df["keys"] = np.arange(10, dtype=np.float64)
np.testing.assert_equal(df["keys"].to_array(), np.arange(10))
assert len(df) == 10
# Populate with numpy array
rnd_vals = np.random.random(10)
df["vals"] = rnd_vals
np.testing.assert_equal(df["vals"].to_array(), rnd_vals)
assert len(df) == 10
assert tuple(df.columns) == ("keys", "vals")
# Make another dataframe
df2 = cudf.DataFrame()
df2["keys"] = np.array([123], dtype=np.float64)
df2["vals"] = np.array([321], dtype=np.float64)
# Concat
df = cudf.concat([df, df2])
assert len(df) == 11
hkeys = np.asarray(np.arange(10, dtype=np.float64).tolist() + [123])
hvals = np.asarray(rnd_vals.tolist() + [321])
np.testing.assert_equal(df["keys"].to_array(), hkeys)
np.testing.assert_equal(df["vals"].to_array(), hvals)
# As matrix
mat = df.as_matrix()
expect = np.vstack([hkeys, hvals]).T
np.testing.assert_equal(mat, expect)
# test dataframe with tuple name
df_tup = cudf.DataFrame()
data = np.arange(10)
df_tup[(1, "foobar")] = data
np.testing.assert_equal(data, df_tup[(1, "foobar")].to_array())
df = cudf.DataFrame(pd.DataFrame({"a": [1, 2, 3], "c": ["a", "b", "c"]}))
pdf = pd.DataFrame(pd.DataFrame({"a": [1, 2, 3], "c": ["a", "b", "c"]}))
assert_eq(df, pdf)
gdf = cudf.DataFrame({"id": [0, 1], "val": [None, None]})
gdf["val"] = gdf["val"].astype("int")
assert gdf["val"].isnull().all()
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame({"a": range(10), "b": range(10, 20), "c": range(1, 11)}),
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5}
),
],
)
@pytest.mark.parametrize(
"columns", [["a"], ["b"], "a", "b", ["a", "b"]],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_columns(pdf, columns, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(columns=columns, inplace=inplace)
actual = gdf.drop(columns=columns, inplace=inplace)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame({"a": range(10), "b": range(10, 20), "c": range(1, 11)}),
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5}
),
],
)
@pytest.mark.parametrize(
"labels",
[[1], [0], 1, 5, [5, 9], pd.Index([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_labels_axis_0(pdf, labels, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(labels=labels, axis=0, inplace=inplace)
actual = gdf.drop(labels=labels, axis=0, inplace=inplace)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame({"a": range(10), "b": range(10, 20), "c": range(1, 11)}),
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5}
),
],
)
@pytest.mark.parametrize(
"index",
[[1], [0], 1, 5, [5, 9], pd.Index([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_index(pdf, index, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(index=index, inplace=inplace)
actual = gdf.drop(index=index, inplace=inplace)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5},
index=pd.MultiIndex(
levels=[
["lama", "cow", "falcon"],
["speed", "weight", "length"],
],
codes=[
[0, 0, 0, 1, 1, 1, 2, 2, 2, 1],
[0, 1, 2, 0, 1, 2, 0, 1, 2, 1],
],
),
)
],
)
@pytest.mark.parametrize(
"index,level",
[
("cow", 0),
("lama", 0),
("falcon", 0),
("speed", 1),
("weight", 1),
("length", 1),
pytest.param(
"cow",
None,
marks=pytest.mark.xfail(
reason="https://github.com/pandas-dev/pandas/issues/36293"
),
),
pytest.param(
"lama",
None,
marks=pytest.mark.xfail(
reason="https://github.com/pandas-dev/pandas/issues/36293"
),
),
pytest.param(
"falcon",
None,
marks=pytest.mark.xfail(
reason="https://github.com/pandas-dev/pandas/issues/36293"
),
),
],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_multiindex(pdf, index, level, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(index=index, inplace=inplace, level=level)
actual = gdf.drop(index=index, inplace=inplace, level=level)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame({"a": range(10), "b": range(10, 20), "c": range(1, 11)}),
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5}
),
],
)
@pytest.mark.parametrize(
"labels", [["a"], ["b"], "a", "b", ["a", "b"]],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_labels_axis_1(pdf, labels, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(labels=labels, axis=1, inplace=inplace)
actual = gdf.drop(labels=labels, axis=1, inplace=inplace)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
def test_dataframe_drop_error():
df = cudf.DataFrame({"a": [1], "b": [2], "c": [3]})
pdf = df.to_pandas()
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"columns": "d"}),
rfunc_args_and_kwargs=([], {"columns": "d"}),
expected_error_message="column 'd' does not exist",
)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"columns": ["a", "d", "b"]}),
rfunc_args_and_kwargs=([], {"columns": ["a", "d", "b"]}),
expected_error_message="column 'd' does not exist",
)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=(["a"], {"columns": "a", "axis": 1}),
rfunc_args_and_kwargs=(["a"], {"columns": "a", "axis": 1}),
expected_error_message="Cannot specify both",
)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"axis": 1}),
rfunc_args_and_kwargs=([], {"axis": 1}),
expected_error_message="Need to specify at least",
)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([[2, 0]],),
rfunc_args_and_kwargs=([[2, 0]],),
expected_error_message="One or more values not found in axis",
)
def test_dataframe_drop_raises():
df = cudf.DataFrame(
{"a": [1, 2, 3], "c": [10, 20, 30]}, index=["x", "y", "z"]
)
pdf = df.to_pandas()
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=(["p"],),
rfunc_args_and_kwargs=(["p"],),
expected_error_message="One or more values not found in axis",
)
# label dtype mismatch
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([3],),
rfunc_args_and_kwargs=([3],),
expected_error_message="One or more values not found in axis",
)
expect = pdf.drop("p", errors="ignore")
actual = df.drop("p", errors="ignore")
assert_eq(actual, expect)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"columns": "p"}),
rfunc_args_and_kwargs=([], {"columns": "p"}),
expected_error_message="column 'p' does not exist",
)
expect = pdf.drop(columns="p", errors="ignore")
actual = df.drop(columns="p", errors="ignore")
assert_eq(actual, expect)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"labels": "p", "axis": 1}),
rfunc_args_and_kwargs=([], {"labels": "p", "axis": 1}),
expected_error_message="column 'p' does not exist",
)
expect = pdf.drop(labels="p", axis=1, errors="ignore")
actual = df.drop(labels="p", axis=1, errors="ignore")
assert_eq(actual, expect)
def test_dataframe_column_add_drop_via_setitem():
df = cudf.DataFrame()
data = np.asarray(range(10))
df["a"] = data
df["b"] = data
assert tuple(df.columns) == ("a", "b")
del df["a"]
assert tuple(df.columns) == ("b",)
df["c"] = data
assert tuple(df.columns) == ("b", "c")
df["a"] = data
assert tuple(df.columns) == ("b", "c", "a")
def test_dataframe_column_set_via_attr():
data_0 = np.asarray([0, 2, 4, 5])
data_1 = np.asarray([1, 4, 2, 3])
data_2 = np.asarray([2, 0, 3, 0])
df = cudf.DataFrame({"a": data_0, "b": data_1, "c": data_2})
for i in range(10):
df.c = df.a
assert assert_eq(df.c, df.a, check_names=False)
assert tuple(df.columns) == ("a", "b", "c")
df.c = df.b
assert assert_eq(df.c, df.b, check_names=False)
assert tuple(df.columns) == ("a", "b", "c")
def test_dataframe_column_drop_via_attr():
df = cudf.DataFrame({"a": []})
with pytest.raises(AttributeError):
del df.a
assert tuple(df.columns) == tuple("a")
@pytest.mark.parametrize("axis", [0, "index"])
def test_dataframe_index_rename(axis):
pdf = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]})
gdf = cudf.DataFrame.from_pandas(pdf)
expect = pdf.rename(mapper={1: 5, 2: 6}, axis=axis)
got = gdf.rename(mapper={1: 5, 2: 6}, axis=axis)
assert_eq(expect, got)
expect = pdf.rename(index={1: 5, 2: 6})
got = gdf.rename(index={1: 5, 2: 6})
assert_eq(expect, got)
expect = pdf.rename({1: 5, 2: 6})
got = gdf.rename({1: 5, 2: 6})
assert_eq(expect, got)
# `pandas` can support indexes with mixed values. We throw a
# `NotImplementedError`.
with pytest.raises(NotImplementedError):
gdf.rename(mapper={1: "x", 2: "y"}, axis=axis)
def test_dataframe_MI_rename():
gdf = cudf.DataFrame(
{"a": np.arange(10), "b": np.arange(10), "c": np.arange(10)}
)
gdg = gdf.groupby(["a", "b"]).count()
pdg = gdg.to_pandas()
expect = pdg.rename(mapper={1: 5, 2: 6}, axis=0)
got = gdg.rename(mapper={1: 5, 2: 6}, axis=0)
assert_eq(expect, got)
@pytest.mark.parametrize("axis", [1, "columns"])
def test_dataframe_column_rename(axis):
pdf = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]})
gdf = cudf.DataFrame.from_pandas(pdf)
expect = pdf.rename(mapper=lambda name: 2 * name, axis=axis)
got = gdf.rename(mapper=lambda name: 2 * name, axis=axis)
assert_eq(expect, got)
expect = pdf.rename(columns=lambda name: 2 * name)
got = gdf.rename(columns=lambda name: 2 * name)
assert_eq(expect, got)
rename_mapper = {"a": "z", "b": "y", "c": "x"}
expect = pdf.rename(columns=rename_mapper)
got = gdf.rename(columns=rename_mapper)
assert_eq(expect, got)
def test_dataframe_pop():
pdf = pd.DataFrame(
{"a": [1, 2, 3], "b": ["x", "y", "z"], "c": [7.0, 8.0, 9.0]}
)
gdf = cudf.DataFrame.from_pandas(pdf)
# Test non-existing column error
with pytest.raises(KeyError) as raises:
gdf.pop("fake_colname")
raises.match("fake_colname")
# check pop numeric column
pdf_pop = pdf.pop("a")
gdf_pop = gdf.pop("a")
assert_eq(pdf_pop, gdf_pop)
assert_eq(pdf, gdf)
# check string column
pdf_pop = pdf.pop("b")
gdf_pop = gdf.pop("b")
assert_eq(pdf_pop, gdf_pop)
assert_eq(pdf, gdf)
# check float column and empty dataframe
pdf_pop = pdf.pop("c")
gdf_pop = gdf.pop("c")
assert_eq(pdf_pop, gdf_pop)
assert_eq(pdf, gdf)
# check empty dataframe edge case
empty_pdf = pd.DataFrame(columns=["a", "b"])
empty_gdf = cudf.DataFrame(columns=["a", "b"])
pb = empty_pdf.pop("b")
gb = empty_gdf.pop("b")
assert len(pb) == len(gb)
assert empty_pdf.empty and empty_gdf.empty
@pytest.mark.parametrize("nelem", [0, 3, 100, 1000])
def test_dataframe_astype(nelem):
df = cudf.DataFrame()
data = np.asarray(range(nelem), dtype=np.int32)
df["a"] = data
assert df["a"].dtype is np.dtype(np.int32)
df["b"] = df["a"].astype(np.float32)
assert df["b"].dtype is np.dtype(np.float32)
np.testing.assert_equal(df["a"].to_array(), df["b"].to_array())
@pytest.mark.parametrize("nelem", [0, 100])
def test_index_astype(nelem):
df = cudf.DataFrame()
data = np.asarray(range(nelem), dtype=np.int32)
df["a"] = data
assert df.index.dtype is np.dtype(np.int64)
df.index = df.index.astype(np.float32)
assert df.index.dtype is np.dtype(np.float32)
df["a"] = df["a"].astype(np.float32)
np.testing.assert_equal(df.index.to_array(), df["a"].to_array())
df["b"] = df["a"]
df = df.set_index("b")
df["a"] = df["a"].astype(np.int16)
df.index = df.index.astype(np.int16)
np.testing.assert_equal(df.index.to_array(), df["a"].to_array())
def test_dataframe_to_string():
pd.options.display.max_rows = 5
pd.options.display.max_columns = 8
# Test basic
df = cudf.DataFrame(
{"a": [1, 2, 3, 4, 5, 6], "b": [11, 12, 13, 14, 15, 16]}
)
string = str(df)
assert string.splitlines()[-1] == "[6 rows x 2 columns]"
# Test skipped columns
df = cudf.DataFrame(
{
"a": [1, 2, 3, 4, 5, 6],
"b": [11, 12, 13, 14, 15, 16],
"c": [11, 12, 13, 14, 15, 16],
"d": [11, 12, 13, 14, 15, 16],
}
)
string = df.to_string()
assert string.splitlines()[-1] == "[6 rows x 4 columns]"
# Test masked
df = cudf.DataFrame(
{"a": [1, 2, 3, 4, 5, 6], "b": [11, 12, 13, 14, 15, 16]}
)
data = np.arange(6)
mask = np.zeros(1, dtype=cudf.utils.utils.mask_dtype)
mask[0] = 0b00101101
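# bit i of the mask marks row i as valid: 0b00101101 sets bits 0, 2, 3 and 5,
# so rows 1 and 4 of the six data values are null (hence null_count == 2 below)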
masked = cudf.Series.from_masked_array(data, mask)
assert masked.null_count == 2
df["c"] = masked
# check data
values = masked.copy()
validids = [0, 2, 3, 5]
densearray = masked.to_array()
np.testing.assert_equal(data[validids], densearray)
# valid positions are correct
for i in validids:
assert data[i] == values[i]
# null position is correct
for i in range(len(values)):
if i not in validids:
assert values[i] is cudf.NA
pd.options.display.max_rows = 10
got = df.to_string()
expect = """
a b c
0 1 11 0
1 2 12 <NA>
2 3 13 2
3 4 14 3
4 5 15 <NA>
5 6 16 5
"""
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_to_string_wide(monkeypatch):
monkeypatch.setenv("COLUMNS", "79")
# Test basic
df = cudf.DataFrame()
for i in range(100):
df["a{}".format(i)] = list(range(3))
pd.options.display.max_columns = 0
got = df.to_string()
expect = """
a0 a1 a2 a3 a4 a5 a6 a7 ... a92 a93 a94 a95 a96 a97 a98 a99
0 0 0 0 0 0 0 0 0 ... 0 0 0 0 0 0 0 0
1 1 1 1 1 1 1 1 1 ... 1 1 1 1 1 1 1 1
2 2 2 2 2 2 2 2 2 ... 2 2 2 2 2 2 2 2
[3 rows x 100 columns]
"""
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_empty_to_string():
# Test for printing empty dataframe
df = cudf.DataFrame()
got = df.to_string()
expect = "Empty DataFrame\nColumns: []\nIndex: []\n"
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_emptycolumns_to_string():
# Test for printing dataframe having empty columns
df = cudf.DataFrame()
df["a"] = []
df["b"] = []
got = df.to_string()
expect = "Empty DataFrame\nColumns: [a, b]\nIndex: []\n"
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_copy():
# Test for copying the dataframe using python copy pkg
df = cudf.DataFrame()
df["a"] = [1, 2, 3]
df2 = copy(df)
df2["b"] = [4, 5, 6]
got = df.to_string()
expect = """
a
0 1
1 2
2 3
"""
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_copy_shallow():
# Test for copy dataframe using class method
df = cudf.DataFrame()
df["a"] = [1, 2, 3]
df2 = df.copy()
df2["b"] = [4, 2, 3]
got = df.to_string()
expect = """
a
0 1
1 2
2 3
"""
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_dtypes():
dtypes = pd.Series(
[np.int32, np.float32, np.float64], index=["c", "a", "b"]
)
df = cudf.DataFrame(
{k: np.ones(10, dtype=v) for k, v in dtypes.iteritems()}
)
assert df.dtypes.equals(dtypes)
def test_dataframe_add_col_to_object_dataframe():
# Test for adding column to an empty object dataframe
cols = ["a", "b", "c"]
df = pd.DataFrame(columns=cols, dtype="str")
data = {k: v for (k, v) in zip(cols, [["a"] for _ in cols])}
gdf = cudf.DataFrame(data)
gdf = gdf[:0]
assert gdf.dtypes.equals(df.dtypes)
gdf["a"] = [1]
df["a"] = [10]
assert gdf.dtypes.equals(df.dtypes)
gdf["b"] = [1.0]
df["b"] = [10.0]
assert gdf.dtypes.equals(df.dtypes)
def test_dataframe_dir_and_getattr():
df = cudf.DataFrame(
{
"a": np.ones(10),
"b": np.ones(10),
"not an id": np.ones(10),
"oop$": np.ones(10),
}
)
o = dir(df)
assert {"a", "b"}.issubset(o)
assert "not an id" not in o
assert "oop$" not in o
# Getattr works
assert df.a.equals(df["a"])
assert df.b.equals(df["b"])
with pytest.raises(AttributeError):
df.not_a_column
@pytest.mark.parametrize("order", ["C", "F"])
def test_empty_dataframe_as_gpu_matrix(order):
df = cudf.DataFrame()
# Check fully empty dataframe.
mat = df.as_gpu_matrix(order=order).copy_to_host()
assert mat.shape == (0, 0)
df = cudf.DataFrame()
nelem = 123
for k in "abc":
df[k] = np.random.random(nelem)
# Check all columns in empty dataframe.
mat = df.head(0).as_gpu_matrix(order=order).copy_to_host()
assert mat.shape == (0, 3)
@pytest.mark.parametrize("order", ["C", "F"])
def test_dataframe_as_gpu_matrix(order):
df = cudf.DataFrame()
nelem = 123
for k in "abcd":
df[k] = np.random.random(nelem)
# Check all columns
mat = df.as_gpu_matrix(order=order).copy_to_host()
assert mat.shape == (nelem, 4)
for i, k in enumerate(df.columns):
np.testing.assert_array_equal(df[k].to_array(), mat[:, i])
# Check column subset
mat = df.as_gpu_matrix(order=order, columns=["a", "c"]).copy_to_host()
assert mat.shape == (nelem, 2)
for i, k in enumerate("ac"):
np.testing.assert_array_equal(df[k].to_array(), mat[:, i])
def test_dataframe_as_gpu_matrix_null_values():
df = cudf.DataFrame()
nelem = 123
na = -10000
refvalues = {}
for k in "abcd":
df[k] = data = np.random.random(nelem)
bitmask = utils.random_bitmask(nelem)
df[k] = df[k].set_mask(bitmask)
boolmask = np.asarray(
utils.expand_bits_to_bytes(bitmask)[:nelem], dtype=np.bool_
)
data[~boolmask] = na
refvalues[k] = data
# Check null value causes error
with pytest.raises(ValueError) as raises:
df.as_gpu_matrix()
raises.match("column 'a' has null values")
for k in df.columns:
df[k] = df[k].fillna(na)
mat = df.as_gpu_matrix().copy_to_host()
for i, k in enumerate(df.columns):
np.testing.assert_array_equal(refvalues[k], mat[:, i])
def test_dataframe_append_empty():
pdf = pd.DataFrame(
{
"key": [1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4],
"value": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
}
)
gdf = cudf.DataFrame.from_pandas(pdf)
gdf["newcol"] = 100
pdf["newcol"] = 100
assert len(gdf["newcol"]) == len(pdf)
assert len(pdf["newcol"]) == len(pdf)
assert_eq(gdf, pdf)
def test_dataframe_setitem_from_masked_object():
ary = np.random.randn(100)
mask = np.zeros(100, dtype=bool)
mask[:20] = True
np.random.shuffle(mask)
ary[mask] = np.nan
test1_null = cudf.Series(ary, nan_as_null=True)
assert test1_null.nullable
assert test1_null.null_count == 20
test1_nan = cudf.Series(ary, nan_as_null=False)
assert test1_nan.null_count == 0
test2_null = cudf.DataFrame.from_pandas(
pd.DataFrame({"a": ary}), nan_as_null=True
)
assert test2_null["a"].nullable
assert test2_null["a"].null_count == 20
test2_nan = cudf.DataFrame.from_pandas(
pd.DataFrame({"a": ary}), nan_as_null=False
)
assert test2_nan["a"].null_count == 0
gpu_ary = cupy.asarray(ary)
test3_null = cudf.Series(gpu_ary, nan_as_null=True)
assert test3_null.nullable
assert test3_null.null_count == 20
test3_nan = cudf.Series(gpu_ary, nan_as_null=False)
assert test3_nan.null_count == 0
test4 = cudf.DataFrame()
lst = [1, 2, None, 4, 5, 6, None, 8, 9]
test4["lst"] = lst
assert test4["lst"].nullable
assert test4["lst"].null_count == 2
def test_dataframe_append_to_empty():
pdf = pd.DataFrame()
pdf["a"] = []
pdf["b"] = [1, 2, 3]
gdf = cudf.DataFrame()
gdf["a"] = []
gdf["b"] = [1, 2, 3]
assert_eq(gdf, pdf)
def test_dataframe_setitem_index_len1():
gdf = cudf.DataFrame()
gdf["a"] = [1]
gdf["b"] = gdf.index._values
np.testing.assert_equal(gdf.b.to_array(), [0])
def test_empty_dataframe_setitem_df():
gdf1 = cudf.DataFrame()
gdf2 = cudf.DataFrame({"a": [1, 2, 3, 4, 5]})
gdf1["a"] = gdf2["a"]
assert_eq(gdf1, gdf2)
def test_assign():
gdf = cudf.DataFrame({"x": [1, 2, 3]})
gdf2 = gdf.assign(y=gdf.x + 1)
assert list(gdf.columns) == ["x"]
assert list(gdf2.columns) == ["x", "y"]
np.testing.assert_equal(gdf2.y.to_array(), [2, 3, 4])
@pytest.mark.parametrize("nrows", [1, 8, 100, 1000])
def test_dataframe_hash_columns(nrows):
gdf = cudf.DataFrame()
data = np.asarray(range(nrows))
data[0] = data[-1] # make first and last the same
gdf["a"] = data
gdf["b"] = gdf.a + 100
out = gdf.hash_columns(["a", "b"])
assert isinstance(out, cupy.ndarray)
assert len(out) == nrows
assert out.dtype == np.int32
# Check default
out_all = gdf.hash_columns()
np.testing.assert_array_equal(cupy.asnumpy(out), cupy.asnumpy(out_all))
# Check single column
out_one = cupy.asnumpy(gdf.hash_columns(["a"]))
# First matches last
assert out_one[0] == out_one[-1]
# Equivalent to the cudf.Series.hash_values()
np.testing.assert_array_equal(cupy.asnumpy(gdf.a.hash_values()), out_one)
@pytest.mark.parametrize("nrows", [3, 10, 100, 1000])
@pytest.mark.parametrize("nparts", [1, 2, 8, 13])
@pytest.mark.parametrize("nkeys", [1, 2])
def test_dataframe_hash_partition(nrows, nparts, nkeys):
np.random.seed(123)
gdf = cudf.DataFrame()
keycols = []
for i in range(nkeys):
keyname = "key{}".format(i)
gdf[keyname] = np.random.randint(0, 7 - i, nrows)
keycols.append(keyname)
gdf["val1"] = np.random.randint(0, nrows * 2, nrows)
got = gdf.partition_by_hash(keycols, nparts=nparts)
# Must return a list
assert isinstance(got, list)
# Must have correct number of partitions
assert len(got) == nparts
# All partitions must be DataFrame type
assert all(isinstance(p, cudf.DataFrame) for p in got)
# Check that all partitions have unique keys
part_unique_keys = set()
for p in got:
if len(p):
# Take rows of the keycolumns and build a set of the key-values
unique_keys = set(map(tuple, p.as_matrix(columns=keycols)))
# Ensure that none of the key-values have occurred in other groups
assert not (unique_keys & part_unique_keys)
part_unique_keys |= unique_keys
assert len(part_unique_keys)
@pytest.mark.parametrize("nrows", [3, 10, 50])
def test_dataframe_hash_partition_masked_value(nrows):
gdf = cudf.DataFrame()
gdf["key"] = np.arange(nrows)
gdf["val"] = np.arange(nrows) + 100
bitmask = utils.random_bitmask(nrows)
bytemask = utils.expand_bits_to_bytes(bitmask)
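# expand_bits_to_bytes unpacks the packed validity bitmask into one entry per
# row, so validity can be looked up by row position below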
gdf["val"] = gdf["val"].set_mask(bitmask)
parted = gdf.partition_by_hash(["key"], nparts=3)
# Verify that the valid mask is correct
for p in parted:
df = p.to_pandas()
for row in df.itertuples():
valid = bool(bytemask[row.key])
expected_value = row.key + 100 if valid else np.nan
got_value = row.val
assert (expected_value == got_value) or (
np.isnan(expected_value) and np.isnan(got_value)
)
@pytest.mark.parametrize("nrows", [3, 10, 50])
def test_dataframe_hash_partition_masked_keys(nrows):
gdf = cudf.DataFrame()
gdf["key"] = np.arange(nrows)
gdf["val"] = np.arange(nrows) + 100
bitmask = utils.random_bitmask(nrows)
bytemask = utils.expand_bits_to_bytes(bitmask)
gdf["key"] = gdf["key"].set_mask(bitmask)
parted = gdf.partition_by_hash(["key"], nparts=3, keep_index=False)
# Verify that the valid mask is correct
for p in parted:
df = p.to_pandas()
for row in df.itertuples():
valid = bool(bytemask[row.val - 100])
# val is key + 100
expected_value = row.val - 100 if valid else np.nan
got_value = row.key
assert (expected_value == got_value) or (
np.isnan(expected_value) and np.isnan(got_value)
)
@pytest.mark.parametrize("keep_index", [True, False])
def test_dataframe_hash_partition_keep_index(keep_index):
gdf = cudf.DataFrame(
{"val": [1, 2, 3, 4], "key": [3, 2, 1, 4]}, index=[4, 3, 2, 1]
)
expected_df1 = cudf.DataFrame(
{"val": [1], "key": [3]}, index=[4] if keep_index else None
)
expected_df2 = cudf.DataFrame(
{"val": [2, 3, 4], "key": [2, 1, 4]},
index=[3, 2, 1] if keep_index else range(1, 4),
)
expected = [expected_df1, expected_df2]
parts = gdf.partition_by_hash(["key"], nparts=2, keep_index=keep_index)
for exp, got in zip(expected, parts):
assert_eq(exp, got)
def test_dataframe_hash_partition_empty():
gdf = cudf.DataFrame({"val": [1, 2], "key": [3, 2]}, index=["a", "b"])
parts = gdf.iloc[:0].partition_by_hash(["key"], nparts=3)
assert len(parts) == 3
for part in parts:
assert_eq(gdf.iloc[:0], part)
@pytest.mark.parametrize("dtype1", utils.supported_numpy_dtypes)
@pytest.mark.parametrize("dtype2", utils.supported_numpy_dtypes)
def test_dataframe_concat_different_numerical_columns(dtype1, dtype2):
df1 = pd.DataFrame(dict(x=pd.Series(np.arange(5)).astype(dtype1)))
df2 = pd.DataFrame(dict(x=pd.Series(np.arange(5)).astype(dtype2)))
if dtype1 != dtype2 and "datetime" in dtype1 or "datetime" in dtype2:
with pytest.raises(TypeError):
cudf.concat([df1, df2])
else:
pres = pd.concat([df1, df2])
gres = cudf.concat([cudf.from_pandas(df1), cudf.from_pandas(df2)])
assert_eq(cudf.from_pandas(pres), gres)
def test_dataframe_concat_different_column_types():
df1 = cudf.Series([42], dtype=np.float64)
df2 = cudf.Series(["a"], dtype="category")
with pytest.raises(ValueError):
cudf.concat([df1, df2])
df2 = cudf.Series(["a string"])
with pytest.raises(TypeError):
cudf.concat([df1, df2])
@pytest.mark.parametrize(
"df_1", [cudf.DataFrame({"a": [1, 2], "b": [1, 3]}), cudf.DataFrame({})]
)
@pytest.mark.parametrize(
"df_2", [cudf.DataFrame({"a": [], "b": []}), cudf.DataFrame({})]
)
def test_concat_empty_dataframe(df_1, df_2):
got = cudf.concat([df_1, df_2])
expect = pd.concat([df_1.to_pandas(), df_2.to_pandas()], sort=False)
# ignoring dtypes as pandas upcasts int to float
# on concatenation with empty dataframes
assert_eq(got, expect, check_dtype=False)
@pytest.mark.parametrize(
"df1_d",
[
{"a": [1, 2], "b": [1, 2], "c": ["s1", "s2"], "d": [1.0, 2.0]},
{"b": [1.9, 10.9], "c": ["s1", "s2"]},
{"c": ["s1"], "b": [None], "a": [False]},
],
)
@pytest.mark.parametrize(
"df2_d",
[
{"a": [1, 2, 3]},
{"a": [1, None, 3], "b": [True, True, False], "c": ["s3", None, "s4"]},
{"a": [], "b": []},
{},
],
)
def test_concat_different_column_dataframe(df1_d, df2_d):
got = cudf.concat(
[cudf.DataFrame(df1_d), cudf.DataFrame(df2_d), cudf.DataFrame(df1_d)],
sort=False,
)
expect = pd.concat(
[pd.DataFrame(df1_d), pd.DataFrame(df2_d), pd.DataFrame(df1_d)],
sort=False,
)
# numerical columns are upcasted to float in cudf.DataFrame.to_pandas()
# casts nan to 0 in non-float numerical columns
numeric_cols = got.dtypes[got.dtypes != "object"].index
for col in numeric_cols:
got[col] = got[col].astype(np.float64).fillna(np.nan)
assert_eq(got, expect, check_dtype=False)
@pytest.mark.parametrize(
"ser_1", [pd.Series([1, 2, 3]), pd.Series([], dtype="float64")]
)
@pytest.mark.parametrize("ser_2", [pd.Series([], dtype="float64")])
def test_concat_empty_series(ser_1, ser_2):
got = cudf.concat([cudf.Series(ser_1), cudf.Series(ser_2)])
expect = pd.concat([ser_1, ser_2])
assert_eq(got, expect)
def test_concat_with_axis():
df1 = pd.DataFrame(dict(x=np.arange(5), y=np.arange(5)))
df2 = pd.DataFrame(dict(a=np.arange(5), b=np.arange(5)))
concat_df = pd.concat([df1, df2], axis=1)
cdf1 = cudf.from_pandas(df1)
cdf2 = cudf.from_pandas(df2)
# concat only dataframes
concat_cdf = cudf.concat([cdf1, cdf2], axis=1)
assert_eq(concat_cdf, concat_df)
# concat only series
concat_s = pd.concat([df1.x, df1.y], axis=1)
cs1 = cudf.Series.from_pandas(df1.x)
cs2 = cudf.Series.from_pandas(df1.y)
concat_cdf_s = cudf.concat([cs1, cs2], axis=1)
assert_eq(concat_cdf_s, concat_s)
# concat series and dataframes
s3 = pd.Series(np.random.random(5))
cs3 = cudf.Series.from_pandas(s3)
concat_cdf_all = cudf.concat([cdf1, cs3, cdf2], axis=1)
concat_df_all = pd.concat([df1, s3, df2], axis=1)
assert_eq(concat_cdf_all, concat_df_all)
# concat manual multi index
midf1 = cudf.from_pandas(df1)
midf1.index = cudf.MultiIndex(
levels=[[0, 1, 2, 3], [0, 1]], codes=[[0, 1, 2, 3, 2], [0, 1, 0, 1, 0]]
)
midf2 = midf1[2:]
midf2.index = cudf.MultiIndex(
levels=[[3, 4, 5], [2, 0]], codes=[[0, 1, 2], [1, 0, 1]]
)
mipdf1 = midf1.to_pandas()
mipdf2 = midf2.to_pandas()
assert_eq(cudf.concat([midf1, midf2]), pd.concat([mipdf1, mipdf2]))
assert_eq(cudf.concat([midf2, midf1]), pd.concat([mipdf2, mipdf1]))
assert_eq(
cudf.concat([midf1, midf2, midf1]), pd.concat([mipdf1, mipdf2, mipdf1])
)
# concat groupby multi index
gdf1 = cudf.DataFrame(
{
"x": np.random.randint(0, 10, 10),
"y": np.random.randint(0, 10, 10),
"z": np.random.randint(0, 10, 10),
"v": np.random.randint(0, 10, 10),
}
)
gdf2 = gdf1[5:]
gdg1 = gdf1.groupby(["x", "y"]).min()
gdg2 = gdf2.groupby(["x", "y"]).min()
pdg1 = gdg1.to_pandas()
pdg2 = gdg2.to_pandas()
assert_eq(cudf.concat([gdg1, gdg2]), pd.concat([pdg1, pdg2]))
assert_eq(cudf.concat([gdg2, gdg1]), pd.concat([pdg2, pdg1]))
# series multi index concat
gdgz1 = gdg1.z
gdgz2 = gdg2.z
pdgz1 = gdgz1.to_pandas()
pdgz2 = gdgz2.to_pandas()
assert_eq(cudf.concat([gdgz1, gdgz2]), pd.concat([pdgz1, pdgz2]))
assert_eq(cudf.concat([gdgz2, gdgz1]), pd.concat([pdgz2, pdgz1]))
@pytest.mark.parametrize("nrows", [0, 3, 10, 100, 1000])
def test_nonmatching_index_setitem(nrows):
np.random.seed(0)
gdf = cudf.DataFrame()
gdf["a"] = np.random.randint(2147483647, size=nrows)
gdf["b"] = np.random.randint(2147483647, size=nrows)
gdf = gdf.set_index("b")
test_values = np.random.randint(2147483647, size=nrows)
gdf["c"] = test_values
assert len(test_values) == len(gdf["c"])
assert (
gdf["c"]
.to_pandas()
.equals(cudf.Series(test_values).set_index(gdf._index).to_pandas())
)
def test_from_pandas():
df = pd.DataFrame({"x": [1, 2, 3]}, index=[4.0, 5.0, 6.0])
gdf = cudf.DataFrame.from_pandas(df)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
s = df.x
gs = cudf.Series.from_pandas(s)
assert isinstance(gs, cudf.Series)
assert_eq(s, gs)
@pytest.mark.parametrize("dtypes", [int, float])
def test_from_records(dtypes):
h_ary = np.ndarray(shape=(10, 4), dtype=dtypes)
rec_ary = h_ary.view(np.recarray)
gdf = cudf.DataFrame.from_records(rec_ary, columns=["a", "b", "c", "d"])
df = pd.DataFrame.from_records(rec_ary, columns=["a", "b", "c", "d"])
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame.from_records(rec_ary)
df = pd.DataFrame.from_records(rec_ary)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
@pytest.mark.parametrize("columns", [None, ["first", "second", "third"]])
@pytest.mark.parametrize(
"index",
[
None,
["first", "second"],
"name",
"age",
"weight",
[10, 11],
["abc", "xyz"],
],
)
def test_from_records_index(columns, index):
rec_ary = np.array(
[("Rex", 9, 81.0), ("Fido", 3, 27.0)],
dtype=[("name", "U10"), ("age", "i4"), ("weight", "f4")],
)
gdf = cudf.DataFrame.from_records(rec_ary, columns=columns, index=index)
df = pd.DataFrame.from_records(rec_ary, columns=columns, index=index)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
def test_dataframe_construction_from_cupy_arrays():
h_ary = np.array([[1, 2, 3], [4, 5, 6]], np.int32)
d_ary = cupy.asarray(h_ary)
gdf = cudf.DataFrame(d_ary, columns=["a", "b", "c"])
df = pd.DataFrame(h_ary, columns=["a", "b", "c"])
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame(d_ary)
df = pd.DataFrame(h_ary)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame(d_ary, index=["a", "b"])
df = pd.DataFrame(h_ary, index=["a", "b"])
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame(d_ary)
gdf = gdf.set_index(keys=0, drop=False)
df = pd.DataFrame(h_ary)
df = df.set_index(keys=0, drop=False)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame(d_ary)
gdf = gdf.set_index(keys=1, drop=False)
df = pd.DataFrame(h_ary)
df = df.set_index(keys=1, drop=False)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
def test_dataframe_cupy_wrong_dimensions():
d_ary = cupy.empty((2, 3, 4), dtype=np.int32)
with pytest.raises(
ValueError, match="records dimension expected 1 or 2 but found: 3"
):
cudf.DataFrame(d_ary)
def test_dataframe_cupy_array_wrong_index():
d_ary = cupy.empty((2, 3), dtype=np.int32)
with pytest.raises(
ValueError,
match="Length mismatch: Expected axis has 2 elements, "
"new values have 1 elements",
):
cudf.DataFrame(d_ary, index=["a"])
with pytest.raises(
ValueError,
match="Length mismatch: Expected axis has 2 elements, "
"new values have 1 elements",
):
cudf.DataFrame(d_ary, index="a")
def test_index_in_dataframe_constructor():
a = pd.DataFrame({"x": [1, 2, 3]}, index=[4.0, 5.0, 6.0])
b = cudf.DataFrame({"x": [1, 2, 3]}, index=[4.0, 5.0, 6.0])
assert_eq(a, b)
assert_eq(a.loc[4:], b.loc[4:])
dtypes = NUMERIC_TYPES + DATETIME_TYPES + ["bool"]
@pytest.mark.parametrize("nelem", [0, 2, 3, 100, 1000])
@pytest.mark.parametrize("data_type", dtypes)
def test_from_arrow(nelem, data_type):
df = pd.DataFrame(
{
"a": np.random.randint(0, 1000, nelem).astype(data_type),
"b": np.random.randint(0, 1000, nelem).astype(data_type),
}
)
padf = pa.Table.from_pandas(
df, preserve_index=False
).replace_schema_metadata(None)
gdf = cudf.DataFrame.from_arrow(padf)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
s = pa.Array.from_pandas(df.a)
gs = cudf.Series.from_arrow(s)
assert isinstance(gs, cudf.Series)
# For some reason PyArrow to_pandas() converts to numpy array and has
# better type compatibility
np.testing.assert_array_equal(s.to_pandas(), gs.to_array())
@pytest.mark.parametrize("nelem", [0, 2, 3, 100, 1000])
@pytest.mark.parametrize("data_type", dtypes)
def test_to_arrow(nelem, data_type):
df = pd.DataFrame(
{
"a": np.random.randint(0, 1000, nelem).astype(data_type),
"b": np.random.randint(0, 1000, nelem).astype(data_type),
}
)
gdf = cudf.DataFrame.from_pandas(df)
pa_df = pa.Table.from_pandas(
df, preserve_index=False
).replace_schema_metadata(None)
pa_gdf = gdf.to_arrow(preserve_index=False).replace_schema_metadata(None)
assert isinstance(pa_gdf, pa.Table)
assert pa.Table.equals(pa_df, pa_gdf)
pa_s = pa.Array.from_pandas(df.a)
pa_gs = gdf["a"].to_arrow()
assert isinstance(pa_gs, pa.Array)
assert pa.Array.equals(pa_s, pa_gs)
pa_i = pa.Array.from_pandas(df.index)
pa_gi = gdf.index.to_arrow()
assert isinstance(pa_gi, pa.Array)
assert pa.Array.equals(pa_i, pa_gi)
@pytest.mark.parametrize("data_type", dtypes)
def test_to_from_arrow_nulls(data_type):
if data_type == "longlong":
data_type = "int64"
if data_type == "bool":
s1 = pa.array([True, None, False, None, True], type=data_type)
else:
dtype = np.dtype(data_type)
if dtype.type == np.datetime64:
time_unit, _ = np.datetime_data(dtype)
data_type = pa.timestamp(unit=time_unit)
s1 = pa.array([1, None, 3, None, 5], type=data_type)
gs1 = cudf.Series.from_arrow(s1)
assert isinstance(gs1, cudf.Series)
# We have 64B padded buffers for nulls whereas Arrow returns a minimal
# number of bytes, so only check the first byte in this case
np.testing.assert_array_equal(
np.asarray(s1.buffers()[0]).view("u1")[0],
gs1._column.mask_array_view.copy_to_host().view("u1")[0],
)
assert pa.Array.equals(s1, gs1.to_arrow())
s2 = pa.array([None, None, None, None, None], type=data_type)
gs2 = cudf.Series.from_arrow(s2)
assert isinstance(gs2, cudf.Series)
# We have 64B padded buffers for nulls whereas Arrow returns a minimal
# number of bytes, so only check the first byte in this case
np.testing.assert_array_equal(
np.asarray(s2.buffers()[0]).view("u1")[0],
gs2._column.mask_array_view.copy_to_host().view("u1")[0],
)
assert pa.Array.equals(s2, gs2.to_arrow())
def test_to_arrow_categorical():
df = pd.DataFrame()
df["a"] = pd.Series(["a", "b", "c"], dtype="category")
gdf = cudf.DataFrame.from_pandas(df)
pa_df = pa.Table.from_pandas(
df, preserve_index=False
).replace_schema_metadata(None)
pa_gdf = gdf.to_arrow(preserve_index=False).replace_schema_metadata(None)
assert isinstance(pa_gdf, pa.Table)
assert pa.Table.equals(pa_df, pa_gdf)
pa_s = pa.Array.from_pandas(df.a)
pa_gs = gdf["a"].to_arrow()
assert isinstance(pa_gs, pa.Array)
assert pa.Array.equals(pa_s, pa_gs)
def test_from_arrow_missing_categorical():
pd_cat = pd.Categorical(["a", "b", "c"], categories=["a", "b"])
pa_cat = pa.array(pd_cat, from_pandas=True)
gd_cat = cudf.Series(pa_cat)
assert isinstance(gd_cat, cudf.Series)
assert_eq(
pd.Series(pa_cat.to_pandas()), # PyArrow returns a pd.Categorical
gd_cat.to_pandas(),
)
def test_to_arrow_missing_categorical():
pd_cat = pd.Categorical(["a", "b", "c"], categories=["a", "b"])
pa_cat = pa.array(pd_cat, from_pandas=True)
gd_cat = cudf.Series(pa_cat)
assert isinstance(gd_cat, cudf.Series)
assert pa.Array.equals(pa_cat, gd_cat.to_arrow())
@pytest.mark.parametrize("data_type", dtypes)
def test_from_scalar_typing(data_type):
if data_type == "datetime64[ms]":
scalar = (
np.dtype("int64")
.type(np.random.randint(0, 5))
.astype("datetime64[ms]")
)
elif data_type.startswith("datetime64"):
scalar = np.datetime64(datetime.date.today()).astype("datetime64[ms]")
data_type = "datetime64[ms]"
else:
scalar = np.dtype(data_type).type(np.random.randint(0, 5))
gdf = cudf.DataFrame()
gdf["a"] = [1, 2, 3, 4, 5]
gdf["b"] = scalar
assert gdf["b"].dtype == np.dtype(data_type)
assert len(gdf["b"]) == len(gdf["a"])
@pytest.mark.parametrize("data_type", NUMERIC_TYPES)
def test_from_python_array(data_type):
np_arr = np.random.randint(0, 100, 10).astype(data_type)
data = memoryview(np_arr)
data = arr.array(data.format, data)
gs = cudf.Series(data)
np.testing.assert_equal(gs.to_array(), np_arr)
def test_series_shape():
ps = pd.Series([1, 2, 3, 4])
cs = cudf.Series([1, 2, 3, 4])
assert ps.shape == cs.shape
def test_series_shape_empty():
ps = pd.Series(dtype="float64")
cs = cudf.Series([])
assert ps.shape == cs.shape
def test_dataframe_shape():
pdf = pd.DataFrame({"a": [0, 1, 2, 3], "b": [0.1, 0.2, None, 0.3]})
gdf = cudf.DataFrame.from_pandas(pdf)
assert pdf.shape == gdf.shape
def test_dataframe_shape_empty():
pdf = pd.DataFrame()
gdf = cudf.DataFrame()
assert pdf.shape == gdf.shape
@pytest.mark.parametrize("num_cols", [1, 2, 10])
@pytest.mark.parametrize("num_rows", [1, 2, 20])
@pytest.mark.parametrize("dtype", dtypes)
@pytest.mark.parametrize("nulls", ["none", "some", "all"])
def test_dataframe_transpose(nulls, num_cols, num_rows, dtype):
pdf = pd.DataFrame()
null_rep = np.nan if dtype in ["float32", "float64"] else None
for i in range(num_cols):
colname = string.ascii_lowercase[i]
data = pd.Series(np.random.randint(0, 26, num_rows).astype(dtype))
if nulls == "some":
idx = np.random.choice(
num_rows, size=int(num_rows / 2), replace=False
)
data[idx] = null_rep
elif nulls == "all":
data[:] = null_rep
pdf[colname] = data
gdf = cudf.DataFrame.from_pandas(pdf)
got_function = gdf.transpose()
got_property = gdf.T
expect = pdf.transpose()
assert_eq(expect, got_function)
assert_eq(expect, got_property)
@pytest.mark.parametrize("num_cols", [1, 2, 10])
@pytest.mark.parametrize("num_rows", [1, 2, 20])
def test_dataframe_transpose_category(num_cols, num_rows):
pdf = pd.DataFrame()
for i in range(num_cols):
colname = string.ascii_lowercase[i]
data = pd.Series(list(string.ascii_lowercase), dtype="category")
data = data.sample(num_rows, replace=True).reset_index(drop=True)
pdf[colname] = data
gdf = cudf.DataFrame.from_pandas(pdf)
got_function = gdf.transpose()
got_property = gdf.T
expect = pdf.transpose()
assert_eq(expect, got_function.to_pandas())
assert_eq(expect, got_property.to_pandas())
def test_generated_column():
gdf = cudf.DataFrame({"a": (i for i in range(5))})
assert len(gdf) == 5
@pytest.fixture
def pdf():
return pd.DataFrame({"x": range(10), "y": range(10)})
@pytest.fixture
def gdf(pdf):
return cudf.DataFrame.from_pandas(pdf)
@pytest.mark.parametrize(
"data",
[
{"x": [np.nan, 2, 3, 4, 100, np.nan], "y": [4, 5, 6, 88, 99, np.nan]},
{"x": [1, 2, 3], "y": [4, 5, 6]},
{"x": [np.nan, np.nan, np.nan], "y": [np.nan, np.nan, np.nan]},
{"x": [], "y": []},
{"x": []},
],
)
@pytest.mark.parametrize(
"func",
[
lambda df, **kwargs: df.min(**kwargs),
lambda df, **kwargs: df.max(**kwargs),
lambda df, **kwargs: df.sum(**kwargs),
lambda df, **kwargs: df.product(**kwargs),
lambda df, **kwargs: df.cummin(**kwargs),
lambda df, **kwargs: df.cummax(**kwargs),
lambda df, **kwargs: df.cumsum(**kwargs),
lambda df, **kwargs: df.cumprod(**kwargs),
lambda df, **kwargs: df.mean(**kwargs),
lambda df, **kwargs: df.sum(**kwargs),
lambda df, **kwargs: df.max(**kwargs),
lambda df, **kwargs: df.std(ddof=1, **kwargs),
lambda df, **kwargs: df.var(ddof=1, **kwargs),
lambda df, **kwargs: df.std(ddof=2, **kwargs),
lambda df, **kwargs: df.var(ddof=2, **kwargs),
lambda df, **kwargs: df.kurt(**kwargs),
lambda df, **kwargs: df.skew(**kwargs),
lambda df, **kwargs: df.all(**kwargs),
lambda df, **kwargs: df.any(**kwargs),
],
)
@pytest.mark.parametrize("skipna", [True, False, None])
def test_dataframe_reductions(data, func, skipna):
pdf = pd.DataFrame(data=data)
gdf = cudf.DataFrame.from_pandas(pdf)
assert_eq(func(pdf, skipna=skipna), func(gdf, skipna=skipna))
@pytest.mark.parametrize(
"data",
[
{"x": [np.nan, 2, 3, 4, 100, np.nan], "y": [4, 5, 6, 88, 99, np.nan]},
{"x": [1, 2, 3], "y": [4, 5, 6]},
{"x": [np.nan, np.nan, np.nan], "y": [np.nan, np.nan, np.nan]},
{"x": [], "y": []},
{"x": []},
],
)
@pytest.mark.parametrize("func", [lambda df: df.count()])
def test_dataframe_count_reduction(data, func):
pdf = pd.DataFrame(data=data)
gdf = cudf.DataFrame.from_pandas(pdf)
assert_eq(func(pdf), func(gdf))
@pytest.mark.parametrize(
"data",
[
{"x": [np.nan, 2, 3, 4, 100, np.nan], "y": [4, 5, 6, 88, 99, np.nan]},
{"x": [1, 2, 3], "y": [4, 5, 6]},
{"x": [np.nan, np.nan, np.nan], "y": [np.nan, np.nan, np.nan]},
{"x": [], "y": []},
{"x": []},
],
)
@pytest.mark.parametrize("ops", ["sum", "product", "prod"])
@pytest.mark.parametrize("skipna", [True, False, None])
@pytest.mark.parametrize("min_count", [-10, -1, 0, 1, 2, 3, 10])
def test_dataframe_min_count_ops(data, ops, skipna, min_count):
psr = pd.DataFrame(data)
gsr = cudf.DataFrame(data)
if PANDAS_GE_120 and psr.shape[0] * psr.shape[1] < min_count:
pytest.xfail("https://github.com/pandas-dev/pandas/issues/39738")
assert_eq(
getattr(psr, ops)(skipna=skipna, min_count=min_count),
getattr(gsr, ops)(skipna=skipna, min_count=min_count),
check_dtype=False,
)
@pytest.mark.parametrize(
"binop",
[
operator.add,
operator.mul,
operator.floordiv,
operator.truediv,
operator.mod,
operator.pow,
operator.eq,
operator.lt,
operator.le,
operator.gt,
operator.ge,
operator.ne,
],
)
def test_binops_df(pdf, gdf, binop):
pdf = pdf + 1.0
gdf = gdf + 1.0
d = binop(pdf, pdf)
g = binop(gdf, gdf)
assert_eq(d, g)
@pytest.mark.parametrize("binop", [operator.and_, operator.or_, operator.xor])
def test_bitwise_binops_df(pdf, gdf, binop):
d = binop(pdf, pdf + 1)
g = binop(gdf, gdf + 1)
assert_eq(d, g)
@pytest.mark.parametrize(
"binop",
[
operator.add,
operator.mul,
operator.floordiv,
operator.truediv,
operator.mod,
operator.pow,
operator.eq,
operator.lt,
operator.le,
operator.gt,
operator.ge,
operator.ne,
],
)
def test_binops_series(pdf, gdf, binop):
pdf = pdf + 1.0
gdf = gdf + 1.0
d = binop(pdf.x, pdf.y)
g = binop(gdf.x, gdf.y)
assert_eq(d, g)
@pytest.mark.parametrize("binop", [operator.and_, operator.or_, operator.xor])
def test_bitwise_binops_series(pdf, gdf, binop):
d = binop(pdf.x, pdf.y + 1)
g = binop(gdf.x, gdf.y + 1)
assert_eq(d, g)
@pytest.mark.parametrize("unaryop", [operator.neg, operator.inv, operator.abs])
def test_unaryops_df(pdf, gdf, unaryop):
d = unaryop(pdf - 5)
g = unaryop(gdf - 5)
assert_eq(d, g)
@pytest.mark.parametrize(
"func",
[
lambda df: df.empty,
lambda df: df.x.empty,
lambda df: df.x.fillna(123, limit=None, method=None, axis=None),
lambda df: df.drop("x", axis=1, errors="raise"),
],
)
def test_unary_operators(func, pdf, gdf):
p = func(pdf)
g = func(gdf)
assert_eq(p, g)
def test_is_monotonic(gdf):
pdf = pd.DataFrame({"x": [1, 2, 3]}, index=[3, 1, 2])
gdf = cudf.DataFrame.from_pandas(pdf)
assert not gdf.index.is_monotonic
assert not gdf.index.is_monotonic_increasing
assert not gdf.index.is_monotonic_decreasing
def test_iter(pdf, gdf):
assert list(pdf) == list(gdf)
def test_iteritems(gdf):
for k, v in gdf.iteritems():
assert k in gdf.columns
assert isinstance(v, cudf.Series)
assert_eq(v, gdf[k])
@pytest.mark.parametrize("q", [0.5, 1, 0.001, [0.5], [], [0.005, 0.5, 1]])
@pytest.mark.parametrize("numeric_only", [True, False])
def test_quantile(q, numeric_only):
ts = pd.date_range("2018-08-24", periods=5, freq="D")
td = pd.to_timedelta(np.arange(5), unit="h")
pdf = pd.DataFrame(
{"date": ts, "delta": td, "val": np.random.randn(len(ts))}
)
gdf = cudf.DataFrame.from_pandas(pdf)
assert_eq(pdf["date"].quantile(q), gdf["date"].quantile(q))
assert_eq(pdf["delta"].quantile(q), gdf["delta"].quantile(q))
assert_eq(pdf["val"].quantile(q), gdf["val"].quantile(q))
if numeric_only:
assert_eq(pdf.quantile(q), gdf.quantile(q))
else:
        q = q if isinstance(q, list) else [q]
        assert_eq(
            pdf.quantile(q, numeric_only=False),
            gdf.quantile(q, numeric_only=False),
        )
def test_empty_quantile():
pdf = pd.DataFrame({"x": []})
df = cudf.DataFrame({"x": []})
actual = df.quantile()
expected = pdf.quantile()
assert_eq(actual, expected)
def test_from_pandas_function(pdf):
gdf = cudf.from_pandas(pdf)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(pdf, gdf)
gdf = cudf.from_pandas(pdf.x)
assert isinstance(gdf, cudf.Series)
assert_eq(pdf.x, gdf)
with pytest.raises(TypeError):
cudf.from_pandas(123)
@pytest.mark.parametrize("preserve_index", [True, False])
def test_arrow_pandas_compat(pdf, gdf, preserve_index):
pdf["z"] = range(10)
pdf = pdf.set_index("z")
gdf["z"] = range(10)
gdf = gdf.set_index("z")
pdf_arrow_table = pa.Table.from_pandas(pdf, preserve_index=preserve_index)
gdf_arrow_table = gdf.to_arrow(preserve_index=preserve_index)
assert pa.Table.equals(pdf_arrow_table, gdf_arrow_table)
gdf2 = cudf.DataFrame.from_arrow(pdf_arrow_table)
pdf2 = pdf_arrow_table.to_pandas()
assert_eq(pdf2, gdf2)
@pytest.mark.parametrize("nrows", [1, 8, 100, 1000, 100000])
def test_series_hash_encode(nrows):
data = np.asarray(range(nrows))
    # Hashing a string name is not deterministic across runs and can make
    # enc_with_name_arr identical to enc_arr, so use an integer name,
    # for which hash() always returns the same value.
s = cudf.Series(data, name=1)
num_features = 1000
encoded_series = s.hash_encode(num_features)
assert isinstance(encoded_series, cudf.Series)
enc_arr = encoded_series.to_array()
assert np.all(enc_arr >= 0)
assert np.max(enc_arr) < num_features
enc_with_name_arr = s.hash_encode(num_features, use_name=True).to_array()
assert enc_with_name_arr[0] != enc_arr[0]
@pytest.mark.parametrize("dtype", NUMERIC_TYPES + ["bool"])
def test_cuda_array_interface(dtype):
np_data = np.arange(10).astype(dtype)
cupy_data = cupy.array(np_data)
pd_data = pd.Series(np_data)
cudf_data = cudf.Series(cupy_data)
assert_eq(pd_data, cudf_data)
gdf = cudf.DataFrame()
gdf["test"] = cupy_data
pd_data.name = "test"
assert_eq(pd_data, gdf["test"])
@pytest.mark.parametrize("nelem", [0, 2, 3, 100])
@pytest.mark.parametrize("nchunks", [1, 2, 5, 10])
@pytest.mark.parametrize("data_type", dtypes)
def test_from_arrow_chunked_arrays(nelem, nchunks, data_type):
np_list_data = [
np.random.randint(0, 100, nelem).astype(data_type)
for i in range(nchunks)
]
pa_chunk_array = pa.chunked_array(np_list_data)
expect = pd.Series(pa_chunk_array.to_pandas())
got = cudf.Series(pa_chunk_array)
assert_eq(expect, got)
np_list_data2 = [
np.random.randint(0, 100, nelem).astype(data_type)
for i in range(nchunks)
]
pa_chunk_array2 = pa.chunked_array(np_list_data2)
pa_table = pa.Table.from_arrays(
[pa_chunk_array, pa_chunk_array2], names=["a", "b"]
)
expect = pa_table.to_pandas()
got = cudf.DataFrame.from_arrow(pa_table)
assert_eq(expect, got)
@pytest.mark.skip(reason="Test was designed to be run in isolation")
def test_gpu_memory_usage_with_boolmask():
ctx = cuda.current_context()
def query_GPU_memory(note=""):
memInfo = ctx.get_memory_info()
usedMemoryGB = (memInfo.total - memInfo.free) / 1e9
return usedMemoryGB
cuda.current_context().deallocations.clear()
nRows = int(1e8)
nCols = 2
dataNumpy = np.asfortranarray(np.random.rand(nRows, nCols))
colNames = ["col" + str(iCol) for iCol in range(nCols)]
pandasDF = pd.DataFrame(data=dataNumpy, columns=colNames, dtype=np.float32)
cudaDF = cudf.core.DataFrame.from_pandas(pandasDF)
boolmask = cudf.Series(np.random.randint(1, 2, len(cudaDF)).astype("bool"))
memory_used = query_GPU_memory()
cudaDF = cudaDF[boolmask]
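    # The boolean take should reuse a single index buffer across the frame and
    # its columns (pointer equality below) without growing GPU memory usage.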
assert (
cudaDF.index._values.data_array_view.device_ctypes_pointer
== cudaDF["col0"].index._values.data_array_view.device_ctypes_pointer
)
assert (
cudaDF.index._values.data_array_view.device_ctypes_pointer
== cudaDF["col1"].index._values.data_array_view.device_ctypes_pointer
)
assert memory_used == query_GPU_memory()
def test_boolmask(pdf, gdf):
boolmask = np.random.randint(0, 2, len(pdf)) > 0
gdf = gdf[boolmask]
pdf = pdf[boolmask]
assert_eq(pdf, gdf)
@pytest.mark.parametrize(
"mask_shape",
[
(2, "ab"),
(2, "abc"),
(3, "ab"),
(3, "abc"),
(3, "abcd"),
(4, "abc"),
(4, "abcd"),
],
)
def test_dataframe_boolmask(mask_shape):
pdf = pd.DataFrame()
for col in "abc":
pdf[col] = np.random.randint(0, 10, 3)
pdf_mask = pd.DataFrame()
for col in mask_shape[1]:
pdf_mask[col] = np.random.randint(0, 2, mask_shape[0]) > 0
gdf = cudf.DataFrame.from_pandas(pdf)
gdf_mask = cudf.DataFrame.from_pandas(pdf_mask)
gdf = gdf[gdf_mask]
pdf = pdf[pdf_mask]
assert np.array_equal(gdf.columns, pdf.columns)
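    # Masked-out cells are null in cudf and NaN in pandas, so fill with -1
    # before comparing the raw values column by column.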
for col in gdf.columns:
assert np.array_equal(
gdf[col].fillna(-1).to_pandas().values, pdf[col].fillna(-1).values
)
@pytest.mark.parametrize(
"mask",
[
[True, False, True],
pytest.param(
cudf.Series([True, False, True]),
marks=pytest.mark.xfail(
reason="Pandas can't index a multiindex with a Series"
),
),
],
)
def test_dataframe_multiindex_boolmask(mask):
gdf = cudf.DataFrame(
{"w": [3, 2, 1], "x": [1, 2, 3], "y": [0, 1, 0], "z": [1, 1, 1]}
)
gdg = gdf.groupby(["w", "x"]).count()
pdg = gdg.to_pandas()
assert_eq(gdg[mask], pdg[mask])
def test_dataframe_assignment():
pdf = pd.DataFrame()
for col in "abc":
pdf[col] = np.array([0, 1, 1, -2, 10])
gdf = cudf.DataFrame.from_pandas(pdf)
gdf[gdf < 0] = 999
pdf[pdf < 0] = 999
assert_eq(gdf, pdf)
def test_1row_arrow_table():
data = [pa.array([0]), pa.array([1])]
batch = pa.RecordBatch.from_arrays(data, ["f0", "f1"])
table = pa.Table.from_batches([batch])
expect = table.to_pandas()
got = cudf.DataFrame.from_arrow(table)
assert_eq(expect, got)
def test_arrow_handle_no_index_name(pdf, gdf):
gdf_arrow = gdf.to_arrow()
pdf_arrow = pa.Table.from_pandas(pdf)
assert pa.Table.equals(pdf_arrow, gdf_arrow)
got = cudf.DataFrame.from_arrow(gdf_arrow)
expect = pdf_arrow.to_pandas()
assert_eq(expect, got)
@pytest.mark.parametrize("num_rows", [1, 3, 10, 100])
@pytest.mark.parametrize("num_bins", [1, 2, 4, 20])
@pytest.mark.parametrize("right", [True, False])
@pytest.mark.parametrize("dtype", NUMERIC_TYPES + ["bool"])
@pytest.mark.parametrize("series_bins", [True, False])
def test_series_digitize(num_rows, num_bins, right, dtype, series_bins):
data = np.random.randint(0, 100, num_rows).astype(dtype)
bins = np.unique(np.sort(np.random.randint(2, 95, num_bins).astype(dtype)))
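    # np.digitize requires monotonically increasing bins, hence the
    # sort/unique above.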
s = cudf.Series(data)
if series_bins:
s_bins = cudf.Series(bins)
indices = s.digitize(s_bins, right)
else:
indices = s.digitize(bins, right)
np.testing.assert_array_equal(
np.digitize(data, bins, right), indices.to_array()
)
def test_series_digitize_invalid_bins():
s = cudf.Series(np.random.randint(0, 30, 80), dtype="int32")
bins = cudf.Series([2, None, None, 50, 90], dtype="int32")
with pytest.raises(
ValueError, match="`bins` cannot contain null entries."
):
_ = s.digitize(bins)
def test_pandas_non_contiguous():
arr1 = np.random.sample([5000, 10])
assert arr1.flags["C_CONTIGUOUS"] is True
df = pd.DataFrame(arr1)
for col in df.columns:
assert df[col].values.flags["C_CONTIGUOUS"] is False
gdf = cudf.DataFrame.from_pandas(df)
assert_eq(gdf.to_pandas(), df)
@pytest.mark.parametrize("num_elements", [0, 2, 10, 100])
@pytest.mark.parametrize("null_type", [np.nan, None, "mixed"])
def test_series_all_null(num_elements, null_type):
if null_type == "mixed":
data = []
data1 = [np.nan] * int(num_elements / 2)
data2 = [None] * int(num_elements / 2)
for idx in range(len(data1)):
data.append(data1[idx])
data.append(data2[idx])
else:
data = [null_type] * num_elements
    # Cast the pandas Series to float64 explicitly: a list of Nones would
    # otherwise produce an `object` dtype.
expect = pd.Series(data, dtype="float64")
got = cudf.Series(data)
assert_eq(expect, got)
@pytest.mark.parametrize("num_elements", [0, 2, 10, 100])
def test_series_all_valid_nan(num_elements):
data = [np.nan] * num_elements
sr = cudf.Series(data, nan_as_null=False)
np.testing.assert_equal(sr.null_count, 0)
def test_series_rename():
pds = pd.Series([1, 2, 3], name="asdf")
gds = cudf.Series([1, 2, 3], name="asdf")
expect = pds.rename("new_name")
got = gds.rename("new_name")
assert_eq(expect, got)
pds = pd.Series(expect)
gds = cudf.Series(got)
assert_eq(pds, gds)
pds = pd.Series(expect, name="name name")
gds = cudf.Series(got, name="name name")
assert_eq(pds, gds)
@pytest.mark.parametrize("data_type", dtypes)
@pytest.mark.parametrize("nelem", [0, 100])
def test_head_tail(nelem, data_type):
def check_index_equality(left, right):
assert left.index.equals(right.index)
def check_values_equality(left, right):
if len(left) == 0 and len(right) == 0:
return None
np.testing.assert_array_equal(left.to_pandas(), right.to_pandas())
def check_frame_series_equality(left, right):
check_index_equality(left, right)
check_values_equality(left, right)
gdf = cudf.DataFrame(
{
"a": np.random.randint(0, 1000, nelem).astype(data_type),
"b": np.random.randint(0, 1000, nelem).astype(data_type),
}
)
check_frame_series_equality(gdf.head(), gdf[:5])
check_frame_series_equality(gdf.head(3), gdf[:3])
check_frame_series_equality(gdf.head(-2), gdf[:-2])
check_frame_series_equality(gdf.head(0), gdf[0:0])
check_frame_series_equality(gdf["a"].head(), gdf["a"][:5])
check_frame_series_equality(gdf["a"].head(3), gdf["a"][:3])
check_frame_series_equality(gdf["a"].head(-2), gdf["a"][:-2])
check_frame_series_equality(gdf.tail(), gdf[-5:])
check_frame_series_equality(gdf.tail(3), gdf[-3:])
check_frame_series_equality(gdf.tail(-2), gdf[2:])
check_frame_series_equality(gdf.tail(0), gdf[0:0])
check_frame_series_equality(gdf["a"].tail(), gdf["a"][-5:])
check_frame_series_equality(gdf["a"].tail(3), gdf["a"][-3:])
check_frame_series_equality(gdf["a"].tail(-2), gdf["a"][2:])
def test_tail_for_string():
gdf = cudf.DataFrame()
gdf["id"] = cudf.Series(["a", "b"], dtype=np.object_)
gdf["v"] = cudf.Series([1, 2])
assert_eq(gdf.tail(3), gdf.to_pandas().tail(3))
@pytest.mark.parametrize("drop", [True, False])
def test_reset_index(pdf, gdf, drop):
assert_eq(
pdf.reset_index(drop=drop, inplace=False),
gdf.reset_index(drop=drop, inplace=False),
)
assert_eq(
pdf.x.reset_index(drop=drop, inplace=False),
gdf.x.reset_index(drop=drop, inplace=False),
)
@pytest.mark.parametrize("drop", [True, False])
def test_reset_named_index(pdf, gdf, drop):
pdf.index.name = "cudf"
gdf.index.name = "cudf"
assert_eq(
pdf.reset_index(drop=drop, inplace=False),
gdf.reset_index(drop=drop, inplace=False),
)
assert_eq(
pdf.x.reset_index(drop=drop, inplace=False),
gdf.x.reset_index(drop=drop, inplace=False),
)
@pytest.mark.parametrize("drop", [True, False])
def test_reset_index_inplace(pdf, gdf, drop):
pdf.reset_index(drop=drop, inplace=True)
gdf.reset_index(drop=drop, inplace=True)
assert_eq(pdf, gdf)
@pytest.mark.parametrize(
"data",
[
{
"a": [1, 2, 3, 4, 5],
"b": ["a", "b", "c", "d", "e"],
"c": [1.0, 2.0, 3.0, 4.0, 5.0],
}
],
)
@pytest.mark.parametrize(
"index",
[
"a",
["a", "b"],
pd.CategoricalIndex(["I", "II", "III", "IV", "V"]),
pd.Series(["h", "i", "k", "l", "m"]),
["b", pd.Index(["I", "II", "III", "IV", "V"])],
["c", [11, 12, 13, 14, 15]],
pd.MultiIndex(
levels=[
["I", "II", "III", "IV", "V"],
["one", "two", "three", "four", "five"],
],
codes=[[0, 1, 2, 3, 4], [4, 3, 2, 1, 0]],
names=["col1", "col2"],
),
pd.RangeIndex(0, 5), # corner case
[pd.Series(["h", "i", "k", "l", "m"]), pd.RangeIndex(0, 5)],
[
pd.MultiIndex(
levels=[
["I", "II", "III", "IV", "V"],
["one", "two", "three", "four", "five"],
],
codes=[[0, 1, 2, 3, 4], [4, 3, 2, 1, 0]],
names=["col1", "col2"],
),
pd.RangeIndex(0, 5),
],
],
)
@pytest.mark.parametrize("drop", [True, False])
@pytest.mark.parametrize("append", [True, False])
@pytest.mark.parametrize("inplace", [True, False])
def test_set_index(data, index, drop, append, inplace):
gdf = cudf.DataFrame(data)
pdf = gdf.to_pandas()
expected = pdf.set_index(index, inplace=inplace, drop=drop, append=append)
actual = gdf.set_index(index, inplace=inplace, drop=drop, append=append)
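    # With inplace=True both calls return None, so compare the mutated
    # frames instead of the return values.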
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"data",
[
{
"a": [1, 1, 2, 2, 5],
"b": ["a", "b", "c", "d", "e"],
"c": [1.0, 2.0, 3.0, 4.0, 5.0],
}
],
)
@pytest.mark.parametrize("index", ["a", pd.Index([1, 1, 2, 2, 3])])
@pytest.mark.parametrize("verify_integrity", [True])
@pytest.mark.xfail
def test_set_index_verify_integrity(data, index, verify_integrity):
gdf = cudf.DataFrame(data)
gdf.set_index(index, verify_integrity=verify_integrity)
@pytest.mark.parametrize("drop", [True, False])
@pytest.mark.parametrize("nelem", [10, 200, 1333])
def test_set_index_multi(drop, nelem):
np.random.seed(0)
a = np.arange(nelem)
np.random.shuffle(a)
df = pd.DataFrame(
{
"a": a,
"b": np.random.randint(0, 4, size=nelem),
"c": np.random.uniform(low=0, high=4, size=nelem),
"d": np.random.choice(["green", "black", "white"], nelem),
}
)
df["e"] = df["d"].astype("category")
gdf = cudf.DataFrame.from_pandas(df)
assert_eq(gdf.set_index("a", drop=drop), gdf.set_index(["a"], drop=drop))
assert_eq(
df.set_index(["b", "c"], drop=drop),
gdf.set_index(["b", "c"], drop=drop),
)
assert_eq(
df.set_index(["d", "b"], drop=drop),
gdf.set_index(["d", "b"], drop=drop),
)
assert_eq(
df.set_index(["b", "d", "e"], drop=drop),
gdf.set_index(["b", "d", "e"], drop=drop),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_0(copy):
# TODO (ptaylor): pandas changes `int` dtype to `float64`
# when reindexing and filling new label indices with NaN
gdf = cudf.datasets.randomdata(
nrows=6,
dtypes={
"a": "category",
# 'b': int,
"c": float,
"d": str,
},
)
pdf = gdf.to_pandas()
# Validate reindex returns a copy unmodified
assert_eq(pdf.reindex(copy=True), gdf.reindex(copy=copy))
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_1(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as index when axis defaults to 0
assert_eq(pdf.reindex(index, copy=True), gdf.reindex(index, copy=copy))
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_2(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as index when axis=0
assert_eq(
pdf.reindex(index, axis=0, copy=True),
gdf.reindex(index, axis=0, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_3(copy):
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
    # Validate labels are used as columns when axis=1
assert_eq(
pdf.reindex(columns, axis=1, copy=True),
gdf.reindex(columns, axis=1, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_4(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as index when axis=0
assert_eq(
pdf.reindex(labels=index, axis=0, copy=True),
gdf.reindex(labels=index, axis=0, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_5(copy):
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as columns when axis=1
assert_eq(
pdf.reindex(labels=columns, axis=1, copy=True),
gdf.reindex(labels=columns, axis=1, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_6(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as index when axis='index'
assert_eq(
pdf.reindex(labels=index, axis="index", copy=True),
gdf.reindex(labels=index, axis="index", copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_7(copy):
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as columns when axis='columns'
assert_eq(
pdf.reindex(labels=columns, axis="columns", copy=True),
gdf.reindex(labels=columns, axis="columns", copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_8(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate reindexes labels when index=labels
assert_eq(
pdf.reindex(index=index, copy=True),
gdf.reindex(index=index, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_9(copy):
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate reindexes column names when columns=labels
assert_eq(
pdf.reindex(columns=columns, copy=True),
gdf.reindex(columns=columns, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_10(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate reindexes both labels and column names when
# index=index_labels and columns=column_labels
assert_eq(
pdf.reindex(index=index, columns=columns, copy=True),
gdf.reindex(index=index, columns=columns, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_change_dtype(copy):
if PANDAS_GE_110:
kwargs = {"check_freq": False}
else:
kwargs = {}
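    # `check_freq` is only understood by the pandas >= 1.1 testing utilities,
    # hence the version gate above.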
index = pd.date_range("12/29/2009", periods=10, freq="D")
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate reindexes both labels and column names when
# index=index_labels and columns=column_labels
assert_eq(
pdf.reindex(index=index, columns=columns, copy=True),
gdf.reindex(index=index, columns=columns, copy=copy),
**kwargs,
)
@pytest.mark.parametrize("copy", [True, False])
def test_series_categorical_reindex(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(nrows=6, dtypes={"a": "category"})
pdf = gdf.to_pandas()
assert_eq(pdf["a"].reindex(copy=True), gdf["a"].reindex(copy=copy))
assert_eq(
pdf["a"].reindex(index, copy=True), gdf["a"].reindex(index, copy=copy)
)
assert_eq(
pdf["a"].reindex(index=index, copy=True),
gdf["a"].reindex(index=index, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_series_float_reindex(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(nrows=6, dtypes={"c": float})
pdf = gdf.to_pandas()
assert_eq(pdf["c"].reindex(copy=True), gdf["c"].reindex(copy=copy))
assert_eq(
pdf["c"].reindex(index, copy=True), gdf["c"].reindex(index, copy=copy)
)
assert_eq(
pdf["c"].reindex(index=index, copy=True),
gdf["c"].reindex(index=index, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_series_string_reindex(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(nrows=6, dtypes={"d": str})
pdf = gdf.to_pandas()
assert_eq(pdf["d"].reindex(copy=True), gdf["d"].reindex(copy=copy))
assert_eq(
pdf["d"].reindex(index, copy=True), gdf["d"].reindex(index, copy=copy)
)
assert_eq(
pdf["d"].reindex(index=index, copy=True),
gdf["d"].reindex(index=index, copy=copy),
)
def test_to_frame(pdf, gdf):
assert_eq(pdf.x.to_frame(), gdf.x.to_frame())
name = "foo"
gdf_new_name = gdf.x.to_frame(name=name)
pdf_new_name = pdf.x.to_frame(name=name)
    assert_eq(gdf_new_name, pdf_new_name)
name = False
gdf_new_name = gdf.x.to_frame(name=name)
pdf_new_name = pdf.x.to_frame(name=name)
assert_eq(gdf_new_name, pdf_new_name)
assert gdf_new_name.columns[0] is name
def test_dataframe_empty_sort_index():
pdf = pd.DataFrame({"x": []})
gdf = cudf.DataFrame.from_pandas(pdf)
expect = pdf.sort_index()
got = gdf.sort_index()
assert_eq(expect, got)
@pytest.mark.parametrize("axis", [0, 1, "index", "columns"])
@pytest.mark.parametrize("ascending", [True, False])
@pytest.mark.parametrize("ignore_index", [True, False])
@pytest.mark.parametrize("inplace", [True, False])
@pytest.mark.parametrize("na_position", ["first", "last"])
def test_dataframe_sort_index(
axis, ascending, inplace, ignore_index, na_position
):
pdf = pd.DataFrame(
{"b": [1, 3, 2], "a": [1, 4, 3], "c": [4, 1, 5]},
index=[3.0, 1.0, np.nan],
)
gdf = cudf.DataFrame.from_pandas(pdf)
expected = pdf.sort_index(
axis=axis,
ascending=ascending,
ignore_index=ignore_index,
inplace=inplace,
na_position=na_position,
)
got = gdf.sort_index(
axis=axis,
ascending=ascending,
ignore_index=ignore_index,
inplace=inplace,
na_position=na_position,
)
if inplace is True:
assert_eq(pdf, gdf)
else:
assert_eq(expected, got)
@pytest.mark.parametrize("axis", [0, 1, "index", "columns"])
@pytest.mark.parametrize(
"level",
[
0,
"b",
1,
["b"],
"a",
["a", "b"],
["b", "a"],
[0, 1],
[1, 0],
[0, 2],
None,
],
)
@pytest.mark.parametrize("ascending", [True, False])
@pytest.mark.parametrize("ignore_index", [True, False])
@pytest.mark.parametrize("inplace", [True, False])
@pytest.mark.parametrize("na_position", ["first", "last"])
def test_dataframe_multiindex_sort_index(
axis, level, ascending, inplace, ignore_index, na_position
):
pdf = pd.DataFrame(
{
"b": [1.0, 3.0, np.nan],
"a": [1, 4, 3],
1: ["a", "b", "c"],
"e": [3, 1, 4],
"d": [1, 2, 8],
}
).set_index(["b", "a", 1])
gdf = cudf.DataFrame.from_pandas(pdf)
    # pandas sort_index is called without ignore_index here (it was only
    # added in pandas 1.0); the index is dropped manually below when
    # ignore_index is True.
expected = pdf.sort_index(
axis=axis,
level=level,
ascending=ascending,
inplace=inplace,
na_position=na_position,
)
got = gdf.sort_index(
axis=axis,
level=level,
ascending=ascending,
ignore_index=ignore_index,
inplace=inplace,
na_position=na_position,
)
if inplace is True:
if ignore_index is True:
pdf = pdf.reset_index(drop=True)
assert_eq(pdf, gdf)
else:
if ignore_index is True:
expected = expected.reset_index(drop=True)
assert_eq(expected, got)
@pytest.mark.parametrize("dtype", dtypes + ["category"])
def test_dataframe_0_row_dtype(dtype):
if dtype == "category":
data = pd.Series(["a", "b", "c", "d", "e"], dtype="category")
else:
data = np.array([1, 2, 3, 4, 5], dtype=dtype)
expect = cudf.DataFrame()
expect["x"] = data
expect["y"] = data
got = expect.head(0)
for col_name in got.columns:
assert expect[col_name].dtype == got[col_name].dtype
expect = cudf.Series(data)
got = expect.head(0)
assert expect.dtype == got.dtype
@pytest.mark.parametrize("nan_as_null", [True, False])
def test_series_list_nanasnull(nan_as_null):
data = [1.0, 2.0, 3.0, np.nan, None]
expect = pa.array(data, from_pandas=nan_as_null)
got = cudf.Series(data, nan_as_null=nan_as_null).to_arrow()
# Bug in Arrow 0.14.1 where NaNs aren't handled
expect = expect.cast("int64", safe=False)
got = got.cast("int64", safe=False)
assert pa.Array.equals(expect, got)
def test_column_assignment():
gdf = cudf.datasets.randomdata(
nrows=20, dtypes={"a": "category", "b": int, "c": float}
)
new_cols = ["q", "r", "s"]
gdf.columns = new_cols
assert list(gdf.columns) == new_cols
def test_select_dtype():
gdf = cudf.datasets.randomdata(
nrows=20, dtypes={"a": "category", "b": int, "c": float, "d": str}
)
pdf = gdf.to_pandas()
assert_eq(pdf.select_dtypes("float64"), gdf.select_dtypes("float64"))
assert_eq(pdf.select_dtypes(np.float64), gdf.select_dtypes(np.float64))
assert_eq(
pdf.select_dtypes(include=["float64"]),
gdf.select_dtypes(include=["float64"]),
)
assert_eq(
pdf.select_dtypes(include=["object", "int", "category"]),
gdf.select_dtypes(include=["object", "int", "category"]),
)
assert_eq(
pdf.select_dtypes(include=["int64", "float64"]),
gdf.select_dtypes(include=["int64", "float64"]),
)
assert_eq(
pdf.select_dtypes(include=np.number),
gdf.select_dtypes(include=np.number),
)
assert_eq(
pdf.select_dtypes(include=[np.int64, np.float64]),
gdf.select_dtypes(include=[np.int64, np.float64]),
)
assert_eq(
pdf.select_dtypes(include=["category"]),
gdf.select_dtypes(include=["category"]),
)
assert_eq(
pdf.select_dtypes(exclude=np.number),
gdf.select_dtypes(exclude=np.number),
)
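    # `includes` is not a valid select_dtypes keyword; both implementations
    # should raise equivalent errors.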
assert_exceptions_equal(
lfunc=pdf.select_dtypes,
rfunc=gdf.select_dtypes,
lfunc_args_and_kwargs=([], {"includes": ["Foo"]}),
rfunc_args_and_kwargs=([], {"includes": ["Foo"]}),
)
assert_exceptions_equal(
lfunc=pdf.select_dtypes,
rfunc=gdf.select_dtypes,
lfunc_args_and_kwargs=(
[],
{"exclude": np.number, "include": np.number},
),
rfunc_args_and_kwargs=(
[],
{"exclude": np.number, "include": np.number},
),
)
gdf = cudf.DataFrame(
{"A": [3, 4, 5], "C": [1, 2, 3], "D": ["a", "b", "c"]}
)
pdf = gdf.to_pandas()
assert_eq(
pdf.select_dtypes(include=["object", "int", "category"]),
gdf.select_dtypes(include=["object", "int", "category"]),
)
assert_eq(
pdf.select_dtypes(include=["object"], exclude=["category"]),
gdf.select_dtypes(include=["object"], exclude=["category"]),
)
gdf = cudf.DataFrame({"a": range(10), "b": range(10, 20)})
pdf = gdf.to_pandas()
assert_eq(
pdf.select_dtypes(include=["category"]),
gdf.select_dtypes(include=["category"]),
)
assert_eq(
pdf.select_dtypes(include=["float"]),
gdf.select_dtypes(include=["float"]),
)
assert_eq(
pdf.select_dtypes(include=["object"]),
gdf.select_dtypes(include=["object"]),
)
assert_eq(
pdf.select_dtypes(include=["int"]), gdf.select_dtypes(include=["int"])
)
assert_eq(
pdf.select_dtypes(exclude=["float"]),
gdf.select_dtypes(exclude=["float"]),
)
assert_eq(
pdf.select_dtypes(exclude=["object"]),
gdf.select_dtypes(exclude=["object"]),
)
assert_eq(
pdf.select_dtypes(include=["int"], exclude=["object"]),
gdf.select_dtypes(include=["int"], exclude=["object"]),
)
assert_exceptions_equal(
lfunc=pdf.select_dtypes, rfunc=gdf.select_dtypes,
)
gdf = cudf.DataFrame(
{"a": cudf.Series([], dtype="int"), "b": cudf.Series([], dtype="str")}
)
pdf = gdf.to_pandas()
assert_eq(
pdf.select_dtypes(exclude=["object"]),
gdf.select_dtypes(exclude=["object"]),
)
assert_eq(
pdf.select_dtypes(include=["int"], exclude=["object"]),
gdf.select_dtypes(include=["int"], exclude=["object"]),
)
def test_select_dtype_datetime():
gdf = cudf.datasets.timeseries(
start="2000-01-01", end="2000-01-02", freq="3600s", dtypes={"x": int}
)
gdf = gdf.reset_index()
pdf = gdf.to_pandas()
assert_eq(pdf.select_dtypes("datetime64"), gdf.select_dtypes("datetime64"))
assert_eq(
pdf.select_dtypes(np.dtype("datetime64")),
gdf.select_dtypes(np.dtype("datetime64")),
)
assert_eq(
pdf.select_dtypes(include="datetime64"),
gdf.select_dtypes(include="datetime64"),
)
def test_select_dtype_datetime_with_frequency():
gdf = cudf.datasets.timeseries(
start="2000-01-01", end="2000-01-02", freq="3600s", dtypes={"x": int}
)
gdf = gdf.reset_index()
pdf = gdf.to_pandas()
assert_exceptions_equal(
pdf.select_dtypes,
gdf.select_dtypes,
(["datetime64[ms]"],),
(["datetime64[ms]"],),
)
def test_array_ufunc():
gdf = cudf.DataFrame({"x": [2, 3, 4.0], "y": [9.0, 2.5, 1.1]})
pdf = gdf.to_pandas()
assert_eq(np.sqrt(gdf), np.sqrt(pdf))
assert_eq(np.sqrt(gdf.x), np.sqrt(pdf.x))
@pytest.mark.parametrize("nan_value", [-5, -5.0, 0, 5, 5.0, None, "pandas"])
def test_series_to_gpu_array(nan_value):
s = cudf.Series([0, 1, None, 3])
np.testing.assert_array_equal(
s.to_array(nan_value), s.to_gpu_array(nan_value).copy_to_host()
)
def test_dataframe_describe_exclude():
np.random.seed(12)
data_length = 10000
df = cudf.DataFrame()
df["x"] = np.random.normal(10, 1, data_length)
df["x"] = df.x.astype("int64")
df["y"] = np.random.normal(10, 1, data_length)
pdf = df.to_pandas()
gdf_results = df.describe(exclude=["float"])
pdf_results = pdf.describe(exclude=["float"])
assert_eq(gdf_results, pdf_results)
def test_dataframe_describe_include():
np.random.seed(12)
data_length = 10000
df = cudf.DataFrame()
df["x"] = np.random.normal(10, 1, data_length)
df["x"] = df.x.astype("int64")
df["y"] = np.random.normal(10, 1, data_length)
pdf = df.to_pandas()
gdf_results = df.describe(include=["int"])
pdf_results = pdf.describe(include=["int"])
assert_eq(gdf_results, pdf_results)
def test_dataframe_describe_default():
np.random.seed(12)
data_length = 10000
df = cudf.DataFrame()
df["x"] = np.random.normal(10, 1, data_length)
df["y"] = np.random.normal(10, 1, data_length)
pdf = df.to_pandas()
gdf_results = df.describe()
pdf_results = pdf.describe()
assert_eq(pdf_results, gdf_results)
def test_series_describe_include_all():
np.random.seed(12)
data_length = 10000
df = cudf.DataFrame()
df["x"] = np.random.normal(10, 1, data_length)
df["x"] = df.x.astype("int64")
df["y"] = np.random.normal(10, 1, data_length)
df["animal"] = np.random.choice(["dog", "cat", "bird"], data_length)
pdf = df.to_pandas()
gdf_results = df.describe(include="all")
pdf_results = pdf.describe(include="all")
assert_eq(gdf_results[["x", "y"]], pdf_results[["x", "y"]])
assert_eq(gdf_results.index, pdf_results.index)
assert_eq(gdf_results.columns, pdf_results.columns)
assert_eq(
gdf_results[["animal"]].fillna(-1).astype("str"),
pdf_results[["animal"]].fillna(-1).astype("str"),
)
def test_dataframe_describe_percentiles():
np.random.seed(12)
data_length = 10000
sample_percentiles = [0.0, 0.1, 0.33, 0.84, 0.4, 0.99]
df = cudf.DataFrame()
df["x"] = np.random.normal(10, 1, data_length)
df["y"] = np.random.normal(10, 1, data_length)
pdf = df.to_pandas()
gdf_results = df.describe(percentiles=sample_percentiles)
pdf_results = pdf.describe(percentiles=sample_percentiles)
assert_eq(pdf_results, gdf_results)
def test_get_numeric_data():
pdf = pd.DataFrame(
{"x": [1, 2, 3], "y": [1.0, 2.0, 3.0], "z": ["a", "b", "c"]}
)
gdf = cudf.from_pandas(pdf)
assert_eq(pdf._get_numeric_data(), gdf._get_numeric_data())
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize("period", [-1, -5, -10, -20, 0, 1, 5, 10, 20])
@pytest.mark.parametrize("data_empty", [False, True])
def test_shift(dtype, period, data_empty):
if data_empty:
data = None
else:
if dtype == np.int8:
# to keep data in range
data = gen_rand(dtype, 100000, low=-2, high=2)
else:
data = gen_rand(dtype, 100000)
gdf = cudf.DataFrame({"a": cudf.Series(data, dtype=dtype)})
pdf = pd.DataFrame({"a": pd.Series(data, dtype=dtype)})
shifted_outcome = gdf.a.shift(period).fillna(0)
expected_outcome = pdf.a.shift(period).fillna(0).astype(dtype)
if data_empty:
assert_eq(shifted_outcome, expected_outcome, check_index_type=False)
else:
assert_eq(shifted_outcome, expected_outcome)
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize("period", [-1, -5, -10, -20, 0, 1, 5, 10, 20])
@pytest.mark.parametrize("data_empty", [False, True])
def test_diff(dtype, period, data_empty):
if data_empty:
data = None
else:
if dtype == np.int8:
# to keep data in range
data = gen_rand(dtype, 100000, low=-2, high=2)
else:
data = gen_rand(dtype, 100000)
gdf = cudf.DataFrame({"a": cudf.Series(data, dtype=dtype)})
pdf = pd.DataFrame({"a": pd.Series(data, dtype=dtype)})
expected_outcome = pdf.a.diff(period)
diffed_outcome = gdf.a.diff(period).astype(expected_outcome.dtype)
if data_empty:
assert_eq(diffed_outcome, expected_outcome, check_index_type=False)
else:
assert_eq(diffed_outcome, expected_outcome)
@pytest.mark.parametrize("df", _dataframe_na_data())
@pytest.mark.parametrize("nan_as_null", [True, False, None])
def test_dataframe_isnull_isna(df, nan_as_null):
gdf = cudf.DataFrame.from_pandas(df, nan_as_null=nan_as_null)
assert_eq(df.isnull(), gdf.isnull())
assert_eq(df.isna(), gdf.isna())
# Test individual columns
for col in df:
assert_eq(df[col].isnull(), gdf[col].isnull())
assert_eq(df[col].isna(), gdf[col].isna())
@pytest.mark.parametrize("df", _dataframe_na_data())
@pytest.mark.parametrize("nan_as_null", [True, False, None])
def test_dataframe_notna_notnull(df, nan_as_null):
gdf = cudf.DataFrame.from_pandas(df, nan_as_null=nan_as_null)
assert_eq(df.notnull(), gdf.notnull())
assert_eq(df.notna(), gdf.notna())
# Test individual columns
for col in df:
assert_eq(df[col].notnull(), gdf[col].notnull())
assert_eq(df[col].notna(), gdf[col].notna())
def test_ndim():
pdf = pd.DataFrame({"x": range(5), "y": range(5, 10)})
gdf = cudf.DataFrame.from_pandas(pdf)
assert pdf.ndim == gdf.ndim
assert pdf.x.ndim == gdf.x.ndim
s = pd.Series(dtype="float64")
gs = cudf.Series()
assert s.ndim == gs.ndim
@pytest.mark.parametrize(
"decimals",
[
-3,
0,
5,
pd.Series([1, 4, 3, -6], index=["w", "x", "y", "z"]),
cudf.Series([-4, -2, 12], index=["x", "y", "z"]),
{"w": -1, "x": 15, "y": 2},
],
)
def test_dataframe_round(decimals):
pdf = pd.DataFrame(
{
"w": np.arange(0.5, 10.5, 1),
"x": np.random.normal(-100, 100, 10),
"y": np.array(
[
14.123,
2.343,
np.nan,
0.0,
-8.302,
np.nan,
94.313,
-112.236,
-8.029,
np.nan,
]
),
"z": np.repeat([-0.6459412758761901], 10),
}
)
gdf = cudf.DataFrame.from_pandas(pdf)
if isinstance(decimals, cudf.Series):
pdecimals = decimals.to_pandas()
else:
pdecimals = decimals
result = gdf.round(decimals)
expected = pdf.round(pdecimals)
assert_eq(result, expected)
# with nulls, maintaining existing null mask
for c in pdf.columns:
arr = pdf[c].to_numpy().astype("float64") # for pandas nulls
arr.ravel()[np.random.choice(10, 5, replace=False)] = np.nan
pdf[c] = gdf[c] = arr
result = gdf.round(decimals)
expected = pdf.round(pdecimals)
assert_eq(result, expected)
    for c in gdf.columns:
        assert np.array_equal(
            gdf[c].nullmask.to_array(), result[c].nullmask.to_array()
        )
@pytest.mark.parametrize(
"data",
[
[0, 1, 2, 3],
[-2, -1, 2, 3, 5],
[-2, -1, 0, 3, 5],
[True, False, False],
[True],
[False],
[],
[True, None, False],
[True, True, None],
[None, None],
[[0, 5], [1, 6], [2, 7], [3, 8], [4, 9]],
[[1, True], [2, False], [3, False]],
pytest.param(
[["a", True], ["b", False], ["c", False]],
marks=[
pytest.mark.xfail(
reason="NotImplementedError: all does not "
"support columns of object dtype."
)
],
),
],
)
def test_all(data):
# Pandas treats `None` in object type columns as True for some reason, so
# replacing with `False`
if np.array(data).ndim <= 1:
pdata = cudf.utils.utils._create_pandas_series(data=data).replace(
[None], False
)
gdata = cudf.Series.from_pandas(pdata)
else:
pdata = pd.DataFrame(data, columns=["a", "b"]).replace([None], False)
gdata = cudf.DataFrame.from_pandas(pdata)
# test bool_only
if pdata["b"].dtype == "bool":
got = gdata.all(bool_only=True)
expected = pdata.all(bool_only=True)
assert_eq(got, expected)
else:
with pytest.raises(NotImplementedError):
gdata.all(bool_only=False)
with pytest.raises(NotImplementedError):
gdata.all(level="a")
got = gdata.all()
expected = pdata.all()
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
[0, 1, 2, 3],
[-2, -1, 2, 3, 5],
[-2, -1, 0, 3, 5],
[0, 0, 0, 0, 0],
[0, 0, None, 0],
[True, False, False],
[True],
[False],
[],
[True, None, False],
[True, True, None],
[None, None],
[[0, 5], [1, 6], [2, 7], [3, 8], [4, 9]],
[[1, True], [2, False], [3, False]],
pytest.param(
[["a", True], ["b", False], ["c", False]],
marks=[
pytest.mark.xfail(
reason="NotImplementedError: any does not "
"support columns of object dtype."
)
],
),
],
)
@pytest.mark.parametrize("axis", [0, 1])
def test_any(data, axis):
if np.array(data).ndim <= 1:
pdata = cudf.utils.utils._create_pandas_series(data=data)
gdata = cudf.Series.from_pandas(pdata)
if axis == 1:
with pytest.raises(NotImplementedError):
gdata.any(axis=axis)
else:
got = gdata.any(axis=axis)
expected = pdata.any(axis=axis)
assert_eq(got, expected)
else:
pdata = pd.DataFrame(data, columns=["a", "b"])
gdata = cudf.DataFrame.from_pandas(pdata)
# test bool_only
if pdata["b"].dtype == "bool":
got = gdata.any(bool_only=True)
expected = pdata.any(bool_only=True)
assert_eq(got, expected)
else:
with pytest.raises(NotImplementedError):
gdata.any(bool_only=False)
with pytest.raises(NotImplementedError):
gdata.any(level="a")
got = gdata.any(axis=axis)
expected = pdata.any(axis=axis)
assert_eq(got, expected)
@pytest.mark.parametrize("axis", [0, 1])
def test_empty_dataframe_any(axis):
pdf = pd.DataFrame({}, columns=["a", "b"])
gdf = cudf.DataFrame.from_pandas(pdf)
got = gdf.any(axis=axis)
expected = pdf.any(axis=axis)
assert_eq(got, expected, check_index_type=False)
@pytest.mark.parametrize("indexed", [False, True])
def test_dataframe_sizeof(indexed):
rows = int(1e6)
    index = list(range(rows)) if indexed else None
gdf = cudf.DataFrame({"A": [8] * rows, "B": [32] * rows}, index=index)
for c in gdf._data.columns:
assert gdf._index.__sizeof__() == gdf._index.__sizeof__()
cols_sizeof = sum(c.__sizeof__() for c in gdf._data.columns)
assert gdf.__sizeof__() == (gdf._index.__sizeof__() + cols_sizeof)
@pytest.mark.parametrize("a", [[], ["123"]])
@pytest.mark.parametrize("b", ["123", ["123"]])
@pytest.mark.parametrize(
"misc_data",
["123", ["123"] * 20, 123, [1, 2, 0.8, 0.9] * 50, 0.9, 0.00001],
)
@pytest.mark.parametrize("non_list_data", [123, "abc", "zyx", "rapids", 0.8])
def test_create_dataframe_cols_empty_data(a, b, misc_data, non_list_data):
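    # Column assignment onto a (possibly empty) frame should broadcast scalars
    # and align list-like values the same way pandas does.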
expected = pd.DataFrame({"a": a})
actual = cudf.DataFrame.from_pandas(expected)
expected["b"] = b
actual["b"] = b
assert_eq(actual, expected)
expected = pd.DataFrame({"a": []})
actual = cudf.DataFrame.from_pandas(expected)
expected["b"] = misc_data
actual["b"] = misc_data
assert_eq(actual, expected)
expected = pd.DataFrame({"a": a})
actual = cudf.DataFrame.from_pandas(expected)
expected["b"] = non_list_data
actual["b"] = non_list_data
assert_eq(actual, expected)
def test_empty_dataframe_describe():
pdf = pd.DataFrame({"a": [], "b": []})
gdf = cudf.from_pandas(pdf)
expected = pdf.describe()
actual = gdf.describe()
assert_eq(expected, actual)
def test_as_column_types():
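    # Empty series default to float64; the explicit dtypes below override that.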
col = column.as_column(cudf.Series([]))
assert_eq(col.dtype, np.dtype("float64"))
gds = cudf.Series(col)
pds = pd.Series(pd.Series([], dtype="float64"))
assert_eq(pds, gds)
col = column.as_column(cudf.Series([]), dtype="float32")
assert_eq(col.dtype, np.dtype("float32"))
gds = cudf.Series(col)
pds = pd.Series(pd.Series([], dtype="float32"))
assert_eq(pds, gds)
col = column.as_column(cudf.Series([]), dtype="str")
assert_eq(col.dtype, np.dtype("object"))
gds = cudf.Series(col)
pds = pd.Series(pd.Series([], dtype="str"))
assert_eq(pds, gds)
col = column.as_column(cudf.Series([]), dtype="object")
assert_eq(col.dtype, np.dtype("object"))
gds = cudf.Series(col)
pds = pd.Series(pd.Series([], dtype="object"))
assert_eq(pds, gds)
pds = pd.Series(np.array([1, 2, 3]), dtype="float32")
gds = cudf.Series(column.as_column(np.array([1, 2, 3]), dtype="float32"))
assert_eq(pds, gds)
pds = pd.Series([1, 2, 3], dtype="float32")
gds = cudf.Series([1, 2, 3], dtype="float32")
assert_eq(pds, gds)
pds = pd.Series([], dtype="float64")
gds = cudf.Series(column.as_column(pds))
assert_eq(pds, gds)
pds = pd.Series([1, 2, 4], dtype="int64")
gds = cudf.Series(column.as_column(cudf.Series([1, 2, 4]), dtype="int64"))
assert_eq(pds, gds)
pds = pd.Series([1.2, 18.0, 9.0], dtype="float32")
gds = cudf.Series(
column.as_column(cudf.Series([1.2, 18.0, 9.0]), dtype="float32")
)
assert_eq(pds, gds)
pds = pd.Series([1.2, 18.0, 9.0], dtype="str")
gds = cudf.Series(
column.as_column(cudf.Series([1.2, 18.0, 9.0]), dtype="str")
)
assert_eq(pds, gds)
pds = pd.Series(pd.Index(["1", "18", "9"]), dtype="int")
gds = cudf.Series(
cudf.core.index.StringIndex(["1", "18", "9"]), dtype="int"
)
assert_eq(pds, gds)
def test_one_row_head():
gdf = cudf.DataFrame({"name": ["carl"], "score": [100]}, index=[123])
pdf = gdf.to_pandas()
head_gdf = gdf.head()
head_pdf = pdf.head()
assert_eq(head_pdf, head_gdf)
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize("as_dtype", NUMERIC_TYPES)
def test_series_astype_numeric_to_numeric(dtype, as_dtype):
psr = pd.Series([1, 2, 4, 3], dtype=dtype)
gsr = cudf.from_pandas(psr)
assert_eq(psr.astype(as_dtype), gsr.astype(as_dtype))
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize("as_dtype", NUMERIC_TYPES)
def test_series_astype_numeric_to_numeric_nulls(dtype, as_dtype):
data = [1, 2, None, 3]
sr = cudf.Series(data, dtype=dtype)
got = sr.astype(as_dtype)
expect = cudf.Series([1, 2, None, 3], dtype=as_dtype)
assert_eq(expect, got)
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize(
"as_dtype",
[
"str",
"category",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
],
)
def test_series_astype_numeric_to_other(dtype, as_dtype):
psr = pd.Series([1, 2, 3], dtype=dtype)
gsr = cudf.from_pandas(psr)
assert_eq(psr.astype(as_dtype), gsr.astype(as_dtype))
@pytest.mark.parametrize(
"as_dtype",
[
"str",
"int32",
"uint32",
"float32",
"category",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
],
)
def test_series_astype_string_to_other(as_dtype):
if "datetime64" in as_dtype:
data = ["2001-01-01", "2002-02-02", "2000-01-05"]
else:
data = ["1", "2", "3"]
psr = pd.Series(data)
gsr = cudf.from_pandas(psr)
assert_eq(psr.astype(as_dtype), gsr.astype(as_dtype))
@pytest.mark.parametrize(
"as_dtype",
[
"category",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
],
)
def test_series_astype_datetime_to_other(as_dtype):
data = ["2001-01-01", "2002-02-02", "2001-01-05"]
psr = pd.Series(data)
gsr = cudf.from_pandas(psr)
assert_eq(psr.astype(as_dtype), gsr.astype(as_dtype))
@pytest.mark.parametrize(
"inp",
[
("datetime64[ns]", "2011-01-01 00:00:00.000000000"),
("datetime64[us]", "2011-01-01 00:00:00.000000"),
("datetime64[ms]", "2011-01-01 00:00:00.000"),
("datetime64[s]", "2011-01-01 00:00:00"),
],
)
def test_series_astype_datetime_to_string(inp):
dtype, expect = inp
base_date = "2011-01-01"
sr = cudf.Series([base_date], dtype=dtype)
got = sr.astype(str)[0]
assert expect == got
@pytest.mark.parametrize(
"as_dtype",
[
"int32",
"uint32",
"float32",
"category",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
"str",
],
)
def test_series_astype_categorical_to_other(as_dtype):
if "datetime64" in as_dtype:
data = ["2001-01-01", "2002-02-02", "2000-01-05", "2001-01-01"]
else:
data = [1, 2, 3, 1]
psr = pd.Series(data, dtype="category")
gsr = cudf.from_pandas(psr)
assert_eq(psr.astype(as_dtype), gsr.astype(as_dtype))
@pytest.mark.parametrize("ordered", [True, False])
def test_series_astype_to_categorical_ordered(ordered):
psr = pd.Series([1, 2, 3, 1], dtype="category")
gsr = cudf.from_pandas(psr)
ordered_dtype_pd = pd.CategoricalDtype(
categories=[1, 2, 3], ordered=ordered
)
ordered_dtype_gd = cudf.CategoricalDtype.from_pandas(ordered_dtype_pd)
assert_eq(
psr.astype("int32").astype(ordered_dtype_pd).astype("int32"),
gsr.astype("int32").astype(ordered_dtype_gd).astype("int32"),
)
@pytest.mark.parametrize("ordered", [True, False])
def test_series_astype_cat_ordered_to_unordered(ordered):
pd_dtype = pd.CategoricalDtype(categories=[1, 2, 3], ordered=ordered)
pd_to_dtype = pd.CategoricalDtype(
categories=[1, 2, 3], ordered=not ordered
)
gd_dtype = cudf.CategoricalDtype.from_pandas(pd_dtype)
gd_to_dtype = cudf.CategoricalDtype.from_pandas(pd_to_dtype)
psr = pd.Series([1, 2, 3], dtype=pd_dtype)
gsr = cudf.Series([1, 2, 3], dtype=gd_dtype)
expect = psr.astype(pd_to_dtype)
got = gsr.astype(gd_to_dtype)
assert_eq(expect, got)
def test_series_astype_null_cases():
data = [1, 2, None, 3]
# numerical to other
assert_eq(cudf.Series(data, dtype="str"), cudf.Series(data).astype("str"))
assert_eq(
cudf.Series(data, dtype="category"),
cudf.Series(data).astype("category"),
)
assert_eq(
cudf.Series(data, dtype="float32"),
cudf.Series(data, dtype="int32").astype("float32"),
)
assert_eq(
cudf.Series(data, dtype="float32"),
cudf.Series(data, dtype="uint32").astype("float32"),
)
assert_eq(
cudf.Series(data, dtype="datetime64[ms]"),
cudf.Series(data).astype("datetime64[ms]"),
)
# categorical to other
assert_eq(
cudf.Series(data, dtype="str"),
cudf.Series(data, dtype="category").astype("str"),
)
assert_eq(
cudf.Series(data, dtype="float32"),
cudf.Series(data, dtype="category").astype("float32"),
)
assert_eq(
cudf.Series(data, dtype="datetime64[ms]"),
cudf.Series(data, dtype="category").astype("datetime64[ms]"),
)
# string to other
assert_eq(
cudf.Series([1, 2, None, 3], dtype="int32"),
cudf.Series(["1", "2", None, "3"]).astype("int32"),
)
assert_eq(
cudf.Series(
["2001-01-01", "2001-02-01", None, "2001-03-01"],
dtype="datetime64[ms]",
),
cudf.Series(["2001-01-01", "2001-02-01", None, "2001-03-01"]).astype(
"datetime64[ms]"
),
)
assert_eq(
cudf.Series(["a", "b", "c", None], dtype="category").to_pandas(),
cudf.Series(["a", "b", "c", None]).astype("category").to_pandas(),
)
# datetime to other
data = [
"2001-01-01 00:00:00.000000",
"2001-02-01 00:00:00.000000",
None,
"2001-03-01 00:00:00.000000",
]
assert_eq(
cudf.Series(data),
cudf.Series(data, dtype="datetime64[us]").astype("str"),
)
assert_eq(
pd.Series(data, dtype="datetime64[ns]").astype("category"),
cudf.from_pandas(pd.Series(data, dtype="datetime64[ns]")).astype(
"category"
),
)
def test_series_astype_null_categorical():
sr = cudf.Series([None, None, None], dtype="category")
expect = cudf.Series([None, None, None], dtype="int32")
got = sr.astype("int32")
assert_eq(expect, got)
@pytest.mark.parametrize(
"data",
[
(
pd.Series([3, 3.0]),
pd.Series([2.3, 3.9]),
pd.Series([1.5, 3.9]),
pd.Series([1.0, 2]),
),
[
pd.Series([3, 3.0]),
pd.Series([2.3, 3.9]),
pd.Series([1.5, 3.9]),
pd.Series([1.0, 2]),
],
],
)
def test_create_dataframe_from_list_like(data):
pdf = pd.DataFrame(data, index=["count", "mean", "std", "min"])
gdf = cudf.DataFrame(data, index=["count", "mean", "std", "min"])
assert_eq(pdf, gdf)
pdf = pd.DataFrame(data)
gdf = cudf.DataFrame(data)
assert_eq(pdf, gdf)
def test_create_dataframe_column():
pdf = pd.DataFrame(columns=["a", "b", "c"], index=["A", "Z", "X"])
gdf = cudf.DataFrame(columns=["a", "b", "c"], index=["A", "Z", "X"])
assert_eq(pdf, gdf)
pdf = pd.DataFrame(
{"a": [1, 2, 3], "b": [2, 3, 5]},
columns=["a", "b", "c"],
index=["A", "Z", "X"],
)
gdf = cudf.DataFrame(
{"a": [1, 2, 3], "b": [2, 3, 5]},
columns=["a", "b", "c"],
index=["A", "Z", "X"],
)
assert_eq(pdf, gdf)
@pytest.mark.parametrize(
"data",
[
[1, 2, 4],
[],
[5.0, 7.0, 8.0],
pd.Categorical(["a", "b", "c"]),
["m", "a", "d", "v"],
],
)
def test_series_values_host_property(data):
pds = cudf.utils.utils._create_pandas_series(data=data)
gds = cudf.Series(data)
np.testing.assert_array_equal(pds.values, gds.values_host)
@pytest.mark.parametrize(
"data",
[
[1, 2, 4],
[],
[5.0, 7.0, 8.0],
pytest.param(
pd.Categorical(["a", "b", "c"]),
marks=pytest.mark.xfail(raises=NotImplementedError),
),
pytest.param(
["m", "a", "d", "v"],
marks=pytest.mark.xfail(raises=NotImplementedError),
),
],
)
def test_series_values_property(data):
pds = cudf.utils.utils._create_pandas_series(data=data)
gds = cudf.Series(data)
gds_vals = gds.values
assert isinstance(gds_vals, cupy.ndarray)
np.testing.assert_array_equal(gds_vals.get(), pds.values)
@pytest.mark.parametrize(
"data",
[
{"A": [1, 2, 3], "B": [4, 5, 6]},
{"A": [1.0, 2.0, 3.0], "B": [4.0, 5.0, 6.0]},
{"A": [1, 2, 3], "B": [1.0, 2.0, 3.0]},
{"A": np.float32(np.arange(3)), "B": np.float64(np.arange(3))},
pytest.param(
{"A": [1, None, 3], "B": [1, 2, None]},
marks=pytest.mark.xfail(
reason="Nulls not supported by as_gpu_matrix"
),
),
pytest.param(
{"A": [None, None, None], "B": [None, None, None]},
marks=pytest.mark.xfail(
reason="Nulls not supported by as_gpu_matrix"
),
),
pytest.param(
{"A": [], "B": []},
marks=pytest.mark.xfail(reason="Requires at least 1 row"),
),
pytest.param(
{"A": [1, 2, 3], "B": ["a", "b", "c"]},
marks=pytest.mark.xfail(
reason="str or categorical not supported by as_gpu_matrix"
),
),
pytest.param(
{"A": pd.Categorical(["a", "b", "c"]), "B": ["d", "e", "f"]},
marks=pytest.mark.xfail(
reason="str or categorical not supported by as_gpu_matrix"
),
),
],
)
def test_df_values_property(data):
pdf = pd.DataFrame.from_dict(data)
gdf = cudf.DataFrame.from_pandas(pdf)
pmtr = pdf.values
gmtr = gdf.values.get()
np.testing.assert_array_equal(pmtr, gmtr)
def test_value_counts():
pdf = pd.DataFrame(
{
"numeric": [1, 2, 3, 4, 5, 6, 1, 2, 4] * 10,
"alpha": ["u", "h", "d", "a", "m", "u", "h", "d", "a"] * 10,
}
)
gdf = cudf.DataFrame(
{
"numeric": [1, 2, 3, 4, 5, 6, 1, 2, 4] * 10,
"alpha": ["u", "h", "d", "a", "m", "u", "h", "d", "a"] * 10,
}
)
assert_eq(
pdf.numeric.value_counts().sort_index(),
gdf.numeric.value_counts().sort_index(),
check_dtype=False,
)
assert_eq(
pdf.alpha.value_counts().sort_index(),
gdf.alpha.value_counts().sort_index(),
check_dtype=False,
)
@pytest.mark.parametrize(
"data",
[
[],
[0, 12, 14],
[0, 14, 12, 12, 3, 10, 12, 14],
np.random.randint(-100, 100, 200),
pd.Series([0.0, 1.0, None, 10.0]),
[None, None, None, None],
[np.nan, None, -1, 2, 3],
],
)
@pytest.mark.parametrize(
"values",
[
np.random.randint(-100, 100, 10),
[],
[np.nan, None, -1, 2, 3],
[1.0, 12.0, None, None, 120],
[0, 14, 12, 12, 3, 10, 12, 14, None],
[None, None, None],
["0", "12", "14"],
["0", "12", "14", "a"],
],
)
def test_isin_numeric(data, values):
index = np.random.randint(0, 100, len(data))
psr = cudf.utils.utils._create_pandas_series(data=data, index=index)
gsr = cudf.Series.from_pandas(psr, nan_as_null=False)
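    # nan_as_null=False keeps NaNs as NaN so the comparison with pandas'
    # isin semantics is like-for-like.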
expected = psr.isin(values)
got = gsr.isin(values)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
[],
pd.Series(
["2018-01-01", "2019-04-03", None, "2019-12-30"],
dtype="datetime64[ns]",
),
pd.Series(
[
"2018-01-01",
"2019-04-03",
None,
"2019-12-30",
"2018-01-01",
"2018-01-01",
],
dtype="datetime64[ns]",
),
],
)
@pytest.mark.parametrize(
"values",
[
[],
[1514764800000000000, 1577664000000000000],
[
1514764800000000000,
1577664000000000000,
1577664000000000000,
1577664000000000000,
1514764800000000000,
],
["2019-04-03", "2019-12-30", "2012-01-01"],
[
"2012-01-01",
"2012-01-01",
"2012-01-01",
"2019-04-03",
"2019-12-30",
"2012-01-01",
],
],
)
def test_isin_datetime(data, values):
psr = cudf.utils.utils._create_pandas_series(data=data)
gsr = cudf.Series.from_pandas(psr)
got = gsr.isin(values)
expected = psr.isin(values)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
[],
pd.Series(["this", "is", None, "a", "test"]),
pd.Series(["test", "this", "test", "is", None, "test", "a", "test"]),
pd.Series(["0", "12", "14"]),
],
)
@pytest.mark.parametrize(
"values",
[
[],
["this", "is"],
[None, None, None],
["12", "14", "19"],
pytest.param(
[12, 14, 19],
marks=pytest.mark.xfail(
not PANDAS_GE_120,
reason="pandas's failure here seems like a bug(in < 1.2) "
"given the reverse succeeds",
),
),
["is", "this", "is", "this", "is"],
],
)
def test_isin_string(data, values):
psr = cudf.utils.utils._create_pandas_series(data=data)
gsr = cudf.Series.from_pandas(psr)
got = gsr.isin(values)
expected = psr.isin(values)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
[],
pd.Series(["a", "b", "c", "c", "c", "d", "e"], dtype="category"),
pd.Series(["a", "b", None, "c", "d", "e"], dtype="category"),
pd.Series([0, 3, 10, 12], dtype="category"),
pd.Series([0, 3, 10, 12, 0, 10, 3, 0, 0, 3, 3], dtype="category"),
],
)
@pytest.mark.parametrize(
"values",
[
[],
["a", "b", None, "f", "words"],
["0", "12", None, "14"],
[0, 10, 12, None, 39, 40, 1000],
[0, 0, 0, 0, 3, 3, 3, None, 1, 2, 3],
],
)
def test_isin_categorical(data, values):
psr = cudf.utils.utils._create_pandas_series(data=data)
gsr = cudf.Series.from_pandas(psr)
got = gsr.isin(values)
expected = psr.isin(values)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
[],
pd.Series(
["this", "is", None, "a", "test"], index=["a", "b", "c", "d", "e"]
),
pd.Series([0, 15, 10], index=[0, None, 9]),
pd.Series(
range(25),
index=pd.date_range(
start="2019-01-01", end="2019-01-02", freq="H"
),
),
],
)
@pytest.mark.parametrize(
"values",
[
[],
["this", "is"],
[0, 19, 13],
["2019-01-01 04:00:00", "2019-01-01 06:00:00", "2018-03-02"],
],
)
def test_isin_index(data, values):
psr = cudf.utils.utils._create_pandas_series(data=data)
gsr = cudf.Series.from_pandas(psr)
got = gsr.index.isin(values)
expected = psr.index.isin(values)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
pd.MultiIndex.from_arrays(
[[1, 2, 3], ["red", "blue", "green"]], names=("number", "color")
),
pd.MultiIndex.from_arrays([[], []], names=("number", "color")),
pd.MultiIndex.from_arrays(
[[1, 2, 3, 10, 100], ["red", "blue", "green", "pink", "white"]],
names=("number", "color"),
),
],
)
@pytest.mark.parametrize(
"values,level,err",
[
(["red", "orange", "yellow"], "color", None),
(["red", "white", "yellow"], "color", None),
([0, 1, 2, 10, 11, 15], "number", None),
([0, 1, 2, 10, 11, 15], None, TypeError),
(pd.Series([0, 1, 2, 10, 11, 15]), None, TypeError),
(pd.Index([0, 1, 2, 10, 11, 15]), None, TypeError),
(pd.Index([0, 1, 2, 8, 11, 15]), "number", None),
(pd.Index(["red", "white", "yellow"]), "color", None),
([(1, "red"), (3, "red")], None, None),
(((1, "red"), (3, "red")), None, None),
(
pd.MultiIndex.from_arrays(
[[1, 2, 3], ["red", "blue", "green"]],
names=("number", "color"),
),
None,
None,
),
(
pd.MultiIndex.from_arrays([[], []], names=("number", "color")),
None,
None,
),
(
pd.MultiIndex.from_arrays(
[
[1, 2, 3, 10, 100],
["red", "blue", "green", "pink", "white"],
],
names=("number", "color"),
),
None,
None,
),
],
)
def test_isin_multiindex(data, values, level, err):
pmdx = data
gmdx = cudf.from_pandas(data)
if err is None:
expected = pmdx.isin(values, level=level)
if isinstance(values, pd.MultiIndex):
values = cudf.from_pandas(values)
got = gmdx.isin(values, level=level)
assert_eq(got, expected)
else:
assert_exceptions_equal(
lfunc=pmdx.isin,
rfunc=gmdx.isin,
lfunc_args_and_kwargs=([values], {"level": level}),
rfunc_args_and_kwargs=([values], {"level": level}),
check_exception_type=False,
expected_error_message=re.escape(
"values need to be a Multi-Index or set/list-like tuple "
"squences when `level=None`."
),
)
@pytest.mark.parametrize(
"data",
[
pd.DataFrame(
{
"num_legs": [2, 4],
"num_wings": [2, 0],
"bird_cats": pd.Series(
["sparrow", "pigeon"],
dtype="category",
index=["falcon", "dog"],
),
},
index=["falcon", "dog"],
),
pd.DataFrame(
{"num_legs": [8, 2], "num_wings": [0, 2]},
index=["spider", "falcon"],
),
pd.DataFrame(
{
"num_legs": [8, 2, 1, 0, 2, 4, 5],
"num_wings": [2, 0, 2, 1, 2, 4, -1],
}
),
],
)
@pytest.mark.parametrize(
"values",
[
[0, 2],
{"num_wings": [0, 3]},
pd.DataFrame(
{"num_legs": [8, 2], "num_wings": [0, 2]},
index=["spider", "falcon"],
),
pd.DataFrame(
{
"num_legs": [2, 4],
"num_wings": [2, 0],
"bird_cats": pd.Series(
["sparrow", "pigeon"],
dtype="category",
index=["falcon", "dog"],
),
},
index=["falcon", "dog"],
),
["sparrow", "pigeon"],
pd.Series(["sparrow", "pigeon"], dtype="category"),
pd.Series([1, 2, 3, 4, 5]),
"abc",
123,
],
)
def test_isin_dataframe(data, values):
pdf = data
gdf = cudf.from_pandas(pdf)
if cudf.utils.dtypes.is_scalar(values):
assert_exceptions_equal(
lfunc=pdf.isin,
rfunc=gdf.isin,
lfunc_args_and_kwargs=([values],),
rfunc_args_and_kwargs=([values],),
)
else:
        try:
            expected = pdf.isin(values)
        except ValueError as e:
            # pandas < 1.1 raises "Lengths must match." here; see
            # https://github.com/pandas-dev/pandas/issues/34256
            if str(e) == "Lengths must match." and not PANDAS_GE_110:
                pytest.xfail(
                    "https://github.com/pandas-dev/pandas/issues/34256"
                )
            raise
if isinstance(values, (pd.DataFrame, pd.Series)):
values = cudf.from_pandas(values)
got = gdf.isin(values)
assert_eq(got, expected)
def test_constructor_properties():
df = cudf.DataFrame()
key1 = "a"
key2 = "b"
val1 = np.array([123], dtype=np.float64)
val2 = np.array([321], dtype=np.float64)
df[key1] = val1
df[key2] = val2
# Correct use of _constructor (for DataFrame)
assert_eq(df, df._constructor({key1: val1, key2: val2}))
# Correct use of _constructor (for cudf.Series)
assert_eq(df[key1], df[key2]._constructor(val1, name=key1))
# Correct use of _constructor_sliced (for DataFrame)
assert_eq(df[key1], df._constructor_sliced(val1, name=key1))
# Correct use of _constructor_expanddim (for cudf.Series)
assert_eq(df, df[key2]._constructor_expanddim({key1: val1, key2: val2}))
# Incorrect use of _constructor_sliced (Raises for cudf.Series)
with pytest.raises(NotImplementedError):
df[key1]._constructor_sliced
# Incorrect use of _constructor_expanddim (Raises for DataFrame)
with pytest.raises(NotImplementedError):
df._constructor_expanddim
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize("as_dtype", ALL_TYPES)
def test_df_astype_numeric_to_all(dtype, as_dtype):
if "uint" in dtype:
data = [1, 2, None, 4, 7]
elif "int" in dtype or "longlong" in dtype:
data = [1, 2, None, 4, -7]
elif "float" in dtype:
data = [1.0, 2.0, None, 4.0, np.nan, -7.0]
gdf = cudf.DataFrame()
gdf["foo"] = cudf.Series(data, dtype=dtype)
gdf["bar"] = cudf.Series(data, dtype=dtype)
insert_data = cudf.Series(data, dtype=dtype)
expect = cudf.DataFrame()
expect["foo"] = insert_data.astype(as_dtype)
expect["bar"] = insert_data.astype(as_dtype)
got = gdf.astype(as_dtype)
assert_eq(expect, got)
@pytest.mark.parametrize(
"as_dtype",
[
"int32",
"float32",
"category",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
],
)
def test_df_astype_string_to_other(as_dtype):
if "datetime64" in as_dtype:
# change None to "NaT" after this issue is fixed:
# https://github.com/rapidsai/cudf/issues/5117
data = ["2001-01-01", "2002-02-02", "2000-01-05", None]
elif as_dtype == "int32":
data = [1, 2, 3]
elif as_dtype == "category":
data = ["1", "2", "3", None]
elif "float" in as_dtype:
data = [1.0, 2.0, 3.0, np.nan]
insert_data = cudf.Series.from_pandas(pd.Series(data, dtype="str"))
expect_data = cudf.Series(data, dtype=as_dtype)
gdf = cudf.DataFrame()
expect = cudf.DataFrame()
gdf["foo"] = insert_data
gdf["bar"] = insert_data
expect["foo"] = expect_data
expect["bar"] = expect_data
got = gdf.astype(as_dtype)
assert_eq(expect, got)
@pytest.mark.parametrize(
"as_dtype",
[
"int64",
"datetime64[s]",
"datetime64[us]",
"datetime64[ns]",
"str",
"category",
],
)
def test_df_astype_datetime_to_other(as_dtype):
data = [
"1991-11-20 00:00:00.000",
"2004-12-04 00:00:00.000",
"2016-09-13 00:00:00.000",
None,
]
gdf = cudf.DataFrame()
expect = cudf.DataFrame()
gdf["foo"] = cudf.Series(data, dtype="datetime64[ms]")
gdf["bar"] = cudf.Series(data, dtype="datetime64[ms]")
if as_dtype == "int64":
expect["foo"] = cudf.Series(
[690595200000, 1102118400000, 1473724800000, None], dtype="int64"
)
expect["bar"] = cudf.Series(
[690595200000, 1102118400000, 1473724800000, None], dtype="int64"
)
elif as_dtype == "str":
expect["foo"] = cudf.Series(data, dtype="str")
expect["bar"] = cudf.Series(data, dtype="str")
elif as_dtype == "category":
expect["foo"] = cudf.Series(gdf["foo"], dtype="category")
expect["bar"] = cudf.Series(gdf["bar"], dtype="category")
else:
expect["foo"] = cudf.Series(data, dtype=as_dtype)
expect["bar"] = cudf.Series(data, dtype=as_dtype)
got = gdf.astype(as_dtype)
assert_eq(expect, got)
@pytest.mark.parametrize(
"as_dtype",
[
"int32",
"float32",
"category",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
"str",
],
)
def test_df_astype_categorical_to_other(as_dtype):
if "datetime64" in as_dtype:
data = ["2001-01-01", "2002-02-02", "2000-01-05", "2001-01-01"]
else:
data = [1, 2, 3, 1]
psr = pd.Series(data, dtype="category")
pdf = pd.DataFrame()
pdf["foo"] = psr
pdf["bar"] = psr
gdf = cudf.DataFrame.from_pandas(pdf)
assert_eq(pdf.astype(as_dtype), gdf.astype(as_dtype))
@pytest.mark.parametrize("ordered", [True, False])
def test_df_astype_to_categorical_ordered(ordered):
psr = pd.Series([1, 2, 3, 1], dtype="category")
pdf = pd.DataFrame()
pdf["foo"] = psr
pdf["bar"] = psr
gdf = cudf.DataFrame.from_pandas(pdf)
ordered_dtype_pd = pd.CategoricalDtype(
categories=[1, 2, 3], ordered=ordered
)
ordered_dtype_gd = cudf.CategoricalDtype.from_pandas(ordered_dtype_pd)
assert_eq(
pdf.astype(ordered_dtype_pd).astype("int32"),
gdf.astype(ordered_dtype_gd).astype("int32"),
)
@pytest.mark.parametrize(
"dtype,args",
[(dtype, {}) for dtype in ALL_TYPES]
+ [("category", {"ordered": True}), ("category", {"ordered": False})],
)
def test_empty_df_astype(dtype, args):
df = cudf.DataFrame()
kwargs = {}
kwargs.update(args)
assert_eq(df, df.astype(dtype=dtype, **kwargs))
@pytest.mark.parametrize(
"errors",
[
pytest.param(
"raise", marks=pytest.mark.xfail(reason="should raise error here")
),
pytest.param("other", marks=pytest.mark.xfail(raises=ValueError)),
"ignore",
pytest.param(
"warn", marks=pytest.mark.filterwarnings("ignore:Traceback")
),
],
)
def test_series_astype_error_handling(errors):
sr = cudf.Series(["random", "words"])
got = sr.astype("datetime64", errors=errors)
assert_eq(sr, got)
@pytest.mark.parametrize("dtype", ALL_TYPES)
def test_df_constructor_dtype(dtype):
if "datetime" in dtype:
data = ["1991-11-20", "2004-12-04", "2016-09-13", None]
elif dtype == "str":
data = ["a", "b", "c", None]
elif "float" in dtype:
data = [1.0, 0.5, -1.1, np.nan, None]
elif "bool" in dtype:
data = [True, False, None]
else:
data = [1, 2, 3, None]
sr = cudf.Series(data, dtype=dtype)
expect = cudf.DataFrame()
expect["foo"] = sr
expect["bar"] = sr
got = cudf.DataFrame({"foo": data, "bar": data}, dtype=dtype)
assert_eq(expect, got)
@pytest.mark.parametrize(
"data",
[
cudf.datasets.randomdata(
nrows=10, dtypes={"a": "category", "b": int, "c": float, "d": int}
),
cudf.datasets.randomdata(
nrows=10, dtypes={"a": "category", "b": int, "c": float, "d": str}
),
cudf.datasets.randomdata(
nrows=10, dtypes={"a": bool, "b": int, "c": float, "d": str}
),
cudf.DataFrame(),
cudf.DataFrame({"a": [0, 1, 2], "b": [1, None, 3]}),
cudf.DataFrame(
{
"a": [1, 2, 3, 4],
"b": [7, np.NaN, 9, 10],
"c": [np.NaN, np.NaN, np.NaN, np.NaN],
"d": cudf.Series([None, None, None, None], dtype="int64"),
"e": [100, None, 200, None],
"f": cudf.Series([10, None, np.NaN, 11], nan_as_null=False),
}
),
cudf.DataFrame(
{
"a": [10, 11, 12, 13, 14, 15],
"b": cudf.Series(
[10, None, np.NaN, 2234, None, np.NaN], nan_as_null=False
),
}
),
],
)
@pytest.mark.parametrize(
"op", ["max", "min", "sum", "product", "mean", "var", "std"]
)
@pytest.mark.parametrize("skipna", [True, False])
def test_rowwise_ops(data, op, skipna):
gdf = data
pdf = gdf.to_pandas()
if op in ("var", "std"):
expected = getattr(pdf, op)(axis=1, ddof=0, skipna=skipna)
got = getattr(gdf, op)(axis=1, ddof=0, skipna=skipna)
else:
expected = getattr(pdf, op)(axis=1, skipna=skipna)
got = getattr(gdf, op)(axis=1, skipna=skipna)
assert_eq(expected, got, check_exact=False)
@pytest.mark.parametrize(
"op", ["max", "min", "sum", "product", "mean", "var", "std"]
)
def test_rowwise_ops_nullable_dtypes_all_null(op):
gdf = cudf.DataFrame(
{
"a": [1, 2, 3, 4],
"b": [7, np.NaN, 9, 10],
"c": [np.NaN, np.NaN, np.NaN, np.NaN],
"d": cudf.Series([None, None, None, None], dtype="int64"),
"e": [100, None, 200, None],
"f": cudf.Series([10, None, np.NaN, 11], nan_as_null=False),
}
)
expected = cudf.Series([None, None, None, None], dtype="float64")
if op in ("var", "std"):
got = getattr(gdf, op)(axis=1, ddof=0, skipna=False)
else:
got = getattr(gdf, op)(axis=1, skipna=False)
assert_eq(got.null_count, expected.null_count)
assert_eq(got, expected)
@pytest.mark.parametrize(
"op,expected",
[
(
"max",
cudf.Series(
[10.0, None, np.NaN, 2234.0, None, np.NaN],
dtype="float64",
nan_as_null=False,
),
),
(
"min",
cudf.Series(
[10.0, None, np.NaN, 13.0, None, np.NaN],
dtype="float64",
nan_as_null=False,
),
),
(
"sum",
cudf.Series(
[20.0, None, np.NaN, 2247.0, None, np.NaN],
dtype="float64",
nan_as_null=False,
),
),
(
"product",
cudf.Series(
[100.0, None, np.NaN, 29042.0, None, np.NaN],
dtype="float64",
nan_as_null=False,
),
),
(
"mean",
cudf.Series(
[10.0, None, np.NaN, 1123.5, None, np.NaN],
dtype="float64",
nan_as_null=False,
),
),
(
"var",
cudf.Series(
[0.0, None, np.NaN, 1233210.25, None, np.NaN],
dtype="float64",
nan_as_null=False,
),
),
(
"std",
cudf.Series(
[0.0, None, np.NaN, 1110.5, None, np.NaN],
dtype="float64",
nan_as_null=False,
),
),
],
)
def test_rowwise_ops_nullable_dtypes_partial_null(op, expected):
gdf = cudf.DataFrame(
{
"a": [10, 11, 12, 13, 14, 15],
"b": cudf.Series(
[10, None, np.NaN, 2234, None, np.NaN], nan_as_null=False,
),
}
)
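    # Rows 1 and 4 contain a null (null result with skipna=False), rows 2 and
    # 5 contain a NaN kept as a value via nan_as_null=False (NaN result), and
    # rows 0 and 3 are fully valid; e.g. row 3 has mean (13 + 2234) / 2 =
    # 1123.5 and, with ddof=0, var = 1110.5 ** 2 = 1233210.25, std = 1110.5.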
if op in ("var", "std"):
got = getattr(gdf, op)(axis=1, ddof=0, skipna=False)
else:
got = getattr(gdf, op)(axis=1, skipna=False)
assert_eq(got.null_count, expected.null_count)
assert_eq(got, expected)
@pytest.mark.parametrize(
"op,expected",
[
(
"max",
cudf.Series([10, None, None, 2234, None, 453], dtype="int64",),
),
("min", cudf.Series([10, None, None, 13, None, 15], dtype="int64",),),
(
"sum",
cudf.Series([20, None, None, 2247, None, 468], dtype="int64",),
),
(
"product",
cudf.Series([100, None, None, 29042, None, 6795], dtype="int64",),
),
(
"mean",
cudf.Series(
[10.0, None, None, 1123.5, None, 234.0], dtype="float32",
),
),
(
"var",
cudf.Series(
[0.0, None, None, 1233210.25, None, 47961.0], dtype="float32",
),
),
(
"std",
cudf.Series(
[0.0, None, None, 1110.5, None, 219.0], dtype="float32",
),
),
],
)
def test_rowwise_ops_nullable_int_dtypes(op, expected):
gdf = cudf.DataFrame(
{
"a": [10, 11, None, 13, None, 15],
"b": cudf.Series(
[10, None, 323, 2234, None, 453], nan_as_null=False,
),
}
)
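    # e.g. row 5 (15, 453): mean = 234.0 and, with ddof=0,
    # var = 219.0 ** 2 = 47961.0, std = 219.0.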
if op in ("var", "std"):
got = getattr(gdf, op)(axis=1, ddof=0, skipna=False)
else:
got = getattr(gdf, op)(axis=1, skipna=False)
assert_eq(got.null_count, expected.null_count)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"], dtype="<M8[ms]"
),
"t2": cudf.Series(
["1940-08-31 06:00:00", "2020-08-02 10:00:00"], dtype="<M8[ms]"
),
},
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"], dtype="<M8[ms]"
),
"t2": cudf.Series(
["1940-08-31 06:00:00", "2020-08-02 10:00:00"], dtype="<M8[ns]"
),
"t3": cudf.Series(
["1960-08-31 06:00:00", "2030-08-02 10:00:00"], dtype="<M8[s]"
),
},
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"], dtype="<M8[ms]"
),
"t2": cudf.Series(
["1940-08-31 06:00:00", "2020-08-02 10:00:00"], dtype="<M8[us]"
),
},
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"], dtype="<M8[ms]"
),
"t2": cudf.Series(
["1940-08-31 06:00:00", "2020-08-02 10:00:00"], dtype="<M8[ms]"
),
"i1": cudf.Series([1001, 2002], dtype="int64"),
},
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"], dtype="<M8[ms]"
),
"t2": cudf.Series(["1940-08-31 06:00:00", None], dtype="<M8[ms]"),
"i1": cudf.Series([1001, 2002], dtype="int64"),
},
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"], dtype="<M8[ms]"
),
"i1": cudf.Series([1001, 2002], dtype="int64"),
"f1": cudf.Series([-100.001, 123.456], dtype="float64"),
},
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"], dtype="<M8[ms]"
),
"i1": cudf.Series([1001, 2002], dtype="int64"),
"f1": cudf.Series([-100.001, 123.456], dtype="float64"),
"b1": cudf.Series([True, False], dtype="bool"),
},
],
)
@pytest.mark.parametrize("op", ["max", "min"])
@pytest.mark.parametrize("skipna", [True, False])
def test_rowwise_ops_datetime_dtypes(data, op, skipna):
gdf = cudf.DataFrame(data)
pdf = gdf.to_pandas()
got = getattr(gdf, op)(axis=1, skipna=skipna)
expected = getattr(pdf, op)(axis=1, skipna=skipna)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data,op,skipna",
[
(
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"],
dtype="<M8[ms]",
),
"t2": cudf.Series(
["1940-08-31 06:00:00", None], dtype="<M8[ms]"
),
},
"max",
True,
),
(
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"],
dtype="<M8[ms]",
),
"t2": cudf.Series(
["1940-08-31 06:00:00", None], dtype="<M8[ms]"
),
},
"min",
False,
),
(
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"],
dtype="<M8[ms]",
),
"t2": cudf.Series(
["1940-08-31 06:00:00", None], dtype="<M8[ms]"
),
},
"min",
True,
),
],
)
def test_rowwise_ops_datetime_dtypes_2(data, op, skipna):
gdf = cudf.DataFrame(data)
pdf = gdf.to_pandas()
got = getattr(gdf, op)(axis=1, skipna=skipna)
expected = getattr(pdf, op)(axis=1, skipna=skipna)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
(
{
"t1": pd.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"],
dtype="<M8[ns]",
),
"t2": pd.Series(
["1940-08-31 06:00:00", pd.NaT], dtype="<M8[ns]"
),
}
)
],
)
def test_rowwise_ops_datetime_dtypes_pdbug(data):
pdf = pd.DataFrame(data)
gdf = cudf.from_pandas(pdf)
expected = pdf.max(axis=1, skipna=False)
got = gdf.max(axis=1, skipna=False)
if PANDAS_GE_120:
assert_eq(got, expected)
else:
# PANDAS BUG: https://github.com/pandas-dev/pandas/issues/36907
with pytest.raises(AssertionError, match="numpy array are different"):
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
[5.0, 6.0, 7.0],
"single value",
np.array(1, dtype="int64"),
np.array(0.6273643, dtype="float64"),
],
)
def test_insert(data):
pdf = pd.DataFrame.from_dict({"A": [1, 2, 3], "B": ["a", "b", "c"]})
gdf = cudf.DataFrame.from_pandas(pdf)
# insertion by index
pdf.insert(0, "foo", data)
gdf.insert(0, "foo", data)
assert_eq(pdf, gdf)
pdf.insert(3, "bar", data)
gdf.insert(3, "bar", data)
assert_eq(pdf, gdf)
pdf.insert(1, "baz", data)
gdf.insert(1, "baz", data)
assert_eq(pdf, gdf)
# pandas insert doesn't support negative indexing
pdf.insert(len(pdf.columns), "qux", data)
gdf.insert(-1, "qux", data)
assert_eq(pdf, gdf)
def test_cov():
gdf = cudf.datasets.randomdata(10)
pdf = gdf.to_pandas()
assert_eq(pdf.cov(), gdf.cov())
@pytest.mark.xfail(reason="cupy-based cov does not support nulls")
def test_cov_nans():
pdf = pd.DataFrame()
pdf["a"] = [None, None, None, 2.00758632, None]
pdf["b"] = [0.36403686, None, None, None, None]
pdf["c"] = [None, None, None, 0.64882227, None]
pdf["d"] = [None, -1.46863125, None, 1.22477948, -0.06031689]
gdf = cudf.from_pandas(pdf)
assert_eq(pdf.cov(), gdf.cov())
@pytest.mark.parametrize(
"gsr",
[
cudf.Series([4, 2, 3]),
cudf.Series([4, 2, 3], index=["a", "b", "c"]),
cudf.Series([4, 2, 3], index=["a", "b", "d"]),
cudf.Series([4, 2], index=["a", "b"]),
cudf.Series([4, 2, 3], index=cudf.core.index.RangeIndex(0, 3)),
pytest.param(
cudf.Series([4, 2, 3, 4, 5], index=["a", "b", "d", "0", "12"]),
marks=pytest.mark.xfail,
),
],
)
@pytest.mark.parametrize("colnames", [["a", "b", "c"], [0, 1, 2]])
@pytest.mark.parametrize(
"op",
[
operator.add,
operator.mul,
operator.floordiv,
operator.truediv,
operator.mod,
operator.pow,
operator.eq,
operator.lt,
operator.le,
operator.gt,
operator.ge,
operator.ne,
],
)
def test_df_sr_binop(gsr, colnames, op):
data = [[3.0, 2.0, 5.0], [3.0, None, 5.0], [6.0, 7.0, np.nan]]
data = dict(zip(colnames, data))
gsr = gsr.astype("float64")
gdf = cudf.DataFrame(data)
pdf = gdf.to_pandas(nullable=True)
psr = gsr.to_pandas(nullable=True)
expect = op(pdf, psr)
got = op(gdf, gsr).to_pandas(nullable=True)
assert_eq(expect, got, check_dtype=False)
expect = op(psr, pdf)
got = op(gsr, gdf).to_pandas(nullable=True)
assert_eq(expect, got, check_dtype=False)
@pytest.mark.parametrize(
"op",
[
operator.add,
operator.mul,
operator.floordiv,
operator.truediv,
operator.mod,
operator.pow,
# comparison ops will temporarily XFAIL
# see PR https://github.com/rapidsai/cudf/pull/7491
pytest.param(operator.eq, marks=pytest.mark.xfail()),
pytest.param(operator.lt, marks=pytest.mark.xfail()),
pytest.param(operator.le, marks=pytest.mark.xfail()),
pytest.param(operator.gt, marks=pytest.mark.xfail()),
pytest.param(operator.ge, marks=pytest.mark.xfail()),
pytest.param(operator.ne, marks=pytest.mark.xfail()),
],
)
@pytest.mark.parametrize(
"gsr", [cudf.Series([1, 2, 3, 4, 5], index=["a", "b", "d", "0", "12"])]
)
def test_df_sr_binop_col_order(gsr, op):
colnames = [0, 1, 2]
data = [[0, 2, 5], [3, None, 5], [6, 7, np.nan]]
data = dict(zip(colnames, data))
gdf = cudf.DataFrame(data)
pdf = pd.DataFrame.from_dict(data)
psr = gsr.to_pandas()
expect = op(pdf, psr).astype("float")
out = op(gdf, gsr).astype("float")
got = out[expect.columns]
assert_eq(expect, got)
@pytest.mark.parametrize("set_index", [None, "A", "C", "D"])
@pytest.mark.parametrize("index", [True, False])
@pytest.mark.parametrize("deep", [True, False])
def test_memory_usage(deep, index, set_index):
# Testing numerical/datetime by comparing with pandas
# (string and categorical columns will be different)
rows = int(100)
df = pd.DataFrame(
{
"A": np.arange(rows, dtype="int64"),
"B": np.arange(rows, dtype="int32"),
"C": np.arange(rows, dtype="float64"),
}
)
df["D"] = pd.to_datetime(df.A)
if set_index:
df = df.set_index(set_index)
gdf = cudf.from_pandas(df)
if index and set_index is None:
# Special Case: Assume RangeIndex size == 0
assert gdf.index.memory_usage(deep=deep) == 0
else:
# Check for Series only
assert df["B"].memory_usage(index=index, deep=deep) == gdf[
"B"
].memory_usage(index=index, deep=deep)
# Check for entire DataFrame
assert_eq(
df.memory_usage(index=index, deep=deep).sort_index(),
gdf.memory_usage(index=index, deep=deep).sort_index(),
)
@pytest.mark.xfail
def test_memory_usage_string():
rows = int(100)
df = pd.DataFrame(
{
"A": np.arange(rows, dtype="int32"),
"B": np.random.choice(["apple", "banana", "orange"], rows),
}
)
gdf = cudf.from_pandas(df)
# Check deep=False (should match pandas)
assert gdf.B.memory_usage(deep=False, index=False) == df.B.memory_usage(
deep=False, index=False
)
# Check string column
assert gdf.B.memory_usage(deep=True, index=False) == df.B.memory_usage(
deep=True, index=False
)
# Check string index
assert gdf.set_index("B").index.memory_usage(
deep=True
) == df.B.memory_usage(deep=True, index=False)
def test_memory_usage_cat():
rows = int(100)
df = pd.DataFrame(
{
"A": np.arange(rows, dtype="int32"),
"B": np.random.choice(["apple", "banana", "orange"], rows),
}
)
df["B"] = df.B.astype("category")
gdf = cudf.from_pandas(df)
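    # Expected device usage for the categorical column: the size of its
    # category labels plus the size of its per-row integer codes.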
expected = (
gdf.B._column.cat().categories.__sizeof__()
+ gdf.B._column.cat().codes.__sizeof__()
)
# Check cat column
assert gdf.B.memory_usage(deep=True, index=False) == expected
# Check cat index
assert gdf.set_index("B").index.memory_usage(deep=True) == expected
def test_memory_usage_list():
df = cudf.DataFrame({"A": [[0, 1, 2, 3], [4, 5, 6], [7, 8], [9]]})
expected = (
df.A._column.offsets._memory_usage()
+ df.A._column.elements._memory_usage()
)
assert expected == df.A.memory_usage()
@pytest.mark.xfail
def test_memory_usage_multi():
rows = int(100)
deep = True
df = pd.DataFrame(
{
"A": np.arange(rows, dtype="int32"),
"B": np.random.choice(np.arange(3, dtype="int64"), rows),
"C": np.random.choice(np.arange(3, dtype="float64"), rows),
}
).set_index(["B", "C"])
gdf = cudf.from_pandas(df)
# Assume MultiIndex memory footprint is just that
# of the underlying columns, levels, and codes
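    # Two 8-byte index source columns ("B" int64, "C" float64) give rows * 16;
    # the two code columns are also 8 bytes per row (rows * 16); and each of
    # the two levels stores 3 unique 8-byte values (3 * 8 each).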
expect = rows * 16 # Source Columns
expect += rows * 16 # Codes
expect += 3 * 8 # Level 0
expect += 3 * 8 # Level 1
assert expect == gdf.index.memory_usage(deep=deep)
@pytest.mark.parametrize(
"list_input",
[
pytest.param([1, 2, 3, 4], id="smaller"),
pytest.param([1, 2, 3, 4, 5, 6], id="larger"),
],
)
@pytest.mark.parametrize(
"key",
[
pytest.param("list_test", id="new_column"),
pytest.param("id", id="existing_column"),
],
)
def test_setitem_diff_size_list(list_input, key):
gdf = cudf.datasets.randomdata(5)
with pytest.raises(
ValueError, match=("All columns must be of equal length")
):
gdf[key] = list_input
@pytest.mark.parametrize(
"series_input",
[
pytest.param(cudf.Series([1, 2, 3, 4]), id="smaller_cudf"),
pytest.param(cudf.Series([1, 2, 3, 4, 5, 6]), id="larger_cudf"),
pytest.param(cudf.Series([1, 2, 3], index=[4, 5, 6]), id="index_cudf"),
pytest.param(pd.Series([1, 2, 3, 4]), id="smaller_pandas"),
pytest.param(pd.Series([1, 2, 3, 4, 5, 6]), id="larger_pandas"),
pytest.param(pd.Series([1, 2, 3], index=[4, 5, 6]), id="index_pandas"),
],
)
@pytest.mark.parametrize(
"key",
[
pytest.param("list_test", id="new_column"),
pytest.param("id", id="existing_column"),
],
)
def test_setitem_diff_size_series(series_input, key):
gdf = cudf.datasets.randomdata(5)
pdf = gdf.to_pandas()
pandas_input = series_input
if isinstance(pandas_input, cudf.Series):
pandas_input = pandas_input.to_pandas()
expect = pdf
expect[key] = pandas_input
got = gdf
got[key] = series_input
    # Pandas uses NaN and typecasts to float64 if there are missing values
    # after alignment, so typecast both to float64 for the equality comparison.
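    # (e.g. assigning a 4-element Series to a 5-row frame leaves one missing
    # value after alignment, so pandas upcasts the column to float64)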
expect = expect.astype("float64")
got = got.astype("float64")
assert_eq(expect, got)
def test_tupleize_cols_False_set():
pdf = pd.DataFrame()
gdf = cudf.DataFrame()
pdf[("a", "b")] = [1]
gdf[("a", "b")] = [1]
assert_eq(pdf, gdf)
assert_eq(pdf.columns, gdf.columns)
def test_init_multiindex_from_dict():
pdf = pd.DataFrame({("a", "b"): [1]})
gdf = cudf.DataFrame({("a", "b"): [1]})
assert_eq(pdf, gdf)
assert_eq(pdf.columns, gdf.columns)
def test_change_column_dtype_in_empty():
pdf = pd.DataFrame({"a": [], "b": []})
gdf = cudf.from_pandas(pdf)
assert_eq(pdf, gdf)
pdf["b"] = pdf["b"].astype("int64")
gdf["b"] = gdf["b"].astype("int64")
assert_eq(pdf, gdf)
def test_dataframe_from_table_empty_index():
df = cudf.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
odict = df._data
tbl = cudf._lib.table.Table(odict)
result = cudf.DataFrame._from_table(tbl) # noqa: F841
@pytest.mark.parametrize("dtype", ["int64", "str"])
def test_dataframe_from_dictionary_series_same_name_index(dtype):
pd_idx1 = pd.Index([1, 2, 0], name="test_index").astype(dtype)
pd_idx2 = pd.Index([2, 0, 1], name="test_index").astype(dtype)
pd_series1 = pd.Series([1, 2, 3], index=pd_idx1)
pd_series2 = pd.Series([1, 2, 3], index=pd_idx2)
gd_idx1 = cudf.from_pandas(pd_idx1)
gd_idx2 = cudf.from_pandas(pd_idx2)
gd_series1 = cudf.Series([1, 2, 3], index=gd_idx1)
gd_series2 = cudf.Series([1, 2, 3], index=gd_idx2)
expect = pd.DataFrame({"a": pd_series1, "b": pd_series2})
got = cudf.DataFrame({"a": gd_series1, "b": gd_series2})
if dtype == "str":
# Pandas actually loses its index name erroneously here...
expect.index.name = "test_index"
assert_eq(expect, got)
assert expect.index.names == got.index.names
@pytest.mark.parametrize(
"arg", [slice(2, 8, 3), slice(1, 20, 4), slice(-2, -6, -2)]
)
def test_dataframe_strided_slice(arg):
mul = pd.DataFrame(
{
"Index": [1, 2, 3, 4, 5, 6, 7, 8, 9],
"AlphaIndex": ["a", "b", "c", "d", "e", "f", "g", "h", "i"],
}
)
pdf = pd.DataFrame(
{"Val": [10, 9, 8, 7, 6, 5, 4, 3, 2]},
index=pd.MultiIndex.from_frame(mul),
)
gdf = cudf.DataFrame.from_pandas(pdf)
expect = pdf[arg]
got = gdf[arg]
assert_eq(expect, got)
@pytest.mark.parametrize(
"data,condition,other,error",
[
(pd.Series(range(5)), pd.Series(range(5)) > 0, None, None),
(pd.Series(range(5)), pd.Series(range(5)) > 1, None, None),
(pd.Series(range(5)), pd.Series(range(5)) > 1, 10, None),
(
pd.Series(range(5)),
pd.Series(range(5)) > 1,
pd.Series(range(5, 10)),
None,
),
(
pd.DataFrame(np.arange(10).reshape(-1, 2), columns=["A", "B"]),
(
pd.DataFrame(np.arange(10).reshape(-1, 2), columns=["A", "B"])
% 3
)
== 0,
-pd.DataFrame(np.arange(10).reshape(-1, 2), columns=["A", "B"]),
None,
),
(
| pd.DataFrame({"a": [1, 2, np.nan], "b": [4, np.nan, 6]}) | pandas.DataFrame |
from typing import List, Optional, Union
import pandas as pd
from sklearn.base import BaseEstimator, TransformerMixin, clone
from feature_engine.dataframe_checks import (
_check_input_matches_training_df,
_is_dataframe,
)
from feature_engine.validation import _return_tags
from feature_engine.variable_manipulation import (
_check_input_parameter_variables,
_find_all_variables,
_find_or_check_numerical_variables,
)
class SklearnTransformerWrapper(BaseEstimator, TransformerMixin):
"""
Wrapper to apply Scikit-learn transformers to a selected group of variables. It
works with transformers like the SimpleImputer, OrdinalEncoder, OneHotEncoder, all
the scalers and also the transformers for feature selection.
Parameters
----------
transformer: sklearn transformer
The desired Scikit-learn transformer.
    variables: list, default=None
        The list of variables to be transformed. If None, the wrapper selects all
        numerical variables for most transformers; for the SimpleImputer,
        OrdinalEncoder and OneHotEncoder it selects all variables in the dataset.
Attributes
----------
transformer_:
The fitted Scikit-learn transformer.
variables_:
The group of variables that will be transformed.
n_features_in_:
The number of features in the train set used in fit.
Methods
-------
fit:
Fit Scikit-learn transformer
transform:
Transform data with Scikit-learn transformer
fit_transform:
Fit to data, then transform it.
"""
def __init__(
self,
transformer,
variables: Union[None, int, str, List[Union[str, int]]] = None,
) -> None:
if not issubclass(transformer.__class__, BaseEstimator):
raise TypeError(
"transformer expected a Scikit-learn transformer, "
f"got {transformer} instead."
)
self.transformer = transformer
self.variables = _check_input_parameter_variables(variables)
    def fit(self, X: pd.DataFrame, y: Optional[pd.Series] = None):
"""
Fits the Scikit-learn transformer to the selected variables.
If you enter None in the variables parameter, all variables will be
automatically transformed by the OneHotEncoder, OrdinalEncoder or
SimpleImputer. For the rest of the transformers, only the numerical variables
will be selected and transformed.
If you enter a list in the variables attribute, the SklearnTransformerWrapper
will check that those variables exist in the dataframe and are of type
numeric for all transformers except the OneHotEncoder, OrdinalEncoder or
SimpleImputer, which also accept categorical variables.
Parameters
----------
X: Pandas DataFrame
The dataset to fit the transformer
y: pandas Series, default=None
The target variable.
Raises
------
TypeError
If the input is not a Pandas DataFrame
Returns
-------
self
"""
# check input dataframe
X = _is_dataframe(X)
self.transformer_ = clone(self.transformer)
if (
self.transformer_.__class__.__name__ == "OneHotEncoder"
and self.transformer_.sparse
):
raise AttributeError(
"The SklearnTransformerWrapper can only wrap the OneHotEncoder if you "
"set its sparse attribute to False"
)
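        # Encoders and the SimpleImputer also accept categorical variables,
        # so select every requested variable for them; all other transformers
        # are restricted to numerical variables (see the class docstring).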
if self.transformer_.__class__.__name__ in [
"OneHotEncoder",
"OrdinalEncoder",
"SimpleImputer",
]:
self.variables_ = _find_all_variables(X, self.variables)
else:
self.variables_ = _find_or_check_numerical_variables(X, self.variables)
self.transformer_.fit(X[self.variables_], y)
self.n_features_in_ = X.shape[1]
return self
def transform(self, X: pd.DataFrame) -> pd.DataFrame:
"""
Apply the transformation to the dataframe. Only the selected variables will be
modified.
        If the transformer is the OneHotEncoder, the dummy features will be
        concatenated to the input dataset. Note that the original categorical
        variables are not removed from the dataset after encoding; if you want
        them removed, please use Feature-engine's OneHotEncoder instead.
Parameters
----------
X: Pandas DataFrame
The data to transform
Raises
------
TypeError
If the input is not a Pandas DataFrame
Returns
-------
X: Pandas DataFrame
The transformed dataset.
"""
# check that input is a dataframe
X = _is_dataframe(X)
        # Check that the input data contains the same number of columns as
        # the dataframe used to fit the transformer.
_check_input_matches_training_df(X, self.n_features_in_)
if self.transformer_.__class__.__name__ == "OneHotEncoder":
ohe_results_as_df = pd.DataFrame(
data=self.transformer_.transform(X[self.variables_]),
columns=self.transformer_.get_feature_names(self.variables_),
)
X = | pd.concat([X, ohe_results_as_df], axis=1) | pandas.concat |
# Copyright (c) 2018-2021, NVIDIA CORPORATION.
import array as arr
import datetime
import io
import operator
import random
import re
import string
import textwrap
from copy import copy
import cupy
import numpy as np
import pandas as pd
import pyarrow as pa
import pytest
from numba import cuda
import cudf
from cudf.core._compat import PANDAS_GE_110, PANDAS_GE_120
from cudf.core.column import column
from cudf.tests import utils
from cudf.tests.utils import (
ALL_TYPES,
DATETIME_TYPES,
NUMERIC_TYPES,
assert_eq,
assert_exceptions_equal,
does_not_raise,
gen_rand,
)
def test_init_via_list_of_tuples():
data = [
(5, "cats", "jump", np.nan),
(2, "dogs", "dig", 7.5),
(3, "cows", "moo", -2.1, "occasionally"),
]
pdf = pd.DataFrame(data)
gdf = cudf.DataFrame(data)
assert_eq(pdf, gdf)
def _dataframe_na_data():
return [
pd.DataFrame(
{
"a": [0, 1, 2, np.nan, 4, None, 6],
"b": [np.nan, None, "u", "h", "d", "a", "m"],
},
index=["q", "w", "e", "r", "t", "y", "u"],
),
pd.DataFrame({"a": [0, 1, 2, 3, 4], "b": ["a", "b", "u", "h", "d"]}),
pd.DataFrame(
{
"a": [None, None, np.nan, None],
"b": [np.nan, None, np.nan, None],
}
),
pd.DataFrame({"a": []}),
pd.DataFrame({"a": [np.nan], "b": [None]}),
pd.DataFrame({"a": ["a", "b", "c", None, "e"]}),
pd.DataFrame({"a": ["a", "b", "c", "d", "e"]}),
]
@pytest.mark.parametrize("rows", [0, 1, 2, 100])
def test_init_via_list_of_empty_tuples(rows):
data = [()] * rows
pdf = pd.DataFrame(data)
gdf = cudf.DataFrame(data)
assert_eq(
pdf,
gdf,
check_like=True,
check_column_type=False,
check_index_type=False,
)
@pytest.mark.parametrize(
"dict_of_series",
[
{"a": pd.Series([1.0, 2.0, 3.0])},
{"a": pd.Series([1.0, 2.0, 3.0], index=[4, 5, 6])},
{
"a": pd.Series([1.0, 2.0, 3.0], index=[4, 5, 6]),
"b": pd.Series([1.0, 2.0, 4.0], index=[1, 2, 3]),
},
{"a": [1, 2, 3], "b": pd.Series([1.0, 2.0, 3.0], index=[4, 5, 6])},
{
"a": pd.Series([1.0, 2.0, 3.0], index=["a", "b", "c"]),
"b": pd.Series([1.0, 2.0, 4.0], index=["c", "d", "e"]),
},
{
"a": pd.Series(
["a", "b", "c"],
index=pd.MultiIndex.from_tuples([(1, 2), (1, 3), (2, 3)]),
),
"b": pd.Series(
["a", " b", "d"],
index=pd.MultiIndex.from_tuples([(1, 2), (1, 3), (2, 3)]),
),
},
],
)
def test_init_from_series_align(dict_of_series):
pdf = pd.DataFrame(dict_of_series)
gdf = cudf.DataFrame(dict_of_series)
assert_eq(pdf, gdf)
for key in dict_of_series:
if isinstance(dict_of_series[key], pd.Series):
dict_of_series[key] = cudf.Series(dict_of_series[key])
gdf = cudf.DataFrame(dict_of_series)
assert_eq(pdf, gdf)
@pytest.mark.parametrize(
("dict_of_series", "expectation"),
[
(
{
"a": pd.Series(["a", "b", "c"], index=[4, 4, 5]),
"b": pd.Series(["a", "b", "c"], index=[4, 5, 6]),
},
pytest.raises(
ValueError, match="Cannot align indices with non-unique values"
),
),
(
{
"a": pd.Series(["a", "b", "c"], index=[4, 4, 5]),
"b": pd.Series(["a", "b", "c"], index=[4, 4, 5]),
},
does_not_raise(),
),
],
)
def test_init_from_series_align_nonunique(dict_of_series, expectation):
with expectation:
gdf = cudf.DataFrame(dict_of_series)
if expectation == does_not_raise():
pdf = pd.DataFrame(dict_of_series)
assert_eq(pdf, gdf)
def test_init_unaligned_with_index():
pdf = pd.DataFrame(
{
"a": pd.Series([1.0, 2.0, 3.0], index=[4, 5, 6]),
"b": pd.Series([1.0, 2.0, 3.0], index=[1, 2, 3]),
},
index=[7, 8, 9],
)
gdf = cudf.DataFrame(
{
"a": cudf.Series([1.0, 2.0, 3.0], index=[4, 5, 6]),
"b": cudf.Series([1.0, 2.0, 3.0], index=[1, 2, 3]),
},
index=[7, 8, 9],
)
assert_eq(pdf, gdf, check_dtype=False)
def test_series_basic():
    # Make series from a numpy array
a1 = np.arange(10, dtype=np.float64)
series = cudf.Series(a1)
assert len(series) == 10
np.testing.assert_equal(series.to_array(), np.hstack([a1]))
def test_series_from_cupy_scalars():
data = [0.1, 0.2, 0.3]
data_np = np.array(data)
data_cp = cupy.array(data)
s_np = cudf.Series([data_np[0], data_np[2]])
s_cp = cudf.Series([data_cp[0], data_cp[2]])
assert_eq(s_np, s_cp)
@pytest.mark.parametrize("a", [[1, 2, 3], [1, 10, 30]])
@pytest.mark.parametrize("b", [[4, 5, 6], [-11, -100, 30]])
def test_append_index(a, b):
df = pd.DataFrame()
df["a"] = a
df["b"] = b
gdf = cudf.DataFrame()
gdf["a"] = a
gdf["b"] = b
    # Check the default index after appending two columns (Series)
expected = df.a.append(df.b)
actual = gdf.a.append(gdf.b)
assert len(expected) == len(actual)
assert_eq(expected.index, actual.index)
expected = df.a.append(df.b, ignore_index=True)
actual = gdf.a.append(gdf.b, ignore_index=True)
assert len(expected) == len(actual)
assert_eq(expected.index, actual.index)
def test_series_init_none():
# test for creating empty series
# 1: without initializing
sr1 = cudf.Series()
got = sr1.to_string()
expect = "Series([], dtype: float64)"
# values should match despite whitespace difference
assert got.split() == expect.split()
# 2: Using `None` as an initializer
sr2 = cudf.Series(None)
got = sr2.to_string()
expect = "Series([], dtype: float64)"
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_basic():
np.random.seed(0)
df = cudf.DataFrame()
    # Populate with a numpy arange (copied to device memory)
df["keys"] = np.arange(10, dtype=np.float64)
np.testing.assert_equal(df["keys"].to_array(), np.arange(10))
assert len(df) == 10
# Populate with numpy array
rnd_vals = np.random.random(10)
df["vals"] = rnd_vals
np.testing.assert_equal(df["vals"].to_array(), rnd_vals)
assert len(df) == 10
assert tuple(df.columns) == ("keys", "vals")
# Make another dataframe
df2 = cudf.DataFrame()
df2["keys"] = np.array([123], dtype=np.float64)
df2["vals"] = np.array([321], dtype=np.float64)
# Concat
df = cudf.concat([df, df2])
assert len(df) == 11
hkeys = np.asarray(np.arange(10, dtype=np.float64).tolist() + [123])
hvals = np.asarray(rnd_vals.tolist() + [321])
np.testing.assert_equal(df["keys"].to_array(), hkeys)
np.testing.assert_equal(df["vals"].to_array(), hvals)
# As matrix
mat = df.as_matrix()
expect = np.vstack([hkeys, hvals]).T
np.testing.assert_equal(mat, expect)
# test dataframe with tuple name
df_tup = cudf.DataFrame()
data = np.arange(10)
df_tup[(1, "foobar")] = data
np.testing.assert_equal(data, df_tup[(1, "foobar")].to_array())
df = cudf.DataFrame(pd.DataFrame({"a": [1, 2, 3], "c": ["a", "b", "c"]}))
pdf = pd.DataFrame(pd.DataFrame({"a": [1, 2, 3], "c": ["a", "b", "c"]}))
assert_eq(df, pdf)
gdf = cudf.DataFrame({"id": [0, 1], "val": [None, None]})
gdf["val"] = gdf["val"].astype("int")
assert gdf["val"].isnull().all()
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame({"a": range(10), "b": range(10, 20), "c": range(1, 11)}),
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5}
),
],
)
@pytest.mark.parametrize(
"columns", [["a"], ["b"], "a", "b", ["a", "b"]],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_columns(pdf, columns, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(columns=columns, inplace=inplace)
actual = gdf.drop(columns=columns, inplace=inplace)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame({"a": range(10), "b": range(10, 20), "c": range(1, 11)}),
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5}
),
],
)
@pytest.mark.parametrize(
"labels",
[[1], [0], 1, 5, [5, 9], pd.Index([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_labels_axis_0(pdf, labels, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(labels=labels, axis=0, inplace=inplace)
actual = gdf.drop(labels=labels, axis=0, inplace=inplace)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame({"a": range(10), "b": range(10, 20), "c": range(1, 11)}),
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5}
),
],
)
@pytest.mark.parametrize(
"index",
[[1], [0], 1, 5, [5, 9], pd.Index([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_index(pdf, index, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(index=index, inplace=inplace)
actual = gdf.drop(index=index, inplace=inplace)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5},
index=pd.MultiIndex(
levels=[
["lama", "cow", "falcon"],
["speed", "weight", "length"],
],
codes=[
[0, 0, 0, 1, 1, 1, 2, 2, 2, 1],
[0, 1, 2, 0, 1, 2, 0, 1, 2, 1],
],
),
)
],
)
@pytest.mark.parametrize(
"index,level",
[
("cow", 0),
("lama", 0),
("falcon", 0),
("speed", 1),
("weight", 1),
("length", 1),
pytest.param(
"cow",
None,
marks=pytest.mark.xfail(
reason="https://github.com/pandas-dev/pandas/issues/36293"
),
),
pytest.param(
"lama",
None,
marks=pytest.mark.xfail(
reason="https://github.com/pandas-dev/pandas/issues/36293"
),
),
pytest.param(
"falcon",
None,
marks=pytest.mark.xfail(
reason="https://github.com/pandas-dev/pandas/issues/36293"
),
),
],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_multiindex(pdf, index, level, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(index=index, inplace=inplace, level=level)
actual = gdf.drop(index=index, inplace=inplace, level=level)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame({"a": range(10), "b": range(10, 20), "c": range(1, 11)}),
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5}
),
],
)
@pytest.mark.parametrize(
"labels", [["a"], ["b"], "a", "b", ["a", "b"]],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_labels_axis_1(pdf, labels, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(labels=labels, axis=1, inplace=inplace)
actual = gdf.drop(labels=labels, axis=1, inplace=inplace)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
def test_dataframe_drop_error():
df = cudf.DataFrame({"a": [1], "b": [2], "c": [3]})
pdf = df.to_pandas()
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"columns": "d"}),
rfunc_args_and_kwargs=([], {"columns": "d"}),
expected_error_message="column 'd' does not exist",
)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"columns": ["a", "d", "b"]}),
rfunc_args_and_kwargs=([], {"columns": ["a", "d", "b"]}),
expected_error_message="column 'd' does not exist",
)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=(["a"], {"columns": "a", "axis": 1}),
rfunc_args_and_kwargs=(["a"], {"columns": "a", "axis": 1}),
expected_error_message="Cannot specify both",
)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"axis": 1}),
rfunc_args_and_kwargs=([], {"axis": 1}),
expected_error_message="Need to specify at least",
)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([[2, 0]],),
rfunc_args_and_kwargs=([[2, 0]],),
expected_error_message="One or more values not found in axis",
)
def test_dataframe_drop_raises():
df = cudf.DataFrame(
{"a": [1, 2, 3], "c": [10, 20, 30]}, index=["x", "y", "z"]
)
pdf = df.to_pandas()
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=(["p"],),
rfunc_args_and_kwargs=(["p"],),
expected_error_message="One or more values not found in axis",
)
# label dtype mismatch
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([3],),
rfunc_args_and_kwargs=([3],),
expected_error_message="One or more values not found in axis",
)
expect = pdf.drop("p", errors="ignore")
actual = df.drop("p", errors="ignore")
assert_eq(actual, expect)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"columns": "p"}),
rfunc_args_and_kwargs=([], {"columns": "p"}),
expected_error_message="column 'p' does not exist",
)
expect = pdf.drop(columns="p", errors="ignore")
actual = df.drop(columns="p", errors="ignore")
assert_eq(actual, expect)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"labels": "p", "axis": 1}),
rfunc_args_and_kwargs=([], {"labels": "p", "axis": 1}),
expected_error_message="column 'p' does not exist",
)
expect = pdf.drop(labels="p", axis=1, errors="ignore")
actual = df.drop(labels="p", axis=1, errors="ignore")
assert_eq(actual, expect)
def test_dataframe_column_add_drop_via_setitem():
df = cudf.DataFrame()
data = np.asarray(range(10))
df["a"] = data
df["b"] = data
assert tuple(df.columns) == ("a", "b")
del df["a"]
assert tuple(df.columns) == ("b",)
df["c"] = data
assert tuple(df.columns) == ("b", "c")
df["a"] = data
assert tuple(df.columns) == ("b", "c", "a")
def test_dataframe_column_set_via_attr():
data_0 = np.asarray([0, 2, 4, 5])
data_1 = np.asarray([1, 4, 2, 3])
data_2 = np.asarray([2, 0, 3, 0])
df = cudf.DataFrame({"a": data_0, "b": data_1, "c": data_2})
for i in range(10):
df.c = df.a
assert assert_eq(df.c, df.a, check_names=False)
assert tuple(df.columns) == ("a", "b", "c")
df.c = df.b
assert assert_eq(df.c, df.b, check_names=False)
assert tuple(df.columns) == ("a", "b", "c")
def test_dataframe_column_drop_via_attr():
df = cudf.DataFrame({"a": []})
with pytest.raises(AttributeError):
del df.a
assert tuple(df.columns) == tuple("a")
@pytest.mark.parametrize("axis", [0, "index"])
def test_dataframe_index_rename(axis):
pdf = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]})
gdf = cudf.DataFrame.from_pandas(pdf)
expect = pdf.rename(mapper={1: 5, 2: 6}, axis=axis)
got = gdf.rename(mapper={1: 5, 2: 6}, axis=axis)
assert_eq(expect, got)
expect = pdf.rename(index={1: 5, 2: 6})
got = gdf.rename(index={1: 5, 2: 6})
assert_eq(expect, got)
expect = pdf.rename({1: 5, 2: 6})
got = gdf.rename({1: 5, 2: 6})
assert_eq(expect, got)
# `pandas` can support indexes with mixed values. We throw a
# `NotImplementedError`.
with pytest.raises(NotImplementedError):
gdf.rename(mapper={1: "x", 2: "y"}, axis=axis)
def test_dataframe_MI_rename():
gdf = cudf.DataFrame(
{"a": np.arange(10), "b": np.arange(10), "c": np.arange(10)}
)
gdg = gdf.groupby(["a", "b"]).count()
pdg = gdg.to_pandas()
expect = pdg.rename(mapper={1: 5, 2: 6}, axis=0)
got = gdg.rename(mapper={1: 5, 2: 6}, axis=0)
assert_eq(expect, got)
@pytest.mark.parametrize("axis", [1, "columns"])
def test_dataframe_column_rename(axis):
pdf = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]})
gdf = cudf.DataFrame.from_pandas(pdf)
expect = pdf.rename(mapper=lambda name: 2 * name, axis=axis)
got = gdf.rename(mapper=lambda name: 2 * name, axis=axis)
assert_eq(expect, got)
expect = pdf.rename(columns=lambda name: 2 * name)
got = gdf.rename(columns=lambda name: 2 * name)
assert_eq(expect, got)
rename_mapper = {"a": "z", "b": "y", "c": "x"}
expect = pdf.rename(columns=rename_mapper)
got = gdf.rename(columns=rename_mapper)
assert_eq(expect, got)
def test_dataframe_pop():
pdf = pd.DataFrame(
{"a": [1, 2, 3], "b": ["x", "y", "z"], "c": [7.0, 8.0, 9.0]}
)
gdf = cudf.DataFrame.from_pandas(pdf)
# Test non-existing column error
with pytest.raises(KeyError) as raises:
gdf.pop("fake_colname")
raises.match("fake_colname")
# check pop numeric column
pdf_pop = pdf.pop("a")
gdf_pop = gdf.pop("a")
assert_eq(pdf_pop, gdf_pop)
assert_eq(pdf, gdf)
# check string column
pdf_pop = pdf.pop("b")
gdf_pop = gdf.pop("b")
assert_eq(pdf_pop, gdf_pop)
assert_eq(pdf, gdf)
# check float column and empty dataframe
pdf_pop = pdf.pop("c")
gdf_pop = gdf.pop("c")
assert_eq(pdf_pop, gdf_pop)
assert_eq(pdf, gdf)
# check empty dataframe edge case
empty_pdf = pd.DataFrame(columns=["a", "b"])
empty_gdf = cudf.DataFrame(columns=["a", "b"])
pb = empty_pdf.pop("b")
gb = empty_gdf.pop("b")
assert len(pb) == len(gb)
assert empty_pdf.empty and empty_gdf.empty
@pytest.mark.parametrize("nelem", [0, 3, 100, 1000])
def test_dataframe_astype(nelem):
df = cudf.DataFrame()
data = np.asarray(range(nelem), dtype=np.int32)
df["a"] = data
assert df["a"].dtype is np.dtype(np.int32)
df["b"] = df["a"].astype(np.float32)
assert df["b"].dtype is np.dtype(np.float32)
np.testing.assert_equal(df["a"].to_array(), df["b"].to_array())
@pytest.mark.parametrize("nelem", [0, 100])
def test_index_astype(nelem):
df = cudf.DataFrame()
data = np.asarray(range(nelem), dtype=np.int32)
df["a"] = data
assert df.index.dtype is np.dtype(np.int64)
df.index = df.index.astype(np.float32)
assert df.index.dtype is np.dtype(np.float32)
df["a"] = df["a"].astype(np.float32)
np.testing.assert_equal(df.index.to_array(), df["a"].to_array())
df["b"] = df["a"]
df = df.set_index("b")
df["a"] = df["a"].astype(np.int16)
df.index = df.index.astype(np.int16)
np.testing.assert_equal(df.index.to_array(), df["a"].to_array())
def test_dataframe_to_string():
pd.options.display.max_rows = 5
pd.options.display.max_columns = 8
# Test basic
df = cudf.DataFrame(
{"a": [1, 2, 3, 4, 5, 6], "b": [11, 12, 13, 14, 15, 16]}
)
string = str(df)
assert string.splitlines()[-1] == "[6 rows x 2 columns]"
# Test skipped columns
df = cudf.DataFrame(
{
"a": [1, 2, 3, 4, 5, 6],
"b": [11, 12, 13, 14, 15, 16],
"c": [11, 12, 13, 14, 15, 16],
"d": [11, 12, 13, 14, 15, 16],
}
)
string = df.to_string()
assert string.splitlines()[-1] == "[6 rows x 4 columns]"
# Test masked
df = cudf.DataFrame(
{"a": [1, 2, 3, 4, 5, 6], "b": [11, 12, 13, 14, 15, 16]}
)
data = np.arange(6)
mask = np.zeros(1, dtype=cudf.utils.utils.mask_dtype)
mask[0] = 0b00101101
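    # 0b00101101 sets bits 0, 2, 3 and 5 (least-significant bit first), so
    # rows 0, 2, 3 and 5 are valid while rows 1 and 4 are null.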
masked = cudf.Series.from_masked_array(data, mask)
assert masked.null_count == 2
df["c"] = masked
# check data
values = masked.copy()
validids = [0, 2, 3, 5]
densearray = masked.to_array()
np.testing.assert_equal(data[validids], densearray)
    # valid positions are correct
for i in validids:
assert data[i] == values[i]
# null position is correct
for i in range(len(values)):
if i not in validids:
assert values[i] is cudf.NA
pd.options.display.max_rows = 10
got = df.to_string()
expect = """
a b c
0 1 11 0
1 2 12 <NA>
2 3 13 2
3 4 14 3
4 5 15 <NA>
5 6 16 5
"""
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_to_string_wide(monkeypatch):
monkeypatch.setenv("COLUMNS", "79")
# Test basic
df = cudf.DataFrame()
for i in range(100):
df["a{}".format(i)] = list(range(3))
pd.options.display.max_columns = 0
got = df.to_string()
expect = """
a0 a1 a2 a3 a4 a5 a6 a7 ... a92 a93 a94 a95 a96 a97 a98 a99
0 0 0 0 0 0 0 0 0 ... 0 0 0 0 0 0 0 0
1 1 1 1 1 1 1 1 1 ... 1 1 1 1 1 1 1 1
2 2 2 2 2 2 2 2 2 ... 2 2 2 2 2 2 2 2
[3 rows x 100 columns]
"""
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_empty_to_string():
# Test for printing empty dataframe
df = cudf.DataFrame()
got = df.to_string()
expect = "Empty DataFrame\nColumns: []\nIndex: []\n"
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_emptycolumns_to_string():
# Test for printing dataframe having empty columns
df = cudf.DataFrame()
df["a"] = []
df["b"] = []
got = df.to_string()
expect = "Empty DataFrame\nColumns: [a, b]\nIndex: []\n"
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_copy():
# Test for copying the dataframe using python copy pkg
df = cudf.DataFrame()
df["a"] = [1, 2, 3]
df2 = copy(df)
df2["b"] = [4, 5, 6]
got = df.to_string()
expect = """
a
0 1
1 2
2 3
"""
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_copy_shallow():
# Test for copy dataframe using class method
df = cudf.DataFrame()
df["a"] = [1, 2, 3]
df2 = df.copy()
df2["b"] = [4, 2, 3]
got = df.to_string()
expect = """
a
0 1
1 2
2 3
"""
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_dtypes():
dtypes = pd.Series(
[np.int32, np.float32, np.float64], index=["c", "a", "b"]
)
df = cudf.DataFrame(
{k: np.ones(10, dtype=v) for k, v in dtypes.iteritems()}
)
assert df.dtypes.equals(dtypes)
def test_dataframe_add_col_to_object_dataframe():
# Test for adding column to an empty object dataframe
cols = ["a", "b", "c"]
df = pd.DataFrame(columns=cols, dtype="str")
data = {k: v for (k, v) in zip(cols, [["a"] for _ in cols])}
gdf = cudf.DataFrame(data)
gdf = gdf[:0]
assert gdf.dtypes.equals(df.dtypes)
gdf["a"] = [1]
df["a"] = [10]
assert gdf.dtypes.equals(df.dtypes)
gdf["b"] = [1.0]
df["b"] = [10.0]
assert gdf.dtypes.equals(df.dtypes)
def test_dataframe_dir_and_getattr():
df = cudf.DataFrame(
{
"a": np.ones(10),
"b": np.ones(10),
"not an id": np.ones(10),
"oop$": np.ones(10),
}
)
o = dir(df)
assert {"a", "b"}.issubset(o)
assert "not an id" not in o
assert "oop$" not in o
# Getattr works
assert df.a.equals(df["a"])
assert df.b.equals(df["b"])
with pytest.raises(AttributeError):
df.not_a_column
@pytest.mark.parametrize("order", ["C", "F"])
def test_empty_dataframe_as_gpu_matrix(order):
df = cudf.DataFrame()
# Check fully empty dataframe.
mat = df.as_gpu_matrix(order=order).copy_to_host()
assert mat.shape == (0, 0)
df = cudf.DataFrame()
nelem = 123
for k in "abc":
df[k] = np.random.random(nelem)
# Check all columns in empty dataframe.
mat = df.head(0).as_gpu_matrix(order=order).copy_to_host()
assert mat.shape == (0, 3)
@pytest.mark.parametrize("order", ["C", "F"])
def test_dataframe_as_gpu_matrix(order):
df = cudf.DataFrame()
nelem = 123
for k in "abcd":
df[k] = np.random.random(nelem)
# Check all columns
mat = df.as_gpu_matrix(order=order).copy_to_host()
assert mat.shape == (nelem, 4)
for i, k in enumerate(df.columns):
np.testing.assert_array_equal(df[k].to_array(), mat[:, i])
# Check column subset
mat = df.as_gpu_matrix(order=order, columns=["a", "c"]).copy_to_host()
assert mat.shape == (nelem, 2)
for i, k in enumerate("ac"):
np.testing.assert_array_equal(df[k].to_array(), mat[:, i])
def test_dataframe_as_gpu_matrix_null_values():
df = cudf.DataFrame()
nelem = 123
na = -10000
refvalues = {}
for k in "abcd":
df[k] = data = np.random.random(nelem)
bitmask = utils.random_bitmask(nelem)
df[k] = df[k].set_mask(bitmask)
boolmask = np.asarray(
utils.expand_bits_to_bytes(bitmask)[:nelem], dtype=np.bool_
)
data[~boolmask] = na
refvalues[k] = data
# Check null value causes error
with pytest.raises(ValueError) as raises:
df.as_gpu_matrix()
raises.match("column 'a' has null values")
for k in df.columns:
df[k] = df[k].fillna(na)
mat = df.as_gpu_matrix().copy_to_host()
for i, k in enumerate(df.columns):
np.testing.assert_array_equal(refvalues[k], mat[:, i])
def test_dataframe_append_empty():
pdf = pd.DataFrame(
{
"key": [1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4],
"value": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
}
)
gdf = cudf.DataFrame.from_pandas(pdf)
gdf["newcol"] = 100
pdf["newcol"] = 100
assert len(gdf["newcol"]) == len(pdf)
assert len(pdf["newcol"]) == len(pdf)
assert_eq(gdf, pdf)
def test_dataframe_setitem_from_masked_object():
ary = np.random.randn(100)
mask = np.zeros(100, dtype=bool)
mask[:20] = True
np.random.shuffle(mask)
ary[mask] = np.nan
test1_null = cudf.Series(ary, nan_as_null=True)
assert test1_null.nullable
assert test1_null.null_count == 20
test1_nan = cudf.Series(ary, nan_as_null=False)
assert test1_nan.null_count == 0
test2_null = cudf.DataFrame.from_pandas(
pd.DataFrame({"a": ary}), nan_as_null=True
)
assert test2_null["a"].nullable
assert test2_null["a"].null_count == 20
test2_nan = cudf.DataFrame.from_pandas(
pd.DataFrame({"a": ary}), nan_as_null=False
)
assert test2_nan["a"].null_count == 0
gpu_ary = cupy.asarray(ary)
test3_null = cudf.Series(gpu_ary, nan_as_null=True)
assert test3_null.nullable
assert test3_null.null_count == 20
test3_nan = cudf.Series(gpu_ary, nan_as_null=False)
assert test3_nan.null_count == 0
test4 = cudf.DataFrame()
lst = [1, 2, None, 4, 5, 6, None, 8, 9]
test4["lst"] = lst
assert test4["lst"].nullable
assert test4["lst"].null_count == 2
def test_dataframe_append_to_empty():
pdf = pd.DataFrame()
pdf["a"] = []
pdf["b"] = [1, 2, 3]
gdf = cudf.DataFrame()
gdf["a"] = []
gdf["b"] = [1, 2, 3]
assert_eq(gdf, pdf)
def test_dataframe_setitem_index_len1():
gdf = cudf.DataFrame()
gdf["a"] = [1]
gdf["b"] = gdf.index._values
np.testing.assert_equal(gdf.b.to_array(), [0])
def test_empty_dataframe_setitem_df():
gdf1 = cudf.DataFrame()
gdf2 = cudf.DataFrame({"a": [1, 2, 3, 4, 5]})
gdf1["a"] = gdf2["a"]
assert_eq(gdf1, gdf2)
def test_assign():
gdf = cudf.DataFrame({"x": [1, 2, 3]})
gdf2 = gdf.assign(y=gdf.x + 1)
assert list(gdf.columns) == ["x"]
assert list(gdf2.columns) == ["x", "y"]
np.testing.assert_equal(gdf2.y.to_array(), [2, 3, 4])
@pytest.mark.parametrize("nrows", [1, 8, 100, 1000])
def test_dataframe_hash_columns(nrows):
gdf = cudf.DataFrame()
data = np.asarray(range(nrows))
data[0] = data[-1] # make first and last the same
gdf["a"] = data
gdf["b"] = gdf.a + 100
out = gdf.hash_columns(["a", "b"])
assert isinstance(out, cupy.ndarray)
assert len(out) == nrows
assert out.dtype == np.int32
# Check default
out_all = gdf.hash_columns()
np.testing.assert_array_equal(cupy.asnumpy(out), cupy.asnumpy(out_all))
# Check single column
out_one = cupy.asnumpy(gdf.hash_columns(["a"]))
# First matches last
assert out_one[0] == out_one[-1]
# Equivalent to the cudf.Series.hash_values()
np.testing.assert_array_equal(cupy.asnumpy(gdf.a.hash_values()), out_one)
@pytest.mark.parametrize("nrows", [3, 10, 100, 1000])
@pytest.mark.parametrize("nparts", [1, 2, 8, 13])
@pytest.mark.parametrize("nkeys", [1, 2])
def test_dataframe_hash_partition(nrows, nparts, nkeys):
np.random.seed(123)
gdf = cudf.DataFrame()
keycols = []
for i in range(nkeys):
keyname = "key{}".format(i)
gdf[keyname] = np.random.randint(0, 7 - i, nrows)
keycols.append(keyname)
gdf["val1"] = np.random.randint(0, nrows * 2, nrows)
got = gdf.partition_by_hash(keycols, nparts=nparts)
# Must return a list
assert isinstance(got, list)
# Must have correct number of partitions
assert len(got) == nparts
# All partitions must be DataFrame type
assert all(isinstance(p, cudf.DataFrame) for p in got)
# Check that all partitions have unique keys
part_unique_keys = set()
for p in got:
if len(p):
            # Take rows of the key columns and build a set of the key-values
unique_keys = set(map(tuple, p.as_matrix(columns=keycols)))
# Ensure that none of the key-values have occurred in other groups
assert not (unique_keys & part_unique_keys)
part_unique_keys |= unique_keys
assert len(part_unique_keys)
@pytest.mark.parametrize("nrows", [3, 10, 50])
def test_dataframe_hash_partition_masked_value(nrows):
gdf = cudf.DataFrame()
gdf["key"] = np.arange(nrows)
gdf["val"] = np.arange(nrows) + 100
bitmask = utils.random_bitmask(nrows)
bytemask = utils.expand_bits_to_bytes(bitmask)
gdf["val"] = gdf["val"].set_mask(bitmask)
parted = gdf.partition_by_hash(["key"], nparts=3)
# Verify that the valid mask is correct
for p in parted:
df = p.to_pandas()
for row in df.itertuples():
valid = bool(bytemask[row.key])
expected_value = row.key + 100 if valid else np.nan
got_value = row.val
assert (expected_value == got_value) or (
np.isnan(expected_value) and np.isnan(got_value)
)
@pytest.mark.parametrize("nrows", [3, 10, 50])
def test_dataframe_hash_partition_masked_keys(nrows):
gdf = cudf.DataFrame()
gdf["key"] = np.arange(nrows)
gdf["val"] = np.arange(nrows) + 100
bitmask = utils.random_bitmask(nrows)
bytemask = utils.expand_bits_to_bytes(bitmask)
gdf["key"] = gdf["key"].set_mask(bitmask)
parted = gdf.partition_by_hash(["key"], nparts=3, keep_index=False)
# Verify that the valid mask is correct
for p in parted:
df = p.to_pandas()
for row in df.itertuples():
valid = bool(bytemask[row.val - 100])
# val is key + 100
expected_value = row.val - 100 if valid else np.nan
got_value = row.key
assert (expected_value == got_value) or (
np.isnan(expected_value) and np.isnan(got_value)
)
@pytest.mark.parametrize("keep_index", [True, False])
def test_dataframe_hash_partition_keep_index(keep_index):
gdf = cudf.DataFrame(
{"val": [1, 2, 3, 4], "key": [3, 2, 1, 4]}, index=[4, 3, 2, 1]
)
expected_df1 = cudf.DataFrame(
{"val": [1], "key": [3]}, index=[4] if keep_index else None
)
expected_df2 = cudf.DataFrame(
{"val": [2, 3, 4], "key": [2, 1, 4]},
index=[3, 2, 1] if keep_index else range(1, 4),
)
expected = [expected_df1, expected_df2]
parts = gdf.partition_by_hash(["key"], nparts=2, keep_index=keep_index)
for exp, got in zip(expected, parts):
assert_eq(exp, got)
def test_dataframe_hash_partition_empty():
gdf = cudf.DataFrame({"val": [1, 2], "key": [3, 2]}, index=["a", "b"])
parts = gdf.iloc[:0].partition_by_hash(["key"], nparts=3)
assert len(parts) == 3
for part in parts:
assert_eq(gdf.iloc[:0], part)
@pytest.mark.parametrize("dtype1", utils.supported_numpy_dtypes)
@pytest.mark.parametrize("dtype2", utils.supported_numpy_dtypes)
def test_dataframe_concat_different_numerical_columns(dtype1, dtype2):
df1 = pd.DataFrame(dict(x=pd.Series(np.arange(5)).astype(dtype1)))
df2 = pd.DataFrame(dict(x=pd.Series(np.arange(5)).astype(dtype2)))
if dtype1 != dtype2 and "datetime" in dtype1 or "datetime" in dtype2:
with pytest.raises(TypeError):
cudf.concat([df1, df2])
else:
pres = pd.concat([df1, df2])
gres = cudf.concat([cudf.from_pandas(df1), cudf.from_pandas(df2)])
assert_eq(cudf.from_pandas(pres), gres)
def test_dataframe_concat_different_column_types():
df1 = cudf.Series([42], dtype=np.float64)
df2 = cudf.Series(["a"], dtype="category")
with pytest.raises(ValueError):
cudf.concat([df1, df2])
df2 = cudf.Series(["a string"])
with pytest.raises(TypeError):
cudf.concat([df1, df2])
@pytest.mark.parametrize(
"df_1", [cudf.DataFrame({"a": [1, 2], "b": [1, 3]}), cudf.DataFrame({})]
)
@pytest.mark.parametrize(
"df_2", [cudf.DataFrame({"a": [], "b": []}), cudf.DataFrame({})]
)
def test_concat_empty_dataframe(df_1, df_2):
got = cudf.concat([df_1, df_2])
expect = pd.concat([df_1.to_pandas(), df_2.to_pandas()], sort=False)
# ignoring dtypes as pandas upcasts int to float
# on concatenation with empty dataframes
assert_eq(got, expect, check_dtype=False)
@pytest.mark.parametrize(
"df1_d",
[
{"a": [1, 2], "b": [1, 2], "c": ["s1", "s2"], "d": [1.0, 2.0]},
{"b": [1.9, 10.9], "c": ["s1", "s2"]},
{"c": ["s1"], "b": [None], "a": [False]},
],
)
@pytest.mark.parametrize(
"df2_d",
[
{"a": [1, 2, 3]},
{"a": [1, None, 3], "b": [True, True, False], "c": ["s3", None, "s4"]},
{"a": [], "b": []},
{},
],
)
def test_concat_different_column_dataframe(df1_d, df2_d):
got = cudf.concat(
[cudf.DataFrame(df1_d), cudf.DataFrame(df2_d), cudf.DataFrame(df1_d)],
sort=False,
)
expect = pd.concat(
[pd.DataFrame(df1_d), pd.DataFrame(df2_d), pd.DataFrame(df1_d)],
sort=False,
)
    # cudf.DataFrame.to_pandas() upcasts numerical columns to float and
    # casts nan to 0 in non-float numerical columns, so normalize the
    # numeric columns to float64 (keeping NaN) before comparing
numeric_cols = got.dtypes[got.dtypes != "object"].index
for col in numeric_cols:
got[col] = got[col].astype(np.float64).fillna(np.nan)
assert_eq(got, expect, check_dtype=False)
@pytest.mark.parametrize(
"ser_1", [pd.Series([1, 2, 3]), pd.Series([], dtype="float64")]
)
@pytest.mark.parametrize("ser_2", [pd.Series([], dtype="float64")])
def test_concat_empty_series(ser_1, ser_2):
got = cudf.concat([cudf.Series(ser_1), cudf.Series(ser_2)])
expect = pd.concat([ser_1, ser_2])
assert_eq(got, expect)
def test_concat_with_axis():
df1 = pd.DataFrame(dict(x=np.arange(5), y=np.arange(5)))
df2 = pd.DataFrame(dict(a=np.arange(5), b=np.arange(5)))
concat_df = pd.concat([df1, df2], axis=1)
cdf1 = cudf.from_pandas(df1)
cdf2 = cudf.from_pandas(df2)
# concat only dataframes
concat_cdf = cudf.concat([cdf1, cdf2], axis=1)
assert_eq(concat_cdf, concat_df)
# concat only series
concat_s = pd.concat([df1.x, df1.y], axis=1)
cs1 = cudf.Series.from_pandas(df1.x)
cs2 = cudf.Series.from_pandas(df1.y)
concat_cdf_s = cudf.concat([cs1, cs2], axis=1)
assert_eq(concat_cdf_s, concat_s)
# concat series and dataframes
s3 = pd.Series(np.random.random(5))
cs3 = cudf.Series.from_pandas(s3)
concat_cdf_all = cudf.concat([cdf1, cs3, cdf2], axis=1)
concat_df_all = pd.concat([df1, s3, df2], axis=1)
assert_eq(concat_cdf_all, concat_df_all)
# concat manual multi index
midf1 = cudf.from_pandas(df1)
midf1.index = cudf.MultiIndex(
levels=[[0, 1, 2, 3], [0, 1]], codes=[[0, 1, 2, 3, 2], [0, 1, 0, 1, 0]]
)
midf2 = midf1[2:]
midf2.index = cudf.MultiIndex(
levels=[[3, 4, 5], [2, 0]], codes=[[0, 1, 2], [1, 0, 1]]
)
mipdf1 = midf1.to_pandas()
mipdf2 = midf2.to_pandas()
assert_eq(cudf.concat([midf1, midf2]), pd.concat([mipdf1, mipdf2]))
assert_eq(cudf.concat([midf2, midf1]), pd.concat([mipdf2, mipdf1]))
assert_eq(
cudf.concat([midf1, midf2, midf1]), pd.concat([mipdf1, mipdf2, mipdf1])
)
# concat groupby multi index
gdf1 = cudf.DataFrame(
{
"x": np.random.randint(0, 10, 10),
"y": np.random.randint(0, 10, 10),
"z": np.random.randint(0, 10, 10),
"v": np.random.randint(0, 10, 10),
}
)
gdf2 = gdf1[5:]
gdg1 = gdf1.groupby(["x", "y"]).min()
gdg2 = gdf2.groupby(["x", "y"]).min()
pdg1 = gdg1.to_pandas()
pdg2 = gdg2.to_pandas()
assert_eq(cudf.concat([gdg1, gdg2]), pd.concat([pdg1, pdg2]))
assert_eq(cudf.concat([gdg2, gdg1]), pd.concat([pdg2, pdg1]))
# series multi index concat
gdgz1 = gdg1.z
gdgz2 = gdg2.z
pdgz1 = gdgz1.to_pandas()
pdgz2 = gdgz2.to_pandas()
assert_eq(cudf.concat([gdgz1, gdgz2]), pd.concat([pdgz1, pdgz2]))
assert_eq(cudf.concat([gdgz2, gdgz1]), pd.concat([pdgz2, pdgz1]))
@pytest.mark.parametrize("nrows", [0, 3, 10, 100, 1000])
def test_nonmatching_index_setitem(nrows):
np.random.seed(0)
gdf = cudf.DataFrame()
gdf["a"] = np.random.randint(2147483647, size=nrows)
gdf["b"] = np.random.randint(2147483647, size=nrows)
gdf = gdf.set_index("b")
test_values = np.random.randint(2147483647, size=nrows)
gdf["c"] = test_values
assert len(test_values) == len(gdf["c"])
assert (
gdf["c"]
.to_pandas()
.equals(cudf.Series(test_values).set_index(gdf._index).to_pandas())
)
def test_from_pandas():
df = pd.DataFrame({"x": [1, 2, 3]}, index=[4.0, 5.0, 6.0])
gdf = cudf.DataFrame.from_pandas(df)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
s = df.x
gs = cudf.Series.from_pandas(s)
assert isinstance(gs, cudf.Series)
assert_eq(s, gs)
@pytest.mark.parametrize("dtypes", [int, float])
def test_from_records(dtypes):
h_ary = np.ndarray(shape=(10, 4), dtype=dtypes)
rec_ary = h_ary.view(np.recarray)
gdf = cudf.DataFrame.from_records(rec_ary, columns=["a", "b", "c", "d"])
df = pd.DataFrame.from_records(rec_ary, columns=["a", "b", "c", "d"])
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame.from_records(rec_ary)
df = pd.DataFrame.from_records(rec_ary)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
@pytest.mark.parametrize("columns", [None, ["first", "second", "third"]])
@pytest.mark.parametrize(
"index",
[
None,
["first", "second"],
"name",
"age",
"weight",
[10, 11],
["abc", "xyz"],
],
)
def test_from_records_index(columns, index):
rec_ary = np.array(
[("Rex", 9, 81.0), ("Fido", 3, 27.0)],
dtype=[("name", "U10"), ("age", "i4"), ("weight", "f4")],
)
gdf = cudf.DataFrame.from_records(rec_ary, columns=columns, index=index)
df = pd.DataFrame.from_records(rec_ary, columns=columns, index=index)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
def test_dataframe_construction_from_cupy_arrays():
h_ary = np.array([[1, 2, 3], [4, 5, 6]], np.int32)
d_ary = cupy.asarray(h_ary)
gdf = cudf.DataFrame(d_ary, columns=["a", "b", "c"])
df = pd.DataFrame(h_ary, columns=["a", "b", "c"])
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame(d_ary)
df = pd.DataFrame(h_ary)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame(d_ary, index=["a", "b"])
df = pd.DataFrame(h_ary, index=["a", "b"])
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame(d_ary)
gdf = gdf.set_index(keys=0, drop=False)
df = pd.DataFrame(h_ary)
df = df.set_index(keys=0, drop=False)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame(d_ary)
gdf = gdf.set_index(keys=1, drop=False)
df = pd.DataFrame(h_ary)
df = df.set_index(keys=1, drop=False)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
def test_dataframe_cupy_wrong_dimensions():
d_ary = cupy.empty((2, 3, 4), dtype=np.int32)
with pytest.raises(
ValueError, match="records dimension expected 1 or 2 but found: 3"
):
cudf.DataFrame(d_ary)
def test_dataframe_cupy_array_wrong_index():
d_ary = cupy.empty((2, 3), dtype=np.int32)
with pytest.raises(
ValueError,
match="Length mismatch: Expected axis has 2 elements, "
"new values have 1 elements",
):
cudf.DataFrame(d_ary, index=["a"])
with pytest.raises(
ValueError,
match="Length mismatch: Expected axis has 2 elements, "
"new values have 1 elements",
):
cudf.DataFrame(d_ary, index="a")
def test_index_in_dataframe_constructor():
a = pd.DataFrame({"x": [1, 2, 3]}, index=[4.0, 5.0, 6.0])
b = cudf.DataFrame({"x": [1, 2, 3]}, index=[4.0, 5.0, 6.0])
assert_eq(a, b)
assert_eq(a.loc[4:], b.loc[4:])
dtypes = NUMERIC_TYPES + DATETIME_TYPES + ["bool"]
@pytest.mark.parametrize("nelem", [0, 2, 3, 100, 1000])
@pytest.mark.parametrize("data_type", dtypes)
def test_from_arrow(nelem, data_type):
df = pd.DataFrame(
{
"a": np.random.randint(0, 1000, nelem).astype(data_type),
"b": np.random.randint(0, 1000, nelem).astype(data_type),
}
)
padf = pa.Table.from_pandas(
df, preserve_index=False
).replace_schema_metadata(None)
gdf = cudf.DataFrame.from_arrow(padf)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
s = pa.Array.from_pandas(df.a)
gs = cudf.Series.from_arrow(s)
assert isinstance(gs, cudf.Series)
# For some reason PyArrow to_pandas() converts to numpy array and has
# better type compatibility
np.testing.assert_array_equal(s.to_pandas(), gs.to_array())
@pytest.mark.parametrize("nelem", [0, 2, 3, 100, 1000])
@pytest.mark.parametrize("data_type", dtypes)
def test_to_arrow(nelem, data_type):
df = pd.DataFrame(
{
"a": np.random.randint(0, 1000, nelem).astype(data_type),
"b": np.random.randint(0, 1000, nelem).astype(data_type),
}
)
gdf = cudf.DataFrame.from_pandas(df)
pa_df = pa.Table.from_pandas(
df, preserve_index=False
).replace_schema_metadata(None)
pa_gdf = gdf.to_arrow(preserve_index=False).replace_schema_metadata(None)
assert isinstance(pa_gdf, pa.Table)
assert pa.Table.equals(pa_df, pa_gdf)
pa_s = pa.Array.from_pandas(df.a)
pa_gs = gdf["a"].to_arrow()
assert isinstance(pa_gs, pa.Array)
assert pa.Array.equals(pa_s, pa_gs)
pa_i = pa.Array.from_pandas(df.index)
pa_gi = gdf.index.to_arrow()
assert isinstance(pa_gi, pa.Array)
assert pa.Array.equals(pa_i, pa_gi)
@pytest.mark.parametrize("data_type", dtypes)
def test_to_from_arrow_nulls(data_type):
if data_type == "longlong":
data_type = "int64"
if data_type == "bool":
s1 = pa.array([True, None, False, None, True], type=data_type)
else:
dtype = np.dtype(data_type)
if dtype.type == np.datetime64:
time_unit, _ = np.datetime_data(dtype)
data_type = pa.timestamp(unit=time_unit)
s1 = pa.array([1, None, 3, None, 5], type=data_type)
gs1 = cudf.Series.from_arrow(s1)
assert isinstance(gs1, cudf.Series)
# We have 64B padded buffers for nulls whereas Arrow returns a minimal
# number of bytes, so only check the first byte in this case
np.testing.assert_array_equal(
np.asarray(s1.buffers()[0]).view("u1")[0],
gs1._column.mask_array_view.copy_to_host().view("u1")[0],
)
assert pa.Array.equals(s1, gs1.to_arrow())
s2 = pa.array([None, None, None, None, None], type=data_type)
gs2 = cudf.Series.from_arrow(s2)
assert isinstance(gs2, cudf.Series)
# We have 64B padded buffers for nulls whereas Arrow returns a minimal
# number of bytes, so only check the first byte in this case
np.testing.assert_array_equal(
np.asarray(s2.buffers()[0]).view("u1")[0],
gs2._column.mask_array_view.copy_to_host().view("u1")[0],
)
assert pa.Array.equals(s2, gs2.to_arrow())
def test_to_arrow_categorical():
df = pd.DataFrame()
df["a"] = pd.Series(["a", "b", "c"], dtype="category")
gdf = cudf.DataFrame.from_pandas(df)
pa_df = pa.Table.from_pandas(
df, preserve_index=False
).replace_schema_metadata(None)
pa_gdf = gdf.to_arrow(preserve_index=False).replace_schema_metadata(None)
assert isinstance(pa_gdf, pa.Table)
assert pa.Table.equals(pa_df, pa_gdf)
pa_s = pa.Array.from_pandas(df.a)
pa_gs = gdf["a"].to_arrow()
assert isinstance(pa_gs, pa.Array)
assert pa.Array.equals(pa_s, pa_gs)
def test_from_arrow_missing_categorical():
pd_cat = pd.Categorical(["a", "b", "c"], categories=["a", "b"])
pa_cat = pa.array(pd_cat, from_pandas=True)
gd_cat = cudf.Series(pa_cat)
assert isinstance(gd_cat, cudf.Series)
assert_eq(
pd.Series(pa_cat.to_pandas()), # PyArrow returns a pd.Categorical
gd_cat.to_pandas(),
)
def test_to_arrow_missing_categorical():
pd_cat = pd.Categorical(["a", "b", "c"], categories=["a", "b"])
pa_cat = pa.array(pd_cat, from_pandas=True)
gd_cat = cudf.Series(pa_cat)
assert isinstance(gd_cat, cudf.Series)
assert pa.Array.equals(pa_cat, gd_cat.to_arrow())
@pytest.mark.parametrize("data_type", dtypes)
def test_from_scalar_typing(data_type):
if data_type == "datetime64[ms]":
scalar = (
np.dtype("int64")
.type(np.random.randint(0, 5))
.astype("datetime64[ms]")
)
elif data_type.startswith("datetime64"):
scalar = np.datetime64(datetime.date.today()).astype("datetime64[ms]")
data_type = "datetime64[ms]"
else:
scalar = np.dtype(data_type).type(np.random.randint(0, 5))
gdf = cudf.DataFrame()
gdf["a"] = [1, 2, 3, 4, 5]
gdf["b"] = scalar
assert gdf["b"].dtype == np.dtype(data_type)
assert len(gdf["b"]) == len(gdf["a"])
@pytest.mark.parametrize("data_type", NUMERIC_TYPES)
def test_from_python_array(data_type):
np_arr = np.random.randint(0, 100, 10).astype(data_type)
data = memoryview(np_arr)
data = arr.array(data.format, data)
gs = cudf.Series(data)
np.testing.assert_equal(gs.to_array(), np_arr)
def test_series_shape():
ps = pd.Series([1, 2, 3, 4])
cs = cudf.Series([1, 2, 3, 4])
assert ps.shape == cs.shape
def test_series_shape_empty():
ps = pd.Series(dtype="float64")
cs = cudf.Series([])
assert ps.shape == cs.shape
def test_dataframe_shape():
pdf = pd.DataFrame({"a": [0, 1, 2, 3], "b": [0.1, 0.2, None, 0.3]})
gdf = cudf.DataFrame.from_pandas(pdf)
assert pdf.shape == gdf.shape
def test_dataframe_shape_empty():
pdf = pd.DataFrame()
gdf = cudf.DataFrame()
assert pdf.shape == gdf.shape
@pytest.mark.parametrize("num_cols", [1, 2, 10])
@pytest.mark.parametrize("num_rows", [1, 2, 20])
@pytest.mark.parametrize("dtype", dtypes)
@pytest.mark.parametrize("nulls", ["none", "some", "all"])
def test_dataframe_transpose(nulls, num_cols, num_rows, dtype):
pdf = pd.DataFrame()
null_rep = np.nan if dtype in ["float32", "float64"] else None
for i in range(num_cols):
colname = string.ascii_lowercase[i]
data = pd.Series(np.random.randint(0, 26, num_rows).astype(dtype))
if nulls == "some":
idx = np.random.choice(
num_rows, size=int(num_rows / 2), replace=False
)
data[idx] = null_rep
elif nulls == "all":
data[:] = null_rep
pdf[colname] = data
gdf = cudf.DataFrame.from_pandas(pdf)
got_function = gdf.transpose()
got_property = gdf.T
expect = pdf.transpose()
assert_eq(expect, got_function)
assert_eq(expect, got_property)
@pytest.mark.parametrize("num_cols", [1, 2, 10])
@pytest.mark.parametrize("num_rows", [1, 2, 20])
def test_dataframe_transpose_category(num_cols, num_rows):
pdf = pd.DataFrame()
for i in range(num_cols):
colname = string.ascii_lowercase[i]
data = pd.Series(list(string.ascii_lowercase), dtype="category")
data = data.sample(num_rows, replace=True).reset_index(drop=True)
pdf[colname] = data
gdf = cudf.DataFrame.from_pandas(pdf)
got_function = gdf.transpose()
got_property = gdf.T
expect = pdf.transpose()
assert_eq(expect, got_function.to_pandas())
assert_eq(expect, got_property.to_pandas())
def test_generated_column():
gdf = cudf.DataFrame({"a": (i for i in range(5))})
assert len(gdf) == 5
@pytest.fixture
def pdf():
return pd.DataFrame({"x": range(10), "y": range(10)})
@pytest.fixture
def gdf(pdf):
return cudf.DataFrame.from_pandas(pdf)
@pytest.mark.parametrize(
"data",
[
{"x": [np.nan, 2, 3, 4, 100, np.nan], "y": [4, 5, 6, 88, 99, np.nan]},
{"x": [1, 2, 3], "y": [4, 5, 6]},
{"x": [np.nan, np.nan, np.nan], "y": [np.nan, np.nan, np.nan]},
{"x": [], "y": []},
{"x": []},
],
)
@pytest.mark.parametrize(
"func",
[
lambda df, **kwargs: df.min(**kwargs),
lambda df, **kwargs: df.max(**kwargs),
lambda df, **kwargs: df.sum(**kwargs),
lambda df, **kwargs: df.product(**kwargs),
lambda df, **kwargs: df.cummin(**kwargs),
lambda df, **kwargs: df.cummax(**kwargs),
lambda df, **kwargs: df.cumsum(**kwargs),
lambda df, **kwargs: df.cumprod(**kwargs),
lambda df, **kwargs: df.mean(**kwargs),
lambda df, **kwargs: df.sum(**kwargs),
lambda df, **kwargs: df.max(**kwargs),
lambda df, **kwargs: df.std(ddof=1, **kwargs),
lambda df, **kwargs: df.var(ddof=1, **kwargs),
lambda df, **kwargs: df.std(ddof=2, **kwargs),
lambda df, **kwargs: df.var(ddof=2, **kwargs),
lambda df, **kwargs: df.kurt(**kwargs),
lambda df, **kwargs: df.skew(**kwargs),
lambda df, **kwargs: df.all(**kwargs),
lambda df, **kwargs: df.any(**kwargs),
],
)
@pytest.mark.parametrize("skipna", [True, False, None])
def test_dataframe_reductions(data, func, skipna):
pdf = pd.DataFrame(data=data)
gdf = cudf.DataFrame.from_pandas(pdf)
assert_eq(func(pdf, skipna=skipna), func(gdf, skipna=skipna))
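# Pandas-only reminder (relying on the module-level pandas/numpy imports) of
# the skipna semantics the parametrized reductions above are checked
# against: skipna=True drops NaNs before reducing, while skipna=False
# propagates them into the result.
def _skipna_reduction_sketch():
    s = pd.Series([1.0, np.nan, 3.0])
    assert s.sum(skipna=True) == 4.0
    assert np.isnan(s.sum(skipna=False))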
@pytest.mark.parametrize(
"data",
[
{"x": [np.nan, 2, 3, 4, 100, np.nan], "y": [4, 5, 6, 88, 99, np.nan]},
{"x": [1, 2, 3], "y": [4, 5, 6]},
{"x": [np.nan, np.nan, np.nan], "y": [np.nan, np.nan, np.nan]},
{"x": [], "y": []},
{"x": []},
],
)
@pytest.mark.parametrize("func", [lambda df: df.count()])
def test_dataframe_count_reduction(data, func):
pdf = pd.DataFrame(data=data)
gdf = cudf.DataFrame.from_pandas(pdf)
assert_eq(func(pdf), func(gdf))
@pytest.mark.parametrize(
"data",
[
{"x": [np.nan, 2, 3, 4, 100, np.nan], "y": [4, 5, 6, 88, 99, np.nan]},
{"x": [1, 2, 3], "y": [4, 5, 6]},
{"x": [np.nan, np.nan, np.nan], "y": [np.nan, np.nan, np.nan]},
{"x": [], "y": []},
{"x": []},
],
)
@pytest.mark.parametrize("ops", ["sum", "product", "prod"])
@pytest.mark.parametrize("skipna", [True, False, None])
@pytest.mark.parametrize("min_count", [-10, -1, 0, 1, 2, 3, 10])
def test_dataframe_min_count_ops(data, ops, skipna, min_count):
psr = pd.DataFrame(data)
gsr = cudf.DataFrame(data)
if PANDAS_GE_120 and psr.shape[0] * psr.shape[1] < min_count:
pytest.xfail("https://github.com/pandas-dev/pandas/issues/39738")
assert_eq(
getattr(psr, ops)(skipna=skipna, min_count=min_count),
getattr(gsr, ops)(skipna=skipna, min_count=min_count),
check_dtype=False,
)
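# Pandas-only illustration of the min_count argument forwarded above: if
# fewer than min_count valid values remain after dropping nulls, the
# reduction returns NaN instead of a number.
def _min_count_sketch():
    s = pd.Series([1.0, np.nan])
    assert s.sum(min_count=1) == 1.0
    assert np.isnan(s.sum(min_count=2))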
@pytest.mark.parametrize(
"binop",
[
operator.add,
operator.mul,
operator.floordiv,
operator.truediv,
operator.mod,
operator.pow,
operator.eq,
operator.lt,
operator.le,
operator.gt,
operator.ge,
operator.ne,
],
)
def test_binops_df(pdf, gdf, binop):
pdf = pdf + 1.0
gdf = gdf + 1.0
d = binop(pdf, pdf)
g = binop(gdf, gdf)
assert_eq(d, g)
@pytest.mark.parametrize("binop", [operator.and_, operator.or_, operator.xor])
def test_bitwise_binops_df(pdf, gdf, binop):
d = binop(pdf, pdf + 1)
g = binop(gdf, gdf + 1)
assert_eq(d, g)
@pytest.mark.parametrize(
"binop",
[
operator.add,
operator.mul,
operator.floordiv,
operator.truediv,
operator.mod,
operator.pow,
operator.eq,
operator.lt,
operator.le,
operator.gt,
operator.ge,
operator.ne,
],
)
def test_binops_series(pdf, gdf, binop):
pdf = pdf + 1.0
gdf = gdf + 1.0
d = binop(pdf.x, pdf.y)
g = binop(gdf.x, gdf.y)
assert_eq(d, g)
@pytest.mark.parametrize("binop", [operator.and_, operator.or_, operator.xor])
def test_bitwise_binops_series(pdf, gdf, binop):
d = binop(pdf.x, pdf.y + 1)
g = binop(gdf.x, gdf.y + 1)
assert_eq(d, g)
@pytest.mark.parametrize("unaryop", [operator.neg, operator.inv, operator.abs])
def test_unaryops_df(pdf, gdf, unaryop):
d = unaryop(pdf - 5)
g = unaryop(gdf - 5)
assert_eq(d, g)
@pytest.mark.parametrize(
"func",
[
lambda df: df.empty,
lambda df: df.x.empty,
lambda df: df.x.fillna(123, limit=None, method=None, axis=None),
lambda df: df.drop("x", axis=1, errors="raise"),
],
)
def test_unary_operators(func, pdf, gdf):
p = func(pdf)
g = func(gdf)
assert_eq(p, g)
def test_is_monotonic(gdf):
pdf = pd.DataFrame({"x": [1, 2, 3]}, index=[3, 1, 2])
gdf = cudf.DataFrame.from_pandas(pdf)
assert not gdf.index.is_monotonic
assert not gdf.index.is_monotonic_increasing
assert not gdf.index.is_monotonic_decreasing
def test_iter(pdf, gdf):
assert list(pdf) == list(gdf)
def test_iteritems(gdf):
for k, v in gdf.iteritems():
assert k in gdf.columns
assert isinstance(v, cudf.Series)
assert_eq(v, gdf[k])
@pytest.mark.parametrize("q", [0.5, 1, 0.001, [0.5], [], [0.005, 0.5, 1]])
@pytest.mark.parametrize("numeric_only", [True, False])
def test_quantile(q, numeric_only):
ts = pd.date_range("2018-08-24", periods=5, freq="D")
td = pd.to_timedelta(np.arange(5), unit="h")
pdf = pd.DataFrame(
{"date": ts, "delta": td, "val": np.random.randn(len(ts))}
)
gdf = cudf.DataFrame.from_pandas(pdf)
assert_eq(pdf["date"].quantile(q), gdf["date"].quantile(q))
assert_eq(pdf["delta"].quantile(q), gdf["delta"].quantile(q))
assert_eq(pdf["val"].quantile(q), gdf["val"].quantile(q))
if numeric_only:
assert_eq(pdf.quantile(q), gdf.quantile(q))
else:
        q = q if isinstance(q, list) else [q]
        assert_eq(
            pdf.quantile(q, numeric_only=False),
            gdf.quantile(q, numeric_only=False),
        )
def test_empty_quantile():
pdf = pd.DataFrame({"x": []})
df = cudf.DataFrame({"x": []})
actual = df.quantile()
expected = pdf.quantile()
assert_eq(actual, expected)
def test_from_pandas_function(pdf):
gdf = cudf.from_pandas(pdf)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(pdf, gdf)
gdf = cudf.from_pandas(pdf.x)
assert isinstance(gdf, cudf.Series)
assert_eq(pdf.x, gdf)
with pytest.raises(TypeError):
cudf.from_pandas(123)
@pytest.mark.parametrize("preserve_index", [True, False])
def test_arrow_pandas_compat(pdf, gdf, preserve_index):
pdf["z"] = range(10)
pdf = pdf.set_index("z")
gdf["z"] = range(10)
gdf = gdf.set_index("z")
pdf_arrow_table = pa.Table.from_pandas(pdf, preserve_index=preserve_index)
gdf_arrow_table = gdf.to_arrow(preserve_index=preserve_index)
assert pa.Table.equals(pdf_arrow_table, gdf_arrow_table)
gdf2 = cudf.DataFrame.from_arrow(pdf_arrow_table)
pdf2 = pdf_arrow_table.to_pandas()
assert_eq(pdf2, gdf2)
@pytest.mark.parametrize("nrows", [1, 8, 100, 1000, 100000])
def test_series_hash_encode(nrows):
data = np.asarray(range(nrows))
    # Python's hash() returns a different value for string names between
    # runs, which sometimes makes enc_with_name_arr and enc_arr identical,
    # and there is no better way to force hash() to return a stable value.
    # So use an integer name to get a constant value back from hash().
s = cudf.Series(data, name=1)
num_features = 1000
encoded_series = s.hash_encode(num_features)
assert isinstance(encoded_series, cudf.Series)
enc_arr = encoded_series.to_array()
assert np.all(enc_arr >= 0)
assert np.max(enc_arr) < num_features
enc_with_name_arr = s.hash_encode(num_features, use_name=True).to_array()
assert enc_with_name_arr[0] != enc_arr[0]
@pytest.mark.parametrize("dtype", NUMERIC_TYPES + ["bool"])
def test_cuda_array_interface(dtype):
np_data = np.arange(10).astype(dtype)
cupy_data = cupy.array(np_data)
pd_data = pd.Series(np_data)
cudf_data = cudf.Series(cupy_data)
assert_eq(pd_data, cudf_data)
gdf = cudf.DataFrame()
gdf["test"] = cupy_data
pd_data.name = "test"
assert_eq(pd_data, gdf["test"])
@pytest.mark.parametrize("nelem", [0, 2, 3, 100])
@pytest.mark.parametrize("nchunks", [1, 2, 5, 10])
@pytest.mark.parametrize("data_type", dtypes)
def test_from_arrow_chunked_arrays(nelem, nchunks, data_type):
np_list_data = [
np.random.randint(0, 100, nelem).astype(data_type)
for i in range(nchunks)
]
pa_chunk_array = pa.chunked_array(np_list_data)
expect = pd.Series(pa_chunk_array.to_pandas())
got = cudf.Series(pa_chunk_array)
assert_eq(expect, got)
np_list_data2 = [
np.random.randint(0, 100, nelem).astype(data_type)
for i in range(nchunks)
]
pa_chunk_array2 = pa.chunked_array(np_list_data2)
pa_table = pa.Table.from_arrays(
[pa_chunk_array, pa_chunk_array2], names=["a", "b"]
)
expect = pa_table.to_pandas()
got = cudf.DataFrame.from_arrow(pa_table)
assert_eq(expect, got)
@pytest.mark.skip(reason="Test was designed to be run in isolation")
def test_gpu_memory_usage_with_boolmask():
ctx = cuda.current_context()
def query_GPU_memory(note=""):
memInfo = ctx.get_memory_info()
usedMemoryGB = (memInfo.total - memInfo.free) / 1e9
return usedMemoryGB
cuda.current_context().deallocations.clear()
nRows = int(1e8)
nCols = 2
dataNumpy = np.asfortranarray(np.random.rand(nRows, nCols))
colNames = ["col" + str(iCol) for iCol in range(nCols)]
pandasDF = pd.DataFrame(data=dataNumpy, columns=colNames, dtype=np.float32)
cudaDF = cudf.core.DataFrame.from_pandas(pandasDF)
boolmask = cudf.Series(np.random.randint(1, 2, len(cudaDF)).astype("bool"))
memory_used = query_GPU_memory()
cudaDF = cudaDF[boolmask]
assert (
cudaDF.index._values.data_array_view.device_ctypes_pointer
== cudaDF["col0"].index._values.data_array_view.device_ctypes_pointer
)
assert (
cudaDF.index._values.data_array_view.device_ctypes_pointer
== cudaDF["col1"].index._values.data_array_view.device_ctypes_pointer
)
assert memory_used == query_GPU_memory()
def test_boolmask(pdf, gdf):
boolmask = np.random.randint(0, 2, len(pdf)) > 0
gdf = gdf[boolmask]
pdf = pdf[boolmask]
assert_eq(pdf, gdf)
@pytest.mark.parametrize(
"mask_shape",
[
(2, "ab"),
(2, "abc"),
(3, "ab"),
(3, "abc"),
(3, "abcd"),
(4, "abc"),
(4, "abcd"),
],
)
def test_dataframe_boolmask(mask_shape):
pdf = pd.DataFrame()
for col in "abc":
pdf[col] = np.random.randint(0, 10, 3)
pdf_mask = pd.DataFrame()
for col in mask_shape[1]:
pdf_mask[col] = np.random.randint(0, 2, mask_shape[0]) > 0
gdf = cudf.DataFrame.from_pandas(pdf)
gdf_mask = cudf.DataFrame.from_pandas(pdf_mask)
gdf = gdf[gdf_mask]
pdf = pdf[pdf_mask]
assert np.array_equal(gdf.columns, pdf.columns)
for col in gdf.columns:
assert np.array_equal(
gdf[col].fillna(-1).to_pandas().values, pdf[col].fillna(-1).values
)
@pytest.mark.parametrize(
"mask",
[
[True, False, True],
pytest.param(
cudf.Series([True, False, True]),
marks=pytest.mark.xfail(
reason="Pandas can't index a multiindex with a Series"
),
),
],
)
def test_dataframe_multiindex_boolmask(mask):
gdf = cudf.DataFrame(
{"w": [3, 2, 1], "x": [1, 2, 3], "y": [0, 1, 0], "z": [1, 1, 1]}
)
gdg = gdf.groupby(["w", "x"]).count()
pdg = gdg.to_pandas()
assert_eq(gdg[mask], pdg[mask])
def test_dataframe_assignment():
pdf = pd.DataFrame()
for col in "abc":
pdf[col] = np.array([0, 1, 1, -2, 10])
gdf = cudf.DataFrame.from_pandas(pdf)
gdf[gdf < 0] = 999
pdf[pdf < 0] = 999
assert_eq(gdf, pdf)
def test_1row_arrow_table():
data = [pa.array([0]), pa.array([1])]
batch = pa.RecordBatch.from_arrays(data, ["f0", "f1"])
table = pa.Table.from_batches([batch])
expect = table.to_pandas()
got = cudf.DataFrame.from_arrow(table)
assert_eq(expect, got)
def test_arrow_handle_no_index_name(pdf, gdf):
gdf_arrow = gdf.to_arrow()
pdf_arrow = pa.Table.from_pandas(pdf)
assert pa.Table.equals(pdf_arrow, gdf_arrow)
got = cudf.DataFrame.from_arrow(gdf_arrow)
expect = pdf_arrow.to_pandas()
assert_eq(expect, got)
@pytest.mark.parametrize("num_rows", [1, 3, 10, 100])
@pytest.mark.parametrize("num_bins", [1, 2, 4, 20])
@pytest.mark.parametrize("right", [True, False])
@pytest.mark.parametrize("dtype", NUMERIC_TYPES + ["bool"])
@pytest.mark.parametrize("series_bins", [True, False])
def test_series_digitize(num_rows, num_bins, right, dtype, series_bins):
data = np.random.randint(0, 100, num_rows).astype(dtype)
bins = np.unique(np.sort(np.random.randint(2, 95, num_bins).astype(dtype)))
s = cudf.Series(data)
if series_bins:
s_bins = cudf.Series(bins)
indices = s.digitize(s_bins, right)
else:
indices = s.digitize(bins, right)
np.testing.assert_array_equal(
np.digitize(data, bins, right), indices.to_array()
)
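# NumPy-only illustration of the reference behaviour compared against above:
# digitize returns, for each value, the index of the bin it falls into, and
# `right` controls whether a value equal to a bin edge belongs to the lower
# or the upper bin.
def _digitize_sketch():
    bins = np.array([2, 7])
    np.testing.assert_array_equal(
        np.digitize([1, 2, 10], bins, right=False), [0, 1, 2]
    )
    np.testing.assert_array_equal(
        np.digitize([1, 2, 10], bins, right=True), [0, 0, 2]
    )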
def test_series_digitize_invalid_bins():
s = cudf.Series(np.random.randint(0, 30, 80), dtype="int32")
bins = cudf.Series([2, None, None, 50, 90], dtype="int32")
with pytest.raises(
ValueError, match="`bins` cannot contain null entries."
):
_ = s.digitize(bins)
def test_pandas_non_contiguous():
arr1 = np.random.sample([5000, 10])
assert arr1.flags["C_CONTIGUOUS"] is True
df = pd.DataFrame(arr1)
for col in df.columns:
assert df[col].values.flags["C_CONTIGUOUS"] is False
gdf = cudf.DataFrame.from_pandas(df)
assert_eq(gdf.to_pandas(), df)
@pytest.mark.parametrize("num_elements", [0, 2, 10, 100])
@pytest.mark.parametrize("null_type", [np.nan, None, "mixed"])
def test_series_all_null(num_elements, null_type):
if null_type == "mixed":
data = []
data1 = [np.nan] * int(num_elements / 2)
data2 = [None] * int(num_elements / 2)
for idx in range(len(data1)):
data.append(data1[idx])
data.append(data2[idx])
else:
data = [null_type] * num_elements
    # Typecast the pandas Series because a list of None values would
    # otherwise be given `object` dtype
expect = pd.Series(data, dtype="float64")
got = cudf.Series(data)
assert_eq(expect, got)
@pytest.mark.parametrize("num_elements", [0, 2, 10, 100])
def test_series_all_valid_nan(num_elements):
data = [np.nan] * num_elements
sr = cudf.Series(data, nan_as_null=False)
np.testing.assert_equal(sr.null_count, 0)
def test_series_rename():
pds = pd.Series([1, 2, 3], name="asdf")
gds = cudf.Series([1, 2, 3], name="asdf")
expect = pds.rename("new_name")
got = gds.rename("new_name")
assert_eq(expect, got)
pds = pd.Series(expect)
gds = cudf.Series(got)
assert_eq(pds, gds)
pds = pd.Series(expect, name="name name")
gds = cudf.Series(got, name="name name")
assert_eq(pds, gds)
@pytest.mark.parametrize("data_type", dtypes)
@pytest.mark.parametrize("nelem", [0, 100])
def test_head_tail(nelem, data_type):
def check_index_equality(left, right):
assert left.index.equals(right.index)
def check_values_equality(left, right):
if len(left) == 0 and len(right) == 0:
return None
np.testing.assert_array_equal(left.to_pandas(), right.to_pandas())
def check_frame_series_equality(left, right):
check_index_equality(left, right)
check_values_equality(left, right)
gdf = cudf.DataFrame(
{
"a": np.random.randint(0, 1000, nelem).astype(data_type),
"b": np.random.randint(0, 1000, nelem).astype(data_type),
}
)
check_frame_series_equality(gdf.head(), gdf[:5])
check_frame_series_equality(gdf.head(3), gdf[:3])
check_frame_series_equality(gdf.head(-2), gdf[:-2])
check_frame_series_equality(gdf.head(0), gdf[0:0])
check_frame_series_equality(gdf["a"].head(), gdf["a"][:5])
check_frame_series_equality(gdf["a"].head(3), gdf["a"][:3])
check_frame_series_equality(gdf["a"].head(-2), gdf["a"][:-2])
check_frame_series_equality(gdf.tail(), gdf[-5:])
check_frame_series_equality(gdf.tail(3), gdf[-3:])
check_frame_series_equality(gdf.tail(-2), gdf[2:])
check_frame_series_equality(gdf.tail(0), gdf[0:0])
check_frame_series_equality(gdf["a"].tail(), gdf["a"][-5:])
check_frame_series_equality(gdf["a"].tail(3), gdf["a"][-3:])
check_frame_series_equality(gdf["a"].tail(-2), gdf["a"][2:])
def test_tail_for_string():
gdf = cudf.DataFrame()
gdf["id"] = cudf.Series(["a", "b"], dtype=np.object_)
gdf["v"] = cudf.Series([1, 2])
assert_eq(gdf.tail(3), gdf.to_pandas().tail(3))
@pytest.mark.parametrize("drop", [True, False])
def test_reset_index(pdf, gdf, drop):
assert_eq(
pdf.reset_index(drop=drop, inplace=False),
gdf.reset_index(drop=drop, inplace=False),
)
assert_eq(
pdf.x.reset_index(drop=drop, inplace=False),
gdf.x.reset_index(drop=drop, inplace=False),
)
@pytest.mark.parametrize("drop", [True, False])
def test_reset_named_index(pdf, gdf, drop):
pdf.index.name = "cudf"
gdf.index.name = "cudf"
assert_eq(
pdf.reset_index(drop=drop, inplace=False),
gdf.reset_index(drop=drop, inplace=False),
)
assert_eq(
pdf.x.reset_index(drop=drop, inplace=False),
gdf.x.reset_index(drop=drop, inplace=False),
)
@pytest.mark.parametrize("drop", [True, False])
def test_reset_index_inplace(pdf, gdf, drop):
pdf.reset_index(drop=drop, inplace=True)
gdf.reset_index(drop=drop, inplace=True)
assert_eq(pdf, gdf)
@pytest.mark.parametrize(
"data",
[
{
"a": [1, 2, 3, 4, 5],
"b": ["a", "b", "c", "d", "e"],
"c": [1.0, 2.0, 3.0, 4.0, 5.0],
}
],
)
@pytest.mark.parametrize(
"index",
[
"a",
["a", "b"],
pd.CategoricalIndex(["I", "II", "III", "IV", "V"]),
pd.Series(["h", "i", "k", "l", "m"]),
["b", pd.Index(["I", "II", "III", "IV", "V"])],
["c", [11, 12, 13, 14, 15]],
pd.MultiIndex(
levels=[
["I", "II", "III", "IV", "V"],
["one", "two", "three", "four", "five"],
],
codes=[[0, 1, 2, 3, 4], [4, 3, 2, 1, 0]],
names=["col1", "col2"],
),
pd.RangeIndex(0, 5), # corner case
[pd.Series(["h", "i", "k", "l", "m"]), pd.RangeIndex(0, 5)],
[
pd.MultiIndex(
levels=[
["I", "II", "III", "IV", "V"],
["one", "two", "three", "four", "five"],
],
codes=[[0, 1, 2, 3, 4], [4, 3, 2, 1, 0]],
names=["col1", "col2"],
),
pd.RangeIndex(0, 5),
],
],
)
@pytest.mark.parametrize("drop", [True, False])
@pytest.mark.parametrize("append", [True, False])
@pytest.mark.parametrize("inplace", [True, False])
def test_set_index(data, index, drop, append, inplace):
gdf = cudf.DataFrame(data)
pdf = gdf.to_pandas()
expected = pdf.set_index(index, inplace=inplace, drop=drop, append=append)
actual = gdf.set_index(index, inplace=inplace, drop=drop, append=append)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"data",
[
{
"a": [1, 1, 2, 2, 5],
"b": ["a", "b", "c", "d", "e"],
"c": [1.0, 2.0, 3.0, 4.0, 5.0],
}
],
)
@pytest.mark.parametrize("index", ["a", pd.Index([1, 1, 2, 2, 3])])
@pytest.mark.parametrize("verify_integrity", [True])
@pytest.mark.xfail
def test_set_index_verify_integrity(data, index, verify_integrity):
gdf = cudf.DataFrame(data)
gdf.set_index(index, verify_integrity=verify_integrity)
@pytest.mark.parametrize("drop", [True, False])
@pytest.mark.parametrize("nelem", [10, 200, 1333])
def test_set_index_multi(drop, nelem):
np.random.seed(0)
a = np.arange(nelem)
np.random.shuffle(a)
df = pd.DataFrame(
{
"a": a,
"b": np.random.randint(0, 4, size=nelem),
"c": np.random.uniform(low=0, high=4, size=nelem),
"d": np.random.choice(["green", "black", "white"], nelem),
}
)
df["e"] = df["d"].astype("category")
gdf = cudf.DataFrame.from_pandas(df)
assert_eq(gdf.set_index("a", drop=drop), gdf.set_index(["a"], drop=drop))
assert_eq(
df.set_index(["b", "c"], drop=drop),
gdf.set_index(["b", "c"], drop=drop),
)
assert_eq(
df.set_index(["d", "b"], drop=drop),
gdf.set_index(["d", "b"], drop=drop),
)
assert_eq(
df.set_index(["b", "d", "e"], drop=drop),
gdf.set_index(["b", "d", "e"], drop=drop),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_0(copy):
# TODO (ptaylor): pandas changes `int` dtype to `float64`
# when reindexing and filling new label indices with NaN
gdf = cudf.datasets.randomdata(
nrows=6,
dtypes={
"a": "category",
# 'b': int,
"c": float,
"d": str,
},
)
pdf = gdf.to_pandas()
# Validate reindex returns a copy unmodified
assert_eq(pdf.reindex(copy=True), gdf.reindex(copy=copy))
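# Pandas-only illustration of the upcast mentioned in the TODO above: when
# reindexing introduces labels that did not exist, pandas fills them with
# NaN and therefore promotes an int64 column to float64, which is why the
# integer column stays commented out of the reindex fixtures.
def _reindex_upcast_sketch():
    s = pd.Series([1, 2, 3], dtype="int64")
    r = s.reindex([0, 1, 5])
    assert r.dtype == np.dtype("float64")
    assert np.isnan(r[5])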
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_1(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as index when axis defaults to 0
assert_eq(pdf.reindex(index, copy=True), gdf.reindex(index, copy=copy))
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_2(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as index when axis=0
assert_eq(
pdf.reindex(index, axis=0, copy=True),
gdf.reindex(index, axis=0, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_3(copy):
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
    # Validate labels are used as columns when axis=1
assert_eq(
pdf.reindex(columns, axis=1, copy=True),
gdf.reindex(columns, axis=1, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_4(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as index when axis=0
assert_eq(
pdf.reindex(labels=index, axis=0, copy=True),
gdf.reindex(labels=index, axis=0, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_5(copy):
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as columns when axis=1
assert_eq(
pdf.reindex(labels=columns, axis=1, copy=True),
gdf.reindex(labels=columns, axis=1, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_6(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as index when axis='index'
assert_eq(
pdf.reindex(labels=index, axis="index", copy=True),
gdf.reindex(labels=index, axis="index", copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_7(copy):
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as columns when axis='columns'
assert_eq(
pdf.reindex(labels=columns, axis="columns", copy=True),
gdf.reindex(labels=columns, axis="columns", copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_8(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate reindexes labels when index=labels
assert_eq(
pdf.reindex(index=index, copy=True),
gdf.reindex(index=index, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_9(copy):
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate reindexes column names when columns=labels
assert_eq(
pdf.reindex(columns=columns, copy=True),
gdf.reindex(columns=columns, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_10(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate reindexes both labels and column names when
# index=index_labels and columns=column_labels
assert_eq(
pdf.reindex(index=index, columns=columns, copy=True),
gdf.reindex(index=index, columns=columns, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_change_dtype(copy):
if PANDAS_GE_110:
kwargs = {"check_freq": False}
else:
kwargs = {}
index = pd.date_range("12/29/2009", periods=10, freq="D")
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate reindexes both labels and column names when
# index=index_labels and columns=column_labels
assert_eq(
pdf.reindex(index=index, columns=columns, copy=True),
gdf.reindex(index=index, columns=columns, copy=copy),
**kwargs,
)
@pytest.mark.parametrize("copy", [True, False])
def test_series_categorical_reindex(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(nrows=6, dtypes={"a": "category"})
pdf = gdf.to_pandas()
assert_eq(pdf["a"].reindex(copy=True), gdf["a"].reindex(copy=copy))
assert_eq(
pdf["a"].reindex(index, copy=True), gdf["a"].reindex(index, copy=copy)
)
assert_eq(
pdf["a"].reindex(index=index, copy=True),
gdf["a"].reindex(index=index, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_series_float_reindex(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(nrows=6, dtypes={"c": float})
pdf = gdf.to_pandas()
assert_eq(pdf["c"].reindex(copy=True), gdf["c"].reindex(copy=copy))
assert_eq(
pdf["c"].reindex(index, copy=True), gdf["c"].reindex(index, copy=copy)
)
assert_eq(
pdf["c"].reindex(index=index, copy=True),
gdf["c"].reindex(index=index, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_series_string_reindex(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(nrows=6, dtypes={"d": str})
pdf = gdf.to_pandas()
assert_eq(pdf["d"].reindex(copy=True), gdf["d"].reindex(copy=copy))
assert_eq(
pdf["d"].reindex(index, copy=True), gdf["d"].reindex(index, copy=copy)
)
assert_eq(
pdf["d"].reindex(index=index, copy=True),
gdf["d"].reindex(index=index, copy=copy),
)
def test_to_frame(pdf, gdf):
assert_eq(pdf.x.to_frame(), gdf.x.to_frame())
name = "foo"
gdf_new_name = gdf.x.to_frame(name=name)
pdf_new_name = pdf.x.to_frame(name=name)
    assert_eq(gdf_new_name, pdf_new_name)
name = False
gdf_new_name = gdf.x.to_frame(name=name)
pdf_new_name = pdf.x.to_frame(name=name)
assert_eq(gdf_new_name, pdf_new_name)
assert gdf_new_name.columns[0] is name
def test_dataframe_empty_sort_index():
pdf = pd.DataFrame({"x": []})
gdf = cudf.DataFrame.from_pandas(pdf)
expect = pdf.sort_index()
got = gdf.sort_index()
assert_eq(expect, got)
@pytest.mark.parametrize("axis", [0, 1, "index", "columns"])
@pytest.mark.parametrize("ascending", [True, False])
@pytest.mark.parametrize("ignore_index", [True, False])
@pytest.mark.parametrize("inplace", [True, False])
@pytest.mark.parametrize("na_position", ["first", "last"])
def test_dataframe_sort_index(
axis, ascending, inplace, ignore_index, na_position
):
pdf = pd.DataFrame(
{"b": [1, 3, 2], "a": [1, 4, 3], "c": [4, 1, 5]},
index=[3.0, 1.0, np.nan],
)
gdf = cudf.DataFrame.from_pandas(pdf)
expected = pdf.sort_index(
axis=axis,
ascending=ascending,
ignore_index=ignore_index,
inplace=inplace,
na_position=na_position,
)
got = gdf.sort_index(
axis=axis,
ascending=ascending,
ignore_index=ignore_index,
inplace=inplace,
na_position=na_position,
)
if inplace is True:
assert_eq(pdf, gdf)
else:
assert_eq(expected, got)
@pytest.mark.parametrize("axis", [0, 1, "index", "columns"])
@pytest.mark.parametrize(
"level",
[
0,
"b",
1,
["b"],
"a",
["a", "b"],
["b", "a"],
[0, 1],
[1, 0],
[0, 2],
None,
],
)
@pytest.mark.parametrize("ascending", [True, False])
@pytest.mark.parametrize("ignore_index", [True, False])
@pytest.mark.parametrize("inplace", [True, False])
@pytest.mark.parametrize("na_position", ["first", "last"])
def test_dataframe_multiindex_sort_index(
axis, level, ascending, inplace, ignore_index, na_position
):
pdf = pd.DataFrame(
{
"b": [1.0, 3.0, np.nan],
"a": [1, 4, 3],
1: ["a", "b", "c"],
"e": [3, 1, 4],
"d": [1, 2, 8],
}
).set_index(["b", "a", 1])
gdf = cudf.DataFrame.from_pandas(pdf)
    # pandas sort_index() only gained ignore_index in v1.0, so it is not
    # passed here; the expected frame is reset below when ignore_index is set
expected = pdf.sort_index(
axis=axis,
level=level,
ascending=ascending,
inplace=inplace,
na_position=na_position,
)
got = gdf.sort_index(
axis=axis,
level=level,
ascending=ascending,
ignore_index=ignore_index,
inplace=inplace,
na_position=na_position,
)
if inplace is True:
if ignore_index is True:
pdf = pdf.reset_index(drop=True)
assert_eq(pdf, gdf)
else:
if ignore_index is True:
expected = expected.reset_index(drop=True)
assert_eq(expected, got)
@pytest.mark.parametrize("dtype", dtypes + ["category"])
def test_dataframe_0_row_dtype(dtype):
if dtype == "category":
data = pd.Series(["a", "b", "c", "d", "e"], dtype="category")
else:
data = np.array([1, 2, 3, 4, 5], dtype=dtype)
expect = cudf.DataFrame()
expect["x"] = data
expect["y"] = data
got = expect.head(0)
for col_name in got.columns:
assert expect[col_name].dtype == got[col_name].dtype
expect = cudf.Series(data)
got = expect.head(0)
assert expect.dtype == got.dtype
@pytest.mark.parametrize("nan_as_null", [True, False])
def test_series_list_nanasnull(nan_as_null):
data = [1.0, 2.0, 3.0, np.nan, None]
expect = pa.array(data, from_pandas=nan_as_null)
got = cudf.Series(data, nan_as_null=nan_as_null).to_arrow()
# Bug in Arrow 0.14.1 where NaNs aren't handled
expect = expect.cast("int64", safe=False)
got = got.cast("int64", safe=False)
assert pa.Array.equals(expect, got)
def test_column_assignment():
gdf = cudf.datasets.randomdata(
nrows=20, dtypes={"a": "category", "b": int, "c": float}
)
new_cols = ["q", "r", "s"]
gdf.columns = new_cols
assert list(gdf.columns) == new_cols
def test_select_dtype():
gdf = cudf.datasets.randomdata(
nrows=20, dtypes={"a": "category", "b": int, "c": float, "d": str}
)
pdf = gdf.to_pandas()
assert_eq(pdf.select_dtypes("float64"), gdf.select_dtypes("float64"))
assert_eq(pdf.select_dtypes(np.float64), gdf.select_dtypes(np.float64))
assert_eq(
pdf.select_dtypes(include=["float64"]),
gdf.select_dtypes(include=["float64"]),
)
assert_eq(
pdf.select_dtypes(include=["object", "int", "category"]),
gdf.select_dtypes(include=["object", "int", "category"]),
)
assert_eq(
pdf.select_dtypes(include=["int64", "float64"]),
gdf.select_dtypes(include=["int64", "float64"]),
)
assert_eq(
pdf.select_dtypes(include=np.number),
gdf.select_dtypes(include=np.number),
)
assert_eq(
pdf.select_dtypes(include=[np.int64, np.float64]),
gdf.select_dtypes(include=[np.int64, np.float64]),
)
assert_eq(
pdf.select_dtypes(include=["category"]),
gdf.select_dtypes(include=["category"]),
)
assert_eq(
pdf.select_dtypes(exclude=np.number),
gdf.select_dtypes(exclude=np.number),
)
assert_exceptions_equal(
lfunc=pdf.select_dtypes,
rfunc=gdf.select_dtypes,
lfunc_args_and_kwargs=([], {"includes": ["Foo"]}),
rfunc_args_and_kwargs=([], {"includes": ["Foo"]}),
)
assert_exceptions_equal(
lfunc=pdf.select_dtypes,
rfunc=gdf.select_dtypes,
lfunc_args_and_kwargs=(
[],
{"exclude": np.number, "include": np.number},
),
rfunc_args_and_kwargs=(
[],
{"exclude": np.number, "include": np.number},
),
)
gdf = cudf.DataFrame(
{"A": [3, 4, 5], "C": [1, 2, 3], "D": ["a", "b", "c"]}
)
pdf = gdf.to_pandas()
assert_eq(
pdf.select_dtypes(include=["object", "int", "category"]),
gdf.select_dtypes(include=["object", "int", "category"]),
)
assert_eq(
pdf.select_dtypes(include=["object"], exclude=["category"]),
gdf.select_dtypes(include=["object"], exclude=["category"]),
)
gdf = cudf.DataFrame({"a": range(10), "b": range(10, 20)})
pdf = gdf.to_pandas()
assert_eq(
pdf.select_dtypes(include=["category"]),
gdf.select_dtypes(include=["category"]),
)
assert_eq(
pdf.select_dtypes(include=["float"]),
gdf.select_dtypes(include=["float"]),
)
assert_eq(
pdf.select_dtypes(include=["object"]),
gdf.select_dtypes(include=["object"]),
)
assert_eq(
pdf.select_dtypes(include=["int"]), gdf.select_dtypes(include=["int"])
)
assert_eq(
pdf.select_dtypes(exclude=["float"]),
gdf.select_dtypes(exclude=["float"]),
)
assert_eq(
pdf.select_dtypes(exclude=["object"]),
gdf.select_dtypes(exclude=["object"]),
)
assert_eq(
pdf.select_dtypes(include=["int"], exclude=["object"]),
gdf.select_dtypes(include=["int"], exclude=["object"]),
)
assert_exceptions_equal(
lfunc=pdf.select_dtypes, rfunc=gdf.select_dtypes,
)
gdf = cudf.DataFrame(
{"a": cudf.Series([], dtype="int"), "b": cudf.Series([], dtype="str")}
)
pdf = gdf.to_pandas()
assert_eq(
pdf.select_dtypes(exclude=["object"]),
gdf.select_dtypes(exclude=["object"]),
)
assert_eq(
pdf.select_dtypes(include=["int"], exclude=["object"]),
gdf.select_dtypes(include=["int"], exclude=["object"]),
)
def test_select_dtype_datetime():
gdf = cudf.datasets.timeseries(
start="2000-01-01", end="2000-01-02", freq="3600s", dtypes={"x": int}
)
gdf = gdf.reset_index()
pdf = gdf.to_pandas()
assert_eq(pdf.select_dtypes("datetime64"), gdf.select_dtypes("datetime64"))
assert_eq(
pdf.select_dtypes(np.dtype("datetime64")),
gdf.select_dtypes(np.dtype("datetime64")),
)
assert_eq(
pdf.select_dtypes(include="datetime64"),
gdf.select_dtypes(include="datetime64"),
)
def test_select_dtype_datetime_with_frequency():
gdf = cudf.datasets.timeseries(
start="2000-01-01", end="2000-01-02", freq="3600s", dtypes={"x": int}
)
gdf = gdf.reset_index()
pdf = gdf.to_pandas()
assert_exceptions_equal(
pdf.select_dtypes,
gdf.select_dtypes,
(["datetime64[ms]"],),
(["datetime64[ms]"],),
)
def test_array_ufunc():
gdf = cudf.DataFrame({"x": [2, 3, 4.0], "y": [9.0, 2.5, 1.1]})
pdf = gdf.to_pandas()
assert_eq(np.sqrt(gdf), np.sqrt(pdf))
assert_eq(np.sqrt(gdf.x), np.sqrt(pdf.x))
@pytest.mark.parametrize("nan_value", [-5, -5.0, 0, 5, 5.0, None, "pandas"])
def test_series_to_gpu_array(nan_value):
s = cudf.Series([0, 1, None, 3])
np.testing.assert_array_equal(
s.to_array(nan_value), s.to_gpu_array(nan_value).copy_to_host()
)
def test_dataframe_describe_exclude():
np.random.seed(12)
data_length = 10000
df = cudf.DataFrame()
df["x"] = np.random.normal(10, 1, data_length)
df["x"] = df.x.astype("int64")
df["y"] = np.random.normal(10, 1, data_length)
pdf = df.to_pandas()
gdf_results = df.describe(exclude=["float"])
pdf_results = pdf.describe(exclude=["float"])
assert_eq(gdf_results, pdf_results)
def test_dataframe_describe_include():
np.random.seed(12)
data_length = 10000
df = cudf.DataFrame()
df["x"] = np.random.normal(10, 1, data_length)
df["x"] = df.x.astype("int64")
df["y"] = np.random.normal(10, 1, data_length)
pdf = df.to_pandas()
gdf_results = df.describe(include=["int"])
pdf_results = pdf.describe(include=["int"])
assert_eq(gdf_results, pdf_results)
def test_dataframe_describe_default():
np.random.seed(12)
data_length = 10000
df = cudf.DataFrame()
df["x"] = np.random.normal(10, 1, data_length)
df["y"] = np.random.normal(10, 1, data_length)
pdf = df.to_pandas()
gdf_results = df.describe()
pdf_results = pdf.describe()
assert_eq(pdf_results, gdf_results)
def test_series_describe_include_all():
np.random.seed(12)
data_length = 10000
df = cudf.DataFrame()
df["x"] = np.random.normal(10, 1, data_length)
df["x"] = df.x.astype("int64")
df["y"] = np.random.normal(10, 1, data_length)
df["animal"] = np.random.choice(["dog", "cat", "bird"], data_length)
pdf = df.to_pandas()
gdf_results = df.describe(include="all")
pdf_results = pdf.describe(include="all")
assert_eq(gdf_results[["x", "y"]], pdf_results[["x", "y"]])
assert_eq(gdf_results.index, pdf_results.index)
assert_eq(gdf_results.columns, pdf_results.columns)
assert_eq(
gdf_results[["animal"]].fillna(-1).astype("str"),
pdf_results[["animal"]].fillna(-1).astype("str"),
)
def test_dataframe_describe_percentiles():
np.random.seed(12)
data_length = 10000
sample_percentiles = [0.0, 0.1, 0.33, 0.84, 0.4, 0.99]
df = cudf.DataFrame()
df["x"] = np.random.normal(10, 1, data_length)
df["y"] = np.random.normal(10, 1, data_length)
pdf = df.to_pandas()
gdf_results = df.describe(percentiles=sample_percentiles)
pdf_results = pdf.describe(percentiles=sample_percentiles)
assert_eq(pdf_results, gdf_results)
def test_get_numeric_data():
pdf = pd.DataFrame(
{"x": [1, 2, 3], "y": [1.0, 2.0, 3.0], "z": ["a", "b", "c"]}
)
gdf = cudf.from_pandas(pdf)
assert_eq(pdf._get_numeric_data(), gdf._get_numeric_data())
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize("period", [-1, -5, -10, -20, 0, 1, 5, 10, 20])
@pytest.mark.parametrize("data_empty", [False, True])
def test_shift(dtype, period, data_empty):
if data_empty:
data = None
else:
if dtype == np.int8:
# to keep data in range
data = gen_rand(dtype, 100000, low=-2, high=2)
else:
data = gen_rand(dtype, 100000)
gdf = cudf.DataFrame({"a": cudf.Series(data, dtype=dtype)})
pdf = pd.DataFrame({"a": pd.Series(data, dtype=dtype)})
shifted_outcome = gdf.a.shift(period).fillna(0)
expected_outcome = pdf.a.shift(period).fillna(0).astype(dtype)
if data_empty:
assert_eq(shifted_outcome, expected_outcome, check_index_type=False)
else:
assert_eq(shifted_outcome, expected_outcome)
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize("period", [-1, -5, -10, -20, 0, 1, 5, 10, 20])
@pytest.mark.parametrize("data_empty", [False, True])
def test_diff(dtype, period, data_empty):
if data_empty:
data = None
else:
if dtype == np.int8:
# to keep data in range
data = gen_rand(dtype, 100000, low=-2, high=2)
else:
data = gen_rand(dtype, 100000)
gdf = cudf.DataFrame({"a": cudf.Series(data, dtype=dtype)})
pdf = pd.DataFrame({"a": pd.Series(data, dtype=dtype)})
expected_outcome = pdf.a.diff(period)
diffed_outcome = gdf.a.diff(period).astype(expected_outcome.dtype)
if data_empty:
assert_eq(diffed_outcome, expected_outcome, check_index_type=False)
else:
assert_eq(diffed_outcome, expected_outcome)
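# Pandas-only aside tying the two tests above together: diff(n) is simply
# the element-wise difference between a series and itself shifted by n.
def _diff_equals_shift_subtract_sketch():
    s = pd.Series([1.0, 4.0, 9.0, 16.0])
    pd.testing.assert_series_equal(s.diff(2), s - s.shift(2))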
@pytest.mark.parametrize("df", _dataframe_na_data())
@pytest.mark.parametrize("nan_as_null", [True, False, None])
def test_dataframe_isnull_isna(df, nan_as_null):
gdf = cudf.DataFrame.from_pandas(df, nan_as_null=nan_as_null)
assert_eq(df.isnull(), gdf.isnull())
assert_eq(df.isna(), gdf.isna())
# Test individual columns
for col in df:
assert_eq(df[col].isnull(), gdf[col].isnull())
assert_eq(df[col].isna(), gdf[col].isna())
@pytest.mark.parametrize("df", _dataframe_na_data())
@pytest.mark.parametrize("nan_as_null", [True, False, None])
def test_dataframe_notna_notnull(df, nan_as_null):
gdf = cudf.DataFrame.from_pandas(df, nan_as_null=nan_as_null)
assert_eq(df.notnull(), gdf.notnull())
assert_eq(df.notna(), gdf.notna())
# Test individual columns
for col in df:
assert_eq(df[col].notnull(), gdf[col].notnull())
assert_eq(df[col].notna(), gdf[col].notna())
def test_ndim():
pdf = pd.DataFrame({"x": range(5), "y": range(5, 10)})
gdf = cudf.DataFrame.from_pandas(pdf)
assert pdf.ndim == gdf.ndim
assert pdf.x.ndim == gdf.x.ndim
s = pd.Series(dtype="float64")
gs = cudf.Series()
assert s.ndim == gs.ndim
@pytest.mark.parametrize(
"decimals",
[
-3,
0,
5,
pd.Series([1, 4, 3, -6], index=["w", "x", "y", "z"]),
cudf.Series([-4, -2, 12], index=["x", "y", "z"]),
{"w": -1, "x": 15, "y": 2},
],
)
def test_dataframe_round(decimals):
pdf = pd.DataFrame(
{
"w": np.arange(0.5, 10.5, 1),
"x": np.random.normal(-100, 100, 10),
"y": np.array(
[
14.123,
2.343,
np.nan,
0.0,
-8.302,
np.nan,
94.313,
-112.236,
-8.029,
np.nan,
]
),
"z": np.repeat([-0.6459412758761901], 10),
}
)
gdf = cudf.DataFrame.from_pandas(pdf)
if isinstance(decimals, cudf.Series):
pdecimals = decimals.to_pandas()
else:
pdecimals = decimals
result = gdf.round(decimals)
expected = pdf.round(pdecimals)
assert_eq(result, expected)
# with nulls, maintaining existing null mask
for c in pdf.columns:
arr = pdf[c].to_numpy().astype("float64") # for pandas nulls
arr.ravel()[np.random.choice(10, 5, replace=False)] = np.nan
pdf[c] = gdf[c] = arr
result = gdf.round(decimals)
expected = pdf.round(pdecimals)
assert_eq(result, expected)
    # the rounded result should carry over the null mask of the input
    for c in gdf.columns:
        assert np.array_equal(
            gdf[c].isnull().to_array(), result[c].isnull().to_array()
        )
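# Pandas-only illustration of the per-column decimals exercised above: a
# dict (or Series) of decimals rounds each listed column independently and
# leaves unlisted columns untouched.
def _round_per_column_sketch():
    df = pd.DataFrame({"w": [1.234, 5.678], "x": [1.234, 5.678]})
    out = df.round({"w": 1})
    np.testing.assert_allclose(out["w"], [1.2, 5.7])
    np.testing.assert_allclose(out["x"], [1.234, 5.678])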
@pytest.mark.parametrize(
"data",
[
[0, 1, 2, 3],
[-2, -1, 2, 3, 5],
[-2, -1, 0, 3, 5],
[True, False, False],
[True],
[False],
[],
[True, None, False],
[True, True, None],
[None, None],
[[0, 5], [1, 6], [2, 7], [3, 8], [4, 9]],
[[1, True], [2, False], [3, False]],
pytest.param(
[["a", True], ["b", False], ["c", False]],
marks=[
pytest.mark.xfail(
reason="NotImplementedError: all does not "
"support columns of object dtype."
)
],
),
],
)
def test_all(data):
    # Pandas treats `None` in object-dtype columns as True for some reason,
    # so replace it with `False` before comparing
if np.array(data).ndim <= 1:
pdata = cudf.utils.utils._create_pandas_series(data=data).replace(
[None], False
)
gdata = cudf.Series.from_pandas(pdata)
else:
pdata = pd.DataFrame(data, columns=["a", "b"]).replace([None], False)
gdata = cudf.DataFrame.from_pandas(pdata)
# test bool_only
if pdata["b"].dtype == "bool":
got = gdata.all(bool_only=True)
expected = pdata.all(bool_only=True)
assert_eq(got, expected)
else:
with pytest.raises(NotImplementedError):
gdata.all(bool_only=False)
with pytest.raises(NotImplementedError):
gdata.all(level="a")
got = gdata.all()
expected = pdata.all()
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
[0, 1, 2, 3],
[-2, -1, 2, 3, 5],
[-2, -1, 0, 3, 5],
[0, 0, 0, 0, 0],
[0, 0, None, 0],
[True, False, False],
[True],
[False],
[],
[True, None, False],
[True, True, None],
[None, None],
[[0, 5], [1, 6], [2, 7], [3, 8], [4, 9]],
[[1, True], [2, False], [3, False]],
pytest.param(
[["a", True], ["b", False], ["c", False]],
marks=[
pytest.mark.xfail(
reason="NotImplementedError: any does not "
"support columns of object dtype."
)
],
),
],
)
@pytest.mark.parametrize("axis", [0, 1])
def test_any(data, axis):
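    # Series input: axis=1 is unsupported and must raise. DataFrame input also
    # exercises bool_only and the unsupported `level` keyword.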
if np.array(data).ndim <= 1:
pdata = cudf.utils.utils._create_pandas_series(data=data)
gdata = cudf.Series.from_pandas(pdata)
if axis == 1:
with pytest.raises(NotImplementedError):
gdata.any(axis=axis)
else:
got = gdata.any(axis=axis)
expected = pdata.any(axis=axis)
assert_eq(got, expected)
else:
pdata = pd.DataFrame(data, columns=["a", "b"])
gdata = cudf.DataFrame.from_pandas(pdata)
# test bool_only
if pdata["b"].dtype == "bool":
got = gdata.any(bool_only=True)
expected = pdata.any(bool_only=True)
assert_eq(got, expected)
else:
with pytest.raises(NotImplementedError):
gdata.any(bool_only=False)
with pytest.raises(NotImplementedError):
gdata.any(level="a")
got = gdata.any(axis=axis)
expected = pdata.any(axis=axis)
assert_eq(got, expected)
@pytest.mark.parametrize("axis", [0, 1])
def test_empty_dataframe_any(axis):
pdf = pd.DataFrame({}, columns=["a", "b"])
gdf = cudf.DataFrame.from_pandas(pdf)
got = gdf.any(axis=axis)
expected = pdf.any(axis=axis)
assert_eq(got, expected, check_index_type=False)
@pytest.mark.parametrize("indexed", [False, True])
def test_dataframe_sizeof(indexed):
rows = int(1e6)
    index = list(range(rows)) if indexed else None
gdf = cudf.DataFrame({"A": [8] * rows, "B": [32] * rows}, index=index)
    # The frame's total footprint should equal the index plus all of the
    # column buffers.
    cols_sizeof = sum(c.__sizeof__() for c in gdf._data.columns)
    assert gdf.__sizeof__() == (gdf._index.__sizeof__() + cols_sizeof)
@pytest.mark.parametrize("a", [[], ["123"]])
@pytest.mark.parametrize("b", ["123", ["123"]])
@pytest.mark.parametrize(
"misc_data",
["123", ["123"] * 20, 123, [1, 2, 0.8, 0.9] * 50, 0.9, 0.00001],
)
@pytest.mark.parametrize("non_list_data", [123, "abc", "zyx", "rapids", 0.8])
def test_create_dataframe_cols_empty_data(a, b, misc_data, non_list_data):
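    # Assigning scalars or list-likes as a new column should behave the same
    # in cudf and pandas, even when the frame starts out empty.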
expected = pd.DataFrame({"a": a})
actual = cudf.DataFrame.from_pandas(expected)
expected["b"] = b
actual["b"] = b
assert_eq(actual, expected)
expected = pd.DataFrame({"a": []})
actual = cudf.DataFrame.from_pandas(expected)
expected["b"] = misc_data
actual["b"] = misc_data
assert_eq(actual, expected)
expected = pd.DataFrame({"a": a})
actual = cudf.DataFrame.from_pandas(expected)
expected["b"] = non_list_data
actual["b"] = non_list_data
assert_eq(actual, expected)
def test_empty_dataframe_describe():
pdf = pd.DataFrame({"a": [], "b": []})
gdf = cudf.from_pandas(pdf)
expected = pdf.describe()
actual = gdf.describe()
assert_eq(expected, actual)
def test_as_column_types():
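    # as_column should default empty input to float64 and honor an explicit
    # dtype, mirroring the equivalent pandas Series construction.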
col = column.as_column(cudf.Series([]))
assert_eq(col.dtype, np.dtype("float64"))
gds = cudf.Series(col)
pds = pd.Series(pd.Series([], dtype="float64"))
assert_eq(pds, gds)
col = column.as_column(cudf.Series([]), dtype="float32")
assert_eq(col.dtype, np.dtype("float32"))
gds = cudf.Series(col)
pds = pd.Series(pd.Series([], dtype="float32"))
assert_eq(pds, gds)
col = column.as_column(cudf.Series([]), dtype="str")
assert_eq(col.dtype, np.dtype("object"))
gds = cudf.Series(col)
pds = pd.Series(pd.Series([], dtype="str"))
assert_eq(pds, gds)
col = column.as_column(cudf.Series([]), dtype="object")
assert_eq(col.dtype, np.dtype("object"))
gds = cudf.Series(col)
pds = pd.Series(pd.Series([], dtype="object"))
assert_eq(pds, gds)
pds = pd.Series(np.array([1, 2, 3]), dtype="float32")
gds = cudf.Series(column.as_column(np.array([1, 2, 3]), dtype="float32"))
assert_eq(pds, gds)
pds = pd.Series([1, 2, 3], dtype="float32")
gds = cudf.Series([1, 2, 3], dtype="float32")
assert_eq(pds, gds)
pds = pd.Series([], dtype="float64")
gds = cudf.Series(column.as_column(pds))
assert_eq(pds, gds)
pds = pd.Series([1, 2, 4], dtype="int64")
gds = cudf.Series(column.as_column(cudf.Series([1, 2, 4]), dtype="int64"))
assert_eq(pds, gds)
pds = pd.Series([1.2, 18.0, 9.0], dtype="float32")
gds = cudf.Series(
column.as_column(cudf.Series([1.2, 18.0, 9.0]), dtype="float32")
)
assert_eq(pds, gds)
pds = pd.Series([1.2, 18.0, 9.0], dtype="str")
gds = cudf.Series(
column.as_column(cudf.Series([1.2, 18.0, 9.0]), dtype="str")
)
assert_eq(pds, gds)
pds = pd.Series(pd.Index(["1", "18", "9"]), dtype="int")
gds = cudf.Series(
cudf.core.index.StringIndex(["1", "18", "9"]), dtype="int"
)
assert_eq(pds, gds)
def test_one_row_head():
gdf = cudf.DataFrame({"name": ["carl"], "score": [100]}, index=[123])
pdf = gdf.to_pandas()
head_gdf = gdf.head()
head_pdf = pdf.head()
assert_eq(head_pdf, head_gdf)
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize("as_dtype", NUMERIC_TYPES)
def test_series_astype_numeric_to_numeric(dtype, as_dtype):
psr = pd.Series([1, 2, 4, 3], dtype=dtype)
gsr = cudf.from_pandas(psr)
assert_eq(psr.astype(as_dtype), gsr.astype(as_dtype))
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize("as_dtype", NUMERIC_TYPES)
def test_series_astype_numeric_to_numeric_nulls(dtype, as_dtype):
data = [1, 2, None, 3]
sr = cudf.Series(data, dtype=dtype)
got = sr.astype(as_dtype)
expect = cudf.Series([1, 2, None, 3], dtype=as_dtype)
assert_eq(expect, got)
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize(
"as_dtype",
[
"str",
"category",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
],
)
def test_series_astype_numeric_to_other(dtype, as_dtype):
psr = pd.Series([1, 2, 3], dtype=dtype)
gsr = cudf.from_pandas(psr)
assert_eq(psr.astype(as_dtype), gsr.astype(as_dtype))
@pytest.mark.parametrize(
"as_dtype",
[
"str",
"int32",
"uint32",
"float32",
"category",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
],
)
def test_series_astype_string_to_other(as_dtype):
if "datetime64" in as_dtype:
data = ["2001-01-01", "2002-02-02", "2000-01-05"]
else:
data = ["1", "2", "3"]
psr = pd.Series(data)
gsr = cudf.from_pandas(psr)
assert_eq(psr.astype(as_dtype), gsr.astype(as_dtype))
@pytest.mark.parametrize(
"as_dtype",
[
"category",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
],
)
def test_series_astype_datetime_to_other(as_dtype):
data = ["2001-01-01", "2002-02-02", "2001-01-05"]
psr = pd.Series(data)
gsr = cudf.from_pandas(psr)
assert_eq(psr.astype(as_dtype), gsr.astype(as_dtype))
@pytest.mark.parametrize(
"inp",
[
("datetime64[ns]", "2011-01-01 00:00:00.000000000"),
("datetime64[us]", "2011-01-01 00:00:00.000000"),
("datetime64[ms]", "2011-01-01 00:00:00.000"),
("datetime64[s]", "2011-01-01 00:00:00"),
],
)
def test_series_astype_datetime_to_string(inp):
dtype, expect = inp
base_date = "2011-01-01"
sr = cudf.Series([base_date], dtype=dtype)
got = sr.astype(str)[0]
assert expect == got
@pytest.mark.parametrize(
"as_dtype",
[
"int32",
"uint32",
"float32",
"category",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
"str",
],
)
def test_series_astype_categorical_to_other(as_dtype):
if "datetime64" in as_dtype:
data = ["2001-01-01", "2002-02-02", "2000-01-05", "2001-01-01"]
else:
data = [1, 2, 3, 1]
psr = pd.Series(data, dtype="category")
gsr = cudf.from_pandas(psr)
assert_eq(psr.astype(as_dtype), gsr.astype(as_dtype))
@pytest.mark.parametrize("ordered", [True, False])
def test_series_astype_to_categorical_ordered(ordered):
psr = pd.Series([1, 2, 3, 1], dtype="category")
gsr = cudf.from_pandas(psr)
ordered_dtype_pd = pd.CategoricalDtype(
categories=[1, 2, 3], ordered=ordered
)
ordered_dtype_gd = cudf.CategoricalDtype.from_pandas(ordered_dtype_pd)
assert_eq(
psr.astype("int32").astype(ordered_dtype_pd).astype("int32"),
gsr.astype("int32").astype(ordered_dtype_gd).astype("int32"),
)
@pytest.mark.parametrize("ordered", [True, False])
def test_series_astype_cat_ordered_to_unordered(ordered):
pd_dtype = pd.CategoricalDtype(categories=[1, 2, 3], ordered=ordered)
pd_to_dtype = pd.CategoricalDtype(
categories=[1, 2, 3], ordered=not ordered
)
gd_dtype = cudf.CategoricalDtype.from_pandas(pd_dtype)
gd_to_dtype = cudf.CategoricalDtype.from_pandas(pd_to_dtype)
psr = pd.Series([1, 2, 3], dtype=pd_dtype)
gsr = cudf.Series([1, 2, 3], dtype=gd_dtype)
expect = psr.astype(pd_to_dtype)
got = gsr.astype(gd_to_dtype)
assert_eq(expect, got)
def test_series_astype_null_cases():
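    # astype must preserve nulls across numeric, categorical, string, and
    # datetime conversions.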
data = [1, 2, None, 3]
# numerical to other
assert_eq(cudf.Series(data, dtype="str"), cudf.Series(data).astype("str"))
assert_eq(
cudf.Series(data, dtype="category"),
cudf.Series(data).astype("category"),
)
assert_eq(
cudf.Series(data, dtype="float32"),
cudf.Series(data, dtype="int32").astype("float32"),
)
assert_eq(
cudf.Series(data, dtype="float32"),
cudf.Series(data, dtype="uint32").astype("float32"),
)
assert_eq(
cudf.Series(data, dtype="datetime64[ms]"),
cudf.Series(data).astype("datetime64[ms]"),
)
# categorical to other
assert_eq(
cudf.Series(data, dtype="str"),
cudf.Series(data, dtype="category").astype("str"),
)
assert_eq(
cudf.Series(data, dtype="float32"),
cudf.Series(data, dtype="category").astype("float32"),
)
assert_eq(
cudf.Series(data, dtype="datetime64[ms]"),
cudf.Series(data, dtype="category").astype("datetime64[ms]"),
)
# string to other
assert_eq(
cudf.Series([1, 2, None, 3], dtype="int32"),
cudf.Series(["1", "2", None, "3"]).astype("int32"),
)
assert_eq(
cudf.Series(
["2001-01-01", "2001-02-01", None, "2001-03-01"],
dtype="datetime64[ms]",
),
cudf.Series(["2001-01-01", "2001-02-01", None, "2001-03-01"]).astype(
"datetime64[ms]"
),
)
assert_eq(
cudf.Series(["a", "b", "c", None], dtype="category").to_pandas(),
cudf.Series(["a", "b", "c", None]).astype("category").to_pandas(),
)
# datetime to other
data = [
"2001-01-01 00:00:00.000000",
"2001-02-01 00:00:00.000000",
None,
"2001-03-01 00:00:00.000000",
]
assert_eq(
cudf.Series(data),
cudf.Series(data, dtype="datetime64[us]").astype("str"),
)
assert_eq(
pd.Series(data, dtype="datetime64[ns]").astype("category"),
cudf.from_pandas(pd.Series(data, dtype="datetime64[ns]")).astype(
"category"
),
)
def test_series_astype_null_categorical():
sr = cudf.Series([None, None, None], dtype="category")
expect = cudf.Series([None, None, None], dtype="int32")
got = sr.astype("int32")
assert_eq(expect, got)
@pytest.mark.parametrize(
"data",
[
(
pd.Series([3, 3.0]),
pd.Series([2.3, 3.9]),
pd.Series([1.5, 3.9]),
pd.Series([1.0, 2]),
),
[
pd.Series([3, 3.0]),
pd.Series([2.3, 3.9]),
pd.Series([1.5, 3.9]),
pd.Series([1.0, 2]),
],
],
)
def test_create_dataframe_from_list_like(data):
pdf = pd.DataFrame(data, index=["count", "mean", "std", "min"])
gdf = cudf.DataFrame(data, index=["count", "mean", "std", "min"])
assert_eq(pdf, gdf)
pdf = pd.DataFrame(data)
gdf = cudf.DataFrame(data)
assert_eq(pdf, gdf)
def test_create_dataframe_column():
pdf = pd.DataFrame(columns=["a", "b", "c"], index=["A", "Z", "X"])
gdf = cudf.DataFrame(columns=["a", "b", "c"], index=["A", "Z", "X"])
assert_eq(pdf, gdf)
pdf = pd.DataFrame(
{"a": [1, 2, 3], "b": [2, 3, 5]},
columns=["a", "b", "c"],
index=["A", "Z", "X"],
)
gdf = cudf.DataFrame(
{"a": [1, 2, 3], "b": [2, 3, 5]},
columns=["a", "b", "c"],
index=["A", "Z", "X"],
)
assert_eq(pdf, gdf)
@pytest.mark.parametrize(
"data",
[
[1, 2, 4],
[],
[5.0, 7.0, 8.0],
pd.Categorical(["a", "b", "c"]),
["m", "a", "d", "v"],
],
)
def test_series_values_host_property(data):
pds = cudf.utils.utils._create_pandas_series(data=data)
gds = cudf.Series(data)
np.testing.assert_array_equal(pds.values, gds.values_host)
@pytest.mark.parametrize(
"data",
[
[1, 2, 4],
[],
[5.0, 7.0, 8.0],
pytest.param(
pd.Categorical(["a", "b", "c"]),
marks=pytest.mark.xfail(raises=NotImplementedError),
),
pytest.param(
["m", "a", "d", "v"],
marks=pytest.mark.xfail(raises=NotImplementedError),
),
],
)
def test_series_values_property(data):
pds = cudf.utils.utils._create_pandas_series(data=data)
gds = cudf.Series(data)
gds_vals = gds.values
assert isinstance(gds_vals, cupy.ndarray)
np.testing.assert_array_equal(gds_vals.get(), pds.values)
@pytest.mark.parametrize(
"data",
[
{"A": [1, 2, 3], "B": [4, 5, 6]},
{"A": [1.0, 2.0, 3.0], "B": [4.0, 5.0, 6.0]},
{"A": [1, 2, 3], "B": [1.0, 2.0, 3.0]},
{"A": np.float32(np.arange(3)), "B": np.float64(np.arange(3))},
pytest.param(
{"A": [1, None, 3], "B": [1, 2, None]},
marks=pytest.mark.xfail(
reason="Nulls not supported by as_gpu_matrix"
),
),
pytest.param(
{"A": [None, None, None], "B": [None, None, None]},
marks=pytest.mark.xfail(
reason="Nulls not supported by as_gpu_matrix"
),
),
pytest.param(
{"A": [], "B": []},
marks=pytest.mark.xfail(reason="Requires at least 1 row"),
),
pytest.param(
{"A": [1, 2, 3], "B": ["a", "b", "c"]},
marks=pytest.mark.xfail(
reason="str or categorical not supported by as_gpu_matrix"
),
),
pytest.param(
{"A": pd.Categorical(["a", "b", "c"]), "B": ["d", "e", "f"]},
marks=pytest.mark.xfail(
reason="str or categorical not supported by as_gpu_matrix"
),
),
],
)
def test_df_values_property(data):
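    # DataFrame.values is a device (cupy) matrix; copy it to host with .get()
    # before comparing against the pandas ndarray.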
pdf = pd.DataFrame.from_dict(data)
gdf = cudf.DataFrame.from_pandas(pdf)
pmtr = pdf.values
gmtr = gdf.values.get()
np.testing.assert_array_equal(pmtr, gmtr)
def test_value_counts():
pdf = pd.DataFrame(
{
"numeric": [1, 2, 3, 4, 5, 6, 1, 2, 4] * 10,
"alpha": ["u", "h", "d", "a", "m", "u", "h", "d", "a"] * 10,
}
)
gdf = cudf.DataFrame(
{
"numeric": [1, 2, 3, 4, 5, 6, 1, 2, 4] * 10,
"alpha": ["u", "h", "d", "a", "m", "u", "h", "d", "a"] * 10,
}
)
assert_eq(
pdf.numeric.value_counts().sort_index(),
gdf.numeric.value_counts().sort_index(),
check_dtype=False,
)
assert_eq(
pdf.alpha.value_counts().sort_index(),
gdf.alpha.value_counts().sort_index(),
check_dtype=False,
)
@pytest.mark.parametrize(
"data",
[
[],
[0, 12, 14],
[0, 14, 12, 12, 3, 10, 12, 14],
np.random.randint(-100, 100, 200),
pd.Series([0.0, 1.0, None, 10.0]),
[None, None, None, None],
[np.nan, None, -1, 2, 3],
],
)
@pytest.mark.parametrize(
"values",
[
np.random.randint(-100, 100, 10),
[],
[np.nan, None, -1, 2, 3],
[1.0, 12.0, None, None, 120],
[0, 14, 12, 12, 3, 10, 12, 14, None],
[None, None, None],
["0", "12", "14"],
["0", "12", "14", "a"],
],
)
def test_isin_numeric(data, values):
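    # isin should match pandas even when NaN is kept as NaN
    # (nan_as_null=False) instead of being converted to a null.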
index = np.random.randint(0, 100, len(data))
psr = cudf.utils.utils._create_pandas_series(data=data, index=index)
gsr = cudf.Series.from_pandas(psr, nan_as_null=False)
expected = psr.isin(values)
got = gsr.isin(values)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
[],
pd.Series(
["2018-01-01", "2019-04-03", None, "2019-12-30"],
dtype="datetime64[ns]",
),
pd.Series(
[
"2018-01-01",
"2019-04-03",
None,
"2019-12-30",
"2018-01-01",
"2018-01-01",
],
dtype="datetime64[ns]",
),
],
)
@pytest.mark.parametrize(
"values",
[
[],
[1514764800000000000, 1577664000000000000],
[
1514764800000000000,
1577664000000000000,
1577664000000000000,
1577664000000000000,
1514764800000000000,
],
["2019-04-03", "2019-12-30", "2012-01-01"],
[
"2012-01-01",
"2012-01-01",
"2012-01-01",
"2019-04-03",
"2019-12-30",
"2012-01-01",
],
],
)
def test_isin_datetime(data, values):
psr = cudf.utils.utils._create_pandas_series(data=data)
gsr = cudf.Series.from_pandas(psr)
got = gsr.isin(values)
expected = psr.isin(values)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
[],
pd.Series(["this", "is", None, "a", "test"]),
pd.Series(["test", "this", "test", "is", None, "test", "a", "test"]),
pd.Series(["0", "12", "14"]),
],
)
@pytest.mark.parametrize(
"values",
[
[],
["this", "is"],
[None, None, None],
["12", "14", "19"],
pytest.param(
[12, 14, 19],
marks=pytest.mark.xfail(
not PANDAS_GE_120,
reason="pandas's failure here seems like a bug(in < 1.2) "
"given the reverse succeeds",
),
),
["is", "this", "is", "this", "is"],
],
)
def test_isin_string(data, values):
psr = cudf.utils.utils._create_pandas_series(data=data)
gsr = cudf.Series.from_pandas(psr)
got = gsr.isin(values)
expected = psr.isin(values)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
[],
pd.Series(["a", "b", "c", "c", "c", "d", "e"], dtype="category"),
pd.Series(["a", "b", None, "c", "d", "e"], dtype="category"),
pd.Series([0, 3, 10, 12], dtype="category"),
pd.Series([0, 3, 10, 12, 0, 10, 3, 0, 0, 3, 3], dtype="category"),
],
)
@pytest.mark.parametrize(
"values",
[
[],
["a", "b", None, "f", "words"],
["0", "12", None, "14"],
[0, 10, 12, None, 39, 40, 1000],
[0, 0, 0, 0, 3, 3, 3, None, 1, 2, 3],
],
)
def test_isin_categorical(data, values):
psr = cudf.utils.utils._create_pandas_series(data=data)
gsr = cudf.Series.from_pandas(psr)
got = gsr.isin(values)
expected = psr.isin(values)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
[],
pd.Series(
["this", "is", None, "a", "test"], index=["a", "b", "c", "d", "e"]
),
pd.Series([0, 15, 10], index=[0, None, 9]),
pd.Series(
range(25),
index=pd.date_range(
start="2019-01-01", end="2019-01-02", freq="H"
),
),
],
)
@pytest.mark.parametrize(
"values",
[
[],
["this", "is"],
[0, 19, 13],
["2019-01-01 04:00:00", "2019-01-01 06:00:00", "2018-03-02"],
],
)
def test_isin_index(data, values):
psr = cudf.utils.utils._create_pandas_series(data=data)
gsr = cudf.Series.from_pandas(psr)
got = gsr.index.isin(values)
expected = psr.index.isin(values)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
pd.MultiIndex.from_arrays(
[[1, 2, 3], ["red", "blue", "green"]], names=("number", "color")
),
pd.MultiIndex.from_arrays([[], []], names=("number", "color")),
pd.MultiIndex.from_arrays(
[[1, 2, 3, 10, 100], ["red", "blue", "green", "pink", "white"]],
names=("number", "color"),
),
],
)
@pytest.mark.parametrize(
"values,level,err",
[
(["red", "orange", "yellow"], "color", None),
(["red", "white", "yellow"], "color", None),
([0, 1, 2, 10, 11, 15], "number", None),
([0, 1, 2, 10, 11, 15], None, TypeError),
(pd.Series([0, 1, 2, 10, 11, 15]), None, TypeError),
(pd.Index([0, 1, 2, 10, 11, 15]), None, TypeError),
(pd.Index([0, 1, 2, 8, 11, 15]), "number", None),
(pd.Index(["red", "white", "yellow"]), "color", None),
([(1, "red"), (3, "red")], None, None),
(((1, "red"), (3, "red")), None, None),
(
pd.MultiIndex.from_arrays(
[[1, 2, 3], ["red", "blue", "green"]],
names=("number", "color"),
),
None,
None,
),
(
pd.MultiIndex.from_arrays([[], []], names=("number", "color")),
None,
None,
),
(
pd.MultiIndex.from_arrays(
[
[1, 2, 3, 10, 100],
["red", "blue", "green", "pink", "white"],
],
names=("number", "color"),
),
None,
None,
),
],
)
def test_isin_multiindex(data, values, level, err):
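    # When `err` is None the cudf result must match pandas; otherwise both
    # libraries are expected to raise equivalent exceptions.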
pmdx = data
gmdx = cudf.from_pandas(data)
if err is None:
expected = pmdx.isin(values, level=level)
if isinstance(values, pd.MultiIndex):
values = cudf.from_pandas(values)
got = gmdx.isin(values, level=level)
assert_eq(got, expected)
else:
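        # The "squences" spelling below is presumably intentional so that the
        # escaped pattern matches the raised error message verbatim.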
assert_exceptions_equal(
lfunc=pmdx.isin,
rfunc=gmdx.isin,
lfunc_args_and_kwargs=([values], {"level": level}),
rfunc_args_and_kwargs=([values], {"level": level}),
check_exception_type=False,
expected_error_message=re.escape(
"values need to be a Multi-Index or set/list-like tuple "
"squences when `level=None`."
),
)
@pytest.mark.parametrize(
"data",
[
pd.DataFrame(
{
"num_legs": [2, 4],
"num_wings": [2, 0],
"bird_cats": pd.Series(
["sparrow", "pigeon"],
dtype="category",
index=["falcon", "dog"],
),
},
index=["falcon", "dog"],
),
pd.DataFrame(
{"num_legs": [8, 2], "num_wings": [0, 2]},
index=["spider", "falcon"],
),
pd.DataFrame(
{
"num_legs": [8, 2, 1, 0, 2, 4, 5],
"num_wings": [2, 0, 2, 1, 2, 4, -1],
}
),
],
)
@pytest.mark.parametrize(
"values",
[
[0, 2],
{"num_wings": [0, 3]},
pd.DataFrame(
{"num_legs": [8, 2], "num_wings": [0, 2]},
index=["spider", "falcon"],
),
pd.DataFrame(
{
"num_legs": [2, 4],
"num_wings": [2, 0],
"bird_cats": pd.Series(
["sparrow", "pigeon"],
dtype="category",
index=["falcon", "dog"],
),
},
index=["falcon", "dog"],
),
["sparrow", "pigeon"],
pd.Series(["sparrow", "pigeon"], dtype="category"),
pd.Series([1, 2, 3, 4, 5]),
"abc",
123,
],
)
def test_isin_dataframe(data, values):
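    # Scalar `values` are invalid for isin in both libraries; all other inputs
    # are compared element-wise against pandas.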
pdf = data
gdf = cudf.from_pandas(pdf)
if cudf.utils.dtypes.is_scalar(values):
assert_exceptions_equal(
lfunc=pdf.isin,
rfunc=gdf.isin,
lfunc_args_and_kwargs=([values],),
rfunc_args_and_kwargs=([values],),
)
else:
try:
expected = pdf.isin(values)
        except ValueError as e:
            # pytest.xfail() accepts only a reason string, so gate the
            # expected failure on the pandas version instead.
            if str(e) == "Lengths must match." and not PANDAS_GE_110:
                pytest.xfail(
                    "https://github.com/pandas-dev/pandas/issues/34256"
                )
            raise
if isinstance(values, (pd.DataFrame, pd.Series)):
values = cudf.from_pandas(values)
got = gdf.isin(values)
assert_eq(got, expected)
def test_constructor_properties():
df = cudf.DataFrame()
key1 = "a"
key2 = "b"
val1 = np.array([123], dtype=np.float64)
val2 = np.array([321], dtype=np.float64)
df[key1] = val1
df[key2] = val2
# Correct use of _constructor (for DataFrame)
assert_eq(df, df._constructor({key1: val1, key2: val2}))
# Correct use of _constructor (for cudf.Series)
assert_eq(df[key1], df[key2]._constructor(val1, name=key1))
# Correct use of _constructor_sliced (for DataFrame)
assert_eq(df[key1], df._constructor_sliced(val1, name=key1))
# Correct use of _constructor_expanddim (for cudf.Series)
assert_eq(df, df[key2]._constructor_expanddim({key1: val1, key2: val2}))
# Incorrect use of _constructor_sliced (Raises for cudf.Series)
with pytest.raises(NotImplementedError):
df[key1]._constructor_sliced
# Incorrect use of _constructor_expanddim (Raises for DataFrame)
with pytest.raises(NotImplementedError):
df._constructor_expanddim
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize("as_dtype", ALL_TYPES)
def test_df_astype_numeric_to_all(dtype, as_dtype):
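    # Pick sample data representable in the source dtype (no negatives for
    # unsigned ints, NaN only for floats) before casting to every other dtype.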
if "uint" in dtype:
data = [1, 2, None, 4, 7]
elif "int" in dtype or "longlong" in dtype:
data = [1, 2, None, 4, -7]
elif "float" in dtype:
data = [1.0, 2.0, None, 4.0, np.nan, -7.0]
gdf = cudf.DataFrame()
gdf["foo"] = cudf.Series(data, dtype=dtype)
gdf["bar"] = cudf.Series(data, dtype=dtype)
insert_data = cudf.Series(data, dtype=dtype)
expect = cudf.DataFrame()
expect["foo"] = insert_data.astype(as_dtype)
expect["bar"] = insert_data.astype(as_dtype)
got = gdf.astype(as_dtype)
assert_eq(expect, got)
@pytest.mark.parametrize(
"as_dtype",
[
"int32",
"float32",
"category",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
],
)
def test_df_astype_string_to_other(as_dtype):
if "datetime64" in as_dtype:
# change None to "NaT" after this issue is fixed:
# https://github.com/rapidsai/cudf/issues/5117
data = ["2001-01-01", "2002-02-02", "2000-01-05", None]
elif as_dtype == "int32":
data = [1, 2, 3]
elif as_dtype == "category":
data = ["1", "2", "3", None]
elif "float" in as_dtype:
data = [1.0, 2.0, 3.0, np.nan]
insert_data = cudf.Series.from_pandas(pd.Series(data, dtype="str"))
expect_data = cudf.Series(data, dtype=as_dtype)
gdf = cudf.DataFrame()
expect = cudf.DataFrame()
gdf["foo"] = insert_data
gdf["bar"] = insert_data
expect["foo"] = expect_data
expect["bar"] = expect_data
got = gdf.astype(as_dtype)
assert_eq(expect, got)
@pytest.mark.parametrize(
"as_dtype",
[
"int64",
"datetime64[s]",
"datetime64[us]",
"datetime64[ns]",
"str",
"category",
],
)
def test_df_astype_datetime_to_other(as_dtype):
data = [
"1991-11-20 00:00:00.000",
"2004-12-04 00:00:00.000",
"2016-09-13 00:00:00.000",
None,
]
gdf = cudf.DataFrame()
expect = cudf.DataFrame()
gdf["foo"] = cudf.Series(data, dtype="datetime64[ms]")
gdf["bar"] = cudf.Series(data, dtype="datetime64[ms]")
if as_dtype == "int64":
expect["foo"] = cudf.Series(
[690595200000, 1102118400000, 1473724800000, None], dtype="int64"
)
expect["bar"] = cudf.Series(
[690595200000, 1102118400000, 1473724800000, None], dtype="int64"
)
elif as_dtype == "str":
expect["foo"] = cudf.Series(data, dtype="str")
expect["bar"] = cudf.Series(data, dtype="str")
elif as_dtype == "category":
expect["foo"] = cudf.Series(gdf["foo"], dtype="category")
expect["bar"] = cudf.Series(gdf["bar"], dtype="category")
else:
expect["foo"] = cudf.Series(data, dtype=as_dtype)
expect["bar"] = cudf.Series(data, dtype=as_dtype)
got = gdf.astype(as_dtype)
assert_eq(expect, got)
@pytest.mark.parametrize(
"as_dtype",
[
"int32",
"float32",
"category",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
"str",
],
)
def test_df_astype_categorical_to_other(as_dtype):
if "datetime64" in as_dtype:
data = ["2001-01-01", "2002-02-02", "2000-01-05", "2001-01-01"]
else:
data = [1, 2, 3, 1]
psr = pd.Series(data, dtype="category")
pdf = pd.DataFrame()
pdf["foo"] = psr
pdf["bar"] = psr
gdf = cudf.DataFrame.from_pandas(pdf)
assert_eq(pdf.astype(as_dtype), gdf.astype(as_dtype))
@pytest.mark.parametrize("ordered", [True, False])
def test_df_astype_to_categorical_ordered(ordered):
psr = pd.Series([1, 2, 3, 1], dtype="category")
pdf = pd.DataFrame()
pdf["foo"] = psr
pdf["bar"] = psr
gdf = cudf.DataFrame.from_pandas(pdf)
ordered_dtype_pd = pd.CategoricalDtype(
categories=[1, 2, 3], ordered=ordered
)
ordered_dtype_gd = cudf.CategoricalDtype.from_pandas(ordered_dtype_pd)
assert_eq(
pdf.astype(ordered_dtype_pd).astype("int32"),
gdf.astype(ordered_dtype_gd).astype("int32"),
)
@pytest.mark.parametrize(
"dtype,args",
[(dtype, {}) for dtype in ALL_TYPES]
+ [("category", {"ordered": True}), ("category", {"ordered": False})],
)
def test_empty_df_astype(dtype, args):
df = cudf.DataFrame()
kwargs = {}
kwargs.update(args)
assert_eq(df, df.astype(dtype=dtype, **kwargs))
@pytest.mark.parametrize(
"errors",
[
pytest.param(
"raise", marks=pytest.mark.xfail(reason="should raise error here")
),
pytest.param("other", marks=pytest.mark.xfail(raises=ValueError)),
"ignore",
pytest.param(
"warn", marks=pytest.mark.filterwarnings("ignore:Traceback")
),
],
)
def test_series_astype_error_handling(errors):
sr = cudf.Series(["random", "words"])
got = sr.astype("datetime64", errors=errors)
assert_eq(sr, got)
@pytest.mark.parametrize("dtype", ALL_TYPES)
def test_df_constructor_dtype(dtype):
if "datetime" in dtype:
data = ["1991-11-20", "2004-12-04", "2016-09-13", None]
elif dtype == "str":
data = ["a", "b", "c", None]
elif "float" in dtype:
data = [1.0, 0.5, -1.1, np.nan, None]
elif "bool" in dtype:
data = [True, False, None]
else:
data = [1, 2, 3, None]
sr = cudf.Series(data, dtype=dtype)
expect = cudf.DataFrame()
expect["foo"] = sr
expect["bar"] = sr
got = cudf.DataFrame({"foo": data, "bar": data}, dtype=dtype)
assert_eq(expect, got)
@pytest.mark.parametrize(
"data",
[
cudf.datasets.randomdata(
nrows=10, dtypes={"a": "category", "b": int, "c": float, "d": int}
),
cudf.datasets.randomdata(
nrows=10, dtypes={"a": "category", "b": int, "c": float, "d": str}
),
cudf.datasets.randomdata(
nrows=10, dtypes={"a": bool, "b": int, "c": float, "d": str}
),
cudf.DataFrame(),
cudf.DataFrame({"a": [0, 1, 2], "b": [1, None, 3]}),
cudf.DataFrame(
{
"a": [1, 2, 3, 4],
"b": [7, np.NaN, 9, 10],
"c": [np.NaN, np.NaN, np.NaN, np.NaN],
"d": cudf.Series([None, None, None, None], dtype="int64"),
"e": [100, None, 200, None],
"f": cudf.Series([10, None, np.NaN, 11], nan_as_null=False),
}
),
cudf.DataFrame(
{
"a": [10, 11, 12, 13, 14, 15],
"b": cudf.Series(
[10, None, np.NaN, 2234, None, np.NaN], nan_as_null=False
),
}
),
],
)
@pytest.mark.parametrize(
"op", ["max", "min", "sum", "product", "mean", "var", "std"]
)
@pytest.mark.parametrize("skipna", [True, False])
def test_rowwise_ops(data, op, skipna):
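    # Row-wise reductions are compared against pandas; var/std pass ddof=0 so
    # both libraries use the population formula.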
gdf = data
pdf = gdf.to_pandas()
if op in ("var", "std"):
expected = getattr(pdf, op)(axis=1, ddof=0, skipna=skipna)
got = getattr(gdf, op)(axis=1, ddof=0, skipna=skipna)
else:
expected = getattr(pdf, op)(axis=1, skipna=skipna)
got = getattr(gdf, op)(axis=1, skipna=skipna)
assert_eq(expected, got, check_exact=False)
@pytest.mark.parametrize(
"op", ["max", "min", "sum", "product", "mean", "var", "std"]
)
def test_rowwise_ops_nullable_dtypes_all_null(op):
gdf = cudf.DataFrame(
{
"a": [1, 2, 3, 4],
"b": [7, np.NaN, 9, 10],
"c": [np.NaN, np.NaN, np.NaN, np.NaN],
"d": cudf.Series([None, None, None, None], dtype="int64"),
"e": [100, None, 200, None],
"f": cudf.Series([10, None, np.NaN, 11], nan_as_null=False),
}
)
expected = cudf.Series([None, None, None, None], dtype="float64")
if op in ("var", "std"):
got = getattr(gdf, op)(axis=1, ddof=0, skipna=False)
else:
got = getattr(gdf, op)(axis=1, skipna=False)
assert_eq(got.null_count, expected.null_count)
assert_eq(got, expected)
@pytest.mark.parametrize(
"op,expected",
[
(
"max",
cudf.Series(
[10.0, None, np.NaN, 2234.0, None, np.NaN],
dtype="float64",
nan_as_null=False,
),
),
(
"min",
cudf.Series(
[10.0, None, np.NaN, 13.0, None, np.NaN],
dtype="float64",
nan_as_null=False,
),
),
(
"sum",
cudf.Series(
[20.0, None, np.NaN, 2247.0, None, np.NaN],
dtype="float64",
nan_as_null=False,
),
),
(
"product",
cudf.Series(
[100.0, None, np.NaN, 29042.0, None, np.NaN],
dtype="float64",
nan_as_null=False,
),
),
(
"mean",
cudf.Series(
[10.0, None, np.NaN, 1123.5, None, np.NaN],
dtype="float64",
nan_as_null=False,
),
),
(
"var",
cudf.Series(
[0.0, None, np.NaN, 1233210.25, None, np.NaN],
dtype="float64",
nan_as_null=False,
),
),
(
"std",
cudf.Series(
[0.0, None, np.NaN, 1110.5, None, np.NaN],
dtype="float64",
nan_as_null=False,
),
),
],
)
def test_rowwise_ops_nullable_dtypes_partial_null(op, expected):
gdf = cudf.DataFrame(
{
"a": [10, 11, 12, 13, 14, 15],
"b": cudf.Series(
[10, None, np.NaN, 2234, None, np.NaN], nan_as_null=False,
),
}
)
if op in ("var", "std"):
got = getattr(gdf, op)(axis=1, ddof=0, skipna=False)
else:
got = getattr(gdf, op)(axis=1, skipna=False)
assert_eq(got.null_count, expected.null_count)
assert_eq(got, expected)
@pytest.mark.parametrize(
"op,expected",
[
(
"max",
cudf.Series([10, None, None, 2234, None, 453], dtype="int64",),
),
("min", cudf.Series([10, None, None, 13, None, 15], dtype="int64",),),
(
"sum",
cudf.Series([20, None, None, 2247, None, 468], dtype="int64",),
),
(
"product",
cudf.Series([100, None, None, 29042, None, 6795], dtype="int64",),
),
(
"mean",
cudf.Series(
[10.0, None, None, 1123.5, None, 234.0], dtype="float32",
),
),
(
"var",
cudf.Series(
[0.0, None, None, 1233210.25, None, 47961.0], dtype="float32",
),
),
(
"std",
cudf.Series(
[0.0, None, None, 1110.5, None, 219.0], dtype="float32",
),
),
],
)
def test_rowwise_ops_nullable_int_dtypes(op, expected):
gdf = cudf.DataFrame(
{
"a": [10, 11, None, 13, None, 15],
"b": cudf.Series(
[10, None, 323, 2234, None, 453], nan_as_null=False,
),
}
)
if op in ("var", "std"):
got = getattr(gdf, op)(axis=1, ddof=0, skipna=False)
else:
got = getattr(gdf, op)(axis=1, skipna=False)
assert_eq(got.null_count, expected.null_count)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"], dtype="<M8[ms]"
),
"t2": cudf.Series(
["1940-08-31 06:00:00", "2020-08-02 10:00:00"], dtype="<M8[ms]"
),
},
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"], dtype="<M8[ms]"
),
"t2": cudf.Series(
["1940-08-31 06:00:00", "2020-08-02 10:00:00"], dtype="<M8[ns]"
),
"t3": cudf.Series(
["1960-08-31 06:00:00", "2030-08-02 10:00:00"], dtype="<M8[s]"
),
},
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"], dtype="<M8[ms]"
),
"t2": cudf.Series(
["1940-08-31 06:00:00", "2020-08-02 10:00:00"], dtype="<M8[us]"
),
},
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"], dtype="<M8[ms]"
),
"t2": cudf.Series(
["1940-08-31 06:00:00", "2020-08-02 10:00:00"], dtype="<M8[ms]"
),
"i1": cudf.Series([1001, 2002], dtype="int64"),
},
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"], dtype="<M8[ms]"
),
"t2": cudf.Series(["1940-08-31 06:00:00", None], dtype="<M8[ms]"),
"i1": cudf.Series([1001, 2002], dtype="int64"),
},
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"], dtype="<M8[ms]"
),
"i1": cudf.Series([1001, 2002], dtype="int64"),
"f1": cudf.Series([-100.001, 123.456], dtype="float64"),
},
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"], dtype="<M8[ms]"
),
"i1": cudf.Series([1001, 2002], dtype="int64"),
"f1": cudf.Series([-100.001, 123.456], dtype="float64"),
"b1": cudf.Series([True, False], dtype="bool"),
},
],
)
@pytest.mark.parametrize("op", ["max", "min"])
@pytest.mark.parametrize("skipna", [True, False])
def test_rowwise_ops_datetime_dtypes(data, op, skipna):
gdf = cudf.DataFrame(data)
pdf = gdf.to_pandas()
got = getattr(gdf, op)(axis=1, skipna=skipna)
expected = getattr(pdf, op)(axis=1, skipna=skipna)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data,op,skipna",
[
(
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"],
dtype="<M8[ms]",
),
"t2": cudf.Series(
["1940-08-31 06:00:00", None], dtype="<M8[ms]"
),
},
"max",
True,
),
(
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"],
dtype="<M8[ms]",
),
"t2": cudf.Series(
["1940-08-31 06:00:00", None], dtype="<M8[ms]"
),
},
"min",
False,
),
(
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"],
dtype="<M8[ms]",
),
"t2": cudf.Series(
["1940-08-31 06:00:00", None], dtype="<M8[ms]"
),
},
"min",
True,
),
],
)
def test_rowwise_ops_datetime_dtypes_2(data, op, skipna):
gdf = cudf.DataFrame(data)
pdf = gdf.to_pandas()
got = getattr(gdf, op)(axis=1, skipna=skipna)
expected = getattr(pdf, op)(axis=1, skipna=skipna)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
(
{
"t1": pd.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"],
dtype="<M8[ns]",
),
"t2": pd.Series(
["1940-08-31 06:00:00", pd.NaT], dtype="<M8[ns]"
),
}
)
],
)
def test_rowwise_ops_datetime_dtypes_pdbug(data):
pdf = pd.DataFrame(data)
gdf = cudf.from_pandas(pdf)
expected = pdf.max(axis=1, skipna=False)
got = gdf.max(axis=1, skipna=False)
if PANDAS_GE_120:
assert_eq(got, expected)
else:
# PANDAS BUG: https://github.com/pandas-dev/pandas/issues/36907
with pytest.raises(AssertionError, match="numpy array are different"):
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
[5.0, 6.0, 7.0],
"single value",
np.array(1, dtype="int64"),
np.array(0.6273643, dtype="float64"),
],
)
def test_insert(data):
pdf = pd.DataFrame.from_dict({"A": [1, 2, 3], "B": ["a", "b", "c"]})
gdf = cudf.DataFrame.from_pandas(pdf)
# insertion by index
pdf.insert(0, "foo", data)
gdf.insert(0, "foo", data)
assert_eq(pdf, gdf)
pdf.insert(3, "bar", data)
gdf.insert(3, "bar", data)
assert_eq(pdf, gdf)
pdf.insert(1, "baz", data)
gdf.insert(1, "baz", data)
assert_eq(pdf, gdf)
# pandas insert doesn't support negative indexing
pdf.insert(len(pdf.columns), "qux", data)
gdf.insert(-1, "qux", data)
assert_eq(pdf, gdf)
def test_cov():
gdf = cudf.datasets.randomdata(10)
pdf = gdf.to_pandas()
assert_eq(pdf.cov(), gdf.cov())
@pytest.mark.xfail(reason="cupy-based cov does not support nulls")
def test_cov_nans():
pdf = pd.DataFrame()
pdf["a"] = [None, None, None, 2.00758632, None]
pdf["b"] = [0.36403686, None, None, None, None]
pdf["c"] = [None, None, None, 0.64882227, None]
pdf["d"] = [None, -1.46863125, None, 1.22477948, -0.06031689]
gdf = cudf.from_pandas(pdf)
assert_eq(pdf.cov(), gdf.cov())
@pytest.mark.parametrize(
"gsr",
[
cudf.Series([4, 2, 3]),
cudf.Series([4, 2, 3], index=["a", "b", "c"]),
cudf.Series([4, 2, 3], index=["a", "b", "d"]),
cudf.Series([4, 2], index=["a", "b"]),
cudf.Series([4, 2, 3], index=cudf.core.index.RangeIndex(0, 3)),
pytest.param(
cudf.Series([4, 2, 3, 4, 5], index=["a", "b", "d", "0", "12"]),
marks=pytest.mark.xfail,
),
],
)
@pytest.mark.parametrize("colnames", [["a", "b", "c"], [0, 1, 2]])
@pytest.mark.parametrize(
"op",
[
operator.add,
operator.mul,
operator.floordiv,
operator.truediv,
operator.mod,
operator.pow,
operator.eq,
operator.lt,
operator.le,
operator.gt,
operator.ge,
operator.ne,
],
)
def test_df_sr_binop(gsr, colnames, op):
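    # DataFrame <op> Series should align the Series index with the frame's
    # columns and propagate nulls the same way pandas does with nullable
    # dtypes.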
data = [[3.0, 2.0, 5.0], [3.0, None, 5.0], [6.0, 7.0, np.nan]]
data = dict(zip(colnames, data))
gsr = gsr.astype("float64")
gdf = cudf.DataFrame(data)
pdf = gdf.to_pandas(nullable=True)
psr = gsr.to_pandas(nullable=True)
expect = op(pdf, psr)
got = op(gdf, gsr).to_pandas(nullable=True)
assert_eq(expect, got, check_dtype=False)
expect = op(psr, pdf)
got = op(gsr, gdf).to_pandas(nullable=True)
assert_eq(expect, got, check_dtype=False)
@pytest.mark.parametrize(
"op",
[
operator.add,
operator.mul,
operator.floordiv,
operator.truediv,
operator.mod,
operator.pow,
# comparison ops will temporarily XFAIL
# see PR https://github.com/rapidsai/cudf/pull/7491
pytest.param(operator.eq, marks=pytest.mark.xfail()),
pytest.param(operator.lt, marks=pytest.mark.xfail()),
pytest.param(operator.le, marks=pytest.mark.xfail()),
pytest.param(operator.gt, marks=pytest.mark.xfail()),
pytest.param(operator.ge, marks=pytest.mark.xfail()),
pytest.param(operator.ne, marks=pytest.mark.xfail()),
],
)
@pytest.mark.parametrize(
"gsr", [cudf.Series([1, 2, 3, 4, 5], index=["a", "b", "d", "0", "12"])]
)
def test_df_sr_binop_col_order(gsr, op):
colnames = [0, 1, 2]
data = [[0, 2, 5], [3, None, 5], [6, 7, np.nan]]
data = dict(zip(colnames, data))
gdf = cudf.DataFrame(data)
pdf = pd.DataFrame.from_dict(data)
psr = gsr.to_pandas()
expect = op(pdf, psr).astype("float")
out = op(gdf, gsr).astype("float")
got = out[expect.columns]
assert_eq(expect, got)
@pytest.mark.parametrize("set_index", [None, "A", "C", "D"])
@pytest.mark.parametrize("index", [True, False])
@pytest.mark.parametrize("deep", [True, False])
def test_memory_usage(deep, index, set_index):
# Testing numerical/datetime by comparing with pandas
# (string and categorical columns will be different)
rows = int(100)
df = pd.DataFrame(
{
"A": np.arange(rows, dtype="int64"),
"B": np.arange(rows, dtype="int32"),
"C": np.arange(rows, dtype="float64"),
}
)
df["D"] = pd.to_datetime(df.A)
if set_index:
df = df.set_index(set_index)
gdf = cudf.from_pandas(df)
if index and set_index is None:
# Special Case: Assume RangeIndex size == 0
assert gdf.index.memory_usage(deep=deep) == 0
else:
# Check for Series only
assert df["B"].memory_usage(index=index, deep=deep) == gdf[
"B"
].memory_usage(index=index, deep=deep)
# Check for entire DataFrame
assert_eq(
df.memory_usage(index=index, deep=deep).sort_index(),
gdf.memory_usage(index=index, deep=deep).sort_index(),
)
@pytest.mark.xfail
def test_memory_usage_string():
rows = int(100)
df = pd.DataFrame(
{
"A": np.arange(rows, dtype="int32"),
"B": np.random.choice(["apple", "banana", "orange"], rows),
}
)
gdf = cudf.from_pandas(df)
# Check deep=False (should match pandas)
assert gdf.B.memory_usage(deep=False, index=False) == df.B.memory_usage(
deep=False, index=False
)
# Check string column
assert gdf.B.memory_usage(deep=True, index=False) == df.B.memory_usage(
deep=True, index=False
)
# Check string index
assert gdf.set_index("B").index.memory_usage(
deep=True
) == df.B.memory_usage(deep=True, index=False)
def test_memory_usage_cat():
rows = int(100)
df = pd.DataFrame(
{
"A": np.arange(rows, dtype="int32"),
"B": np.random.choice(["apple", "banana", "orange"], rows),
}
)
df["B"] = df.B.astype("category")
gdf = cudf.from_pandas(df)
expected = (
gdf.B._column.cat().categories.__sizeof__()
+ gdf.B._column.cat().codes.__sizeof__()
)
# Check cat column
assert gdf.B.memory_usage(deep=True, index=False) == expected
# Check cat index
assert gdf.set_index("B").index.memory_usage(deep=True) == expected
def test_memory_usage_list():
df = cudf.DataFrame({"A": [[0, 1, 2, 3], [4, 5, 6], [7, 8], [9]]})
expected = (
df.A._column.offsets._memory_usage()
+ df.A._column.elements._memory_usage()
)
assert expected == df.A.memory_usage()
@pytest.mark.xfail
def test_memory_usage_multi():
rows = int(100)
deep = True
df = pd.DataFrame(
{
"A": np.arange(rows, dtype="int32"),
"B": np.random.choice(np.arange(3, dtype="int64"), rows),
"C": np.random.choice(np.arange(3, dtype="float64"), rows),
}
).set_index(["B", "C"])
gdf = cudf.from_pandas(df)
# Assume MultiIndex memory footprint is just that
# of the underlying columns, levels, and codes
expect = rows * 16 # Source Columns
expect += rows * 16 # Codes
expect += 3 * 8 # Level 0
expect += 3 * 8 # Level 1
assert expect == gdf.index.memory_usage(deep=deep)
@pytest.mark.parametrize(
"list_input",
[
pytest.param([1, 2, 3, 4], id="smaller"),
pytest.param([1, 2, 3, 4, 5, 6], id="larger"),
],
)
@pytest.mark.parametrize(
"key",
[
pytest.param("list_test", id="new_column"),
pytest.param("id", id="existing_column"),
],
)
def test_setitem_diff_size_list(list_input, key):
gdf = cudf.datasets.randomdata(5)
with pytest.raises(
ValueError, match=("All columns must be of equal length")
):
gdf[key] = list_input
@pytest.mark.parametrize(
"series_input",
[
pytest.param(cudf.Series([1, 2, 3, 4]), id="smaller_cudf"),
pytest.param(cudf.Series([1, 2, 3, 4, 5, 6]), id="larger_cudf"),
pytest.param(cudf.Series([1, 2, 3], index=[4, 5, 6]), id="index_cudf"),
pytest.param(pd.Series([1, 2, 3, 4]), id="smaller_pandas"),
pytest.param(pd.Series([1, 2, 3, 4, 5, 6]), id="larger_pandas"),
pytest.param(pd.Series([1, 2, 3], index=[4, 5, 6]), id="index_pandas"),
],
)
@pytest.mark.parametrize(
"key",
[
pytest.param("list_test", id="new_column"),
pytest.param("id", id="existing_column"),
],
)
def test_setitem_diff_size_series(series_input, key):
gdf = cudf.datasets.randomdata(5)
pdf = gdf.to_pandas()
pandas_input = series_input
if isinstance(pandas_input, cudf.Series):
pandas_input = pandas_input.to_pandas()
expect = pdf
expect[key] = pandas_input
got = gdf
got[key] = series_input
    # Pandas inserts NaN and upcasts to float64 when values are missing after
    # alignment, so cast both frames to float64 for the comparison.
expect = expect.astype("float64")
got = got.astype("float64")
assert_eq(expect, got)
def test_tupleize_cols_False_set():
pdf = pd.DataFrame()
gdf = cudf.DataFrame()
pdf[("a", "b")] = [1]
gdf[("a", "b")] = [1]
assert_eq(pdf, gdf)
assert_eq(pdf.columns, gdf.columns)
def test_init_multiindex_from_dict():
pdf = pd.DataFrame({("a", "b"): [1]})
gdf = cudf.DataFrame({("a", "b"): [1]})
assert_eq(pdf, gdf)
assert_eq(pdf.columns, gdf.columns)
def test_change_column_dtype_in_empty():
pdf = pd.DataFrame({"a": [], "b": []})
gdf = cudf.from_pandas(pdf)
assert_eq(pdf, gdf)
pdf["b"] = pdf["b"].astype("int64")
gdf["b"] = gdf["b"].astype("int64")
assert_eq(pdf, gdf)
def test_dataframe_from_table_empty_index():
df = cudf.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
odict = df._data
tbl = cudf._lib.table.Table(odict)
result = cudf.DataFrame._from_table(tbl) # noqa: F841
@pytest.mark.parametrize("dtype", ["int64", "str"])
def test_dataframe_from_dictionary_series_same_name_index(dtype):
pd_idx1 = pd.Index([1, 2, 0], name="test_index").astype(dtype)
pd_idx2 = pd.Index([2, 0, 1], name="test_index").astype(dtype)
pd_series1 = pd.Series([1, 2, 3], index=pd_idx1)
pd_series2 = pd.Series([1, 2, 3], index=pd_idx2)
gd_idx1 = cudf.from_pandas(pd_idx1)
gd_idx2 = cudf.from_pandas(pd_idx2)
gd_series1 = cudf.Series([1, 2, 3], index=gd_idx1)
gd_series2 = cudf.Series([1, 2, 3], index=gd_idx2)
expect = pd.DataFrame({"a": pd_series1, "b": pd_series2})
got = cudf.DataFrame({"a": gd_series1, "b": gd_series2})
if dtype == "str":
# Pandas actually loses its index name erroneously here...
expect.index.name = "test_index"
assert_eq(expect, got)
assert expect.index.names == got.index.names
@pytest.mark.parametrize(
"arg", [slice(2, 8, 3), slice(1, 20, 4), slice(-2, -6, -2)]
)
def test_dataframe_strided_slice(arg):
mul = pd.DataFrame(
{
"Index": [1, 2, 3, 4, 5, 6, 7, 8, 9],
"AlphaIndex": ["a", "b", "c", "d", "e", "f", "g", "h", "i"],
}
)
pdf = pd.DataFrame(
{"Val": [10, 9, 8, 7, 6, 5, 4, 3, 2]},
index=pd.MultiIndex.from_frame(mul),
)
gdf = cudf.DataFrame.from_pandas(pdf)
expect = pdf[arg]
got = gdf[arg]
assert_eq(expect, got)
@pytest.mark.parametrize(
"data,condition,other,error",
[
(pd.Series(range(5)), pd.Series(range(5)) > 0, None, None),
(pd.Series(range(5)), pd.Series(range(5)) > 1, None, None),
(pd.Series(range(5)), pd.Series(range(5)) > 1, 10, None),
(
pd.Series(range(5)),
pd.Series(range(5)) > 1,
pd.Series(range(5, 10)),
None,
),
(
pd.DataFrame(np.arange(10).reshape(-1, 2), columns=["A", "B"]),
(
pd.DataFrame(np.arange(10).reshape(-1, 2), columns=["A", "B"])
% 3
)
== 0,
-pd.DataFrame(np.arange(10).reshape(-1, 2), columns=["A", "B"]),
None,
),
(
pd.DataFrame({"a": [1, 2, np.nan], "b": [4, np.nan, 6]}),
pd.DataFrame({"a": [1, 2, np.nan], "b": [4, np.nan, 6]}) == 4,
None,
None,
),
(
pd.DataFrame({"a": [1, 2, np.nan], "b": [4, np.nan, 6]}),
pd.DataFrame({"a": [1, 2, np.nan], "b": [4, np.nan, 6]}) != 4,
None,
None,
),
(
pd.DataFrame({"p": [-2, 3, -4, -79], "k": [9, 10, 11, 12]}),
[True, True, True],
None,
ValueError,
),
(
pd.DataFrame({"p": [-2, 3, -4, -79], "k": [9, 10, 11, 12]}),
[True, True, True, False],
None,
ValueError,
),
(
pd.DataFrame({"p": [-2, 3, -4, -79], "k": [9, 10, 11, 12]}),
[[True, True, True, False], [True, True, True, False]],
None,
ValueError,
),
(
pd.DataFrame({"p": [-2, 3, -4, -79], "k": [9, 10, 11, 12]}),
[[True, True], [False, True], [True, False], [False, True]],
None,
None,
),
(
pd.DataFrame({"p": [-2, 3, -4, -79], "k": [9, 10, 11, 12]}),
cuda.to_device(
np.array(
[[True, True], [False, True], [True, False], [False, True]]
)
),
None,
None,
),
(
pd.DataFrame({"p": [-2, 3, -4, -79], "k": [9, 10, 11, 12]}),
cupy.array(
[[True, True], [False, True], [True, False], [False, True]]
),
17,
None,
),
(
pd.DataFrame({"p": [-2, 3, -4, -79], "k": [9, 10, 11, 12]}),
[[True, True], [False, True], [True, False], [False, True]],
17,
None,
),
(
pd.DataFrame({"p": [-2, 3, -4, -79], "k": [9, 10, 11, 12]}),
[
[True, True, False, True],
[True, True, False, True],
[True, True, False, True],
[True, True, False, True],
],
None,
ValueError,
),
(
pd.Series([1, 2, np.nan]),
pd.Series([1, 2, np.nan]) == 4,
None,
None,
),
(
pd.Series([1, 2, np.nan]),
pd.Series([1, 2, np.nan]) != 4,
None,
None,
),
(
pd.Series([4, np.nan, 6]),
pd.Series([4, np.nan, 6]) == 4,
None,
None,
),
(
pd.Series([4, np.nan, 6]),
pd.Series([4, np.nan, 6]) != 4,
None,
None,
),
(
pd.Series([4, np.nan, 6], dtype="category"),
pd.Series([4, np.nan, 6], dtype="category") != 4,
None,
None,
),
(
pd.Series(["a", "b", "b", "d", "c", "s"], dtype="category"),
pd.Series(["a", "b", "b", "d", "c", "s"], dtype="category") == "b",
None,
None,
),
(
pd.Series(["a", "b", "b", "d", "c", "s"], dtype="category"),
pd.Series(["a", "b", "b", "d", "c", "s"], dtype="category") == "b",
"s",
None,
),
(
pd.Series([1, 2, 3, 2, 5]),
pd.Series([1, 2, 3, 2, 5]) == 2,
pd.DataFrame(
{
"a": pd.Series([1, 2, 3, 2, 5]),
"b": pd.Series([1, 2, 3, 2, 5]),
}
),
NotImplementedError,
),
],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_df_sr_mask_where(data, condition, other, error, inplace):
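    # Normalize `condition` and `other` into matching host/device objects,
    # then check that where() and mask() agree with pandas (or that both
    # libraries raise) for inplace and non-inplace calls.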
ps_where = data
gs_where = cudf.from_pandas(data)
ps_mask = ps_where.copy(deep=True)
gs_mask = gs_where.copy(deep=True)
if hasattr(condition, "__cuda_array_interface__"):
if type(condition).__module__.split(".")[0] == "cupy":
ps_condition = cupy.asnumpy(condition)
else:
ps_condition = np.array(condition).astype("bool")
else:
ps_condition = condition
if type(condition).__module__.split(".")[0] == "pandas":
gs_condition = cudf.from_pandas(condition)
else:
gs_condition = condition
ps_other = other
if type(other).__module__.split(".")[0] == "pandas":
gs_other = cudf.from_pandas(other)
else:
gs_other = other
if error is None:
expect_where = ps_where.where(
ps_condition, other=ps_other, inplace=inplace
)
got_where = gs_where.where(
gs_condition, other=gs_other, inplace=inplace
)
expect_mask = ps_mask.mask(
ps_condition, other=ps_other, inplace=inplace
)
got_mask = gs_mask.mask(gs_condition, other=gs_other, inplace=inplace)
if inplace:
expect_where = ps_where
got_where = gs_where
expect_mask = ps_mask
got_mask = gs_mask
if pd.api.types.is_categorical_dtype(expect_where):
np.testing.assert_array_equal(
expect_where.cat.codes,
got_where.cat.codes.astype(expect_where.cat.codes.dtype)
.fillna(-1)
.to_array(),
)
assert_eq(expect_where.cat.categories, got_where.cat.categories)
np.testing.assert_array_equal(
expect_mask.cat.codes,
got_mask.cat.codes.astype(expect_mask.cat.codes.dtype)
.fillna(-1)
.to_array(),
)
assert_eq(expect_mask.cat.categories, got_mask.cat.categories)
else:
assert_eq(
expect_where.fillna(-1),
got_where.fillna(-1),
check_dtype=False,
)
assert_eq(
expect_mask.fillna(-1), got_mask.fillna(-1), check_dtype=False
)
else:
assert_exceptions_equal(
lfunc=ps_where.where,
rfunc=gs_where.where,
lfunc_args_and_kwargs=(
[ps_condition],
{"other": ps_other, "inplace": inplace},
),
rfunc_args_and_kwargs=(
[gs_condition],
{"other": gs_other, "inplace": inplace},
),
compare_error_message=False
if error is NotImplementedError
else True,
)
assert_exceptions_equal(
lfunc=ps_mask.mask,
rfunc=gs_mask.mask,
lfunc_args_and_kwargs=(
[ps_condition],
{"other": ps_other, "inplace": inplace},
),
rfunc_args_and_kwargs=(
[gs_condition],
{"other": gs_other, "inplace": inplace},
),
compare_error_message=False,
)
@pytest.mark.parametrize(
"data,condition,other,has_cat",
[
(
pd.DataFrame(
{
"a": pd.Series(["a", "a", "b", "c", "a", "d", "d", "a"]),
"b": pd.Series(["o", "p", "q", "e", "p", "p", "a", "a"]),
}
),
pd.DataFrame(
{
"a": pd.Series(["a", "a", "b", "c", "a", "d", "d", "a"]),
"b": pd.Series(["o", "p", "q", "e", "p", "p", "a", "a"]),
}
)
!= "a",
None,
None,
),
(
pd.DataFrame(
{
"a": pd.Series(
["a", "a", "b", "c", "a", "d", "d", "a"],
dtype="category",
),
"b": pd.Series(
["o", "p", "q", "e", "p", "p", "a", "a"],
dtype="category",
),
}
),
pd.DataFrame(
{
"a": pd.Series(
["a", "a", "b", "c", "a", "d", "d", "a"],
dtype="category",
),
"b": pd.Series(
["o", "p", "q", "e", "p", "p", "a", "a"],
dtype="category",
),
}
)
!= "a",
None,
True,
),
(
pd.DataFrame(
{
"a": pd.Series(
["a", "a", "b", "c", "a", "d", "d", "a"],
dtype="category",
),
"b": pd.Series(
["o", "p", "q", "e", "p", "p", "a", "a"],
dtype="category",
),
}
),
pd.DataFrame(
{
"a": pd.Series(
["a", "a", "b", "c", "a", "d", "d", "a"],
dtype="category",
),
"b": pd.Series(
["o", "p", "q", "e", "p", "p", "a", "a"],
dtype="category",
),
}
)
== "a",
None,
True,
),
(
pd.DataFrame(
{
"a": pd.Series(
["a", "a", "b", "c", "a", "d", "d", "a"],
dtype="category",
),
"b": pd.Series(
["o", "p", "q", "e", "p", "p", "a", "a"],
dtype="category",
),
}
),
pd.DataFrame(
{
"a": pd.Series(
["a", "a", "b", "c", "a", "d", "d", "a"],
dtype="category",
),
"b": pd.Series(
["o", "p", "q", "e", "p", "p", "a", "a"],
dtype="category",
),
}
)
!= "a",
"a",
True,
),
(
pd.DataFrame(
{
"a": pd.Series(
["a", "a", "b", "c", "a", "d", "d", "a"],
dtype="category",
),
"b": pd.Series(
["o", "p", "q", "e", "p", "p", "a", "a"],
dtype="category",
),
}
),
pd.DataFrame(
{
"a": pd.Series(
["a", "a", "b", "c", "a", "d", "d", "a"],
dtype="category",
),
"b": pd.Series(
["o", "p", "q", "e", "p", "p", "a", "a"],
dtype="category",
),
}
)
== "a",
"a",
True,
),
],
)
def test_df_string_cat_types_mask_where(data, condition, other, has_cat):
ps = data
gs = cudf.from_pandas(data)
ps_condition = condition
if type(condition).__module__.split(".")[0] == "pandas":
gs_condition = cudf.from_pandas(condition)
else:
gs_condition = condition
ps_other = other
if type(other).__module__.split(".")[0] == "pandas":
gs_other = cudf.from_pandas(other)
else:
gs_other = other
expect_where = ps.where(ps_condition, other=ps_other)
got_where = gs.where(gs_condition, other=gs_other)
expect_mask = ps.mask(ps_condition, other=ps_other)
got_mask = gs.mask(gs_condition, other=gs_other)
if has_cat is None:
assert_eq(
expect_where.fillna(-1).astype("str"),
got_where.fillna(-1),
check_dtype=False,
)
assert_eq(
expect_mask.fillna(-1).astype("str"),
got_mask.fillna(-1),
check_dtype=False,
)
else:
assert_eq(expect_where, got_where, check_dtype=False)
assert_eq(expect_mask, got_mask, check_dtype=False)
@pytest.mark.parametrize(
"data,expected_upcast_type,error",
[
(
pd.Series([random.random() for _ in range(10)], dtype="float32"),
np.dtype("float32"),
None,
),
(
pd.Series([random.random() for _ in range(10)], dtype="float16"),
np.dtype("float32"),
None,
),
(
pd.Series([random.random() for _ in range(10)], dtype="float64"),
np.dtype("float64"),
None,
),
(
pd.Series([random.random() for _ in range(10)], dtype="float128"),
None,
NotImplementedError,
),
],
)
def test_from_pandas_unsupported_types(data, expected_upcast_type, error):
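    # float16 input should be upcast to float32; float128 is unsupported and
    # must raise NotImplementedError from every construction path.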
pdf = pd.DataFrame({"one_col": data})
if error == NotImplementedError:
with pytest.raises(error):
cudf.from_pandas(data)
with pytest.raises(error):
cudf.Series(data)
with pytest.raises(error):
cudf.from_pandas(pdf)
with pytest.raises(error):
cudf.DataFrame(pdf)
else:
df = cudf.from_pandas(data)
assert_eq(data, df, check_dtype=False)
assert df.dtype == expected_upcast_type
df = cudf.Series(data)
assert_eq(data, df, check_dtype=False)
assert df.dtype == expected_upcast_type
df = cudf.from_pandas(pdf)
assert_eq(pdf, df, check_dtype=False)
assert df["one_col"].dtype == expected_upcast_type
df = cudf.DataFrame(pdf)
assert_eq(pdf, df, check_dtype=False)
assert df["one_col"].dtype == expected_upcast_type
@pytest.mark.parametrize("nan_as_null", [True, False])
@pytest.mark.parametrize("index", [None, "a", ["a", "b"]])
def test_from_pandas_nan_as_null(nan_as_null, index):
data = [np.nan, 2.0, 3.0]
if index is None:
pdf = pd.DataFrame({"a": data, "b": data})
expected = cudf.DataFrame(
{
"a": column.as_column(data, nan_as_null=nan_as_null),
"b": column.as_column(data, nan_as_null=nan_as_null),
}
)
else:
pdf = pd.DataFrame({"a": data, "b": data}).set_index(index)
expected = cudf.DataFrame(
{
"a": column.as_column(data, nan_as_null=nan_as_null),
"b": column.as_column(data, nan_as_null=nan_as_null),
}
)
expected = expected.set_index(index)
got = cudf.from_pandas(pdf, nan_as_null=nan_as_null)
assert_eq(expected, got)
@pytest.mark.parametrize("nan_as_null", [True, False])
def test_from_pandas_for_series_nan_as_null(nan_as_null):
data = [np.nan, 2.0, 3.0]
psr = pd.Series(data)
expected = cudf.Series(column.as_column(data, nan_as_null=nan_as_null))
got = cudf.from_pandas(psr, nan_as_null=nan_as_null)
assert_eq(expected, got)
@pytest.mark.parametrize("copy", [True, False])
def test_df_series_dataframe_astype_copy(copy):
gdf = cudf.DataFrame({"col1": [1, 2], "col2": [3, 4]})
pdf = gdf.to_pandas()
assert_eq(
gdf.astype(dtype="float", copy=copy),
pdf.astype(dtype="float", copy=copy),
)
assert_eq(gdf, pdf)
gsr = cudf.Series([1, 2])
psr = gsr.to_pandas()
assert_eq(
gsr.astype(dtype="float", copy=copy),
psr.astype(dtype="float", copy=copy),
)
assert_eq(gsr, psr)
gsr = cudf.Series([1, 2])
psr = gsr.to_pandas()
actual = gsr.astype(dtype="int64", copy=copy)
expected = psr.astype(dtype="int64", copy=copy)
assert_eq(expected, actual)
assert_eq(gsr, psr)
actual[0] = 3
expected[0] = 3
assert_eq(gsr, psr)
@pytest.mark.parametrize("copy", [True, False])
def test_df_series_dataframe_astype_dtype_dict(copy):
gdf = cudf.DataFrame({"col1": [1, 2], "col2": [3, 4]})
pdf = gdf.to_pandas()
assert_eq(
gdf.astype(dtype={"col1": "float"}, copy=copy),
pdf.astype(dtype={"col1": "float"}, copy=copy),
)
assert_eq(gdf, pdf)
gsr = cudf.Series([1, 2])
psr = gsr.to_pandas()
assert_eq(
gsr.astype(dtype={None: "float"}, copy=copy),
psr.astype(dtype={None: "float"}, copy=copy),
)
assert_eq(gsr, psr)
assert_exceptions_equal(
lfunc=psr.astype,
rfunc=gsr.astype,
lfunc_args_and_kwargs=([], {"dtype": {"a": "float"}, "copy": copy}),
rfunc_args_and_kwargs=([], {"dtype": {"a": "float"}, "copy": copy}),
)
gsr = cudf.Series([1, 2])
psr = gsr.to_pandas()
actual = gsr.astype({None: "int64"}, copy=copy)
expected = psr.astype({None: "int64"}, copy=copy)
assert_eq(expected, actual)
assert_eq(gsr, psr)
actual[0] = 3
expected[0] = 3
assert_eq(gsr, psr)
@pytest.mark.parametrize(
"data,columns",
[
([1, 2, 3, 100, 112, 35464], ["a"]),
(range(100), None),
([], None),
((-10, 21, 32, 32, 1, 2, 3), ["p"]),
((), None),
([[1, 2, 3], [1, 2, 3]], ["col1", "col2", "col3"]),
([range(100), range(100)], ["range" + str(i) for i in range(100)]),
(((1, 2, 3), (1, 2, 3)), ["tuple0", "tuple1", "tuple2"]),
([[1, 2, 3]], ["list col1", "list col2", "list col3"]),
([range(100)], ["range" + str(i) for i in range(100)]),
(((1, 2, 3),), ["k1", "k2", "k3"]),
],
)
def test_dataframe_init_1d_list(data, columns):
expect = pd.DataFrame(data, columns=columns)
actual = cudf.DataFrame(data, columns=columns)
assert_eq(
expect, actual, check_index_type=False if len(data) == 0 else True
)
expect = pd.DataFrame(data, columns=None)
actual = cudf.DataFrame(data, columns=None)
assert_eq(
expect, actual, check_index_type=False if len(data) == 0 else True
)
@pytest.mark.parametrize(
"data,cols,index",
[
(
np.ndarray(shape=(4, 2), dtype=float, order="F"),
["a", "b"],
["a", "b", "c", "d"],
),
(
np.ndarray(shape=(4, 2), dtype=float, order="F"),
["a", "b"],
[0, 20, 30, 10],
),
(
np.ndarray(shape=(4, 2), dtype=float, order="F"),
["a", "b"],
[0, 1, 2, 3],
),
(np.array([11, 123, -2342, 232]), ["a"], [1, 2, 11, 12]),
(np.array([11, 123, -2342, 232]), ["a"], ["khsdjk", "a", "z", "kk"]),
(
cupy.ndarray(shape=(4, 2), dtype=float, order="F"),
["a", "z"],
["a", "z", "a", "z"],
),
(cupy.array([11, 123, -2342, 232]), ["z"], [0, 1, 1, 0]),
(cupy.array([11, 123, -2342, 232]), ["z"], [1, 2, 3, 4]),
(cupy.array([11, 123, -2342, 232]), ["z"], ["a", "z", "d", "e"]),
(np.random.randn(2, 4), ["a", "b", "c", "d"], ["a", "b"]),
(np.random.randn(2, 4), ["a", "b", "c", "d"], [1, 0]),
(cupy.random.randn(2, 4), ["a", "b", "c", "d"], ["a", "b"]),
(cupy.random.randn(2, 4), ["a", "b", "c", "d"], [1, 0]),
],
)
def test_dataframe_init_from_arrays_cols(data, cols, index):
gd_data = data
if isinstance(data, cupy.core.ndarray):
# pandas can't handle cupy arrays in general
pd_data = data.get()
# additional test for building DataFrame with gpu array whose
# cuda array interface has no `descr` attribute
numba_data = cuda.as_cuda_array(data)
else:
pd_data = data
numba_data = None
# verify with columns & index
pdf = pd.DataFrame(pd_data, columns=cols, index=index)
gdf = cudf.DataFrame(gd_data, columns=cols, index=index)
assert_eq(pdf, gdf, check_dtype=False)
# verify with columns
pdf = pd.DataFrame(pd_data, columns=cols)
gdf = cudf.DataFrame(gd_data, columns=cols)
assert_eq(pdf, gdf, check_dtype=False)
pdf = pd.DataFrame(pd_data)
gdf = cudf.DataFrame(gd_data)
assert_eq(pdf, gdf, check_dtype=False)
if numba_data is not None:
gdf = cudf.DataFrame(numba_data)
assert_eq(pdf, gdf, check_dtype=False)
@pytest.mark.parametrize(
"col_data",
[
range(5),
["a", "b", "x", "y", "z"],
[1.0, 0.213, 0.34332],
["a"],
[1],
[0.2323],
[],
],
)
@pytest.mark.parametrize(
"assign_val",
[
1,
2,
np.array(2),
cupy.array(2),
0.32324,
np.array(0.34248),
cupy.array(0.34248),
"abc",
np.array("abc", dtype="object"),
np.array("abc", dtype="str"),
np.array("abc"),
None,
],
)
def test_dataframe_assign_scalar(col_data, assign_val):
pdf = pd.DataFrame({"a": col_data})
gdf = cudf.DataFrame({"a": col_data})
pdf["b"] = (
cupy.asnumpy(assign_val)
if isinstance(assign_val, cupy.ndarray)
else assign_val
)
gdf["b"] = assign_val
assert_eq(pdf, gdf)
@pytest.mark.parametrize(
"col_data",
[
1,
2,
np.array(2),
cupy.array(2),
0.32324,
np.array(0.34248),
cupy.array(0.34248),
"abc",
np.array("abc", dtype="object"),
np.array("abc", dtype="str"),
np.array("abc"),
None,
],
)
@pytest.mark.parametrize(
"assign_val",
[
1,
2,
np.array(2),
cupy.array(2),
0.32324,
np.array(0.34248),
cupy.array(0.34248),
"abc",
np.array("abc", dtype="object"),
np.array("abc", dtype="str"),
np.array("abc"),
None,
],
)
def test_dataframe_assign_scalar_with_scalar_cols(col_data, assign_val):
pdf = pd.DataFrame(
{
"a": cupy.asnumpy(col_data)
if isinstance(col_data, cupy.ndarray)
else col_data
},
index=["dummy_mandatory_index"],
)
gdf = cudf.DataFrame({"a": col_data}, index=["dummy_mandatory_index"])
pdf["b"] = (
cupy.asnumpy(assign_val)
if isinstance(assign_val, cupy.ndarray)
else assign_val
)
gdf["b"] = assign_val
assert_eq(pdf, gdf)
def test_dataframe_info_basic():
buffer = io.StringIO()
str_cmp = textwrap.dedent(
"""\
<class 'cudf.core.dataframe.DataFrame'>
StringIndex: 10 entries, a to 1111
Data columns (total 10 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 0 10 non-null float64
1 1 10 non-null float64
2 2 10 non-null float64
3 3 10 non-null float64
4 4 10 non-null float64
5 5 10 non-null float64
6 6 10 non-null float64
7 7 10 non-null float64
8 8 10 non-null float64
9 9 10 non-null float64
dtypes: float64(10)
memory usage: 859.0+ bytes
"""
)
df = pd.DataFrame(
np.random.randn(10, 10),
index=["a", "2", "3", "4", "5", "6", "7", "8", "100", "1111"],
)
cudf.from_pandas(df).info(buf=buffer, verbose=True)
s = buffer.getvalue()
assert str_cmp == s
def test_dataframe_info_verbose_mem_usage():
buffer = io.StringIO()
df = pd.DataFrame({"a": [1, 2, 3], "b": ["safdas", "assa", "asdasd"]})
str_cmp = textwrap.dedent(
"""\
<class 'cudf.core.dataframe.DataFrame'>
RangeIndex: 3 entries, 0 to 2
Data columns (total 2 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 a 3 non-null int64
1 b 3 non-null object
dtypes: int64(1), object(1)
memory usage: 56.0+ bytes
"""
)
cudf.from_pandas(df).info(buf=buffer, verbose=True)
s = buffer.getvalue()
assert str_cmp == s
buffer.truncate(0)
buffer.seek(0)
str_cmp = textwrap.dedent(
"""\
<class 'cudf.core.dataframe.DataFrame'>
RangeIndex: 3 entries, 0 to 2
Columns: 2 entries, a to b
dtypes: int64(1), object(1)
memory usage: 56.0+ bytes
"""
)
cudf.from_pandas(df).info(buf=buffer, verbose=False)
s = buffer.getvalue()
assert str_cmp == s
buffer.truncate(0)
buffer.seek(0)
df = pd.DataFrame(
{"a": [1, 2, 3], "b": ["safdas", "assa", "asdasd"]},
index=["sdfdsf", "sdfsdfds", "dsfdf"],
)
str_cmp = textwrap.dedent(
"""\
<class 'cudf.core.dataframe.DataFrame'>
StringIndex: 3 entries, sdfdsf to dsfdf
Data columns (total 2 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 a 3 non-null int64
1 b 3 non-null object
dtypes: int64(1), object(1)
memory usage: 91.0 bytes
"""
)
cudf.from_pandas(df).info(buf=buffer, verbose=True, memory_usage="deep")
s = buffer.getvalue()
assert str_cmp == s
buffer.truncate(0)
buffer.seek(0)
int_values = [1, 2, 3, 4, 5]
text_values = ["alpha", "beta", "gamma", "delta", "epsilon"]
float_values = [0.0, 0.25, 0.5, 0.75, 1.0]
df = cudf.DataFrame(
{
"int_col": int_values,
"text_col": text_values,
"float_col": float_values,
}
)
str_cmp = textwrap.dedent(
"""\
<class 'cudf.core.dataframe.DataFrame'>
RangeIndex: 5 entries, 0 to 4
Data columns (total 3 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 int_col 5 non-null int64
1 text_col 5 non-null object
2 float_col 5 non-null float64
dtypes: float64(1), int64(1), object(1)
memory usage: 130.0 bytes
"""
)
df.info(buf=buffer, verbose=True, memory_usage="deep")
actual_string = buffer.getvalue()
assert str_cmp == actual_string
buffer.truncate(0)
buffer.seek(0)
def test_dataframe_info_null_counts():
int_values = [1, 2, 3, 4, 5]
text_values = ["alpha", "beta", "gamma", "delta", "epsilon"]
float_values = [0.0, 0.25, 0.5, 0.75, 1.0]
df = cudf.DataFrame(
{
"int_col": int_values,
"text_col": text_values,
"float_col": float_values,
}
)
buffer = io.StringIO()
str_cmp = textwrap.dedent(
"""\
<class 'cudf.core.dataframe.DataFrame'>
RangeIndex: 5 entries, 0 to 4
Data columns (total 3 columns):
# Column Dtype
--- ------ -----
0 int_col int64
1 text_col object
2 float_col float64
dtypes: float64(1), int64(1), object(1)
memory usage: 130.0+ bytes
"""
)
df.info(buf=buffer, verbose=True, null_counts=False)
actual_string = buffer.getvalue()
assert str_cmp == actual_string
buffer.truncate(0)
buffer.seek(0)
df.info(buf=buffer, verbose=True, max_cols=0)
actual_string = buffer.getvalue()
assert str_cmp == actual_string
buffer.truncate(0)
buffer.seek(0)
df = cudf.DataFrame()
str_cmp = textwrap.dedent(
"""\
<class 'cudf.core.dataframe.DataFrame'>
RangeIndex: 0 entries
Empty DataFrame"""
)
df.info(buf=buffer, verbose=True)
actual_string = buffer.getvalue()
assert str_cmp == actual_string
buffer.truncate(0)
buffer.seek(0)
df = cudf.DataFrame(
{
"a": [1, 2, 3, None, 10, 11, 12, None],
"b": ["a", "b", "c", "sd", "sdf", "sd", None, None],
}
)
str_cmp = textwrap.dedent(
"""\
<class 'cudf.core.dataframe.DataFrame'>
RangeIndex: 8 entries, 0 to 7
Data columns (total 2 columns):
# Column Dtype
--- ------ -----
0 a int64
1 b object
dtypes: int64(1), object(1)
memory usage: 238.0+ bytes
"""
)
pd.options.display.max_info_rows = 2
df.info(buf=buffer, max_cols=2, null_counts=None)
pd.reset_option("display.max_info_rows")
actual_string = buffer.getvalue()
assert str_cmp == actual_string
buffer.truncate(0)
buffer.seek(0)
str_cmp = textwrap.dedent(
"""\
<class 'cudf.core.dataframe.DataFrame'>
RangeIndex: 8 entries, 0 to 7
Data columns (total 2 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 a 6 non-null int64
1 b 6 non-null object
dtypes: int64(1), object(1)
memory usage: 238.0+ bytes
"""
)
df.info(buf=buffer, max_cols=2, null_counts=None)
actual_string = buffer.getvalue()
assert str_cmp == actual_string
buffer.truncate(0)
buffer.seek(0)
df.info(buf=buffer, null_counts=True)
actual_string = buffer.getvalue()
assert str_cmp == actual_string
@pytest.mark.parametrize(
"data1",
[
[1, 2, 3, 4, 5, 6, 7],
[1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0],
[
1.9876543,
2.9876654,
3.9876543,
4.1234587,
5.23,
6.88918237,
7.00001,
],
[
-1.9876543,
-2.9876654,
-3.9876543,
-4.1234587,
-5.23,
-6.88918237,
-7.00001,
],
[
1.987654321,
2.987654321,
3.987654321,
0.1221,
2.1221,
0.112121,
-21.1212,
],
[
-1.987654321,
-2.987654321,
-3.987654321,
-0.1221,
-2.1221,
-0.112121,
21.1212,
],
],
)
@pytest.mark.parametrize(
"data2",
[
[1, 2, 3, 4, 5, 6, 7],
[1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0],
[
1.9876543,
2.9876654,
3.9876543,
4.1234587,
5.23,
6.88918237,
7.00001,
],
[
-1.9876543,
-2.9876654,
-3.9876543,
-4.1234587,
-5.23,
-6.88918237,
-7.00001,
],
[
1.987654321,
2.987654321,
3.987654321,
0.1221,
2.1221,
0.112121,
-21.1212,
],
[
-1.987654321,
-2.987654321,
-3.987654321,
-0.1221,
-2.1221,
-0.112121,
21.1212,
],
],
)
@pytest.mark.parametrize("rtol", [0, 0.01, 1e-05, 1e-08, 5e-1, 50.12])
@pytest.mark.parametrize("atol", [0, 0.01, 1e-05, 1e-08, 50.12])
def test_cudf_isclose(data1, data2, rtol, atol):
array1 = cupy.array(data1)
array2 = cupy.array(data2)
expected = cudf.Series(cupy.isclose(array1, array2, rtol=rtol, atol=atol))
actual = cudf.isclose(
cudf.Series(data1), cudf.Series(data2), rtol=rtol, atol=atol
)
assert_eq(expected, actual)
actual = cudf.isclose(data1, data2, rtol=rtol, atol=atol)
assert_eq(expected, actual)
actual = cudf.isclose(
cupy.array(data1), cupy.array(data2), rtol=rtol, atol=atol
)
assert_eq(expected, actual)
actual = cudf.isclose(
np.array(data1), np.array(data2), rtol=rtol, atol=atol
)
assert_eq(expected, actual)
actual = cudf.isclose(
pd.Series(data1), pd.Series(data2), rtol=rtol, atol=atol
)
assert_eq(expected, actual)
@pytest.mark.parametrize(
"data1",
[
[
-1.9876543,
-2.9876654,
np.nan,
-4.1234587,
-5.23,
-6.88918237,
-7.00001,
],
[
1.987654321,
2.987654321,
3.987654321,
0.1221,
2.1221,
np.nan,
-21.1212,
],
],
)
@pytest.mark.parametrize(
"data2",
[
[
-1.9876543,
-2.9876654,
-3.9876543,
-4.1234587,
-5.23,
-6.88918237,
-7.00001,
],
[
1.987654321,
2.987654321,
3.987654321,
0.1221,
2.1221,
0.112121,
-21.1212,
],
[
-1.987654321,
-2.987654321,
-3.987654321,
np.nan,
np.nan,
np.nan,
21.1212,
],
],
)
@pytest.mark.parametrize("equal_nan", [True, False])
def test_cudf_isclose_nulls(data1, data2, equal_nan):
array1 = cupy.array(data1)
array2 = cupy.array(data2)
expected = cudf.Series(cupy.isclose(array1, array2, equal_nan=equal_nan))
actual = cudf.isclose(
cudf.Series(data1), cudf.Series(data2), equal_nan=equal_nan
)
assert_eq(expected, actual, check_dtype=False)
actual = cudf.isclose(data1, data2, equal_nan=equal_nan)
assert_eq(expected, actual, check_dtype=False)
def test_cudf_isclose_different_index():
s1 = cudf.Series(
[-1.9876543, -2.9876654, -3.9876543, -4.1234587, -5.23, -7.00001],
index=[0, 1, 2, 3, 4, 5],
)
s2 = cudf.Series(
[-1.9876543, -2.9876654, -7.00001, -4.1234587, -5.23, -3.9876543],
index=[0, 1, 5, 3, 4, 2],
)
expected = cudf.Series([True] * 6, index=s1.index)
assert_eq(expected, cudf.isclose(s1, s2))
s1 = cudf.Series(
[-1.9876543, -2.9876654, -3.9876543, -4.1234587, -5.23, -7.00001],
index=[0, 1, 2, 3, 4, 5],
)
s2 = cudf.Series(
[-1.9876543, -2.9876654, -7.00001, -4.1234587, -5.23, -3.9876543],
index=[0, 1, 5, 10, 4, 2],
)
expected = cudf.Series(
[True, True, True, False, True, True], index=s1.index
)
assert_eq(expected, cudf.isclose(s1, s2))
s1 = cudf.Series(
[-1.9876543, -2.9876654, -3.9876543, -4.1234587, -5.23, -7.00001],
index=[100, 1, 2, 3, 4, 5],
)
s2 = cudf.Series(
[-1.9876543, -2.9876654, -7.00001, -4.1234587, -5.23, -3.9876543],
index=[0, 1, 100, 10, 4, 2],
)
expected = cudf.Series(
[False, True, True, False, True, False], index=s1.index
)
assert_eq(expected, cudf.isclose(s1, s2))
def test_dataframe_to_dict_error():
df = cudf.DataFrame({"a": [1, 2, 3], "b": [9, 5, 3]})
with pytest.raises(
TypeError,
match=re.escape(
r"cuDF does not support conversion to host memory "
r"via `to_dict()` method. Consider using "
r"`.to_pandas().to_dict()` to construct a Python dictionary."
),
):
df.to_dict()
with pytest.raises(
TypeError,
match=re.escape(
r"cuDF does not support conversion to host memory "
r"via `to_dict()` method. Consider using "
r"`.to_pandas().to_dict()` to construct a Python dictionary."
),
):
df["a"].to_dict()
@pytest.mark.parametrize(
"df",
[
pd.DataFrame({"a": [1, 2, 3, 4, 5, 10, 11, 12, 33, 55, 19]}),
pd.DataFrame(
{
"one": [1, 2, 3, 4, 5, 10],
"two": ["abc", "def", "ghi", "xyz", "pqr", "abc"],
}
),
pd.DataFrame(
{
"one": [1, 2, 3, 4, 5, 10],
"two": ["abc", "def", "ghi", "xyz", "pqr", "abc"],
},
index=[10, 20, 30, 40, 50, 60],
),
pd.DataFrame(
{
"one": [1, 2, 3, 4, 5, 10],
"two": ["abc", "def", "ghi", "xyz", "pqr", "abc"],
},
index=["a", "b", "c", "d", "e", "f"],
),
pd.DataFrame(index=["a", "b", "c", "d", "e", "f"]),
pd.DataFrame(columns=["a", "b", "c", "d", "e", "f"]),
pd.DataFrame(index=[10, 11, 12]),
pd.DataFrame(columns=[10, 11, 12]),
pd.DataFrame(),
pd.DataFrame({"one": [], "two": []}),
pd.DataFrame({2: [], 1: []}),
pd.DataFrame(
{
0: [1, 2, 3, 4, 5, 10],
1: ["abc", "def", "ghi", "xyz", "pqr", "abc"],
100: ["a", "b", "b", "x", "z", "a"],
},
index=[10, 20, 30, 40, 50, 60],
),
],
)
def test_dataframe_keys(df):
gdf = cudf.from_pandas(df)
assert_eq(df.keys(), gdf.keys())
@pytest.mark.parametrize(
"ps",
[
pd.Series([1, 2, 3, 4, 5, 10, 11, 12, 33, 55, 19]),
pd.Series(["abc", "def", "ghi", "xyz", "pqr", "abc"]),
pd.Series(
[1, 2, 3, 4, 5, 10],
index=["abc", "def", "ghi", "xyz", "pqr", "abc"],
),
pd.Series(
["abc", "def", "ghi", "xyz", "pqr", "abc"],
index=[1, 2, 3, 4, 5, 10],
),
pd.Series(index=["a", "b", "c", "d", "e", "f"], dtype="float64"),
pd.Series(index=[10, 11, 12], dtype="float64"),
pd.Series(dtype="float64"),
pd.Series([], dtype="float64"),
],
)
def test_series_keys(ps):
gds = cudf.from_pandas(ps)
if len(ps) == 0 and not isinstance(ps.index, pd.RangeIndex):
assert_eq(ps.keys().astype("float64"), gds.keys())
else:
assert_eq(ps.keys(), gds.keys())
@pytest.mark.parametrize(
"df",
[
pd.DataFrame(),
pd.DataFrame(index=[10, 20, 30]),
pd.DataFrame({"first_col": [], "second_col": [], "third_col": []}),
pd.DataFrame([[1, 2], [3, 4]], columns=list("AB")),
pd.DataFrame([[1, 2], [3, 4]], columns=list("AB"), index=[10, 20]),
pd.DataFrame([[1, 2], [3, 4]], columns=list("AB"), index=[7, 8]),
pd.DataFrame(
{
"a": [315.3324, 3243.32432, 3232.332, -100.32],
"z": [0.3223, 0.32, 0.0000232, 0.32224],
}
),
pd.DataFrame(
{
"a": [315.3324, 3243.32432, 3232.332, -100.32],
"z": [0.3223, 0.32, 0.0000232, 0.32224],
},
index=[7, 20, 11, 9],
),
pd.DataFrame({"l": [10]}),
pd.DataFrame({"l": [10]}, index=[100]),
pd.DataFrame({"f": [10.2, 11.2332, 0.22, 3.3, 44.23, 10.0]}),
pd.DataFrame(
{"f": [10.2, 11.2332, 0.22, 3.3, 44.23, 10.0]},
index=[100, 200, 300, 400, 500, 0],
),
],
)
@pytest.mark.parametrize(
"other",
[
pd.DataFrame([[5, 6], [7, 8]], columns=list("AB")),
pd.DataFrame([[5, 6], [7, 8]], columns=list("BD")),
pd.DataFrame([[5, 6], [7, 8]], columns=list("DE")),
pd.DataFrame(),
pd.DataFrame(
{"c": [10, 11, 22, 33, 44, 100]}, index=[7, 8, 9, 10, 11, 20]
),
pd.DataFrame({"f": [10.2, 11.2332, 0.22, 3.3, 44.23, 10.0]}),
pd.DataFrame({"l": [10]}),
pd.DataFrame({"l": [10]}, index=[200]),
pd.DataFrame([]),
pd.DataFrame({"first_col": [], "second_col": [], "third_col": []}),
pd.DataFrame([], index=[100]),
pd.DataFrame(
{
"a": [315.3324, 3243.32432, 3232.332, -100.32],
"z": [0.3223, 0.32, 0.0000232, 0.32224],
}
),
pd.DataFrame(
{
"a": [315.3324, 3243.32432, 3232.332, -100.32],
"z": [0.3223, 0.32, 0.0000232, 0.32224],
},
index=[0, 100, 200, 300],
),
],
)
@pytest.mark.parametrize("sort", [False, True])
@pytest.mark.parametrize("ignore_index", [True, False])
def test_dataframe_append_dataframe(df, other, sort, ignore_index):
pdf = df
other_pd = other
gdf = cudf.from_pandas(df)
other_gd = cudf.from_pandas(other)
expected = pdf.append(other_pd, sort=sort, ignore_index=ignore_index)
actual = gdf.append(other_gd, sort=sort, ignore_index=ignore_index)
if expected.shape != df.shape:
assert_eq(expected.fillna(-1), actual.fillna(-1), check_dtype=False)
else:
assert_eq(
expected, actual, check_index_type=False if gdf.empty else True
)
@pytest.mark.parametrize(
"df",
[
pd.DataFrame(),
pd.DataFrame(index=[10, 20, 30]),
pd.DataFrame({12: [], 22: []}),
pd.DataFrame([[1, 2], [3, 4]], columns=[10, 20]),
pd.DataFrame([[1, 2], [3, 4]], columns=[0, 1], index=[10, 20]),
pd.DataFrame([[1, 2], [3, 4]], columns=[1, 0], index=[7, 8]),
pd.DataFrame(
{
23: [315.3324, 3243.32432, 3232.332, -100.32],
33: [0.3223, 0.32, 0.0000232, 0.32224],
}
),
pd.DataFrame(
{
0: [315.3324, 3243.32432, 3232.332, -100.32],
1: [0.3223, 0.32, 0.0000232, 0.32224],
},
index=[7, 20, 11, 9],
),
],
)
@pytest.mark.parametrize(
"other",
[
pd.Series([10, 11, 23, 234, 13]),
pytest.param(
pd.Series([10, 11, 23, 234, 13], index=[11, 12, 13, 44, 33]),
marks=pytest.mark.xfail(
reason="pandas bug: "
"https://github.com/pandas-dev/pandas/issues/35092"
),
),
{1: 1},
{0: 10, 1: 100, 2: 102},
],
)
@pytest.mark.parametrize("sort", [False, True])
def test_dataframe_append_series_dict(df, other, sort):
pdf = df
other_pd = other
gdf = cudf.from_pandas(df)
if isinstance(other, pd.Series):
other_gd = cudf.from_pandas(other)
else:
other_gd = other
expected = pdf.append(other_pd, ignore_index=True, sort=sort)
actual = gdf.append(other_gd, ignore_index=True, sort=sort)
if expected.shape != df.shape:
assert_eq(expected.fillna(-1), actual.fillna(-1), check_dtype=False)
else:
assert_eq(
expected, actual, check_index_type=False if gdf.empty else True
)
def test_dataframe_append_series_mixed_index():
df = cudf.DataFrame({"first": [], "d": []})
sr = cudf.Series([1, 2, 3, 4])
with pytest.raises(
TypeError,
match=re.escape(
"cudf does not support mixed types, please type-cast "
"the column index of dataframe and index of series "
"to same dtypes."
),
):
df.append(sr, ignore_index=True)
@pytest.mark.parametrize(
"df",
[
pd.DataFrame(),
pd.DataFrame(index=[10, 20, 30]),
pd.DataFrame({"first_col": [], "second_col": [], "third_col": []}),
pd.DataFrame([[1, 2], [3, 4]], columns=list("AB")),
pd.DataFrame([[1, 2], [3, 4]], columns=list("AB"), index=[10, 20]),
pd.DataFrame([[1, 2], [3, 4]], columns=list("AB"), index=[7, 8]),
pd.DataFrame(
{
"a": [315.3324, 3243.32432, 3232.332, -100.32],
"z": [0.3223, 0.32, 0.0000232, 0.32224],
}
),
pd.DataFrame(
{
"a": [315.3324, 3243.32432, 3232.332, -100.32],
"z": [0.3223, 0.32, 0.0000232, 0.32224],
},
index=[7, 20, 11, 9],
),
pd.DataFrame({"l": [10]}),
pd.DataFrame({"l": [10]}, index=[100]),
pd.DataFrame({"f": [10.2, 11.2332, 0.22, 3.3, 44.23, 10.0]}),
pd.DataFrame(
{"f": [10.2, 11.2332, 0.22, 3.3, 44.23, 10.0]},
index=[100, 200, 300, 400, 500, 0],
),
],
)
@pytest.mark.parametrize(
"other",
[
[pd.DataFrame([[5, 6], [7, 8]], columns=list("AB"))],
[
pd.DataFrame([[5, 6], [7, 8]], columns=list("AB")),
pd.DataFrame([[5, 6], [7, 8]], columns=list("BD")),
pd.DataFrame([[5, 6], [7, 8]], columns=list("DE")),
],
[pd.DataFrame(), pd.DataFrame(), pd.DataFrame(), pd.DataFrame()],
[
pd.DataFrame(
{"c": [10, 11, 22, 33, 44, 100]}, index=[7, 8, 9, 10, 11, 20]
),
pd.DataFrame(),
pd.DataFrame(),
pd.DataFrame([[5, 6], [7, 8]], columns=list("AB")),
],
[
pd.DataFrame({"f": [10.2, 11.2332, 0.22, 3.3, 44.23, 10.0]}),
pd.DataFrame({"l": [10]}),
pd.DataFrame({"l": [10]}, index=[200]),
],
[pd.DataFrame([]), pd.DataFrame([], index=[100])],
[
pd.DataFrame([]),
pd.DataFrame([], index=[100]),
pd.DataFrame({"first_col": [], "second_col": [], "third_col": []}),
],
[
pd.DataFrame(
{
"a": [315.3324, 3243.32432, 3232.332, -100.32],
"z": [0.3223, 0.32, 0.0000232, 0.32224],
}
),
pd.DataFrame(
{
"a": [315.3324, 3243.32432, 3232.332, -100.32],
"z": [0.3223, 0.32, 0.0000232, 0.32224],
},
index=[0, 100, 200, 300],
),
],
[
pd.DataFrame(
{
"a": [315.3324, 3243.32432, 3232.332, -100.32],
"z": [0.3223, 0.32, 0.0000232, 0.32224],
},
index=[0, 100, 200, 300],
),
],
[
pd.DataFrame(
{
"a": [315.3324, 3243.32432, 3232.332, -100.32],
"z": [0.3223, 0.32, 0.0000232, 0.32224],
},
index=[0, 100, 200, 300],
),
pd.DataFrame(
{
"a": [315.3324, 3243.32432, 3232.332, -100.32],
"z": [0.3223, 0.32, 0.0000232, 0.32224],
},
index=[0, 100, 200, 300],
),
],
[
pd.DataFrame(
{
"a": [315.3324, 3243.32432, 3232.332, -100.32],
"z": [0.3223, 0.32, 0.0000232, 0.32224],
},
index=[0, 100, 200, 300],
),
pd.DataFrame(
{
"a": [315.3324, 3243.32432, 3232.332, -100.32],
"z": [0.3223, 0.32, 0.0000232, 0.32224],
},
index=[0, 100, 200, 300],
),
pd.DataFrame({"first_col": [], "second_col": [], "third_col": []}),
],
],
)
@pytest.mark.parametrize("sort", [False, True])
@pytest.mark.parametrize("ignore_index", [True, False])
def test_dataframe_append_dataframe_lists(df, other, sort, ignore_index):
pdf = df
other_pd = other
gdf = cudf.from_pandas(df)
other_gd = [
cudf.from_pandas(o) if isinstance(o, pd.DataFrame) else o
for o in other
]
expected = pdf.append(other_pd, sort=sort, ignore_index=ignore_index)
actual = gdf.append(other_gd, sort=sort, ignore_index=ignore_index)
if expected.shape != df.shape:
assert_eq(expected.fillna(-1), actual.fillna(-1), check_dtype=False)
else:
assert_eq(
expected, actual, check_index_type=False if gdf.empty else True
)
@pytest.mark.parametrize(
"df",
[
pd.DataFrame(),
pd.DataFrame([[1, 2], [3, 4]], columns=list("AB")),
pd.DataFrame([[1, 2], [3, 4]], columns=list("AB"), index=[10, 20]),
pd.DataFrame([[1, 2], [3, 4]], columns=list("AB"), index=[7, 8]),
pd.DataFrame(
{
"a": [315.3324, 3243.32432, 3232.332, -100.32],
"z": [0.3223, 0.32, 0.0000232, 0.32224],
}
),
pd.DataFrame(
{
"a": [315.3324, 3243.32432, 3232.332, -100.32],
"z": [0.3223, 0.32, 0.0000232, 0.32224],
},
index=[7, 20, 11, 9],
),
pd.DataFrame({"l": [10]}),
pd.DataFrame({"l": [10]}, index=[100]),
pd.DataFrame({"f": [10.2, 11.2332, 0.22, 3.3, 44.23, 10.0]}),
pd.DataFrame(
{"f": [10.2, 11.2332, 0.22, 3.3, 44.23, 10.0]},
index=[100, 200, 300, 400, 500, 0],
),
pd.DataFrame({"first_col": [], "second_col": [], "third_col": []}),
],
)
@pytest.mark.parametrize(
"other",
[
[[1, 2], [10, 100]],
[[1, 2, 10, 100, 0.1, 0.2, 0.0021]],
[[]],
[[], [], [], []],
[[0.23, 0.00023, -10.00, 100, 200, 1000232, 1232.32323]],
],
)
@pytest.mark.parametrize("sort", [False, True])
@pytest.mark.parametrize("ignore_index", [True, False])
def test_dataframe_append_lists(df, other, sort, ignore_index):
pdf = df
other_pd = other
gdf = cudf.from_pandas(df)
other_gd = [
cudf.from_pandas(o) if isinstance(o, pd.DataFrame) else o
for o in other
]
expected = pdf.append(other_pd, sort=sort, ignore_index=ignore_index)
actual = gdf.append(other_gd, sort=sort, ignore_index=ignore_index)
if expected.shape != df.shape:
assert_eq(expected.fillna(-1), actual.fillna(-1), check_dtype=False)
else:
assert_eq(
expected, actual, check_index_type=False if gdf.empty else True
)
def test_dataframe_append_error():
df = cudf.DataFrame({"a": [1, 2, 3]})
ps = cudf.Series([1, 2, 3])
with pytest.raises(
TypeError,
match="Can only append a Series if ignore_index=True "
"or if the Series has a name",
):
df.append(ps)
def test_cudf_arrow_array_error():
df = cudf.DataFrame({"a": [1, 2, 3]})
with pytest.raises(
TypeError,
match="Implicit conversion to a host PyArrow Table via __arrow_array__"
" is not allowed, To explicitly construct a PyArrow Table, consider "
"using .to_arrow()",
):
df.__arrow_array__()
sr = cudf.Series([1, 2, 3])
with pytest.raises(
TypeError,
match="Implicit conversion to a host PyArrow Array via __arrow_array__"
" is not allowed, To explicitly construct a PyArrow Array, consider "
"using .to_arrow()",
):
sr.__arrow_array__()
sr = cudf.Series(["a", "b", "c"])
with pytest.raises(
TypeError,
match="Implicit conversion to a host PyArrow Array via __arrow_array__"
" is not allowed, To explicitly construct a PyArrow Array, consider "
"using .to_arrow()",
):
sr.__arrow_array__()
@pytest.mark.parametrize("n", [0, 2, 5, 10, None])
@pytest.mark.parametrize("frac", [0.1, 0.5, 1, 2, None])
@pytest.mark.parametrize("replace", [True, False])
@pytest.mark.parametrize("axis", [0, 1])
def test_dataframe_sample_basic(n, frac, replace, axis):
    # as we currently don't support columns with the same name
if axis == 1 and replace:
return
pdf = pd.DataFrame(
{
"a": [1, 2, 3, 4, 5],
"float": [0.05, 0.2, 0.3, 0.2, 0.25],
"int": [1, 3, 5, 4, 2],
},
index=[1, 2, 3, 4, 5],
)
df = cudf.DataFrame.from_pandas(pdf)
random_state = 0
try:
pout = pdf.sample(
n=n,
frac=frac,
replace=replace,
random_state=random_state,
axis=axis,
)
except BaseException:
assert_exceptions_equal(
lfunc=pdf.sample,
rfunc=df.sample,
lfunc_args_and_kwargs=(
[],
{
"n": n,
"frac": frac,
"replace": replace,
"random_state": random_state,
"axis": axis,
},
),
rfunc_args_and_kwargs=(
[],
{
"n": n,
"frac": frac,
"replace": replace,
"random_state": random_state,
"axis": axis,
},
),
)
else:
gout = df.sample(
n=n,
frac=frac,
replace=replace,
random_state=random_state,
axis=axis,
)
assert pout.shape == gout.shape
@pytest.mark.parametrize("replace", [True, False])
@pytest.mark.parametrize("random_state", [1, np.random.mtrand.RandomState(10)])
def test_dataframe_reproducibility(replace, random_state):
df = cudf.DataFrame({"a": cupy.arange(0, 1024)})
expected = df.sample(1024, replace=replace, random_state=random_state)
out = df.sample(1024, replace=replace, random_state=random_state)
assert_eq(expected, out)
@pytest.mark.parametrize("n", [0, 2, 5, 10, None])
@pytest.mark.parametrize("frac", [0.1, 0.5, 1, 2, None])
@pytest.mark.parametrize("replace", [True, False])
def test_series_sample_basic(n, frac, replace):
psr = pd.Series([1, 2, 3, 4, 5])
sr = cudf.Series.from_pandas(psr)
random_state = 0
try:
pout = psr.sample(
n=n, frac=frac, replace=replace, random_state=random_state
)
except BaseException:
assert_exceptions_equal(
lfunc=psr.sample,
rfunc=sr.sample,
lfunc_args_and_kwargs=(
[],
{
"n": n,
"frac": frac,
"replace": replace,
"random_state": random_state,
},
),
rfunc_args_and_kwargs=(
[],
{
"n": n,
"frac": frac,
"replace": replace,
"random_state": random_state,
},
),
)
else:
gout = sr.sample(
n=n, frac=frac, replace=replace, random_state=random_state
)
assert pout.shape == gout.shape
@pytest.mark.parametrize(
"df",
[
pd.DataFrame(),
pd.DataFrame(index=[100, 10, 1, 0]),
pd.DataFrame(columns=["a", "b", "c", "d"]),
pd.DataFrame(columns=["a", "b", "c", "d"], index=[100]),
pd.DataFrame(
columns=["a", "b", "c", "d"], index=[100, 10000, 2131, 133]
),
pd.DataFrame({"a": [1, 2, 3], "b": ["abc", "xyz", "klm"]}),
],
)
def test_dataframe_empty(df):
pdf = df
gdf = cudf.from_pandas(pdf)
assert_eq(pdf.empty, gdf.empty)
@pytest.mark.parametrize(
"df",
[
pd.DataFrame(),
pd.DataFrame(index=[100, 10, 1, 0]),
pd.DataFrame(columns=["a", "b", "c", "d"]),
pd.DataFrame(columns=["a", "b", "c", "d"], index=[100]),
pd.DataFrame(
columns=["a", "b", "c", "d"], index=[100, 10000, 2131, 133]
),
pd.DataFrame({"a": [1, 2, 3], "b": ["abc", "xyz", "klm"]}),
],
)
def test_dataframe_size(df):
pdf = df
gdf = cudf.from_pandas(pdf)
assert_eq(pdf.size, gdf.size)
@pytest.mark.parametrize(
"ps",
[
pd.Series(dtype="float64"),
pd.Series(index=[100, 10, 1, 0], dtype="float64"),
pd.Series([], dtype="float64"),
pd.Series(["a", "b", "c", "d"]),
pd.Series(["a", "b", "c", "d"], index=[0, 1, 10, 11]),
],
)
def test_series_empty(ps):
ps = ps
gs = cudf.from_pandas(ps)
assert_eq(ps.empty, gs.empty)
@pytest.mark.parametrize(
"data",
[
[],
[1],
{"a": [10, 11, 12]},
{
"a": [10, 11, 12],
"another column name": [12, 22, 34],
"xyz": [0, 10, 11],
},
],
)
@pytest.mark.parametrize("columns", [["a"], ["another column name"], None])
def test_dataframe_init_with_columns(data, columns):
pdf = pd.DataFrame(data, columns=columns)
gdf = cudf.DataFrame(data, columns=columns)
assert_eq(
pdf,
gdf,
check_index_type=False if len(pdf.index) == 0 else True,
check_dtype=False if pdf.empty and len(pdf.columns) else True,
)
@pytest.mark.parametrize(
"data, ignore_dtype",
[
([pd.Series([1, 2, 3])], False),
([pd.Series(index=[1, 2, 3], dtype="float64")], False),
([pd.Series(name="empty series name", dtype="float64")], False),
(
[pd.Series([1]), pd.Series([], dtype="float64"), pd.Series([3])],
False,
),
(
[
pd.Series([1, 0.324234, 32424.323, -1233, 34242]),
pd.Series([], dtype="float64"),
pd.Series([3], name="series that is named"),
],
False,
),
        ([pd.Series([1, 2, 3], name="hi")
from datetime import datetime
import warnings
import logging
import os
import shutil
import zipfile
import typing
from pathlib import Path
import geopandas as gpd
import pandas as pd
import requests
from bs4 import BeautifulSoup
from utilities.helper_functions import FileFailedException, Failed_Files, check_dir, CHUNK_SIZE
pd.options.mode.chained_assignment = None
logger = logging.getLogger(__name__)
warnings.simplefilter(action='ignore', category=FutureWarning)
def get_files_list(year: int, exclude_to_resume: typing.List[str]) -> typing.List[str]:
# url link to data
url = "https://coast.noaa.gov/htdata/CMSP/AISDataHandler/{0}/".format(year)
# request the html file
html_text = requests.get(url).text
# parse the html
soup = BeautifulSoup(html_text, 'html.parser')
# iterate over the <a> tags and save each in a list
files = []
for a in soup.find_all('a', href=True):
if a.text and a.text.endswith('zip'):
name = a['href'].split('.')[0]
name = name.split('/')[-1] if len(name.split('/')) > 1 else name
if name + '.csv' in exclude_to_resume + Failed_Files or name + '.gdb' in exclude_to_resume + Failed_Files or name + '.zip' in Failed_Files:
continue
files.append(a['href'])
return files
def chunkify_gdb(gdb_file: Path, file_path: Path) -> None:
end = CHUNK_SIZE
start = 0
header = True
while True:
gdf_chunk = gpd.read_file(gdb_file, rows=slice(start, end))
if len(gdf_chunk) == 0: break
gdf_chunk['LON'] = gdf_chunk.geometry.apply(lambda point: point.x)
gdf_chunk['LAT'] = gdf_chunk.geometry.apply(lambda point: point.y)
gdf_chunk.drop(columns=['geometry'], inplace=True)
gdf_chunk.to_csv(file_path, mode='a', header=header, index=False)
start = end
end += CHUNK_SIZE
header = False
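# Hedged usage sketch (not part of the original script): chunkify_gdb streams an
# extracted FileGDB into a flat CSV with LON/LAT columns, CHUNK_SIZE rows at a
# time. The paths below are illustrative assumptions.
def _example_chunkify_gdb() -> None:
    gdb_dir = Path("downloads/AIS_2018_01.gdb")   # hypothetical extracted geodatabase
    csv_out = Path("downloads/AIS_2018_01.csv")   # CSV is appended chunk by chunk
    chunkify_gdb(gdb_dir, csv_out)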
def download_file(zipped_file_name: str, download_dir: Path, year: int) -> str:
try:
# url link to data
url = "https://coast.noaa.gov/htdata/CMSP/AISDataHandler/{0}/".format(year)
logger.info('downloading AIS file: %s' % zipped_file_name)
        # download the zip file by streaming it from the url into a local file
with requests.get(os.path.join(url, zipped_file_name), stream=True) as req:
req.raise_for_status()
zipped_file_name = zipped_file_name.split('/')[-1] if len(
zipped_file_name.split('/')) > 1 else zipped_file_name
with open(zipped_file_name, "wb") as handle:
for chunk in req.iter_content(chunk_size=8192):
handle.write(chunk)
handle.close()
# extract each zip file into output directory then delete it
with zipfile.ZipFile(zipped_file_name, 'r') as zip_ref:
for f in zip_ref.infolist():
if f.filename.endswith('.csv'):
f.filename = os.path.basename(f.filename)
file_name = f.filename
zip_ref.extract(f, download_dir)
if str(Path(f.filename).parent).endswith('.gdb'):
zip_ref.extractall(download_dir)
name = str(Path(f.filename).parent)
gdb_file = Path(download_dir, name)
file_name = name.split('.')[0] + '.csv'
file_path = Path(download_dir, file_name)
try:
chunkify_gdb(gdb_file, file_path)
except Exception as e:
# discard the file in case of an error to resume later properly
if file_path:
file_path.unlink(missing_ok=True)
raise e
shutil.rmtree(gdb_file)
break
os.remove(zipped_file_name)
return file_name
except Exception as e:
raise FileFailedException(zipped_file_name, e)
def download_year_AIS(year: int, download_dir: Path) -> None:
    # create a directory named after the given year if it does not exist
resume_download = []
if download_dir.exists():
resume_download = check_dir(download_dir)
files = get_files_list(year, exclude_to_resume=resume_download)
# download
for zip_file_name in files:
download_file(zip_file_name, download_dir, year)
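# Hedged usage sketch (not in the original script; the year and target directory
# are assumptions): download_year_AIS skips files already present in download_dir
# and files listed in Failed_Files, so re-running it resumes an interrupted download.
def _example_download_year() -> None:
    download_year_AIS(2020, Path("downloads/2020"))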
def rm_sec(date: datetime) -> datetime:
return date.replace(second=0, tzinfo=None)
def subsample_file(file_name, download_dir, filtered_dir, min_time_interval) -> str:
logging.info("Subsampling %s " % str(file_name))
header = True
try:
for df_chunk in pd.read_csv(Path(download_dir, file_name), chunksize=CHUNK_SIZE):
df_chunk = df_chunk.drop(['Unnamed: 0', 'MMSI', 'VesselName', 'CallSign', 'Cargo', 'TranscieverClass',
'ReceiverType', 'ReceiverID'], axis=1, errors='ignore')
df_chunk = df_chunk.dropna()
            df_chunk['SOG'] = pd.to_numeric(df_chunk['SOG'])
#IMPORTS
import csv
import pandas as pd
import re
import nltk
import os
#LOADING FILES INTO ONE DF
PBP_data = "../nflscrapR-data/play_by_play_data/regular_season"
dfs = []
for season_file in os.listdir(PBP_data):
year = re.search("[0-9]{4}", season_file)
    df = pd.read_csv(PBP_data + "/" + season_file, usecols=['desc', 'play_type', 'defteam', 'posteam'])
import shlex
import os
import sys
import subprocess
import json
import pprint
import numpy as np
import pandas as pd
APPEND = '0ms'
if len(sys.argv) == 3:
APPEND = sys.argv[2]
LOG_BASE_DIR = '../logs/'
LOG_DIR = f'{LOG_BASE_DIR}/kem_{APPEND}'
PKL_DIR = './pkl/kem'
def parse_algo(l):
split = l.split('_')
ts = split[1]
run = split[-2]
algo = '_'.join(split[4:-2]).split('.')[0]
return (algo, ts, run)
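# Hedged example of the log-file naming scheme parse_algo assumes; the file name
# below is hypothetical but consistent with the split('_') indexing above
# (ts at index 1, run second from the end, algo joined from index 4 onwards).
def _example_parse_algo() -> None:
    algo, ts, run = parse_algo('log_1612345678_kem_0ms_saber_r3_5_inst.log')
    assert (algo, ts, run) == ('saber_r3', '1612345678', '5')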
def parse_bench(line, algo, ts, run):
line = line.rstrip()[7:]
d = dict(token.split('=') for token in shlex.split(line))
d['algo'] = algo
d['ts'] = ts
d['run'] = run
return d
def parse_time(line, algo, ts, run):
s = line.rstrip().split(' ')
return {'run': run, 'ts': ts, 'type': s[0], 'algo': algo, 'clock': s[1]}
def __get_frame_info(frame, d):
d['time'] = frame['frame.time']
d['time_delta'] = frame['frame.time_delta']
d['frame_nr'] = frame['frame.number']
d['frame_len'] = frame['frame.len']
return d
def __get_udp_info(udp, d):
d['src'] = udp['udp.srcport']
d['dst'] = udp['udp.dstport']
return d
def __get_rad_info(radius, d):
d['rad_len'] = radius['radius.length']
d['rad_code'] = radius['radius.code']
d['rad_id'] = radius['radius.id']
return d
def __parse_tls_real_type(__d):
if 'tls.handshake.type' in __d:
__d['tls_real_type'] = __d['tls.handshake.type']
elif 'tls.record.opaque_type' in __d:
__d['tls_real_type'] = __d['tls.record.opaque_type']
else:
__d['tls_real_type'] = __d['tls.record.content_type']
return __d
def __parse_tls_record_fields(record, __d):
for field in record:
if field == 'tls.record.version':
__d['tls.record.version'] = record['tls.record.version']
elif field == 'tls.record.opaque_type':
__d['tls.record.content_type'] = record['tls.record.opaque_type']
elif field == 'tls.record.content_type':
__d['tls.record.content_type'] = record['tls.record.content_type']
elif field == 'tls.record.length':
__d['tls.record.length'] = record['tls.record.length']
elif field == 'tls.handshake':
if 'tls.handshake.type' in record[field]:
__d['tls.handshake.type'] = record[field]['tls.handshake.type']
if 'tls.handshake.length' in record[field]:
__d['tls.handshake.length'] = record[field]['tls.handshake.length']
else:
pass
return __parse_tls_real_type(__d)
def __parse_eap(eap, _d):
_d['eap.id'] = eap['eap.id']
_d['eap.code'] = eap['eap.code']
_d['eap.len'] = eap['eap.len']
if 'eap.type' in eap:
_d['eap.type'] = eap['eap.type']
return _d
def parse_cap(capfile, algo, ts, run):
cap = []
tshark = ('tshark', '-n', '-2', '-r', capfile, '-T', 'json', '--no-duplicate-keys')
o = subprocess.Popen(tshark, stdout=subprocess.PIPE)
packets = json.loads(o.communicate()[0])
pkt_count = 0
for _x, packet in enumerate(packets):
d = {'algo': algo, 'ts': ts, 'run': run}
packet = packet['_source']
d = __get_frame_info(packet['layers']['frame'], d)
if 'radius' not in packet['layers']:
continue
d['frame_count'] = pkt_count
pkt_count += 1
d = __get_udp_info(packet['layers']['udp'], d)
d = __get_rad_info(packet['layers']['radius'], d)
radius = packet['layers']['radius']
for avp_count, x in enumerate(radius['Attribute Value Pairs']['radius.avp_tree']):
has_tls_layer = False
_d = d.copy()
_d['avp_count'] = avp_count
for k in x:
if k == 'radius.avp.type':
_d['rad_avp_t'] = x['radius.avp.type']
elif k == 'radius.avp.length':
_d['rad_avp_len'] = x['radius.avp.length']
elif k == 'eap':
if _x == 0:
assert(x[k]['eap.code'] == '2' and x[k]['eap.type'] == '1')
if _x == len(packets)-1:
assert(x[k]['eap.code'] == '3')
_d = __parse_eap(x[k], _d)
if 'tls' in x[k]:
if not isinstance(x[k]['tls'],str):
for _k in x[k]['tls']:
if _k == 'tls.record':
records = x[k]['tls'][_k]
if isinstance(records, dict):
records = [records]
if len(records) > 0:
has_tls_layer = True
for i, record in enumerate(records):
__d = __parse_tls_record_fields(record, _d.copy())
__d['record_count'] = i
cap.append(__d)
elif _k == 'Ignored Unknown Record':
pass
else:
print(d['frame_nr'])
pprint.pprint(x[k])
if not has_tls_layer:
cap.append(_d)
return cap
def parse_inst(instfile, algo, ts, run):
log = open(instfile,'r')
bench = []
time = []
for line in log.readlines():
if line.startswith('Bench: '):
bench.append(parse_bench(line, algo, ts, run))
elif line.startswith('time_'):
time.append(parse_time(line, algo, ts, run))
else:
continue
log.close()
return bench, time
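# Hedged sketch of the two line formats parse_inst consumes, inferred from
# parse_bench/parse_time above; the literal values here are made up, and real
# logs carry more key=value fields per "Bench:" line.
def _example_inst_lines() -> None:
    b = parse_bench('Bench: type=tls_msg_cb_bench len=123 clock=0.004\n',
                    'saber_r3', '1612345678', '5')
    t = parse_time('time_total 1234567\n', 'saber_r3', '1612345678', '5')
    assert b['type'] == 'tls_msg_cb_bench' and t['type'] == 'time_total'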
def beautify_msg(_msg_cb):
_msg_cb['len'] = _msg_cb['len'].astype('int64')
_msg_cb['clock'] = _msg_cb['clock'].astype(float)
_msg_cb['clock_delta'] = _msg_cb['clock_delta'].astype(float)
_msg_cb['clock_abs'] = _msg_cb['clock_abs'].astype(float)
_msg_cb['time'] = _msg_cb['time'].astype(float)
_msg_cb['time_delta'] = _msg_cb['time_delta'].astype(float)
_msg_cb['time_abs'] = _msg_cb['time_abs'].astype(float)
_msg_cb['sum_len'] = _msg_cb['sum_len'].astype(float)
_msg_cb['n'] = _msg_cb['n'].astype(int)
_msg_cb = _msg_cb.reset_index().drop(['index', 'type'], axis = 1)
return _msg_cb
def beautify_info(_info_cb):
_info_cb['clock'] = _info_cb['clock'].astype(float)
_info_cb['clock_delta'] = _info_cb['clock_delta'].astype(float)
_info_cb['clock_abs'] = _info_cb['clock_abs'].astype(float)
    _info_cb['time'] = _info_cb['time'].astype(float)
_info_cb['time_delta'] = _info_cb['time_delta'].astype(float)
_info_cb['time_abs'] = _info_cb['time_abs'].astype(float)
_info_cb['n'] = _info_cb['n'].astype(float)
_info_cb = _info_cb.reset_index().drop(['index', 'type'], axis = 1)
return _info_cb
def beautify_time(_time_df):
_time_df['clock'] = _time_df['clock'].astype('float')
#_time_df['cpu_time'] = _time_df['cpu_time'].astype('float')
#_time_df['wct'] = _time_df['wct'].astype('float')
_df_total = _time_df[_time_df['type'] == 'time_total']
_df_eap = _time_df[_time_df['type'] == 'time_eap']
return _df_total, _df_eap
def beautify_cap(_cap_df):
_cap_df['frame_nr'] = _cap_df['frame_nr'].astype(int)
_cap_df['ts'] = _cap_df['ts'].astype(int)
_cap_df['run'] = _cap_df['run'].astype(int)
_cap_df['time'] = pd.to_datetime(_cap_df['time'])
_cap_df['time_delta'] = _cap_df['time_delta'].astype(float)
_cap_df['frame_len'] = _cap_df['frame_len'].astype(int)
_cap_df['rad_len'] = _cap_df['rad_len'].astype(int)
_cap_df['rad_avp_len'] = _cap_df['rad_avp_len'].astype(int)
_cap_df['eap.len'] = _cap_df['eap.len'].astype(float)
_cap_df['tls.record.length'] = _cap_df['tls.record.length'].astype(float)
_cap_df['tls.handshake.length'] = _cap_df['tls.handshake.length'].astype(float)
return _cap_df
def beautify(bench, time, cap):
_msg_cb = None
_info_cb = None
_df_total = None
_df_eap = None
_cap_df = None
bench_df = pd.DataFrame(bench)
if len(bench_df) > 0:
_msg_cb = bench_df[bench_df['type'] == 'tls_msg_cb_bench'].copy().dropna(axis='columns')
_msg_cb = beautify_msg(_msg_cb)
if len(bench_df) > 0:
_info_cb = bench_df[bench_df['type'] == 'tls_info_cb_bench'].copy().dropna(axis='columns')
_info_cb = beautify_info(_info_cb)
time_df = pd.DataFrame(time)
if len(time_df) > 0:
_df_total, _df_eap = beautify_time(time_df)
_cap_df = pd.DataFrame(cap)
if len(_cap_df) > 0:
_cap_df = beautify_cap(_cap_df)
return _msg_cb, _info_cb, _df_total, _df_eap, _cap_df
def _parse(_min=0, _max=None):
bench = []
time = []
cap = []
dirlist = os.listdir(LOG_DIR)
if _max is None:
_max=len(dirlist)
for i, l in enumerate(dirlist):
if i < _min or i > _max:
continue
print(f'Parsing log {i}/{len(dirlist)}: {l}')
algo, ts, run = parse_algo(l)
if l.endswith('_inst.log'):
instfile = f'{LOG_DIR}/{l}'
a = []
b = []
a, b = parse_inst(instfile, algo, ts, run)
bench += a
time += b
elif l.endswith('.cap'):
capfile = f'{LOG_DIR}/{l}'
cap += parse_cap(capfile, algo, ts, run)
else:
print(f"Error unknown log {l}")
sys.exit(1)
return beautify(bench, time, cap)
def main(load=None, store=None):
if load is not None:
_msg_cb = pd.read_pickle(f"{PKL_DIR}/msg_cb_{APPEND}.pkl")
_info_cb = pd.read_pickle(f"{PKL_DIR}/info_cb_{APPEND}.pkl")
_df_total = pd.read_pickle(f"{PKL_DIR}/df_total_{APPEND}.pkl")
_df_eap = pd.read_pickle(f"{PKL_DIR}/df_eap_{APPEND}.pkl")
        _cap_df = pd.read_pickle(f"{PKL_DIR}/cap_df_{APPEND}.pkl")
"""
__author__ = <NAME>
Many analysis functions for dF/F. Main class is CalciumReview.
"""
import pathlib
import re
import itertools
import warnings
from typing import Optional, Dict
import attr
from attr.validators import instance_of
import numpy as np
import pandas as pd
import xarray as xr
import matplotlib.pyplot as plt
from typing import List, Tuple, Dict
from enum import Enum
from scipy import stats
from calcium_bflow_analysis.dff_analysis_and_plotting import dff_analysis
from calcium_bflow_analysis.single_fov_analysis import filter_da
class Condition(Enum):
HYPER = "HYPER"
HYPO = "HYPO"
class AvailableFuncs(Enum):
""" Allowed analysis functions that can be used with CalciumReview.
The values of the enum variants are names of functions in dff_analysis.py """
AUC = "calc_total_auc_around_spikes"
MEAN = "calc_mean_auc_around_spikes"
MEDIAN = "calc_median_auc_around_spikes"
SPIKERATE = "calc_mean_spike_num"
@attr.s
class CalciumReview:
"""
Evaluate and analyze calcium data from TAC-like experiments.
The attributes ending with `_data` are pd.DataFrames that
contain the result of different function from dff_analysis.py. If you wish
to add a new function, first make sure that its output is
compatible with that of existing functions, then add a new
attribute to the class and a new variant to the enum,
and finally patch the __attrs_post_init__ method to include this
new attribute. Make sure to not change the order of the enum - add
the function at the bottom of that list.
"""
folder = attr.ib(validator=instance_of(pathlib.Path))
glob = attr.ib(default=r"*data_of_day_*.nc")
files = attr.ib(init=False)
days = attr.ib(init=False)
conditions = attr.ib(init=False)
df_columns = attr.ib(init=False)
funcs_dict = attr.ib(init=False)
raw_data = attr.ib(init=False)
auc_data = attr.ib(init=False)
mean_data = attr.ib(init=False)
spike_data = attr.ib(init=False)
def __attrs_post_init__(self):
"""
Find all files and parsed days for the experiment, and (partially) load them
into memory.
"""
self.files = []
self.raw_data = {}
all_files = self.folder.rglob(self.glob)
day_reg = re.compile(r".+?of_day_(\d+).nc")
parsed_days = []
print("Found the following files:")
day = 0
for file in all_files:
print(file)
self.files.append(file)
try:
day = int(day_reg.findall(file.name)[0])
except IndexError:
continue
parsed_days.append(day)
self.raw_data[day] = xr.open_dataset(file)
self.days = np.unique(np.array(parsed_days))
stats = ["_mean", "_std"]
self.conditions = list(set(self.raw_data[day].condition.values.tolist()))
self.df_columns = [
"".join(x) for x in itertools.product(self.conditions, stats)
] + ["t", "p"]
self.auc_data = pd.DataFrame(columns=self.df_columns)
self.mean_data = pd.DataFrame(columns=self.df_columns)
self.spike_data = pd.DataFrame(columns=self.df_columns)
# Map the function name to its corresponding DataFrame
self.funcs_dict = {
key: val
for key, val in zip(
AvailableFuncs.__members__.values(),
[self.auc_data, self.mean_data, self.spike_data],
)
}
def data_of_day(self, day: int, condition: Condition, epoch="spont"):
""" A function used to retrieve the "raw" data of dF/F, in the form of
cells x time, to the user. Supply a proper day, condition and epoch and receive a numpy array. """
try:
unselected_data = self.raw_data[day]
except KeyError:
print(f"The day {day} is invalid. Valid days are {self.days}.")
else:
return filter_da(unselected_data, condition=condition.value, epoch=epoch)
def apply_analysis_funcs_two_conditions(
self, funcs: list, epoch: str, mouse_id: Optional[str] = None
) -> pd.DataFrame:
""" Call the list of methods given to save time and memory. Applicable
if the dataset has two conditions, like left and right. Returns a DF
that can be used for later viz using seaborn."""
summary_df = pd.DataFrame()
for day, raw_datum in dict(sorted(self.raw_data.items())).items():
print(f"Analyzing day {day}...")
selected_first = filter_da(
raw_datum, condition=self.conditions[0], epoch=epoch, mouse_id=mouse_id,
)
selected_second = filter_da(
raw_datum, condition=self.conditions[1], epoch=epoch, mouse_id=mouse_id,
)
if selected_first.shape[0] == 0 or selected_second.shape[0] == 0:
continue
spikes_first = dff_analysis.locate_spikes_scipy(selected_first, self.raw_data[day].fps)
spikes_second = dff_analysis.locate_spikes_scipy(selected_second, self.raw_data[day].fps)
for func in funcs:
cond1 = getattr(dff_analysis, func.value)(spikes_first, selected_first, self.raw_data[day].fps)
                cond1_label = np.full(cond1.shape, self.conditions[0])
cond1_mean, cond1_sem = (
cond1.mean(),
cond1.std(ddof=1) / np.sqrt(cond1.shape[0]),
)
cond2 = getattr(dff_analysis, func.value)(spikes_second, selected_second, self.raw_data[day].fps)
                cond2_label = np.full(cond2.shape, self.conditions[1])
data = np.concatenate([cond1, cond2])
labels = np.concatenate([cond1_label, cond2_label])
df = pd.DataFrame({'data': np.nan_to_num(data), 'condition': labels, 'day': day, 'measure': func.value})
summary_df = summary_df.append(df)
cond2_mean, cond2_sem = (
cond2.mean(),
cond2.std(ddof=1) / np.sqrt(cond2.shape[0]),
)
t, p = stats.ttest_ind(cond1, cond2, equal_var=False)
df_dict = {
col: data
for col, data in zip(
self.df_columns,
[
cond1_mean,
cond1_sem,
cond2_mean,
cond2_sem,
t,
p,
],
)
}
self.funcs_dict[func] = self.funcs_dict[func].append(
                    pd.DataFrame(df_dict, index=[day])
                )
"""
author: muzexlxl
email: <EMAIL>
time series factors
bias: -1 0 1
neut: 1, 0
"""
import pandas as pd
import numpy as np
from datetime import datetime
import collections
import math
# import src.data.clickhouse_control as cc
class FactorX:
def __init__(self, id: list, timeframe: str, data_source: str, start: str, end: str):
# self.db_conn = cc.ClickHouse(data_source)
# self.db_conn = 0
# self.id = id
# if self.id[0] == 'symbol':
# self.database = self.db_conn.db_conf.db_processed
# self.data_table = self.db_conn.db_conf.processed_trade_data_main
# elif self.id[0] == 'code':
# self.database = self.db_conn.db_conf.db_raw
# self.data_table = self.db_conn.db_conf.raw_trade_data
# else:
# raise AttributeError(f'Wrong id type: {self.id[0]}')
# self.timeframe = timeframe
# self.data_source = data_source
# self.main_df = self.data_reader(start, end)
self.main_df = pd.DataFrame()
# def data_reader(self, start_date, end_date):
# sql_ = f"select `code`, `symbol`, `datetime`, `open`, `close`, " \
# f"`high`, `low`, `turnover`, `volume`, `open_interest` from " \
# f"{self.database}.{self.data_table} where `{self.id[0]}` = '{self.id[1]}' and " \
# f"`timeframe` = '{self.timeframe}' and `data_source` = '{self.data_source}' and " \
# f"`datetime` >= '{start_date}' and `datetime` <= '{end_date}'"
# df = self.db_conn.reader_to_dataframe(sql_)
# df['datetime'] = pd.to_datetime(df['datetime'])
# df['date'] = df['datetime'].dt.strftime("%Y-%m-%d")
# return df.set_index('datetime')
def reset_df(self, df: pd.DataFrame):
self.main_df = df
def factor_tmom_neut_01(self, w):
"""adx indicator"""
source_df = self.main_df.copy()
source_df['up'] = source_df['high'] - source_df['high'].shift()
source_df['down'] = source_df['low'].shift() - source_df['low']
source_df['dm+'] = np.where(
(source_df['up'] > source_df['down']) & (source_df['down'] > 0), source_df['up'], 0
)
source_df['dm-'] = np.where(
(source_df['down'] > source_df['up']) & (source_df['up'] > 0), source_df['down'], 0
)
source_df['hl'] = source_df['high'] - source_df['low']
source_df['hc'] = abs(source_df['high'] - source_df['close'])
source_df['lc'] = abs(source_df['low'] - source_df['close'])
source_df['atr'] = source_df[['hl', 'hc', 'lc']].max(axis=1).rolling(w).mean()
source_df['di+'] = (source_df['dm+'].rolling(w).mean() / source_df['atr']) * 100
source_df['di-'] = (source_df['dm-'].rolling(w).mean() / source_df['atr']) * 100
source_df['dx'] = ((source_df['di+'] - source_df['di-']) / (source_df['di+'] + source_df['di-'])) * 100
source_df['adx'] = source_df['dx'].rolling(w).mean()
source_df['factor'] = np.where(source_df['adx'] > 25, source_df['adx'], 0)
source_df['signal'] = np.where(
(source_df['factor'] / source_df['factor'].shift()).fillna(0) > 1,
1,
0
)
return source_df[['factor', 'signal']]
def factor_tmom_bias_01(self, w):
source_df = self.main_df.copy()
source_df['return_close'] = source_df['close'].diff() / source_df['close'].shift()
ls = source_df['return_close'].rolling(w).apply(
lambda x: pd.Series([(i/abs(i)) if abs(i) > 0 else 0 for i in x.cumsum()[::-5]]).mean()
)
source_df['factor'] = [i if abs(i) > 0.5 else 0 for i in ls]
source_df['signal'] = np.sign(source_df['factor'])
return source_df[['factor', 'signal']]
@staticmethod
def factor_compound(factors, w: [int, None], valve: int):
        compounded_factor = pd.DataFrame(factors)
import pandas as pd
import numpy as np
import pickle
import pyranges as pr
import pathlib
path = pathlib.Path.cwd()
if path.stem == 'ATGC':
cwd = path
else:
cwd = list(path.parents)[::-1][path.parts.index('ATGC')]
##your path to the files directory
file_path = cwd / 'files/'
usecols = ['Hugo_Symbol', 'Chromosome', 'Start_position', 'End_position', 'Variant_Classification', 'Variant_Type', 'Reference_Allele', 'Tumor_Seq_Allele2', 'i_VAF', 'Tumor_Sample_Barcode', 'Donor_ID']
##from: https://dcc.icgc.org/releases/PCAWG/consensus_snv_indel
pcawg_maf = pd.read_csv(file_path / 'final_consensus_passonly.snv_mnv_indel.icgc.public.maf', sep='\t',
usecols=usecols,
low_memory=False)
##from: https://dcc.icgc.org/releases/PCAWG/donors_and_biospecimens
pcawg_sample_table = pd.read_csv(file_path / 'pcawg_sample_sheet.tsv', sep='\t', low_memory=False)
##limit samples to what's in the maf
pcawg_sample_table = pcawg_sample_table.loc[pcawg_sample_table['aliquot_id'].isin(pcawg_maf['Tumor_Sample_Barcode'].unique())]
pcawg_sample_table.drop_duplicates(['icgc_donor_id'], inplace=True)
pcawg_sample_table = pcawg_sample_table.loc[pcawg_sample_table['dcc_specimen_type'] != 'Cell line - derived from tumour']
##from: https://dcc.icgc.org/releases/current/Summary
pcawg_donor_table = pd.read_csv(file_path / 'donor.all_projects.tsv', sep='\t', low_memory=False)
#!/usr/bin/env python
# coding: utf-8
# # ReEDS Scenarios on PV ICE Tool
# To explore different scenarios for future installation projections of PV (or any technology), ReEDS output data can be useful in providing standard scenarios. ReEDS installation projections are used in this journal as input data to the PV ICE tool.
#
# Current sections include:
#
# 1. Reading a standard ReEDS output file and saving it in a PV ICE input format
# 2. Reading scenarios of interest and running PV ICE tool
# 3. Plotting
# 4. GeoPlotting
# Notes:
#
# Scenarios of Interest:
# - Ref.Mod
# - 95-by-35.Adv
# - 95-by-35+Elec.Adv+DR
#
# In[1]:
import PV_ICE
import numpy as np
import pandas as pd
import os,sys
import matplotlib.pyplot as plt
plt.rcParams.update({'font.size': 22})
plt.rcParams['figure.figsize'] = (12, 8)
# In[2]:
import os
from pathlib import Path
testfolder = str(Path().resolve().parent.parent.parent / 'PV_ICE' / 'TEMP')
print ("Your simulation will be stored in %s" % testfolder)
# In[3]:
PV_ICE.__version__
# ### Reading REEDS original file to get list of SCENARIOs, PCAs, and STATEs
# In[3]:
reedsFile = str(Path().resolve().parent.parent.parent.parent / 'December Core Scenarios ReEDS Outputs Solar Futures v3a.xlsx')
print ("Input file is stored in %s" % reedsFile)
rawdf = pd.read_excel(reedsFile,
sheet_name="new installs PV")
#index_col=[0,2,3]) #this casts scenario, PCA and State as levels
#now set year as an index in place
#rawdf.drop(columns=['State'], inplace=True)
rawdf.drop(columns=['Tech'], inplace=True)
rawdf.set_index(['Scenario','Year','PCA', 'State'], inplace=True)
# In[4]:
scenarios = list(rawdf.index.get_level_values('Scenario').unique())
PCAs = list(rawdf.index.get_level_values('PCA').unique())
STATEs = list(rawdf.index.get_level_values('State').unique())
# ### Reading GIS inputs
# In[5]:
GISfile = str(Path().resolve().parent.parent.parent.parent / 'gis_centroid_n.xlsx')
GIS = pd.read_excel(GISfile)
GIS = GIS.set_index('id')
# In[6]:
GIS.head()
# In[7]:
GIS.loc['p1'].long
# ### Create Scenarios in PV_ICE
# #### Rename difficult characters from Scenarios Names
# In[8]:
simulationname = scenarios
simulationname = [w.replace('+', '_') for w in simulationname]
simulationname
# #### Downselect to Solar Future scenarios of interest
#
# Scenarios of Interest:
# <li> Ref.Mod
# <li> 95-by-35.Adv
# <li> 95-by-35+Elec.Adv+DR
# In[9]:
SFscenarios = [simulationname[0], simulationname[4], simulationname[8]]
SFscenarios
# #### Create the 3 Scenarios and assign Baselines
#
# Keeping track of each scenario as its own PV ICE Object.
# In[10]:
#for ii in range (0, 1): #len(scenarios):
i = 0
r1 = PV_ICE.Simulation(name=SFscenarios[i], path=testfolder)
for jj in range (0, len(PCAs)):
filetitle = SFscenarios[i]+'_'+PCAs[jj]+'.csv'
filetitle = os.path.join(testfolder, 'PCAs', filetitle)
r1.createScenario(name=PCAs[jj], file=filetitle)
r1.scenario[PCAs[jj]].addMaterial('glass', file=r'..\baselines\SolarFutures_2021\baseline_material_glass_Reeds.csv')
r1.scenario[PCAs[jj]].addMaterial('silicon', file=r'..\baselines\SolarFutures_2021\baseline_material_silicon_Reeds.csv')
r1.scenario[PCAs[jj]].addMaterial('silver', file=r'..\baselines\SolarFutures_2021\baseline_material_silver_Reeds.csv')
r1.scenario[PCAs[jj]].addMaterial('copper', file=r'..\baselines\SolarFutures_2021\baseline_material_copper_Reeds.csv')
r1.scenario[PCAs[jj]].addMaterial('aluminum', file=r'..\baselines\SolarFutures_2021\baseline_material_aluminium_Reeds.csv')
r1.scenario[PCAs[jj]].latitude = GIS.loc[PCAs[jj]].lat
r1.scenario[PCAs[jj]].longitude = GIS.loc[PCAs[jj]].long
i = 1
r2 = PV_ICE.Simulation(name=SFscenarios[i], path=testfolder)
for jj in range (0, len(PCAs)):
filetitle = SFscenarios[i]+'_'+PCAs[jj]+'.csv'
filetitle = os.path.join(testfolder, 'PCAs', filetitle)
r2.createScenario(name=PCAs[jj], file=filetitle)
r2.scenario[PCAs[jj]].addMaterial('glass', file=r'..\baselines\SolarFutures_2021\baseline_material_glass_Reeds.csv')
r2.scenario[PCAs[jj]].addMaterial('silicon', file=r'..\baselines\SolarFutures_2021\baseline_material_silicon_Reeds.csv')
r2.scenario[PCAs[jj]].addMaterial('silver', file=r'..\baselines\SolarFutures_2021\baseline_material_silver_Reeds.csv')
r2.scenario[PCAs[jj]].addMaterial('copper', file=r'..\baselines\SolarFutures_2021\baseline_material_copper_Reeds.csv')
r2.scenario[PCAs[jj]].addMaterial('aluminum', file=r'..\baselines\SolarFutures_2021\baseline_material_aluminium_Reeds.csv')
r2.scenario[PCAs[jj]].latitude = GIS.loc[PCAs[jj]].lat
r2.scenario[PCAs[jj]].longitude = GIS.loc[PCAs[jj]].long
i = 2
r3 = PV_ICE.Simulation(name=SFscenarios[i], path=testfolder)
for jj in range (0, len(PCAs)):
filetitle = SFscenarios[i]+'_'+PCAs[jj]+'.csv'
filetitle = os.path.join(testfolder, 'PCAs', filetitle)
r3.createScenario(name=PCAs[jj], file=filetitle)
r3.scenario[PCAs[jj]].addMaterial('glass', file=r'..\baselines\SolarFutures_2021\baseline_material_glass_Reeds.csv')
r3.scenario[PCAs[jj]].addMaterial('silicon', file=r'..\baselines\SolarFutures_2021\baseline_material_silicon_Reeds.csv')
r3.scenario[PCAs[jj]].addMaterial('silver', file=r'..\baselines\SolarFutures_2021\baseline_material_silver_Reeds.csv')
r3.scenario[PCAs[jj]].addMaterial('copper', file=r'..\baselines\SolarFutures_2021\baseline_material_copper_Reeds.csv')
r3.scenario[PCAs[jj]].addMaterial('aluminum', file=r'..\baselines\SolarFutures_2021\baseline_material_aluminium_Reeds.csv')
r3.scenario[PCAs[jj]].latitude = GIS.loc[PCAs[jj]].lat
r3.scenario[PCAs[jj]].longitude = GIS.loc[PCAs[jj]].long
# In[11]:
list(r1.scenario[PCAs[0]].data.year)
# In[12]:
r1.scenario[PCAs[0]].data
# # 2 FINISH: Set characteristics of Recycling to SF values.
# In[13]:
#r1.scenario[]
# #### Calculate Mass Flow
# In[14]:
IRENA= False
PERFECTMFG = True
mats = ['glass', 'silicon','silver','copper','aluminum']
ELorRL = 'EL'
if IRENA:
if ELorRL == 'RL':
weibullInputParams = {'alpha': 5.3759, 'beta':30} # Regular-loss scenario IRENA
if ELorRL == 'EL':
weibullInputParams = {'alpha': 2.49, 'beta':30} # Regular-loss scenario IRENA
if PERFECTMFG:
for jj in range (0, len(r1.scenario.keys())):
r1.scenario[PCAs[jj]].data['mod_lifetime'] = 40
r1.scenario[PCAs[jj]].data['mod_MFG_eff'] = 100.0
r2.scenario[PCAs[jj]].data['mod_lifetime'] = 40
r2.scenario[PCAs[jj]].data['mod_MFG_eff'] = 100.0
r3.scenario[PCAs[jj]].data['mod_lifetime'] = 40
r3.scenario[PCAs[jj]].data['mod_MFG_eff'] = 100.0
for kk in range(0, len(mats)):
mat = mats[kk]
r1.scenario[PCAs[jj]].material[mat].materialdata['mat_MFG_eff'] = 100.0
r2.scenario[PCAs[jj]].material[mat].materialdata['mat_MFG_eff'] = 100.0
r3.scenario[PCAs[jj]].material[mat].materialdata['mat_MFG_eff'] = 100.0
r1.calculateMassFlow(weibullInputParams=weibullInputParams)
r2.calculateMassFlow(weibullInputParams=weibullInputParams)
r3.calculateMassFlow(weibullInputParams=weibullInputParams)
title_Method = 'Irena_'+ELorRL
else:
r1.calculateMassFlow()
r2.calculateMassFlow()
r3.calculateMassFlow()
title_Method = 'PVICE'
# In[15]:
print("PCAs:", r1.scenario.keys())
print("Module Keys:", r1.scenario[PCAs[jj]].data.keys())
print("Material Keys: ", r1.scenario[PCAs[jj]].material['glass'].materialdata.keys())
# In[16]:
"""
r1.plotScenariosComparison(keyword='Cumulative_Area_disposedby_Failure')
r1.plotMaterialComparisonAcrossScenarios(material='silicon', keyword='mat_Total_Landfilled')
r1.scenario['p1'].data.head(21)
r2.scenario['p1'].data.head(21)
r3.scenario['p1'].data.head(21)
"""
pass
# # SAVE DATA FOR BILLY: PCAs
# ### PCA vs. Cumulative Waste by 2050
#
# In[17]:
# N = 2 rounds to 3 significant figures in the (commented-out) rounding used below
N = 2
# SFScenarios[kk].scenario[PCAs[zz]].data.year
#
# Index 20 --> 2030
#
# Index 30 --> 2040
#
# Index 40 --> 2050
# In[18]:
idx2030 = 20
idx2040 = 30
idx2050 = 40
print("index ", idx2030, " is year ", r1.scenario[PCAs[0]].data['year'].iloc[idx2030])
print("index ", idx2040, " is year ", r1.scenario[PCAs[0]].data['year'].iloc[idx2040])
print("index ", idx2050, " is year ", r1.scenario[PCAs[0]].data['year'].iloc[idx2050])
# #### 1 - PCA Cumulative Virgin Needs by 2050
# In[19]:
keyword='mat_Virgin_Stock'
materials = ['glass', 'silicon', 'silver', 'copper', 'aluminum']
SFScenarios = [r1, r2, r3]
# Loop over SF Scenarios
scenariolist = pd.DataFrame()
for kk in range(0, 3):
# Loop over Materials
materiallist = []
for ii in range (0, len(materials)):
keywordsum = []
for zz in range (0, len(PCAs)):
keywordsum.append(SFScenarios[kk].scenario[PCAs[zz]].material[materials[ii]].materialdata[keyword].sum())
materiallist.append(keywordsum)
df = pd.DataFrame (materiallist,columns=PCAs, index = materials)
df = df.T
df = df.add_prefix(SFScenarios[kk].name+'_')
scenariolist = pd.concat([scenariolist , df], axis=1)
scenariolist = scenariolist/1000000 # Converting to Metric Tons
#scenariolist = scenariolist.applymap(lambda x: round(x, N - int(np.floor(np.log10(abs(x))))))
#scenariolist = scenariolist.applymap(lambda x: int(x))
scenariolist.to_csv(title_Method+' 1 - PCA Cumulative2050 VirginMaterialNeeds_tons.csv')
# #### 2 - PCA Cumulative EoL Only Waste by 2050
# In[20]:
keyword='mat_Total_EOL_Landfilled'
materials = ['glass', 'silicon', 'silver', 'copper', 'aluminum']
SFScenarios = [r1, r2, r3]
# Loop over SF Scenarios
scenariolist = pd.DataFrame()
for kk in range(0, 3):
# Loop over Materials
materiallist = []
for ii in range (0, len(materials)):
keywordsum = []
for zz in range (0, len(PCAs)):
keywordsum.append(SFScenarios[kk].scenario[PCAs[zz]].material[materials[ii]].materialdata[keyword].sum())
materiallist.append(keywordsum)
df = pd.DataFrame (materiallist,columns=PCAs, index = materials)
df = df.T
df = df.add_prefix(SFScenarios[kk].name+'_')
scenariolist = pd.concat([scenariolist , df], axis=1)
scenariolist = scenariolist/1000000 # Converting to Metric Tons
#scenariolist = scenariolist.applymap(lambda x: round(x, N - int(np.floor(np.log10(abs(x))))))
#scenariolist = scenariolist.applymap(lambda x: int(x))
scenariolist.to_csv(title_Method+' 2 - PCA Cumulative2050 Waste EOL_tons.csv')
# #### 3 - PCA Yearly Virgin Needs 2030 2040 2050
# In[21]:
keyword='mat_Virgin_Stock'
materials = ['glass', 'silicon', 'silver', 'copper', 'aluminum']
SFScenarios = [r1, r2, r3]
# Loop over SF Scenarios
scenariolist = pd.DataFrame()
for kk in range(0, 3):
# Loop over Materials
materiallist = pd.DataFrame()
for ii in range (0, len(materials)):
keywordsum2030 = []
keywordsum2040 = []
keywordsum2050 = []
for zz in range (0, len(PCAs)):
keywordsum2030.append(SFScenarios[kk].scenario[PCAs[zz]].material[materials[ii]].materialdata[keyword][idx2030])
keywordsum2040.append(SFScenarios[kk].scenario[PCAs[zz]].material[materials[ii]].materialdata[keyword][idx2040])
keywordsum2050.append(SFScenarios[kk].scenario[PCAs[zz]].material[materials[ii]].materialdata[keyword][idx2050])
yearlylist = pd.DataFrame([keywordsum2030, keywordsum2040, keywordsum2050], columns=PCAs, index = [2030, 2040, 2050])
yearlylist = yearlylist.T
yearlylist = yearlylist.add_prefix(materials[ii]+'_')
materiallist = pd.concat([materiallist, yearlylist], axis=1)
materiallist = materiallist.add_prefix(SFScenarios[kk].name+'_')
scenariolist = pd.concat([scenariolist , materiallist], axis=1)
scenariolist = scenariolist/1000000 # Converting to Metric Tons
#scenariolist = scenariolist.applymap(lambda x: round(x, N - int(np.floor(np.log10(abs(x))))))
#scenariolist = scenariolist.applymap(lambda x: int(x))
scenariolist.to_csv(title_Method+' 3 - PCA Yearly 2030 2040 2050 VirginMaterialNeeds_tons.csv')
# #### 4 - PCA Yearly EoL Waste 2030 2040 2050
# In[22]:
keyword='mat_Total_Landfilled'
materials = ['glass', 'silicon', 'silver', 'copper', 'aluminum']
SFScenarios = [r1, r2, r3]
# Loop over SF Scenarios
scenariolist = pd.DataFrame()
for kk in range(0, 3):
# Loop over Materials
materiallist = pd.DataFrame()
for ii in range (0, len(materials)):
keywordsum2030 = []
keywordsum2040 = []
keywordsum2050 = []
for zz in range (0, len(PCAs)):
keywordsum2030.append(SFScenarios[kk].scenario[PCAs[zz]].material[materials[ii]].materialdata[keyword][idx2030])
keywordsum2040.append(SFScenarios[kk].scenario[PCAs[zz]].material[materials[ii]].materialdata[keyword][idx2040])
keywordsum2050.append(SFScenarios[kk].scenario[PCAs[zz]].material[materials[ii]].materialdata[keyword][idx2050])
yearlylist = pd.DataFrame([keywordsum2030, keywordsum2040, keywordsum2050], columns=PCAs, index = [2030, 2040, 2050])
yearlylist = yearlylist.T
yearlylist = yearlylist.add_prefix(materials[ii]+'_')
materiallist = pd.concat([materiallist, yearlylist], axis=1)
materiallist = materiallist.add_prefix(SFScenarios[kk].name+'_')
scenariolist = pd.concat([scenariolist , materiallist], axis=1)
scenariolist = scenariolist/1000000 # Converting to Metric Tons
#scenariolist = scenariolist.applymap(lambda x: round(x, N - int(np.floor(np.log10(abs(x))))))
#scenariolist = scenariolist.applymap(lambda x: int(x))
scenariolist.to_csv(title_Method+' 4 - PCA Yearly 2030 2040 2050 Waste_EOL_tons.csv')
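# Sections 1-4 above repeat the same nested scenario/material/PCA loop. The cumulative
# variant can be captured once in a helper like the sketch below (defined only for
# illustration, not called; it assumes PCAs and pd are in scope as above):
def cumulative_by_pca(simulations, materials, keyword):
    """Sum `keyword` over all years for each material and PCA, one scenario-prefixed
    column block per simulation, using the same /1e6 unit conversion as above."""
    out = pd.DataFrame()
    for sim in simulations:
        block = pd.DataFrame(
            [[sim.scenario[pca].material[mat].materialdata[keyword].sum()
              for pca in PCAs] for mat in materials],
            columns=PCAs, index=materials).T
        out = pd.concat([out, block.add_prefix(sim.name + '_')], axis=1)
    return out / 1000000  # metric tons, as in the cells above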
# # GEOPANDAS
# In[23]:
latitude_all =[]
longitude_all = []
cumulativewaste2050 = []
for scen in r1.scenario.keys():
latitude_all.append(r1.scenario[scen].latitude)
longitude_all.append(r1.scenario[scen].longitude)
cumulativewaste2050.append(r1.scenario[scen].material['glass'].materialdata['mat_Total_Landfilled'].sum())
# In[24]:
import pandas as pd
import matplotlib.pyplot as plt
import descartes
import geopandas as gpd
from shapely.geometry import Point, Polygon
#street_map = gpd.read_file(r'C:\Users\sayala\Desktop\geopandas\cb_2018_us_nation_20m\cb_2018_us_nation_20m.shp')
# Show the map only
#fig, ax = plt.subplots(figsize=(10,15))
#street_map.plot(ax=ax)
# In[25]:
frame = { 'Latitude': latitude_all, 'Longitude': longitude_all, 'CumulativeWaste2050': cumulativewaste2050}
df = pd.DataFrame(frame)
# In[26]:
df.head()
# In[27]:
geometry = [Point(xy) for xy in zip(df['Longitude'], df['Latitude'])]
geometry[:3]
# In[28]:
crs = "EPSG:4326"  # authority string; the {'init': ...} dict form is deprecated in current pyproj/geopandas
# In[29]:
geo_df = gpd.GeoDataFrame(df, # specify our data
crs = crs, # specify our coordinate reference system
geometry = geometry) # specify the geometry list we created
geo_df.head()
# In[30]:
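# NOTE: street_map must be defined before this cell runs; load it with the
# commented-out gpd.read_file(...) call above (any US boundary shapefile works).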
fig, ax = plt.subplots(figsize = (15,15))
street_map.plot(ax = ax, alpha = 0.4, color = "grey")
geo_df[geo_df['CumulativeWaste2050'] >= 1.918125e+09].plot(ax=ax, markersize = 20, color= "blue", marker = "o", label = "Bigger Than")
geo_df[geo_df['CumulativeWaste2050'] < 1.918125e+09].plot(ax=ax, markersize = 20, color= "red", marker = "o", label = "Less Than")
plt.xlim([-130, -60])
plt.ylim([20, 50])
plt.legend(prop={'size':15})
# In[ ]:
import random
import pandas as pd
import matplotlib.pyplot as plt
import descartes
import geopandas as gpd
from shapely.geometry import Point, Polygon
latitude = random.sample(range(25, 45), 10)
longitude = random.sample(range(-125, -65), 10)
weight = random.sample(range(0, 500), 10)
frame = { 'Latitude': latitude, 'Longitude': longitude, 'Weight': weight}
df = pd.DataFrame(frame)
# EcoFOCI
"""Contains a collection of ADCP equipment parsing.
These include:
* LR-ADCP
* Teledyne ADCP
* RCM ADCP
"""
import numpy as np
import pandas as pd
class adcp(object):
"""
"""
def __init__(self,serialno=None,depdir=None):
if depdir:
self.depdir = depdir + serialno
else:
self.depdir = None
def load_pg_file(self, pgfile_path=None, datetime_index=True):
"""load Pecent Good (PG) file
The four Percent Good values represent (in order):
1) The percentage of good three beam solutions (one beam rejected);
2) The percentage of good transformations (error velocity threshold not exceeded);
3) The percentage of measurements where more than one beam was bad;
4) The percentage of measurements with four beam solutions. <--- use this to qc data stream
Args:
pgfile_path (str, optional): full path to the .PG file. Defaults to None, in which case it is built from depdir.
"""
if self.depdir:
pgfile_path = self.depdir + '.PG'
self.pg_df = pd.read_csv(pgfile_path,delimiter='\s+',header=None,names=['date','time','bin','pg3beam-good','pgtransf-good','pg1beam-bad','pg4beam-good'])
self.pg_df["date_time"] = pd.to_datetime(self.pg_df.date+' '+self.pg_df.time,format="%y/%m/%d %H:%M:%S")
if datetime_index:
self.pg_df = self.pg_df.set_index(pd.DatetimeIndex(self.pg_df['date_time'])).drop(['date_time','date','time'],axis=1)
return self.pg_df
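# Example use of the four-beam percent-good column called out above to flag
# suspect bins (a sketch only; path, serial number and the 25% threshold are
# illustrative, not project defaults):
#   pg = adcp(serialno='12345', depdir='/path/to/deployment/').load_pg_file()
#   suspect = pg[pg['pg4beam-good'] < 25]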
def load_ein_file(self, einfile_path=None, datetime_index=True):
if self.depdir:
einfile_path = self.depdir + '.EIN'
self.ein_df = pd.read_csv(einfile_path,delimiter='\s+',header=None,names=['date','time','bin','agc1','agc2','agc3','agc4'])
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Jul 18 13:15:21 2020
@author: jm
"""
#%% required libraries
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
#%% read data
#df_original = pd.read_csv('https://www.gstatic.com/covid19/mobility/Global_Mobility_Report.csv?cachebust=5805f0ab2859cf87', encoding = 'utf-8')
df_original = pd.read_csv('data/google_mobility_report_2020-07-25.csv', encoding = 'utf-8')
df = df_original.copy()
# check columns
df.columns
# see head of data frame
df.head()
#%% filter data for Argentina only
df = df[df['country_region'] == 'Argentina']
# check resulting data
df.info()
# check NA
df.isna().any()
df.isna().sum().plot(kind = 'bar')
# drop columns with many NA
df = df.drop(columns = ['country_region_code', 'sub_region_2', 'iso_3166_2_code', 'census_fips_code'])
# rename columns
df.rename(columns = {'country_region': 'pais',
'sub_region_1': 'provincia',
'date': 'fecha',
'retail_and_recreation_percent_change_from_baseline': 'retail_and_recreation',
'grocery_and_pharmacy_percent_change_from_baseline': 'grocery_and_pharmacy',
'parks_percent_change_from_baseline': 'parks',
'transit_stations_percent_change_from_baseline': 'transit_stations',
'workplaces_percent_change_from_baseline': 'workplaces',
'residential_percent_change_from_baseline': 'residential'},
inplace = True)
# drop row where 'provincia' is NA
df = df.dropna(subset = ['provincia'])
# check NA
df.isna().sum().plot(kind = 'bar')
#%% set index to plot the data
df['fecha'] = pd.to_datetime(df['fecha'])
df.set_index('fecha', inplace = True)
# check index
print(df.index)
#%% subsets
bsas = df[df['provincia'] == 'Buenos Aires Province']
caba = df[df['provincia'] == 'Buenos Aires']
#%% plot for CABA
plt.rcParams["figure.dpi"] = 1200
plt.figure(figsize = (10, 10))
fig, ax = plt.subplots()
# plot data
ax.plot(caba.index, caba['workplaces'], color = 'darkred', label = 'Workplaces')
ax.plot(caba.index, caba['retail_and_recreation'], color = 'darkblue', label = 'Retail and recreation')
# color the area of lockdown phase 1
p1 = caba['2020-07-01':'2020-07-17'].index
ax.fill_between(p1, -90, -30, facecolor = 'lightsteelblue', alpha = 0.3, label = 'Fase 1')
# annotate Carnival (carnaval)
ax.annotate('Carnaval', xy = [pd.Timestamp('2020-02-24'), -71],
xytext = [pd.Timestamp('2020-03-25'), 10],
arrowprops = {'arrowstyle' : '->', 'color' : 'gray'},
fontsize = 8)
# annotate Labor Day (Día del Trabajador)
ax.annotate('Día del \ntrabajador', xy = [pd.Timestamp('2020-05-01'), -87],
xytext = [pd.Timestamp('2020-03-28'), -50],
arrowprops = {'arrowstyle' : '->', 'color' : 'gray'},
fontsize = 8)
# annotate May Revolution Day (Día de la Revolución de Mayo)
ax.annotate('Día de la \nRevolución de Mayo', xy = [pd.Timestamp('2020-05-25'), -84],
xytext = [pd.Timestamp('2020-04-01'), -30],
arrowprops = {'arrowstyle' : '->', 'color' : 'gray'},
fontsize = 8)
# annotate Güemes Day (Paso a la Inmortalidad del Gral. Güemes)
ax.annotate('Paso a la inmortalidad \nGral. Güemes', xy = [pd.Timestamp('2020-06-15'),
""" Broadly applicable NGS processing/analysis functionality """
import os
import re
import subprocess
import errno
from attmap import AttMapEcho
from yacman import load_yaml
from .exceptions import UnsupportedFiletypeException
from .utils import is_fastq, is_gzipped_fastq, is_sam_or_bam
class NGSTk(AttMapEcho):
"""
Class to hold functions to build command strings used during pipeline runs.
Object can be instantiated with a string of a path to a yaml `pipeline config file`.
Since NGSTk inherits from `AttMapEcho`, the passed config file and its elements
will be accessible through the NGSTk object as attributes under `config` (e.g.
`NGSTk.tools.java`). In case no `config_file` argument is passed, all commands will
be returned assuming the tool is in the user's $PATH.
:param str config_file: Path to pipeline yaml config file (optional).
:param pypiper.PipelineManager pm: A PipelineManager with which to associate this toolkit instance;
that is, essentially a source from which to grab paths to tools,
resources, etc.
:Example:
from pypiper.ngstk import NGSTk as tk
tk = NGSTk()
tk.samtools_index("sample.bam")
# returns: samtools index sample.bam
# Using a configuration file (custom executable location):
from pypiper.ngstk import NGSTk
tk = NGSTk("pipeline_config_file.yaml")
tk.samtools_index("sample.bam")
# returns: /home/.local/samtools/bin/samtools index sample.bam
"""
def __init__(self, config_file=None, pm=None):
# parse yaml into the project's attributes
# self.add_entries(**config)
super(NGSTk, self).__init__(
None if config_file is None else load_yaml(config_file))
# Keep a link to the pipeline manager, if one is provided.
# if None is provided, instantiate "tools" and "parameters" with empty AttMaps
# this allows the usage of the same code for a command with and without using a pipeline manager
if pm is not None:
self.pm = pm
if hasattr(pm.config, "tools"):
self.tools = self.pm.config.tools
else:
self.tools = AttMapEcho()
if hasattr(pm.config, "parameters"):
self.parameters = self.pm.config.parameters
else:
self.parameters = AttMapEcho()
else:
self.tools = AttMapEcho()
self.parameters = AttMapEcho()
# If pigz is available, use that. Otherwise, default to gzip.
if hasattr(self.pm, "cores") and self.pm.cores > 1 and self.check_command("pigz"):
self.ziptool_cmd = "pigz -f -p {}".format(self.pm.cores)
else:
self.ziptool_cmd = "gzip -f"
def _ensure_folders(self, *paths):
"""
Ensure that paths to folder(s) exist.
Some command-line tools will not attempt to create folder(s) needed
for output path to exist. They instead assume that they already are
present and will fail if that assumption does not hold.
:param Iterable[str] paths: Collection of path for which
"""
for p in paths:
# Only provide assurance for absolute paths.
if not p or not os.path.isabs(p):
continue
# See if what we're assuring is file- or folder-like.
fpath, fname = os.path.split(p)
base, ext = os.path.splitext(fname)
# If there's no extension, ensure that we have the whole path.
# Otherwise, just ensure that we have path to file's folder.
self.make_dir(fpath if ext else p)
@property
def ziptool(self):
"""
Returns the command to use for compressing/decompressing.
:return str: Either 'gzip' or 'pigz' if installed and multiple cores
"""
return self.ziptool_cmd
def make_dir(self, path):
"""
Forge path to directory, creating intermediates as needed.
:param str path: Path to create.
"""
try:
os.makedirs(path)
except OSError as exception:
if exception.errno != errno.EEXIST:
raise
def make_sure_path_exists(self, path):
""" Alias for make_dir """
self.make_dir(path)
# Borrowed from looper
def check_command(self, command):
"""
Check if command can be called.
"""
# Use `command` to see if command is callable, store exit code
code = os.system("command -v {0} >/dev/null 2>&1 || {{ exit 1; }}".format(command))
# If exit code is not 0, report which command failed and return False, else return True
if code != 0:
print("Command is not callable: {0}".format(command))
return False
else:
return True
def get_file_size(self, filenames):
"""
Get size of all files in string (space-separated) in megabytes (Mb).
:param str filenames: a space-separated string of filenames
"""
# use (1024 ** 3) for gigabytes
# equivalent to: stat -Lc '%s' filename
# If given a list, recurse through it.
if type(filenames) is list:
return sum([self.get_file_size(filename) for filename in filenames])
return round(sum([float(os.stat(f).st_size) for f in filenames.split(" ")]) / (1024 ** 2), 4)
def mark_duplicates(self, aligned_file, out_file, metrics_file, remove_duplicates="True"):
cmd = self.tools.java
if self.pm.javamem: # If a memory restriction exists.
cmd += " -Xmx" + self.pm.javamem
cmd += " -jar " + self.tools.picard + " MarkDuplicates"
cmd += " INPUT=" + aligned_file
cmd += " OUTPUT=" + out_file
cmd += " METRICS_FILE=" + metrics_file
cmd += " REMOVE_DUPLICATES=" + remove_duplicates
return cmd
def bam2fastq(self, input_bam, output_fastq,
output_fastq2=None, unpaired_fastq=None):
"""
Create command to convert BAM(s) to FASTQ(s).
:param str input_bam: Path to sequencing reads file to convert
:param output_fastq: Path to FASTQ to write
:param output_fastq2: Path to (R2) FASTQ to write
:param unpaired_fastq: Path to unpaired FASTQ to write
:return str: Command to convert BAM(s) to FASTQ(s)
"""
self._ensure_folders(output_fastq, output_fastq2, unpaired_fastq)
cmd = self.tools.java + " -Xmx" + self.pm.javamem
cmd += " -jar " + self.tools.picard + " SamToFastq"
cmd += " INPUT={0}".format(input_bam)
cmd += " FASTQ={0}".format(output_fastq)
if output_fastq2 is not None and unpaired_fastq is not None:
cmd += " SECOND_END_FASTQ={0}".format(output_fastq2)
cmd += " UNPAIRED_FASTQ={0}".format(unpaired_fastq)
return cmd
def bam_to_fastq(self, bam_file, out_fastq_pre, paired_end):
"""
Build command to convert BAM file to FASTQ file(s) (R1/R2).
:param str bam_file: path to BAM file with sequencing reads
:param str out_fastq_pre: path prefix for output FASTQ file(s)
:param bool paired_end: whether the given file contains paired-end
or single-end sequencing reads
:return str: file conversion command, ready to run
"""
self.make_sure_path_exists(os.path.dirname(out_fastq_pre))
cmd = self.tools.java + " -Xmx" + self.pm.javamem
cmd += " -jar " + self.tools.picard + " SamToFastq"
cmd += " I=" + bam_file
cmd += " F=" + out_fastq_pre + "_R1.fastq"
if paired_end:
cmd += " F2=" + out_fastq_pre + "_R2.fastq"
cmd += " INCLUDE_NON_PF_READS=true"
cmd += " QUIET=true"
cmd += " VERBOSITY=ERROR"
cmd += " VALIDATION_STRINGENCY=SILENT"
return cmd
def bam_to_fastq_awk(self, bam_file, out_fastq_pre, paired_end, zipmode=False):
"""
This converts bam file to fastq files, but using awk. As of 2016, this is much faster
than the standard way of doing this using Picard, and also much faster than the
bedtools implementation as well; however, it does no sanity checks and assumes the reads
(for paired data) are all paired (no singletons), in the correct order.
:param bool zipmode: Should the output be zipped?
"""
self.make_sure_path_exists(os.path.dirname(out_fastq_pre))
fq1 = out_fastq_pre + "_R1.fastq"
fq2 = out_fastq_pre + "_R2.fastq"
if zipmode:
fq1 = fq1 + ".gz"
fq2 = fq2 + ".gz"
fq1_target = " | \"" + self.ziptool + " -c > " + fq1 + '"'
fq2_target = " | \"" + self.ziptool + " -c > " + fq2 + '"'
else:
fq1_target = ' > "' + fq1 + '"'
fq2_target = ' > "' + fq2 + '"'
if paired_end:
cmd = self.tools.samtools + " view " + bam_file + " | awk '"
cmd += r'{ if (NR%2==1) print "@"$1"/1\n"$10"\n+\n"$11' + fq1_target + ';'
cmd += r' else print "@"$1"/2\n"$10"\n+\n"$11' + fq2_target + '; }'
cmd += "'" # end the awk command
else:
fq2 = None
cmd = self.tools.samtools + " view " + bam_file + " | awk '"
cmd += r'{ print "@"$1"\n"$10"\n+\n"$11' + fq1_target + '; }'
cmd += "'"
return cmd, fq1, fq2
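# For a single-end, non-zipped call the command built above is roughly:
#   <samtools> view in.bam | awk '{ print "@"$1"\n"$10"\n+\n"$11 > "prefix_R1.fastq"; }'
# i.e. SAM columns 1 (name), 10 (sequence) and 11 (qualities) are rewritten as
# 4-line FASTQ records; with zipmode the awk output is piped through self.ziptool
# instead of being redirected straight to a file.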
def bam_to_fastq_bedtools(self, bam_file, out_fastq_pre, paired_end):
"""
Converts bam to fastq; A version using bedtools
"""
self.make_sure_path_exists(os.path.dirname(out_fastq_pre))
fq1 = out_fastq_pre + "_R1.fastq"
fq2 = None
cmd = self.tools.bedtools + " bamtofastq -i " + bam_file + " -fq " + fq1 + ".fastq"
if paired_end:
fq2 = out_fastq_pre + "_R2.fastq"
cmd += " -fq2 " + fq2
return cmd, fq1, fq2
def get_input_ext(self, input_file):
"""
Get the extension of the input_file. Assumes you're using either
.bam or .fastq/.fq or .fastq.gz/.fq.gz.
"""
if input_file.endswith(".bam"):
input_ext = ".bam"
elif input_file.endswith(".fastq.gz") or input_file.endswith(".fq.gz"):
input_ext = ".fastq.gz"
elif input_file.endswith(".fastq") or input_file.endswith(".fq"):
input_ext = ".fastq"
else:
errmsg = "'{}'; this pipeline can only deal with .bam, .fastq, " \
"or .fastq.gz files".format(input_file)
raise UnsupportedFiletypeException(errmsg)
return input_ext
def merge_or_link(self, input_args, raw_folder, local_base="sample"):
"""
Standardizes various input possibilities by converting either .bam,
.fastq, or .fastq.gz files into a local file; merging those if multiple
files given.
:param list input_args: This is a list of arguments, each one is a
class of inputs (which can in turn be a string or a list).
Typically, input_args is a list with 2 elements: first a list of
read1 files; second an (optional!) list of read2 files.
:param str raw_folder: Name/path of folder for the merge/link.
:param str local_base: Usually the sample name. This (plus file
extension) will be the name of the local file linked (or merged)
by this function.
"""
self.make_sure_path_exists(raw_folder)
if not isinstance(input_args, list):
raise Exception("Input must be a list")
if any(isinstance(i, list) for i in input_args):
# We have a list of lists. Process each individually.
local_input_files = list()
n_input_files = len(list(filter(bool, input_args)))
print("Number of input file sets: " + str(n_input_files))
for input_i, input_arg in enumerate(input_args):
# Count how many non-null items there are in the list;
# we only append _R1 (etc.) if there are multiple input files.
if n_input_files > 1:
local_base_extended = local_base + "_R" + str(input_i + 1)
else:
local_base_extended = local_base
if input_arg:
out = self.merge_or_link(
input_arg, raw_folder, local_base_extended)
print("Local input file: '{}'".format(out))
# Make sure file exists:
if not os.path.isfile(out):
print("Not a file: '{}'".format(out))
local_input_files.append(out)
return local_input_files
else:
# We have a list of individual arguments. Merge them.
if len(input_args) == 1:
# Only one argument in this list. A single input file; we just link
# it, regardless of file type:
# Pull the value out of the list
input_arg = input_args[0]
input_ext = self.get_input_ext(input_arg)
# Convert to absolute path
if not os.path.isabs(input_arg):
input_arg = os.path.abspath(input_arg)
# Link it to into the raw folder
local_input_abs = os.path.join(raw_folder, local_base + input_ext)
self.pm.run(
"ln -sf " + input_arg + " " + local_input_abs,
target=local_input_abs,
shell=True)
# return the local (linked) filename absolute path
return local_input_abs
else:
# Otherwise, there are multiple inputs.
# If more than 1 input file is given, then these are to be merged
# if they are in bam format.
if all([self.get_input_ext(x) == ".bam" for x in input_args]):
sample_merged = local_base + ".merged.bam"
output_merge = os.path.join(raw_folder, sample_merged)
cmd = self.merge_bams_samtools(input_args, output_merge)
self.pm.debug("cmd: {}".format(cmd))
self.pm.run(cmd, output_merge)
cmd2 = self.validate_bam(output_merge)
self.pm.run(cmd2, output_merge, nofail=True)
return output_merge
# if multiple fastq
if all([self.get_input_ext(x) == ".fastq.gz" for x in input_args]):
sample_merged_gz = local_base + ".merged.fastq.gz"
output_merge_gz = os.path.join(raw_folder, sample_merged_gz)
#cmd1 = self.ziptool + "-d -c " + " ".join(input_args) + " > " + output_merge
#cmd2 = self.ziptool + " " + output_merge
#self.pm.run([cmd1, cmd2], output_merge_gz)
# you can save yourself the decompression/recompression:
cmd = "cat " + " ".join(input_args) + " > " + output_merge_gz
self.pm.run(cmd, output_merge_gz)
return output_merge_gz
if all([self.get_input_ext(x) == ".fastq" for x in input_args]):
sample_merged = local_base + ".merged.fastq"
output_merge = os.path.join(raw_folder, sample_merged)
cmd = "cat " + " ".join(input_args) + " > " + output_merge
self.pm.run(cmd, output_merge)
return output_merge
# At this point, we don't recognize the input file types or they
# do not match.
raise NotImplementedError(
"Input files must be of the same type, and can only "
"merge bam or fastq.")
def input_to_fastq(
self, input_file, sample_name, paired_end, fastq_folder,
output_file=None, multiclass=False, zipmode=False):
"""
Builds a command to convert input file to fastq, for various inputs.
Takes either .bam, .fastq.gz, or .fastq input and returns
commands that will create the .fastq file, regardless of input type.
This is useful to made your pipeline easily accept any of these input
types seamlessly, standardizing you to fastq which is still the
most common format for adapter trimmers, etc. You can specify you want
output either zipped or not.
Commands will place the output fastq file in given `fastq_folder`.
:param str input_file: filename of input you want to convert to fastq
:param bool multiclass: Are both read1 and read2 included in a single
file? User should not need to set this; it will be inferred and used
in recursive calls, based on number files, and the paired_end arg.
:param bool zipmode: Should the output be .fastq.gz? Otherwise, just fastq
:return str: A command (to be run with PipelineManager) that will ensure
your fastq file exists.
"""
fastq_prefix = os.path.join(fastq_folder, sample_name)
self.make_sure_path_exists(fastq_folder)
# this expects a list; if it gets a string, wrap it in a list.
if type(input_file) != list:
input_file = [input_file]
# If multiple files were provided, recurse on each file individually
if len(input_file) > 1:
cmd = []
output_file = []
for in_i, in_arg in enumerate(input_file):
output = fastq_prefix + "_R" + str(in_i + 1) + ".fastq"
result_cmd, uf, result_file = \
self.input_to_fastq(in_arg, sample_name, paired_end,
fastq_folder, output, multiclass=True,
zipmode=zipmode)
cmd.append(result_cmd)
output_file.append(result_file)
else:
# There was only 1 input class.
# Convert back into a string
input_file = input_file[0]
if not output_file:
output_file = fastq_prefix + "_R1.fastq"
if zipmode:
output_file = output_file + ".gz"
input_ext = self.get_input_ext(input_file) # handles .fq or .fastq
if input_ext == ".bam":
print("Found .bam file")
#cmd = self.bam_to_fastq(input_file, fastq_prefix, paired_end)
cmd, fq1, fq2 = self.bam_to_fastq_awk(input_file, fastq_prefix, paired_end, zipmode)
# pm.run(cmd, output_file, follow=check_fastq)
if fq2:
output_file = [fq1, fq2]
else:
output_file = fq1
elif input_ext == ".fastq.gz":
print("Found .fastq.gz file")
if paired_end and not multiclass:
if zipmode:
raise NotImplementedError("Can't use zipmode on interleaved fastq data.")
# For paired-end reads in one fastq file, we must split the
# file into 2. The pipeline author will need to include this
# python script in the scripts directory.
# TODO: make this self-contained in pypiper. This is a rare
# use case these days, as fastq files are almost never
# interleaved anymore.
script_path = os.path.join(
self.tools.scripts_dir, "fastq_split.py")
cmd = self.tools.python + " -u " + script_path
cmd += " -i " + input_file
cmd += " -o " + fastq_prefix
# Must also return the set of output files
output_file = [fastq_prefix + "_R1.fastq", fastq_prefix + "_R2.fastq"]
else:
if zipmode:
# we do nothing!
cmd = "ln -sf " + input_file + " " + output_file
print("Found .fq.gz file; no conversion necessary")
else:
# For single-end reads, we just unzip the fastq.gz file.
# or, paired-end reads that were already split.
cmd = self.ziptool + " -d -c " + input_file + " > " + output_file
# a non-shell version
# cmd1 = "gunzip --force " + input_file
# cmd2 = "mv " + os.path.splitext(input_file)[0] + " " + output_file
# cmd = [cmd1, cmd2]
elif input_ext == ".fastq":
if zipmode:
cmd = self.ziptool + " -c " + input_file + " > " + output_file
else:
cmd = "ln -sf " + input_file + " " + output_file
print("Found .fastq file; no conversion necessary")
return [cmd, fastq_prefix, output_file]
def check_fastq(self, input_files, output_files, paired_end):
"""
Returns a follow sanity-check function to be run after a fastq conversion.
Run following a command that will produce the fastq files.
This function will make sure any input files have the same number of reads as the
output files.
"""
# Define a temporary function which we will return, to be called by the
# pipeline.
# Must define default parameters here based on the parameters passed in. This locks
# these values in place, so that the variables will be defined when this function
# is called without parameters as a follow function by pm.run.
# This is AFTER merge, so if there are multiple files it means the
# files were split into read1/read2; therefore I must divide by number
# of files for final reads.
def temp_func(input_files=input_files, output_files=output_files,
paired_end=paired_end):
if type(input_files) != list:
input_files = [input_files]
if type(output_files) != list:
output_files = [output_files]
n_input_files = len(list(filter(bool, input_files)))
n_output_files = len(list(filter(bool, output_files)))
total_reads = sum([int(self.count_reads(input_file, paired_end))
for input_file in input_files])
raw_reads = int(total_reads / n_input_files)
self.pm.report_result("Raw_reads", str(raw_reads))
total_fastq_reads = sum(
[int(self.count_reads(output_file, paired_end))
for output_file in output_files])
fastq_reads = int(total_fastq_reads / n_output_files)
self.pm.report_result("Fastq_reads", fastq_reads)
input_ext = self.get_input_ext(input_files[0])
# We can only assess pass filter reads in bam files with flags.
if input_ext == ".bam":
num_failed_filter = sum(
[int(self.count_fail_reads(f, paired_end))
for f in input_files])
pf_reads = int(raw_reads) - num_failed_filter
self.pm.report_result("PF_reads", str(pf_reads))
if fastq_reads != int(raw_reads):
raise Exception("Fastq conversion error? Number of input reads "
"doesn't number of output reads.")
return fastq_reads
return temp_func
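# Typical use (a sketch, matching the follow= hint in input_to_fastq above): pass the
# returned function to PipelineManager.run so counts are verified and reported once
# the conversion command succeeds, e.g.
#   cmd, out_pre, out_fq = ngstk.input_to_fastq(local_input, sample, paired, fq_dir)
#   pm.run(cmd, out_fq, follow=ngstk.check_fastq(local_input, out_fq, paired))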
def check_trim(self, trimmed_fastq, paired_end, trimmed_fastq_R2=None, fastqc_folder=None):
"""
Build function to evaluate read trimming, and optionally run fastqc.
This is useful to construct an argument for the 'follow' parameter of
a PipelineManager's 'run' method.
:param str trimmed_fastq: Path to trimmed reads file.
:param bool paired_end: Whether the processing is being done with
paired-end sequencing data.
:param str trimmed_fastq_R2: Path to read 2 file for the paired-end case.
:param str fastqc_folder: Path to folder within which to place fastqc
output files; if unspecified, fastqc will not be run.
:return callable: Function to evaluate read trimming and possibly run
fastqc.
"""
def temp_func():
print("Evaluating read trimming")
if paired_end and not trimmed_fastq_R2:
print("WARNING: specified paired-end but no R2 file")
n_trim = float(self.count_reads(trimmed_fastq, paired_end))
self.pm.report_result("Trimmed_reads", int(n_trim))
try:
rr = float(self.pm.get_stat("Raw_reads"))
except:
print("Can't calculate trim loss rate without raw read result.")
else:
self.pm.report_result(
"Trim_loss_rate", round((rr - n_trim) * 100 / rr, 2))
# Also run a fastqc (if installed/requested)
if fastqc_folder:
if fastqc_folder and os.path.isabs(fastqc_folder):
self.make_sure_path_exists(fastqc_folder)
cmd = self.fastqc(trimmed_fastq, fastqc_folder)
self.pm.run(cmd, lock_name="trimmed_fastqc", nofail=True)
fname, ext = os.path.splitext(os.path.basename(trimmed_fastq))
fastqc_html = os.path.join(fastqc_folder, fname + "_fastqc.html")
self.pm.report_object("FastQC report r1", fastqc_html)
if paired_end and trimmed_fastq_R2:
cmd = self.fastqc(trimmed_fastq_R2, fastqc_folder)
self.pm.run(cmd, lock_name="trimmed_fastqc_R2", nofail=True)
fname, ext = os.path.splitext(os.path.basename(trimmed_fastq_R2))
fastqc_html = os.path.join(fastqc_folder, fname + "_fastqc.html")
self.pm.report_object("FastQC report r2", fastqc_html)
return temp_func
def validate_bam(self, input_bam):
"""
Wrapper for Picard's ValidateSamFile.
:param str input_bam: Path to file to validate.
:return str: Command to run for the validation.
"""
cmd = self.tools.java + " -Xmx" + self.pm.javamem
cmd += " -jar " + self.tools.picard + " ValidateSamFile"
cmd += " INPUT=" + input_bam
return cmd
def merge_bams(self, input_bams, merged_bam, in_sorted="TRUE", tmp_dir=None):
"""
Combine multiple files into one.
The tmp_dir parameter is important because on poorly configured
systems, the default can sometimes fill up.
:param Iterable[str] input_bams: Paths to files to combine
:param str merged_bam: Path to which to write combined result.
:param bool | str in_sorted: Whether the inputs are sorted
:param str tmp_dir: Path to temporary directory.
"""
if not len(input_bams) > 1:
print("No merge required")
return 0
outdir, _ = os.path.split(merged_bam)
if outdir and not os.path.exists(outdir):
print("Creating path to merge file's folder: '{}'".format(outdir))
os.makedirs(outdir)
# Handle more intuitive boolean argument.
if in_sorted in [False, True]:
in_sorted = "TRUE" if in_sorted else "FALSE"
input_string = " INPUT=" + " INPUT=".join(input_bams)
cmd = self.tools.java + " -Xmx" + self.pm.javamem
cmd += " -jar " + self.tools.picard + " MergeSamFiles"
cmd += input_string
cmd += " OUTPUT=" + merged_bam
cmd += " ASSUME_SORTED=" + str(in_sorted)
cmd += " CREATE_INDEX=TRUE"
cmd += " VALIDATION_STRINGENCY=SILENT"
if tmp_dir:
cmd += " TMP_DIR=" + tmp_dir
return cmd
def merge_bams_samtools(self, input_bams, merged_bam):
cmd = self.tools.samtools + " merge -f "
cmd += " -@ " + str(self.pm.cores)
cmd += " " + merged_bam + " "
cmd += " ".join(input_bams)
return cmd
def merge_fastq(self, inputs, output, run=False, remove_inputs=False):
"""
Merge FASTQ files (zipped or not) into one.
:param Iterable[str] inputs: Collection of paths to files to merge.
:param str output: Path to single output file.
:param bool run: Whether to run the command.
:param bool remove_inputs: Whether to keep the original files.
:return NoneType | str: Null if running the command, otherwise the
command itself
:raise ValueError: Raise ValueError if the call is such that
inputs are to be deleted but command is not run.
"""
if remove_inputs and not run:
raise ValueError("Can't delete files if command isn't run")
cmd = "cat {} > {}".format(" ".join(inputs), output)
if run:
subprocess.check_call(cmd, shell=True)
if remove_inputs:
cmd = "rm {}".format(" ".join(inputs))
subprocess.check_call(cmd, shell=True)
else:
return cmd
def count_lines(self, file_name):
"""
Uses the command-line utility wc to count the number of lines in a file. For MacOS, must strip leading whitespace from wc.
:param str file_name: name of file whose lines are to be counted
"""
x = subprocess.check_output("wc -l " + file_name + " | sed -E 's/^[[:space:]]+//' | cut -f1 -d' '", shell=True)
return x.decode().strip()
def count_lines_zip(self, file_name):
"""
Uses the command-line utility wc to count the number of lines in a file. For MacOS, must strip leading whitespace from wc.
For compressed files.
:param file: file_name
"""
x = subprocess.check_output(self.ziptool + " -d -c " + file_name + " | wc -l | sed -E 's/^[[:space:]]+//' | cut -f1 -d' '", shell=True)
return x.decode().strip()
def get_chrs_from_bam(self, file_name):
"""
Uses samtools to grab the chromosomes from the header that are contained
in this bam file.
"""
x = subprocess.check_output(self.tools.samtools + " view -H " + file_name + " | grep '^@SQ' | cut -f2| sed s'/SN://'", shell=True)
# Chromosomes will be separated by newlines; split into list to return
return x.decode().split()
###################################
# Read counting functions
###################################
# In these functions, A paired-end read, with 2 sequences, counts as a two reads
def count_unique_reads(self, file_name, paired_end):
"""
Sometimes alignment software puts multiple locations for a single read; if you just count
those reads, you will get an inaccurate count. This is _not_ the same as multimapping reads,
which may or may not be actually duplicated in the bam file (depending on the alignment
software).
This function counts each read only once.
This accounts for paired end or not for free because pairs have the same read name.
In this function, a paired-end read would count as 2 reads.
"""
if file_name.endswith("sam"):
param = "-S"
if file_name.endswith("bam"):
param = ""
if paired_end:
r1 = self.samtools_view(file_name, param=param + " -f64", postpend=" | cut -f1 | sort -k1,1 -u | wc -l | sed -E 's/^[[:space:]]+//'")
r2 = self.samtools_view(file_name, param=param + " -f128", postpend=" | cut -f1 | sort -k1,1 -u | wc -l | sed -E 's/^[[:space:]]+//'")
else:
r1 = self.samtools_view(file_name, param=param + "", postpend=" | cut -f1 | sort -k1,1 -u | wc -l | sed -E 's/^[[:space:]]+//'")
r2 = 0
return int(r1) + int(r2)
def count_unique_mapped_reads(self, file_name, paired_end):
"""
For a bam or sam file with paired or or single-end reads, returns the
number of mapped reads, counting each read only once, even if it appears
mapped at multiple locations.
:param str file_name: name of reads file
:param bool paired_end: True/False paired end data
:return int: Number of uniquely mapped reads.
"""
_, ext = os.path.splitext(file_name)
ext = ext.lower()
if ext == ".sam":
param = "-S -F4"
elif ext == "bam":
param = "-F4"
else:
raise ValueError("Not a SAM or BAM: '{}'".format(file_name))
if paired_end:
r1 = self.samtools_view(file_name, param=param + " -f64", postpend=" | cut -f1 | sort -k1,1 -u | wc -l | sed -E 's/^[[:space:]]+//'")
r2 = self.samtools_view(file_name, param=param + " -f128", postpend=" | cut -f1 | sort -k1,1 -u | wc -l | sed -E 's/^[[:space:]]+//'")
else:
r1 = self.samtools_view(file_name, param=param + "", postpend=" | cut -f1 | sort -k1,1 -u | wc -l | sed -E 's/^[[:space:]]+//'")
r2 = 0
return int(r1) + int(r2)
def count_flag_reads(self, file_name, flag, paired_end):
"""
Counts the number of reads with the specified flag.
:param str file_name: name of reads file
:param str flag: sam flag value to be read
:param bool paired_end: This parameter is ignored; samtools automatically correctly responds depending
on the data in the bamfile. We leave the option here just for consistency, since all the other
counting functions require the parameter. This makes it easier to swap counting functions during
pipeline development.
"""
param = " -c -f" + str(flag)
if file_name.endswith("sam"):
param += " -S"
return self.samtools_view(file_name, param=param)
def count_multimapping_reads(self, file_name, paired_end):
"""
Counts the number of reads that mapped to multiple locations. Warning:
currently, if the alignment software includes the reads at multiple locations, this function
will count those more than once. This function is for software that randomly assigns,
but flags reads as multimappers.
:param str file_name: name of reads file
:param paired_end: This parameter is ignored; samtools automatically correctly responds depending
on the data in the bamfile. We leave the option here just for consistency, since all the other
counting functions require the parameter. This makes it easier to swap counting functions during
pipeline development.
"""
return int(self.count_flag_reads(file_name, 256, paired_end))
def count_uniquelymapping_reads(self, file_name, paired_end):
"""
Counts the number of reads that mapped to a unique position.
:param str file_name: name of reads file
:param bool paired_end: This parameter is ignored.
"""
param = " -c -F256"
if file_name.endswith("sam"):
param += " -S"
return self.samtools_view(file_name, param=param)
def count_fail_reads(self, file_name, paired_end):
"""
Counts the number of reads that failed platform/vendor quality checks.
:param paired_end: This parameter is ignored; samtools automatically correctly responds depending
on the data in the bamfile. We leave the option here just for consistency, since all the other
counting functions require the parameter. This makes it easier to swap counting functions during
pipeline development.
"""
return int(self.count_flag_reads(file_name, 512, paired_end))
def samtools_view(self, file_name, param, postpend=""):
"""
Run samtools view, with flexible parameters and post-processing.
This is used internally to implement the various count_reads functions.
:param str file_name: file_name
:param str param: String of parameters to pass to samtools view
:param str postpend: String to append to the samtools command;
useful to add cut, sort, wc operations to the samtools view output.
"""
cmd = "{} view {} {} {}".format(
self.tools.samtools, param, file_name, postpend)
# in python 3, check_output returns a byte string which causes issues.
# with python 3.6 we could use argument: "encoding='UTF-8'""
return subprocess.check_output(cmd, shell=True).decode().strip()
def count_reads(self, file_name, paired_end):
"""
Count reads in a file.
Paired-end reads count as 2 in this function.
For paired-end reads, this function assumes that the reads are split
into 2 files, so it divides line count by 2 instead of 4.
This will thus give an incorrect result if your paired-end fastq files
are in only a single file (you must divide by 2 again).
:param str file_name: Name/path of file whose reads are to be counted.
:param bool paired_end: Whether the file contains paired-end reads.
"""
_, ext = os.path.splitext(file_name)
if not (is_sam_or_bam(file_name) or is_fastq(file_name)):
# TODO: make this an exception and force caller to handle that
# rather than relying on knowledge of possibility of negative value.
return -1
if is_sam_or_bam(file_name):
param_text = "-c" if ext == ".bam" else "-c -S"
return self.samtools_view(file_name, param=param_text)
else:
num_lines = self.count_lines_zip(file_name) \
if is_gzipped_fastq(file_name) \
else self.count_lines(file_name)
divisor = 2 if paired_end else 4
return int(num_lines) / divisor
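# Worked example of the divisor logic above: a FASTQ with 4,000 lines holds 1,000
# four-line records; paired_end=False returns 4000/4 = 1000 reads, while
# paired_end=True (mates split across R1/R2 files) returns 4000/2 = 2000, counting
# each record together with its mate in the companion file.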
def count_concordant(self, aligned_bam):
"""
Count only reads that "aligned concordantly exactly 1 time."
:param str aligned_bam: File for which to count mapped reads.
"""
cmd = self.tools.samtools + " view " + aligned_bam + " | "
cmd += "grep 'YT:Z:CP'" + " | uniq -u | wc -l | sed -E 's/^[[:space:]]+//'"
return subprocess.check_output(cmd, shell=True).decode().strip()
def count_mapped_reads(self, file_name, paired_end):
"""
Mapped_reads are not in fastq format, so this one doesn't need to accommodate fastq,
and therefore, doesn't require a paired-end parameter because it only uses samtools view.
Therefore, it's ok that it has a default parameter, since this is discarded.
:param str file_name: File for which to count mapped reads.
:param bool paired_end: This parameter is ignored; samtools automatically correctly responds depending
on the data in the bamfile. We leave the option here just for consistency, since all the other
counting functions require the parameter. This makes it easier to swap counting functions during
pipeline development.
:return int: Either return code from samtools view command, or -1 to indicate an error state.
"""
if file_name.endswith("bam"):
return self.samtools_view(file_name, param="-c -F4")
if file_name.endswith("sam"):
return self.samtools_view(file_name, param="-c -F4 -S")
return -1
def sam_conversions(self, sam_file, depth=True):
"""
Convert sam files to bam files, then sort and index them for later use.
:param bool depth: also calculate coverage over each position
"""
cmd = self.tools.samtools + " view -bS " + sam_file + " > " + sam_file.replace(".sam", ".bam") + "\n"
cmd += self.tools.samtools + " sort " + sam_file.replace(".sam", ".bam") + " -o " + sam_file.replace(".sam", "_sorted.bam") + "\n"
cmd += self.tools.samtools + " index " + sam_file.replace(".sam", "_sorted.bam") + "\n"
if depth:
cmd += self.tools.samtools + " depth " + sam_file.replace(".sam", "_sorted.bam") + " > " + sam_file.replace(".sam", "_sorted.depth") + "\n"
return cmd
def bam_conversions(self, bam_file, depth=True):
"""
Sort and index bam files for later use.
:param bool depth: also calculate coverage over each position
"""
cmd = self.tools.samtools + " view -h " + bam_file + " > " + bam_file.replace(".bam", ".sam") + "\n"
cmd += self.tools.samtools + " sort " + bam_file + " -o " + bam_file.replace(".bam", "_sorted.bam") + "\n"
cmd += self.tools.samtools + " index " + bam_file.replace(".bam", "_sorted.bam") + "\n"
if depth:
cmd += self.tools.samtools + " depth " + bam_file.replace(".bam", "_sorted.bam") + " > " + bam_file.replace(".bam", "_sorted.depth") + "\n"
return cmd
def fastqc(self, file, output_dir):
"""
Create command to run fastqc on a FASTQ file
:param str file: Path to file with sequencing reads
:param str output_dir: Path to folder in which to place output
:return str: Command with which to run fastqc
"""
# You can find the fastqc help with fastqc --help
try:
pm = self.pm
except AttributeError:
# Do nothing, this is just for path construction.
pass
else:
if not os.path.isabs(output_dir) and pm is not None:
output_dir = os.path.join(pm.outfolder, output_dir)
self.make_sure_path_exists(output_dir)
return "{} --noextract --outdir {} {}".\
format(self.tools.fastqc, output_dir, file)
def fastqc_rename(self, input_bam, output_dir, sample_name):
"""
Create pair of commands to run fastqc and organize files.
The first command returned is the one that actually runs fastqc when
it's executed; the second moves the output files to the output
folder for the sample indicated.
:param str input_bam: Path to file for which to run fastqc.
:param str output_dir: Path to folder in which fastqc output will be
written, and within which the sample's output folder lives.
:param str sample_name: Sample name, which determines subfolder within
output_dir for the fastqc files.
:return list[str]: Pair of commands, to run fastqc and then move the files to
their intended destination based on sample name.
"""
cmds = list()
initial = os.path.splitext(os.path.basename(input_bam))[0]
cmd1 = self.fastqc(input_bam, output_dir)
cmds.append(cmd1)
cmd2 = "if [[ ! -s {1}_fastqc.html ]]; then mv {0}_fastqc.html {1}_fastqc.html; mv {0}_fastqc.zip {1}_fastqc.zip; fi".format(
os.path.join(output_dir, initial), os.path.join(output_dir, sample_name))
cmds.append(cmd2)
return cmds
def samtools_index(self, bam_file):
"""Index a bam file."""
cmd = self.tools.samtools + " index {0}".format(bam_file)
return cmd
def slurm_header(
self, job_name, output, queue="shortq", n_tasks=1, time="10:00:00",
cpus_per_task=8, mem_per_cpu=2000, nodes=1, user_mail="", mail_type="end"):
cmd = """ #!/bin/bash
#SBATCH --partition={0}
#SBATCH --ntasks={1}
#SBATCH --time={2}
#SBATCH --cpus-per-task={3}
#SBATCH --mem-per-cpu={4}
#SBATCH --nodes={5}
#SBATCH --job-name={6}
#SBATCH --output={7}
#SBATCH --mail-type={8}
#SBATCH --mail-user={9}
# Start running the job
hostname
date
""".format(
queue, n_tasks, time, cpus_per_task, mem_per_cpu,
nodes, job_name, output, mail_type, user_mail)
return cmd
def slurm_footer(self):
return " date"
def slurm_submit_job(self, job_file):
return os.system("sbatch %s" % job_file)
def remove_file(self, file_name):
return "rm {0}".format(file_name)
def move_file(self, old, new):
return "mv {0} {1}".format(old, new)
def preseq_curve(self, bam_file, output_prefix):
return """
preseq c_curve -B -P -o {0}.yield.txt {1}
""".format(output_prefix, bam_file)
def preseq_extrapolate(self, bam_file, output_prefix):
return """
preseq lc_extrap -v -B -P -e 1e+9 -o {0}.future_yield.txt {1}
""".format(output_prefix, bam_file)
def preseq_coverage(self, bam_file, output_prefix):
return """
preseq gc_extrap -o {0}.future_coverage.txt {1}
""".format(output_prefix, bam_file)
def trimmomatic(
self, input_fastq1, output_fastq1, cpus, adapters, log,
input_fastq2=None, output_fastq1_unpaired=None,
output_fastq2=None, output_fastq2_unpaired=None):
PE = False if input_fastq2 is None else True
pe = "PE" if PE else "SE"
cmd = self.tools.java + " -Xmx" + self.pm.javamem
cmd += " -jar " + self.tools.trimmomatic
cmd += " {0} -threads {1} -trimlog {2} {3}".format(pe, cpus, log, input_fastq1)
if PE:
cmd += " {0}".format(input_fastq2)
cmd += " {0}".format(output_fastq1)
if PE:
cmd += " {0} {1} {2}".format(output_fastq1_unpaired, output_fastq2, output_fastq2_unpaired)
cmd += " ILLUMINACLIP:{0}:1:40:15:8:true".format(adapters)
cmd += " LEADING:3 TRAILING:3"
cmd += " SLIDINGWINDOW:4:10"
cmd += " MINLEN:36"
return cmd
def skewer(
self, input_fastq1, output_prefix, output_fastq1,
log, cpus, adapters, input_fastq2=None, output_fastq2=None):
"""
Create commands with which to run skewer.
:param str input_fastq1: Path to input (read 1) FASTQ file
:param str output_prefix: Prefix for output FASTQ file names
:param str output_fastq1: Path to (read 1) output FASTQ file
:param str log: Path to file to which to write logging information
:param int | str cpus: Number of processing cores to allow
:param str adapters: Path to file with sequencing adapters
:param str input_fastq2: Path to read 2 input FASTQ file
:param str output_fastq2: Path to read 2 output FASTQ file
:return list[str]: Sequence of commands to run to trim reads with
skewer and rename files as desired.
"""
pe = input_fastq2 is not None
mode = "pe" if pe else "any"
cmds = list()
cmd1 = self.tools.skewer + " --quiet"
cmd1 += " -f sanger"
cmd1 += " -t {0}".format(cpus)
cmd1 += " -m {0}".format(mode)
cmd1 += " -x {0}".format(adapters)
cmd1 += " -o {0}".format(output_prefix)
cmd1 += " {0}".format(input_fastq1)
if input_fastq2 is None:
cmds.append(cmd1)
else:
cmd1 += " {0}".format(input_fastq2)
cmds.append(cmd1)
if input_fastq2 is None:
cmd2 = "mv {0} {1}".format(output_prefix + "-trimmed.fastq", output_fastq1)
cmds.append(cmd2)
else:
cmd2 = "mv {0} {1}".format(output_prefix + "-trimmed-pair1.fastq", output_fastq1)
cmds.append(cmd2)
cmd3 = "mv {0} {1}".format(output_prefix + "-trimmed-pair2.fastq", output_fastq2)
cmds.append(cmd3)
cmd4 = "mv {0} {1}".format(output_prefix + "-trimmed.log", log)
cmds.append(cmd4)
return cmds
def bowtie2_map(self, input_fastq1, output_bam, log, metrics, genome_index, max_insert, cpus, input_fastq2=None):
# Admits 2000bp-long fragments (--maxins option)
cmd = self.tools.bowtie2 + " --very-sensitive --no-discordant -p {0}".format(cpus)
cmd += " -x {0}".format(genome_index)
cmd += " --met-file {0}".format(metrics)
if input_fastq2 is None:
cmd += " {0} ".format(input_fastq1)
else:
cmd += " --maxins {0}".format(max_insert)
cmd += " -1 {0}".format(input_fastq1)
cmd += " -2 {0}".format(input_fastq2)
cmd += " 2> {0} | samtools view -S -b - | samtools sort -o {1} -".format(log, output_bam)
return cmd
def topHat_map(self, input_fastq, output_dir, genome, transcriptome, cpus):
# TODO:
# Allow paired input
cmd = self.tools.tophat + " --GTF {0} --b2-L 15 --library-type fr-unstranded --mate-inner-dist 120".format(transcriptome)
cmd += " --max-multihits 100 --no-coverage-search"
cmd += " --num-threads {0} --output-dir {1} {2} {3}".format(cpus, output_dir, genome, input_fastq)
return cmd
def picard_mark_duplicates(self, input_bam, output_bam, metrics_file, temp_dir="."):
transient_file = re.sub("\.bam$", "", output_bam) + ".dups.nosort.bam"
output_bam = re.sub("\.bam$", "", output_bam)
cmd1 = self.tools.java + " -Xmx" + self.pm.javamem
cmd1 += " -jar `which MarkDuplicates.jar`"
cmd1 += " INPUT={0}".format(input_bam)
cmd1 += " OUTPUT={0}".format(transient_file)
cmd1 += " METRICS_FILE={0}".format(metrics_file)
cmd1 += " VALIDATION_STRINGENCY=LENIENT"
cmd1 += " TMP_DIR={0}".format(temp_dir)
# Sort bam file with marked duplicates
cmd2 = self.tools.samtools + " sort {0} {1}".format(transient_file, output_bam)
# Remove transient file
cmd3 = "if [[ -s {0} ]]; then rm {0}; fi".format(transient_file)
return [cmd1, cmd2, cmd3]
def sambamba_remove_duplicates(self, input_bam, output_bam, cpus=16):
cmd = self.tools.sambamba + " markdup -t {0} -r {1} {2}".format(cpus, input_bam, output_bam)
return cmd
def get_mitochondrial_reads(self, bam_file, output, cpus=4):
"""
"""
tmp_bam = bam_file + "tmp_rmMe"
cmd1 = self.tools.sambamba + " index -t {0} {1}".format(cpus, bam_file)
cmd2 = self.tools.sambamba + " slice {0} chrM | {1} markdup -t 4 /dev/stdin {2} 2> {3}".format(bam_file, self.tools.sambamba, tmp_bam, output)
cmd3 = "rm {}".format(tmp_bam)
return [cmd1, cmd2, cmd3]
def filter_reads(self, input_bam, output_bam, metrics_file, paired=False, cpus=16, Q=30):
"""
Remove duplicates, filter for >Q, remove multiple mapping reads.
For paired-end reads, keep only proper pairs.
"""
nodups = re.sub("\.bam$", "", output_bam) + ".nodups.nofilter.bam"
cmd1 = self.tools.sambamba + " markdup -t {0} -r --compression-level=0 {1} {2} 2> {3}".format(cpus, input_bam, nodups, metrics_file)
cmd2 = self.tools.sambamba + ' view -t {0} -f bam --valid'.format(cpus)
if paired:
cmd2 += ' -F "not (unmapped or mate_is_unmapped) and proper_pair'
else:
cmd2 += ' -F "not unmapped'
cmd2 += ' and not (secondary_alignment or supplementary) and mapping_quality >= {0}"'.format(Q)
cmd2 += ' {0} |'.format(nodups)
cmd2 += self.tools.sambamba + " sort -t {0} /dev/stdin -o {1}".format(cpus, output_bam)
cmd3 = "if [[ -s {0} ]]; then rm {0}; fi".format(nodups)
cmd4 = "if [[ -s {0} ]]; then rm {0}; fi".format(nodups + ".bai")
return [cmd1, cmd2, cmd3, cmd4]
def shift_reads(self, input_bam, genome, output_bam):
# output_bam = re.sub("\.bam$", "", output_bam)
cmd = self.tools.samtools + " view -h {0} |".format(input_bam)
cmd += " shift_reads.py {0} |".format(genome)
cmd += " " + self.tools.samtools + " view -S -b - |"
cmd += " " + self.tools.samtools + " sort -o {0} -".format(output_bam)
return cmd
def sort_index_bam(self, input_bam, output_bam):
tmp_bam = re.sub("\.bam", ".sorted", input_bam)
cmd1 = self.tools.samtools + " sort {0} {1}".format(input_bam, tmp_bam)
cmd2 = "mv {0}.bam {1}".format(tmp_bam, output_bam)
cmd3 = self.tools.samtools + " index {0}".format(output_bam)
return [cmd1, cmd2, cmd3]
def index_bam(self, input_bam):
cmd = self.tools.samtools + " index {0}".format(input_bam)
return cmd
def run_spp(self, input_bam, output, plot, cpus):
"""
Run the SPP read peak analysis tool.
:param str input_bam: Path to reads file
:param str output: Path to output file
:param str plot: Path to plot file
:param int cpus: Number of processors to use
:return str: Command with which to run SPP
"""
base = "{} {} -rf -savp".format(self.tools.Rscript, self.tools.spp)
cmd = base + " -savp={} -s=0:5:500 -c={} -out={} -p={}".format(
plot, input_bam, output, cpus)
return cmd
def get_fragment_sizes(self, bam_file):
try:
import pysam
import numpy as np
except:
return
frag_sizes = list()
bam = pysam.Samfile(bam_file, 'rb')
for read in bam:
if bam.getrname(read.tid) != "chrM" and read.tlen < 1500:
frag_sizes.append(read.tlen)
bam.close()
return np.array(frag_sizes)
def plot_atacseq_insert_sizes(self, bam, plot, output_csv, max_insert=1500, smallest_insert=30):
"""
Heavy inspiration from here:
https://github.com/dbrg77/ATAC/blob/master/ATAC_seq_read_length_curve_fitting.ipynb
"""
try:
import pysam
import numpy as np
import matplotlib.mlab as mlab
from scipy.optimize import curve_fit
from scipy.integrate import simps
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
except:
print("Necessary Python modules couldn't be loaded.")
return
try:
import seaborn as sns
sns.set_style("whitegrid")
except:
pass
def get_fragment_sizes(bam, max_insert=1500):
frag_sizes = list()
bam = pysam.Samfile(bam, 'rb')
for i, read in enumerate(bam):
if read.tlen < max_insert:
frag_sizes.append(read.tlen)
bam.close()
return np.array(frag_sizes)
def mixture_function(x, *p):
"""
Mixture function to model four gaussian (nucleosomal)
and one exponential (nucleosome-free) distributions.
"""
m1, s1, w1, m2, s2, w2, m3, s3, w3, m4, s4, w4, q, r = p
nfr = expo(x, q, r)  # nucleosome-free component uses the q, r parameters being fitted
nfr[:smallest_insert] = 0
return (mlab.normpdf(x, m1, s1) * w1 +
mlab.normpdf(x, m2, s2) * w2 +
mlab.normpdf(x, m3, s3) * w3 +
mlab.normpdf(x, m4, s4) * w4 +
nfr)
def expo(x, q, r):
"""
Exponential function.
"""
return q * np.exp(-r * x)
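# For reference, the density being fitted is (a sketch of the model above):
#   f(x) = sum_{i=1..4} w_i * Normal(x; m_i, s_i) + q * exp(-r * x)
# where the four Gaussians describe mono- to tetra-nucleosomal fragments and the
# exponential describes the nucleosome-free fraction, zeroed below `smallest_insert`.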
# get fragment sizes
frag_sizes = get_fragment_sizes(bam)
# bin
numBins = np.linspace(0, max_insert, max_insert + 1)
y, scatter_x = np.histogram(frag_sizes, numBins, density=1)
# get the mid-point of each bin
x = (scatter_x[:-1] + scatter_x[1:]) / 2
# Parameters are empirical, need to check
paramGuess = [
200, 50, 0.7, # gaussians
400, 50, 0.15,
600, 50, 0.1,
800, 55, 0.045,
2.9e-02, 2.8e-02 # exponential
]
try:
popt3, pcov3 = curve_fit(
mixture_function, x[smallest_insert:], y[smallest_insert:],
p0=paramGuess, maxfev=100000)
except:
print("Nucleosomal fit could not be found.")
return
m1, s1, w1, m2, s2, w2, m3, s3, w3, m4, s4, w4, q, r = popt3
# Plot
plt.figure(figsize=(12, 12))
# Plot distribution
plt.hist(frag_sizes, numBins, histtype="step", ec="k", normed=1, alpha=0.5)
# Plot nucleosomal fits
plt.plot(x, mlab.normpdf(x, m1, s1) * w1, 'r-', lw=1.5, label="1st nucleosome")
plt.plot(x, mlab.normpdf(x, m2, s2) * w2, 'g-', lw=1.5, label="2nd nucleosome")
plt.plot(x, mlab.normpdf(x, m3, s3) * w3, 'b-', lw=1.5, label="3rd nucleosome")
plt.plot(x, mlab.normpdf(x, m4, s4) * w4, 'c-', lw=1.5, label="4th nucleosome")
# Plot nucleosome-free fit
nfr = expo(x, q, r)
nfr[:smallest_insert] = 0
plt.plot(x, nfr, 'k-', lw=1.5, label="nucleosome-free")
# Plot sum of fits
ys = mixture_function(x, *popt3)
plt.plot(x, ys, 'k--', lw=3.5, label="fit sum")
plt.legend()
plt.xlabel("Fragment size (bp)")
plt.ylabel("Density")
plt.savefig(plot, bbox_inches="tight")
# Integrate curves and get areas under curve
areas = [
["fraction", "area under curve", "max density"],
["Nucleosome-free fragments", simps(nfr), max(nfr)],
["1st nucleosome", simps(mlab.normpdf(x, m1, s1) * w1), max(mlab.normpdf(x, m1, s1) * w1)],
["2nd nucleosome", simps(mlab.normpdf(x, m2, s2) * w1), max(mlab.normpdf(x, m2, s2) * w2)],
["3rd nucleosome", simps(mlab.normpdf(x, m3, s3) * w1), max(mlab.normpdf(x, m3, s3) * w3)],
["4th nucleosome", simps(mlab.normpdf(x, m4, s4) * w1), max(mlab.normpdf(x, m4, s4) * w4)]
]
try:
import csv
with open(output_csv, "w") as f:
writer = csv.writer(f)
writer.writerows(areas)
except:
pass
# TODO: parameterize in terms of normalization factor.
def bam_to_bigwig(
self, input_bam, output_bigwig, genome_sizes, genome,
tagmented=False, normalize=False, norm_factor=1000):
"""
Convert a BAM file to a bigWig file.
:param str input_bam: path to BAM file to convert
:param str output_bigwig: path to which to write file in bigwig format
:param str genome_sizes: path to file with chromosome size information
:param str genome: name of genomic assembly
:param bool tagmented: whether reads come from a tagmentation protocol (e.g. ATAC-seq);
    if True, only 5' ends are counted and reads are not extended
:param bool normalize: whether to normalize coverage
:param int norm_factor: scaling factor for normalized coverage (coverage values
    are scaled so that they sum to this number)
:return list[str]: sequence of commands to execute
"""
# TODO:
# adjust fragment length depending on read size and real fragment size
# (right now it assumes 50bp reads with 180bp fragments)
cmds = list()
transient_file = os.path.abspath(re.sub("\.bigWig", "", output_bigwig))
cmd1 = self.tools.bedtools + " bamtobed -i {0} |".format(input_bam)
if not tagmented:
cmd1 += " " + self.tools.bedtools + " slop -i stdin -g {0} -s -l 0 -r 130 |".format(genome_sizes)
cmd1 += " fix_bedfile_genome_boundaries.py {0} |".format(genome)
cmd1 += " " + self.tools.genomeCoverageBed + " {0}-bg -g {1} -i stdin > {2}.cov".format(
"-5 " if tagmented else "",
genome_sizes,
transient_file
)
cmds.append(cmd1)
if normalize:
cmds.append("""awk 'NR==FNR{{sum+= $4; next}}{{ $4 = ($4 / sum) * {1}; print}}' {0}.cov {0}.cov | sort -k1,1 -k2,2n > {0}.normalized.cov""".format(transient_file, norm_factor))
cmds.append(self.tools.bedGraphToBigWig + " {0}{1}.cov {2} {3}".format(transient_file, ".normalized" if normalize else "", genome_sizes, output_bigwig))
# remove tmp files
cmds.append("if [[ -s {0}.cov ]]; then rm {0}.cov; fi".format(transient_file))
if normalize:
cmds.append("if [[ -s {0}.normalized.cov ]]; then rm {0}.normalized.cov; fi".format(transient_file))
cmds.append("chmod 755 {0}".format(output_bigwig))
return cmds
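# Illustrative usage sketch (not in the original module): `ngstk` is a hypothetical
# toolkit instance; paths and genome names are placeholders.
#   cmds = ngstk.bam_to_bigwig("sample.filtered.bam", "sample.bigWig",
#                              "hg38.chrom.sizes", "hg38",
#                              tagmented=False, normalize=True, norm_factor=1000)
# The returned list covers bamtobed/slop/genomeCoverageBed, the optional awk
# normalization step, bedGraphToBigWig, and cleanup of the intermediate .cov files.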
def add_track_to_hub(self, sample_name, track_url, track_hub, colour, five_prime=""):
cmd1 = """echo "track type=bigWig name='{0} {1}' description='{0} {1}'""".format(sample_name, five_prime)
cmd1 += """ height=32 visibility=full maxHeightPixels=32:32:25 bigDataUrl={0} color={1}" >> {2}""".format(track_url, colour, track_hub)
cmd2 = "chmod 755 {0}".format(track_hub)
return [cmd1, cmd2]
def link_to_track_hub(self, track_hub_url, file_name, genome):
import textwrap
db = "org" if genome == "hg19" else "db" # different database call for human
genome = "human" if genome == "hg19" else genome # change hg19 to human
html = """
<html>
<head>
<meta http-equiv="refresh" content="0; url=http://genome.ucsc.edu/cgi-bin/hgTracks?"""
html += """{db}={genome}&hgt.customText={track_hub_url}" />
</head>
</html>
""".format(track_hub_url=track_hub_url, genome=genome, db=db)
with open(file_name, 'w') as handle:
handle.write(textwrap.dedent(html))
def htseq_count(self, input_bam, gtf, output):
sam = re.sub(r"\.bam$", ".sam", input_bam)
cmd1 = "samtools view {0} > {1}".format(input_bam, sam)
cmd2 = "htseq-count -f sam -t exon -i transcript_id -m union {0} {1} > {2}".format(sam, gtf, output)
cmd3 = "rm {0}".format(sam)
return [cmd1, cmd2, cmd3]
def kallisto(self, input_fastq, output_dir, output_bam, transcriptome_index, cpus, input_fastq2=None, size=180, b=200):
cmd1 = self.tools.kallisto + " quant --bias --pseudobam -b {0} -l {1} -i {2} -o {3} -t {4}".format(b, size, transcriptome_index, output_dir, cpus)
if input_fastq2 is None:
cmd1 += " --single {0}".format(input_fastq)
else:
cmd1 += " {0} {1}".format(input_fastq, input_fastq2)
cmd1 += " | " + self.tools.samtools + " view -Sb - > {0}".format(output_bam)
cmd2 = self.tools.kallisto + " h5dump -o {0} {0}/abundance.h5".format(output_dir)
return [cmd1, cmd2]
def genome_wide_coverage(self, input_bam, genome_windows, output):
cmd = self.tools.bedtools + " coverage -counts -abam {0} -b {1} > {2}".format(input_bam, genome_windows, output)
return cmd
def calc_frip(self, input_bam, input_bed, threads=4):
"""
Calculate fraction of reads in peaks.
A file with a pool of sequencing reads and a file with peak call
regions define the operation that will be performed. The thread count
for samtools can be specified as well.
:param str input_bam: sequencing reads file
:param str input_bed: file with called peak regions
:param int threads: number of threads samtools may use
:return str: count of reads overlapping the given peak regions; divide by the
    total read count to obtain the FRiP fraction
"""
cmd = self.simple_frip(input_bam, input_bed, threads)
return subprocess.check_output(cmd.split(" ")).decode().strip()
def simple_frip(self, input_bam, input_bed, threads=4):
cmd = "{} view".format(self.tools.samtools)
cmd += " -@ {} -c -L {}".format(threads, input_bed)
cmd += " " + input_bam
return cmd
def calculate_frip(self, input_bam, input_bed, output, cpus=4):
cmd = self.tools.sambamba + " depth region -t {0}".format(cpus)
cmd += " -L {0}".format(input_bed)
cmd += " {0}".format(input_bam)
cmd += " | awk '{{sum+=$5}} END {{print sum}}' > {0}".format(output)
return cmd
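# Note that simple_frip/calc_frip only return the number of reads overlapping the
# peak regions; the FRiP fraction still has to be divided by the total read count.
# A minimal sketch, assuming a hypothetical toolkit instance `ngstk` and placeholder
# file names (`samtools view -c` is the standard read-counting invocation):
#   reads_in_peaks = float(ngstk.calc_frip("sample.bam", "sample_peaks.narrowPeak"))
#   total_reads = float(subprocess.check_output(
#       [ngstk.tools.samtools, "view", "-c", "sample.bam"]).strip())
#   frip = reads_in_peaks / total_reads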
def macs2_call_peaks(
self, treatment_bams, output_dir, sample_name, genome,
control_bams=None, broad=False, paired=False,
pvalue=None, qvalue=None, include_significance=None):
"""
Use MACS2 to call peaks.
:param str | Iterable[str] treatment_bams: Paths to files with data to
regard as treatment.
:param str output_dir: Path to output folder.
:param str sample_name: Name for the sample involved.
:param str genome: Name of the genome assembly to use.
:param str | Iterable[str] control_bams: Paths to files with data to
regard as control
:param bool broad: Whether to do broad peak calling.
:param bool paired: Whether reads are paired-end
:param float | NoneType pvalue: Statistical significance measure to
pass as --pvalue to peak calling with MACS
:param float | NoneType qvalue: Statistical significance measure to
pass as --qvalue to peak calling with MACS
:param bool | NoneType include_significance: Whether to pass a
statistical significance argument to peak calling with MACS; if
omitted, this will be True if the peak calling is broad or if
either p-value or q-value is specified; default significance
specification is a p-value of 0.001 if a significance is to be
specified but no value is provided for p-value or q-value.
:return str: Command to run.
"""
sizes = {"hg38": 2.7e9, "hg19": 2.7e9, "mm10": 1.87e9, "dr7": 1.412e9, "mm9": 1.87e9}
# Whether to specify a statistical significance value to MACS2
# can be indicated directly; if not, it's determined by
# whether the mark is associated with broad peaks. By default, we
# specify a significance value to MACS2 for a mark associated with a
# broad peak.
if include_significance is None:
include_significance = broad
cmd = self.tools.macs2 + " callpeak -t {0}".format(treatment_bams if type(treatment_bams) is str else " ".join(treatment_bams))
if control_bams is not None:
cmd += " -c {0}".format(control_bams if type(control_bams) is str else " ".join(control_bams))
if paired:
cmd += " -f BAMPE "
# Additional settings based on whether the mark is associated with
# broad peaks
if broad:
cmd += " --broad --nomodel --extsize 73"
else:
cmd += " --fix-bimodal --extsize 180 --bw 200"
if include_significance:
# Allow significance specification via either p- or q-value,
# giving preference to q-value if both are provided but falling
# back on a default p-value if neither is provided but inclusion
# of statistical significance measure is desired.
if qvalue is not None:
cmd += " --qvalue {}".format(qvalue)
else:
cmd += " --pvalue {}".format(pvalue or 0.00001)
cmd += " -g {0} -n {1} --outdir {2}".format(sizes[genome], sample_name, output_dir)
return cmd
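# Illustrative usage sketch (not in the original module): narrow vs. broad calling.
# `ngstk` is a hypothetical toolkit instance and the file names are placeholders.
#   narrow_cmd = ngstk.macs2_call_peaks("chip.bam", "peaks/", "sampleA", "hg38",
#                                       control_bams="input.bam", broad=False)
#   broad_cmd = ngstk.macs2_call_peaks("chip.bam", "peaks/", "sampleB", "hg38",
#                                      control_bams="input.bam", broad=True, qvalue=0.05)
# With broad=True a significance threshold is included by default (see
# include_significance above); with broad=False none is added unless requested.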
def macs2_call_peaks_atacseq(self, treatment_bam, output_dir, sample_name, genome):
genome_sizes = {"hg38": 2.7e9, "hg19": 2.7e9, "mm10": 1.87e9, "dr7": 1.412e9, "mm9": 1.87e9}
cmd = self.tools.macs2 + " callpeak -t {0}".format(treatment_bam)
cmd += " --nomodel --extsize 147 -g {0} -n {1} --outdir {2}".format(genome_sizes[genome], sample_name, output_dir)
return cmd
def macs2_plot_model(self, r_peak_model_file, sample_name, output_dir):
# run macs r script
cmd1 = "{} {}".format(self.tools.Rscript, r_peak_model_file)
# move output plot to sample dir
cmd2 = "mv {0}/{1}_model.pdf {2}/{1}_model.pdf".format(os.getcwd(), sample_name, output_dir)
return [cmd1, cmd2]
def spp_call_peaks(
self, treatment_bam, control_bam, treatment_name, control_name,
output_dir, broad, cpus, qvalue=None):
"""
Build command for R script to call peaks with SPP.
:param str treatment_bam: Path to file with data for treatment sample.
:param str control_bam: Path to file with data for control sample.
:param str treatment_name: Name for the treatment sample.
:param str control_name: Name for the control sample.
:param str output_dir: Path to folder for output.
:param str | bool broad: Whether to specify broad peak calling mode.
:param int cpus: Number of cores the script may use.
:param float qvalue: FDR, as decimal value
:return str: Command to run.
"""
broad = "TRUE" if broad else "FALSE"
cmd = self.tools.Rscript + " `which spp_peak_calling.R` {0} {1} {2} {3} {4} {5} {6}".format(
treatment_bam, control_bam, treatment_name, control_name, broad, cpus, output_dir
)
if qvalue is not None:
cmd += " {}".format(qvalue)
return cmd
def bam_to_bed(self, input_bam, output_bed):
cmd = self.tools.bedtools + " bamtobed -i {0} > {1}".format(input_bam, output_bed)
return cmd
def zinba_call_peaks(self, treatment_bed, control_bed, cpus, tagmented=False):
fragmentLength = 80 if tagmented else 180
cmd = self.tools.Rscript + " `which zinba.R` -l {0} -t {1} -c {2}".format(fragmentLength, treatment_bed, control_bed)
return cmd
def filter_peaks_mappability(self, peaks, alignability, filtered_peaks):
cmd = self.tools.bedtools + " intersect -wa -u -f 1"
cmd += " -a {0} -b {1} > {2} ".format(peaks, alignability, filtered_peaks)
return cmd
def homer_find_motifs(self, peak_file, genome, output_dir, size=150, length="8,10,12,14,16", n_motifs=12):
cmd = "findMotifsGenome.pl {0} {1} {2}".format(peak_file, genome, output_dir)
cmd += " -mask -size {0} -len {1} -S {2}".format(size, length, n_motifs)
return cmd
def homer_annotate_pPeaks(self, peak_file, genome, motif_file, output_bed):
cmd = "annotatePeaks.pl {0} {1} -mask -mscore -m {2} |".format(peak_file, genome, motif_file)
cmd += "tail -n +2 | cut -f 1,5,22 > {3}".format(output_bed)
return cmd
def center_peaks_on_motifs(self, peak_file, genome, window_width, motif_file, output_bed):
cmd = "annotatePeaks.pl {0} {1} -size {2} -center {3} |".format(peak_file, genome, window_width, motif_file)
cmd += " awk -v OFS='\t' '{print $2, $3, $4, $1, $6, $5}' |"
cmd += """ awk -v OFS='\t' -F '\t' '{ gsub("0", "+", $6) ; gsub("1", "-", $6) ; print }' |"""
cmd += " fix_bedfile_genome_boundaries.py {0} | sortBed > {1}".format(genome, output_bed)
return cmd
def get_read_type(self, bam_file, n=10):
"""
Gets the read type (single, paired) and length of bam file.
:param str bam_file: Bam file to determine read attributes.
:param int n: Number of lines to read from bam file.
:return str, int: tuple of read type and read length
"""
from collections import Counter
try:
p = subprocess.Popen([self.tools.samtools, 'view', bam_file],
stdout=subprocess.PIPE)
# Count paired alignments
paired = 0
read_length = Counter()
while n > 0:
line = next(p.stdout).decode("utf-8").split("\t")
flag = int(line[1])
read_length[len(line[9])] += 1
if 1 & flag: # check decimal flag contains 1 (paired)
paired += 1
n -= 1
p.kill()
except IOError("Cannot read provided bam file.") as e:
raise e
# Get the most abundant read length
read_length = read_length.most_common(1)[0][0]
# If at least half is paired, return True
if paired > (n / 2.):
return "PE", read_length
else:
return "SE", read_length
def parse_bowtie_stats(self, stats_file):
"""
Parses Bowtie2 stats file, returns series with values.
:param str stats_file: Bowtie2 output file with alignment statistics.
"""
import pandas as pd
stats = pd.Series(index=["readCount", "unpaired", "unaligned", "unique", "multiple", "alignmentRate"])
try:
with open(stats_file) as handle:
content = handle.readlines() # list of strings per line
except:
return stats
# total reads
try:
line = [i for i in range(len(content)) if " reads; of these:" in content[i]][0]
stats["readCount"] = re.sub("\D.*", "", content[line])
if 7 > len(content) > 2:
line = [i for i in range(len(content)) if "were unpaired; of these:" in content[i]][0]
stats["unpaired"] = re.sub("\D", "", re.sub("\(.*", "", content[line]))
else:
line = [i for i in range(len(content)) if "were paired; of these:" in content[i]][0]
stats["unpaired"] = stats["readCount"] - int(re.sub("\D", "", re.sub("\(.*", "", content[line])))
line = [i for i in range(len(content)) if "aligned 0 times" in content[i]][0]
stats["unaligned"] = re.sub("\D", "", re.sub("\(.*", "", content[line]))
line = [i for i in range(len(content)) if "aligned exactly 1 time" in content[i]][0]
stats["unique"] = re.sub("\D", "", re.sub("\(.*", "", content[line]))
line = [i for i in range(len(content)) if "aligned >1 times" in content[i]][0]
stats["multiple"] = re.sub("\D", "", re.sub("\(.*", "", content[line]))
line = [i for i in range(len(content)) if "overall alignment rate" in content[i]][0]
stats["alignmentRate"] = re.sub("\%.*", "", content[line]).strip()
except IndexError:
pass
return stats
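# For orientation, the parser above keys on substrings of the Bowtie2 alignment
# summary; for a single-end run the relevant lines look roughly like this
# (values illustrative, exact layout assumed from the substrings matched above):
#   10000 reads; of these:
#     10000 (100.00%) were unpaired; of these:
#       500 (5.00%) aligned 0 times
#       8000 (80.00%) aligned exactly 1 time
#       1500 (15.00%) aligned >1 times
#   95.00% overall alignment rate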
def parse_duplicate_stats(self, stats_file):
"""
Parses sambamba markdup output, returns series with values.
:param str stats_file: sambamba output file with duplicate statistics.
"""
import pandas as pd
series = | pd.Series() | pandas.Series |
# Trains a basic random forest on both the fnc and the loadings
import pandas as pd
import numpy as np
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import train_test_split
import pickle
from utils import numpy_metric
root_path = '/home/nvme/Kaggle/trends-assessment-prediction'
loadings = pd.read_csv(f'{root_path}/loading.csv', index_col='Id')
fnc = pd.read_csv(f'{root_path}/fnc.csv', index_col='Id')
train_scores = | pd.read_csv(f'{root_path}/train_scores.csv', index_col='Id') | pandas.read_csv |
import unittest
import numpy as np
import pandas as pd
import chariot.transformer as ct
from chariot.language_model_preprocessor import LanguageModelPreprocessor
TEXT = """
A chariot is a type of carriage driven by a charioteer, usually using horses[a] to provide rapid motive power. Chariots were used by armies as transport or mobile archery platforms, for hunting or for racing, and as a conveniently fast way to travel for many ancient people.
The word "chariot" comes from the Latin term carrus, a loanword from Gaulish. A chariot of war or one used in military parades was called a car. In ancient Rome and some other ancient Mediterranean civilizations, a biga required two horses, a triga three, and a quadriga four.
"""
class TestLanguageModelPreprocessor(unittest.TestCase):
def _make_corpus(self):
return | pd.DataFrame.from_dict({"sentence": [TEXT]}) | pandas.DataFrame.from_dict |
try:
from isomut2py import ploidyestimation
import pandas as __pd
import sqlite3 as __sqlite3
import os as __os
import subprocess as __subprocess
from scipy import stats as __stats
import numpy as __np
import time as __time
import sys as __sys
except ImportError:
print('ImportError in isomut2py.compare, comparison of sample ploidies will not work.')
def compare_with_bed(bed_dataframe, other_file, minLen):
"""
Compares the results of ploidy estimation with a bed file defined in other_file.
:param bed_dataframe: a pandas.DataFrame of the bedfile of the sample (pandas.DataFrame)
:param other_file: The path to the bedfile of the other sample. (str)
:param minLen: The minimum length of a region to be considered different from the other_file. (int)
:returns: df_joined: A pandas.DataFrame containing region information from both the PloidyEstimation object and the other_file.
"""
cols = ['chrom', 'chromStart', 'chromEnd', 'ploidy', 'LOH']
df1 = bed_dataframe
if isinstance(other_file, str):
if not __os.path.isfile(other_file):
raise ValueError('Error: file ' + other_file + ' does not exist, bedfile comparison failed.')
df2 = | __pd.read_csv(other_file, header=0, names=cols) | pandas.read_csv |
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException, WebDriverException
from bs4 import BeautifulSoup
from sheet import export_to_sheets
import pandas as pd
import sys
import time
url = 'https://doph.maps.arcgis.com/apps/opsdashboard/index.html#/f8fb4ccc3d2d42c7ab0590dbb3fc26b8'
#url = 'https://covid19.moh.gov.sa/'
driver = webdriver.Chrome()
driver.get(url)
for i in range(0, 20):
time.sleep(1) # Waiting for the page to fully load
sys.stdout.write('\r{}%'.format((i + 1) * 5))
sys.stdout.flush()
sys.stdout.write("\n")
with open("A.html", "w", encoding='utf-8') as f:
f.write(driver.page_source)
#soup = BeautifulSoup(driver.page_source, 'html.parser')
driver.quit()
# Opening saved MOHS dashboard page
all_stat = []
with open("A.html", encoding='utf-8') as f:
soup = BeautifulSoup(f, "html.parser")
feature_list = soup.find_all('nav', class_='feature-list')
for feature in feature_list:
stat = {}
for p in feature.find_all('p'):
strong = p.find_all('strong')
#print(strong)
for strong_tag in strong:
all_stat.append(strong_tag.text.strip())
#print(strong_tag.text)
'''
s = strong[0].text.split(' ')
s[0] = s[0].strip().replace(',', '')
stat[s[1].strip('\u200e')] = int(s[0])
'''
#print(all_stat)
df = | pd.DataFrame.from_dict(all_stat) | pandas.DataFrame.from_dict |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Reconstruct SKOS
Script which analyses and reconstructs a SKOS hierarchy.
"""
__author__ = "<NAME>"
__version__ = "1.0.0"
__license__ = "cc0-1.0"
import os
import csv
import pandas as pd
import pickle
from xml.dom.minidom import parse
from datetime import datetime
from analyse import *
import os.path
def main():
start = datetime.now()
"""
print("Please provide the name of the input file located in the 'data' folder (e.g. example.rdf):")
source_file = os.path.abspath('..\data\\') + input()
print("Please provide a name for the output files (e.g. example_transformed.rdf) (only 'example' is replaced by the input and placed in the 'out folder')")
output_name = input()
"""
targeted_input_files = ["rma-skos-lib"]
input_file = targeted_input_files[0]
source_file = os.path.join(os.path.abspath('..\\xslt_mapping\\output'), input_file) + '.rdf'
output_name = input_file
transformed_file = 'output/{}_transformed.rdf'.format(output_name)
issue_file = 'output/{}_differences.csv'.format(output_name)
typeless_file = 'output/{}_typeless.csv'.format(output_name)
analyse_file = 'output/{}_analyse.xlsx'.format(output_name)
dict_file = 'output/{}_dictionary.pkl'.format(output_name)
print('{} started analysis'.format(time(start)))
dom = parse(source_file)
print('{} parsed {}'.format(time(start), source_file))
concepts = list_concepts(dom)
print('{} analyzing {} concepts'
.format(time(start), len(concepts)))
concept_schemes = referenced_concept_schemes(dom)
print('{} identified {} concept schemes'
.format(time(start), len(concept_schemes)))
# Add unknown scheme, for concepts without a type
concept_schemes.append('http://hdl.handle.net/10934/RM0001.SCHEME.UNKOWN')
schemeless_concepts = list_schemeless_concepts(dom)
print('{} {} concepts without a concept scheme'
.format(time(start), len(schemeless_concepts)))
missing_references = missing_outward_references(dom)
missing_references = restructure_missing_references(missing_references)
print('{} found {} hierarchical inconsistencies'
.format(time(start), len(missing_references)))
undefined_concepts = undefined_concept_references(dom)
print('{} found {} references to undefined concepts'
.format(time(start), len(undefined_concepts)))
new_dom = dom.cloneNode(dom)
new_dom = add_concept_schemes(new_dom, concept_schemes)
print('{} added {} concept schemes to dom'
.format(time(start), len(concept_schemes)))
new_dom = fix_loose_references(new_dom, missing_references)
print('{} added the {} missing references to file {}'
.format(time(start), len(missing_references), transformed_file))
new_dom = remove_undefined_references(new_dom, undefined_concepts)
print('{} removed the {} undefined references from file {}'
.format(time(start), len(undefined_concepts), transformed_file))
topconcepts = find_top_concepts(new_dom)
print('{} found {} concepts without broader concepts'
.format(time(start), len(topconcepts)))
schemes_dict = find_all_schemes(new_dom, 'no')
print('{} created a dictionary of schemes'
.format(time(start)))
new_dom = add_top_concepts(new_dom, topconcepts, schemes_dict)
print('{} added topconcept nodes to file {}'
.format(time(start), transformed_file))
the_properties = all_properties(new_dom, 'yes')
print('{} created property dictionary for each concept'
.format(time(start)))
write_dom_to_file(new_dom, transformed_file)
print('{} wrote new dom to file {}'
.format(time(start), transformed_file))
save_schemeless(schemeless_concepts, typeless_file)
print('{} wrote concepts without scheme to file {}'
.format(time(start), typeless_file))
save_differences(missing_references, undefined_concepts, issue_file)
print('{} wrote hierarchical differences to file {}'
.format(time(start), issue_file))
write_analyse_file(the_properties, analyse_file)
print('{} write analyse results to the file {}'
.format(time(start), analyse_file))
output = open(dict_file, 'wb')
properties_dict = {}
for concept in the_properties:
the_id = concept['id']
properties_dict[the_id] = concept
pickle.dump(properties_dict, output)
output.close()
print('{} Saved the properties of each concept to file {}'
.format(time(start), dict_file))
def create_output_dir():
if not os.path.exists('output'):
os.mkdir('output')
def add_concept_schemes(dom, concept_schemes):
# Add missing skos:ConceptScheme nodes to the root
root = dom.childNodes.item(0)
for scheme in concept_schemes:
scheme_node = dom.createElement('skos:ConceptScheme')
root.appendChild(scheme_node)
scheme_node.setAttribute('rdf:about', scheme)
concept_node = dom.createElement('dct:title')
scheme_node.appendChild(concept_node)
concept_node.setAttribute('xml:lang', 'nl')
if scheme == 'http://hdl.handle.net/10934/RM0001.SCHEME.UNKOWN':
text_node = dom.createTextNode('Scheme Unknown')
else:
text_node = dom.createTextNode(scheme[42:])
concept_node.appendChild(text_node)
return dom
def remove_reference(dom, reference):
# Remove a reference from a concept
c1 = reference[2]
c2 = reference[0]
if c1 == c2:
relation = inverse_property(reference[1])
else:
c1 = reference[0]
c2 = reference[2]
relation = reference[1]
c1 = get_concept(dom, c1)
if c1 is not None:
property_node = get_relation_property(c1, relation, c2)
c1.removeChild(property_node)
return dom
def remove_undefined_references(dom, references):
# remove all undefined references
for reference in references:
dom = remove_reference(dom, reference)
return dom
def fix_loose_references(dom, references):
# Fix loose references: drop self-references and add the missing relation nodes
for reference in references:
c1 = reference[0]
relation = reference[1]
c2 = reference[2]
if c1 == c2:
dom = remove_reference(dom, reference)
else:
c1 = get_concept(dom, c1)
if c1 is not None:
new_node = dom.createElement(relation)
c1.appendChild(new_node)
new_node.setAttribute('rdf:resource', c2)
return dom
def add_top_concepts(dom, concepts, schemes):
# Add the topconcept nodes to the concepts without broader concepts and to the conceptscheme nodes
for concept in concepts:
concept_id = concept
the_schemes = schemes[concept_id]
concept = get_concept(dom, concept)
if the_schemes == []:
the_schemes.append('http://hdl.handle.net/10934/RM0001.SCHEME.UNKOWN')
for scheme in the_schemes:
new_node = dom.createElement('skos:topConceptOf')
concept.appendChild(new_node)
new_node.setAttribute('rdf:resource', scheme)
scheme = get_concept_scheme(dom, scheme)
extra_node = dom.createElement('skos:hasTopConcept')
scheme.appendChild(extra_node)
extra_node.setAttribute('rdf:resource', concept_id)
return dom
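# For reference, the nodes added above serialize roughly as follows
# (URIs are illustrative placeholders):
#   <skos:Concept rdf:about=".../CONCEPT_X">
#     <skos:topConceptOf rdf:resource=".../SCHEME_Y"/>
#   </skos:Concept>
#   <skos:ConceptScheme rdf:about=".../SCHEME_Y">
#     <skos:hasTopConcept rdf:resource=".../CONCEPT_X"/>
#   </skos:ConceptScheme>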
def save_schemeless(schemeless_concepts, typeless_file):
# Each typeless concept is written to a csv file
a_file = open(typeless_file, "w", encoding='utf-8')
the_writer = csv.writer(a_file)
for schemeless in schemeless_concepts:
the_writer.writerow([schemeless])
a_file.close()
def save_differences(list1, list2, issue_file):
# Each difference is written to a csv file
header_list = ['concept 1', 'type of relation', 'concept 2']
a_file = open(issue_file, "w", newline='')
writer = csv.writer(a_file)
writer.writerow(header_list)
for difference in list1:
writer.writerow(difference)
writer.writerow(['-','-','-'])
for difference in list2:
writer.writerow(difference)
a_file.close()
def write_dom_to_file(dom, file):
# Write a dom to a XML file
xml_file = open(file, "w", encoding='utf-8')
xml_file.write(dom.toprettyxml())
xml_file.close()
def write_analyse_file(list, file):
# Write all analyses to a file
#writer = pd.ExcelWriter(file, engine='xlsxwriter')
with pd.ExcelWriter(file) as writer:
reference_dict, reference_list = reference_analyse(list)
df_full = pd.DataFrame.from_dict(list)
df_full.to_excel(writer, sheet_name='Full')
reference_df = pd.DataFrame(reference_list, index=['Broader', 'Narrower', 'Related'])
reference_df.to_excel(writer, sheet_name='Reference1')
reference_df2 = | pd.DataFrame(reference_dict, columns=['B-N-R', '#']) | pandas.DataFrame |
import os
import pickle
import uuid
import pandas as pd
from schools3.ml.base.experiment import Experiment
from schools3.data.features.processors.composite_feature_processor import CompositeFeatureProcessor
from schools3.data.datasets.dataset import Dataset
from schools3.config import main_config
from schools3.config import global_config
from schools3.config.ml.experiments import models_experiment_config
from schools3.ml.metrics.fairness_metrics import FairnessMetrics
from schools3.config.data.features.processors import categorical_feature_processor_config as cat_config
from schools3.data.features.processors.categorical_feature_processor import CategoricalFeatureProcessor
# base abstract class for experiments that train models and report metrics on their predictions
class ModelsExperiment(Experiment):
def __init__(
self, name='ignore',
features_list=main_config.features,
labels=main_config.labels,
models=main_config.models,
metrics=main_config.metrics,
categorical_fairness_attributes=main_config.fairness_attributes,
use_cache=main_config.use_cache
):
super(ModelsExperiment, self).__init__(name, features_list, labels)
self.models = models
self.metrics = metrics
self.categorical_fairness_attributes = categorical_fairness_attributes
self.get_model_csv_cache = models_experiment_config.get_model_csv_cache
self.use_cache = use_cache
# evaluates each model and returns all metrics in a Dataframe
def get_train_test_metrics(self, train_cohort, test_cohort, compute_train_metrics=True, use_test_for_val=True):
df = pd.DataFrame()
for model in self.models:
feature_proc = model.get_feature_processor
train_data, test_data = \
self.get_train_test_data(train_cohort, feature_proc, test_cohort)
if test_data.get_dataset().shape[0] < models_experiment_config.min_test_rows:
continue
cached_model = self.maybe_get_cached_model(model, train_data, test_data)
if cached_model is None:
model.train(train_data, test_data if use_test_for_val else None)
self.maybe_cache_model(model, train_data, test_data)
else:
model = cached_model
train_metrics = pd.DataFrame()
if compute_train_metrics:
train_scores = model.test(train_data)
train_metrics = self.metrics.compute(train_scores)
test_scores = model.test(test_data)
test_metrics = self.metrics.compute(test_scores)
test_metrics = pd.concat([
test_metrics,
self.get_fairness_metrics(model, test_data)
], axis=1)
cur_df = self.construct_metrics_row(model, train_data, test_data, train_metrics, test_metrics)
df = pd.concat([df, cur_df], ignore_index=True)
return df
# save a given model and update index of cached models
def maybe_cache_model(self, model, train_data, test_data):
if not (model.cacheable and self.use_cache):
return
key_cols = list(self.get_id_cols())
cache_df = self.get_cache_df(model)
id_row = self.construct_id_row(model, train_data, test_data)
key = tuple(id_row.iloc[0])
h = str(uuid.uuid4())
model_file = models_experiment_config.get_hash_pkl_file(model.get_model_name(), h)
h_fname = global_config.get_save_path(model_file)
with open(h_fname, 'wb') as f:
pickle.dump(model, f)
id_row['hash'] = h
id_row = id_row.set_index(key_cols)
if key in cache_df.index:
cache_df.loc[key] = id_row.iloc[0]
else:
cache_df = cache_df.append(id_row)
cache_csv_path = global_config.get_save_path(self.get_model_csv_cache(model.get_model_name()))
cache_df.to_csv(cache_csv_path)
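# For reference, the cache index CSV written above holds one row per trained model,
# keyed by the identifier columns from get_id_cols() plus the pickle hash, e.g.
# (values are illustrative):
#   model,hps,train_cohort,test_cohort,train_rows,test_rows,num_features,hash
#   random_forest,"{...}",cohort_2015,cohort_2016,12000,3000,150,8f3a...(uuid4)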
# load a given model if it has been cached
def maybe_get_cached_model(self, model, train_data, test_data):
if main_config.overwrite_cache or (not self.use_cache):
return None
id_row = self.construct_id_row(model, train_data, test_data)
key = tuple(id_row.iloc[0])
cache_df = self.get_cache_df(model)
if key not in cache_df.index:
return None
h = cache_df.loc[key].hash
model_file = models_experiment_config.get_hash_pkl_file(model.get_model_name(), h)
h_fname = global_config.get_save_path(model_file)
with open(h_fname, 'rb') as f:
model = pickle.load(f)
return model
# read the CSV file that lists all cached models, and return this list as a Dataframe
def get_cache_df(self, model):
cache_csv_path = global_config.get_save_path(self.get_model_csv_cache(model.get_model_name()))
key_cols = list(self.get_id_cols())
return pd.read_csv(cache_csv_path).set_index(key_cols) if os.path.exists(cache_csv_path) else pd.DataFrame()
# creates Dataset objects for the given cohorts
def get_train_test_data(self, train_cohort, get_processors, test_cohort):
train_proc = get_processors()
train_data = Dataset(train_cohort, self.features_list, train_proc, self.labels)
train_stats = train_data.get_feature_proc_stats()
test_proc = get_processors(train_stats)
test_data = Dataset(test_cohort, self.features_list, test_proc, self.labels)
return train_data, test_data
# helper method to construct one row of metrics in the form of a Dataframe
def construct_metrics_row(
self, model, train_data, test_data, train_metrics, test_metrics
):
train_metrics = train_metrics.rename(columns={c: f'train {c}' for c in train_metrics.columns})
test_metrics = test_metrics.rename(columns={c: f'test {c}' for c in test_metrics.columns})
id_row = self.construct_id_row(model, train_data, test_data)
df = pd.concat([id_row, train_metrics, test_metrics], axis=1)
return df
# helper method to get each row's identifier values in a Dataframe
def construct_id_row(self, model, train_data, test_data):
df = pd.DataFrame()
model_col, hps_col, train_cohort_col, test_cohort_col, train_rows, test_rows, num_features = self.get_id_cols()
df[model_col] = [model.get_model_name()]
df[hps_col] = [model.jsonify_hps()]
df[train_cohort_col] = [train_data.cohort.get_identifier()]
df[test_cohort_col] = [test_data.cohort.get_identifier()]
df[train_rows] = [train_data.get_dataset().shape[0]]
df[test_rows] = [test_data.get_dataset().shape[0]]
df[num_features] = [len(train_data.get_dataset().columns)]
return df
# helper method that specifies column names for each row's identifiers
def get_id_cols(self):
model_col = 'model'
hps_col = 'hps'
train_cohort_col = 'train_cohort'
test_cohort_col = 'test_cohort'
train_rows = 'train_rows'
test_rows = 'test_rows'
num_features = 'num_features'
return model_col, hps_col, train_cohort_col, test_cohort_col, train_rows, test_rows, num_features
def get_all_fairness_cols(self, dataset):
cs = dataset.get_dataset().features.columns
ret_cols = {}
cat_proc = CategoricalFeatureProcessor()
for f in self.categorical_fairness_attributes:
ret_cols[f] = list(cat_proc.get_categorical_feature_names(f, cs))
return ret_cols
def get_raw_fairness_metrics(self, model, dataset):
preds = model.predict_labels(dataset, return_full_df=True)
metrics = FairnessMetrics()
fairness_dict = {}
cols = self.get_all_fairness_cols(dataset)
for orig_c in cols:
for c in cols[orig_c]:
assert c in preds.features.columns, f'fairness attribute {c} is not an input feature'
grouped_labels = preds[preds.features[c] == 1].groupby(('features', c))\
[('pred_labels', 'pred_label'), ('labels', 'label')].agg(list)
if orig_c not in fairness_dict:
fairness_dict[orig_c] = {}
fairness_dict[orig_c][c] = metrics.compute(
metrics.get_score_df(
grouped_labels.loc[1][0],
grouped_labels.loc[1][1]
)
).to_dict('list')
fairness_dict = \
{
(cat, v): fairness_dict[cat][v]
for cat in fairness_dict
for v in fairness_dict[cat]
}
for k1 in fairness_dict:
for k2 in fairness_dict[k1]:
assert len(fairness_dict[k1][k2]) == 1
fairness_dict[k1][k2] = fairness_dict[k1][k2][0]
return pd.DataFrame.from_dict(fairness_dict, orient='index')
def get_fairness_metrics(self, model, dataset, ref_cols=models_experiment_config.ref_cols):
raw_metrics = self.get_raw_fairness_metrics(model, dataset)
df = pd.DataFrame()
for cat in raw_metrics.index.get_level_values(0).unique():
if cat in ref_cols:
if isinstance(ref_cols[cat], tuple):
numer = raw_metrics.loc[cat].loc[ref_cols[cat][0]].median()
denom = raw_metrics.loc[cat].loc[ref_cols[cat][1]].median()
bias = numer / denom
cat_metrics = pd.DataFrame.from_dict({cat + ' median ratio': [bias[0]]})
else:
rel_metrics = raw_metrics.loc[cat] / raw_metrics.loc[(cat, ref_cols[cat])]
d = rel_metrics.to_dict()
d = {
(k1 + ': ' + f'{k2} / {ref_cols[cat]}'):[d[k1][k2]]
for k1 in d for k2 in d[k1]
}
cat_metrics = pd.DataFrame.from_dict(d)
else:
std = raw_metrics.loc[cat].std().to_dict()
cat_metrics = | pd.DataFrame.from_dict({(f'std {cat} ' + k):[std[k]] for k in std}) | pandas.DataFrame.from_dict |
#
# _umibato.py
#
# Copyright (c) 2020 <NAME>
#
# This software is released under the MIT License.
# http://opensource.org/licenses/mit-license.php
#
import os
import sys
import math
import shutil
import logging
logging.getLogger().setLevel(logging.WARNING)
import warnings
warnings.filterwarnings('ignore')
import subprocess
from multiprocessing import Pool
from tqdm.autonotebook import tqdm
import numpy as np
import pandas as pd
import seaborn as sns
sns.set()
import matplotlib.pyplot as plt
from ._gpmodule import fit_gp_model, estimate_grad_variance
from ._plotmodule import plot_state, plot_directed_network
class Umibato(object):
def __init__(self, k_min=1, k_max=10, k_step=1, augmentation_size=0,
gp_correction=False, x_standardization=True,
y_var_lower_bound=1e-4, est_y_var=True,
max_iter=100, tol=1e-4,
n_init=100, n_jobs=5, output_path='.'):
self.K_list = list(range(k_min, k_max+1, k_step))
self.augmentation_size = augmentation_size
self.gp_correction = gp_correction
self.x_standardization = x_standardization
self.y_var_lower_bound = y_var_lower_bound
self.est_y_var = est_y_var
self.max_iter = max_iter
self.tol = tol
self.n_init = n_init
self.n_jobs = n_jobs
if output_path[-1] == '/':
self.output_path = output_path[:-1]
else:
self.output_path = output_path
os.makedirs(self.output_path, exist_ok=True)
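# A minimal usage sketch (not part of the package documentation): `qmps` is assumed
# to be a taxa-by-samples quantitative abundance table whose columns match the index
# of `metadata`, and `metadata` must provide 'subjectID' and 'timepoint' columns
# (these requirements follow from fit/_estimate_growthrates below).
#   umibato = Umibato(k_min=1, k_max=5, n_jobs=4, output_path='results')
#   umibato.fit(qmps, metadata)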
def fit(self, qmps, metadata):
self._estimate_growthrates(qmps, metadata)
self._estimate_interactions()
self._copy_best_results()
if self.x_standardization:
self._modify_interaction_param()
def _estimate_growthrates(self, qmps, metadata):
x_list = []
y_list = []
y_var_list = []
timepoint_list = []
metadata_list = []
self.model_srs_list = []
if set(qmps.columns) != set(metadata.index):
print('The QMPs column name and the metadata index name must match.')
sys.exit(1)
self.metadata = metadata
pseudo_abundance = 10**(math.floor(np.log10(qmps[qmps!=0].min().min())))
ln_qmps = np.log(qmps.replace(0, pseudo_abundance))
bacteria_list = ln_qmps.index.tolist()
# ln_quantitative_abundance_table_with_metadata
ln_meta = ln_qmps.T.join(metadata)
ln_meta = ln_meta.sort_values(by=['subjectID', 'timepoint'])
self.subject_list = metadata['subjectID'].value_counts().index.tolist()
print('Fitting Gaussian process regression...')
for subject in tqdm(self.subject_list):
model_srs, x_s, y_s, y_s_var, this_metadata = \
self._estimate_growthrates_for_each_subject(ln_meta,
subject, bacteria_list)
self.model_srs_list.append(model_srs)
x_list.append(x_s)
y_list.append(y_s)
y_var_list.append(y_s_var)
metadata_list.append(this_metadata)
self._output_xy(x_list, y_list, y_var_list, metadata_list,
self.y_var_lower_bound)
def _estimate_growthrates_for_each_subject(self, ln_meta,
subject, bacteria_list):
this_ln_meta = ln_meta[ln_meta['subjectID']==subject]
this_ln = this_ln_meta[bacteria_list]
timepoint = this_ln_meta['timepoint'].astype(float)
timepoint.index = timepoint.index.astype(str)
if self.augmentation_size > 0:
augmented_timepoints = \
np.random.uniform(timepoint.min(), timepoint.max(),
size=self.augmentation_size)
timepoint = timepoint.append( | pd.Series(augmented_timepoints) | pandas.Series |
import os, datetime, pymongo, configparser
import pandas as pd
from bson import json_util
global_config = None
global_client = None
global_stocklist = None
def getConfig(root_path):
global global_config
if global_config is None:
#print("initial Config...")
global_config = configparser.ConfigParser()
global_config.read(root_path + "/" + "config.ini")
return global_config
def getClient():
global global_client
from pymongo import MongoClient
if global_client is None:
#print("initial DB Client...")
global_client = MongoClient('localhost', 27017)
return global_client
def getCollection(database, collection):
client = getClient()
db = client[database]
return db[collection]
def getStockList(root_path, database, sheet):
global global_stocklist
if global_stocklist is None:
#print("initial Stock List...")
global_stocklist = queryStockList(root_path, database, sheet)
return global_stocklist
def setStockList(df):
global global_stocklist
df.set_index('symbol', inplace=True)
global_stocklist = df
return global_stocklist
def readFromCollection(collection, queryString=None):
if queryString is None:
result = collection.find()
else:
result = collection.find(queryString)
df = pd.DataFrame(list(result))
if df.empty == False: del df['_id']
return df
def writeToCollection(collection, df, id = None):
jsonStrings = df.to_json(orient='records')
bsonStrings = json_util.loads(jsonStrings)
for string in bsonStrings:
if id is not None:
id_string = ''.join([string[item] for item in id])
string['_id'] = id_string
collection.save(string)
def readFromCollectionExtend(collection, queryString=None):
if queryString is None:
result = collection.find()
else:
result = collection.find_one(queryString)
if result is None:
return pd.DataFrame(), {}
return pd.read_json(result['data'], orient='records'), result['metadata']
def writeToCollectionExtend(collection, symbol, df, metadata=None):
jsonStrings = {"_id":symbol, "symbol":symbol, "data":df.to_json(orient='records'), "metadata":metadata}
#bsonStrings = json_util.loads(jsonStrings)
collection.save(jsonStrings)
def writeToCSV(csv_dir, CollectionKey, df):
if os.path.exists(csv_dir) == False:
os.makedirs(csv_dir)
filename = csv_dir + CollectionKey + '.csv'
df.to_csv(filename)
def queryStockList(root_path, database, sheet):
CollectionKey = sheet + "_LIST"
config = getConfig(root_path)
storeType = int(config.get('Setting', 'StoreType'))
try:
if storeType == 1:
collection = getCollection(database, CollectionKey)
df = readFromCollection(collection)
if df.empty == False: df = setStockList(df)
return df
if storeType == 2:
csv_dir = root_path + "/" + config.get('Paths', database) + config.get('Paths', sheet) + config.get('Paths', 'CSV_SHARE')
filename = csv_dir + CollectionKey + '.csv'
if os.path.exists(filename):
df = pd.read_csv(filename, index_col=0)
if df.empty == False: df = setStockList(df)
return df
return pd.DataFrame()
except Exception as e:
print("queryStockList Exception", e)
return pd.DataFrame()
return pd.DataFrame()
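# For reference, these helpers assume a config.ini of roughly the following shape.
# Section and key names are taken from the config.get() calls in this module; the
# concrete values and path keys are illustrative placeholders.
#   [Setting]
#   StoreType = 1
#   [Paths]
#   <database> = data/stock/
#   <sheet> = us_daily/
#   CSV_SHARE = share/
# StoreType 1 stores data in MongoDB collections, StoreType 2 writes CSV files.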
def storeStockList(root_path, database, sheet, df, symbol = None):
CollectionKey = sheet + "_LIST"
config = getConfig(root_path)
storeType = int(config.get('Setting', 'StoreType'))
try:
if storeType == 1:
collection = getCollection(database, CollectionKey)
if symbol is not None:
df = df[df.index == symbol].reset_index()
writeToCollection(collection, df, ['symbol'])
# try:
# index_info = collection.index_information()
# print("index info", index_info)
# except Exception as e:
# print(e)
# writeToCollection(collection, df)
# #collection.create_index('symbol', unique=True, drop_dups=True)
# else:
# writeToCollection(collection, df)
if storeType == 2:
csv_dir = root_path + "/" + config.get('Paths', database) + config.get('Paths', sheet) + config.get('Paths', 'CSV_SHARE')
writeToCSV(csv_dir, CollectionKey, df)
except Exception as e:
print("storeStockList Exception", e)
def queryStockPublishDay(root_path, database, sheet, symbol):
CollectionKey = sheet + "_IPO"
config = getConfig(root_path)
storeType = int(config.get('Setting', 'StoreType'))
try:
if storeType == 1:
collection = getCollection(database, CollectionKey)
df = readFromCollection(collection)
if df.empty == False:
publishDay = df[df['symbol'] == symbol]
if len(publishDay) == 1:
return publishDay['date'].values[0]
return ''
if storeType == 2:
csv_dir = root_path + "/" + config.get('Paths', database) + config.get('Paths', sheet) + config.get('Paths', 'CSV_SHARE')
filename = csv_dir + CollectionKey + '.csv'
if os.path.exists(filename) == False: return ''
df = pd.read_csv(filename, index_col=["index"])
if df.empty == False:
publishDay = df[df['symbol'] == symbol]
if len(publishDay) == 1:
return publishDay['date'].values[0]
return ''
except Exception as e:
print("queryStockPublishDay Exception", e)
return ''
return ''
def storePublishDay(root_path, database, sheet, symbol, date):
CollectionKey = sheet + "_IPO"
config = getConfig(root_path)
storeType = int(config.get('Setting', 'StoreType'))
try:
if storeType == 1:
collection = getCollection(database, CollectionKey)
df = pd.DataFrame(columns = ['symbol', 'date'])
df.index.name = 'index'
df.loc[len(df)] = [symbol, date]
writeToCollection(collection, df)
if storeType == 2:
csv_dir = root_path + "/" + config.get('Paths', database) + config.get('Paths', sheet) + config.get('Paths', 'CSV_SHARE')
filename = csv_dir + CollectionKey + '.csv'
if os.path.exists(filename):
df = pd.read_csv(filename, index_col=["index"])
publishDate = df[df['symbol'] == symbol]
if publishDate.empty:
df.loc[len(df)] = [symbol, date]
else:
df = pd.DataFrame(columns = ['symbol', 'date'])
df.index.name = 'index'
df.loc[len(df)] = [symbol, date]
writeToCSV(csv_dir, CollectionKey, df)
except Exception as e:
print("storePublishDay Exception", e)
def queryStock(root_path, database, sheet_1, sheet_2, symbol, update_key):
CollectionKey = sheet_1 + sheet_2 + '_DATA'
config = getConfig(root_path)
storeType = int(config.get('Setting', 'StoreType'))
stockList = getStockList(root_path, database, sheet_1)
lastUpdateTime = pd.Timestamp(stockList.loc[symbol][update_key])
try:
if storeType == 1:
collection = getCollection(database, CollectionKey)
queryString = { "symbol" : symbol }
df, metadata = readFromCollectionExtend(collection, queryString)
if df.empty: return pd.DataFrame(), lastUpdateTime
df.set_index('date', inplace=True)
if 'index' in df:
del df['index']
return df, lastUpdateTime
if storeType == 2:
csv_dir = root_path + "/" + config.get('Paths', database) + config.get('Paths', sheet)
filename = csv_dir + symbol + '.csv'
if os.path.exists(filename) == False: return pd.DataFrame(), lastUpdateTime
df = pd.read_csv(filename, index_col=["date"])
return df, lastUpdateTime
except Exception as e:
print("queryStock Exception", e)
return pd.DataFrame(), lastUpdateTime
return pd.DataFrame(), lastUpdateTime
def storeStock(root_path, database, sheet_1, sheet_2, symbol, df, update_key):
CollectionKey = sheet_1 + sheet_2 + '_DATA'
config = getConfig(root_path)
storeType = int(config.get('Setting', 'StoreType'))
now_date = datetime.datetime.now().strftime("%Y-%m-%d")
stockList = getStockList(root_path, database, sheet_1)
if (stockList[stockList.index == symbol][update_key][0] != now_date):
stockList.set_value(symbol, update_key, now_date)
storeStockList(root_path, database, sheet_1, stockList, symbol)
# df.set_index('date')
# df.index = df.index.astype(str)
# df.sort_index(ascending=True, inplace=True)
try:
if storeType == 1:
collection = getCollection(database, CollectionKey)
df = df.reset_index()
if 'date' in df: df.date = df.date.astype(str)
writeToCollectionExtend(collection, symbol, df, {})
if storeType == 2:
csv_dir = root_path + "/" + config.get('Paths', database)+ config.get('Paths', sheet)
writeToCSV(csv_dir, symbol, df)
except Exception as e:
print("storeStock Exception", e)
def queryNews(root_path, database, sheet, symbol):
config = getConfig(root_path)
storeType = int(config.get('Setting', 'StoreType'))
lastUpdateTime = pd.Timestamp(getStockList(root_path, database, 'SHEET_US_DAILY').loc[symbol]['news_update'])
try:
if storeType == 1:
collection = getCollection(database, sheet)
queryString = { "symbol" : symbol }
df = readFromCollection(collection, queryString)
if df.empty: return pd.DataFrame(), lastUpdateTime
#df.set_index('date', inplace=True)
return df, lastUpdateTime
if storeType == 2:
dir = root_path + "/" + config.get('Paths', database) + config.get('Paths', sheet)
filename = dir + symbol + '.csv'
if os.path.exists(filename) == False: return pd.DataFrame(), lastUpdateTime
df = pd.read_csv(filename)
return df, lastUpdateTime
except Exception as e:
print("queryNews Exception", e)
return pd.DataFrame(), lastUpdateTime
return pd.DataFrame(), lastUpdateTime
def storeNews(root_path, database, sheet, symbol, df):
config = getConfig(root_path)
storeType = int(config.get('Setting', 'StoreType'))
now_date = datetime.datetime.now().strftime("%Y-%m-%d")
stockList = getStockList(root_path, database, 'SHEET_US_DAILY')
stockList.set_value(symbol, 'news_update', now_date)
storeStockList(root_path, database, "SHEET_US_DAILY", stockList.reset_index())
df = df.drop_duplicates(subset=['uri'], keep='first')
#df.set_index(['date'], inplace=True)
#df.sort_index(ascending=True, inplace=True)
try:
if storeType == 1:
collection = getCollection(database, sheet)
#df = df.reset_index()
df['symbol'] = symbol
writeToCollection(collection, df, ['symbol', 'uri'])
if storeType == 2:
csv_dir = root_path + "/" + config.get('Paths', database) + config.get('Paths', sheet)
writeToCSV(csv_dir, symbol, df)
except Exception as e:
print("storeNews Exception", e)
def queryEarnings(root_path, database, sheet, date):
config = getConfig(root_path)
storeType = int(config.get('Setting', 'StoreType'))
try:
if storeType == 1:
collection = getCollection(database, sheet)
queryString = { "symbol" : date }
df, metadata = readFromCollectionExtend(collection, queryString)
return df
if storeType == 2:
dir = root_path + "/" + config.get('Paths', database) + config.get('Paths', sheet)
filename = dir + date + ".csv"
if os.path.exists(filename): return pd.read_csv(filename)
return pd.DataFrame()
except Exception as e:
print("queryEarnings Exception", e)
return pd.DataFrame()
return | pd.DataFrame() | pandas.DataFrame |
#!/usr/bin/env python
import os
import csv
import sys
import time
import glob
import logging
import warnings
import argparse
import traceback
import multiprocessing
import Bio
import numpy as np
import pandas as pd
from tqdm import tqdm
from Bio import SeqIO
import concurrent.futures
from concurrent import futures
from inStrain import SNVprofile
from collections import defaultdict
with warnings.catch_warnings():
warnings.simplefilter("ignore")
from Bio.codonalign.codonalphabet import default_codon_table
import inStrain.SNVprofile
import inStrain.controller
#import inStrain.profileUtilities
import inStrain.logUtils
class Controller():
'''
The command line access point to the program
'''
def main(self, args):
'''
The main method when run on the command line
'''
# Parse arguments
args = self.validate_input(args)
vargs = vars(args)
IS = vargs.pop('IS')
GF = vargs.pop('gene_file')
# Read the genes file
logging.debug('Loading genes')
scaff_2_gene_database, scaff2gene2sequence = parse_genes(GF, **vargs)
GdbP = pd.concat([x for x in scaff_2_gene_database.values()])
# Calculate all your parallelized gene-level stuff
name2result = calculate_gene_metrics(IS, GdbP, scaff2gene2sequence, **vargs)
# Store information
IS.store('genes_fileloc', GF, 'value', 'Location of genes file that was used to call genes')
IS.store('genes_table', GdbP, 'pandas', 'Location of genes in the associated genes_file')
IS.store('genes_coverage', name2result['coverage'], 'pandas', 'Coverage of individual genes')
IS.store('genes_clonality', name2result['clonality'], 'pandas', 'Clonality of individual genes')
IS.store('genes_SNP_count', name2result['SNP_density'], 'pandas', 'SNP density and counts of individual genes')
IS.store('SNP_mutation_types', name2result['SNP_mutation_types'], 'pandas', 'The mutation types of SNPs')
if vargs.get('store_everything', False):
IS.store('scaff2gene2sequence', scaff2gene2sequence, 'pickle', 'Dicitonary of scaff -> gene -> nucleotide sequence')
# Store the output
IS.generate('gene_info', **vargs)
IS.generate("SNVs", **vargs)
def validate_input(self, args):
'''
Validate and mess with the arguments a bit
'''
# Make sure the IS object is OK and load it
assert os.path.exists(args.IS)
args.IS = inStrain.SNVprofile.SNVprofile(args.IS)
# Set up the logger
log_loc = args.IS.get_location('log') + 'log.log'
inStrain.controller.setup_logger(log_loc)
return args
def gene_profile_worker(gene_cmd_queue, gene_result_queue, single_thread=False):
'''
Worker to profile splits
'''
while True:
# Get command
if not single_thread:
cmds = gene_cmd_queue.get(True)
else:
try:
cmds = gene_cmd_queue.get(timeout=5)
except:
return
# Process cmd
GPs = profile_genes_wrapper(cmds)
gene_result_queue.put(GPs)
def profile_genes_wrapper(cmds):
'''
Take a group of commands and run geneprofile
'''
results = []
for cmd in cmds:
try:
results.append(profile_genes(cmd.scaffold, **cmd.arguments))
except Exception as e:
print(e)
traceback.print_exc()
logging.error("FAILURE GeneException {0}".format(str(cmd.scaffold)))
results.append(None)
return results
def calculate_gene_metrics(IS, GdbP, scaff2gene2sequenceP, **kwargs):
'''
Calculate the metrics of all genes on a parallelized scaffold-level basis
IS = Initialized inStrain.SNVprofile
GdbP = List of gene locations
gene2sequenceP = Dicitonary of gene -> nucleotide sequence
'''
inStrain.logUtils.log_checkpoint("GeneProfile", "calculate_gene_metrics", "start")
# Get key word arguments for the wrapper
p = int(kwargs.get('processes', 6))
# Make a list of scaffolds to profile the genes of
scaffolds_with_genes = set(GdbP['scaffold'].unique())
scaffolds_in_IS = set(IS._get_covt_keys())
scaffolds_to_profile = scaffolds_with_genes.intersection(scaffolds_in_IS)
logging.info("{0} scaffolds with genes in the input; {1} scaffolds in the IS, {2} to compare".format(
len(scaffolds_with_genes), len(scaffolds_in_IS), len(scaffolds_to_profile)))
# Calculate scaffold -> number of genes to profile
s2g = GdbP['scaffold'].value_counts().to_dict()
kwargs['s2g'] = s2g
# Make global objects for the profiling
inStrain.logUtils.log_checkpoint("GeneProfile", "make_globals", "start")
global CumulativeSNVtable
CumulativeSNVtable = IS.get('cumulative_snv_table')
if len(CumulativeSNVtable) > 0:
CumulativeSNVtable = CumulativeSNVtable.sort_values('mm')
else:
CumulativeSNVtable = pd.DataFrame(columns=['scaffold'])
global covTs
covTs = IS.get('covT', scaffolds=scaffolds_to_profile)
global clonTs
clonTs = IS.get('clonT', scaffolds=scaffolds_to_profile)
global scaff2gene2sequence
scaff2gene2sequence = scaff2gene2sequenceP
global Gdb
Gdb = GdbP
inStrain.logUtils.log_checkpoint("GeneProfile", "make_globals", "end")
# Generate commands and queue them
logging.debug('Creating commands')
cmd_groups = [x for x in iterate_commands(scaffolds_to_profile, Gdb, kwargs)]
logging.debug('There are {0} cmd groups'.format(len(cmd_groups)))
inStrain.logUtils.log_checkpoint("GeneProfile", "create_queue", "start")
gene_cmd_queue = multiprocessing.Queue()
gene_result_queue = multiprocessing.Queue()
GeneProfiles = []
for cmd_group in cmd_groups:
gene_cmd_queue.put(cmd_group)
inStrain.logUtils.log_checkpoint("GeneProfile", "create_queue", "end")
if p > 1:
logging.debug('Establishing processes')
processes = []
for i in range(0, p):
processes.append(multiprocessing.Process(target=gene_profile_worker, args=(gene_cmd_queue, gene_result_queue)))
for proc in processes:
proc.start()
# Set up progress bar
pbar = tqdm(desc='Profiling genes: ', total=len(cmd_groups))
# Get the results
recieved_profiles = 0
while recieved_profiles < len(cmd_groups):
GPs = gene_result_queue.get()
recieved_profiles += 1
pbar.update(1)
for GP in GPs:
if GP is not None:
logging.debug(GP[4])
GeneProfiles.append(GP)
# Close multi-processing
for proc in processes:
proc.terminate()
# Close progress bar
pbar.close()
else:
gene_profile_worker(gene_cmd_queue, gene_result_queue, single_thread=True)
logging.info("Done profiling genes")
# Get the genes
recieved_profiles = 0
while recieved_profiles < len(cmd_groups):
logging.debug('going to grab at {0}'.format(recieved_profiles))
GPs = gene_result_queue.get(timeout=5)
logging.debug('did a grab at {0}'.format(recieved_profiles))
recieved_profiles += 1
for GP in GPs:
if GP is not None:
logging.debug(GP[4])
GeneProfiles.append(GP)
inStrain.logUtils.log_checkpoint("GeneProfile", "return_results", "start")
name2result = {}
for i, name in enumerate(['coverage', 'clonality', 'SNP_density', 'SNP_mutation_types']):
name2result[name] = pd.concat([G[i] for G in GeneProfiles])
inStrain.logUtils.log_checkpoint("GeneProfile", "return_results", "end")
inStrain.logUtils.log_checkpoint("GeneProfile", "calculate_gene_metrics", "end")
return name2result
def profile_genes(scaffold, **kwargs):
'''
This is the money that gets multiprocessed
Relies on having the globals "Gdb", "scaff2gene2sequence", "CumulativeSNVtable", "covTs", and "clonTs"
* Calculate the clonality, coverage, linkage, and SNV_density for each gene
* Determine whether each SNP is synynomous or nonsynonymous
'''
# Log
pid = os.getpid()
log_message = "\nSpecialPoint_genes {0} PID {1} whole start {2}".format(scaffold, pid, time.time())
# For testing purposes
    if scaffold == 'FailureScaffoldHeaderTesting':
assert False
# Get the list of genes for this scaffold
gdb = Gdb[Gdb['scaffold'] == scaffold]
# Calculate gene-level coverage
log_message += "\nSpecialPoint_genes {0} PID {1} coverage start {2}".format(scaffold, pid, time.time())
if scaffold not in covTs:
        logging.info("{0} isn't in covT!".format(scaffold))
cdb = pd.DataFrame()
else:
covT = covTs[scaffold]
cdb = calc_gene_coverage(gdb, covT)
del covT
log_message += "\nSpecialPoint_genes {0} PID {1} coverage end {2}".format(scaffold, pid, time.time())
# Calculate gene-level clonality
log_message += "\nSpecialPoint_genes {0} PID {1} clonality start {2}".format(scaffold, pid, time.time())
if scaffold not in clonTs:
        logging.info("{0} isn't in clonT!".format(scaffold))
cldb = pd.DataFrame()
else:
clonT = clonTs[scaffold]
cldb = calc_gene_clonality(gdb, clonT)
del clonT
log_message += "\nSpecialPoint_genes {0} PID {1} clonality end {2}".format(scaffold, pid, time.time())
    # Determine whether SNPs are synonymous or non-synonymous
log_message += "\nSpecialPoint_genes {0} PID {1} SNP_character start {2}".format(scaffold, pid, time.time())
Ldb = CumulativeSNVtable[CumulativeSNVtable['scaffold'] == scaffold]
if len(Ldb) == 0:
sdb = pd.DataFrame()
else:
sdb = Characterize_SNPs_wrapper(Ldb, gdb, scaff2gene2sequence[scaffold])
log_message += "\nSpecialPoint_genes {0} PID {1} SNP_character end {2}".format(scaffold, pid, time.time())
# Calculate gene-level SNP counts
log_message += "\nSpecialPoint_genes {0} PID {1} SNP_counts start {2}".format(scaffold, pid, time.time())
if len(Ldb) == 0:
        ldb = pd.DataFrame()
#!/usr/bin/env python
# encoding: utf-8
#
# Copyright SAS Institute
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# NOTE: This test requires a running CAS server. You must use an ~/.authinfo
# file to specify your username and password. The CAS host and port must
# be specified using the CASHOST and CASPORT environment variables.
# A specific protocol ('cas', 'http', 'https', or 'auto') can be set using
# the CASPROTOCOL environment variable.
import copy
import datetime
import numpy as np
import pandas as pd
import os
import six
import swat
import swat.utils.testing as tm
import sys
import time
import unittest
from swat.cas.datamsghandlers import *
# Pick sort keys that will match across SAS and Pandas sorting orders
SORT_KEYS = ['Origin', 'MSRP', 'Horsepower', 'Model']
USER, PASSWD = tm.get_user_pass()
HOST, PORT, PROTOCOL = tm.get_host_port_proto()
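# Hedged reference (not used by the tests): the environment variables described in
# the note at the top of this file, shown with placeholder values only.
_EXAMPLE_CAS_ENV = {
    'CASHOST': 'cas.example.com',  # placeholder CAS host name
    'CASPORT': '5570',             # placeholder CAS port
    'CASPROTOCOL': 'cas',          # one of 'cas', 'http', 'https', 'auto'
}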
class TestDataMsgHandlers(tm.TestCase):
# Create a class attribute to hold the cas host type
server_type = None
def setUp(self):
swat.reset_option()
swat.options.cas.print_messages = False
swat.options.interactive_mode = False
swat.options.cas.missing.int64 = -999999
self.s = swat.CAS(HOST, PORT, USER, PASSWD, protocol=PROTOCOL)
if self.s._protocol in ['http', 'https']:
tm.TestCase.skipTest(self, 'REST does not support data messages')
if type(self).server_type is None:
# Set once per class and have every test use it. No need to change between tests.
type(self).server_type = tm.get_cas_host_type(self.s)
self.srcLib = tm.get_casout_lib(self.server_type)
r = tm.load_data(self.s, 'datasources/cars_single.sashdat', self.server_type)
self.tablename = r['tableName']
self.assertNotEqual(self.tablename, None)
self.table = r['casTable']
def tearDown(self):
# tear down tests
try:
self.s.endsession()
except swat.SWATError:
pass
del self.s
swat.reset_option()
def test_csv(self):
import swat.tests as st
myFile = os.path.join(os.path.dirname(st.__file__), 'datasources', 'cars.csv')
cars = pd.io.parsers.read_csv(myFile)
dmh = swat.datamsghandlers.CSV(myFile, nrecs=20)
# Use the default caslib. Get it from the results, and use it in later actions.
out = self.s.addtable(table='cars', **dmh.args.addtable)
srcLib = out['caslib']
out = self.s.tableinfo(caslib=srcLib, table='cars')
data = out['TableInfo']
self.assertEqual(data.ix[:,'Name'][0], 'CARS')
self.assertEqual(data.ix[:,'Rows'][0], 428)
self.assertEqual(data.ix[:,'Columns'][0], 15)
out = self.s.columninfo(table=self.s.CASTable('cars', caslib=srcLib))
data = out['ColumnInfo']
self.assertEqual(len(data), 15)
self.assertEqual(data.ix[:,'Column'].tolist(), 'Make,Model,Type,Origin,DriveTrain,MSRP,Invoice,EngineSize,Cylinders,Horsepower,MPG_City,MPG_Highway,Weight,Wheelbase,Length'.split(','))
self.assertEqual(data.ix[:,'Type'].tolist(), ['varchar', 'varchar', 'varchar', 'varchar', 'varchar', 'int64', 'int64', 'double', 'int64', 'int64', 'int64', 'int64', 'int64', 'int64', 'int64'])
self.assertTablesEqual(cars, self.s.CASTable('cars', caslib=srcLib), sortby=SORT_KEYS)
self.s.droptable(caslib=srcLib, table='cars')
def test_dataframe(self):
# Boolean
s_bool_ = pd.Series([True, False], dtype=np.bool_)
s_bool8 = pd.Series([True, False], dtype=np.bool8)
# Integers
s_byte = pd.Series([100, 999], dtype=np.byte)
s_short = pd.Series([100, 999], dtype=np.short)
s_intc = pd.Series([100, 999], dtype=np.intc)
        s_int_ = pd.Series([100, 999], dtype=np.int_)
"""optimize over a network structure."""
import argparse
import logging
import os
import copy
import matplotlib.pyplot as plt
import numpy as np
import open3d as o3d
import pandas as pd
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
from model import Neural_Prior
import config
from data import (ArgoverseSceneFlowDataset, KITTISceneFlowDataset,
NuScenesSceneFlowDataset, FlyingThings3D)
from utils import scene_flow_metrics, Timers, GeneratorWrap, EarlyStopping
from loss import my_chamfer_fn
from visualize import show_flows, flow_to_rgb, custom_draw_geometry_with_key_callback
device = torch.device("cuda:0")
def solver(
pc1: torch.Tensor,
pc2: torch.Tensor,
flow: torch.Tensor,
options: argparse.Namespace,
net: torch.nn.Module,
i: int,
):
for param in net.parameters():
param.requires_grad = True
if options.backward_flow:
net_inv = copy.deepcopy(net)
params = [{'params': net.parameters(), 'lr': options.lr, 'weight_decay': options.weight_decay},
{'params': net_inv.parameters(), 'lr': options.lr, 'weight_decay': options.weight_decay}]
else:
params = net.parameters()
if options.optimizer == "sgd":
print('using SGD.')
optimizer = torch.optim.SGD(params, lr=options.lr, momentum=options.momentum, weight_decay=options.weight_decay)
elif options.optimizer == "adam":
print("Using Adam optimizer.")
optimizer = torch.optim.Adam(params, lr=options.lr, weight_decay=0)
total_losses = []
chamfer_losses = []
early_stopping = EarlyStopping(patience=options.early_patience, min_delta=0.0001)
if options.time:
timers = Timers()
timers.tic("solver_timer")
pc1 = pc1.cuda().contiguous()
pc2 = pc2.cuda().contiguous()
flow = flow.cuda().contiguous()
normal1 = None
normal2 = None
# ANCHOR: initialize best metrics
best_loss_1 = 10.
best_flow_1 = None
best_epe3d_1 = 1.
best_acc3d_strict_1 = 0.
best_acc3d_relax_1 = 0.
best_angle_error_1 = 1.
best_outliers_1 = 1.
best_epoch = 0
for epoch in range(options.iters):
optimizer.zero_grad()
flow_pred_1 = net(pc1)
pc1_deformed = pc1 + flow_pred_1
loss_chamfer_1, _ = my_chamfer_fn(pc2, pc1_deformed, normal2, normal1)
if options.backward_flow:
flow_pred_1_prime = net_inv(pc1_deformed)
pc1_prime_deformed = pc1_deformed - flow_pred_1_prime
loss_chamfer_1_prime, _ = my_chamfer_fn(pc1_prime_deformed, pc1, normal2, normal1)
if options.backward_flow:
loss_chamfer = loss_chamfer_1 + loss_chamfer_1_prime
else:
loss_chamfer = loss_chamfer_1
loss = loss_chamfer
flow_pred_1_final = pc1_deformed - pc1
if options.compute_metrics:
EPE3D_1, acc3d_strict_1, acc3d_relax_1, outlier_1, angle_error_1 = scene_flow_metrics(flow_pred_1_final, flow)
else:
EPE3D_1, acc3d_strict_1, acc3d_relax_1, outlier_1, angle_error_1 = 0, 0, 0, 0, 0
# ANCHOR: get best metrics
if loss <= best_loss_1:
best_loss_1 = loss.item()
best_epe3d_1 = EPE3D_1
best_flow_1 = flow_pred_1_final
best_acc3d_strict_1 = acc3d_strict_1
best_acc3d_relax_1 = acc3d_relax_1
best_angle_error_1 = angle_error_1
best_outliers_1 = outlier_1
best_epoch = epoch
if epoch % 50 == 0:
logging.info(f"[Sample: {i}]"
f"[Ep: {epoch}] [Loss: {loss:.5f}] "
f" Metrics: flow 1 --> flow 2"
f" [EPE: {EPE3D_1:.3f}] [Acc strict: {acc3d_strict_1 * 100:.3f}%]"
f" [Acc relax: {acc3d_relax_1 * 100:.3f}%] [Angle error (rad): {angle_error_1:.3f}]"
f" [Outl.: {outlier_1 * 100:.3f}%]")
total_losses.append(loss.item())
chamfer_losses.append(loss_chamfer)
if options.animation:
yield flow_pred_1_final.detach().cpu().numpy()
if early_stopping.step(loss):
break
loss.backward()
optimizer.step()
if options.time:
timers.toc("solver_timer")
time_avg = timers.get_avg("solver_timer")
logging.info(timers.print())
# ANCHOR: get the best metrics
info_dict = {
'loss': best_loss_1,
'EPE3D_1': best_epe3d_1,
'acc3d_strict_1': best_acc3d_strict_1,
'acc3d_relax_1': best_acc3d_relax_1,
'angle_error_1': best_angle_error_1,
'outlier_1': best_outliers_1,
'time': time_avg,
'epoch': best_epoch
}
# NOTE: visualization
if options.visualize:
fig = plt.figure(figsize=(13, 5))
ax = fig.gca()
ax.plot(total_losses, label="loss")
ax.legend(fontsize="14")
ax.set_xlabel("Iteration", fontsize="14")
ax.set_ylabel("Loss", fontsize="14")
ax.set_title("Loss vs iterations", fontsize="14")
plt.show()
idx = 0
show_flows(pc1[idx], pc2[idx], best_flow_1[idx])
# ANCHOR: new plot style
pc1_o3d = o3d.geometry.PointCloud()
colors_flow = flow_to_rgb(flow[0].cpu().numpy().copy())
pc1_o3d.points = o3d.utility.Vector3dVector(pc1[0].cpu().numpy().copy())
pc1_o3d.colors = o3d.utility.Vector3dVector(colors_flow / 255.0)
custom_draw_geometry_with_key_callback([pc1_o3d]) # Press 'k' to see with dark background.
return info_dict
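# --- Hedged usage sketch (not part of the original script) -----------------------
# A minimal way to exercise `solver` outside the dataset loop below, using a random
# point cloud and a known constant flow. The option values are assumptions loosely
# mirroring this repository's argparse defaults; a CUDA device is required.
def _example_solver_on_synthetic_cloud():
    opts = argparse.Namespace(
        backward_flow=False, optimizer='adam', lr=0.008, momentum=0.9,
        weight_decay=0.0, early_patience=100, iters=5000, time=True,
        compute_metrics=True, animation=False, visualize=False)
    pc1 = torch.rand(1, 2048, 3)
    flow = torch.full_like(pc1, 0.05)      # constant ground-truth flow
    pc2 = pc1 + flow
    net = Neural_Prior(filter_size=128, act_fn='relu').cuda()
    gen = GeneratorWrap(solver(pc1, pc2, flow, opts, net, 0))
    for _ in gen:                          # drain the generator to run the optimization
        pass
    return gen.value                       # best-metric dict returned by solver()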
def optimize_neural_prior(options, data_loader):
if options.time:
timers = Timers()
timers.tic("total_time")
save_dir_path = f"checkpoints/{options.exp_name}"
outputs = []
if options.model == 'neural_prior':
net = Neural_Prior(filter_size=options.hidden_units, act_fn=options.act_fn).cuda()
else:
raise Exception("Model not available.")
for i, data in tqdm(enumerate(data_loader), total=len(data_loader), smoothing=0.9):
logging.info(f"# Working on sample: {data_loader.dataset.datapath[i]}...")
pc1, pc2, flow = data
if options.visualize:
idx = 0
# NOTE: ground truth flow
show_flows(pc1[idx], pc2[idx], flow[idx])
solver_generator = GeneratorWrap(solver(pc1, pc2, flow, options, net, i))
if options.animation:
#TODO: save frames to make video.
info_dict = solver_generator.value
else:
for _ in solver_generator: pass
info_dict = solver_generator.value
# Collect results.
info_dict['filepath'] = data_loader.dataset.datapath[i]
outputs.append(info_dict)
print(info_dict)
if options.time:
timers.toc("total_time")
time_avg = timers.get_avg("total_time")
logging.info(timers.print())
    df = pd.DataFrame(outputs)
# Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
from collections import OrderedDict
import numpy as np
import pandas as pd
import pytest
try:
import pyarrow as pa
except ImportError: # pragma: no cover
pa = None
from ....config import options, option_context
from ....dataframe import DataFrame
from ....tensor import arange, tensor
from ....tensor.random import rand
from ....tests.core import require_cudf
from ....utils import lazy_import
from ... import eval as mars_eval, cut, qcut
from ...datasource.dataframe import from_pandas as from_pandas_df
from ...datasource.series import from_pandas as from_pandas_series
from ...datasource.index import from_pandas as from_pandas_index
from .. import to_gpu, to_cpu
from ..to_numeric import to_numeric
from ..rebalance import DataFrameRebalance
cudf = lazy_import('cudf', globals=globals())
@require_cudf
def test_to_gpu_execution(setup_gpu):
pdf = pd.DataFrame(np.random.rand(20, 30), index=np.arange(20, 0, -1))
df = from_pandas_df(pdf, chunk_size=(13, 21))
cdf = to_gpu(df)
res = cdf.execute().fetch()
assert isinstance(res, cudf.DataFrame)
pd.testing.assert_frame_equal(res.to_pandas(), pdf)
pseries = pdf.iloc[:, 0]
series = from_pandas_series(pseries)
cseries = series.to_gpu()
res = cseries.execute().fetch()
assert isinstance(res, cudf.Series)
pd.testing.assert_series_equal(res.to_pandas(), pseries)
@require_cudf
def test_to_cpu_execution(setup_gpu):
pdf = pd.DataFrame(np.random.rand(20, 30), index=np.arange(20, 0, -1))
df = from_pandas_df(pdf, chunk_size=(13, 21))
cdf = to_gpu(df)
df2 = to_cpu(cdf)
res = df2.execute().fetch()
assert isinstance(res, pd.DataFrame)
pd.testing.assert_frame_equal(res, pdf)
pseries = pdf.iloc[:, 0]
series = from_pandas_series(pseries, chunk_size=(13, 21))
cseries = to_gpu(series)
series2 = to_cpu(cseries)
res = series2.execute().fetch()
assert isinstance(res, pd.Series)
pd.testing.assert_series_equal(res, pseries)
def test_rechunk_execution(setup):
data = pd.DataFrame(np.random.rand(8, 10))
df = from_pandas_df(pd.DataFrame(data), chunk_size=3)
df2 = df.rechunk((3, 4))
res = df2.execute().fetch()
pd.testing.assert_frame_equal(data, res)
data = pd.DataFrame(np.random.rand(10, 10), index=np.random.randint(-100, 100, size=(10,)),
columns=[np.random.bytes(10) for _ in range(10)])
df = from_pandas_df(data)
df2 = df.rechunk(5)
res = df2.execute().fetch()
pd.testing.assert_frame_equal(data, res)
# test Series rechunk execution.
data = pd.Series(np.random.rand(10,))
series = from_pandas_series(data)
series2 = series.rechunk(3)
res = series2.execute().fetch()
pd.testing.assert_series_equal(data, res)
series2 = series.rechunk(1)
res = series2.execute().fetch()
pd.testing.assert_series_equal(data, res)
# test index rechunk execution
data = pd.Index(np.random.rand(10,))
index = from_pandas_index(data)
index2 = index.rechunk(3)
res = index2.execute().fetch()
pd.testing.assert_index_equal(data, res)
index2 = index.rechunk(1)
res = index2.execute().fetch()
pd.testing.assert_index_equal(data, res)
# test rechunk on mixed typed columns
data = pd.DataFrame({0: [1, 2], 1: [3, 4], 'a': [5, 6]})
df = from_pandas_df(data)
df = df.rechunk((2, 2)).rechunk({1: 3})
res = df.execute().fetch()
pd.testing.assert_frame_equal(data, res)
def test_series_map_execution(setup):
raw = pd.Series(np.arange(10))
s = from_pandas_series(raw, chunk_size=7)
with pytest.raises(ValueError):
        # cannot infer dtype here: the inferred dtype would be int, but the
        # actual result is float because unmatched values become NaN
        # (see the sketch after this test)
s.map({5: 10})
r = s.map({5: 10}, dtype=float)
result = r.execute().fetch()
expected = raw.map({5: 10})
pd.testing.assert_series_equal(result, expected)
r = s.map({i: 10 + i for i in range(7)}, dtype=float)
result = r.execute().fetch()
expected = raw.map({i: 10 + i for i in range(7)})
pd.testing.assert_series_equal(result, expected)
r = s.map({5: 10}, dtype=float, na_action='ignore')
result = r.execute().fetch()
expected = raw.map({5: 10}, na_action='ignore')
pd.testing.assert_series_equal(result, expected)
# dtype can be inferred
r = s.map({5: 10.})
result = r.execute().fetch()
expected = raw.map({5: 10.})
pd.testing.assert_series_equal(result, expected)
r = s.map(lambda x: x + 1, dtype=int)
result = r.execute().fetch()
expected = raw.map(lambda x: x + 1)
pd.testing.assert_series_equal(result, expected)
def f(x: int) -> float:
return x + 1.
# dtype can be inferred for function
r = s.map(f)
result = r.execute().fetch()
expected = raw.map(lambda x: x + 1.)
pd.testing.assert_series_equal(result, expected)
def f(x: int):
return x + 1.
# dtype can be inferred for function
r = s.map(f)
result = r.execute().fetch()
expected = raw.map(lambda x: x + 1.)
pd.testing.assert_series_equal(result, expected)
# test arg is a md.Series
raw2 = pd.Series([10], index=[5])
s2 = from_pandas_series(raw2)
r = s.map(s2, dtype=float)
result = r.execute().fetch()
expected = raw.map(raw2)
pd.testing.assert_series_equal(result, expected)
# test arg is a md.Series, and dtype can be inferred
raw2 = pd.Series([10.], index=[5])
s2 = from_pandas_series(raw2)
r = s.map(s2)
result = r.execute().fetch()
expected = raw.map(raw2)
pd.testing.assert_series_equal(result, expected)
# test str
raw = pd.Series(['a', 'b', 'c', 'd'])
s = from_pandas_series(raw, chunk_size=2)
r = s.map({'c': 'e'})
result = r.execute().fetch()
expected = raw.map({'c': 'e'})
pd.testing.assert_series_equal(result, expected)
# test map index
raw = pd.Index(np.random.rand(7))
idx = from_pandas_index(pd.Index(raw), chunk_size=2)
r = idx.map(f)
result = r.execute().fetch()
expected = raw.map(lambda x: x + 1.)
pd.testing.assert_index_equal(result, expected)
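# Hedged aside (not a Mars test): the plain-pandas behaviour that forces the
# explicit dtype= in s.map({5: 10}) above — keys missing from the mapping become
# NaN, so the result dtype is float64 even though the mapped values are ints.
def _example_pandas_map_nan_promotion():
    s = pd.Series(np.arange(10))
    mapped = s.map({5: 10})
    assert mapped.dtype == np.dtype('float64')
    return mapped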
def test_describe_execution(setup):
s_raw = pd.Series(np.random.rand(10))
# test one chunk
series = from_pandas_series(s_raw, chunk_size=10)
r = series.describe()
result = r.execute().fetch()
expected = s_raw.describe()
pd.testing.assert_series_equal(result, expected)
r = series.describe(percentiles=[])
result = r.execute().fetch()
expected = s_raw.describe(percentiles=[])
pd.testing.assert_series_equal(result, expected)
# test multi chunks
series = from_pandas_series(s_raw, chunk_size=3)
r = series.describe()
result = r.execute().fetch()
expected = s_raw.describe()
pd.testing.assert_series_equal(result, expected)
r = series.describe(percentiles=[])
result = r.execute().fetch()
expected = s_raw.describe(percentiles=[])
pd.testing.assert_series_equal(result, expected)
rs = np.random.RandomState(5)
df_raw = pd.DataFrame(rs.rand(10, 4), columns=list('abcd'))
df_raw['e'] = rs.randint(100, size=10)
# test one chunk
df = from_pandas_df(df_raw, chunk_size=10)
r = df.describe()
result = r.execute().fetch()
expected = df_raw.describe()
pd.testing.assert_frame_equal(result, expected)
r = series.describe(percentiles=[], include=np.float64)
result = r.execute().fetch()
expected = s_raw.describe(percentiles=[], include=np.float64)
pd.testing.assert_series_equal(result, expected)
# test multi chunks
df = from_pandas_df(df_raw, chunk_size=3)
r = df.describe()
result = r.execute().fetch()
expected = df_raw.describe()
pd.testing.assert_frame_equal(result, expected)
r = df.describe(percentiles=[], include=np.float64)
result = r.execute().fetch()
expected = df_raw.describe(percentiles=[], include=np.float64)
pd.testing.assert_frame_equal(result, expected)
# test skip percentiles
r = df.describe(percentiles=False, include=np.float64)
result = r.execute().fetch()
expected = df_raw.describe(percentiles=[], include=np.float64)
expected.drop(['50%'], axis=0, inplace=True)
pd.testing.assert_frame_equal(result, expected)
with pytest.raises(ValueError):
df.describe(percentiles=[1.1])
with pytest.raises(ValueError):
# duplicated values
df.describe(percentiles=[0.3, 0.5, 0.3])
# test input dataframe which has unknown shape
df = from_pandas_df(df_raw, chunk_size=3)
df2 = df[df['a'] < 0.5]
r = df2.describe()
result = r.execute().fetch()
expected = df_raw[df_raw['a'] < 0.5].describe()
pd.testing.assert_frame_equal(result, expected)
def test_data_frame_apply_execute(setup):
cols = [chr(ord('A') + i) for i in range(10)]
df_raw = pd.DataFrame(dict((c, [i ** 2 for i in range(20)]) for c in cols))
old_chunk_store_limit = options.chunk_store_limit
try:
options.chunk_store_limit = 20
df = from_pandas_df(df_raw, chunk_size=5)
r = df.apply('ffill')
result = r.execute().fetch()
expected = df_raw.apply('ffill')
pd.testing.assert_frame_equal(result, expected)
r = df.apply(['sum', 'max'])
result = r.execute().fetch()
expected = df_raw.apply(['sum', 'max'])
pd.testing.assert_frame_equal(result, expected)
r = df.apply(np.sqrt)
result = r.execute().fetch()
expected = df_raw.apply(np.sqrt)
pd.testing.assert_frame_equal(result, expected)
r = df.apply(lambda x: pd.Series([1, 2]))
result = r.execute().fetch()
expected = df_raw.apply(lambda x: pd.Series([1, 2]))
pd.testing.assert_frame_equal(result, expected)
r = df.apply(np.sum, axis='index')
result = r.execute().fetch()
expected = df_raw.apply(np.sum, axis='index')
pd.testing.assert_series_equal(result, expected)
r = df.apply(np.sum, axis='columns')
result = r.execute().fetch()
expected = df_raw.apply(np.sum, axis='columns')
pd.testing.assert_series_equal(result, expected)
r = df.apply(lambda x: [1, 2], axis=1)
result = r.execute().fetch()
expected = df_raw.apply(lambda x: [1, 2], axis=1)
pd.testing.assert_series_equal(result, expected)
r = df.apply(lambda x: pd.Series([1, 2], index=['foo', 'bar']), axis=1)
result = r.execute().fetch()
expected = df_raw.apply(lambda x: pd.Series([1, 2], index=['foo', 'bar']), axis=1)
pd.testing.assert_frame_equal(result, expected)
r = df.apply(lambda x: [1, 2], axis=1, result_type='expand')
result = r.execute().fetch()
expected = df_raw.apply(lambda x: [1, 2], axis=1, result_type='expand')
pd.testing.assert_frame_equal(result, expected)
r = df.apply(lambda x: list(range(10)), axis=1, result_type='reduce')
result = r.execute().fetch()
expected = df_raw.apply(lambda x: list(range(10)), axis=1, result_type='reduce')
pd.testing.assert_series_equal(result, expected)
r = df.apply(lambda x: list(range(10)), axis=1, result_type='broadcast')
result = r.execute().fetch()
expected = df_raw.apply(lambda x: list(range(10)), axis=1, result_type='broadcast')
pd.testing.assert_frame_equal(result, expected)
finally:
options.chunk_store_limit = old_chunk_store_limit
def test_series_apply_execute(setup):
idxes = [chr(ord('A') + i) for i in range(20)]
s_raw = pd.Series([i ** 2 for i in range(20)], index=idxes)
series = from_pandas_series(s_raw, chunk_size=5)
r = series.apply('add', args=(1,))
result = r.execute().fetch()
expected = s_raw.apply('add', args=(1,))
pd.testing.assert_series_equal(result, expected)
r = series.apply(['sum', 'max'])
result = r.execute().fetch()
expected = s_raw.apply(['sum', 'max'])
pd.testing.assert_series_equal(result, expected)
r = series.apply(np.sqrt)
result = r.execute().fetch()
expected = s_raw.apply(np.sqrt)
pd.testing.assert_series_equal(result, expected)
r = series.apply('sqrt')
result = r.execute().fetch()
expected = s_raw.apply('sqrt')
pd.testing.assert_series_equal(result, expected)
r = series.apply(lambda x: [x, x + 1], convert_dtype=False)
result = r.execute().fetch()
expected = s_raw.apply(lambda x: [x, x + 1], convert_dtype=False)
pd.testing.assert_series_equal(result, expected)
s_raw2 = pd.Series([np.array([1, 2, 3]), np.array([4, 5, 6])])
series = from_pandas_series(s_raw2)
dtypes = pd.Series([np.dtype(float)] * 3)
r = series.apply(pd.Series, output_type='dataframe',
dtypes=dtypes)
result = r.execute().fetch()
expected = s_raw2.apply(pd.Series)
pd.testing.assert_frame_equal(result, expected)
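# Hedged aside (not a Mars test): the plain-pandas behaviour mirrored just above —
# applying pd.Series to a Series of arrays expands each array into its own set of
# columns, which is why the Mars version has to be told output_type and dtypes.
def _example_pandas_apply_series_expansion():
    s = pd.Series([np.array([1, 2, 3]), np.array([4, 5, 6])])
    expanded = s.apply(pd.Series)   # a 2 x 3 DataFrame
    return expanded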
@pytest.mark.skipif(pa is None, reason='pyarrow not installed')
def test_apply_with_arrow_dtype_execution(setup):
df1 = pd.DataFrame({'a': [1, 2, 1],
'b': ['a', 'b', 'a']})
df = from_pandas_df(df1)
df['b'] = df['b'].astype('Arrow[string]')
r = df.apply(lambda row: str(row[0]) + row[1], axis=1)
result = r.execute().fetch()
expected = df1.apply(lambda row: str(row[0]) + row[1], axis=1)
pd.testing.assert_series_equal(result, expected)
s1 = df1['b']
s = from_pandas_series(s1)
s = s.astype('arrow_string')
r = s.apply(lambda x: x + '_suffix')
result = r.execute().fetch()
expected = s1.apply(lambda x: x + '_suffix')
pd.testing.assert_series_equal(result, expected)
def test_transform_execute(setup):
cols = [chr(ord('A') + i) for i in range(10)]
df_raw = pd.DataFrame(dict((c, [i ** 2 for i in range(20)]) for c in cols))
idx_vals = [chr(ord('A') + i) for i in range(20)]
s_raw = pd.Series([i ** 2 for i in range(20)], index=idx_vals)
def rename_fn(f, new_name):
f.__name__ = new_name
return f
old_chunk_store_limit = options.chunk_store_limit
try:
options.chunk_store_limit = 20
# DATAFRAME CASES
df = from_pandas_df(df_raw, chunk_size=5)
# test transform scenarios on data frames
r = df.transform(lambda x: list(range(len(x))))
result = r.execute().fetch()
expected = df_raw.transform(lambda x: list(range(len(x))))
pd.testing.assert_frame_equal(result, expected)
r = df.transform(lambda x: list(range(len(x))), axis=1)
result = r.execute().fetch()
expected = df_raw.transform(lambda x: list(range(len(x))), axis=1)
pd.testing.assert_frame_equal(result, expected)
r = df.transform(['cumsum', 'cummax', lambda x: x + 1])
result = r.execute().fetch()
expected = df_raw.transform(['cumsum', 'cummax', lambda x: x + 1])
pd.testing.assert_frame_equal(result, expected)
fn_dict = OrderedDict([
('A', 'cumsum'),
('D', ['cumsum', 'cummax']),
('F', lambda x: x + 1),
])
r = df.transform(fn_dict)
result = r.execute().fetch()
expected = df_raw.transform(fn_dict)
pd.testing.assert_frame_equal(result, expected)
r = df.transform(lambda x: x.iloc[:-1], _call_agg=True)
result = r.execute().fetch()
expected = df_raw.agg(lambda x: x.iloc[:-1])
pd.testing.assert_frame_equal(result, expected)
r = df.transform(lambda x: x.iloc[:-1], axis=1, _call_agg=True)
result = r.execute().fetch()
expected = df_raw.agg(lambda x: x.iloc[:-1], axis=1)
pd.testing.assert_frame_equal(result, expected)
fn_list = [rename_fn(lambda x: x.iloc[1:].reset_index(drop=True), 'f1'),
lambda x: x.iloc[:-1].reset_index(drop=True)]
r = df.transform(fn_list, _call_agg=True)
result = r.execute().fetch()
expected = df_raw.agg(fn_list)
pd.testing.assert_frame_equal(result, expected)
r = df.transform(lambda x: x.sum(), _call_agg=True)
result = r.execute().fetch()
expected = df_raw.agg(lambda x: x.sum())
pd.testing.assert_series_equal(result, expected)
fn_dict = OrderedDict([
('A', rename_fn(lambda x: x.iloc[1:].reset_index(drop=True), 'f1')),
('D', [rename_fn(lambda x: x.iloc[1:].reset_index(drop=True), 'f1'),
lambda x: x.iloc[:-1].reset_index(drop=True)]),
('F', lambda x: x.iloc[:-1].reset_index(drop=True)),
])
r = df.transform(fn_dict, _call_agg=True)
result = r.execute().fetch()
expected = df_raw.agg(fn_dict)
pd.testing.assert_frame_equal(result, expected)
# SERIES CASES
series = from_pandas_series(s_raw, chunk_size=5)
# test transform scenarios on series
r = series.transform(lambda x: x + 1)
result = r.execute().fetch()
expected = s_raw.transform(lambda x: x + 1)
pd.testing.assert_series_equal(result, expected)
r = series.transform(['cumsum', lambda x: x + 1])
result = r.execute().fetch()
expected = s_raw.transform(['cumsum', lambda x: x + 1])
pd.testing.assert_frame_equal(result, expected)
# test transform on string dtype
df_raw = pd.DataFrame({'col1': ['str'] * 10, 'col2': ['string'] * 10})
df = from_pandas_df(df_raw, chunk_size=3)
r = df['col1'].transform(lambda x: x + '_suffix')
result = r.execute().fetch()
expected = df_raw['col1'].transform(lambda x: x + '_suffix')
pd.testing.assert_series_equal(result, expected)
r = df.transform(lambda x: x + '_suffix')
result = r.execute().fetch()
expected = df_raw.transform(lambda x: x + '_suffix')
pd.testing.assert_frame_equal(result, expected)
r = df['col2'].transform(lambda x: x + '_suffix', dtype=np.dtype('str'))
result = r.execute().fetch()
expected = df_raw['col2'].transform(lambda x: x + '_suffix')
pd.testing.assert_series_equal(result, expected)
finally:
options.chunk_store_limit = old_chunk_store_limit
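# Hedged aside (not a Mars test): why rename_fn above sets __name__ — when a list
# of functions is passed to transform/agg, pandas labels the output with each
# function's __name__, so distinct names keep the resulting columns distinguishable.
def _example_pandas_transform_labels():
    frame = pd.DataFrame({'x': [1.0, 4.0, 9.0]})
    plus_one = lambda v: v + 1
    plus_one.__name__ = 'plus_one'
    # Columns come back as a MultiIndex: ('x', 'plus_one') and ('x', 'sqrt').
    return frame.transform([plus_one, 'sqrt'])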
@pytest.mark.skipif(pa is None, reason='pyarrow not installed')
def test_transform_with_arrow_dtype_execution(setup):
df1 = pd.DataFrame({'a': [1, 2, 1],
'b': ['a', 'b', 'a']})
df = from_pandas_df(df1)
df['b'] = df['b'].astype('Arrow[string]')
r = df.transform({'b': lambda x: x + '_suffix'})
result = r.execute().fetch()
expected = df1.transform({'b': lambda x: x + '_suffix'})
pd.testing.assert_frame_equal(result, expected)
s1 = df1['b']
s = from_pandas_series(s1)
s = s.astype('arrow_string')
r = s.transform(lambda x: x + '_suffix')
result = r.execute().fetch()
expected = s1.transform(lambda x: x + '_suffix')
pd.testing.assert_series_equal(result, expected)
def test_string_method_execution(setup):
s = pd.Series(['s1,s2', 'ef,', 'dd', np.nan])
s2 = pd.concat([s, s, s])
series = from_pandas_series(s, chunk_size=2)
series2 = from_pandas_series(s2, chunk_size=2)
# test getitem
r = series.str[:3]
result = r.execute().fetch()
expected = s.str[:3]
pd.testing.assert_series_equal(result, expected)
# test split, expand=False
r = series.str.split(',', n=2)
result = r.execute().fetch()
expected = s.str.split(',', n=2)
pd.testing.assert_series_equal(result, expected)
# test split, expand=True
r = series.str.split(',', expand=True, n=1)
result = r.execute().fetch()
expected = s.str.split(',', expand=True, n=1)
pd.testing.assert_frame_equal(result, expected)
# test rsplit
r = series.str.rsplit(',', expand=True, n=1)
result = r.execute().fetch()
expected = s.str.rsplit(',', expand=True, n=1)
pd.testing.assert_frame_equal(result, expected)
# test cat all data
r = series2.str.cat(sep='/', na_rep='e')
result = r.execute().fetch()
expected = s2.str.cat(sep='/', na_rep='e')
assert result == expected
# test cat list
r = series.str.cat(['a', 'b', np.nan, 'c'])
result = r.execute().fetch()
expected = s.str.cat(['a', 'b', np.nan, 'c'])
pd.testing.assert_series_equal(result, expected)
# test cat series
r = series.str.cat(series.str.capitalize(), join='outer')
result = r.execute().fetch()
expected = s.str.cat(s.str.capitalize(), join='outer')
pd.testing.assert_series_equal(result, expected)
# test extractall
r = series.str.extractall(r"(?P<letter>[ab])(?P<digit>\d)")
result = r.execute().fetch()
expected = s.str.extractall(r"(?P<letter>[ab])(?P<digit>\d)")
pd.testing.assert_frame_equal(result, expected)
# test extract, expand=False
r = series.str.extract(r'[ab](\d)', expand=False)
result = r.execute().fetch()
expected = s.str.extract(r'[ab](\d)', expand=False)
pd.testing.assert_series_equal(result, expected)
# test extract, expand=True
r = series.str.extract(r'[ab](\d)', expand=True)
result = r.execute().fetch()
expected = s.str.extract(r'[ab](\d)', expand=True)
pd.testing.assert_frame_equal(result, expected)
def test_datetime_method_execution(setup):
# test datetime
s = pd.Series([pd.Timestamp('2020-1-1'),
pd.Timestamp('2020-2-1'),
np.nan])
series = from_pandas_series(s, chunk_size=2)
r = series.dt.year
result = r.execute().fetch()
expected = s.dt.year
pd.testing.assert_series_equal(result, expected)
r = series.dt.strftime('%m-%d-%Y')
result = r.execute().fetch()
expected = s.dt.strftime('%m-%d-%Y')
pd.testing.assert_series_equal(result, expected)
# test timedelta
s = pd.Series([pd.Timedelta('1 days'),
pd.Timedelta('3 days'),
np.nan])
series = from_pandas_series(s, chunk_size=2)
r = series.dt.days
result = r.execute().fetch()
expected = s.dt.days
pd.testing.assert_series_equal(result, expected)
def test_isin_execution(setup):
# one chunk in multiple chunks
a = pd.Series([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
b = pd.Series([2, 1, 9, 3])
sa = from_pandas_series(a, chunk_size=10)
sb = from_pandas_series(b, chunk_size=2)
result = sa.isin(sb).execute().fetch()
expected = a.isin(b)
pd.testing.assert_series_equal(result, expected)
# multiple chunk in one chunks
a = pd.Series([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
b = pd.Series([2, 1, 9, 3])
sa = from_pandas_series(a, chunk_size=2)
sb = from_pandas_series(b, chunk_size=4)
result = sa.isin(sb).execute().fetch()
expected = a.isin(b)
pd.testing.assert_series_equal(result, expected)
# multiple chunk in multiple chunks
a = pd.Series([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
b = pd.Series([2, 1, 9, 3])
sa = from_pandas_series(a, chunk_size=2)
sb = from_pandas_series(b, chunk_size=2)
result = sa.isin(sb).execute().fetch()
expected = a.isin(b)
pd.testing.assert_series_equal(result, expected)
a = pd.Series([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
b = pd.Series([2, 1, 9, 3])
sa = from_pandas_series(a, chunk_size=2)
result = sa.isin(sb).execute().fetch()
expected = a.isin(b)
pd.testing.assert_series_equal(result, expected)
a = pd.Series([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
b = np.array([2, 1, 9, 3])
sa = from_pandas_series(a, chunk_size=2)
sb = tensor(b, chunk_size=3)
result = sa.isin(sb).execute().fetch()
expected = a.isin(b)
pd.testing.assert_series_equal(result, expected)
a = pd.Series([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
b = {2, 1, 9, 3} # set
sa = from_pandas_series(a, chunk_size=2)
result = sa.isin(sb).execute().fetch()
expected = a.isin(b)
pd.testing.assert_series_equal(result, expected)
rs = np.random.RandomState(0)
raw = pd.DataFrame(rs.randint(1000, size=(10, 3)))
df = from_pandas_df(raw, chunk_size=(5, 2))
# set
b = {2, 1, raw[1][0]}
r = df.isin(b)
result = r.execute().fetch()
expected = raw.isin(b)
pd.testing.assert_frame_equal(result, expected)
# mars object
b = tensor([2, 1, raw[1][0]], chunk_size=2)
r = df.isin(b)
result = r.execute().fetch()
expected = raw.isin([2, 1, raw[1][0]])
pd.testing.assert_frame_equal(result, expected)
# dict
b = {1: tensor([2, 1, raw[1][0]], chunk_size=2),
2: [3, 10]}
r = df.isin(b)
result = r.execute().fetch()
expected = raw.isin({1: [2, 1, raw[1][0]], 2: [3, 10]})
pd.testing.assert_frame_equal(result, expected)
def test_cut_execution(setup):
session = setup
rs = np.random.RandomState(0)
raw = rs.random(15) * 1000
s = pd.Series(raw, index=[f'i{i}' for i in range(15)])
bins = [10, 100, 500]
ii = pd.interval_range(10, 500, 3)
labels = ['a', 'b']
t = tensor(raw, chunk_size=4)
series = from_pandas_series(s, chunk_size=4)
iii = from_pandas_index(ii, chunk_size=2)
# cut on Series
r = cut(series, bins)
result = r.execute().fetch()
pd.testing.assert_series_equal(result, pd.cut(s, bins))
r, b = cut(series, bins, retbins=True)
r_result = r.execute().fetch()
b_result = b.execute().fetch()
r_expected, b_expected = pd.cut(s, bins, retbins=True)
pd.testing.assert_series_equal(r_result, r_expected)
np.testing.assert_array_equal(b_result, b_expected)
# cut on tensor
r = cut(t, bins)
    # result and expected are arrays whose dtype is CategoricalDtype (see the sketch after this test)
result = r.execute().fetch()
expected = pd.cut(raw, bins)
assert len(result) == len(expected)
for r, e in zip(result, expected):
np.testing.assert_equal(r, e)
# one chunk
r = cut(s, tensor(bins, chunk_size=2), right=False, include_lowest=True)
result = r.execute().fetch()
pd.testing.assert_series_equal(result, pd.cut(s, bins, right=False, include_lowest=True))
# test labels
r = cut(t, bins, labels=labels)
    # result and expected are arrays whose dtype is CategoricalDtype
result = r.execute().fetch()
expected = pd.cut(raw, bins, labels=labels)
assert len(result) == len(expected)
for r, e in zip(result, expected):
np.testing.assert_equal(r, e)
r = cut(t, bins, labels=False)
    # result and expected are arrays of integer bin codes (labels=False)
result = r.execute().fetch()
expected = pd.cut(raw, bins, labels=False)
np.testing.assert_array_equal(result, expected)
# test labels which is tensor
labels_t = tensor(['a', 'b'], chunk_size=1)
r = cut(raw, bins, labels=labels_t, include_lowest=True)
    # result and expected are arrays whose dtype is CategoricalDtype
result = r.execute().fetch()
expected = pd.cut(raw, bins, labels=labels, include_lowest=True)
assert len(result) == len(expected)
for r, e in zip(result, expected):
np.testing.assert_equal(r, e)
# test labels=False
r, b = cut(raw, ii, labels=False, retbins=True)
    # result and expected are arrays of integer bin codes (labels=False)
r_result, b_result = session.fetch(*session.execute(r, b))
r_expected, b_expected = pd.cut(raw, ii, labels=False, retbins=True)
for r, e in zip(r_result, r_expected):
np.testing.assert_equal(r, e)
pd.testing.assert_index_equal(b_result, b_expected)
# test bins which is md.IntervalIndex
r, b = cut(series, iii, labels=tensor(labels, chunk_size=1), retbins=True)
r_result = r.execute().fetch()
b_result = b.execute().fetch()
r_expected, b_expected = pd.cut(s, ii, labels=labels, retbins=True)
pd.testing.assert_series_equal(r_result, r_expected)
pd.testing.assert_index_equal(b_result, b_expected)
# test duplicates
bins2 = [0, 2, 4, 6, 10, 10]
r, b = cut(s, bins2, labels=False, retbins=True,
right=False, duplicates='drop')
r_result = r.execute().fetch()
b_result = b.execute().fetch()
r_expected, b_expected = pd.cut(s, bins2, labels=False, retbins=True,
right=False, duplicates='drop')
pd.testing.assert_series_equal(r_result, r_expected)
np.testing.assert_array_equal(b_result, b_expected)
# test integer bins
r = cut(series, 3)
result = r.execute().fetch()
pd.testing.assert_series_equal(result, pd.cut(s, 3))
r, b = cut(series, 3, right=False, retbins=True)
r_result, b_result = session.fetch(*session.execute(r, b))
r_expected, b_expected = pd.cut(s, 3, right=False, retbins=True)
pd.testing.assert_series_equal(r_result, r_expected)
np.testing.assert_array_equal(b_result, b_expected)
# test min max same
s2 = pd.Series([1.1] * 15)
r = cut(s2, 3)
result = r.execute().fetch()
pd.testing.assert_series_equal(result, pd.cut(s2, 3))
# test inf exist
s3 = s2.copy()
s3[-1] = np.inf
with pytest.raises(ValueError):
cut(s3, 3).execute()
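# Hedged aside (not a Mars test): what plain pd.cut returns in the two labelling
# modes compared above — a Categorical of intervals by default, and a plain array
# of integer bin codes when labels=False.
def _example_pandas_cut_return_types():
    values = np.array([15.0, 150.0, 450.0])
    cats = pd.cut(values, [10, 100, 500])                  # Categorical of intervals
    codes = pd.cut(values, [10, 100, 500], labels=False)   # ndarray of bin codes
    return cats, codes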
def test_transpose_execution(setup):
raw = pd.DataFrame({"a": ['1', '2', '3'], "b": ['5', '-6', '7'], "c": ['1', '2', '3']})
# test 1 chunk
df = from_pandas_df(raw)
result = df.transpose().execute().fetch()
pd.testing.assert_frame_equal(result, raw.transpose())
# test multi chunks
df = from_pandas_df(raw, chunk_size=2)
result = df.transpose().execute().fetch()
pd.testing.assert_frame_equal(result, raw.transpose())
df = from_pandas_df(raw, chunk_size=2)
result = df.T.execute().fetch()
pd.testing.assert_frame_equal(result, raw.transpose())
# dtypes are varied
raw = pd.DataFrame({"a": [1.1, 2.2, 3.3], "b": [5, -6, 7], "c": [1, 2, 3]})
df = from_pandas_df(raw, chunk_size=2)
result = df.transpose().execute().fetch()
pd.testing.assert_frame_equal(result, raw.transpose())
raw = pd.DataFrame({"a": [1.1, 2.2, 3.3], "b": ['5', '-6', '7']})
df = from_pandas_df(raw, chunk_size=2)
result = df.transpose().execute().fetch()
pd.testing.assert_frame_equal(result, raw.transpose())
# Transposing from results of other operands
raw = pd.DataFrame(np.arange(0, 100).reshape(10, 10))
df = DataFrame(arange(0, 100, chunk_size=5).reshape(10, 10))
result = df.transpose().execute().fetch()
pd.testing.assert_frame_equal(result, raw.transpose())
df = DataFrame(rand(100, 100, chunk_size=10))
raw = df.to_pandas()
result = df.transpose().execute().fetch()
pd.testing.assert_frame_equal(result, raw.transpose())
def test_to_numeric_execution(setup):
rs = np.random.RandomState(0)
s = pd.Series(rs.randint(5, size=100))
s[rs.randint(100)] = np.nan
# test 1 chunk
series = from_pandas_series(s)
r = to_numeric(series)
pd.testing.assert_series_equal(r.execute().fetch(),
pd.to_numeric(s))
# test multi chunks
series = from_pandas_series(s, chunk_size=20)
r = to_numeric(series)
pd.testing.assert_series_equal(r.execute().fetch(),
pd.to_numeric(s))
# test object dtype
s = pd.Series(['1.0', 2, -3, '2.0'])
series = from_pandas_series(s)
r = to_numeric(series)
pd.testing.assert_series_equal(r.execute().fetch(),
pd.to_numeric(s))
# test errors and downcast
s = pd.Series(['appple', 2, -3, '2.0'])
series = from_pandas_series(s)
r = to_numeric(series, errors='ignore', downcast='signed')
pd.testing.assert_series_equal(r.execute().fetch(),
pd.to_numeric(s, errors='ignore', downcast='signed'))
# test list data
l = ['1.0', 2, -3, '2.0']
r = to_numeric(l)
np.testing.assert_array_equal(r.execute().fetch(),
pd.to_numeric(l))
def test_q_cut_execution(setup):
rs = np.random.RandomState(0)
raw = rs.random(15) * 1000
s = pd.Series(raw, index=[f'i{i}' for i in range(15)])
series = from_pandas_series(s)
r = qcut(series, 3)
result = r.execute().fetch()
expected = pd.qcut(s, 3)
pd.testing.assert_series_equal(result, expected)
r = qcut(s, 3)
result = r.execute().fetch()
expected = pd.qcut(s, 3)
pd.testing.assert_series_equal(result, expected)
series = from_pandas_series(s)
r = qcut(series, [0.3, 0.5, 0.7])
result = r.execute().fetch()
expected = pd.qcut(s, [0.3, 0.5, 0.7])
pd.testing.assert_series_equal(result, expected)
r = qcut(range(5), 3)
result = r.execute().fetch()
expected = pd.qcut(range(5), 3)
assert isinstance(result, type(expected))
pd.testing.assert_series_equal(pd.Series(result),
pd.Series(expected))
r = qcut(range(5), [0.2, 0.5])
result = r.execute().fetch()
expected = pd.qcut(range(5), [0.2, 0.5])
assert isinstance(result, type(expected))
pd.testing.assert_series_equal(pd.Series(result),
pd.Series(expected))
r = qcut(range(5), tensor([0.2, 0.5]))
result = r.execute().fetch()
expected = pd.qcut(range(5), [0.2, 0.5])
assert isinstance(result, type(expected))
pd.testing.assert_series_equal(pd.Series(result),
pd.Series(expected))
def test_shift_execution(setup):
# test dataframe
rs = np.random.RandomState(0)
raw = pd.DataFrame(rs.randint(1000, size=(10, 8)),
columns=['col' + str(i + 1) for i in range(8)])
df = from_pandas_df(raw, chunk_size=5)
for periods in (2, -2, 6, -6):
for axis in (0, 1):
for fill_value in (None, 0, 1.):
r = df.shift(periods=periods, axis=axis,
fill_value=fill_value)
try:
result = r.execute().fetch()
expected = raw.shift(periods=periods, axis=axis,
fill_value=fill_value)
pd.testing.assert_frame_equal(result, expected, check_dtype=False)
except AssertionError as e: # pragma: no cover
raise AssertionError(
f'Failed when periods: {periods}, axis: {axis}, fill_value: {fill_value}'
) from e
raw2 = raw.copy()
raw2.index = pd.date_range('2020-1-1', periods=10)
raw2.columns = pd.date_range('2020-3-1', periods=8)
df2 = from_pandas_df(raw2, chunk_size=5)
# test freq not None
for periods in (2, -2):
for axis in (0, 1):
for fill_value in (None, 0, 1.):
r = df2.shift(periods=periods, freq='D', axis=axis,
fill_value=fill_value)
try:
result = r.execute().fetch()
expected = raw2.shift(periods=periods, freq='D', axis=axis,
fill_value=fill_value)
pd.testing.assert_frame_equal(result, expected)
except AssertionError as e: # pragma: no cover
raise AssertionError(
f'Failed when periods: {periods}, axis: {axis}, fill_value: {fill_value}') from e
# test tshift
r = df2.tshift(periods=1)
result = r.execute().fetch()
expected = raw2.tshift(periods=1)
pd.testing.assert_frame_equal(result, expected)
with pytest.raises(ValueError):
_ = df.tshift(periods=1)
# test series
s = raw.iloc[:, 0]
series = from_pandas_series(s, chunk_size=5)
for periods in (0, 2, -2, 6, -6):
for fill_value in (None, 0, 1.):
r = series.shift(periods=periods, fill_value=fill_value)
try:
result = r.execute().fetch()
expected = s.shift(periods=periods, fill_value=fill_value)
pd.testing.assert_series_equal(result, expected)
except AssertionError as e: # pragma: no cover
raise AssertionError(
f'Failed when periods: {periods}, fill_value: {fill_value}') from e
s2 = raw2.iloc[:, 0]
# test freq not None
series2 = from_pandas_series(s2, chunk_size=5)
for periods in (2, -2):
for fill_value in (None, 0, 1.):
r = series2.shift(periods=periods, freq='D', fill_value=fill_value)
try:
result = r.execute().fetch()
expected = s2.shift(periods=periods, freq='D', fill_value=fill_value)
pd.testing.assert_series_equal(result, expected)
except AssertionError as e: # pragma: no cover
raise AssertionError(
f'Failed when periods: {periods}, fill_value: {fill_value}') from e
def test_diff_execution(setup):
rs = np.random.RandomState(0)
raw = pd.DataFrame(rs.randint(1000, size=(10, 8)),
columns=['col' + str(i + 1) for i in range(8)])
raw1 = raw.copy()
raw1['col4'] = raw1['col4'] < 400
r = from_pandas_df(raw1, chunk_size=(10, 5)).diff(-1)
pd.testing.assert_frame_equal(r.execute().fetch(),
raw1.diff(-1))
r = from_pandas_df(raw1, chunk_size=5).diff(-1)
pd.testing.assert_frame_equal(r.execute().fetch(),
raw1.diff(-1))
r = from_pandas_df(raw, chunk_size=(5, 8)).diff(1, axis=1)
pd.testing.assert_frame_equal(r.execute().fetch(),
raw.diff(1, axis=1))
r = from_pandas_df(raw, chunk_size=5).diff(1, axis=1)
pd.testing.assert_frame_equal(r.execute().fetch(),
raw.diff(1, axis=1), check_dtype=False)
# test series
s = raw.iloc[:, 0]
s1 = s.copy() < 400
r = from_pandas_series(s, chunk_size=10).diff(-1)
pd.testing.assert_series_equal(r.execute().fetch(),
s.diff(-1))
r = from_pandas_series(s, chunk_size=5).diff(-1)
pd.testing.assert_series_equal(r.execute().fetch(),
s.diff(-1))
r = from_pandas_series(s1, chunk_size=5).diff(1)
pd.testing.assert_series_equal(r.execute().fetch(),
s1.diff(1))
def test_value_counts_execution(setup):
rs = np.random.RandomState(0)
s = pd.Series(rs.randint(5, size=100), name='s')
s[rs.randint(100)] = np.nan
# test 1 chunk
series = from_pandas_series(s, chunk_size=100)
r = series.value_counts()
pd.testing.assert_series_equal(r.execute().fetch(),
s.value_counts())
r = series.value_counts(bins=5, normalize=True)
pd.testing.assert_series_equal(r.execute().fetch(),
s.value_counts(bins=5, normalize=True))
# test multi chunks
series = from_pandas_series(s, chunk_size=30)
r = series.value_counts(method='tree')
pd.testing.assert_series_equal(r.execute().fetch(),
s.value_counts())
r = series.value_counts(method='tree', normalize=True)
pd.testing.assert_series_equal(r.execute().fetch(),
s.value_counts(normalize=True))
# test bins and normalize
r = series.value_counts(method='tree', bins=5, normalize=True)
pd.testing.assert_series_equal(r.execute().fetch(),
s.value_counts(bins=5, normalize=True))
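# Hedged aside (not a Mars test): plain-pandas value_counts with bins, as compared
# above — the values are bucketed first (as pd.cut would do), so the result is
# indexed by intervals rather than by the original values.
def _example_pandas_value_counts_bins():
    s = pd.Series([1, 1, 2, 5, 9])
    return s.value_counts(bins=2, normalize=True)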
def test_astype(setup):
rs = np.random.RandomState(0)
raw = pd.DataFrame(rs.randint(1000, size=(20, 8)),
columns=['c' + str(i + 1) for i in range(8)])
# single chunk
df = from_pandas_df(raw)
r = df.astype('int32')
result = r.execute().fetch()
expected = raw.astype('int32')
pd.testing.assert_frame_equal(expected, result)
    # multiple chunks
df = from_pandas_df(raw, chunk_size=6)
r = df.astype('int32')
result = r.execute().fetch()
expected = raw.astype('int32')
pd.testing.assert_frame_equal(expected, result)
# dict type
df = from_pandas_df(raw, chunk_size=5)
r = df.astype({'c1': 'int32', 'c2': 'float', 'c8': 'str'})
result = r.execute().fetch()
expected = raw.astype({'c1': 'int32', 'c2': 'float', 'c8': 'str'})
pd.testing.assert_frame_equal(expected, result)
# test arrow_string dtype
df = from_pandas_df(raw, chunk_size=8)
r = df.astype({'c1': 'arrow_string'})
result = r.execute().fetch()
expected = raw.astype({'c1': 'arrow_string'})
pd.testing.assert_frame_equal(expected, result)
# test series
s = pd.Series(rs.randint(5, size=20))
series = from_pandas_series(s)
r = series.astype('int32')
result = r.execute().fetch()
expected = s.astype('int32')
pd.testing.assert_series_equal(result, expected)
series = from_pandas_series(s, chunk_size=6)
r = series.astype('arrow_string')
result = r.execute().fetch()
expected = s.astype('arrow_string')
pd.testing.assert_series_equal(result, expected)
# test index
raw = pd.Index(rs.randint(5, size=20))
mix = from_pandas_index(raw)
r = mix.astype('int32')
result = r.execute().fetch()
expected = raw.astype('int32')
pd.testing.assert_index_equal(result, expected)
    # multiple chunks
series = from_pandas_series(s, chunk_size=6)
r = series.astype('str')
result = r.execute().fetch()
expected = s.astype('str')
pd.testing.assert_series_equal(result, expected)
# test category
raw = pd.DataFrame(rs.randint(3, size=(20, 8)),
columns=['c' + str(i + 1) for i in range(8)])
df = from_pandas_df(raw)
r = df.astype('category')
result = r.execute().fetch()
expected = raw.astype('category')
pd.testing.assert_frame_equal(expected, result)
df = from_pandas_df(raw)
r = df.astype({'c1': 'category', 'c8': 'int32', 'c4': 'str'})
result = r.execute().fetch()
expected = raw.astype({'c1': 'category', 'c8': 'int32', 'c4': 'str'})
pd.testing.assert_frame_equal(expected, result)
df = from_pandas_df(raw, chunk_size=5)
r = df.astype('category')
result = r.execute().fetch()
expected = raw.astype('category')
pd.testing.assert_frame_equal(expected, result)
df = from_pandas_df(raw, chunk_size=3)
r = df.astype({'c1': 'category', 'c8': 'int32', 'c4': 'str'})
result = r.execute().fetch()
expected = raw.astype({'c1': 'category', 'c8': 'int32', 'c4': 'str'})
pd.testing.assert_frame_equal(expected, result)
df = from_pandas_df(raw, chunk_size=6)
r = df.astype({'c1': 'category', 'c5': 'float', 'c2': 'int32',
'c7': pd.CategoricalDtype([1, 3, 4, 2]),
'c4': pd.CategoricalDtype([1, 3, 2])})
result = r.execute().fetch()
expected = raw.astype({'c1': 'category', 'c5': 'float', 'c2': 'int32',
'c7': pd.CategoricalDtype([1, 3, 4, 2]),
'c4': pd.CategoricalDtype([1, 3, 2])})
pd.testing.assert_frame_equal(expected, result)
df = from_pandas_df(raw, chunk_size=8)
r = df.astype({'c2': 'category'})
result = r.execute().fetch()
expected = raw.astype({'c2': 'category'})
pd.testing.assert_frame_equal(expected, result)
# test series category
raw = pd.Series(np.random.choice(['a', 'b', 'c'], size=(10,)))
series = from_pandas_series(raw, chunk_size=4)
result = series.astype('category').execute().fetch()
expected = raw.astype('category')
pd.testing.assert_series_equal(expected, result)
series = from_pandas_series(raw, chunk_size=3)
result = series.astype(
pd.CategoricalDtype(['a', 'c', 'b']), copy=False).execute().fetch()
expected = raw.astype(pd.CategoricalDtype(['a', 'c', 'b']), copy=False)
pd.testing.assert_series_equal(expected, result)
series = from_pandas_series(raw, chunk_size=6)
result = series.astype(
pd.CategoricalDtype(['a', 'c', 'b', 'd'])).execute().fetch()
expected = raw.astype(pd.CategoricalDtype(['a', 'c', 'b', 'd']))
pd.testing.assert_series_equal(expected, result)
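# Hedged aside (not a Mars test): astype with an explicit CategoricalDtype, as
# exercised above — values that are not in the declared categories become NaN.
def _example_pandas_astype_explicit_categories():
    s = pd.Series(['a', 'b', 'c'])
    return s.astype(pd.CategoricalDtype(['a', 'b']))   # 'c' becomes NaN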
def test_drop(setup):
# test dataframe drop
rs = np.random.RandomState(0)
raw = pd.DataFrame(rs.randint(1000, size=(20, 8)),
columns=['c' + str(i + 1) for i in range(8)])
df = from_pandas_df(raw, chunk_size=3)
columns = ['c2', 'c4', 'c5', 'c6']
index = [3, 6, 7]
r = df.drop(columns=columns, index=index)
pd.testing.assert_frame_equal(r.execute().fetch(),
raw.drop(columns=columns, index=index))
idx_series = from_pandas_series(pd.Series(index))
r = df.drop(idx_series)
pd.testing.assert_frame_equal(r.execute().fetch(),
raw.drop(pd.Series(index)))
df.drop(columns, axis=1, inplace=True)
pd.testing.assert_frame_equal(df.execute().fetch(),
raw.drop(columns, axis=1))
del df['c3']
pd.testing.assert_frame_equal(df.execute().fetch(),
raw.drop(columns + ['c3'], axis=1))
ps = df.pop('c8')
pd.testing.assert_frame_equal(df.execute().fetch(),
raw.drop(columns + ['c3', 'c8'], axis=1))
pd.testing.assert_series_equal(ps.execute().fetch(),
raw['c8'])
# test series drop
raw = pd.Series(rs.randint(1000, size=(20,)))
series = from_pandas_series(raw, chunk_size=3)
r = series.drop(index=index)
pd.testing.assert_series_equal(r.execute().fetch(),
raw.drop(index=index))
# test index drop
ser = pd.Series(range(20))
rs.shuffle(ser)
raw = pd.Index(ser)
idx = from_pandas_index(raw)
r = idx.drop(index)
pd.testing.assert_index_equal(r.execute().fetch(),
raw.drop(index))
def test_melt(setup):
rs = np.random.RandomState(0)
raw = pd.DataFrame(rs.randint(1000, size=(20, 8)),
columns=['c' + str(i + 1) for i in range(8)])
df = from_pandas_df(raw, chunk_size=3)
r = df.melt(id_vars=['c1'], value_vars=['c2', 'c4'])
pd.testing.assert_frame_equal(
r.execute().fetch().sort_values(['c1', 'variable']).reset_index(drop=True),
raw.melt(id_vars=['c1'], value_vars=['c2', 'c4']).sort_values(['c1', 'variable']).reset_index(drop=True)
)
def test_drop_duplicates(setup):
# test dataframe drop
rs = np.random.RandomState(0)
raw = pd.DataFrame(rs.randint(1000, size=(20, 5)),
columns=['c' + str(i + 1) for i in range(5)],
index=['i' + str(j) for j in range(20)])
duplicate_lines = rs.randint(1000, size=5)
for i in [1, 3, 10, 11, 15]:
raw.iloc[i] = duplicate_lines
with option_context({'combine_size': 2}):
# test dataframe
for chunk_size in [(8, 3), (20, 5)]:
df = from_pandas_df(raw, chunk_size=chunk_size)
if chunk_size[0] < len(raw):
methods = ['tree', 'subset_tree', 'shuffle']
else:
# 1 chunk
methods = [None]
for method in methods:
for subset in [None, 'c1', ['c1', 'c2']]:
for keep in ['first', 'last', False]:
for ignore_index in [True, False]:
try:
r = df.drop_duplicates(method=method, subset=subset,
keep=keep, ignore_index=ignore_index)
result = r.execute().fetch()
try:
expected = raw.drop_duplicates(subset=subset,
keep=keep, ignore_index=ignore_index)
except TypeError:
# ignore_index is supported in pandas 1.0
expected = raw.drop_duplicates(subset=subset,
keep=keep)
if ignore_index:
expected.reset_index(drop=True, inplace=True)
pd.testing.assert_frame_equal(result, expected)
except Exception as e: # pragma: no cover
raise AssertionError(
f'failed when method={method}, subset={subset}, '
f'keep={keep}, ignore_index={ignore_index}') from e
# test series and index
s = raw['c3']
ind = pd.Index(s)
for tp, obj in [('series', s), ('index', ind)]:
for chunk_size in [8, 20]:
to_m = from_pandas_series if tp == 'series' else from_pandas_index
mobj = to_m(obj, chunk_size=chunk_size)
if chunk_size < len(obj):
methods = ['tree', 'shuffle']
else:
# 1 chunk
methods = [None]
for method in methods:
for keep in ['first', 'last', False]:
try:
r = mobj.drop_duplicates(method=method, keep=keep)
result = r.execute().fetch()
expected = obj.drop_duplicates(keep=keep)
cmp = pd.testing.assert_series_equal \
if tp == 'series' else pd.testing.assert_index_equal
cmp(result, expected)
except Exception as e: # pragma: no cover
raise AssertionError(f'failed when method={method}, keep={keep}') from e
# test inplace
series = from_pandas_series(s, chunk_size=11)
series.drop_duplicates(inplace=True)
result = series.execute().fetch()
expected = s.drop_duplicates()
pd.testing.assert_series_equal(result, expected)
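# Hedged aside (not a Mars test): the pandas-version fallback used in the
# try/except above — drop_duplicates(ignore_index=...) only exists from pandas
# 1.0, so older versions emulate it with reset_index(drop=True).
def _example_drop_duplicates_compat(frame, keep='first'):
    try:
        return frame.drop_duplicates(keep=keep, ignore_index=True)
    except TypeError:
        return frame.drop_duplicates(keep=keep).reset_index(drop=True)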
def test_duplicated(setup):
# test dataframe drop
rs = np.random.RandomState(0)
raw = pd.DataFrame(rs.randint(1000, size=(20, 5)),
columns=['c' + str(i + 1) for i in range(5)],
index=['i' + str(j) for j in range(20)])
duplicate_lines = rs.randint(1000, size=5)
for i in [1, 3, 10, 11, 15]:
raw.iloc[i] = duplicate_lines
with option_context({'combine_size': 2}):
# test dataframe
for chunk_size in [(8, 3), (20, 5)]:
df = from_pandas_df(raw, chunk_size=chunk_size)
if chunk_size[0] < len(raw):
methods = ['tree', 'subset_tree', 'shuffle']
else:
# 1 chunk
methods = [None]
for method in methods:
for subset in [None, 'c1', ['c1', 'c2']]:
for keep in ['first', 'last', False]:
try:
r = df.duplicated(method=method, subset=subset, keep=keep)
result = r.execute().fetch()
expected = raw.duplicated(subset=subset, keep=keep)
pd.testing.assert_series_equal(result, expected)
except Exception as e: # pragma: no cover
raise AssertionError(
f'failed when method={method}, subset={subset}, '
f'keep={keep}') from e
# test series
s = raw['c3']
for tp, obj in [('series', s)]:
for chunk_size in [8, 20]:
to_m = from_pandas_series if tp == 'series' else from_pandas_index
mobj = to_m(obj, chunk_size=chunk_size)
if chunk_size < len(obj):
methods = ['tree', 'shuffle']
else:
# 1 chunk
methods = [None]
for method in methods:
for keep in ['first', 'last', False]:
try:
r = mobj.duplicated(method=method, keep=keep)
result = r.execute().fetch()
expected = obj.duplicated(keep=keep)
cmp = pd.testing.assert_series_equal \
if tp == 'series' else pd.testing.assert_index_equal
cmp(result, expected)
except Exception as e: # pragma: no cover
raise AssertionError(f'failed when method={method}, keep={keep}') from e
def test_memory_usage_execution(setup):
dtypes = ['int64', 'float64', 'complex128', 'object', 'bool']
data = dict([(t, np.ones(shape=500).astype(t))
for t in dtypes])
raw = pd.DataFrame(data)
df = from_pandas_df(raw, chunk_size=(500, 2))
r = df.memory_usage(index=False)
pd.testing.assert_series_equal(r.execute().fetch(),
raw.memory_usage(index=False))
df = from_pandas_df(raw, chunk_size=(500, 2))
r = df.memory_usage(index=True)
pd.testing.assert_series_equal(r.execute().fetch(),
raw.memory_usage(index=True))
df = from_pandas_df(raw, chunk_size=(100, 3))
r = df.memory_usage(index=False)
pd.testing.assert_series_equal(r.execute().fetch(),
raw.memory_usage(index=False))
r = df.memory_usage(index=True)
pd.testing.assert_series_equal(r.execute().fetch(),
raw.memory_usage(index=True))
raw = pd.DataFrame(data, index=np.arange(500).astype('object'))
df = from_pandas_df(raw, chunk_size=(100, 3))
r = df.memory_usage(index=True)
pd.testing.assert_series_equal(r.execute().fetch(),
raw.memory_usage(index=True))
raw = pd.Series(np.ones(shape=500).astype('object'), name='s')
series = from_pandas_series(raw)
r = series.memory_usage(index=True)
assert r.execute().fetch() == raw.memory_usage(index=True)
series = from_pandas_series(raw, chunk_size=100)
r = series.memory_usage(index=False)
assert r.execute().fetch() == raw.memory_usage(index=False)
series = from_pandas_series(raw, chunk_size=100)
r = series.memory_usage(index=True)
assert r.execute().fetch() == raw.memory_usage(index=True)
raw = pd.Series(np.ones(shape=500).astype('object'),
index=np.arange(500).astype('object'), name='s')
series = from_pandas_series(raw, chunk_size=100)
r = series.memory_usage(index=True)
assert r.execute().fetch() == raw.memory_usage(index=True)
raw = pd.Index(np.arange(500), name='s')
index = from_pandas_index(raw)
r = index.memory_usage()
assert r.execute().fetch() == raw.memory_usage()
index = from_pandas_index(raw, chunk_size=100)
r = index.memory_usage()
assert r.execute().fetch() == raw.memory_usage()
def test_select_dtypes_execution(setup):
raw = pd.DataFrame({'a': np.random.rand(10),
'b': np.random.randint(10, size=10)})
df = from_pandas_df(raw, chunk_size=5)
r = df.select_dtypes(include=['float64'])
result = r.execute().fetch()
expected = raw.select_dtypes(include=['float64'])
pd.testing.assert_frame_equal(result, expected)
def test_map_chunk_execution(setup):
raw = pd.DataFrame(np.random.rand(10, 5),
columns=[f'col{i}' for i in range(5)])
df = from_pandas_df(raw, chunk_size=(5, 3))
def f1(pdf):
return pdf + 1
r = df.map_chunk(f1)
result = r.execute().fetch()
expected = raw + 1
pd.testing.assert_frame_equal(result, expected)
raw_s = raw['col1']
series = from_pandas_series(raw_s, chunk_size=5)
r = series.map_chunk(f1)
result = r.execute().fetch()
expected = raw_s + 1
pd.testing.assert_series_equal(result, expected)
def f2(pdf):
return pdf.sum(axis=1)
df = from_pandas_df(raw, chunk_size=5)
r = df.map_chunk(f2, output_type='series')
result = r.execute().fetch()
expected = raw.sum(axis=1)
pd.testing.assert_series_equal(result, expected)
    raw = pd.DataFrame({'a': [f's{i}' for i in range(10)],
'b': np.arange(10)})
df = from_pandas_df(raw, chunk_size=5)
def f3(pdf):
return pdf['a'].str.slice(1).astype(int) + pdf['b']
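    # Without output_type, Mars cannot infer the output metadata for f3, so this call is expected to raise.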
with pytest.raises(TypeError):
r = df.map_chunk(f3)
_ = r.execute().fetch()
r = df.map_chunk(f3, output_type='series')
result = r.execute(extra_config={'check_dtypes': False}).fetch()
expected = f3(raw)
pd.testing.assert_series_equal(result, expected)
def f4(pdf):
ret = pd.DataFrame(columns=['a', 'b'])
ret['a'] = pdf['a'].str.slice(1).astype(int)
ret['b'] = pdf['b']
return ret
with pytest.raises(TypeError):
r = df.map_chunk(f4, output_type='dataframe')
_ = r.execute().fetch()
r = df.map_chunk(f4, output_type='dataframe',
dtypes=pd.Series([np.dtype(int), raw['b'].dtype], index=['a', 'b']))
result = r.execute().fetch()
expected = f4(raw)
pd.testing.assert_frame_equal(result, expected)
raw2 = pd.DataFrame({'a': [np.array([1, 2, 3]), np.array([4, 5, 6])]})
df2 = from_pandas_df(raw2)
dtypes = pd.Series([np.dtype(float)] * 3)
r = df2.map_chunk(lambda x: x['a'].apply(pd.Series), output_type='dataframe',
dtypes=dtypes)
assert r.shape == (2, 3)
pd.testing.assert_series_equal(r.dtypes, dtypes)
result = r.execute().fetch()
expected = raw2.apply(lambda x: x['a'], axis=1, result_type='expand')
pd.testing.assert_frame_equal(result, expected)
raw = pd.DataFrame(np.random.rand(10, 5),
columns=[f'col{i}' for i in range(5)])
df = from_pandas_df(raw, chunk_size=(5, 3))
def f5(pdf, chunk_index):
return pdf + 1 + chunk_index[0]
r = df.map_chunk(f5, with_chunk_index=True)
result = r.execute().fetch()
expected = (raw + 1).add(np.arange(10) // 5, axis=0)
pd.testing.assert_frame_equal(result, expected)
raw_s = raw['col1']
series = from_pandas_series(raw_s, chunk_size=5)
r = series.map_chunk(f5, with_chunk_index=True)
result = r.execute().fetch()
expected = raw_s + 1 + np.arange(10) // 5
| pd.testing.assert_series_equal(result, expected) | pandas.testing.assert_series_equal |
from .models import Applicant, Position, FormQuestion, Education, Stream, Classification, FormAnswer
from .NLP.helpers.format_text import reprocess_line_breaks, strip_bullet_points
from .NLP.run_NLP_scripts import generate_nlp_extracts
from .models import Applicant, Position, FormQuestion, Education, Stream, Classification, FormAnswer, NlpExtract
import random
import re
import string
import pandas as pd
import tabula
from celery import current_task
from fuzzywuzzy import fuzz
from pandas import options
###################
# Functions to determine what type of information is contained in an item
###################
def is_question(item):
first_column = item[item.columns[0]]
return first_column.str.startswith("Question - Français").any()
def is_qualification(item):
first_column = item[item.columns[0]]
return first_column.str.contains("Question - Français / French:").any()
def is_stream(item):
if not str(item.shape) == "(1, 1)":
for index, row in item.iterrows():
found_string = item.iloc[index, 1]
if re.search(r"^Are you applying", found_string, re.IGNORECASE):
return True
return False
def is_education(item):
first_column = item[item.columns[0]]
return first_column.str.contains("Niveau d'études / Academic Level:").any()
def is_classification(item):
for index, row in item.iterrows():
found_string = item.iloc[index, 0]
ratio = fuzz.partial_ratio("Situation professionnelle", found_string)
if ratio > 90:
return True
return False
###################
# Helper functions that perform minor tasks (formatting/checks)
###################
def is_final_answer(item):
applicant_answer = get_column_value(
"Réponse du postulant / Applicant Answer:", item)
    return applicant_answer.lower() == "nan"
def check_if_table_valid(table):
    # A valid table is a non-empty DataFrame (the isinstance check also rules out None).
    return isinstance(table, pd.DataFrame) and not table.empty
def clean_data(x):
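    # Flatten carriage returns and newlines to spaces; 'jJio' appears to be a sentinel for real line breaks and is restored last.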
x = re.sub(r'\r', '\n', x)
x = re.sub(r'\n', ' ', x)
x = re.sub(r'jJio', '\n', x)
return x.strip()
def text_between(start_string, end_string, text):
# Effectively returns the string between the first occurrence of start_string and end_string in text
extracted_text = text.split(start_string, 1)[1].split(end_string, 1)[0]
return extracted_text
def get_column_value(search_string, item):
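    # Column 0 holds labels and column 1 holds values; fuzzy matching (>= 90) tolerates PDF-extraction noise in the labels.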
pairings = dict(zip(item[0], item[1]))
for key in pairings.keys():
if fuzz.partial_ratio(search_string, key) >= 90:
return pairings[key]
return "N/A"
def retrieve_question(table, all_questions):
question_text = parse_question_text(
table).replace('\n', " ").replace(" ", "")
for other_question in all_questions:
other_question_text = other_question.question_text.replace(
'\n', " ").replace(" ", "")
if fuzz.ratio(question_text, other_question_text) > 95:
return other_question
return None
def create_short_question_text(long_text):
if "*Recent" in long_text:
return long_text.split("*Recent", 1)[0]
elif "**Significant" in long_text:
return long_text.split("*Significant", 1)[0]
elif "*Significant" in long_text:
return long_text.split("*Significant", 1)[0]
else:
return long_text
def find_and_get_req(position, question_text):
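    # Fuzzy-match the question text against every requirement description and accept the best match only if it scores above 85.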
best_req = 0
best_matched_requirement = None
for requirement in position.requirement_set.all():
comparison = fuzz.partial_ratio(requirement.description, question_text)
if comparison > best_req:
best_req = comparison
best_matched_requirement = requirement
if best_matched_requirement:
if fuzz.partial_ratio(best_matched_requirement.description, question_text) > 85:
return best_matched_requirement
return None
def does_exist(question, all_questions):
question_text = question.question_text.replace('\n', " ").replace(" ", "")
for other_question in all_questions:
other_question_text = other_question.question_text.replace(
'\n', " ").replace(" ", "")
if fuzz.ratio(question_text, other_question_text) > 95:
return True
return False
def split_on_slash_take_second(text):
    return text.replace('\n', ' ').split(" / ")[1]
###################
# Functions that deal with one-line values for applicants
###################
def parse_citizenship(item):
applicant_citizenship = get_column_value(
"Citoyenneté / Citizenship:", item)
if "Canadian Citizen" in applicant_citizenship:
applicant_citizenship = "Canadian Citizen"
return applicant_citizenship
def parse_working_ability(item):
working_ability = get_column_value(
"Connaissance pratique / Working ability:", item)
return working_ability
def parse_english_ability(working_ability):
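    # working_ability is expected to look like "Français / French : <fr> / <en> Anglais / English: <fr> / <en>"; keep the English half.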
english_working_ability = working_ability.replace('\n', ' ').split(
"Anglais / English:", 1)[1].split(" / ")[1]
return english_working_ability
def parse_french_ability(working_ability):
french_working_ability = split_on_slash_take_second(
text_between("Français / French :", "Anglais / English:",
working_ability))
return french_working_ability
# Generic function
def parse_single_line_boolean(defining_string, item):
value = get_column_value(defining_string, item)
if "No" in value:
return "False"
else:
return "True"
# Generic function
def parse_single_line_value(defining_string, item):
value = get_column_value(defining_string, item)
return split_on_slash_take_second(value)
def fill_in_single_line_arguments(item, applicant):
# Fill in single line entries that require very little processing.
first_column = item[item.columns[0]].astype(str)
if first_column.str.startswith("Citoyenneté").any():
applicant.citizenship = parse_citizenship(item)
if first_column.str.startswith("Droit de priorité").any():
applicant.priority = parse_single_line_boolean("Droit de priorité / Priority entitlement:", item)
if first_column.str.contains("combattants").any():
applicant.veteran_preference = parse_single_line_boolean("anciens combattants", item)
if first_column.str.startswith("Première langue officielle").any():
applicant.first_official_language = parse_single_line_value(
"Première langue officielle / First official language:", item)
if first_column.str.startswith("Connaissance pratique").any():
working_ability = parse_working_ability(item)
applicant.french_working_ability = parse_french_ability(working_ability)
applicant.english_working_ability = parse_english_ability(working_ability)
if first_column.str.contains("Examen écrit / Written exam:").any():
applicant.written_exam = parse_single_line_value("Examen écrit / Written exam:", item)
if first_column.str.contains("Correspondance: / Correspondence:").any():
applicant.correspondence = parse_single_line_value("Correspondance: / Correspondence:", item)
if first_column.str.contains("Entrevue / Interview:").any():
applicant.interview = parse_single_line_value("Entrevue / Interview:", item)
return applicant
###################
# Corrective functions to mend the nuances caused by the limitations of pdf
###################
def correct_split_item(tables):
# Corrects splits between tables. (Not including splits between questions or educations)
for index, item in enumerate(tables):
if check_if_table_valid(item):
if ((index + 1) != len(tables)) and not str(item.shape) == "(1, 1)":
item2 = tables[index + 1]
if item2.empty:
if (index + 2) != len(tables):
item2 = tables[index + 2]
if check_if_table_valid(item2):
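                    # A leading 'nan' cell means the next table's first row continues the previous cell's text; glue it back on before concatenating.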
if "nan" == item2.iloc[0, 0].lower():
item.iloc[-1, -1] = item.iloc[-1, -1] + \
item2.iloc[0, 1]
item2 = item2.iloc[1:, ]
item = | pd.concat([item, item2], ignore_index=True) | pandas.concat |
import datetime
import json
from pathlib import Path
import boto3
import pandas as pd
import requests
def get_utc_days(format='%Y-%m-%d'):
utc = datetime.datetime.utcnow()
yesterday = utc.date() - datetime.timedelta(1)
return utc.strftime(format), yesterday.strftime(format)
def getOrders(to_csv=True):
liveBoardLink = 'https://www.whiskyinvestdirect.com/view_market_json.do'
res = requests.get(liveBoardLink)
if res.status_code != requests.codes.ok:
res.raise_for_status()
return
res = res.json()
pitches = res["market"]["pitches"]
pitches = pd.DataFrame(pitches)
pitches.set_index(['pitchId'], inplace=True)
# Generate and update pitches table
pitch_table_cols = ['barrelTypeCode', 'bondQuarter', 'bondYear',
'categoryName', 'considerationCurrency', 'distillery',
'securityId', 'size', 'soldOut']
pitch_table = pitches[pitch_table_cols]
if to_csv:
PITCHFILE = Path('/tmp/pitches.csv')
pitch_table.to_csv(PITCHFILE, mode='w')
# Generate and append pricing table
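    # 'prices' holds nested per-pitch records; expand them into columns, then melt to long (pitchId, variable, value) form.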
pricing = pitches['prices'].apply(pd.Series)
pricing.reset_index(inplace=True)
pricing = | pd.melt(pricing, id_vars='pitchId') | pandas.melt |
import numpy as np
import pandas as pd
import os.path
from sklearn.model_selection import train_test_split
# Create GROUND TRUTH dataset
def ground_truth():
dir = os.getcwd() # Gets the current working directory
df = pd.read_csv(dir + '\\dataset\\train\\imbalanced_tweets.csv')
X_train, X_test, y_train, y_test = train_test_split(df['tweet'], df['label'], test_size=0.10, random_state = 42)
# Clear and combine datasets
train = pd.DataFrame(list(zip(y_train, X_train)), columns=['label', 'tweet'])
test = pd.DataFrame(list(zip(y_test, X_test)), columns=['label', 'tweet'])
train = train.sample(frac=1).reset_index(drop=True)
test = test.sample(frac=1).reset_index(drop=True)
count_0, count_1 = train['label'].value_counts()
print(count_1, count_0)
count_0, count_1 = test['label'].value_counts()
print(count_1, count_0)
train.head(20)
test.head(20)
train.to_csv(dir + '\\dataset\\train\\training_imbalanced_temp.csv')
test.to_csv(dir + '\\dataset\\train\\ground_truth.csv')
print("END SCRIPT")
# CREATE BALANCED DATASET
def balance_dataset():
dir = os.getcwd() # Gets the current working directory
train_file_A = dir + '\\dataset\\train\\training_imbalanced_temp.csv'
train_A = | pd.read_csv(train_file_A) | pandas.read_csv |
# Project: GBS Tool
# Author: Dr. <NAME>, <EMAIL>, denamics GmbH
# Date: March 29, 2018
# License: MIT License (see LICENSE file of this package for more information)
# Helper to get information about the base case (from input data).
import os
from distutils.util import strtobool
import pandas as pd
from bs4 import BeautifulSoup as soup
from Analyzer.DataRetrievers.getDataChannels import getDataChannels
from Analyzer.DataRetrievers.readNCFile import readNCFile
def getBasecase(projectName, rootProjectPath):
'''
Retrieve base case data and meta data required for initial estimate of search space boundaries and data
sparsing.
FUTUREFEATURE: Note that this does its own load calculation, which may be redundant or differ from load calculations
done in the InputHandler. This should be revisited in the future.
:return time: [Series] time vector
:return firmLoadP: [Series] firm load vector
:return varLoadP: [Series] variable (switchable, manageable, dispatchable) load vector
:return firmGenP: [Series] firm generation vector
:return varGenP: [Series] variable generation vector
:return allGenP: [DataFrame] contains time channel and all generator channels.
'''
# Read project meta data to get (a) all loads, (b) all generation, and their firm and variable subsets.
setupMetaHandle = open(os.path.join(rootProjectPath, 'InputData/Setup/' + projectName + 'Setup.xml'), 'r')
setupMetaData = setupMetaHandle.read()
setupMetaHandle.close()
setupMetaSoup = soup(setupMetaData, 'xml')
# Retrieve the time and firm load vectors
firmLoadPFileName = setupMetaSoup.loadProfileFile.get('value')
firmLoadPFile = readNCFile \
(os.path.join(rootProjectPath, 'InputData/TimeSeriesData/ProcessedData/' + firmLoadPFileName))
time = pd.Series(firmLoadPFile.time[:])
firmLoadP = pd.Series((firmLoadPFile.value[:] + firmLoadPFile.offset ) * firmLoadPFile.scale)
# Setup other data channels
firmGenP = pd.Series(firmLoadP.copy() * 0)
varGenP = pd.Series(firmLoadP.copy() * 0)
varLoadP = pd.Series(firmLoadP.copy() * 0)
allGenP = pd.DataFrame(time, columns=['time'])
# Get list if all components
components = setupMetaSoup.componentNames.get('value').split()
# Step through the given list of components and assign them to the correct data channel if appropriate
for cpt in components:
# load meta data for the component
cptMetaHandle = open(os.path.join(rootProjectPath, 'InputData/Components/' + cpt +'Descriptor.xml'), 'r')
cptMetaData = cptMetaHandle.read()
cptMetaHandle.close()
cptMetaSoup = soup(cptMetaData, 'xml')
# Read the type, if it is a source it is going into one of the generation channels
if cptMetaSoup.type.get('value') == 'source':
# Check if it can load follow, if true add it to the firmGenP channel
if strtobool(cptMetaSoup.isLoadFollowing.get('value')):
# Load associated time series - actual power for firmGenP
chName = cptMetaSoup.component.get('name') + 'P'
tempDf = getDataChannels(rootProjectPath, '/InputData/TimeSeriesData/ProcessedData/', chName)
val = pd.Series(tempDf[chName])
firmGenP = firmGenP + val
# Also add it to the allGenP dataframe
dfVal = pd.DataFrame(val, columns=[chName])
allGenP = pd.concat([allGenP, dfVal], axis=1)
# If it cannot load follow, it is a variable generator
else: # not strtobool(cptMetaSoup.isLoadFollowing.get('value'))
# Load associated time series - PAvail for varGenP if it exists
chName = cptMetaSoup.component.get('name') + 'PAvail'
if os.path.isfile(
os.path.join(rootProjectPath, 'InputData/TimeSeriesData/ProcessedData/', chName + '.nc')):
tempDf = getDataChannels(rootProjectPath, '/InputData/TimeSeriesData/ProcessedData/', chName)
else:
chName = cptMetaSoup.component.get('name') + 'P'
tempDf = getDataChannels(rootProjectPath, '/InputData/TimeSeriesData/ProcessedData/', chName)
val = pd.Series(tempDf[chName])
varGenP = varGenP + val
# if the type is source, and the name is not the same as the one in firmLoadPFileName, add to varLoadP
elif cptMetaSoup.type.get('value') == 'sink' and cptMetaSoup.component.get('name') != firmLoadPFileName[:-3]:
# Load associated time series - PAvail for varLoadP if it exists
chName = cptMetaSoup.component.get('name') + 'PAvail'
if os.path.isfile(os.path.join(rootProjectPath, 'InputData/TimeSeriesData/ProcessedData/', chName + '.nc')):
tempDf = getDataChannels(rootProjectPath, '/InputData/TimeSeriesData/ProcessedData/', chName)
else:
chName = cptMetaSoup.component.get('name') + 'P'
tempDf = getDataChannels(rootProjectPath, '/InputData/TimeSeriesData/ProcessedData/', chName)
val = pd.Series(tempDf[chName])
# add to the varLoadP variable
varLoadP = varLoadP + val
# if the type is sink-source (or source-sink, to plan for silly users...) add to varLoadP if negative and
# firmGenP is positive. This follows the sign convention discussed for energy storage (positive TOWARDS the
# grid; negative FROM the grid), see issue #87. And it posits that energy storage is either a variable load
# or firm generation (with variable PAvail).
elif cptMetaSoup.type.get('value') == 'sink-source' or cptMetaSoup.type.get('value') == 'source-sink':
# Load associated time series - actual power for firmGenP
chName = cptMetaSoup.component.get('name') + 'P'
tempDf = getDataChannels(rootProjectPath, '/InputData/TimeSeriesData/ProcessedData/', chName)
val = | pd.Series(tempDf[chName]) | pandas.Series |
import pandas as pd
import numpy as np
from tqdm import tqdm
from Bio.PDB import Selection, PDBParser
"""
This script extracts beads from the predicted structures in CASP13 and CASP14 after the competitions.
"""
def extract_beads(pdb_path):
amino_acids = pd.read_csv('/home/hyang/bio/erf/data/amino_acids.csv')
vocab_aa = [x.upper() for x in amino_acids.AA3C]
vocab_dict = {x.upper(): y for x, y in zip(amino_acids.AA3C, amino_acids.AA)}
p = PDBParser()
structure = p.get_structure('X', pdb_path)
residue_list = Selection.unfold_entities(structure, 'R')
ca_center_list = []
cb_center_list = []
res_name_list = []
res_num_list = []
chain_list = []
for res in residue_list:
if res.get_resname() not in vocab_aa:
# raise ValueError('protein has non natural amino acids')
continue
chain_list.append(res.parent.id)
res_name_list.append(vocab_dict[res.get_resname()])
res_num_list.append(res.id[1])
try:
ca_center_list.append(res['CA'].get_coord())
except KeyError:
return 0
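        # CB is used as the side-chain bead; glycine has no CB, so fall back to the CA coordinate.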
if res.get_resname() != 'GLY':
try:
cb_center_list.append(res['CB'].get_coord())
except KeyError:
return 0
else:
cb_center_list.append(res['CA'].get_coord())
ca_center = np.vstack(ca_center_list)
cb_center = np.vstack(cb_center_list)
df = pd.DataFrame({'chain_id': chain_list,
'group_num': res_num_list,
'group_name': res_name_list,
'x': ca_center[:, 0],
'y': ca_center[:, 1],
'z': ca_center[:, 2],
'xcb': cb_center[:, 0],
'ycb': cb_center[:, 1],
'zcb': cb_center[:, 2]})
df.to_csv(f'{pdb_path}_bead.csv', index=False)
return 1
def extract_casp13_14():
# casp_id = 'casp13'
casp_id = 'casp14'
root_dir = f'/home/hyang/bio/erf/data/decoys/{casp_id}/'
casp = pd.read_csv(f'{root_dir}/pdb_list.txt')['pdb'].values
modified_casp_id = []
for casp_id in tqdm(casp):
pdb_list = pd.read_csv(f'{root_dir}/{casp_id}/flist.txt')['pdb'].values
ca_only_list = []
for i, pdb_id in enumerate(pdb_list):
pdb_path = f'{root_dir}/{casp_id}/{pdb_id}'
result = extract_beads(pdb_path)
if result == 0:
# some structure prediction only has CA.
ca_only_list.append(pdb_id)
pdb_list[i] = '0'
if len(ca_only_list) > 0:
pdb_list = pdb_list[pdb_list != '0']
df = pd.DataFrame({'pdb': pdb_list})
df.to_csv(f'{root_dir}/{casp_id}/flist.txt', index=False)
modified_casp_id.append(casp_id)
def check_residue_num():
# some groups submit models for only parts of the domains, exclude those models.
casp_id = 'casp14'
root_dir = f'/home/hyang/bio/erf/data/decoys/{casp_id}/'
casp = pd.read_csv(f'{root_dir}/pdb_list.txt')['pdb'].values
for casp_id in tqdm(casp):
pdb_list = pd.read_csv(f'{root_dir}/{casp_id}/flist.txt')['pdb'].values
num = np.zeros(len(pdb_list))
for i, pdb_id in enumerate(pdb_list):
df = pd.read_csv(f'{root_dir}/{casp_id}/{pdb_id}_bead.csv')
num[i] = df.shape[0]
if len(np.unique(num)) > 1:
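            # Mixed residue counts mean some groups modelled only part of the domain; keep only models at the median length.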
seq_len = np.median(num)
pdb_list = pdb_list[(num == seq_len)]
df = pd.DataFrame({'pdb': pdb_list})
df.to_csv(f'{root_dir}/{casp_id}/flist.txt', index=False)
print(casp_id, seq_len, num)
def check_missing_residues():
    # check which casp14 evaluation units have gaps
casp_id = 'casp14'
root_dir = f'/home/hyang/bio/erf/data/decoys/{casp_id}/'
casp = pd.read_csv(f'{root_dir}/pdb_list.txt')['pdb'].values
no_missing_res_list = []
seq_len_list = []
idx = np.zeros(casp.shape[0])
for i, pdb in tqdm(enumerate(casp)):
pdb_list = | pd.read_csv(f'{root_dir}/{pdb}/flist.txt') | pandas.read_csv |
import pandas as pd
import pytest
# paso imports
from paso.base import Paso, PasoError
from paso.pre.encoders import Encoders
from loguru import logger
session = Paso(parameters_filepath="../../parameters/lesson.1.yaml").startup()
# 0
def test_Class_init_NoArg():
with pytest.raises(PasoError):
g = Encoders()
# 1
def test_Class_init_WrongScaler():
with pytest.raises(PasoError):
g = Encoders("GORG")
# BoxCoxScaler unit tests
# 2
def test_EncoderList(X):
assert Encoders("BaseNEncoder").encoders() == [
"BackwardDifferenceEncoder",
"BinaryEncoder",
"HashingEncoder",
"HelmertEncoder",
"OneHotEncoder",
"OrdinalEncoder",
"SumEncoder",
"PolynomialEncoder",
"BaseNEncoder",
"LeaveOneOutEncoder",
"TargetEncoder",
"WOEEncoder",
"MEstimateEncoder",
"JamesSteinEncoder",
"CatBoostEncoder",
"EmbeddingVectorEncoder",
]
# 3
def test_bad_encoder_name():
with pytest.raises(PasoError):
g = Encoders("fred")
# 4
def test_BaseNEncoder_no_df(X):
with pytest.raises(PasoError):
Encoders(description_filepath="../../descriptions/pre/encoders/OHE.yaml").train(
[["Male", 1], ["Female", 3], ["Female", 2]]
)
# 5
def test_OrdinaEncoders(X):
h = [["Male", 1], ["Female", 3], ["Female", 2]]
hdf = | pd.DataFrame(h) | pandas.DataFrame |
# -*- coding: utf-8 -*-
from datetime import timedelta
import pytest
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from pandas import (Timedelta,
period_range, Period, PeriodIndex,
_np_version_under1p10)
import pandas.core.indexes.period as period
class TestPeriodIndexArithmetic(object):
def test_pi_add_offset_array(self):
# GH#18849
pi = pd.PeriodIndex([pd.Period('2015Q1'), pd.Period('2016Q2')])
offs = np.array([pd.offsets.QuarterEnd(n=1, startingMonth=12),
pd.offsets.QuarterEnd(n=-2, startingMonth=12)])
res = pi + offs
expected = pd.PeriodIndex([pd.Period('2015Q2'), pd.Period('2015Q4')])
tm.assert_index_equal(res, expected)
unanchored = np.array([pd.offsets.Hour(n=1),
pd.offsets.Minute(n=-2)])
with pytest.raises(period.IncompatibleFrequency):
pi + unanchored
with pytest.raises(TypeError):
unanchored + pi
@pytest.mark.xfail(reason='GH#18824 radd doesnt implement this case')
def test_pi_radd_offset_array(self):
# GH#18849
pi = pd.PeriodIndex([pd.Period('2015Q1'), pd.Period('2016Q2')])
offs = np.array([pd.offsets.QuarterEnd(n=1, startingMonth=12),
pd.offsets.QuarterEnd(n=-2, startingMonth=12)])
res = offs + pi
expected = pd.PeriodIndex([pd.Period('2015Q2'), pd.Period('2015Q4')])
tm.assert_index_equal(res, expected)
def test_add_iadd(self):
rng = pd.period_range('1/1/2000', freq='D', periods=5)
other = pd.period_range('1/6/2000', freq='D', periods=5)
# previously performed setop union, now raises TypeError (GH14164)
with pytest.raises(TypeError):
rng + other
with pytest.raises(TypeError):
rng += other
# offset
# DateOffset
rng = pd.period_range('2014', '2024', freq='A')
result = rng + pd.offsets.YearEnd(5)
expected = pd.period_range('2019', '2029', freq='A')
tm.assert_index_equal(result, expected)
rng += pd.offsets.YearEnd(5)
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(365, 'D'),
timedelta(365), Timedelta(days=365)]:
msg = ('Input has different freq(=.+)? '
'from PeriodIndex\\(freq=A-DEC\\)')
with tm.assert_raises_regex(
period.IncompatibleFrequency, msg):
rng + o
rng = pd.period_range('2014-01', '2016-12', freq='M')
result = rng + pd.offsets.MonthEnd(5)
expected = pd.period_range('2014-06', '2017-05', freq='M')
tm.assert_index_equal(result, expected)
rng += pd.offsets.MonthEnd(5)
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(365, 'D'),
timedelta(365), Timedelta(days=365)]:
rng = pd.period_range('2014-01', '2016-12', freq='M')
msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=M\\)'
with tm.assert_raises_regex(
period.IncompatibleFrequency, msg):
rng + o
# Tick
offsets = [pd.offsets.Day(3), timedelta(days=3),
np.timedelta64(3, 'D'), pd.offsets.Hour(72),
timedelta(minutes=60 * 24 * 3), np.timedelta64(72, 'h'),
Timedelta('72:00:00')]
for delta in offsets:
rng = pd.period_range('2014-05-01', '2014-05-15', freq='D')
result = rng + delta
expected = pd.period_range('2014-05-04', '2014-05-18', freq='D')
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(4, 'h'),
timedelta(hours=23), Timedelta('23:00:00')]:
rng = pd.period_range('2014-05-01', '2014-05-15', freq='D')
msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=D\\)'
with tm.assert_raises_regex(
period.IncompatibleFrequency, msg):
rng + o
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), pd.offsets.Minute(120),
timedelta(minutes=120), np.timedelta64(120, 'm'),
Timedelta(minutes=120)]
for delta in offsets:
rng = pd.period_range('2014-01-01 10:00', '2014-01-05 10:00',
freq='H')
result = rng + delta
expected = pd.period_range('2014-01-01 12:00', '2014-01-05 12:00',
freq='H')
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
for delta in [pd.offsets.YearBegin(2), timedelta(minutes=30),
np.timedelta64(30, 's'), Timedelta(seconds=30)]:
rng = pd.period_range('2014-01-01 10:00', '2014-01-05 10:00',
freq='H')
msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=H\\)'
with tm.assert_raises_regex(
period.IncompatibleFrequency, msg):
rng + delta
with tm.assert_raises_regex(
period.IncompatibleFrequency, msg):
rng += delta
def test_pi_add_int(self, one):
# Variants of `one` for #19012
rng = pd.period_range('2000-01-01 09:00', freq='H', periods=10)
result = rng + one
expected = pd.period_range('2000-01-01 10:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng += one
tm.assert_index_equal(rng, expected)
@pytest.mark.parametrize('five', [5, np.array(5, dtype=np.int64)])
def test_sub(self, five):
rng = period_range('2007-01', periods=50)
result = rng - five
exp = rng + (-five)
tm.assert_index_equal(result, exp)
def test_sub_isub(self):
# previously performed setop, now raises TypeError (GH14164)
# TODO needs to wait on #13077 for decision on result type
rng = pd.period_range('1/1/2000', freq='D', periods=5)
other = pd.period_range('1/6/2000', freq='D', periods=5)
with pytest.raises(TypeError):
rng - other
with pytest.raises(TypeError):
rng -= other
# offset
# DateOffset
rng = pd.period_range('2014', '2024', freq='A')
result = rng - pd.offsets.YearEnd(5)
expected = pd.period_range('2009', '2019', freq='A')
tm.assert_index_equal(result, expected)
rng -= pd.offsets.YearEnd(5)
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(365, 'D'),
timedelta(365)]:
rng = pd.period_range('2014', '2024', freq='A')
msg = ('Input has different freq(=.+)? '
'from PeriodIndex\\(freq=A-DEC\\)')
with tm.assert_raises_regex(
period.IncompatibleFrequency, msg):
rng - o
rng = pd.period_range('2014-01', '2016-12', freq='M')
result = rng - pd.offsets.MonthEnd(5)
expected = pd.period_range('2013-08', '2016-07', freq='M')
tm.assert_index_equal(result, expected)
rng -= pd.offsets.MonthEnd(5)
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(365, 'D'),
timedelta(365)]:
rng = pd.period_range('2014-01', '2016-12', freq='M')
msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=M\\)'
with tm.assert_raises_regex(
period.IncompatibleFrequency, msg):
rng - o
# Tick
offsets = [pd.offsets.Day(3), timedelta(days=3),
np.timedelta64(3, 'D'), pd.offsets.Hour(72),
timedelta(minutes=60 * 24 * 3), np.timedelta64(72, 'h')]
for delta in offsets:
rng = pd.period_range('2014-05-01', '2014-05-15', freq='D')
result = rng - delta
expected = pd.period_range('2014-04-28', '2014-05-12', freq='D')
tm.assert_index_equal(result, expected)
rng -= delta
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(4, 'h'),
timedelta(hours=23)]:
rng = pd.period_range('2014-05-01', '2014-05-15', freq='D')
msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=D\\)'
with tm.assert_raises_regex(
period.IncompatibleFrequency, msg):
rng - o
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), pd.offsets.Minute(120),
timedelta(minutes=120), np.timedelta64(120, 'm')]
for delta in offsets:
rng = pd.period_range('2014-01-01 10:00', '2014-01-05 10:00',
freq='H')
result = rng - delta
expected = pd.period_range('2014-01-01 08:00', '2014-01-05 08:00',
freq='H')
tm.assert_index_equal(result, expected)
rng -= delta
tm.assert_index_equal(rng, expected)
for delta in [pd.offsets.YearBegin(2), timedelta(minutes=30),
np.timedelta64(30, 's')]:
rng = pd.period_range('2014-01-01 10:00', '2014-01-05 10:00',
freq='H')
msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=H\\)'
with tm.assert_raises_regex(
period.IncompatibleFrequency, msg):
rng + delta
with tm.assert_raises_regex(
period.IncompatibleFrequency, msg):
rng += delta
# int
rng = pd.period_range('2000-01-01 09:00', freq='H', periods=10)
result = rng - 1
expected = pd.period_range('2000-01-01 08:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng -= 1
| tm.assert_index_equal(rng, expected) | pandas.util.testing.assert_index_equal |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Analyze SON scan csv file. You can run this as a script.
Optional argument of script is a slice (notation 0:) or
list of indices (comma-soparated, e.g. 0,1,-2,-1).
Copyright <NAME> (2022) - Twitter: @hk_nien
License: MIT.
Created on Sat Feb 5 23:28:03 2022
"""
import sys
import os
from pathlib import Path
import re
import datetime
import pandas as pd
import numpy as np
def _get_1csv_df(csv_fname):
"""Load csv, return df; handle data without api_version, all_slots column"""
df = pd.read_csv(csv_fname, comment='#')
if 'api_version' not in df.columns:
df['api_version'] = 1
if 'xfields' not in df.columns:
df['xfields'] = ''
else:
df.loc[df['xfields'].isna(), 'xfields'] = ''
if 'all_slots' not in df.columns:
df['all_slots'] = ''
else:
df.loc[df['all_slots'].isna(), 'all_slots'] = ''
return df
def get_csv_as_dataframe(csv_fname='data-son/son_scan-latest.csv'):
"""Load CSV file(s) and do minor preprocessing.
Parameters:
- csv_fname: CSV filename (str) or list of str.
Return:
- df: DataFrame with CSV contents; timestamps converted to pandas Timestamp.
- scan_times: list of scan start times (Timestamps). Use this for
slicing the DataFrame into separate scans.
Note: csv files will be put into chronological order, but it won't handle
overlapping ranges for 'scan_time'.
"""
if isinstance(csv_fname, (str, Path)):
csv_fnames = [csv_fname]
else:
csv_fnames = list(csv_fname)
df_list = [_get_1csv_df(fn) for fn in csv_fnames]
df_list = sorted(df_list, key=lambda df: df.iloc[0]['scan_time'])
df = pd.concat(df_list).reset_index().drop(columns='index')
df['scan_time'] = pd.to_datetime(df['scan_time'])
df['apt_date'] = pd.to_datetime(df['apt_date'])
# Because of dummy rows, int columns become float.
for c in df.columns:
if c.startswith('num') and df[c].dtype != np.int64:
df.loc[df[c].isna(), c] = 0
df[c] = df[c].astype(int)
# figure out scan periods
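    # A gap of more than 15 minutes between consecutive rows marks the start of a new scan; the first row always counts as a start.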
dts = df['scan_time'].diff()
dts.iloc[0] = pd.Timedelta('1d')
scan_start_tms = df.loc[dts > pd.Timedelta('15min'), 'scan_time'].to_list()
return df, scan_start_tms
def _analyze_1scan_loc_mutations(df1, prev_addresses, silent=False):
"""Analyze DataFrame for one scan for location mutations.
Params:
- df1: 1-scan dataframe slice
- prev_addresses: set of previous-scan addresess; will be updated.
- silent: True to suppress output.
"""
tm0 = df1.iloc[0]['scan_time']
if np.all( | pd.isna(df1['apt_date']) | pandas.isna |
'''
Preprocessing Tranformers Based on sci-kit's API
By <NAME>
Created on June 12, 2017
'''
import copy
import pandas as pd
import numpy as np
from sklearn.base import BaseEstimator, TransformerMixin
from pymo.rotation_tools import Rotation
class MocapParameterizer(BaseEstimator, TransformerMixin):
def __init__(self, param_type = 'euler'):
'''
param_type = {'euler', 'quat', 'expmap', 'position'}
'''
self.param_type = param_type
def fit(self, X, y=None):
return self
def transform(self, X, y=None):
if self.param_type == 'euler':
return X
elif self.param_type == 'expmap':
return self._to_expmap(X)
elif self.param_type == 'quat':
return X
elif self.param_type == 'position':
return self._to_pos(X)
elif self.param_type == 'axis_angle':
return self._to_axis_angle(X)
else:
            raise UnsupportedParamError('Unsupported param: %s. Valid param types are: euler, quat, expmap, position, axis_angle' % self.param_type)
# return X
def inverse_transform(self, X, copy=None):
if self.param_type == 'euler':
return X
elif self.param_type == 'expmap':
return self._expmap_to_euler(X)
elif self.param_type == 'quat':
raise UnsupportedParamError('quat2euler is not supported')
elif self.param_type == 'position':
print('positions 2 eulers is not supported')
return X
else:
raise UnsupportedParamError('Unsupported param: %s. Valid param types are: euler, quat, expmap, position' % self.param_type)
def _to_pos(self, X):
'''Converts joints rotations in Euler angles to joint positions'''
Q = []
for track in X:
channels = []
titles = []
euler_df = track.values
# Create a new DataFrame to store the exponential map rep
pos_df = pd.DataFrame(index=euler_df.index)
# Copy the root rotations into the new DataFrame
# rxp = '%s_Xrotation'%track.root_name
# ryp = '%s_Yrotation'%track.root_name
# rzp = '%s_Zrotation'%track.root_name
# pos_df[rxp] = pd.Series(data=euler_df[rxp], index=pos_df.index)
# pos_df[ryp] = pd.Series(data=euler_df[ryp], index=pos_df.index)
# pos_df[rzp] = pd.Series(data=euler_df[rzp], index=pos_df.index)
# List the columns that contain rotation channels
rot_cols = [c for c in euler_df.columns if ('rotation' in c)]
# List the columns that contain position channels
pos_cols = [c for c in euler_df.columns if ('position' in c)]
# List the joints that are not end sites, i.e., have channels
joints = (joint for joint in track.skeleton)
tree_data = {}
for joint in track.traverse():
parent = track.skeleton[joint]['parent']
# Get the rotation columns that belong to this joint
rc = euler_df[[c for c in rot_cols if joint in c]]
# Get the position columns that belong to this joint
pc = euler_df[[c for c in pos_cols if joint in c]]
# Make sure the columns are organized in xyz order
if rc.shape[1] < 3:
euler_values = [[0,0,0] for f in rc.iterrows()]
else:
euler_values = [[f[1]['%s_Xrotation'%joint],
f[1]['%s_Yrotation'%joint],
f[1]['%s_Zrotation'%joint]] for f in rc.iterrows()]
################# in euler angle, the order of rotation axis is very important #####################
rotation_order = rc.columns[0][rc.columns[0].find('rotation') - 1] + rc.columns[1][rc.columns[1].find('rotation') - 1] + rc.columns[2][rc.columns[2].find('rotation') - 1] #rotation_order is string : 'XYZ' or'ZYX' or ...
####################################################################################################
if pc.shape[1] < 3:
pos_values = [[0,0,0] for f in pc.iterrows()]
else:
pos_values =[[f[1]['%s_Xposition'%joint],
f[1]['%s_Yposition'%joint],
f[1]['%s_Zposition'%joint]] for f in pc.iterrows()]
#euler_values = [[0,0,0] for f in rc.iterrows()] #for deugging
#pos_values = [[0,0,0] for f in pc.iterrows()] #for deugging
# Convert the eulers to rotation matrices
############################ input rotation order as Rotation class's argument #########################
rotmats = np.asarray([Rotation([f[0], f[1], f[2]], 'euler', rotation_order, from_deg=True).rotmat for f in euler_values])
########################################################################################################
tree_data[joint]=[
[], # to store the rotation matrix
[] # to store the calculated position
]
if track.root_name == joint:
tree_data[joint][0] = rotmats
# tree_data[joint][1] = np.add(pos_values, track.skeleton[joint]['offsets'])
tree_data[joint][1] = pos_values
else:
# for every frame i, multiply this joint's rotmat to the rotmat of its parent
tree_data[joint][0] = np.asarray([np.matmul(rotmats[i], tree_data[parent][0][i])
for i in range(len(tree_data[parent][0]))])
# add the position channel to the offset and store it in k, for every frame i
k = np.asarray([np.add(pos_values[i], track.skeleton[joint]['offsets'])
for i in range(len(tree_data[parent][0]))])
# multiply k to the rotmat of the parent for every frame i
q = np.asarray([np.matmul(k[i], tree_data[parent][0][i])
for i in range(len(tree_data[parent][0]))])
# add q to the position of the parent, for every frame i
tree_data[joint][1] = np.asarray([np.add(q[i], tree_data[parent][1][i])
for i in range(len(tree_data[parent][1]))])
# Create the corresponding columns in the new DataFrame
pos_df['%s_Xposition'%joint] = pd.Series(data=[e[0] for e in tree_data[joint][1]], index=pos_df.index)
pos_df['%s_Yposition'%joint] = pd.Series(data=[e[1] for e in tree_data[joint][1]], index=pos_df.index)
pos_df['%s_Zposition'%joint] = pd.Series(data=[e[2] for e in tree_data[joint][1]], index=pos_df.index)
new_track = track.clone()
new_track.values = pos_df
Q.append(new_track)
return Q
def _to_axis_angle(self, X):
'''Converts joints rotations in Euler angles to axis angle rotations'''
Q = []
for track in X:
# fix track names
# adapt joint name so that it's equal for either male or female
channels = []
titles = []
euler_df = track.values
# Create a new DataFrame to store the axis angle values
axis_anlge_df = pd.DataFrame(index=euler_df.index)
# Copy the root rotations into the new DataFrame
# rxp = '%s_Xrotation'%track.root_name
# ryp = '%s_Yrotation'%track.root_name
# rzp = '%s_Zrotation'%track.root_name
# pos_df[rxp] = pd.Series(data=euler_df[rxp], index=pos_df.index)
# pos_df[ryp] = pd.Series(data=euler_df[ryp], index=pos_df.index)
# pos_df[rzp] = pd.Series(data=euler_df[rzp], index=pos_df.index)
# List the columns that contain rotation channels
rot_cols = [c for c in euler_df.columns if ('rotation' in c)]
# List the columns that contain position channels
pos_cols = [c for c in euler_df.columns if ('position' in c)]
# List the joints that are not end sites, i.e., have channels
joints = (joint for joint in track.skeleton)
tree_data = {}
for joint in track.traverse():
parent = track.skeleton[joint]['parent']
# Get the rotation columns that belong to this joint
rc = euler_df[[c for c in rot_cols if joint in c]]
# Get the position columns that belong to this joint
pc = euler_df[[c for c in pos_cols if joint in c]]
# Make sure the columns are organized in xyz order
if rc.shape[1] < 3:
euler_values = [[0,0,0] for f in rc.iterrows()]
else:
euler_values = [[f[1]['%s_Xrotation'%joint],
f[1]['%s_Yrotation'%joint],
f[1]['%s_Zrotation'%joint]] for f in rc.iterrows()]
################# in euler angle, the order of rotation axis is very important #####################
rotation_order = rc.columns[0][rc.columns[0].find('rotation') - 1] + rc.columns[1][rc.columns[1].find('rotation') - 1] + rc.columns[2][rc.columns[2].find('rotation') - 1] #rotation_order is string : 'XYZ' or'ZYX' or ...
####################################################################################################
if pc.shape[1] < 3:
pos_values = [[0,0,0] for f in pc.iterrows()]
else:
pos_values =[[f[1]['%s_Xposition'%joint],
f[1]['%s_Yposition'%joint],
f[1]['%s_Zposition'%joint]] for f in pc.iterrows()]
#euler_values = [[0,0,0] for f in rc.iterrows()] #for deugging
#pos_values = [[0,0,0] for f in pc.iterrows()] #for deugging
# Convert the eulers to axis angles
############################ input rotation order as Rotation class's argument #########################
axis_angles = np.asarray([Rotation([f[0], f[1], f[2]], 'euler', rotation_order, from_deg=True).get_euler_axis() for f in euler_values])
########################################################################################################
# Create the corresponding columns in the new DataFrame
axis_anlge_df['%s_Xposition'%joint] = pd.Series(data=[e[0] for e in pos_values], index=axis_anlge_df.index)
axis_anlge_df['%s_Yposition'%joint] = pd.Series(data=[e[1] for e in pos_values], index=axis_anlge_df.index)
axis_anlge_df['%s_Zposition'%joint] = | pd.Series(data=[e[2] for e in pos_values], index=axis_anlge_df.index) | pandas.Series |
import tarfile
import os
import shutil
import pydicom
import pandas as pd
from pydicom.filebase import DicomBytesIO
import pathlib
import re
def GetTailFolder(parent, path):
path = os.path.join(
parent, os.path.basename(os.path.normpath(path)))
return path
class Tarloader():
def __init__(self, opt):
self.opt = opt
self.MakeTarDestFolder()
self.MakeOutDestFolder()
self.files = self.GetFiles()
self.tarfiles = self.GetTarFiles()
self.meta_cols = meta_cols = ['DirName','BodyPartExamined',
'Modality','PatientID', 'SOPInstanceUID']
self.col_dict = {col: [] for col in meta_cols}
def extractDcmList(self, tar, tarFold):
tarfolders = self.extractTarFolders(tar)
idx = [i for i, s in enumerate(tarfolders) if self.patientID in s]
dicomFiles = []
for i in range(0, len(tarfolders[idx[0]])):
f = tar.extractfile(tar.getmember(tarfolders[idx[0]][i]))
if os.path.splitext(tar.getmember(tarfolders[idx[0]][i]).name)[1] =='.dcm':
content = f.read()
raw = DicomBytesIO(content)
ds = pydicom.dcmread(raw)
dicomFiles.append(ds)
return dicomFiles
def getUniqueSubFolds(self, tar):
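        # Unique top-level folder names inside the tar archive.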
return list(set([pathlib.Path(i).parts[0] for i in tar.getnames()]))
def extractTarFolders(self, tar):
archives = []
liste = self.getUniqueSubFolds(tar)
for i in liste:
archives.append([x for x in tar.getnames() if re.match(i,x)])
return archives
def extractMeta(self, tarFold, tar):
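        # Read only the first DICOM file found in the folder; a single header is enough for one metadata row.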
for i in range(0, len(tarFold)):
f = tar.extractfile(tar.getmember(tarFold[i]))
if os.path.splitext(tar.getmember(tarFold[i]).name)[1] =='.dcm':
content = f.read()
raw = DicomBytesIO(content)
ds = pydicom.dcmread(raw)
break
return ds
def ReadTarFile(self, tarpath):
tar = tarfile.open(tarpath)
return tar
def ReadTar(self):
for tarpath in self.tarfiles:
tar = self.ReadTarFile(tarpath)
tarFolders = self.extractTarFolders(tar)
for tarFold in tarFolders:
ds = self.extractMeta(tarFold, tar)
setattr(ds,'DirName', os.path.basename(tarpath))
for col in self.meta_cols:
self.col_dict[col].append(str(getattr(ds, col)))
df = | pd.DataFrame.from_dict(self.col_dict) | pandas.DataFrame.from_dict |
"""Tests for running combo cases and deaths indicator."""
import logging
from datetime import date
from itertools import product
import os
import unittest
from unittest.mock import patch, call
import pandas as pd
import numpy as np
from delphi_combo_cases_and_deaths.run import (
run_module,
extend_raw_date_range,
get_updated_dates,
sensor_signal,
combine_usafacts_and_jhu,
compute_special_geo_dfs,
COLUMN_MAPPING)
from delphi_combo_cases_and_deaths.constants import METRICS, SMOOTH_TYPES, SENSORS
from delphi_utils.geomap import GeoMapper
TEST_LOGGER = logging.getLogger()
def test_issue_dates():
"""The smoothed value for a particular date is computed from the raw
values for a span of dates. We want users to be able to see in the
API all the raw values that went into the smoothed computation,
for transparency and peer review. This means that each issue
should contain more days of raw data than smoothed data.
"""
reference_dr = [date.today(), date.today()]
params = {'indicator': {'date_range': reference_dr}}
n_changed = 0
variants = [sensor_signal(metric, sensor, smoother) for
metric, sensor, smoother in
product(METRICS, SENSORS, SMOOTH_TYPES)]
variants_changed = []
for sensor_name, _ in variants:
dr = extend_raw_date_range(params, sensor_name)
if dr[0] != reference_dr[0]:
n_changed += 1
variants_changed.append(sensor_name)
assert n_changed == len(variants) / 2, f"""
Raw variants should post more days than smoothed.
All variants: {variants}
Date-extended variants: {variants_changed}
"""
@patch("covidcast.covidcast.signal")
def test_unstable_sources(mock_covidcast_signal):
"""Verify that combine_usafacts_and_jhu assembles the combined data
frame correctly for all cases where 0, 1, or both signals are
available.
"""
date_count = [1]
def jhu(geo, c=date_count):
if geo == "state":
geo_val = "pr"
elif geo == "msa":
geo_val = "38660"
else:
geo_val = "72001"
return pd.DataFrame(
[(date.fromordinal(c[0]),geo_val,1,1,1)],
columns="timestamp geo_value value stderr sample_size".split())
def uf(geo, c=date_count):
if geo == "state":
geo_val = "ny"
elif geo == "msa":
geo_val = "10580"
else:
geo_val = "36001"
return pd.DataFrame(
[(date.fromordinal(c[0]),geo_val,1,1,1)],
columns="timestamp geo_value value stderr sample_size".split())
def make_mock(geo):
# The first two in each row provide a unique_date array of the appropriate length for
# query of the latter two (in combine_usafacts_and_jhu)
return [
# 1 0
uf(geo), None, uf(geo), None,
# 0 1
None, jhu(geo),
# 1 1
uf(geo), jhu(geo), uf(geo), jhu(geo),
# 0 0
None, None
]
geos = ["state", "county", "msa", "nation", "hhs"]
outputs = [df for g in geos for df in make_mock(g)]
mock_covidcast_signal.side_effect = outputs[:]
date_range = [date.today(), date.today()]
calls = 0
for geo in geos:
for config, call_size, expected_size in [
("1 0", 4, 1),
("0 1", 2, 0),
("1 1", 4, 1 if geo in ["nation", "hhs"] else 2),
("0 0", 2, 0)
]:
df = combine_usafacts_and_jhu("", geo, date_range, TEST_LOGGER, fetcher=mock_covidcast_signal)
assert df.size == expected_size * len(COLUMN_MAPPING), f"""
Wrong number of rows in combined data frame for the number of available signals.
input for {geo} {config}:
{outputs[calls]}
{outputs[calls + 1]}
output:
{df}
expected rows: {expected_size}
"""
calls += call_size
date_count[0] += 1
@patch("covidcast.covidcast.signal")
def test_multiple_issues(mock_covidcast_signal):
"""Verify that only the most recent issue is retained."""
mock_covidcast_signal.side_effect = [
pd.DataFrame({
"geo_value": ["01000", "01000"],
"value": [1, 10],
"timestamp": [20200101, 20200101],
"issue": [20200102, 20200104]
}),
None
] * 2
result = combine_usafacts_and_jhu("confirmed_incidence_num", "county", date_range=(0, 1), logger=TEST_LOGGER, fetcher=mock_covidcast_signal)
pd.testing.assert_frame_equal(
result,
pd.DataFrame(
{
"geo_id": ["01000"],
"val": [10],
"timestamp": [20200101],
"issue": [20200104]
},
index=[1]
)
)
def test_compute_special_geo_dfs():
test_df = pd.DataFrame({"geo_id": ["01000", "01001"],
"val": [50, 100],
"timestamp": [20200101, 20200101]},)
df = compute_special_geo_dfs(test_df, "_prop", "nation")
state_pop = GeoMapper().get_crosswalk("state_code", "pop")
state_pop = int(state_pop.loc[state_pop.state_code == "01", "pop"])
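    # Expected: the county counts sum to 150 and are converted to a per-100,000 rate at the national level.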
expected_df = pd.DataFrame({
"timestamp": [20200101],
"geo_id": ["us"],
"val": [150/state_pop*100000]
})
| pd.testing.assert_frame_equal(df, expected_df) | pandas.testing.assert_frame_equal |
# -*- coding: utf-8 -*-
"""Generator reserve plots.
This module creates plots of reserve provision and shortage at the generation
and region level.
@author: <NAME>
"""
import logging
import numpy as np
import pandas as pd
import datetime as dt
import matplotlib.pyplot as plt
import matplotlib as mpl
from matplotlib.patches import Patch
from matplotlib.lines import Line2D
import marmot.config.mconfig as mconfig
import marmot.plottingmodules.plotutils.plot_library as plotlib
from marmot.plottingmodules.plotutils.plot_data_helper import PlotDataHelper
from marmot.plottingmodules.plotutils.plot_exceptions import (MissingInputData, MissingZoneData)
class MPlot(PlotDataHelper):
"""reserves MPlot class.
All the plotting modules use this same class name.
This class contains plotting methods that are grouped based on the
current module name.
The reserves.py module contains methods that are
related to reserve provision and shortage.
MPlot inherits from the PlotDataHelper class to assist in creating figures.
"""
def __init__(self, argument_dict: dict):
"""
Args:
argument_dict (dict): Dictionary containing all
arguments passed from MarmotPlot.
"""
# iterate over items in argument_dict and set as properties of class
# see key_list in Marmot_plot_main for list of properties
for prop in argument_dict:
self.__setattr__(prop, argument_dict[prop])
# Instantiation of MPlotHelperFunctions
super().__init__(self.Marmot_Solutions_folder, self.AGG_BY, self.ordered_gen,
self.PLEXOS_color_dict, self.Scenarios, self.ylabels,
self.xlabels, self.gen_names_dict, Region_Mapping=self.Region_Mapping)
self.logger = logging.getLogger('marmot_plot.'+__name__)
self.y_axes_decimalpt = mconfig.parser("axes_options","y_axes_decimalpt")
def reserve_gen_timeseries(self, figure_name: str = None, prop: str = None,
start: float = None, end: float= None,
timezone: str = "", start_date_range: str = None,
end_date_range: str = None, **_):
"""Creates a generation timeseries stackplot of total cumulative reserve provision by tech type.
The code will create either a facet plot or a single plot depending on
if the Facet argument is active.
If a facet plot is created, each scenario is plotted on a separate facet,
otherwise all scenarios are plotted on a single plot.
To make a facet plot, ensure the work 'Facet' is found in the figure_name.
Generation order is determined by the ordered_gen_categories.csv.
Args:
figure_name (str, optional): User defined figure output name. Used here
to determine if a Facet plot should be created.
Defaults to None.
prop (str, optional): Special argument used to adjust specific
plot settings. Controlled through the plot_select.csv.
                Options available are:
- Peak Demand
- Date Range
Defaults to None.
start (float, optional): Used in conjunction with the prop argument.
Will define the number of days to plot before a certain event in
a timeseries plot, e.g Peak Demand.
Defaults to None.
end (float, optional): Used in conjunction with the prop argument.
Will define the number of days to plot after a certain event in
a timeseries plot, e.g Peak Demand.
Defaults to None.
timezone (str, optional): The timezone to display on the x-axes.
Defaults to "".
start_date_range (str, optional): Defines a start date at which to represent data from.
Defaults to None.
end_date_range (str, optional): Defines a end date at which to represent data to.
Defaults to None.
Returns:
dict: Dictionary containing the created plot and its data table.
"""
# If not facet plot, only plot first scenario
facet=False
if 'Facet' in figure_name:
facet = True
if not facet:
Scenarios = [self.Scenarios[0]]
else:
Scenarios = self.Scenarios
outputs = {}
# List of properties needed by the plot, properties are a set of tuples and contain 3 parts:
# required True/False, property name and scenarios required, scenarios must be a list.
properties = [(True,"reserves_generators_Provision",self.Scenarios)]
# Runs get_formatted_data within PlotDataHelper to populate PlotDataHelper dictionary
# with all required properties, returns a 1 if required data is missing
check_input_data = self.get_formatted_data(properties)
# Checks if all data required by plot is available, if 1 in list required data is missing
if 1 in check_input_data:
return MissingInputData()
for region in self.Zones:
self.logger.info(f"Zone = {region}")
xdimension, ydimension = self.setup_facet_xy_dimensions(facet,multi_scenario=Scenarios)
grid_size = xdimension*ydimension
excess_axs = grid_size - len(Scenarios)
fig1, axs = plotlib.setup_plot(xdimension,ydimension)
plt.subplots_adjust(wspace=0.05, hspace=0.2)
data_tables = []
unique_tech_names = []
for n, scenario in enumerate(Scenarios):
self.logger.info(f"Scenario = {scenario}")
reserve_provision_timeseries = self["reserves_generators_Provision"].get(scenario)
#Check if zone has reserves, if not skips
try:
reserve_provision_timeseries = reserve_provision_timeseries.xs(region,level=self.AGG_BY)
except KeyError:
self.logger.info(f"No reserves deployed in: {scenario}")
continue
reserve_provision_timeseries = self.df_process_gen_inputs(reserve_provision_timeseries)
if reserve_provision_timeseries.empty is True:
self.logger.info(f"No reserves deployed in: {scenario}")
continue
# unitconversion based off peak generation hour, only checked once
if n == 0:
unitconversion = PlotDataHelper.capacity_energy_unitconversion(max(reserve_provision_timeseries.sum(axis=1)))
if prop == "Peak Demand":
self.logger.info("Plotting Peak Demand period")
total_reserve = reserve_provision_timeseries.sum(axis=1)/unitconversion['divisor']
peak_reserve_t = total_reserve.idxmax()
start_date = peak_reserve_t - dt.timedelta(days=start)
end_date = peak_reserve_t + dt.timedelta(days=end)
reserve_provision_timeseries = reserve_provision_timeseries[start_date : end_date]
Peak_Reserve = total_reserve[peak_reserve_t]
elif prop == 'Date Range':
self.logger.info(f"Plotting specific date range: \
{str(start_date_range)} to {str(end_date_range)}")
reserve_provision_timeseries = reserve_provision_timeseries[start_date_range : end_date_range]
else:
self.logger.info("Plotting graph for entire timeperiod")
reserve_provision_timeseries = reserve_provision_timeseries/unitconversion['divisor']
scenario_names = pd.Series([scenario] * len(reserve_provision_timeseries),name = 'Scenario')
data_table = reserve_provision_timeseries.add_suffix(f" ({unitconversion['units']})")
data_table = data_table.set_index([scenario_names],append = True)
data_tables.append(data_table)
plotlib.create_stackplot(axs, reserve_provision_timeseries, self.PLEXOS_color_dict, labels=reserve_provision_timeseries.columns,n=n)
PlotDataHelper.set_plot_timeseries_format(axs,n=n,minticks=4, maxticks=8)
if prop == "Peak Demand":
axs[n].annotate('Peak Reserve: \n' + str(format(int(Peak_Reserve), '.2f')) + ' {}'.format(unitconversion['units']),
xy=(peak_reserve_t, Peak_Reserve),
xytext=((peak_reserve_t + dt.timedelta(days=0.25)), (Peak_Reserve + Peak_Reserve*0.05)),
fontsize=13, arrowprops=dict(facecolor='black', width=3, shrink=0.1))
# create list of gen technologies
l1 = reserve_provision_timeseries.columns.tolist()
unique_tech_names.extend(l1)
if not data_tables:
self.logger.warning(f'No reserves in {region}')
out = MissingZoneData()
outputs[region] = out
continue
# create handles list of unique tech names then order
handles = np.unique(np.array(unique_tech_names)).tolist()
handles.sort(key = lambda i:self.ordered_gen.index(i))
handles = reversed(handles)
# create custom gen_tech legend
gen_tech_legend = []
for tech in handles:
legend_handles = [Patch(facecolor=self.PLEXOS_color_dict[tech],
alpha=1.0,
label=tech)]
gen_tech_legend.extend(legend_handles)
# Add legend
axs[grid_size-1].legend(handles=gen_tech_legend, loc='lower left',bbox_to_anchor=(1,0),
facecolor='inherit', frameon=True)
#Remove extra axes
if excess_axs != 0:
PlotDataHelper.remove_excess_axs(axs,excess_axs,grid_size)
# add facet labels
self.add_facet_labels(fig1)
fig1.add_subplot(111, frameon=False)
plt.tick_params(labelcolor='none', top=False, bottom=False, left=False, right=False)
if mconfig.parser("plot_title_as_region"):
plt.title(region)
plt.ylabel(f"Reserve Provision ({unitconversion['units']})", color='black', rotation='vertical', labelpad=40)
data_table_out = pd.concat(data_tables)
outputs[region] = {'fig': fig1, 'data_table': data_table_out}
return outputs
def total_reserves_by_gen(self, start_date_range: str = None,
end_date_range: str = None, **_):
"""Creates a generation stacked barplot of total reserve provision by generator tech type.
A separate bar is created for each scenario.
Args:
start_date_range (str, optional): Defines a start date at which to represent data from.
Defaults to None.
end_date_range (str, optional): Defines an end date at which to represent data to.
Defaults to None.
Returns:
dict: Dictionary containing the created plot and its data table.
"""
outputs = {}
# List of properties needed by the plot, properties are a set of tuples and contain 3 parts:
# required True/False, property name and scenarios required, scenarios must be a list.
properties = [(True,"reserves_generators_Provision",self.Scenarios)]
# Runs get_formatted_data within PlotDataHelper to populate PlotDataHelper dictionary
# with all required properties, returns a 1 if required data is missing
check_input_data = self.get_formatted_data(properties)
# Checks if all data required by plot is available, if 1 in list required data is missing
if 1 in check_input_data:
return MissingInputData()
for region in self.Zones:
self.logger.info(f"Zone = {region}")
Total_Reserves_Out = | pd.DataFrame() | pandas.DataFrame |
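# Illustrative sketch of the accumulator pattern started by the empty DataFrame above:
# per-scenario totals (hypothetical names and numbers) are collected as Series and
# stacked column-wise with pd.concat. Only pandas is assumed.
import pandas as pd

totals_by_scenario = {
    "Base": pd.Series({"Gas": 120.0, "Hydro": 35.0}),
    "HighRE": pd.Series({"Gas": 60.0, "Hydro": 40.0}),
}
# dict keys become the scenario columns; missing techs are filled with 0
total_reserves_out = pd.concat(totals_by_scenario, axis=1).fillna(0)
print(total_reserves_out)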
"""
Created on Wed Apr 10 10:04:29 2019
@author: <NAME> (<EMAIL>)
"""
import numpy as np
from pylab import *
import matplotlib.pyplot as plt
from copy import copy
import pandas as pd
from matplotlib import colors
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
from numpy.matlib import repmat
from scipy.spatial import distance_matrix
from scipy.spatial.distance import cdist, pdist
import seaborn as sns
from matplotlib import cm
from mpl_toolkits.mplot3d import Axes3D
import shutil
import random
import os
def get_random_color():
"""
Original @author: <NAME>
url: https://www.kaggle.com/vitorgamalemos/house-prices-data-exploration
ref: Taken from Kaggle Advanced House Price
"""
r1 = lambda: random.randint(0,255)
return '#%02X%02X%02X' % (r1(),r1(),r1())
def get_histplot(df: dict, fields: list):
"""
Original @author: <NAME>
url: https://www.kaggle.com/vitorgamalemos/house-prices-data-exploration
ref: Taken from Kaggle Advanced House Price
"""
for field in fields:
f, (ax1) = plt.subplots(1, 1, figsize=(15, 5))
v_dist_1 = df[field].values
sns.histplot(v_dist_1, ax=ax1, color=get_random_color(), kde=True)
mean=df[field].mean()
median=df[field].median()
mode=df[field].mode().values[0]
ax1.axvline(mean, color='r', linestyle='--', label="Mean")
ax1.axvline(median, color='g', linestyle='-', label="Median")
ax1.axvline(mode, color='b', linestyle='-', label="Mode")
ax1.legend()
plt.title(f"{field} - Histogram analysis")
def get_scatter(df: dict, fields: list, label: str):
ylim = (0, df[label].max() * 1.1)
for field in fields:
df_copy = | pd.concat([df[label], df[field]], axis=1) | pandas.concat |
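# Minimal, self-contained illustration of the axis=1 concat used in the completion
# above: pairing a label column with one feature column (column names are made up).
import pandas as pd

df = pd.DataFrame({"SalePrice": [200000, 150000], "GrLivArea": [1710, 1262]})
pair = pd.concat([df["SalePrice"], df["GrLivArea"]], axis=1)
print(pair.columns.tolist())  # ['SalePrice', 'GrLivArea']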
# -*- coding: utf-8 -*-
"""
Created on Sun Dec 5 13:08:01 2021
@author: <NAME>
"""
import scipy
import numpy as np
import pandas as pd
import statsmodels.api as sm
import scipy.stats as stats
#print stats.stats.spearmanr(x,y)
def factor_IC_test(factor_data, market_cap_data, stock_return):
"""
:param factor_data: the residual of regressing factor exposure(t) on market-cap(t) and
industry factors(t) (dummies)
:param stock_return: monthly stock return (t+1)
:return: correlations between factor exposure(t) and stock return(t+1) (a dataframe)
tips: we use this residual as a proxy of factor exposure, which is both industry-adjusted and market-cap-adjusted;
examine the mean (significance), std (stability), IR ratio (mean/std) and the proportion of correlations > 0 (direction)
"""
Ic=pd.DataFrame()
beta0=pd.DataFrame()
length=min(factor_data.shape[1],market_cap_data.shape[1])#74
for i in range(7,length):#2015-06
y = np.array(factor_data.iloc[:,i]) # dependent variable: the i-th column of the factor data
x = np.array(market_cap_data.iloc[:,i]) # independent variable: the i-th column of the market-cap data
x = sm.add_constant(x) # required so that the model includes an intercept
model = sm.OLS(y, x).fit() # build and fit the ordinary least squares model
a=model.resid
beta0[i-7]=a
# beta0=factor_data
length=min(beta0.shape[1],stock_return.shape[1])
for i in range(length):
#Ic.append(scipy.stats.pearsonr(beta0.iloc[:,i], stock_return.iloc[:,i]))
#Ic.append(stats.stats.spearmanr(beta0.iloc[:,i], stock_return.iloc[:,i]))
Ic[i]=stats.stats.spearmanr(beta0.iloc[:,i], stock_return.iloc[:,i])
residuals=Ic.iloc[0,:]
p_value=Ic.iloc[1,:]
print("%d residuals are:" % len(residuals))
#print(Ic.iloc[0,:])
print("the %d p_value of the residuals are:" % len(residuals))
# print(Ic.iloc[1,:])
print("the Percentage of positive residuals is:")
print(residuals[residuals>0].count()/len(residuals))
print("the stand devition of the residual are: ")
print(residuals.std())
print("the absulute mean of the residuals are: ")
residuals=residuals.abs()
print(residuals.mean())
print("the stand devition of the p_value are: ")
print(p_value.std())
print("the absulute mean of the p_value are: ")
p_value=p_value.abs()
print(p_value.mean())
return 0
if __name__ == '__main__':
path0="C:/Users/zhang dejian/Downloads/resource/703/project/CI/Stock_return2.csv"
path1="C:/Users/zhang dejian/Downloads/resource/703/project/CI/Market_Value.csv"
path2="C:/Users/zhang dejian/Downloads/resource/703/project/CI/EP.csv"
path3="C:/Users/zhang dejian/Downloads/resource/703/project/CI/BP.csv"
path4="C:/Users/zhang dejian/Downloads/resource/703/project/CI/ROA.csv"
path5="C:/Users/zhang dejian/Downloads/resource/703/project/CI/ROE.csv"
path6="C:/Users/zhang dejian/Downloads/resource/703/project/CI/CFP.csv"
path7="C:/Users/zhang dejian/Downloads/resource/703/project/CI/asset_to_liability.csv"
path8="C:/Users/zhang dejian/Downloads/resource/703/project/CI/CF_to_Liability.csv"
path9="C:/Users/zhang dejian/Downloads/resource/703/project/CI/debt_to_asset.csv"
path10="C:/Users/zhang dejian/Downloads/resource/703/project/CI/RSI-30.csv"
path11="C:/Users/zhang dejian/Downloads/resource/703/project/CI/Turnover.csv"
path12="C:/Users/zhang dejian/Downloads/resource/703/project/CI/cash_ratio.csv"
path13="C:/Users/zhang dejian/Downloads/resource/703/project/CI/Div_yeild.csv"
path14="C:/Users/zhang dejian/Downloads/resource/703/project/CI/EBITDA_EV.csv"
path15="C:/Users/zhang dejian/Downloads/resource/703/project/CI/volatility.csv"
stock_return=pd.read_csv(path0)
market_cap_data=pd.read_csv(path1)
EP=pd.read_csv(path2)
BP=pd.read_csv(path3)
ROA=pd.read_csv(path4)
ROE=pd.read_csv(path5)
CFP=pd.read_csv(path6)
asset_to_liability=pd.read_csv(path7)
CF_to_Liability=pd.read_csv(path8)
debt_to_asset=pd.read_csv(path9)
RSI_30=pd.read_csv(path10)
Turnover=pd.read_csv(path11)
cash_ratio=pd.read_csv(path12)
Div_yeild=pd.read_csv(path13)
EBITDA_EV= | pd.read_csv(path14) | pandas.read_csv |
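# Illustrative sketch of the summary statistics described in the factor_IC_test
# docstring above (mean, std, IR ratio = mean/std, share of positive correlations);
# the IC values below are hypothetical.
import pandas as pd

ic_series = pd.Series([0.05, -0.02, 0.08, 0.03, -0.01])
ic_mean = ic_series.mean()
ic_std = ic_series.std()
ir_ratio = ic_mean / ic_std            # information ratio of the factor
pct_positive = (ic_series > 0).mean()  # directional consistency
print(ic_mean, ic_std, ir_ratio, pct_positive)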
from bs4 import BeautifulSoup
import urllib.request
import pandas as pd
def dateToURL(month, day, year):
month=str(month)
day=str(day)
year=str(year)
if len(month)==1:
month="0"+month
if len(day)==1:
day="0"+day
return 'https://biz.yahoo.com/research/earncal/{}{}{}.html'.format(year, month, day)
def pullEarnings(month, day, year):
try:
with urllib.request.urlopen(dateToURL(month, day, year)) as page:
soup=BeautifulSoup(page, 'lxml')
# print(soup.prettify())
table=soup.find_all('table')[5]
company=[]
symbol=[]
EPS_estimate=[]
time=[]
for row in table.findAll('tr'):
cells = row.findAll('td')
if len(cells) >=5: # Only extract table body not heading
company.append(cells[0].find(text=True))
symbol.append(cells[1].find(text=True))
EPS_estimate.append(cells[2].find(text=True))
time.append(cells[3].find(text=True))
if company[0]=='\n' or symbol[0]=='\n'or EPS_estimate[0]=='\n'or time[0]=='\n':
company=company[1:]
symbol=symbol[1:]
EPS_estimate=EPS_estimate[1:]
time=time[1:]
earnings= | pd.DataFrame({company[0]:company[1:],symbol[0]:symbol[1:],EPS_estimate[0]:EPS_estimate[1:],time[0]:time[1:] }) | pandas.DataFrame |
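# Minimal illustration of the completion above: parallel scraped lists whose first
# element is the header become a DataFrame keyed by those headers (toy values).
import pandas as pd

company = ["Company", "Acme Corp", "Globex"]
symbol = ["Symbol", "ACME", "GLBX"]
earnings = pd.DataFrame({company[0]: company[1:], symbol[0]: symbol[1:]})
print(earnings)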
# -*- coding: utf-8 -*-
"""Extract a COCO captions dataframe from the annotation files."""
from __future__ import print_function
import os
import sys
import argparse
import pandas as pd
def main(args):
"""Extract a COCO captions dataframe from the annotation files."""
# Load coco library
sys.path.append(args.coco_path + '/PythonAPI')
from pycocotools.coco import COCO
set_2014 = ['val2014', 'train2014']
set_2017 = ['val2017', 'train2017']
# Make dataframe to store captions in
cocoDF = pd.DataFrame(columns=['id', 'set', 'filename', 'caption'])
for st in set_2014 + set_2017:
print('\nProcessing {}'.format(st))
# Instantiate coco classes
coco = COCO(args.coco_path +
'annotations/instances_{}.json'.format(st))
coco_anns = COCO(args.coco_path +
'annotations/captions_{}.json'.format(st))
# Get Categories
cats = coco.loadCats(coco.getCatIds())
# Get unique image ids
imgIds = []
for cat in cats:
imgId = coco.getImgIds(catIds=cat['id'])
imgIds += imgId
imgIds = list(set(imgIds))
# Get annotations
annIds = coco_anns.getAnnIds(imgIds=imgIds)
anns = coco_anns.loadAnns(annIds)
# Extract ids and captions as tuples
captions = [(int(ann['image_id']), ann['caption']) for ann in anns]
print(len(captions))
# Extract filenames as tuples
img_ids = list(set([ann['image_id'] for ann in anns]))
imgs = coco.loadImgs(img_ids)
filenames = [(int(img['id']), st + '/' + img['file_name'])
for img in imgs]
# Make dataframe of captions and filenames
captionDF = pd.DataFrame(captions, columns=['id', 'caption'])
filesDF = pd.DataFrame(filenames, columns=['id', 'filename'])
# Merge dataframes on image id
df = captionDF.merge(filesDF, how='outer', on='id')
# Assign to set
df['set'] = st
# Concatenate to resultsDF
cocoDF = pd.concat([cocoDF, df], axis=0)
# Temporarily store intermediate data
df.to_csv(args.interim_result_path + 'coco_' +
st + '_captions.csv', index=False)
print('\nDone Converting')
print('Number of images: {}'.format(cocoDF['id'].nunique()))
cocoDF.to_csv(args.coco_path +
'annotations/coco_captions.csv', index=False)
print('Saved merged set to ' + args.coco_path +
'annotations/coco_captions.csv')
# Make 2014 and 2017 dataframes
val2014DF = pd.read_csv(args.interim_result_path +
'coco_val2014_captions.csv')
val2017DF = pd.read_csv(args.interim_result_path +
'coco_val2017_captions.csv')
train2014DF = pd.read_csv(
args.interim_result_path + 'coco_train2014_captions.csv')
train2017DF = pd.read_csv(
args.interim_result_path + 'coco_train2017_captions.csv')
# Concate by year
df2014 = | pd.concat([val2014DF, train2014DF], axis=0) | pandas.concat |
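# Minimal illustration of the row-wise concat used above to stack per-split frames;
# ignore_index=True (optional) rebuilds a clean RangeIndex after stacking (toy data).
import pandas as pd

val = pd.DataFrame({"id": [1, 2], "set": "val2014"})
train = pd.DataFrame({"id": [3, 4], "set": "train2014"})
df_2014 = pd.concat([val, train], axis=0, ignore_index=True)
print(len(df_2014))  # 4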
import re
import requests
from bs4 import BeautifulSoup
from mnemon import mnc
import pandas as pd
from .utils import SearchableDataFrame, get_re, EXPIRE
BASE_URL = "http://stats.oecd.org/sdmx-json"
def get_index(ds):
"""Converts the index to a DatetimeIndex"""
v = [ | pd.Period(k["id"]) | pandas.Period |
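# Minimal illustration of pd.Period as used in the completion above: SDMX-style
# period ids (example strings below) parse into Periods and can be converted to
# timestamps for a DatetimeIndex.
import pandas as pd

ids = ["2019-Q4", "2020-Q1", "2020-Q2"]
periods = [pd.Period(i) for i in ids]
index = pd.PeriodIndex(periods).to_timestamp()
print(index)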
import datetime as dt
import json
import os
import time
import pandas as pd
from sklearn import metrics
from sklearn.ensemble import RandomForestClassifier
import datetime
# Statements with "." allows for relative path importing for WebApp and WebAPI
# from .ImportSecurities import *
# from .utils.aws_util import *
# from .utils.data_util import *
# from .utils.indicators import *
# Statements without "." should be used when running the app/main function independent of WebApp and WebAPI
from ImportSecurities import *
from utils.aws_util import *
from utils.data_util import *
from utils.indicators import *
import data_util_test
def gather_download_data(sd, ed, download_new_data=False):
symbols_config_fp = os.path.join(os.getcwd(), 'config', 'symbols_config.json')
with open(symbols_config_fp) as fp:
symbols_config = json.load(fp)
symbols_array = []
for category, array in symbols_config.items():
symbols_array.append(array)
flat_symbols = [item for sublist in symbols_array for item in sublist]
if download_new_data:
spaces_array = []
for array in symbols_array:
spaces = " ".join(array)
spaces_array.append(spaces)
gather_data(symbols_array, spaces_array, sd=sd, ed=ed)
def s3_upload_and_list():
# Set up variables
cwd = os.getcwd()
data_directory = os.path.join(cwd, 'data')
# Read Config
aws_config_fp = os.path.join(os.getcwd(), 'config', 'aws_config.json')
with open(aws_config_fp) as fp:
aws_config = json.load(fp)
# Set up Session & Resource
session = start_session(aws_config['access_key'], aws_config['secret_access_key'])
s3 = get_s3_resource(session)
bucket = aws_config['bucket_name']
# List current Buckets & Objects per Bucket
print_bucket_objects(s3, bucket)
# Upload files to Bucket
files = [f for f in os.listdir(data_directory) if f.endswith('.csv')]
for file in files:
upload_file_to_bucket(s3, bucket, os.path.join(data_directory, file), file)
# (Optional) Delete files from Bucket
# for file in files:
# delete_object(s3, bucket, file)
# List Buckets & Objects after Upload
print_bucket_objects(s3, bucket)
def get_technical_indicators_for_date(symbol,
given_date,
start_date=dt.datetime(2012, 1, 31),
end_date=dt.datetime.today()):
stock_data = get_ohlcv(symbol, start_date, end_date, base_dir='trading_assistant_app/data')
technical_indicators = get_technical_indicators_for_symbol(stock_data)
try:
return_dict = {
'Price/SMA5': technical_indicators['Price/SMA5'][given_date],
'Price/SMA10': technical_indicators['Price/SMA10'][given_date],
'Price/SMA20': technical_indicators['Price/SMA20'][given_date],
'Price/SMA50': technical_indicators['Price/SMA50'][given_date],
'Price/SMA200': technical_indicators['Price/SMA200'][given_date],
'BB%10': technical_indicators['BB%10'][given_date],
'BB%20': technical_indicators['BB%20'][given_date],
'BB%50': technical_indicators['BB%50'][given_date],
'RSI5': technical_indicators['RSI5'][given_date],
'RSI10': technical_indicators['RSI10'][given_date],
'MACD9': technical_indicators['MACD9'][given_date],
'MOM5': technical_indicators['MOM5'][given_date],
'VAMA10': technical_indicators['VAMA10'][given_date]
}
except KeyError as e:
print(f'Invalid given_date index/key for {e}')
return_dict = {
'Price/SMA5': 0,
'Price/SMA10': 0,
'Price/SMA20': 0,
'Price/SMA50': 0,
'Price/SMA200': 0,
'BB%10': 0,
'BB%20': 0,
'BB%50': 0,
'RSI5': 0,
'RSI10': 0,
'MACD9': 0,
'MOM5': 0,
'VAMA10': 0
}
return return_dict
def get_wsb_volume_for_date(symbol, given_date):
# gather reddit mention counts
# This allows for relative path retrieval for WebApp and WebAPI
reddit_fp = os.path.join('trading_assistant_app', 'reddit_refined', f'{symbol}_rss_wc.csv')
# This should be used when running the app/main function independent of WebApp and WebAPI
# reddit_fp = os.path.join(os.getcwd(), 'reddit_data', f'{symbol}_rss_wc.csv')
try:
df_reddit = pd.read_csv(reddit_fp)
except FileNotFoundError as e:
return {
'wsb_volume': 0
}
df_reddit = df_reddit.set_index('Date')
df_reddit.index = pd.to_datetime(df_reddit.index)
df_reddit = df_reddit.drop('Ticker', axis=1)
try:
value = df_reddit['wsb_volume'][given_date].item()
return_dict = {
'wsb_volume': value
}
except KeyError as e:
# print(f'Invalid given_date index/key for {e}')
return_dict = {
'wsb_volume': 0
}
return return_dict
def get_technical_indicators_for_symbol(stock_data):
price_sma_5_symbol = get_price_sma(stock_data, window=5)
price_sma_10_symbol = get_price_sma(stock_data, window=10)
price_sma_20_symbol = get_price_sma(stock_data, window=20)
price_sma_50_symbol = get_price_sma(stock_data, window=50)
price_sma_200_symbol = get_price_sma(stock_data, window=200)
bb10_pct_symbol = get_bb_pct(stock_data, window=10)
bb20_pct_symbol = get_bb_pct(stock_data, window=20)
bb50_pct_symbol = get_bb_pct(stock_data, window=50)
rsi5_symbol = get_rsi(stock_data, window=5)
rsi10_symbol = get_rsi(stock_data, window=10)
macd_symbol = get_macd_signal(stock_data, signal_days=9)
mom_symbol = get_momentum(stock_data, window=5)
vama_symbol = get_vama(stock_data, window=10)
# Compile TA into joined DF & FFILL / BFILL
df_indicators = pd.concat([price_sma_5_symbol, price_sma_10_symbol, price_sma_20_symbol,
price_sma_50_symbol, price_sma_200_symbol, bb10_pct_symbol,
bb20_pct_symbol, bb50_pct_symbol, rsi5_symbol,
rsi10_symbol, macd_symbol, mom_symbol, vama_symbol], axis=1)
df_indicators.fillna(0, inplace=True)
return df_indicators
def write_predictions_to_csv(start_date, end_date, percent_gain, path, debug=False):
date_range = pd.date_range(start_date, end_date)
buy_data = dict()
sell_data = dict()
for date in date_range:
predictions_dictionary = get_list_of_predicted_stocks(percent_gain, date)
buy_signal_recognized_list = predictions_dictionary['buy_signal_recognized_list']
buy_signal_recognized_str = '_'.join(buy_signal_recognized_list)
sell_signal_recognized_list = predictions_dictionary['sell_signal_recognized_list']
sell_signal_recognized_str = '_'.join(sell_signal_recognized_list)
buy_data[date] = buy_signal_recognized_str
sell_data[date] = sell_signal_recognized_str
df_buy = pd.DataFrame(buy_data.items(), columns=['Date', 'Symbols'])
df_buy = df_buy.set_index('Date')
df_buy.to_csv(os.path.join(path, f'buy_predictions.csv'))
df_sell = pd.DataFrame(sell_data.items(), columns=['Date', 'Symbols'])
df_sell = df_sell.set_index('Date')
df_sell.to_csv(os.path.join(path, f'sell_predictions.csv'))
def read_predictions(given_date, minimum_count=0, buy=True, debug=False):
df = pd.read_csv(f'trading_assistant_app/predictions/{"buy_predictions" if buy else "sell_predictions"}.csv')
df = df.set_index('Date')
try:
symbols = df['Symbols'][given_date]
except KeyError as e:
print(f'Invalid given_date index/key for {e}')
symbols = ''
if isinstance(symbols, float):
if np.isnan(symbols):
return []
elif isinstance(symbols, str):
predictions_list = symbols.split('_')
if buy:
filtered = filter(lambda symbol:
get_wsb_volume_for_date(symbol, given_date)['wsb_volume'] > minimum_count, predictions_list)
filtered_list = list(filtered)
else:
filtered_list = predictions_list
return filtered_list
def prepare_data(symbols, start_date, end_date, percent_gain, debug=False):
# df_array = list()
# initialize dictionary to hold dataframe per symbol
df_dict = {}
# remove the index from the list of symbols
if "SPY" in symbols:
symbols.remove("SPY")
for symbol in symbols:
# get stock data for a given time
# This allows for relative path retrieval for WebApp and WebAPI
# ***
# stock_data = get_ohlcv(symbol, start_date, end_date, base_dir=os.path.join('trading_assistant_app', 'data'))
# This should be used when running the app/main function independent of WebApp and WebAPI
stock_data = data_util_test.get_ohlcv(symbol, start_date, end_date, base_dir=os.path.join('data'))
# Filter out empty OHLCV DF
if len(stock_data) == 0:
continue
# calculate technical indicators
df_indicators = get_technical_indicators_for_symbol(stock_data)
# gather reddit mention counts
# This allows for relative path retrieval for WebApp and WebAPI
# ***
#reddit_fp = os.path.join('trading_assistant_app', 'reddit_refined', f'{symbol}_rss_wc.csv')
reddit_fp = os.path.join('reddit_refined', f'{symbol}_rss_wc.csv')
# This should be used when running the app/main function independent of WebApp and WebAPI
# reddit_fp = os.path.join(os.getcwd(), 'reddit_data', f'{symbol}_rss.csv')
if os.path.isfile('reddit_refined/' + symbol + '_rss_wc.csv'):
df_reddit = pd.read_csv(reddit_fp)
df_reddit = df_reddit.set_index('Date')
df_reddit.index = pd.to_datetime(df_reddit.index)
df_reddit = df_reddit.drop('Ticker', axis=1)
else:
df_reddit = | pd.DataFrame(columns=["Date","Ticker","wsb_volume"]) | pandas.DataFrame |
# Functions for performing analysis in the article
# "Material Culture Studies in the Age of Big Data:
# Digital Excavation of Homemade Facemask Production
# during the COVID-19 Pandemic"
#
# Code Written By: <NAME>
#
# For import/use instructions, see README.md
import pandas as pd
import geopandas as gpd
import nltk
import itertools
import collections
import seaborn as sns
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1.inset_locator import zoomed_inset_axes
# download necessary NLTK data if we don't already have it
nltk.download('stopwords', quiet=True)
nltk.download('punkt', quiet=True)
nltk.download('averaged_perceptron_tagger', quiet=True)
nltk.download('wordnet', quiet=True)
# Define general stop words + study-specific stop-words
stop = nltk.corpus.stopwords.words('english') \
+ ["face", "mask", "masks", "facemask", "facemasks"]
# Term lists
intentionality_eff = \
[("two", "layer"), ("double", "layer"), ("2", "layer"),
("three", "layer"), ("triple", "layer"), ("3", "layer"),
("multi", "layer"), ("multiple", "layer"), "multilayer", "multilayered",
"upf", "uv", "thick", "cotton",
("adjustable", "fit"), ("form", "fit"), ("snug", "fit"), ("tight", "fit"),
("nose", "wire"),
("cover", "chin"), ("cover", "nose"), ("cover", "mouth"),
("filter", "pocket"), "cotton", "kn95", "n95"]
intentionality_ineff = \
["mesh", "crochet", "yarn", "lace", "hole",
("one", "layer"), ("single", "layer"), ("1", "layer"),
"compliance", "antimask", ("anti", "mask"), "protest"]
unintentionality_ineff = ["valve", "thin", "loose"]
mesh = ["mesh"]
antimask = ["antimask", ("anti", "mask")]
# List of states won by Biden and Trump, respectively
biden = ["Washington", "Oregon", "California", "Nevada",
"Arizona", "New Mexico", "Colorado", "Hawaii",
"Minnesota", "Wisconsin", "Illinois", "Michigan",
"Georgia", "Pennsylvania", "Virginia", "Maryland",
"New Jersey", "New York", "Massachusetts", "Connecticut",
"Rhode Island", "Delaware", "Vermont", "New Hampshire",
"Maine"]
trump = ["Alaska", "Idaho", "Utah", "Montana",
"Wyoming", "North Dakota", "South Dakota", "Nebraska",
"Kansas", "Oklahoma", "Texas", "Iowa",
"Missouri", "Arkansas", "Louisiana", "Indiana",
"Kentucky", "Tennessee", "Mississippi", "Alabama",
"West Virginia", "Ohio", "North Carolina", "South Carolina",
"Florida"]
def process_data(data_path='data/'):
'''
Takes clean Etsy data (in subdirectory provided as input)
and processes it for user. All of the necessary files (SHP file
containing polygon boundaries of U.S. states from the U.S. Census Bureau
as of 2020, along with a CSV of collected Etsy facemask data that has
had its text columns pre-cleaned of extraneous characters) are
in the data/ subdirectory of this repository, so `data/` is the default
path.
Returns Pandas DataFrame (with lemmatized and tokenized
listing titles), along with a GeoPandas DataFrame, containing
U.S. state polygons from the 2020 census (shp)
'''
df = pd.read_csv(data_path + 'clean_etsy_data.csv')
df['date_collected'] = | pd.to_datetime(df['date_collected']) | pandas.to_datetime |
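# Minimal illustration of the pd.to_datetime conversion above; passing an explicit
# format (when the layout is known) avoids ambiguous parses (toy column below).
import pandas as pd

df = pd.DataFrame({"date_collected": ["2020-07-01", "2020-07-15"]})
df["date_collected"] = pd.to_datetime(df["date_collected"], format="%Y-%m-%d")
print(df.dtypes)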
from datetime import datetime
import re
import unittest
import nose
from nose.tools import assert_equal
import numpy as np
from pandas.tslib import iNaT
from pandas import Series, DataFrame, date_range, DatetimeIndex, Timestamp
from pandas import compat
from pandas.compat import range, long, lrange, lmap, u
from pandas.core.common import notnull, isnull
import pandas.core.common as com
import pandas.util.testing as tm
import pandas.core.config as cf
_multiprocess_can_split_ = True
def test_mut_exclusive():
msg = "mutually exclusive arguments: '[ab]' and '[ab]'"
with tm.assertRaisesRegexp(TypeError, msg):
com._mut_exclusive(a=1, b=2)
assert com._mut_exclusive(a=1, b=None) == 1
assert com._mut_exclusive(major=None, major_axis=None) is None
def test_is_sequence():
is_seq = com._is_sequence
assert(is_seq((1, 2)))
assert(is_seq([1, 2]))
assert(not is_seq("abcd"))
assert(not is_seq(u("abcd")))
assert(not is_seq(np.int64))
class A(object):
def __getitem__(self):
return 1
assert(not is_seq(A()))
def test_notnull():
assert notnull(1.)
assert not notnull(None)
assert not notnull(np.NaN)
with cf.option_context("mode.use_inf_as_null", False):
assert notnull(np.inf)
assert notnull(-np.inf)
arr = np.array([1.5, np.inf, 3.5, -np.inf])
result = notnull(arr)
assert result.all()
with cf.option_context("mode.use_inf_as_null", True):
assert not notnull(np.inf)
assert not notnull(-np.inf)
arr = np.array([1.5, np.inf, 3.5, -np.inf])
result = notnull(arr)
assert result.sum() == 2
with cf.option_context("mode.use_inf_as_null", False):
float_series = Series(np.random.randn(5))
obj_series = Series(np.random.randn(5), dtype=object)
assert(isinstance(notnull(float_series), Series))
assert(isinstance(notnull(obj_series), Series))
def test_isnull():
assert not isnull(1.)
assert isnull(None)
assert isnull(np.NaN)
assert not isnull(np.inf)
assert not isnull(-np.inf)
float_series = Series(np.random.randn(5))
obj_series = Series(np.random.randn(5), dtype=object)
assert(isinstance(isnull(float_series), Series))
assert(isinstance(isnull(obj_series), Series))
# call on DataFrame
df = DataFrame(np.random.randn(10, 5))
df['foo'] = 'bar'
result = isnull(df)
expected = result.apply(isnull)
tm.assert_frame_equal(result, expected)
def test_isnull_tuples():
result = isnull((1, 2))
exp = np.array([False, False])
assert(np.array_equal(result, exp))
result = isnull([(False,)])
exp = np.array([[False]])
assert(np.array_equal(result, exp))
result = | isnull([(1,), (2,)]) | pandas.core.common.isnull |
import networkx as nx
from sklearn import preprocessing
import glob
import warnings
import pandas as pd
from IPython.display import display
import matplotlib.pyplot as plt
pd.options.display.max_columns = 20
warnings.filterwarnings("ignore")
import numpy as np
def read_label():
label = {}
for i in range(3, 5, 2):
hi = 'low_freq/house_{}/labels.dat'.format(i)
label[i] = {}
with open(hi) as f:
for line in f:
splitted_line = line.split(' ')
label[i][int(splitted_line[0])] = splitted_line[1].strip(
) + '_' + splitted_line[0]
return label
def read_merge_data(house, labels):
path = 'low_freq/house_{}/'.format(house)
file = path + 'channel_1.dat'
df = pd.read_table(file, sep=' ', names=['unix_time', labels[house][1]],
dtype={'unix_time': 'int64', labels[house][1]: 'float64'})
num_apps = len(glob.glob(path + 'channel*'))
for i in range(2, num_apps + 1):
file = path + 'channel_{}.dat'.format(i)
data = pd.read_table(file, sep=' ', names=['unix_time', labels[house][i]],
dtype={'unix_time': 'int64', labels[house][i]: 'float64'})
df = | pd.merge(df, data, how='inner', on='unix_time') | pandas.merge |
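# Minimal illustration of the inner merge used above to join one channel at a time
# on 'unix_time'; only timestamps present in both frames survive (toy readings).
import pandas as pd

mains = pd.DataFrame({"unix_time": [1, 2, 3], "mains_1": [100.0, 110.0, 120.0]})
fridge = pd.DataFrame({"unix_time": [2, 3, 4], "refrigerator_5": [55.0, 60.0, 58.0]})
merged = pd.merge(mains, fridge, how="inner", on="unix_time")
print(merged["unix_time"].tolist())  # [2, 3]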
#!/usr/bin/python
# -*- coding: utf-8 -*-
import requests
import urllib.request
import os
import sys
from bs4 import BeautifulSoup
import pandas as pd
pd.set_option('display.max_columns', None)
import statistics as st
from datetime import datetime
import numpy
import matplotlib.pyplot as plotVersus
# from matplotlib.pyplot import figure
# figure(num=None, figsize=(8, 6), dpi=80, facecolor='w', edgecolor='k')
# This method gives the data of latest condition which is currently available on the Hobolink cloud"
def getLatestConditionFromHobolink():
page = urllib.request.urlopen('https://hobolink.com/p/b0a1dc20e6e7b315b81297194bbb9864')
soup = BeautifulSoup(page, 'html.parser').find("div", {"id": "hobolink-latest-conditions-form:datatable-panel"})
divs = []
i = 0
for link in soup.find_all("div"):
i += 1
if (i >= 6):
divs.append(link)
ans = []
for div in divs:
var = []
for values in div.find_all("span"):
var.append(values.text.encode('ascii','ignore'))
ans.append(var)
final_ans = []
for i in range(len(ans)):
var = []
var.append(ans[i][0][:-1])
var.append(ans[i][1] + b" " + ans[i][2])
final_ans.append(var)
return final_ans
def download_file(url, local_filename):
with requests.get(url, stream=True) as r:
r.raise_for_status()
with open(local_filename, 'wb') as f:
for chunk in r.iter_content(chunk_size=8192):
if chunk:
f.write(chunk)
def pre_process_data(lines):
lines.pop(0)
lines = list(reversed(lines))
data = []
for line in lines:
var = line.encode('ascii','ignore')
x = var.split(b',')
x.pop(0)
for i in range(len(x)):
x[i] = x[i].rstrip()
data.append(x)
df = | pd.DataFrame.from_records(data) | pandas.DataFrame.from_records |
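# Minimal illustration of DataFrame.from_records as used above: a list of row
# sequences becomes a frame, optionally with explicit column names (toy rows).
import pandas as pd

rows = [(b"21.5", b"60"), (b"21.7", b"59")]
df = pd.DataFrame.from_records(rows, columns=["temp_c", "rh_pct"])
print(df.shape)  # (2, 2)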
import logging
import click
import random
import pandas as pd
from pytorch_lightning.loggers import TensorBoardLogger
from definitions import REPO_ROOT, RAW_DATA_DIR, PROCESSED_DATA_DIR
import src.data.preprocess_data as prep
from src.data.data_loader import RepeatedStratifiedKFoldDataloader
from src.models.classifier_chain import ClassifierChainEnsemble
from src.models.logistic_regression import (
LogisticRegressionOVRPredictor, LogisticRegressionModel
)
from src.models.xgboost_pipeline import DepthwiseXGBPipeline
import src.data.var_names as abcd_vars
from src.models.evaluation import ResultManager
DATA_DIR = PROCESSED_DATA_DIR / 'abcd_data.csv'
@click.command()
@click.option('--seed', default=0, help='Random number seed.', type=int)
@click.option('--k', default=5, help='Number of CV folds.', type=int)
@click.option('--n', help='Number of successive k-fold CV runs.', type=int)
def main(seed, k, n):
logger = logging.getLogger(__name__)
logger.info(f'Running training and prediction on unpermuted dataset with '
f'seed={seed}, k={k}, n={n}.')
logger.info('Load data')
abcd_data = | pd.read_csv(DATA_DIR, index_col='src_subject_id') | pandas.read_csv |
from unittest import TestCase
from sklearn.model_selection import train_test_split
import pandas as pd
import scipy.io
from pyande.models.statistics.mvg import MultivariateGaussian
from pyande.data.calculations import select_threshold
class TestMVG(TestCase):
def test_mvg(self):
mat = scipy.io.loadmat('./data/cardio.mat')
x_data = mat['X']
y_data = mat['y']
header_list = ["LB-FHR", "AC", "FM", "UC", "DL", "DS", "ASTV", "MSTV", "ALTV", "MLTV",
"width", "min", "max", "nmax", "nzeros", "mode", "mean", "median",
"variance", "tendency", "class-fhr"]
data = | pd.DataFrame(x_data, columns=header_list) | pandas.DataFrame |
import pandas as pd
DELIMITER = '|'
cols = ['Base', 'E_nX', 'E_X', 'C_nX', 'C_X']
connectives = ['I repeat', 'again', 'in short', 'therefore', 'that is', 'thus']
expanders = {
'{Prep}': ['Near', 'By', 'Nearby'], # ['near', 'nearby', 'by']
'{E/D}': ['Here is', 'This is'], # ['Here is', 'There is', 'That is', 'This is', 'It is']
'{Comp}': ['that'], # ['that', 'which']
'{Conn}': ['; ' + c + ', ' for c in connectives] + ['. ' + c[0].upper() + c[1:] + ', ' for c in connectives]
}
# Returns a list of dicts; each tuple becomes a dict
def read_items(filename):
df = pd.read_csv(filename, sep='\t')
assert ~df.isna().values.any(), 'Input CSV has NA'
return df.to_dict('records') # Each dict has keys A, B, Vtr, Vin1, Vin2
# Returns a list of dataframes
def read_templates(*filenames):
templates = []
for f in filenames:
# Read into pandas df
df = | pd.read_csv(f, sep='\t', na_filter=False) | pandas.read_csv |
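# Minimal illustration of the read_csv call above: na_filter=False keeps empty
# template cells as empty strings instead of NaN (inline TSV for the example).
import io
import pandas as pd

tsv = "Base\tE_nX\nThe dog barks\t\n"
df = pd.read_csv(io.StringIO(tsv), sep="\t", na_filter=False)
print(repr(df.loc[0, "E_nX"]))  # '' rather than nan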
# -*- coding: utf-8 -*-
"""
Created on Wed May 13 13:59:31 2020
@author: <NAME>
"""
import sys, os
sys.path.append('H:/cloud/cloud_data/Projects/DL/Code/src')
sys.path.append('H:/cloud/cloud_data/Projects/DL/Code/src/ct')
import pandas as pd
import ntpath
import datetime
from openpyxl.worksheet.datavalidation import DataValidation
from openpyxl.formatting.formatting import ConditionalFormattingList
from openpyxl.styles import Font, Color, Border, Side
from openpyxl.styles import Protection
from openpyxl.styles import PatternFill
from glob import glob
from shutil import copyfile
import numpy as np
from collections import defaultdict
from openpyxl.utils import get_column_letter
from CTDataStruct import CTPatient
import keyboard
from openpyxl.styles.differential import DifferentialStyle
from openpyxl.formatting import Rule
from settings import initSettings, saveSettings, loadSettings, fillSettingsTags
from classification import createRFClassification, initRFClassification, classifieRFClassification
from filterTenStepsGuide import filter_CACS_10StepsGuide, filter_CACS, filter_NCS, filterReconstruction, filter_CTA, filer10StepsGuide, filterReconstructionRF
from discharge_extract import extractDICOMTags
from tqdm import tqdm
#from reco.reco_filter import RecoFilter
patient_status = ['OK', 'EXCLUDED', 'MISSING_CACS', 'MISSING_CTA', 'MISSING_NC_CACS', 'MISSING_NC_CTA']
patient_status_manual = ['OK', 'EXCLUDED', 'UNDEFINED', 'INPROGRESS']
patient_status_manualStr = '"' + 'OK,' + 'EXCLUDED,' + 'UNDEFINED,' + 'INPROGRESS,' + '"'
scanClasses = defaultdict(lambda:None,{'UNDEFINED': 0, 'CACS': 1, 'CTA': 2, 'NCS_CACS': 3, 'NCS_CTA': 4, 'ICA': 5, 'OTHER': 6})
scanClassesInv = defaultdict(lambda:None,{0: 'UNDEFINED', 1: 'CACS', 2: 'CTA', 3: 'NCS_CACS', 4: 'NCS_CTA', 5: 'ICA', 6: 'OTHER'})
scanClassesStr = '"' + 'UNDEFINED,' + 'CACS,' + 'CTA,' + 'NCS_CACS,' + 'NCS_CTA,' + 'ICA,' + 'OTHER' +'"'
scanClassesManualStr = '"' + 'UNDEFINED,' + 'CACS,' + 'CTA,' + 'NCS_CACS,' + 'NCS_CTA,' + 'ICA,' + 'OTHER,' + 'PROBLEM,' + 'QUESTION,' +'"'
imageQualityStr = '"' + 'UNDEFINED,' + 'GOOD,' + 'BAD' +'"'
recoClasses = ['FBP', 'IR', 'UNDEFINED']
changeClasses = ['NO_CHANGE', 'SOURCE_CHANGE', 'MASTER_CHANGE', 'MASTER_SOURCE_CHANGE']
def setColor(workbook, sheet, rows, NumColumns, color):
for r in rows:
if r % 100 == 0:
print('index:', r, '/', max(rows))
for c in range(1,NumColumns):
cell = sheet.cell(r, c)
cell.fill = PatternFill(start_color=color, end_color=color, fill_type = 'solid')
def setColorFormula(sheet, formula, color, NumRows, NumColumns):
column_letter = get_column_letter(NumColumns+1)
colorrange="B2:" + str(column_letter) + str(NumRows)
dxf = DifferentialStyle(font=Font(color=color))
r = Rule(type="expression", dxf=dxf, stopIfTrue=True)
r.formula = [formula]
sheet.conditional_formatting.add(colorrange, r)
def setBorderFormula(sheet, formula, NumRows, NumColumns):
column_letter = get_column_letter(NumColumns+1)
colorrange="B1:" + str(column_letter) + str(NumRows)
thin = Side(border_style="thin", color="000000")
border = Border(bottom=thin)
dxf = DifferentialStyle(border=border)
r = Rule(type="expression", dxf=dxf, stopIfTrue=True)
r.formula = [formula]
sheet.conditional_formatting.add(colorrange, r)
# Set border for index
for i in range(1, NumRows + 1):
cell = sheet.cell(i, 1)
cell.border = Border()
return sheet
def sortFilepath(filepathList):
filenameList=[]
folderpathList=[]
for filepath in filepathList:
folderpath, filename, _ = splitFilePath(filepath)
filenameList.append(filename)
folderpathList.append(folderpath)
dates_str = [x.split('_')[-1] for x in filenameList]
dates = [datetime.datetime(int(x[4:8]), int(x[2:4]), int(x[0:2])) for x in dates_str]
idx = list(np.argsort(dates))
filepathlistsort=[]
for i in idx:
filepathlistsort.append(folderpathList[i] + '/' + '_'.join(filenameList[i].split('_')[0:-1]) + '_' + dates[i].strftime("%d%m%Y") + '.xlsx')
return filepathlistsort
def sortFolderpath(folderpath, folderpathList):
dates_str = [x.split('_')[-1] for x in folderpathList]
dates = [datetime.datetime(int(x[4:8]), int(x[2:4]), int(x[0:2])) for x in dates_str]
date_str = folderpath.split('_')[-1]
date = datetime.datetime(int(date_str[4:8]), int(date_str[2:4]), int(date_str[0:2]))
idx = list(np.argsort(dates))
folderpathSort=[]
for i in idx:
folderpathSort.append(folderpathList[i])
if dates[i] == date:
break
return folderpathSort
def isNaN(num):
return num != num
def splitFilePath(filepath):
""" Split filepath into folderpath, filename and file extension
:param filepath: Filepath
:type filepath: str
"""
folderpath, _ = ntpath.split(filepath)
head, file_extension = os.path.splitext(filepath)
folderpath, filename = ntpath.split(head)
return folderpath, filename, file_extension
def update_CACS_10StepsGuide(df_CACS, sheet):
for index, row in df_CACS.iterrows():
cell_str = 'AB' + str(index+2)
cell = sheet[cell_str]
cell.value = row['CACS10StepsGuide']
#cell.protection = Protection(locked=False)
return sheet
def mergeITT(df_ITT, df_data):
# Merge ITT table
print('Merge ITT table')
for i in range(len(df_data)):
patient = df_ITT[df_ITT['ID']==df_data.loc[i, 'PatientID']]
if len(patient)==1:
df_data.loc[i, 'ITT'] = patient.iloc[0]['ITT']
df_data.loc[i, 'Date CT'] = patient.iloc[0]['Date CT']
df_data.loc[i, 'Date ICA'] = patient.iloc[0]['Date ICA']
return df_data
def mergeDicom(df_dicom, df_data_old=None):
print('Merge dicom table')
if df_data_old is None:
df_data = df_dicom.copy()
else:
idx = df_dicom['SeriesInstanceUID'].isin(df_data_old['SeriesInstanceUID'])
df_data = pd.concat([df_data_old, df_dicom[idx==False]], axis=0)
return df_data
def mergeTracking(df_tracking, df_data, df_data_old=None):
if df_data_old is None:
df_data = df_data.copy()
df_tracking = df_tracking.copy()
df_data.replace(to_replace=[np.nan], value='', inplace=True)
df_tracking.replace(to_replace=[np.nan], value='', inplace=True)
# Merge tracking table
print('Merge tracking table')
df_data['Responsible Person Problem'] = ''
df_data['Date Query'] = ''
df_data['Date Answer'] = ''
df_data['Problem Summary'] = ''
df_data['Results'] = ''
for index, row in df_tracking.iterrows():
patient = row['PatientID']
df_patient = df_data[df_data['PatientID']==patient]
for indexP, rowP in df_patient.iterrows():
# Update 'Problem Summary'
if df_data.loc[indexP, 'Problem Summary']=='':
df_data.loc[indexP, 'Problem Summary'] = row['Problem Summary']
else:
df_data.loc[indexP, 'Problem Summary'] = df_data.loc[indexP, 'Problem Summary'] + ' | ' + row['Problem Summary']
# Update 'results'
if df_data.loc[indexP, 'Results']=='':
df_data.loc[indexP, 'Results'] = row['results']
else:
df_data.loc[indexP, 'Results'] = df_data.loc[indexP, 'Results'] + ' | ' + row['results']
else:
df_data = df_data.copy()
df_data_old = df_data_old.copy()
df_tracking = df_tracking.copy()
df_data.replace(to_replace=[np.nan], value='', inplace=True)
df_data_old.replace(to_replace=[np.nan], value='', inplace=True)
df_tracking.replace(to_replace=[np.nan], value='', inplace=True)
l = len(df_data_old)
df_data['Responsible Person Problem'] = ''
df_data['Date Query'] = ''
df_data['Date Answer'] = ''
df_data['Problem Summary'] = ''
df_data['Results'] = ''
df_data['Responsible Person Problem'][0:l] = df_data_old['Responsible Person Problem']
df_data['Date Query'][0:l] = df_data_old['Date Query']
df_data['Date Answer'][0:l] = df_data_old['Date Answer']
df_data['Problem Summary'][0:l] = df_data_old['Problem Summary']
df_data['Results'][0:l] = df_data_old['Results']
for index, row in df_tracking.iterrows():
patient = row['PatientID']
df_patient = df_data[df_data['PatientID']==patient]
for indexP, rowP in df_patient.iterrows():
# Update 'Problem Summary'
if df_data.loc[indexP, 'Problem Summary']=='':
df_data.loc[indexP, 'Problem Summary'] = row['Problem Summary']
else:
if not row['Problem Summary'] in df_data.loc[indexP, 'Problem Summary']:
df_data.loc[indexP, 'Problem Summary'] = df_data.loc[indexP, 'Problem Summary'] + ' | ' + row['Problem Summary']
# Update 'results'
if df_data.loc[indexP, 'Results']=='':
df_data.loc[indexP, 'Results'] = row['results']
else:
if not row['results'] in df_data.loc[indexP, 'Results']:
df_data.loc[indexP, 'Results'] = df_data.loc[indexP, 'Results'] + ' | ' + row['results']
return df_data
def mergeEcrf(df_ecrf, df_data):
# Merge ecrf table
print('Merge ecrf table')
df_data['1. Date of CT scan'] = ''
for index, row in df_ecrf.iterrows():
patient = row['Patient identifier']
df_patient = df_data[df_data['PatientID']==patient]
for indexP, rowP in df_patient.iterrows():
# Update '1. Date of CT scan'
df_data.loc[indexP, '1. Date of CT scan'] = row['1. Date of CT scan']
return df_data
def mergePhase_exclude_stenosis(df_phase_exclude_stenosis, df_data):
# Merge phase_exclude_stenosis
print('Merge phase_exclude_stenosis table')
df_data['phase_i0011'] = ''
df_data['phase_i0012'] = ''
for index, row in df_phase_exclude_stenosis.iterrows():
patient = row['mnpaid']
df_patient = df_data[df_data['PatientID']==patient]
for indexP, rowP in df_patient.iterrows():
# Update tags
if df_data.loc[indexP, 'phase_i0011']=='':
df_data.loc[indexP, 'phase_i0011'] = str(row['phase_i0011'])
else:
df_data.loc[indexP, 'phase_i0011'] = str(df_data.loc[indexP, 'phase_i0011']) + ', ' + str(row['phase_i0011'])
if df_data.loc[indexP, 'phase_i0012']=='':
df_data.loc[indexP, 'phase_i0012'] = str(row['phase_i0012'])
else:
df_data.loc[indexP, 'phase_i0012'] = str(df_data.loc[indexP, 'phase_i0012']) + ', ' + str(row['phase_i0012'])
return df_data
def mergePrct(df_prct, df_data):
# Merge phase_exclude_stenosis
print('Merge prct table')
df_data['other_best_phase'] = ''
df_data['rca_best_phase'] = ''
df_data['lad_best_phase'] = ''
df_data['lcx_best_phase'] = ''
for index, row in df_prct.iterrows():
patient = row['PatientId']
df_patient = df_data[df_data['PatientID']==patient]
for indexP, rowP in df_patient.iterrows():
# Update tags
df_data.loc[indexP, 'other_best_phase'] = row['other_best_phase']
df_data.loc[indexP, 'rca_best_phase'] = row['rca_best_phase']
df_data.loc[indexP, 'lad_best_phase'] = row['lad_best_phase']
df_data.loc[indexP, 'lcx_best_phase'] = row['lcx_best_phase']
return df_data
def mergeStenosis_bigger_20_phase(df_stenosis_bigger_20_phases, df_data):
# Merge phase_exclude_stenosis
print('Merge Stenosis_bigger_20_phase table')
df_data['STENOSIS'] = ''
patientnames = df_stenosis_bigger_20_phases['mnpaid'].unique()
df_stenosis_bigger_20_phases.replace(to_replace=[np.nan], value='', inplace=True)
for patient in patientnames:
patientStenose = df_stenosis_bigger_20_phases[df_stenosis_bigger_20_phases['mnpaid']==patient]
sten = ''
for index, row in patientStenose.iterrows():
art=''
if row['LAD']==1:
art = 'LAD'
if row['RCA']==1:
art = 'RCA'
if row['LMA']==1:
art = 'LMA'
if row['LCX']==1:
art = 'LCX'
if sten =='':
if not art=='':
sten = art + ':' + str(row['sten_i0231 (Phase #1)']) + ':' + str(row['sten_i0241']) + ':' + str(row['sten_i0251'])
else:
if not art=='':
sten = sten + ', ' + art + ':' + str(row['sten_i0231 (Phase #1)']) + ':' + str(row['sten_i0241']) + ':' + str(row['sten_i0251'])
df_patient = df_data[df_data['PatientID']==patient]
for indexP, rowP in df_patient.iterrows():
df_data.loc[indexP, 'STENOSIS'] = sten
return df_data
def freeze(writer, sheetname, df):
NumRows=1
NumCols=1
df.to_excel(writer, sheet_name = sheetname, freeze_panes = (NumCols, NumRows))
def highlight_columns(sheet, columns=[], color='A5A5A5', offset=2):
for col in columns:
cell = sheet.cell(1, col+offset)
cell.fill = PatternFill(start_color=color, end_color=color, fill_type = 'solid')
return sheet
def setAccessRights(sheet, columns=[], promt='', promptTitle='', formula1='"Dog,Cat,Bat"'):
for column in columns:
column_letter = get_column_letter(column+2)
dv = DataValidation(type="list", formula1=formula1, allow_blank=True)
dv.prompt = promt
dv.promptTitle = promptTitle
column_str = column_letter + str(1) + ':' + column_letter + str(1048576)
dv.add(column_str)
sheet.add_data_validation(dv)
return sheet
def setComment(sheet, columns=[], comment=''):
for column in columns:
column_letter = get_column_letter(column+2)
dv = DataValidation()
dv.prompt = comment
column_str = column_letter + str(1) + ':' + column_letter + str(1048576)
dv.add(column_str)
sheet.add_data_validation(dv)
return sheet
def checkTables(settings):
print('Checking existance of required tables.')
# Check if requird tables exist
tables=['filepath_dicom', 'filepath_ITT', 'filepath_ecrf', 'filepath_prct',
'filepath_phase_exclude_stenosis', 'filepath_stenosis_bigger_20_phases', 'filepath_tracking']
for table in tables:
if not os.path.isfile(settings[table]):
raise ValueError("Source file " + settings[table] + ' does not exist. Please copy file in the correct directory!')
return True
def createData(settings, NumSamples=None):
""" Create data columns from dicom metadata
:param settings: Dictionary of settings
:type settings: dict
"""
XA=False
# Extract dicom data
df_dicom = pd.read_excel(settings['filepath_dicom'], index_col=0)
# Reorder datafame
df_dicom = df_dicom[settings['dicom_tags_order']]
if XA:
df_dicom = df_dicom[(df_dicom['Modality']=='CT') | (df_dicom['Modality']=='OT') | (df_dicom['Modality']=='XA')]
else:
df_dicom = df_dicom[(df_dicom['Modality']=='CT') | (df_dicom['Modality']=='OT')]
df_dicom = df_dicom.reset_index(drop=True)
cols = df_dicom.columns.tolist()
cols_new = settings['dicom_tags_first'] + [x for x in cols if x not in settings['dicom_tags_first']]
df_dicom = df_dicom[cols_new]
df_data = df_dicom.copy()
df_data = df_data.reset_index(drop=True)
if NumSamples is not None:
df_data = df_data[0:NumSamples]
# Extract ecrf data
df_ecrf = pd.read_excel(settings['filepath_ecrf'])
df_data = mergeEcrf(df_ecrf, df_data)
# Extract ITT
df_ITT = pd.read_excel(settings['filepath_ITT'], 'Tabelle1')
df_data = mergeITT(df_ITT, df_data)
# Extract phase_exclude_stenosis
df_phase_exclude_stenosis = pd.read_excel(settings['filepath_phase_exclude_stenosis'])
df_data = mergePhase_exclude_stenosis(df_phase_exclude_stenosis, df_data)
# Extract prct
df_prct = pd.read_excel(settings['filepath_prct'])
df_data = mergePrct(df_prct, df_data)
# Extract stenosis_bigger_20_phases
df_stenosis_bigger_20_phases = pd.read_excel(settings['filepath_stenosis_bigger_20_phases'])
df_data = mergeStenosis_bigger_20_phase(df_stenosis_bigger_20_phases, df_data)
# Reoder columns
cols = df_data.columns.tolist()
cols_new = settings['dicom_tags_first'] + [x for x in cols if x not in settings['dicom_tags_first']]
#filepath_master_data = os.path.join(settings['folderpath_components'], 'discharge_master_data_' + settings['date'] + '.xlsx')
#df_data.to_excel(settings['filepath_data'])
df_data.to_pickle(settings['filepath_data'])
def createPredictions(settings):
""" Create prediction columns
:param settings: Dictionary of settings
:type settings: dict
"""
df_data = pd.read_pickle(settings['filepath_data'])
df_pred = pd.DataFrame()
# Filter by CACS based on 10-Steps-Guide
df = filter_CACS_10StepsGuide(df_data)
df_pred['CACS10StepsGuide'] = df['CACS10StepsGuide']
# Filter by CACS based selection
df = filter_CACS(df_data)
df_pred['CACS'] = df['CACS']
# Filter by NCS_CACS and NCS_CTA based on criteria
df = filter_NCS(df_data)
df_pred['NCS_CTA'] = df['NCS_CTA']
df_pred['NCS_CACS'] = df['NCS_CACS']
# Filter by CTA
df = filter_CTA(settings)
df_pred['CTA'] = df['CTA'].astype('bool')
df_pred['CTA_phase'] = df['phase']
df_pred['CTA_arteries'] = df['arteries']
df_pred['CTA_source'] = df['source']
# Filter by ICA
df = pd.DataFrame('', index=np.arange(len(df_pred)), columns=['ICA'])
df_pred['ICA'] = df['ICA']
# Filter by reconstruction
df = filterReconstruction(df_data, settings)
df_pred['RECO'] = df['RECO']
# Predict CLASS
classes = ['CACS', 'CTA', 'NCS_CTA', 'NCS_CACS']
for i in range(len(df_pred)):
if i % 1000 == 0:
print('index:', i, '/', len(df_pred))
value=''
for c in classes:
if df_pred.loc[i, c]:
if value=='':
value = value + c
else:
value = value + '+' + c
if value == '':
value = 'UNDEFINED'
df_pred.loc[i, 'CLASS'] = value
# Save predictions
df_pred.to_pickle(settings['filepath_prediction'])
def updateRFClassification(folderpath_master, folderpath_master_before):
""" Update random forest classification
:param settings: Dictionary of settings
:type settings: dict
"""
date = folderpath_master.split('_')[-1]
folderpath_components = os.path.join(folderpath_master, 'discharge_components_' + date)
filepath_rfc = os.path.join(folderpath_components, 'discharge_rfc_' + date + '.xlsx')
folderpath_master_before_list = glob(folderpath_master_before + '/*master*')
folderpath_master_before_list = sortFolderpath(folderpath_master, folderpath_master_before_list)
filepathMasters = glob(folderpath_master_before_list[-2] + '/*process*.xlsx')
date_before = folderpath_master_before_list[-2].split('_')[-1]
df_master = pd.read_excel(filepathMasters[0], sheet_name='MASTER_' + date_before)
columns = ['RFCLabel', 'RFCClass', 'RFCConfidence']
df_rfc = pd.DataFrame('UNDEFINED', index=np.arange(len(df_master)), columns=columns)
df_rfc[columns] = df_master[columns]
df_rfc.to_excel(filepath_rfc)
def createManualSelection(settings):
""" Create manual selection columns
:param settings: Dictionary of settings
:type settings: dict
"""
print('Create manual selection')
#df_data = pd.read_excel(settings['filepath_data'], index_col=0)
df_data = pd.read_pickle(settings['filepath_data'])
df_manual0 = pd.DataFrame('UNDEFINED', index=np.arange(len(df_data)), columns=['ClassManualCorrection'])
df_manual1 = pd.DataFrame('', index=np.arange(len(df_data)), columns=['Comment'])
df_manual2 = pd.DataFrame('', index=np.arange(len(df_data)), columns=['Responsible Person'])
df_manual3 = pd.DataFrame('UNDEFINED', index=np.arange(len(df_data)), columns=['Image Quality'])
df_manual = pd.concat([df_manual0, df_manual1, df_manual2, df_manual3], axis=1)
#df_manual.to_excel(settings['filepath_manual'])
df_manual.to_pickle(settings['filepath_manual'])
def createTrackingTable(settings):
""" Create tracking table
:param settings: Dictionary of settings
:type settings: dict
"""
print('Create tracking table')
df_track = pd.DataFrame(columns=settings['columns_tracking'])
df_track.to_pickle(settings['filepath_master_track'])
# Update master
writer = pd.ExcelWriter(settings['filepath_master'], engine="openpyxl", mode="a")
# Remove sheet if already exist
sheet_name = 'TRACKING' + '_' + settings['date']
workbook = writer.book
sheetnames = workbook.sheetnames
if sheet_name in sheetnames:
sheet = workbook[sheet_name]
workbook.remove(sheet)
# Add patient to master
df_track.to_excel(writer, sheet_name=sheet_name)
writer.save()
print('Update tracking table')
# Read tracking table
df_tracking = pd.read_excel(settings['filepath_tracking'], 'tracking table')
df_tracking.replace(to_replace=[np.nan], value='', inplace=True)
df_track = pd.read_excel(settings['filepath_master'], 'TRACKING_' + settings['date'], index_col=0)
columns_track = df_track.columns
columns_tracking = df_tracking.columns
#columns_union = ['ProblemID', 'PatientID', 'Problem Summary', 'Problem']
columns_union = columns_track
if len(df_track)==0:
ProblemIDMax=-1
df_track = df_tracking[columns_union]
else:
ProblemIDMax = max([int(x) for x in list(df_track['ProblemID'])])
ProblemIDInt = 0
for index, row in df_tracking.iterrows():
ProblemID = row['ProblemID']
if not ProblemID == '':
index = df_track['ProblemID'][df_track['ProblemID'] == ProblemID].index[0]
for col in columns_union:
df_track.loc[index,col] = row[col]
else:
ProblemIDInt = ProblemIDMax + 1
ProblemIDMax = ProblemIDInt
row['ProblemID'] = str(ProblemIDInt).zfill(6)
row_new = pd.DataFrame('', index=[0], columns=columns_union)
for col in columns_union:
row_new.loc[0,col] = row[col]
df_track = df_track.append(row_new, ignore_index=True)
df_tracking.loc[index,'ProblemID'] = str(ProblemIDInt).zfill(6)
# Update master
writer = pd.ExcelWriter(settings['filepath_master'], engine="openpyxl", mode="a")
# Remove sheet if already exist
sheet_name = 'TRACKING' + '_' + settings['date']
workbook = writer.book
sheetnames = workbook.sheetnames
if sheet_name in sheetnames:
sheet = workbook[sheet_name]
workbook.remove(sheet)
# Add patient to master
df_track.to_excel(writer, sheet_name=sheet_name)
writer.save()
# Update tracking
writer = pd.ExcelWriter(settings['filepath_tracking'], engine="openpyxl", mode="a")
# Remove sheet if already exist
sheet_name = 'tracking table'
workbook = writer.book
sheetnames = workbook.sheetnames
if sheet_name in sheetnames:
sheet = workbook[sheet_name]
workbook.remove(sheet)
# Add patient to master
df_tracking.to_excel(writer, sheet_name=sheet_name, index=False)
writer.save()
def orderMasterData(df_master, settings):
""" Order columns of the master
:param settings: Dictionary of settings
:type settings: dict
"""
# Reoder columns
cols = df_master.columns.tolist()
cols_new = settings['columns_first'] + [x for x in cols if x not in settings['columns_first']]
df_master = df_master[cols_new]
df_master = df_master.sort_values(['PatientID', 'StudyInstanceUID', 'SeriesInstanceUID'], ascending = (True, True, True))
df_master.reset_index(inplace=True, drop=True)
return df_master
def mergeMaster(settings):
""" Merge master file
:param settings: Dictionary of settings
:type settings: dict
"""
print('Create master')
# Read tables
print('Read discharge_data')
df_data = pd.read_pickle(settings['filepath_data'])
print('Read discharge_pred')
df_pred = pd.read_pickle(settings['filepath_prediction'])
df_pred['CTA'] = df_pred['CTA'].astype('bool')
print('Read discharge_reco')
df_reco_load = pd.read_excel(settings['filepath_reco'], index_col=0)
df_reco = pd.DataFrame()
df_reco['RECO'] = df_reco_load['PredClass']
df_reco['RECO_PROP'] = df_reco_load['Prop']
print('Read discharge_rfc')
df_rfc = pd.read_pickle(settings['filepath_rfc'])
print('Read discharge_manual')
df_manual = pd.read_pickle(settings['filepath_manual'])
print('Read discharge_track')
print('Create discharge_master')
df_master = pd.concat([df_data, df_pred, df_rfc, df_manual, df_reco], axis=1)
#df_master = pd.concat([df_data, df_pred, df_rfc, df_manual], axis=1)
writer = | pd.ExcelWriter(settings['filepath_master'], engine="openpyxl", mode="w") | pandas.ExcelWriter |
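# Minimal illustration of pd.ExcelWriter with the openpyxl engine, as in the
# completion above: mode="w" creates/overwrites the workbook, while mode="a"
# (used elsewhere in this script) appends sheets to an existing file
# (toy data; the file name is hypothetical).
import pandas as pd

df = pd.DataFrame({"PatientID": ["P001"], "CLASS": ["CACS"]})
with pd.ExcelWriter("example_master.xlsx", engine="openpyxl", mode="w") as writer:
    df.to_excel(writer, sheet_name="MASTER_01012021", index=False)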
#!/home/sunnymarkliu/softwares/anaconda3/bin/python
# _*_ coding: utf-8 _*_
"""
@author: SunnyMarkLiu
@time : 17-12-22 7:23 PM
"""
from __future__ import absolute_import, division, print_function
import os
import sys
module_path = os.path.abspath(os.path.join('..'))
sys.path.append(module_path)
# remove warnings
import warnings
warnings.filterwarnings('ignore')
import datetime
import numpy as np
import pandas as pd
from pypinyin import lazy_pinyin
from sklearn.preprocessing import LabelEncoder
from conf.configure import Configure
from utils import data_utils
from tqdm import tqdm
def check_last_time_order_info(uid, userid_grouped, flag, check_name, last_time=1):
""" 最近的一次交易的具体信息 check_name """
if flag == 0:
return -1
df = userid_grouped[uid]
if df.shape[0] < last_time:
return -1
else:
return df.iloc[-last_time][check_name]
def pre_days_order_count(uid, userid_grouped, flag, days):
""" 往前 days 的 order 数量 """
if flag == 0:
return 0
df = userid_grouped[uid]
df = df.loc[df['days_from_now'] < days]
return df.shape[0]
def pre_days_checkname_diff_count(uid, userid_grouped, flag, days, check_name):
""" 往前 days 的 order 的不同 check_name 数量 """
if flag == 0:
return 0
df = userid_grouped[uid]
df = df.loc[df['days_from_now'] < days]
if df.shape[0] == 0:
return 0
else:
return len(df[check_name].unique())
def year_order_count(uid, userid_grouped, flag, year):
""" 2016年的 order 的不同 check_name 数量 """
if flag == 0:
return 0
df = userid_grouped[uid]
df = df.loc[df['order_year'] == year]
return df.shape[0]
def year_checkname_diff_count(uid, userid_grouped, flag, year, check_name):
""" year 的 order 的不同 check_name 数量 """
if flag == 0:
return 0
df = userid_grouped[uid]
df = df.loc[df['order_year'] == year]
if df.shape[0] == 0:
return 0
else:
return len(df[check_name].unique())
def year_order_month_count(uid, userid_grouped, flag, year):
""" 每年去了几个月份 """
if flag == 0:
return 0
df = userid_grouped[uid]
df = df.loc[df['order_year'] == year]
if df.shape[0] == 0:
return 0
else:
return len(df['order_month'].unique())
def year_order_month_most(uid, userid_grouped, flag, year):
""" 每年一个月去的最多的次数 """
if flag == 0:
return 0
df = userid_grouped[uid]
df = df.loc[df['order_year'] == year]
df = df.groupby(['order_month']).count()['orderTime'].reset_index()
if df.shape[0] == 0:
return 0
else:
return df['orderTime'].max()
def year_most_order_month(uid, userid_grouped, flag, year):
""" 每年去的最多次数的月份 """
if flag == 0:
return -1
df = userid_grouped[uid]
df = df.loc[df['order_year'] == year]
df = df.groupby(['order_month']).count()['orderTime'].reset_index()
if df.shape[0] == 0:
return -1
else:
return df.sort_values(by='orderTime', ascending=False)['order_month'].values[0]
def year_good_order_count(uid, userid_grouped, flag, year):
""" 每年精品订单数量 """
if flag == 0:
return 0
df = userid_grouped[uid]
df = df.loc[df['order_year'] == year]
return sum(df['orderType'])
def last_time_checkname_ratio(uid, userid_grouped, flag, check_name):
""" 最后一次 checkname 的占比 """
if flag == 0:
return 0
df = userid_grouped[uid]
last_check_name = df.iloc[-1][check_name]
last_count = df[check_name].tolist().count(last_check_name)
return 1.0 * last_count / df.shape[0]
def build_order_history_features(df, history):
features = pd.DataFrame({'userid': df['userid']})
df_ids = history['userid'].unique()
userid_grouped = dict(list(history.groupby('userid')))
# flag each row: 1 if the userid appears in the order-history table, otherwise 0
features['has_history_flag'] = features['userid'].map(lambda uid: uid in df_ids).astype(int)
print("基本特征")
# already extracted in the build_order_history_features2 function; redundant here
# orderType of the most recent order
# features['last_time_orderType'] = features.apply(lambda row: check_last_time_order_info(row['userid'], userid_grouped, row['has_history_flag'], 'orderType', 1), axis=1)
# orderType of the second-to-last order
# features['last_2_time_orderType'] = features.apply(lambda row: check_last_time_order_info(row['userid'], userid_grouped, row['has_history_flag'], 'orderType', 2), axis=1)
# features['last_3_time_orderType'] = features.apply(lambda row: check_last_time_order_info(row['userid'], userid_grouped, row['has_history_flag'], 'orderType',3), axis=1)
    # days_from_now of the second / third most recent order
# features['last_2_time_days_from_now'] = features.apply(lambda row: check_last_time_order_info(row['userid'], userid_grouped, row['has_history_flag'], 'days_from_now', 2), axis=1)
# features['last_3_time_days_from_now'] = features.apply(lambda row: check_last_time_order_info(row['userid'], userid_grouped, row['has_history_flag'], 'days_from_now', 3), axis=1)
    # days_from_now, order_year, order_month, order_day, order_weekofyear, order_weekday of the most recent order
features['last_time_days_from_now'] = features.apply(lambda row: check_last_time_order_info(row['userid'], userid_grouped, row['has_history_flag'], 'days_from_now'), axis=1)
features['last_time_order_year'] = features.apply(lambda row: check_last_time_order_info(row['userid'], userid_grouped, row['has_history_flag'], 'order_year'), axis=1)
features['last_time_order_month'] = features.apply(lambda row: check_last_time_order_info(row['userid'], userid_grouped, row['has_history_flag'], 'order_month'), axis=1)
features['last_time_order_day'] = features.apply(lambda row: check_last_time_order_info(row['userid'], userid_grouped, row['has_history_flag'], 'order_day'), axis=1)
features['last_time_order_weekofyear'] = features.apply(lambda row: check_last_time_order_info(row['userid'], userid_grouped, row['has_history_flag'], 'order_weekofyear'), axis=1)
features['last_time_order_weekday'] = features.apply(lambda row: check_last_time_order_info(row['userid'], userid_grouped, row['has_history_flag'], 'order_weekday'), axis=1)
features['last_time_continent'] = features.apply(lambda row: check_last_time_order_info(row['userid'], userid_grouped, row['has_history_flag'], 'continent'), axis=1)
features['last_time_country'] = features.apply(lambda row: check_last_time_order_info(row['userid'], userid_grouped, row['has_history_flag'], 'country'), axis=1)
features['last_time_city'] = features.apply(lambda row: check_last_time_order_info(row['userid'], userid_grouped, row['has_history_flag'], 'city'), axis=1)
print("计数特征")
# 往前 90days 的计数特征
features['pre_90days_order_count'] = features.apply(lambda row: pre_days_order_count(row['userid'], userid_grouped, row['has_history_flag'], 90), axis=1)
features['pre_90days_order_continent_count'] = features.apply(lambda row: pre_days_checkname_diff_count(row['userid'], userid_grouped, row['has_history_flag'], 90, 'continent'), axis=1)
features['pre_90days_order_country_count'] = features.apply(lambda row: pre_days_checkname_diff_count(row['userid'], userid_grouped, row['has_history_flag'], 90, 'country'), axis=1)
features['pre_90days_order_city_count'] = features.apply(lambda row: pre_days_checkname_diff_count(row['userid'], userid_grouped, row['has_history_flag'], 90, 'city'), axis=1)
features['2016_order_count'] = features.apply(lambda row: year_order_count(row['userid'], userid_grouped, row['has_history_flag'], 2016), axis=1)
features['2017_order_count'] = features.apply(lambda row: year_order_count(row['userid'], userid_grouped, row['has_history_flag'], 2017), axis=1)
# features['order_count_diff'] = features['2016_order_count'] - features['2017_order_count']
# features['2016_order_continent_count'] = features.apply(lambda row: year_checkname_diff_count(row['userid'], userid_grouped, row['has_history_flag'], 2016, 'continent'), axis=1)
# features['2016_order_country_count'] = features.apply(lambda row: year_checkname_diff_count(row['userid'], userid_grouped, row['has_history_flag'], 2016, 'country'), axis=1)
# features['2016_order_city_count'] = features.apply(lambda row: year_checkname_diff_count(row['userid'], userid_grouped, row['has_history_flag'], 2016, 'city'), axis=1)
features['2017_order_continent_count'] = features.apply(lambda row: year_checkname_diff_count(row['userid'], userid_grouped, row['has_history_flag'], 2017, 'continent'), axis=1)
features['2017_order_country_count'] = features.apply(lambda row: year_checkname_diff_count(row['userid'], userid_grouped, row['has_history_flag'], 2017, 'country'), axis=1)
features['2017_order_city_count'] = features.apply(lambda row: year_checkname_diff_count(row['userid'], userid_grouped, row['has_history_flag'], 2017, 'city'), axis=1)
    # whether the user has orders in both 2016 and 2017
features['both_year_has_order'] = features.apply(lambda row: (row['2016_order_count'] > 0) & (row['2017_order_count'] > 0), axis=1).astype(int)
    # number of distinct months with orders per year
features['2016_order_month_count'] = features.apply(lambda row: year_order_month_count(row['userid'], userid_grouped, row['has_history_flag'], 2016), axis=1)
features['2017_order_month_count'] = features.apply(lambda row: year_order_month_count(row['userid'], userid_grouped, row['has_history_flag'], 2017), axis=1)
    # maximum number of orders in a single month per year
# features['2016_order_month_most'] = features.apply(lambda row: year_order_month_most(row['userid'], userid_grouped, row['has_history_flag'], 2016), axis=1)
# features['2017_most_order_month'] = features.apply(lambda row: year_order_month_most(row['userid'], userid_grouped, row['has_history_flag'], 2017), axis=1)
    # month with the most orders per year
# features['2016_most_order_month'] = features.apply(lambda row: year_most_order_month(row['userid'], userid_grouped, row['has_history_flag'], 2016), axis=1)
# features['2017_most_order_month'] = features.apply(lambda row: year_most_order_month(row['userid'], userid_grouped, row['has_history_flag'], 2017), axis=1)
    print('ratio features')
    # total orders, premium orders and premium-order ratio per user
features['2016_good_order_count'] = features.apply(lambda row: year_good_order_count(row['userid'], userid_grouped, row['has_history_flag'], 2016), axis=1)
features['2016_good_order_ratio'] = features.apply(lambda row: row['2016_good_order_count'] / row['2016_order_count'] if row['2016_order_count'] != 0 else 0, axis=1)
features['2017_good_order_count'] = features.apply(lambda row: year_good_order_count(row['userid'], userid_grouped, row['has_history_flag'], 2017), axis=1)
features['2017_good_order_ratio'] = features.apply(lambda row: row['2017_good_order_count'] / row['2017_order_count'] if row['2017_order_count'] != 0 else 0, axis=1)
features['total_order_count'] = features['2016_order_count'] + features['2017_order_count']
features['total_good_order_count'] = features['2016_good_order_count'] + features['2017_good_order_count']
features['total_good_order_ratio'] = features.apply(lambda row: row['total_good_order_count'] / row['total_order_count'] if row['total_order_count'] != 0 else 0, axis=1)
    # has_good_order is a strong feature!!
features['has_good_order'] = (features['total_good_order_ratio'] > 0).astype(int)
features.drop(['2016_good_order_count', '2017_good_order_count', 'total_order_count', 'total_good_order_count'], axis=1, inplace=True)
    # CV gets slightly worse, by less than 1e-4
    # print('share of orders matching the check_name of the last order')  # (not tested)
# features['last_time_order_year_ratio'] = features.apply(lambda row: last_time_checkname_ratio(row['userid'], userid_grouped, row['has_history_flag'], 'order_year'), axis=1)
# features['last_time_order_month_ratio'] = features.apply(lambda row: last_time_checkname_ratio(row['userid'], userid_grouped, row['has_history_flag'], 'order_month'), axis=1)
# features['last_time_order_day_ratio'] = features.apply(lambda row: last_time_checkname_ratio(row['userid'], userid_grouped, row['has_history_flag'], 'order_day'), axis=1)
# features['last_time_order_weekofyear_ratio'] = features.apply(lambda row: last_time_checkname_ratio(row['userid'], userid_grouped, row['has_history_flag'], 'order_weekofyear'), axis=1)
# features['last_time_order_weekday_ratio'] = features.apply(lambda row: last_time_checkname_ratio(row['userid'], userid_grouped, row['has_history_flag'], 'order_weekday'), axis=1)
# features['last_time_continent_ratio'] = features.apply(lambda row: last_time_checkname_ratio(row['userid'], userid_grouped, row['has_history_flag'], 'continent'), axis=1)
# features['last_time_country_ratio'] = features.apply(lambda row: last_time_checkname_ratio(row['userid'], userid_grouped, row['has_history_flag'], 'country'), axis=1)
# features['last_time_city_ratio'] = features.apply(lambda row: last_time_checkname_ratio(row['userid'], userid_grouped, row['has_history_flag'], 'city'), axis=1)
return features
def order_last_num(order):
""" 按时间倒序对订单排序 """
users = list(set(order['userid']))
order_c = order.copy()
order_c['order_number'] = 1
for i in tqdm(range(len(users))):
slit_df = order_c[order_c['userid'] == users[i]]
order_c.loc[slit_df.index, 'order_number'] = range(slit_df.shape[0],0,-1)
return order_c
def days_since_prior_order(order):
""" 用户两次订单之间的时间间隔 """
users = list(set(order['userid']))
order_c = order.copy()
order_c['days_since_prior_order'] = np.nan
for i in tqdm(range(len(users))):
slit_df = order_c[order_c['userid'] == users[i]]
time_shift = slit_df['orderTime'].shift(1)
time_series = pd.Series(slit_df['orderTime'].values - time_shift.values).map(lambda x: x/np.timedelta64(1, 's'))/(24*3600.0)
order_c.loc[slit_df.index, 'days_since_prior_order'] = time_series.values
return order_c
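# Illustrative sketch (added for clarity, not part of the original script): the same per-user
# gap computed by days_since_prior_order can be expressed without the explicit Python loop by
# using groupby().diff() on the time-sorted orders. Toy data only.
def _example_days_since_prior_order_vectorized():
    toy = pd.DataFrame({
        'userid': [1, 1, 1, 2, 2],
        'orderTime': pd.to_datetime(['2017-01-01', '2017-01-05', '2017-02-01',
                                     '2017-03-01', '2017-03-11']),
    }).sort_values(['userid', 'orderTime'])
    # timedelta between consecutive orders of the same user, converted to days
    toy['days_since_prior_order'] = (
        toy.groupby('userid')['orderTime'].diff().dt.total_seconds() / (24 * 3600.0)
    )
    return toy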
def build_time_category_encode(history):
history['orderTime'] = pd.to_datetime(history['orderTime'], unit='s')
    # the last day in both the train and test sets is 2017-09-11
now = datetime.datetime(2017, 9, 12)
history['days_from_now'] = history['orderTime'].map(lambda order: (now - order).days)
history['order_year'] = history['orderTime'].dt.year
history['order_month'] = history['orderTime'].dt.month
history['order_year_month'] = history['order_year'] * 100 + history['order_month']
history['order_day'] = history['orderTime'].dt.day
history['order_weekofyear'] = history['orderTime'].dt.weekofyear
history['order_weekday'] = history['orderTime'].dt.weekday
history['order_hour'] = history['orderTime'].dt.hour
history['order_minute'] = history['orderTime'].dt.minute
    history['order_is_weekend'] = history['order_weekday'].map(lambda d: 1 if (d == 5) | (d == 6) else 0)  # pandas weekday: Monday=0 ... Sunday=6
history['order_week_hour'] = history['order_weekday'] * 24 + history['order_hour']
    # sort by order time
history = history.sort_values(by='orderTime')
history.reset_index(drop=True, inplace=True)
history = order_last_num(history)
history = days_since_prior_order(history)
history['continent'] = history['continent'].map(lambda c: '_'.join(lazy_pinyin(c)) if c == c else 'None')
history['country'] = history['country'].map(lambda c: '_'.join(lazy_pinyin(c)) if c == c else 'None')
history['city'] = history['city'].map(lambda c: '_'.join(lazy_pinyin(c)) if c == c else 'None')
le = LabelEncoder()
le.fit(history['continent'].values)
history['continent'] = le.transform(history['continent'])
le = LabelEncoder()
le.fit(history['country'].values)
history['country'] = le.transform(history['country'])
le = LabelEncoder()
le.fit(history['city'].values)
history['city'] = le.transform(history['city'])
return history
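# Illustrative sketch (added for clarity, not part of the original script): shows what the
# pinyin + LabelEncoder step in build_time_category_encode does to the location columns.
# The sample city names are made up.
def _example_pinyin_label_encode():
    cities = pd.Series(['北京', '上海', np.nan, '北京'])
    as_pinyin = cities.map(lambda c: '_'.join(lazy_pinyin(c)) if c == c else 'None')  # NaN -> 'None'
    le = LabelEncoder()
    codes = le.fit_transform(as_pinyin.values)  # e.g. 'bei_jing' and 'shang_hai' become integer codes
    return pd.DataFrame({'city': cities, 'pinyin': as_pinyin, 'code': codes})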
def father_son_order_statistic(uid, userid_grouped, flag):
if flag == 0:
return -1, -1
df = userid_grouped[uid]
    if len(set(df['orderTime'])) < df.shape[0]:  # duplicate timestamps indicate parent/child (split) orders
start = -1
count = 0
        for i in range(df.shape[0] - 1):
if df['orderTime'].iat[i] == df['orderTime'].iat[i+1]:
if count == 0:
start = i
count += 1
df = df.iloc[start: start+count]
if df.shape[0] == 0:
return -1, -1
else:
order_type0_count = df[df['orderType'] == 0].shape[0]
            order_type1_count = df[df['orderType'] == 1].shape[0]
order_type0_ratio = 1.0* order_type0_count / df.shape[0]
order_type1_ratio = 1.0* order_type1_count / df.shape[0]
return order_type0_ratio, order_type1_ratio
else:
return -1, -1
def year_first_last_order_history_type(uid, history_grouped, flag, year=2017):
""" 每年第一次和最后一次订单的 ordertype """
if flag == 0:
return -1, -1
df = history_grouped[uid]
df = df[df['order_year'] == year]
if df.shape[0] < 1:
return -1, -1
first1_ordertype = df['orderType'].iat[0]
last1_ordertype = df['orderType'].iat[-1]
return first1_ordertype, last1_ordertype
def build_order_history_features2(df, history):
features = pd.DataFrame({'userid': df['userid']})
history_uids = history['userid'].unique()
history_grouped = dict(list(history.groupby('userid')))
    # Flag each user: 1 if the userid appears in the order history, 0 otherwise
features['has_history_flag'] = features['userid'].map(lambda uid: uid in history_uids).astype(int)
    # # parent/child (split) order statistics
# features['father_son_order_statistic'] = features.apply(lambda row: father_son_order_statistic(row['userid'], history_grouped, row['has_history_flag']), axis=1)
# # features['has_father_son_order'] = features['father_son_order_statistic'].map(lambda x: x[0])
# # features['father_son_order_order_type0_count'] = features['father_son_order_statistic'].map(lambda x: x[1])
# # features['father_son_order_order_type1_count'] = features['father_son_order_statistic'].map(lambda x: x[2])
# features['father_son_order_order_type0_ratio'] = features['father_son_order_statistic'].map(lambda x: x[0])
# features['father_son_order_order_type1_ratio'] = features['father_son_order_statistic'].map(lambda x: x[1])
# del features['father_son_order_statistic']
    print('strong feature: 2016_2017_first_last_ordertype')
features['2017_first_last_order_history_type'] = features.apply(lambda row: year_first_last_order_history_type(row['userid'], history_grouped, row['has_history_flag'], year=2017), axis=1)
features['2017_first_order_history_type'] = features['2017_first_last_order_history_type'].map(lambda x: x[0])
features['2017_last_order_history_type'] = features['2017_first_last_order_history_type'].map(lambda x: x[1])
features['2016_first_last_order_history_type'] = features.apply(lambda row: year_first_last_order_history_type(row['userid'], history_grouped, row['has_history_flag'], year=2016), axis=1)
features['2016_first_order_history_type'] = features['2016_first_last_order_history_type'].map(lambda x: x[0])
features['2016_last_order_history_type'] = features['2016_first_last_order_history_type'].map(lambda x: x[1])
features['2016_2017_first_last_ordertype'] = ((features['2016_first_order_history_type'] == 1) | (features['2017_first_order_history_type'] == 1) |
(features['2016_last_order_history_type'] == 1) | (features['2017_last_order_history_type'] == 1)).astype(int)
features.drop(['2017_first_last_order_history_type', '2017_first_order_history_type', '2017_last_order_history_type',
'2016_first_last_order_history_type', '2016_first_order_history_type', '2016_last_order_history_type'], axis=1, inplace=True)
    print('per-year, per-month order statistics')
df = history.groupby(by=['userid', 'order_year_month']).count().reset_index()[['userid', 'order_year_month', 'orderid']].rename(columns={'orderid': 'year_month_order_count'})
df = df.pivot('userid', 'order_year_month', 'year_month_order_count').reset_index().fillna(0)
df.columns = df.columns.astype(str)
df.drop(['201709', '201708', '201707', '201701', '201705'], axis=1, inplace=True)
features = features.merge(df, on='userid', how='left')
del features['has_history_flag']
return features
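# Illustrative sketch (added for clarity, not part of the original script): the per-user
# year/month pivot used in build_order_history_features2, shown on a tiny hand-made history.
def _example_year_month_pivot():
    toy = pd.DataFrame({
        'userid': [1, 1, 2],
        'order_year_month': [201703, 201703, 201704],
        'orderid': [10, 11, 12],
    })
    counts = (toy.groupby(['userid', 'order_year_month']).count().reset_index()
              [['userid', 'order_year_month', 'orderid']]
              .rename(columns={'orderid': 'year_month_order_count'}))
    # one row per user, one column per year-month, cell = number of orders
    wide = counts.pivot(index='userid', columns='order_year_month',
                        values='year_month_order_count').reset_index().fillna(0)
    wide.columns = wide.columns.astype(str)
    return wide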
def history_city_hot_statistic(uid, history_grouped, flag, hot_df, column):
if flag == 0:
return -1, -1, -1, -1
df = history_grouped[uid]
citys = df[column].values
hots = []
for c in citys:
hots.append(hot_df[hot_df[column] == c]['hot'].values[0])
hots = np.array(hots)
return np.mean(hots), np.max(hots), np.min(hots), np.std(hots)
def last_order_location_hot(uid, history_grouped, flag, hot_df, column):
if flag == 0:
return -1
df = history_grouped[uid]
last_hot = hot_df[hot_df[column] == df[column].iat[-1]]['hot'].values[0]
return last_hot
def build_order_history_features3(df, orderHistory, history):
""" 热度分析 """
city_hot = orderHistory.groupby(['city']).count()['userid'].reset_index().rename(columns={'userid': 'hot'})
city_hot['hot'] = city_hot['hot'].astype(float) / sum(city_hot['hot'])
country_hot = orderHistory.groupby(['country']).count()['userid'].reset_index().rename(columns={'userid': 'hot'})
country_hot['hot'] = country_hot['hot'].astype(float) / sum(country_hot['hot'])
continent_hot = orderHistory.groupby(['continent']).count()['userid'].reset_index().rename(columns={'userid': 'hot'})
continent_hot['hot'] = continent_hot['hot'].astype(float) / sum(continent_hot['hot'])
features = pd.DataFrame({'userid': df['userid']})
history_uids = history['userid'].unique()
history_grouped = dict(list(history.groupby('userid')))
    # Flag each user: 1 if the userid appears in the order history, 0 otherwise
features['has_history_flag'] = features['userid'].map(lambda uid: uid in history_uids).astype(int)
# features['history_city_hot_statistic'] = features.apply(lambda row: history_city_hot_statistic(row['userid'], history_grouped, row['has_history_flag'], city_hot, 'city'), axis=1)
# features['history_city_hot_mean'] = features['history_city_hot_statistic'].map(lambda x:x[0])
# features['history_city_hot_max'] = features['history_city_hot_statistic'].map(lambda x:x[1])
# features['history_city_hot_min'] = features['history_city_hot_statistic'].map(lambda x:x[2])
# features['history_city_hot_std'] = features['history_city_hot_statistic'].map(lambda x:x[3])
# del features['history_city_hot_statistic']
# features['history_country_hot_statistic'] = features.apply(lambda row: history_city_hot_statistic(row['userid'], history_grouped, row['has_history_flag'], country_hot, 'country'), axis=1)
# features['history_country_hot_mean'] = features['history_country_hot_statistic'].map(lambda x:x[0])
# features['history_country_hot_max'] = features['history_country_hot_statistic'].map(lambda x:x[1])
# features['history_country_hot_min'] = features['history_country_hot_statistic'].map(lambda x:x[2])
# features['history_country_hot_std'] = features['history_country_hot_statistic'].map(lambda x:x[3])
# del features['history_country_hot_statistic']
# features['history_continent_hot_statistic'] = features.apply(lambda row: history_city_hot_statistic(row['userid'], history_grouped, row['has_history_flag'], continent_hot, 'continent'), axis=1)
# features['history_continent_hot_mean'] = features['history_continent_hot_statistic'].map(lambda x:x[0])
# features['history_continent_hot_max'] = features['history_continent_hot_statistic'].map(lambda x:x[1])
# features['history_continent_hot_min'] = features['history_continent_hot_statistic'].map(lambda x:x[2])
# features['history_continent_hot_std'] = features['history_continent_hot_statistic'].map(lambda x:x[3])
# del features['history_continent_hot_statistic']
    # only last_order_city_hot improved the online A-board (public leaderboard) score
features['last_order_city_hot'] = features.apply(lambda row: last_order_location_hot(row['userid'], history_grouped, row['has_history_flag'], city_hot, 'city'), axis=1)
# features['last_order_country_hot'] = features.apply(lambda row: last_order_location_hot(row['userid'], history_grouped, row['has_history_flag'], country_hot, 'country'), axis=1)
# features['last_order_continent_hot'] = features.apply(lambda row: last_order_location_hot(row['userid'], history_grouped, row['has_history_flag'], continent_hot, 'continent'), axis=1)
del features['has_history_flag']
return features
def multi_order_has_good_order(uid, history_grouped, flag):
""" 多次订单并且有精品的老用户 """
if flag == 0:
return 0
df = history_grouped[uid]
if (df.shape[0] > 1) and sum(df['orderType']) > 0:
return 1
return 0
def build_order_history_features4(df, history):
features = pd.DataFrame({'userid': df['userid']})
history_uids = history['userid'].unique()
history_grouped = dict(list(history.groupby('userid')))
    # Flag each user: 1 if the userid appears in the order history, 0 otherwise
features['has_history_flag'] = features['userid'].map(lambda uid: uid in history_uids).astype(int)
    # returning users with multiple orders and at least one premium order
features['multi_order_has_good_order'] = features.apply(lambda row: multi_order_has_good_order(row['userid'], history_grouped, row['has_history_flag']), axis=1)
    # (not implemented) users with multiple orders but no premium order
del features['has_history_flag']
return features
def city_info(df):
""" 城市订单特征 """
df_select = df.groupby(['city']).size().reset_index()
df_select.columns = ['city', 'city_order_num']
df_select['city_order_ratio'] = df_select['city_order_num'] / (1.0 * df.shape[0])
df_select_1 = df[df['orderType'] == 1].groupby(['city']).size().reset_index()
df_select_1.columns = ['city', 'city_high_order_num']
df_select = pd.merge(df_select, df_select_1, on='city', how='left')
df_select['city_high_order_num'] = df_select['city_high_order_num'].fillna(0).astype(int)
df_select['city_high_order_ratio'] = df_select['city_high_order_num'] / (1.0 * df_select['city_order_num'])
del df_select_1
return df_select
def country_info(df):
""" 国家订单特征 """
df_select = df.groupby(['country']).size().reset_index()
df_select.columns = ['country', 'country_order_num']
df_select['country_order_ratio'] = df_select['country_order_num'] / (1.0 * df.shape[0])
df_select_1 = df[df['orderType'] == 1].groupby(['country']).size().reset_index()
df_select_1.columns = ['country', 'country_high_order_num']
df_select = | pd.merge(df_select, df_select_1, on='country', how='left') | pandas.merge |
from collections import (
abc,
deque,
)
from decimal import Decimal
from warnings import catch_warnings
import numpy as np
import pytest
import pandas as pd
from pandas import (
DataFrame,
Index,
MultiIndex,
PeriodIndex,
Series,
concat,
date_range,
)
import pandas._testing as tm
from pandas.core.arrays import SparseArray
from pandas.core.construction import create_series_with_explicit_dtype
from pandas.tests.extension.decimal import to_decimal
class TestConcatenate:
def test_append_concat(self):
# GH#1815
d1 = date_range("12/31/1990", "12/31/1999", freq="A-DEC")
d2 = date_range("12/31/2000", "12/31/2009", freq="A-DEC")
s1 = Series(np.random.randn(10), d1)
s2 = Series(np.random.randn(10), d2)
s1 = s1.to_period()
s2 = s2.to_period()
# drops index
result = concat([s1, s2])
assert isinstance(result.index, PeriodIndex)
assert result.index[0] == s1.index[0]
def test_concat_copy(self, using_array_manager):
df = DataFrame(np.random.randn(4, 3))
df2 = DataFrame(np.random.randint(0, 10, size=4).reshape(4, 1))
df3 = DataFrame({5: "foo"}, index=range(4))
# These are actual copies.
result = concat([df, df2, df3], axis=1, copy=True)
for arr in result._mgr.arrays:
assert arr.base is None
# These are the same.
result = concat([df, df2, df3], axis=1, copy=False)
for arr in result._mgr.arrays:
if arr.dtype.kind == "f":
assert arr.base is df._mgr.arrays[0].base
elif arr.dtype.kind in ["i", "u"]:
assert arr.base is df2._mgr.arrays[0].base
elif arr.dtype == object:
if using_array_manager:
# we get the same array object, which has no base
assert arr is df3._mgr.arrays[0]
else:
assert arr.base is not None
# Float block was consolidated.
df4 = DataFrame(np.random.randn(4, 1))
result = concat([df, df2, df3, df4], axis=1, copy=False)
for arr in result._mgr.arrays:
if arr.dtype.kind == "f":
if using_array_manager:
# this is a view on some array in either df or df4
assert any(
np.shares_memory(arr, other)
for other in df._mgr.arrays + df4._mgr.arrays
)
else:
# the block was consolidated, so we got a copy anyway
assert arr.base is None
elif arr.dtype.kind in ["i", "u"]:
assert arr.base is df2._mgr.arrays[0].base
elif arr.dtype == object:
# this is a view on df3
assert any(np.shares_memory(arr, other) for other in df3._mgr.arrays)
def test_concat_with_group_keys(self):
# axis=0
df = DataFrame(np.random.randn(3, 4))
df2 = DataFrame(np.random.randn(4, 4))
result = concat([df, df2], keys=[0, 1])
exp_index = MultiIndex.from_arrays(
[[0, 0, 0, 1, 1, 1, 1], [0, 1, 2, 0, 1, 2, 3]]
)
expected = DataFrame(np.r_[df.values, df2.values], index=exp_index)
tm.assert_frame_equal(result, expected)
result = concat([df, df], keys=[0, 1])
exp_index2 = MultiIndex.from_arrays([[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]])
expected = DataFrame(np.r_[df.values, df.values], index=exp_index2)
tm.assert_frame_equal(result, expected)
# axis=1
df = DataFrame(np.random.randn(4, 3))
df2 = DataFrame(np.random.randn(4, 4))
result = concat([df, df2], keys=[0, 1], axis=1)
expected = DataFrame(np.c_[df.values, df2.values], columns=exp_index)
tm.assert_frame_equal(result, expected)
result = concat([df, df], keys=[0, 1], axis=1)
expected = DataFrame(np.c_[df.values, df.values], columns=exp_index2)
tm.assert_frame_equal(result, expected)
def test_concat_keys_specific_levels(self):
df = DataFrame(np.random.randn(10, 4))
pieces = [df.iloc[:, [0, 1]], df.iloc[:, [2]], df.iloc[:, [3]]]
level = ["three", "two", "one", "zero"]
result = concat(
pieces,
axis=1,
keys=["one", "two", "three"],
levels=[level],
names=["group_key"],
)
tm.assert_index_equal(result.columns.levels[0], Index(level, name="group_key"))
tm.assert_index_equal(result.columns.levels[1], Index([0, 1, 2, 3]))
assert result.columns.names == ["group_key", None]
@pytest.mark.parametrize("mapping", ["mapping", "dict"])
def test_concat_mapping(self, mapping, non_dict_mapping_subclass):
constructor = dict if mapping == "dict" else non_dict_mapping_subclass
frames = constructor(
{
"foo": DataFrame(np.random.randn(4, 3)),
"bar": DataFrame(np.random.randn(4, 3)),
"baz": DataFrame(np.random.randn(4, 3)),
"qux": DataFrame(np.random.randn(4, 3)),
}
)
sorted_keys = list(frames.keys())
result = concat(frames)
expected = concat([frames[k] for k in sorted_keys], keys=sorted_keys)
tm.assert_frame_equal(result, expected)
result = concat(frames, axis=1)
expected = concat([frames[k] for k in sorted_keys], keys=sorted_keys, axis=1)
tm.assert_frame_equal(result, expected)
keys = ["baz", "foo", "bar"]
result = concat(frames, keys=keys)
expected = concat([frames[k] for k in keys], keys=keys)
tm.assert_frame_equal(result, expected)
def test_concat_keys_and_levels(self):
df = DataFrame(np.random.randn(1, 3))
df2 = DataFrame(np.random.randn(1, 4))
levels = [["foo", "baz"], ["one", "two"]]
names = ["first", "second"]
result = concat(
[df, df2, df, df2],
keys=[("foo", "one"), ("foo", "two"), ("baz", "one"), ("baz", "two")],
levels=levels,
names=names,
)
expected = concat([df, df2, df, df2])
exp_index = MultiIndex(
levels=levels + [[0]],
codes=[[0, 0, 1, 1], [0, 1, 0, 1], [0, 0, 0, 0]],
names=names + [None],
)
expected.index = exp_index
tm.assert_frame_equal(result, expected)
# no names
result = concat(
[df, df2, df, df2],
keys=[("foo", "one"), ("foo", "two"), ("baz", "one"), ("baz", "two")],
levels=levels,
)
assert result.index.names == (None,) * 3
# no levels
result = concat(
[df, df2, df, df2],
keys=[("foo", "one"), ("foo", "two"), ("baz", "one"), ("baz", "two")],
names=["first", "second"],
)
assert result.index.names == ("first", "second", None)
tm.assert_index_equal(
result.index.levels[0], Index(["baz", "foo"], name="first")
)
def test_concat_keys_levels_no_overlap(self):
# GH #1406
df = DataFrame(np.random.randn(1, 3), index=["a"])
df2 = DataFrame(np.random.randn(1, 4), index=["b"])
msg = "Values not found in passed level"
with pytest.raises(ValueError, match=msg):
concat([df, df], keys=["one", "two"], levels=[["foo", "bar", "baz"]])
msg = "Key one not in level"
with pytest.raises(ValueError, match=msg):
concat([df, df2], keys=["one", "two"], levels=[["foo", "bar", "baz"]])
def test_crossed_dtypes_weird_corner(self):
columns = ["A", "B", "C", "D"]
df1 = DataFrame(
{
"A": np.array([1, 2, 3, 4], dtype="f8"),
"B": np.array([1, 2, 3, 4], dtype="i8"),
"C": np.array([1, 2, 3, 4], dtype="f8"),
"D": np.array([1, 2, 3, 4], dtype="i8"),
},
columns=columns,
)
df2 = DataFrame(
{
"A": np.array([1, 2, 3, 4], dtype="i8"),
"B": np.array([1, 2, 3, 4], dtype="f8"),
"C": np.array([1, 2, 3, 4], dtype="i8"),
"D": np.array([1, 2, 3, 4], dtype="f8"),
},
columns=columns,
)
appended = df1.append(df2, ignore_index=True)
expected = DataFrame(
np.concatenate([df1.values, df2.values], axis=0), columns=columns
)
tm.assert_frame_equal(appended, expected)
df = DataFrame(np.random.randn(1, 3), index=["a"])
df2 = DataFrame(np.random.randn(1, 4), index=["b"])
result = concat([df, df2], keys=["one", "two"], names=["first", "second"])
assert result.index.names == ("first", "second")
def test_with_mixed_tuples(self, sort):
# 10697
# columns have mixed tuples, so handle properly
df1 = DataFrame({"A": "foo", ("B", 1): "bar"}, index=range(2))
df2 = DataFrame({"B": "foo", ("B", 1): "bar"}, index=range(2))
# it works
concat([df1, df2], sort=sort)
def test_concat_mixed_objs(self):
# concat mixed series/frames
        # GH#2385
# axis 1
index = date_range("01-Jan-2013", periods=10, freq="H")
arr = np.arange(10, dtype="int64")
s1 = Series(arr, index=index)
s2 = Series(arr, index=index)
df = DataFrame(arr.reshape(-1, 1), index=index)
expected = DataFrame(
np.repeat(arr, 2).reshape(-1, 2), index=index, columns=[0, 0]
)
result = concat([df, df], axis=1)
tm.assert_frame_equal(result, expected)
expected = DataFrame(
np.repeat(arr, 2).reshape(-1, 2), index=index, columns=[0, 1]
)
result = concat([s1, s2], axis=1)
tm.assert_frame_equal(result, expected)
expected = DataFrame(
np.repeat(arr, 3).reshape(-1, 3), index=index, columns=[0, 1, 2]
)
result = concat([s1, s2, s1], axis=1)
tm.assert_frame_equal(result, expected)
expected = DataFrame(
np.repeat(arr, 5).reshape(-1, 5), index=index, columns=[0, 0, 1, 2, 3]
)
result = concat([s1, df, s2, s2, s1], axis=1)
tm.assert_frame_equal(result, expected)
# with names
s1.name = "foo"
expected = DataFrame(
np.repeat(arr, 3).reshape(-1, 3), index=index, columns=["foo", 0, 0]
)
result = concat([s1, df, s2], axis=1)
tm.assert_frame_equal(result, expected)
s2.name = "bar"
expected = DataFrame(
np.repeat(arr, 3).reshape(-1, 3), index=index, columns=["foo", 0, "bar"]
)
result = concat([s1, df, s2], axis=1)
tm.assert_frame_equal(result, expected)
# ignore index
expected = DataFrame(
np.repeat(arr, 3).reshape(-1, 3), index=index, columns=[0, 1, 2]
)
result = concat([s1, df, s2], axis=1, ignore_index=True)
tm.assert_frame_equal(result, expected)
# axis 0
expected = DataFrame(
np.tile(arr, 3).reshape(-1, 1), index=index.tolist() * 3, columns=[0]
)
result = concat([s1, df, s2])
tm.assert_frame_equal(result, expected)
expected = DataFrame(np.tile(arr, 3).reshape(-1, 1), columns=[0])
result = concat([s1, df, s2], ignore_index=True)
tm.assert_frame_equal(result, expected)
    def test_dtype_coercion(self):
# 12411
df = DataFrame({"date": [pd.Timestamp("20130101").tz_localize("UTC"), pd.NaT]})
result = concat([df.iloc[[0]], df.iloc[[1]]])
tm.assert_series_equal(result.dtypes, df.dtypes)
# 12045
import datetime
df = DataFrame(
{"date": [datetime.datetime(2012, 1, 1), datetime.datetime(1012, 1, 2)]}
)
result = concat([df.iloc[[0]], df.iloc[[1]]])
tm.assert_series_equal(result.dtypes, df.dtypes)
# 11594
df = DataFrame({"text": ["some words"] + [None] * 9})
result = concat([df.iloc[[0]], df.iloc[[1]]])
tm.assert_series_equal(result.dtypes, df.dtypes)
def test_concat_single_with_key(self):
df = DataFrame(np.random.randn(10, 4))
result = concat([df], keys=["foo"])
expected = concat([df, df], keys=["foo", "bar"])
tm.assert_frame_equal(result, expected[:10])
def test_concat_no_items_raises(self):
with pytest.raises(ValueError, match="No objects to concatenate"):
concat([])
def test_concat_exclude_none(self):
df = DataFrame(np.random.randn(10, 4))
pieces = [df[:5], None, None, df[5:]]
result = concat(pieces)
tm.assert_frame_equal(result, df)
with pytest.raises(ValueError, match="All objects passed were None"):
concat([None, None])
def test_concat_keys_with_none(self):
# #1649
df0 = DataFrame([[10, 20, 30], [10, 20, 30], [10, 20, 30]])
result = concat({"a": None, "b": df0, "c": df0[:2], "d": df0[:1], "e": df0})
expected = concat({"b": df0, "c": df0[:2], "d": df0[:1], "e": df0})
tm.assert_frame_equal(result, expected)
result = concat(
[None, df0, df0[:2], df0[:1], df0], keys=["a", "b", "c", "d", "e"]
)
expected = | concat([df0, df0[:2], df0[:1], df0], keys=["b", "c", "d", "e"]) | pandas.concat |
#!/usr/bin/env python
# encoding:utf-8
'''sklearn doc
'''
import re
import os
import sys
import numpy as np
import pandas as pd
from time import time
from sklearn.model_selection import GridSearchCV, cross_val_predict
# RandomizedSearchCV cross_val_score train_test_split
from skfeature.function.information_theoretical_based import MRMR
from imblearn.over_sampling import SMOTE
# from sklearn.feature_selection import SelectKBest, mutual_info_classif, mutual_info_regression,f_classif
# from sklearn.decomposition import PCA
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import LabelEncoder
from sklearn.naive_bayes import GaussianNB
from joblib import Memory, dump, load
from sklearn import metrics
from pycm import *  # swiss-army knife of confusion matrices
from collections import Counter
# from sklearn.base import BaseEstimator,TransformerMixin
# from imblearn.metrics import classification_report_imbalanced
import utils
import matplotlib
matplotlib.use('agg')  # select the non-interactive backend before importing pyplot
import matplotlib.pyplot as plt
from plotnine import * #ggplot
#Global variables
mem = Memory("./mycache") #A context object for caching a function's return value each time it is called with the same input arguments.
import itertools
# COLORS = 'bgrcmyk' #blue green red itertools.cycle(cmap.colors))
# cmaps['Qualitative'] = ['Pastel1', 'Pastel2', 'Paired', 'Accent',
# 'Dark2', 'Set1', 'Set2', 'Set3',
# 'tab10', 'tab20', 'tab20b', 'tab20c']
cmap = plt.get_cmap('Paired')
COLORS = cmap.colors
from sklearn_pipeline_config import * #SCALERS, Tree_based_CLASSIFIERS, Other_CLASSIFIERS RANDOM_STATE
All_CLASSIFIERS = Tree_based_CLASSIFIERS + Other_CLASSIFIERS
######################## pipeline functions ###################
def plot_tsne(df, Y=None, targets=None, filename='decomposition'):
"""to be fihished
method= ['tsne', 'pca', 'tsvd']
t-SNE has a cost function that is not convex, i.e. with different initializations we can get different results
PCA for dense data or
TruncatedSVD for sparse data
但TSVD直接使用scipy.sparse矩阵,不需要densify操作,所以推荐使用TSVD而不是PCA
"""
from sklearn.manifold import TSNE
from sklearn.decomposition import PCA, TruncatedSVD
n_components = min(df.shape) if min(df.shape) <10 else 10
X = TSNE(random_state=RANDOM_STATE, learning_rate=100, n_components=2).fit_transform(df)
pd.DataFrame(X).to_csv(filename + ".tSNE.csv")
fig = plt.figure(figsize=(10, 6))
for c, i, target_name in zip('rgb', np.unique(Y), targets):
plt.scatter(X[Y==i, 0], X[Y==i, 1], c=c, label=target_name)
plt.xlabel('tSNE-1')
plt.ylabel('tSNE-2')
plt.title('tSNE')
plt.legend()
fig.savefig(filename + ".tSNE.svg")
#pca
pca = PCA(random_state=RANDOM_STATE, n_components=n_components)
pca.fit(df)
X = pca.transform(df)
pd.DataFrame(X).to_csv(filename + ".pca.csv")
fig = plt.figure(figsize=(10, 6))
for c, i, target_name in zip('rgb', np.unique(Y), targets):
plt.scatter(X[Y==i, 0], X[Y==i, 1], c=c, label=target_name)
p1,p2=pca.explained_variance_ratio_[:2]
plt.xlabel('PCA-1 explained variance ratio: ' + '{:.2f}%'.format(p1))
plt.ylabel('PCA-2 explained variance ratio: ' + '{:.2f}%'.format(p2))
plt.title('PCA')
plt.legend()
# print("singular_values: ", pca.singular_values_)
fig.savefig(filename + ".pca.svg")
#tSVD
tsvd=TruncatedSVD(random_state=RANDOM_STATE, n_components=n_components)
tsvd.fit(df)
X = tsvd.transform(df)
pd.DataFrame(X).to_csv(filename + ".tSVD.csv")
fig = plt.figure(figsize=(10, 6))
for c, i, target_name in zip('rgb', np.unique(Y), targets):
plt.scatter(X[Y==i, 0], X[Y==i, 1], c=c, label=target_name)
p1,p2=tsvd.explained_variance_ratio_[:2]
plt.xlabel('tSVD-1 explained variance ratio: ' + '{:.2f}%'.format(p1))
    plt.ylabel('tSVD-2 explained variance ratio: ' + '{:.2f}%'.format(p2))
plt.title('tSVD')
plt.legend()
fig.savefig(filename + ".tSVD.svg")
@mem.cache
def get_data(X_file, y_file):
"""features matrix and metadata group.mf with header and index_col,transform to relative abundance matrix"""
if X_file.endswith("csv"):
X = pd.read_csv(X_file, index_col=0, header=0) # rows =samples ,columns=genes(features)
else:
X = pd.read_csv(X_file, index_col=0, header=0,sep="\t")
if y_file.endswith("csv"):
y = pd.read_csv(y_file, index_col=0, header=0) # rows =samples
else:
y = pd.read_csv(y_file, index_col=0, header=0,sep="\t")
return X, y
def plot_classification_report(dict_report, filename="sklearn",
                               width=6, height=3, dpi=300):
    report_df = round(pd.DataFrame(dict_report), 2)  # keep 2 decimal places
report_df.to_csv(filename + ".classification_report.csv")
report_df = report_df.loc[report_df.index != 'support',]
report_df.insert(0,'score',report_df.index)
plt_df = report_df.melt(id_vars=['score'], value_vars=report_df.columns[1:])
base_plot=(ggplot(plt_df, aes( y = plt_df.columns[1],x=plt_df.columns[-1])) +
geom_point(aes(fill="factor(variable)"),stat='identity',show_legend=False)+
facet_grid('~score')+ #,scales="free_x"
xlim(0,1)+
theme_bw()+
labs(x="",y="")
)
    base_plot.save(filename=filename + '.classification_report.svg', dpi=dpi, width=width, height=height)
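# Illustrative usage sketch (added for clarity, not part of the original script):
# plot_classification_report expects the dict produced by
# metrics.classification_report(..., output_dict=True). Toy labels only.
def _example_classification_report_dict():
    y_true = [0, 0, 1, 1, 1]
    y_pred = [0, 1, 1, 1, 0]
    report = metrics.classification_report(y_true, y_pred,
                                            target_names=['0_control', '1_case'],
                                            output_dict=True)
    # keys: one dict per class plus 'accuracy', 'macro avg' and 'weighted avg';
    # each class dict holds precision / recall / f1-score / support
    return report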
def report_topN_cv_results(results, n_top=10, filename="report"):
"""输出topn,评估那个标准化和分类器最好,用gridsearch"""
labels = []
mean_train_score=[]
mean_test_score=[]
std_test_score=[]
mean_fit_time=[]
for i in range(1, n_top + 1):
candidates = np.flatnonzero(results['rank_test_score'] == i)
for candidate in candidates:
labeldict={key:value.__class__.__name__ for key, value in results['params'][candidate].items()}
label = "_".join(labeldict[k] for k in ["scl","clf"])
labels.append(label)
mean_train_score.append(results['mean_train_score'][candidate])
mean_test_score.append(results['mean_test_score'][candidate])
std_test_score.append(results['std_test_score'][candidate])
mean_fit_time.append(results['mean_fit_time'][candidate])
df = pd.DataFrame.from_dict(
dict(zip(['label','mean_train_score', 'mean_test_score', 'std_test_score', 'mean_fit_time'],
(labels, mean_train_score, mean_test_score, std_test_score, mean_fit_time))))
df.to_csv(filename + ".top{}.cv_results.csv".format(n_top) , index=False)
fig = plt.figure(figsize=(12,5)) #fig size
# plt.grid(which='major', axis='both')
# You should use add_axes if you want exact control of the figure layout. eg.
left = max([len(label) for label in labels])*0.008
bottom, width, height=[0.2, 0.5, 0.7]
ax = fig.add_axes([left, bottom, width, height]) #axes position
ax.barh(labels, mean_test_score, xerr=std_test_score, align='center', color=COLORS, ecolor='black')
# ax.set_title("Compare the different scalers")
ax.set_xlabel('Classification accuracy')
# ax.set_ylabel('') #Different scalers
ax.set_yticklabels(labels)
ax.autoscale()
fig.savefig(filename + ".top{}.cv_results.svg".format(n_top))
def csv2pycm_report(cm_csv):
"""readin cfm csv file and output report for multiple matrics"""
df = pd.read_csv(cm_csv, index_col=0, header=0)
matrix = df.T.to_dict()
cm=ConfusionMatrix(matrix=matrix)
cm.save_html(cm_csv + ".report")
cm.save_csv(cm_csv + '.report')
def plot_confusion_matrix(cfm_df, filename="confusionmatrix", cmap=plt.cm.Blues, accuracy=None):
"""or plt.cm.gray"""
cfm_df.to_csv(filename + ".csv")
labels = list(cfm_df.columns)
fig, ax = plt.subplots()
fig.set_size_inches(8,8)
    cfm_df_norm = cfm_df.astype('float').div(cfm_df.sum(axis=1), axis=0)  # normalise each true-label row
# cax = ax.matshow(cfm_df, cmap=cmap)
ax.imshow(cfm_df, interpolation='nearest', cmap=cmap)
# ax.set_title("Accuracy: " + accuracy) # plt.title('title test',fontsize=12,color='r')
ax.xaxis.set_ticks_position('bottom')
if isinstance(labels,list):
ax.set(xticks=np.arange(cfm_df.shape[1]+1)-.5,
yticks=np.arange(cfm_df.shape[0]+1)-.5,
# ... and label them with the respective list entries
yticklabels=labels,
title="Accuracy: " + accuracy,
ylabel='True label',
xlabel='Predicted label')
ax.tick_params(length=.0)
# plt.xlabel('Predicted label')
# plt.ylabel('True label')
# ax.legend().set_visible(False) #no legend
ax.set_xticklabels(labels, rotation=45)
fmt = '.2f'
thresh = 0.4 # max(cfm_df.max()) / 2.
for i, j in itertools.product(range(cfm_df.shape[0]), range(cfm_df.shape[1])):
ax.text(j, i+0.1, format(cfm_df_norm.iloc[i, j], fmt),
horizontalalignment="center",
color="white" if cfm_df_norm.iloc[i, j] > thresh else "black")
ax.text(j, i-0.1, cfm_df.iloc[i, j],
horizontalalignment="center",
color="white" if cfm_df_norm.iloc[i, j] > thresh else "black")
plt.tight_layout()
fig.savefig(filename + ".svg")
def plot_mean_test_scores(labels,mean_test_score,error,filename):
"""评估那个标准化和特征筛选最好,用gridsearch"""
fig = plt.figure(figsize=(8,4)) #fig size
plt.grid(which='major', axis='both')
# You should use add_axes if you want exact control of the figure layout. eg.
left = max([len(label) for label in labels])*0.01 +0.1
bottom, width, height=[0.2, 0.5, 0.7]
ax = fig.add_axes([left, bottom, width, height]) #axes position
ax.barh(labels, mean_test_score, xerr=error, align='center', color=COLORS, ecolor='black')
# ax.set_title("Compare the different scalers")
ax.set_xlabel('Classification accuracy')
# ax.set_ylabel('') #Different scalers
ax.set_yticklabels(labels)
ax.autoscale()
fig.savefig(filename)
def plot_feature_importance(df, k, filename):
"""plot feature importance for LGBMClassifier
column0 features, column_1 importance
"""
fig = plt.figure(figsize=(6,8))
# plt.grid(which='major', axis='both')
left = max([len(label) for label in df.iloc[:,0]])*0.01 +0.1
bottom, width, height=[0.1, 1-left-0.1, 0.85]
indices_of_top_k = np.sort(np.argpartition(np.array(df.iloc[:,1]), -k)[-k:])
#np.argpartition: index, all smaller elements will be moved before it and all larger elements behind it
    # argpartition is efficient: roughly O(n), it only partitions around the k-th element instead of fully sorting
df = df.iloc[indices_of_top_k,].sort_values(by='importance')
ax = fig.add_axes([left, bottom, width, height]) #axes position
ax.barh(df.iloc[:,0],df.iloc[:,1])
ax.set_ylim(-0.5,k-0.5)
ax.set_xlim(0,max(df.importance)*1.1)
for i, v in enumerate(df.iloc[:,1]):
ax.text(v, i, '{0:0.2f}'.format(v), fontsize=8,
horizontalalignment='left',verticalalignment='center')
ax.set_xlabel("Feature importance")
fig.savefig(filename)
top_k_feature_names = df.feature_names
return top_k_feature_names
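# Illustrative sketch (added for clarity, not part of the original script): the np.argpartition
# trick used in plot_feature_importance, which finds the k largest values in roughly O(n)
# without a full sort. The importance values are made up.
def _example_argpartition_topk(k=3):
    importances = np.array([0.05, 0.30, 0.01, 0.20, 0.10, 0.25])
    top_k_idx = np.argpartition(importances, -k)[-k:]                 # unordered indices of the top k
    top_k_idx = top_k_idx[np.argsort(importances[top_k_idx])[::-1]]   # order them by decreasing importance
    return top_k_idx, importances[top_k_idx]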
def plot_coefficients(df, topk=20, filename="filename"):
"""coefficients dataframe """
# Access the classes
df = df.reindex(df.abs().sum(axis=1).sort_values(ascending=False).index).head(topk)
classes = df.columns
n_classes = len(classes)
df = df.sort_values(by=classes[0])
fig,axes=plt.subplots(1,n_classes, sharey = True)
if n_classes==1:
axes=[axes]
fig.set_size_inches(3*n_classes+ 1, 8)
# fig.suptitle("Coefficient of the features")
fontsize = "x-large" #if n_classes !=1 else "large"
for i in range(n_classes):
# Access the row containing the coefficients for this class
class_coef = df.iloc[:,i]
# sort_idx = np.argsort(class_coef)
colors = [COLORS[7] if c < 0 else COLORS[3] for c in class_coef]
yticks = np.arange(len(class_coef))
axes[i].barh(yticks, class_coef, color=colors)#
# feature_names = np.array(feature_names)
# Here I corrected the start to 0 (Your code has 1, which shifted the labels)
axes[i].tick_params(axis = 'both', labelsize = fontsize) #which = 'minor',
axes[i].set_yticks(yticks)
axes[i].set_yticklabels(list(df.index)) # rotation=60,fontsize=fontsize ha="right"
axes[i].set_title(classes[i],fontsize='xx-large')
axes[i].set_ylim(-0.6, len(class_coef)-0.4) #bottom: float, top: float
fig.text(0.5, 0.04, 'Coefficient of the features', ha='center',fontsize='xx-large')
#'medium', 'large' 'x-large', 'xx-large'
fig.savefig(filename + ".coef.svg")
# np.logspace(2, 6, 6, base=2)
def plot_bin_roc_curve(y_test, y_score,class_names,filename):
"""Score(pred)表示每个测试样本属于正样本的概率,从高到低,依次将“Score”值作为阈值threshold,
当测试样本属于正样本的概率大于或等于这个threshold时,我们认为它为正样本,否则为负样本。
每次选取一个不同的threshold,我们就可以得到一组FPR和TPR,即ROC曲线上的一点。
当我们将threshold设置为1和0时,分别可以得到ROC曲线上的(0,0)和(1,1)两个点。
将这些(FPR,TPR)对连接起来,就得到了ROC曲线。当threshold取值越多,ROC曲线越平滑。
多分类的可以每个标签绘制一条ROC曲线
一图一表原则,样品mf命名以数字开头0_no, 1_CD,这样自动0是阴性样品,1是阳性样本
"""
fpr, tpr, _ = metrics.roc_curve(y_test, y_score)
roc_auc = metrics.auc(fpr,tpr)
df = pd.DataFrame.from_dict(
dict(zip(['classes', 'fpr', 'tpr', 'auc'],
(class_names[1], fpr, tpr, roc_auc))))
df.to_csv(filename + ".roc_curve.csv", index=False)
fig = plt.figure(figsize=(8,8))
lw=2
ax = fig.add_subplot(111)
plt.grid(which='major', axis='both')
ax.plot(fpr, tpr, color='darkorange',
            lw=2, label='{0} (area ={1:0.2f})'.format(class_names[1], roc_auc))  # x-axis: false positive rate, y-axis: true positive rate
ax.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.legend(loc="lower right",fancybox=True, framealpha=0.8, fontsize=6)
fig.savefig(filename + ".roc_curve.svg")
    # fpr, tpr, thresholds = roc_curve(Y[test], probas_[:, 1])  # binary classification
#roc_auc: GridSearch(est, param_grid, scoring='roc_auc')
# auc_score, by setting the new scoring parameter to roc_auc: GridSearch(est, param_grid, scoring='roc_auc'). It will do the right thing and use predict_proba (or decision_function if predict_proba is not available).
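# Illustrative sketch (added for clarity, not part of the original script): the threshold sweep
# described in the plot_bin_roc_curve docstring -- metrics.roc_curve returns one (FPR, TPR)
# point per threshold over the positive-class scores. Toy labels and scores only.
def _example_roc_thresholds():
    y_true = np.array([0, 0, 1, 1])
    y_score = np.array([0.1, 0.4, 0.35, 0.8])
    fpr, tpr, thresholds = metrics.roc_curve(y_true, y_score)
    roc_auc = metrics.auc(fpr, tpr)
    return thresholds, fpr, tpr, roc_auc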
def plot_multi_roc_curve(y_test, y_score,classes,filename):
""" Learn to predict each class against the other
Compute ROC curve and ROC area for each class
classes order same as y_test
"""
from scipy import interp
    # compute the ROC for each class
n_classes=len(classes)
fpr = dict()
tpr = dict()
roc_auc = dict()
dfs = []
for i in range(n_classes):
fpr[i], tpr[i], _ = metrics.roc_curve(y_test[:, i], y_score[:, i])
roc_auc[i] = metrics.auc(fpr[i], tpr[i])
df = pd.DataFrame.from_dict(
dict(zip(['classes', 'fpr', 'tpr', 'auc'],
(classes[i], fpr[i], tpr[i], roc_auc[i]))))
dfs.append(df)
# Compute micro-average ROC curve and ROC area
fpr["micro"], tpr["micro"], _ = metrics.roc_curve(y_test.ravel(), y_score.ravel())
roc_auc["micro"] = metrics.auc(fpr["micro"], tpr["micro"])
df = pd.DataFrame.from_dict(
dict(zip(['classes', 'fpr', 'tpr', 'auc'],
('micro', fpr["micro"], tpr["micro"], roc_auc["micro"]))))
dfs.append(df)
# Compute macro-average ROC curve and ROC area
# First aggregate all false positive rates
all_fpr = np.unique(np.concatenate([fpr[i] for i in range(n_classes)]))
# Then interpolate all ROC curves at this points
mean_tpr = np.zeros_like(all_fpr)
for i in range(n_classes):
mean_tpr += interp(all_fpr, fpr[i], tpr[i])
# Finally average it and compute AUC
mean_tpr /= n_classes
fpr["macro"] = all_fpr
tpr["macro"] = mean_tpr
roc_auc["macro"] = metrics.auc(fpr["macro"], tpr["macro"])
df = pd.DataFrame.from_dict(
dict(zip(['classes', 'fpr', 'tpr', 'auc'],
('macro', fpr["macro"], tpr["macro"], roc_auc["macro"]))))
dfs.append(df)
concat_dfs = pd.concat(dfs)
concat_dfs.to_csv(filename + ".roc_curve.csv", index=False)
# Plot all ROC curves
lw=2
plt.figure()
plt.plot(fpr["micro"], tpr["micro"],
label='micro_average (area = {0:0.2f})'.format(roc_auc["micro"]),
color='deeppink', linestyle=':', linewidth=lw)
plt.plot(fpr["macro"], tpr["macro"],
label='macro_average (area = {0:0.2f})'.format(roc_auc["macro"]),
color='navy', linestyle=':', linewidth=lw)
colors = COLORS[:n_classes]
for i, color in zip(range(n_classes), colors):
plt.plot(fpr[i], tpr[i], color=color, lw=lw,
label=' {0} (area = {1:0.2f})'.format(classes[i], roc_auc[i]))
plt.plot([0, 1], [0, 1], 'k--', lw=lw)
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.grid(b=True, ls=':')
plt.legend(loc='lower right', fancybox=True, framealpha=0.8, fontsize=8)
plt.savefig(filename + ".roc_curve.svg")
######################## pipeline functions END ###################
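# Illustrative sketch (added for clarity, not part of the original script): what the SMOTE step
# in SklearnPipeline.over_sampling (defined below) does -- the minority class is synthetically
# oversampled until both classes have the same number of samples. Toy random features only.
def _example_smote_resampling():
    rng = np.random.RandomState(0)
    X_toy = pd.DataFrame(rng.rand(20, 4), columns=['f1', 'f2', 'f3', 'f4'])
    y_toy = np.array([0] * 14 + [1] * 6)  # imbalanced: 14 vs 6
    X_res, y_res = SMOTE(random_state=RANDOM_STATE).fit_resample(X_toy, y_toy)
    return Counter(y_toy), Counter(y_res)  # e.g. {0: 14, 1: 6} -> {0: 14, 1: 14}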
class SklearnPipeline():
def __init__(self, X_filename, X, Y, log="SklearnPipeline.log", outdir="./"):
"""load the feature matrix(X) and maping file(Y)
X should be normalized or relative transformed
"""
self.outdir = os.path.abspath(outdir)
self.filename = os.path.join(self.outdir, X_filename)
utils.create_dir(self.outdir)
self.logger = utils.create_logger(log)
self.logger.info("Start reading data from {}".format(X_filename))
self.X, self.Y = X, Y
self.Y = self.Y.loc[self.X.index,:] #sort and select samples
self.target_names=[re.sub(r" +", "_",name) for name in np.unique(self.Y.values)] #sorted in same order of 0 1 2 labels
self.label_counts = Counter(self.Y.iloc[:,0])
self.logger.info('Finish loading data from {}, dimension is {}, \n \t\t\t label counts {}'.format(
X_filename, self.X.shape, self.label_counts))
self.stats = []
self.stats.append(("original_dim", self.X.shape))
@utils.tryExcept
def filter_low_prevalence_features(self, prevalence=0.2, to_relative=False):
"""pd.DataFrame: rows = feaures
feature组内覆盖度最大的要大于prevalence, 保证特征在该覆盖度下可以代表该组,防止过滤掉组内特性富集的features
可以每个组内过滤低覆盖度的,group-specific filter, for update later
OTU counts were converted to relative abundances, filtered at a minimum of 10% prevalence across samples
稀疏矩阵如何过滤。
"""
self.X = self.X.loc[self.X.sum(axis=1)>0 , self.X.sum(axis=0)>0] #del 0 featrues and 0 samples
if prevalence !=0:
within_class_prevalence =[np.sum(self.X[self.Y.iloc[:,0].values==k]>0, axis=0)/v for k,v in self.label_counts.items()]
            # within-class prevalence of each feature: a list with one Series per class label
if to_relative :
self.X = (self.X.T / self.X.sum(axis=1)).T # transform to relative abundance matrix, rows are samples
self.X = self.X.loc[:, pd.DataFrame(within_class_prevalence).max() > prevalence] #filter low within class prevalence features
            self.X = self.X.loc[self.X.sum(axis=1)>0 ,:]  # filter all-zero samples, which indirectly removes outlier samples
self.X.to_csv(self.filename + ".filter_{}_prevalence.csv".format(prevalence))
self.Y = self.Y.loc[self.X.index,:] #sort and select samples after feature selection
self.Y = LabelEncoder().fit_transform(self.Y.values.ravel())
self.logger.info("Filtered the features with max within_class prevalence lower than {}, dimension is {}".format(prevalence, self.X.shape))
self.stats.append(("prevalence_{}_dim".format(prevalence), self.X.shape))
@utils.tryExcept
def mrmr_feature_select(self, n_selected_features=50):
"""
Brown, <NAME> al. "Conditional Likelihood Maximisation: A Unifying Framework for Information Theoretic Feature Selection." JMLR 2012
select features index[0] is the most important feature
j_cmi: basic scoring criteria for linear combination of shannon information term
j_cmi=I(f;y)-beta*sum_j(I(fj;f))+gamma*sum(I(fj;f|y)) conditional mutual information mrmr gama=0
        Mutual information measures the mutual dependence between two sets of events; it is the expected value of the pointwise mutual information (PMI).
MIfy: mutual information between selected features and response y
"""
# plot_tsne(self.X,Y=self.Y,targets=self.target_names, filename=self.filename +'.before_mrmr_feature_selection')
n_samples, n_features = self.X.shape
x=np.array(self.X)
if n_selected_features and n_features > n_selected_features:
# filter half more features or select 50 features int(n_features*percent) #
# self.logger.info("selecting {} features using mrmr".format(num_fea))
idx, j_cmi, MIfy = MRMR.mrmr(x, self.Y, n_selected_features=n_selected_features)
else:
idx, j_cmi, MIfy = MRMR.mrmr(x, self.Y) #select automatically may still remain many features or
num_fea = len(idx)
# obtain the dataset on the selected features
self.features = self.X.columns[idx].values
mrmr_report = pd.DataFrame({"features":self.features, "j_cmi":j_cmi, "MIfy": MIfy}, columns=['features', 'j_cmi', 'MIfy'])
mrmr_report = mrmr_report.sort_values('MIfy',ascending=False)
mrmr_report.to_csv(self.filename + ".mrmr_features.report.csv",index=False)
self.X = self.X.iloc[:,idx] #select mrmr features
sel_bools = self.X.sum(axis=1)!=0 # filter all 0 rows samples.
self.X = self.X[sel_bools]
self.Y = self.Y[sel_bools]
self.X.to_csv(self.filename + ".mrmr_sel_features.csv")
self.logger.info("Selected {} features using mrmr".format(num_fea))
self.stats.append(("mrmr_dim", self.X.shape))
# plot_tsne(self.X,Y=self.Y,targets=self.target_names, filename=self.filename +'.after_mrmr_feature_selection')
@utils.tryExcept
def over_sampling(self):
"""Over-sampling the minority class for imbalance data using SMOTE
https://www.analyticsvidhya.com/blog/2017/03/imbalanced-classification-problem/
The main objective of balancing classes is to either
increasing the frequency of the minority class or
decreasing the frequency of the majority class.
Over-Sampling increases the number of instances in the minority class by
randomly replicating them in order to present a higher representation of the minority class in the sample
Disadvantages
It increases the likelihood of overfitting since it replicates the minority class events.
In most cases, synthetic techniques like SMOTE and MSMOTE will outperform the conventional oversampling and undersampling methods
For better results, one can use synthetic sampling methods like SMOTE and MSMOTE
along with advanced boosting methods like Gradient boosting and XG Boost.
X G Boost is generally a more advanced form of Boosting and takes care of imbalanced data set by balancing it in itself
try XG boosting on the imbalanced data directly set to get better results.
"""
class_sample_count = Counter(self.Y)
self.stats.append(("ori_class_sample_count", class_sample_count))
isbalanced = len(set(class_sample_count.values()))
if isbalanced ==1:
self.logger.info('The dataset is balanced with class_sample_count {}'.format(class_sample_count))
self.features = self.X.columns
else:
self.logger.info('Dataset shape {} before over sampling'.format(class_sample_count))
sm = SMOTE(random_state=RANDOM_STATE)
self.features = self.X.columns
self.X, self.Y = sm.fit_resample(self.X, self.Y)
self.X = pd.DataFrame(self.X,columns=self.features)
self.stats.append(("smote_class_sample_count", Counter(self.Y)))
self.logger.info('Over sampled dataset with SMOTE, shape {}'.format( Counter(self.Y) ))
@utils.tryExcept
def select_best_scl_clf(self, SCALERS, Tree_based_CLASSIFIERS, Other_CLASSIFIERS,
scoring= 'accuracy', outer_cv=10, inner_cv=5, n_jobs=1, search=0):
"""选最好的标准化方法和classifier (default parameters) 组合
Each sample i.e. each row of the data matrix
X = X_full[:, [0,1]] #根据前两个特征判断那种数据转化效果好, 用pca后可视化进行比较
数据转化过滤中同时考虑binarizer 和 abundance based的数据哪种转化方式更能提高分类效果,并考虑分类样本均衡的问题。
Compare the effect of different scalers on data with outliers
https://scikit-learn.org/stable/auto_examples/preprocessing/plot_all_scaling.html#sphx-glr-auto-examples-preprocessing-plot-all-scaling-py
test one model with multiple scalers
This example uses different scalers, transformers, and normalizers to bring the data within a pre-defined range
In general, learning algorithms benefit from standardization of the data set.
If some outliers are present in the set, robust scalers or transformers are more appropriate.
The behaviors of the different scalers, transformers, and normalizers on a dataset containing marginal outliers
is highlighted in Compare the effect of different scalers on data with outliers.
A notable exception are decision tree-based estimators that are robust to arbitrary scaling of the data.
"""
self.search = search
scls=[SCALER[0] for SCALER in SCALERS]
clfs = [clf[0].__class__.__name__ for clf in Other_CLASSIFIERS]
trees = [clf[0].__class__.__name__ for clf in Tree_based_CLASSIFIERS]
if search:
self.logger.info('Select the best tree-based classifiers: {} \n \t\t\t and combination of scalers: {} \n \t\t\t and classifiers: {} \n \t\t\t Tune each classifier with GridSearchCV'.format(trees, scls, clfs))
else:
self.logger.info('Select the best tree-based classifiers: {} \n \t\t\t and combination of scalers: {} \n \t\t\t and classifiers: \n \t\t\t with default parameters'.format(trees, scls, clfs))
# search best combination with scalars and clfs with default params
PARAM_GRID = [
{
'scl': [NonScaler()],
'clf': [clf[0] for clf in Tree_based_CLASSIFIERS]
},
{
'scl': [scl[1] for scl in SCALERS],
'clf': [clf[0] for clf in Other_CLASSIFIERS],
}
]
# search best combination with scalars and hyper-parameters tuned clfs
PARAM_GRID_SEARCH =[
dict({ 'scl': [NonScaler()],
'clf': [clf[0]],
**clf[1] }) for clf in Tree_based_CLASSIFIERS
] + [
dict({ 'scl': [scl[1] for scl in SCALERS],
'clf': [clf[0]],
**clf[1] }) for clf in Other_CLASSIFIERS
]
pipeline = Pipeline(memory=mem, # memory: store the transformers of the pipeline
steps= [
('scl', NonScaler()),
('clf', GaussianNB())
])
if search:
            param_grid = PARAM_GRID_SEARCH  # {'scaler': [SCALER[1] for SCALER in SCALERS]}  # grid search compares multiple scalers automatically and in parallel
else:
param_grid = PARAM_GRID
grid_search = GridSearchCV(pipeline, param_grid=param_grid,
scoring = scoring, iid=False, cv =inner_cv,
n_jobs=n_jobs, verbose=1, return_train_score=True)
grid_search.fit(self.X, self.Y)# Train the classifier with balanced data ; fit did nothing
self.grid_search = grid_search
self.scoring = scoring
        # obtain predictions via cross-validation
        self.y_pred = cross_val_predict(grid_search, self.X, self.Y, cv=outer_cv, n_jobs=1)  # n_jobs=-1 fails here, it conflicts with inner_cv
#for each element in the input, the prediction that was obtained for that element when it was in the test set.
#In each iteration, label of i'th part of data gets predicted. In the end cross_val_predict merges all partially predicted labels and returns them as the final result.
self.accuracy_score = '%0.2f' % metrics.accuracy_score(self.Y, self.y_pred) #balanced_accuracy_score(y_true, y_pred)
self.best_estimator_ = grid_search.best_estimator_ #best estimator based on Mean accuracy of self.predict(X) wrt. y
self.best_clf = self.best_estimator_.named_steps["clf"]
self.scl_name = self.best_estimator_.named_steps["scl"].__class__.__name__
self.clf_name = self.best_clf.__class__.__name__
if not search:
report_topN_cv_results(grid_search.cv_results_,n_top=10,filename=self.filename)
#save cv_results
df=pd.DataFrame(grid_search.cv_results_)
df.to_csv(self.filename + ".all.cv_results.csv", index=False)
@utils.tryExcept
def hypertune_best_classifier(self, All_CLASSIFIERS, pltcfm=True, outer_cv=10, inner_cv=5,n_jobs=1):
"""compare classifiers by nested Cross-Validation
hypertune best classifier
RandomizedSearchCV 来优化胜出的分类器
n_components == min(n_samples, n_features)[defult]
n_components=0.85
variance that needs to be explained is greater than the percentage
There are more sophisticated ways to choose a number of components,
of course, but a rule of thumb of 70% - 90% is a reasonable start.
"""
if self.search:
            # no need to tune again; the grid search already tuned each classifier
best_hypertuned_clf = self.best_clf
grid_search = self.grid_search
else:
self.logger.info('Hypertune the best classifier {} with GridSearchCV'.format(self.clf_name))
            # cross prediction does not need a separate train/test split
#X_train, X_test, y_train, y_test = train_test_split(self.X, self.Y, random_state=RANDOM_STATE)
best_clf_index = [i[0] for i in All_CLASSIFIERS].index(self.best_clf)
param_grid = All_CLASSIFIERS[best_clf_index][1]
grid_search = GridSearchCV(self.best_estimator_, param_grid=param_grid,
scoring= self.scoring, iid=False, #[independently identically distributed across the folds] return the average score across folds
cv=inner_cv, #inner_cv train/validation dataset default 3
n_jobs=n_jobs,verbose=1,return_train_score=True) #Mean accuracy of self.predict(X) wrt. y
grid_search.fit(self.X, self.Y) # Train the classifier with balanced data
self.y_pred = cross_val_predict(grid_search, self.X, self.Y, cv=outer_cv) #outer_cv
self.accuracy_score = '%0.2f' % metrics.accuracy_score(self.Y, self.y_pred) #balanced_accuracy_score(y_true, y_pred)
self.best_estimator_ = grid_search.best_estimator_
best_hypertuned_clf = grid_search.best_estimator_.named_steps["clf"]
#save cv_results
df=pd.DataFrame(grid_search.cv_results_)
df.to_csv(self.filename + ".{}.hypertuned.cv_results.csv".format(self.clf_name), index=False)
self.logger.info("Best optimized classifier: {} , Accuracy:{}, Best Param:{}".format(
self.clf_name, self.accuracy_score, grid_search.best_params_))
self.stats.append(("best_estimator", {k:v.__class__.__name__ for k,v in grid_search.best_estimator_.named_steps.items()}))
self.stats.append(('hypertuned_best_parameters', grid_search.best_params_))
self.stats.append(('hypertuned_best_score_{}'.format(self.scoring), '%0.2f' % grid_search.best_score_)) #mean_test_score
self.stats.append(('hypertuned_accuracy', self.accuracy_score)) #refit all samples score
#plot hypertuned classification report
report = metrics.classification_report(self.Y, self.y_pred, target_names=self.target_names, output_dict=True)
filename = self.filename + ".{}.hypertuned".format(self.clf_name)
plot_classification_report(report, filename=filename)
#save model
modelf = self.filename + ".{}_{}.model.z".format(self.scl_name, self.clf_name)
dump(self.best_estimator_, modelf)
# clf = load(modelf)
if pltcfm:
"""plot cunfusion matrix"""
cfmf=self.filename + '.{}.hypertuned.confusion_matrix'.format(self.clf_name)
cfm_html = self.filename + '.{}.hypertuned.PyCM_report'.format(self.clf_name)
dic=dict(zip(np.unique(self.Y),self.target_names))
actual_vector = [dic[i] for i in self.Y]
predict_vector = [dic[i] for i in self.y_pred]
cm = ConfusionMatrix(actual_vector=actual_vector, predict_vector=predict_vector) # pycm
cm.save_html(cfm_html) # cross prediction result
cfm = metrics.confusion_matrix(self.Y, self.y_pred)
cfm_df=pd.DataFrame(cfm, columns=self.target_names,index=self.target_names)
plot_confusion_matrix(cfm_df, filename=cfmf, accuracy = self.accuracy_score)
        #roc_auc_score = metrics.roc_auc_score(y_test, self.y_pred) # roc_auc_score does not support multi-class directly
# plot roc courve
# Yet this only counts for SVC where the distance to the decision plane is used to compute the probability - therefore no difference in the ROC.
        # note: the estimator was refit on all samples, so these probabilities may be optimistic (overfit)
y_proba = None
if hasattr(best_hypertuned_clf, "decision_function"):
y_proba = grid_search.decision_function(self.X)
# decision_function, finds the distance to the separating hyperplane.
# y_proba = cross_val_predict(grid_search, self.X, self.Y, cv=outer_cv, method='decision_function')
elif hasattr(best_hypertuned_clf, "predict_proba"):
# predict_proba is a method of a (soft) classifier outputting the probability of the instance being in each of the classes.
# y_proba = cross_val_predict(grid_search, self.X, self.Y, cv=outer_cv, method='predict_proba')
y_proba = grid_search.predict_proba(self.X)[:, 1]
        if y_proba is not None: # 'is not None' avoids numpy's "elementwise comparison failed; returning scalar" FutureWarning
if len(self.target_names)==2:
plot_bin_roc_curve(self.Y, y_proba, self.target_names, filename)
else:
y_test = LabelBinarizer().fit_transform(self.Y)
plot_multi_roc_curve(y_test, y_proba, self.target_names, filename)
#plot topK important features and tsne scatter
n_features = self.X.shape[1]
k = 20 if n_features > 20 else n_features
if hasattr(best_hypertuned_clf, "feature_importances_"):
"""plot feature importance"""
feature_importances = best_hypertuned_clf.feature_importances_
csv = filename + ".feature_importance.csv"
df = pd.DataFrame({
'feature_names': self.features,
'importance': feature_importances,
})
df.sort_values(by='importance',ascending=False).to_csv(csv, index=False)
svg = filename + ".top{}.feature_importance.svg".format(k)
top_k_feature_names = plot_feature_importance(df, k, svg)
# topk_X = self.X[top_k_feature_names]
# topk_X = topk_X.loc[topk_X.sum(axis=1)>0, : ] #del 0 samples and 0 featrues topk_X.sum(axis=0)>0
# topK_Y = self.Y[[i for i,j in enumerate(self.X.index) if j in topk_X.index]] #sort and select samples sel_bools = self.X.sum(axis=1)!=0 #
# plot_tsne(topk_X,Y=topK_Y,targets=self.target_names, filename=self.filename +'.{}.hypertuned.top{}.important_features'.format(self.clf_name, k))
elif hasattr(best_hypertuned_clf, "coef_"):
coef =best_hypertuned_clf.coef_ # As the second of the categories is the Yes category
#https://www.displayr.com/how-to-interpret-logistic-regression-coefficients/
if coef.shape[0]==1:
df=pd.DataFrame(coef.reshape(1,-1),index=[self.target_names[1]], columns=self.features)
else:
df=pd.DataFrame(coef,index=self.target_names,columns=self.features)
df.T.to_csv(filename + ".coef.csv")
plot_coefficients(df.T, topk=20, filename=filename)
stats_df = pd.DataFrame({"stats_index":[i[0] for i in self.stats], "stats_value":[str(i[1]) for i in self.stats]})
stats_df.to_csv(self.filename + ".log_stats.csv",index=False)
self.logger.info("Pipeline is finished")
def make_predictions(self, test_df, model):
'''test_df is in DataFrame format with index being samples'''
load_model = load(model)
        predicts = load_model.predict(test_df)
predict_labels = [self.target_names[i] for i in predicts]
        result = pd.DataFrame({'SampleID': test_df.index, 'predicted label': predict_labels})
import pandas as pd
df_growth = df_realgr["Real Growth"]
df_vol = AvgVolUS['Avg Vol US']
df_series = df_inf["inflation"]
df_series = pd.merge(df_series, df_growth, on="Date", how="outer")
df_series = pd.merge(df_series, df_vol, on="Date", how="outer")
#! /usr/bin/env python
from __future__ import division, print_function
import argparse
import collections
import logging
import os
import random
import threading
import numpy as np
import pandas as pd
from itertools import cycle, islice
import keras
from keras import backend as K
from keras import optimizers
from keras.models import Model
from keras.layers import Input, Dense, Dropout
from keras.callbacks import Callback, ModelCheckpoint, ReduceLROnPlateau, LearningRateScheduler, TensorBoard
from keras.utils import get_custom_objects
from keras.utils.vis_utils import plot_model
from sklearn.metrics import r2_score, mean_squared_error, mean_absolute_error
from sklearn.model_selection import KFold, StratifiedKFold, GroupKFold
from scipy.stats.stats import pearsonr
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import combo
import p1_common
# import p1_common_keras
from solr_keras import CandleRemoteMonitor, compute_trainable_params, TerminateOnTimeOut
# import argparser
# from datasets import NCI60
import NCI60
logger = logging.getLogger(__name__)
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
def set_seed(seed):
os.environ['PYTHONHASHSEED'] = '0'
np.random.seed(seed)
random.seed(seed)
if K.backend() == 'tensorflow':
import tensorflow as tf
tf.set_random_seed(seed)
# session_conf = tf.ConfigProto(intra_op_parallelism_threads=1, inter_op_parallelism_threads=1)
# sess = tf.Session(graph=tf.get_default_graph(), config=session_conf)
# K.set_session(sess)
        # Uncomment when running on an optimized tensorflow where NUM_INTER_THREADS and
# NUM_INTRA_THREADS env vars are set.
# session_conf = tf.ConfigProto(inter_op_parallelism_threads=int(os.environ['NUM_INTER_THREADS']),
# intra_op_parallelism_threads=int(os.environ['NUM_INTRA_THREADS']))
# sess = tf.Session(graph=tf.get_default_graph(), config=session_conf)
# K.set_session(sess)
def verify_path(path):
folder = os.path.dirname(path)
if folder and not os.path.exists(folder):
os.makedirs(folder)
def set_up_logger(logfile, verbose):
verify_path(logfile)
fh = logging.FileHandler(logfile)
fh.setFormatter(logging.Formatter("[%(asctime)s %(process)d] %(message)s", datefmt="%Y-%m-%d %H:%M:%S"))
fh.setLevel(logging.DEBUG)
sh = logging.StreamHandler()
sh.setFormatter(logging.Formatter(''))
sh.setLevel(logging.DEBUG if verbose else logging.INFO)
logger.setLevel(logging.DEBUG)
logger.addHandler(fh)
logger.addHandler(sh)
def extension_from_parameters(args):
"""Construct string for saving model with annotation of parameters"""
ext = ''
ext += '.A={}'.format(args.activation)
ext += '.B={}'.format(args.batch_size)
ext += '.E={}'.format(args.epochs)
ext += '.O={}'.format(args.optimizer)
# ext += '.LEN={}'.format(args.maxlen)
ext += '.LR={}'.format(args.learning_rate)
ext += '.CF={}'.format(''.join([x[0] for x in sorted(args.cell_features)]))
ext += '.DF={}'.format(''.join([x[0] for x in sorted(args.drug_features)]))
if args.feature_subsample > 0:
ext += '.FS={}'.format(args.feature_subsample)
if args.drop > 0:
ext += '.DR={}'.format(args.drop)
if args.warmup_lr:
ext += '.wu_lr'
if args.reduce_lr:
ext += '.re_lr'
if args.residual:
ext += '.res'
if args.use_landmark_genes:
ext += '.L1000'
if args.gen:
ext += '.gen'
if args.use_combo_score:
ext += '.scr'
for i, n in enumerate(args.dense):
if n > 0:
ext += '.D{}={}'.format(i+1, n)
if args.dense_feature_layers != args.dense:
for i, n in enumerate(args.dense):
if n > 0:
ext += '.FD{}={}'.format(i+1, n)
return ext
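# Illustrative result (added; argument values are assumptions with the optional flags off):
#   activation='relu', batch_size=256, epochs=10, optimizer='adam', learning_rate=0.001,
#   cell_features=['expression'], drug_features=['descriptors'], dense=[1000, 1000]
#   => '.A=relu.B=256.E=10.O=adam.LR=0.001.CF=e.DF=d.D1=1000.D2=1000'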
def discretize(y, bins=5):
percentiles = [100 / bins * (i + 1) for i in range(bins - 1)]
thresholds = [np.percentile(y, x) for x in percentiles]
classes = np.digitize(y, thresholds)
return classes
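# Worked example (added; not called anywhere in this script): discretize(np.arange(100), bins=5)
# assigns classes 0..4 with 20 elements each, because the thresholds are the
# 20th/40th/60th/80th percentiles.  It is only used to stratify CV folds on a continuous target.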
class ComboDataLoader(object):
"""Load merged drug response, drug descriptors and cell line essay data
"""
def __init__(self, seed, val_split=0.2, shuffle=True,
cell_features=['expression'], drug_features=['descriptors'],
use_landmark_genes=False, use_combo_score=False,
preprocess_rnaseq=None, exclude_cells=[], exclude_drugs=[],
feature_subsample=None, scaling='std', scramble=False,
cv_partition='overlapping', cv=0):
"""Initialize data merging drug response, drug descriptors and cell line essay.
Shuffle and split training and validation set
Parameters
----------
seed: integer
seed for random generation
val_split : float, optional (default 0.2)
fraction of data to use in validation
cell_features: list of strings from 'expression', 'expression_5platform', 'mirna', 'proteome', 'all', 'categorical' (default ['expression'])
use one or more cell line feature sets: gene expression, microRNA, proteome
use 'all' for ['expression', 'mirna', 'proteome']
use 'categorical' for one-hot encoded cell lines
drug_features: list of strings from 'descriptors', 'latent', 'all', 'categorical', 'noise' (default ['descriptors'])
use dragon7 descriptors, latent representations from Aspuru-Guzik's SMILES autoencoder
trained on NSC drugs, or both; use random features if set to noise
use 'categorical' for one-hot encoded drugs
shuffle : True or False, optional (default True)
if True shuffles the merged data before splitting training and validation sets
scramble: True or False, optional (default False)
if True randomly shuffle dose response data as a control
feature_subsample: None or integer (default None)
number of feature columns to use from cellline expressions and drug descriptors
use_landmark_genes: True or False
only use LINCS1000 landmark genes
use_combo_score: bool (default False)
use combination score in place of percent growth (stored in 'GROWTH' column)
scaling: None, 'std', 'minmax' or 'maxabs' (default 'std')
            type of feature scaling: 'minmax' to [0,1], 'maxabs' to [-1, 1], 'std' for standard normalization
"""
self.cv_partition = cv_partition
np.random.seed(seed)
df = NCI60.load_combo_dose_response(use_combo_score=use_combo_score, fraction=True, exclude_cells=exclude_cells, exclude_drugs=exclude_drugs)
logger.info('Loaded {} unique (CL, D1, D2) response sets.'.format(df.shape[0]))
if 'all' in cell_features:
self.cell_features = ['expression', 'mirna', 'proteome']
else:
self.cell_features = cell_features
if 'all' in drug_features:
self.drug_features = ['descriptors', 'latent']
else:
self.drug_features = drug_features
for fea in self.cell_features:
if fea == 'expression' or fea == 'rnaseq':
self.df_cell_expr = NCI60.load_cell_expression_rnaseq(ncols=feature_subsample, scaling=scaling, use_landmark_genes=use_landmark_genes, preprocess_rnaseq=preprocess_rnaseq)
df = df.merge(self.df_cell_expr[['CELLNAME']], on='CELLNAME')
elif fea == 'expression_u133p2':
self.df_cell_expr = NCI60.load_cell_expression_u133p2(ncols=feature_subsample, scaling=scaling, use_landmark_genes=use_landmark_genes)
df = df.merge(self.df_cell_expr[['CELLNAME']], on='CELLNAME')
elif fea == 'expression_5platform':
self.df_cell_expr = NCI60.load_cell_expression_5platform(ncols=feature_subsample, scaling=scaling, use_landmark_genes=use_landmark_genes)
df = df.merge(self.df_cell_expr[['CELLNAME']], on='CELLNAME')
elif fea == 'mirna':
self.df_cell_mirna = NCI60.load_cell_mirna(ncols=feature_subsample, scaling=scaling)
df = df.merge(self.df_cell_mirna[['CELLNAME']], on='CELLNAME')
elif fea == 'proteome':
self.df_cell_prot = NCI60.load_cell_proteome(ncols=feature_subsample, scaling=scaling)
df = df.merge(self.df_cell_prot[['CELLNAME']], on='CELLNAME')
elif fea == 'categorical':
df_cell_ids = df[['CELLNAME']].drop_duplicates()
cell_ids = df_cell_ids['CELLNAME'].map(lambda x: x.replace(':', '.'))
df_cell_cat = pd.get_dummies(cell_ids)
df_cell_cat.index = df_cell_ids['CELLNAME']
self.df_cell_cat = df_cell_cat.reset_index()
for fea in self.drug_features:
if fea == 'descriptors':
self.df_drug_desc = NCI60.load_drug_descriptors(ncols=feature_subsample, scaling=scaling)
df = df[df['NSC1'].isin(self.df_drug_desc['NSC']) & df['NSC2'].isin(self.df_drug_desc['NSC'])]
elif fea == 'latent':
self.df_drug_auen = NCI60.load_drug_autoencoded_AG(ncols=feature_subsample, scaling=scaling)
df = df[df['NSC1'].isin(self.df_drug_auen['NSC']) & df['NSC2'].isin(self.df_drug_auen['NSC'])]
elif fea == 'categorical':
df_drug_ids = df[['NSC1']].drop_duplicates()
df_drug_ids.columns = ['NSC']
drug_ids = df_drug_ids['NSC']
df_drug_cat = pd.get_dummies(drug_ids)
df_drug_cat.index = df_drug_ids['NSC']
self.df_drug_cat = df_drug_cat.reset_index()
elif fea == 'noise':
ids1 = df[['NSC1']].drop_duplicates().rename(columns={'NSC1':'NSC'})
ids2 = df[['NSC2']].drop_duplicates().rename(columns={'NSC2':'NSC'})
df_drug_ids = pd.concat([ids1, ids2]).drop_duplicates()
noise = np.random.normal(size=(df_drug_ids.shape[0], 500))
df_rand = pd.DataFrame(noise, index=df_drug_ids['NSC'],
columns=['RAND-{:03d}'.format(x) for x in range(500)])
self.df_drug_rand = df_rand.reset_index()
logger.info('Filtered down to {} rows with matching information.'.format(df.shape[0]))
ids1 = df[['NSC1']].drop_duplicates().rename(columns={'NSC1':'NSC'})
ids2 = df[['NSC2']].drop_duplicates().rename(columns={'NSC2':'NSC'})
df_drug_ids = pd.concat([ids1, ids2]).drop_duplicates().reset_index(drop=True)
n_drugs = df_drug_ids.shape[0]
n_val_drugs = int(n_drugs * val_split)
n_train_drugs = n_drugs - n_val_drugs
logger.info('Unique cell lines: {}'.format(df['CELLNAME'].nunique()))
logger.info('Unique drugs: {}'.format(n_drugs))
# df.to_csv('filtered.growth.min.tsv', sep='\t', index=False, float_format='%.4g')
# df.to_csv('filtered.score.max.tsv', sep='\t', index=False, float_format='%.4g')
if shuffle:
df = df.sample(frac=1.0, random_state=seed).reset_index(drop=True)
df_drug_ids = df_drug_ids.sample(frac=1.0, random_state=seed).reset_index(drop=True)
self.df_response = df
self.df_drug_ids = df_drug_ids
self.train_drug_ids = df_drug_ids['NSC'][:n_train_drugs]
self.val_drug_ids = df_drug_ids['NSC'][-n_val_drugs:]
if scramble:
growth = df[['GROWTH']]
random_growth = growth.iloc[np.random.permutation(np.arange(growth.shape[0]))].reset_index()
self.df_response[['GROWTH']] = random_growth['GROWTH']
logger.warn('Randomly shuffled dose response growth values.')
logger.info('Distribution of dose response:')
logger.info(self.df_response[['GROWTH']].describe())
self.total = df.shape[0]
self.n_val = int(self.total * val_split)
self.n_train = self.total - self.n_val
logger.info('Rows in train: {}, val: {}'.format(self.n_train, self.n_val))
self.cell_df_dict = {'expression': 'df_cell_expr',
'expression_5platform': 'df_cell_expr',
'expression_u133p2': 'df_cell_expr',
'rnaseq': 'df_cell_expr',
'mirna': 'df_cell_mirna',
'proteome': 'df_cell_prot',
'categorical': 'df_cell_cat'}
self.drug_df_dict = {'descriptors': 'df_drug_desc',
'latent': 'df_drug_auen',
'categorical': 'df_drug_cat',
'noise': 'df_drug_rand'}
self.input_features = collections.OrderedDict()
self.feature_shapes = {}
for fea in self.cell_features:
feature_type = 'cell.' + fea
feature_name = 'cell.' + fea
df_cell = getattr(self, self.cell_df_dict[fea])
self.input_features[feature_name] = feature_type
self.feature_shapes[feature_type] = (df_cell.shape[1] - 1,)
for drug in ['drug1', 'drug2']:
for fea in self.drug_features:
feature_type = 'drug.' + fea
feature_name = drug + '.' + fea
df_drug = getattr(self, self.drug_df_dict[fea])
self.input_features[feature_name] = feature_type
self.feature_shapes[feature_type] = (df_drug.shape[1] - 1,)
self.feature_shapes['dose'] = (1,)
for dose in ['dose1', 'dose2']:
self.input_features[dose] = 'dose'
logger.info('Input features shapes:')
for k, v in self.input_features.items():
logger.info(' {}: {}'.format(k, self.feature_shapes[v]))
self.input_dim = sum([np.prod(self.feature_shapes[x]) for x in self.input_features.values()])
logger.info('Total input dimensions: {}'.format(self.input_dim))
if cv > 1:
if cv_partition == 'disjoint':
pass
elif cv_partition == 'disjoint_cells':
y = self.df_response['GROWTH'].values
groups = self.df_response['CELLNAME'].values
gkf = GroupKFold(n_splits=cv)
splits = gkf.split(y, groups=groups)
self.cv_train_indexes = []
self.cv_val_indexes = []
for index, (train_index, val_index) in enumerate(splits):
print(index, train_index)
self.cv_train_indexes.append(train_index)
self.cv_val_indexes.append(val_index)
else:
y = self.df_response['GROWTH'].values
# kf = KFold(n_splits=cv)
# splits = kf.split(y)
skf = StratifiedKFold(n_splits=cv, random_state=seed)
splits = skf.split(y, discretize(y, bins=cv))
self.cv_train_indexes = []
self.cv_val_indexes = []
for index, (train_index, val_index) in enumerate(splits):
print(index, train_index)
self.cv_train_indexes.append(train_index)
self.cv_val_indexes.append(val_index)
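                # Note (added): StratifiedKFold requires discrete labels, so the continuous
                # GROWTH values are binned with discretize() purely to balance the folds;
                # the regression target itself stays continuous.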
def load_data_all(self, switch_drugs=False):
df_all = self.df_response
y_all = df_all['GROWTH'].values
x_all_list = []
for fea in self.cell_features:
df_cell = getattr(self, self.cell_df_dict[fea])
df_x_all = pd.merge(df_all[['CELLNAME']], df_cell, on='CELLNAME', how='left')
x_all_list.append(df_x_all.drop(['CELLNAME'], axis=1).values)
# for fea in loader.cell_features:
# df_cell = getattr(loader, loader.cell_df_dict[fea])
# df_x_all = pd.merge(df_all[['CELLNAME']], df_cell, on='CELLNAME', how='left')
# df_x_all[:1000].to_csv('df.{}.1k.csv'.format(fea), index=False, float_format="%g")
drugs = ['NSC1', 'NSC2']
doses = ['pCONC1', 'pCONC2']
if switch_drugs:
drugs = ['NSC2', 'NSC1']
doses = ['pCONC2', 'pCONC1']
for drug in drugs:
for fea in self.drug_features:
df_drug = getattr(self, self.drug_df_dict[fea])
df_x_all = pd.merge(df_all[[drug]], df_drug, left_on=drug, right_on='NSC', how='left')
x_all_list.append(df_x_all.drop([drug, 'NSC'], axis=1).values)
for dose in doses:
x_all_list.append(df_all[dose].values)
# for drug in drugs:
# for fea in loader.drug_features:
# df_drug = getattr(loader, loader.drug_df_dict[fea])
# df_x_all = pd.merge(df_all[[drug]], df_drug, left_on=drug, right_on='NSC', how='left')
# print(df_x_all.shape)
# df_x_all[:1000].drop([drug], axis=1).to_csv('df.{}.{}.1k.csv'.format(drug, fea), index=False, float_format="%g")
# df_all[:1000].to_csv('df.growth.1k.csv', index=False, float_format="%g")
return x_all_list, y_all, df_all
def load_data_by_index(self, train_index, val_index):
x_all_list, y_all, df_all = self.load_data_all()
x_train_list = [x[train_index] for x in x_all_list]
x_val_list = [x[val_index] for x in x_all_list]
y_train = y_all[train_index]
y_val = y_all[val_index]
df_train = df_all.iloc[train_index, :]
df_val = df_all.iloc[val_index, :]
if self.cv_partition == 'disjoint':
logger.info('Training drugs: {}'.format(set(df_train['NSC1'])))
logger.info('Validation drugs: {}'.format(set(df_val['NSC1'])))
elif self.cv_partition == 'disjoint_cells':
logger.info('Training cells: {}'.format(set(df_train['CELLNAME'])))
logger.info('Validation cells: {}'.format(set(df_val['CELLNAME'])))
return x_train_list, y_train, x_val_list, y_val, df_train, df_val
def load_data_cv(self, fold):
train_index = self.cv_train_indexes[fold]
val_index = self.cv_val_indexes[fold]
# print('fold', fold)
# print(train_index[:5])
return self.load_data_by_index(train_index, val_index)
def load_data(self):
if self.cv_partition == 'disjoint':
train_index = self.df_response[(self.df_response['NSC1'].isin(self.train_drug_ids)) & (self.df_response['NSC2'].isin(self.train_drug_ids))].index
val_index = self.df_response[(self.df_response['NSC1'].isin(self.val_drug_ids)) & (self.df_response['NSC2'].isin(self.val_drug_ids))].index
else:
train_index = range(self.n_train)
val_index = range(self.n_train, self.total)
return self.load_data_by_index(train_index, val_index)
def load_data_old(self):
        # bad performance (4x slower), possibly due to non-contiguous data
df_train = self.df_response.iloc[:self.n_train, :]
df_val = self.df_response.iloc[self.n_train:, :]
y_train = df_train['GROWTH'].values
y_val = df_val['GROWTH'].values
x_train_list = []
x_val_list = []
for fea in self.cell_features:
df_cell = getattr(self, self.cell_df_dict[fea])
df_x_train = pd.merge(df_train[['CELLNAME']], df_cell, on='CELLNAME', how='left')
df_x_val = pd.merge(df_val[['CELLNAME']], df_cell, on='CELLNAME', how='left')
x_train_list.append(df_x_train.drop(['CELLNAME'], axis=1).values)
x_val_list.append(df_x_val.drop(['CELLNAME'], axis=1).values)
for drug in ['NSC1', 'NSC2']:
for fea in self.drug_features:
df_drug = getattr(self, self.drug_df_dict[fea])
df_x_train = pd.merge(df_train[[drug]], df_drug, left_on=drug, right_on='NSC', how='left')
                df_x_val = pd.merge(df_val[[drug]], df_drug, left_on=drug, right_on='NSC', how='left')
import os, math
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
#from matplotlib.collections import PatchCollection
from sklearn import linear_model
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
from importlib import reload
# Constants
#files = ['time_series_19-covid-Confirmed', 'time_series_19-covid-Deaths', 'time_series_19-covid-Recovered']
#labels = ['Confirmed', 'Deaths', 'Recovered']# until 23 March 2020
# Since 24 March 2020
#files = ['time_series_covid19_confirmed_global', 'time_series_covid19_deaths_global']
#labels = ['confirmed', 'deaths']
# Since 28 March 2020
files = ['time_series_covid19_confirmed_global', 'time_series_covid19_deaths_global', 'time_series_covid19_recovered_global']
labels = ['confirmed', 'deaths', 'recovered']
def open_csvs():
'''
Finding and opening your most recent data download if timestamp == None.
Alternatively, specify a substring of requested timestamp to select which files to open.
'''
timestamp = None
#timestamp = '20200330_15-26'
df=dict()
lists = list([list(), list(), list()])
with os.scandir() as it:
for entry in it:
for i in range(3):
if (timestamp==None or timestamp in entry.name) and files[i] in entry.name\
and entry.is_file():
lists[i].append(entry.name)
for i in range(3):
lists[i].sort()
df[labels[i]] = pd.read_csv(lists[i][-1])
return df
def data_preparation(df, country, output):
'''
This is used for the JHU CSSE dataset.
output can be 'confirmed', 'deaths', 'recovered', 'active' or 'all'
'active' returns dft['confirmed']-dft['deaths']-dft['recovered']
'all' returns all three as columns in a DataFrame as used in death_over_cases.py
'''
sets = dict({'EU': ['Austria', 'Belgium', 'Bulgaria', 'Croatia', 'Cyprus', 'Czechia', 'Denmark', 'Estonia', 'Finland', 'France', 'Germany', 'Greece', 'Hungary', 'Ireland', 'Italy', 'Latvia', 'Lithuania', 'Luxembourg', 'Malta', 'Netherlands', 'Poland', 'Portugal', 'Romania', 'Slovakia', 'Slovenia', 'Spain', 'Sweden']})#,
#'China': [['Anhui', 'China'], ['Beijing', 'China'], ['Chongqing', 'China'], ['Fujian', 'China'], ['Gansu', 'China'], ['Guangdong', 'China'], ['Guangxi', 'China'], ['Guizhou', 'China'], ['Hainan', 'China'], ['Hebei', 'China'], ['Heilongjiang', 'China'], ['Henan', 'China'], ['Hong Kong', 'China'], ['Hubei', 'China'], ['Hunan', 'China'], ['Inner Mongolia', 'China'], ['Jiangsu', 'China'], ['Jiangxi', 'China'], ['Jilin', 'China'], ['Liaoning', 'China'], ['Macau', 'China'], ['Ningxia', 'China'], ['Qinghai', 'China'], ['Shaanxi', 'China'], ['Shandong', 'China'], ['Shanghai', 'China'], ['Shanxi', 'China'], ['Sichuan', 'China'], ['Tianjin', 'China'], ['Tibet', 'China'], ['Xinjiang', 'China'], ['Yunnan', 'China'], ['Zhejiang', 'China']]})
#sets = dict({'EU': ['Croatia', 'Hungary']}) # test only
l = list()
if country == 'EU' or country == 'China' or country == 'Australia':
''' First, recursive implementation
l_members = list()
for member in sets[country]:
l_members.append(data_preparation(df, member, only_cases))
dft_members = pd.concat(l_members, axis=1)
return dft_members.sum(axis=1)
'''
M = dict() # these matrices are the booleans of selections for each Province/State, we take their multiple
for i in range(3):
k = labels[i]
M[k] = list()
if country == 'China' or country == 'Australia':
M[k].append((df[k]['Province/State'].notna()) & (df[k]['Country/Region']==country))
l.append(df[k][M[k][0]].iloc[:,4:].sum(axis=0))
else: # country == 'EU'
for member in sets[country]:
#print(member)
if isinstance(member, str):
M[k].append((df[k]['Province/State'].isna()) & (df[k]['Country/Region']==member))
elif len(member)==2: # if it's a pair of [Province/State, Country/Region]
M[k].append((df[k]['Province/State']==member[0])
& (df[k]['Country/Region']==member[1]))
l.append(df[k][np.sum(np.array(M[k]), axis=0)>=1].iloc[:,4:].sum(axis=0))
dft = pd.concat(l, ignore_index=True, axis=1)
#dft.rename(columns={i: labels[i] for i in range(3)}, inplace=True)
else:
for i in range(3):
k = labels[i]
if isinstance(country, str):
l.append(df[k][np.logical_and(df[k]['Province/State'].isna(),
df[k]['Country/Region']==country)].iloc[:,4:])
elif len(country)==2: # if it's a pair of [Province/State, Country/Region]
l.append(df[k][np.logical_and(df[k]['Province/State']==country[0],
df[k]['Country/Region']==country[1])].iloc[:,4:])
dft = pd.concat(l, ignore_index=True, axis=0).transpose()
#print(dft)
dft.rename(columns={i: labels[i] for i in range(3)}, inplace=True)
#print(dft)
if output=='all':
df_ts = dft
elif output=='active':
print('Number of recovered in the past eight days:')
print(dft['recovered'][-8:])
df_ts = dft['confirmed']-dft['deaths']-dft['recovered'] # On 24 March 2020, recovered is not available; on 28 March 2020 it is there again.
else:
df_ts = dft[output]
#print(df_ts)
#df_ts.rename(index={df_ts.index[i]: pd.to_datetime(df_ts.index)[i] for i in range(len(df_ts.index))}, inplace=True)
df_ts.rename(index=pd.Series(df_ts.index, index=df_ts.index).apply(lambda x: pd.to_datetime(x)), inplace=True)
#print(df_ts)
return df_ts
def rm_early_zeros(ts):
'''
Removes early zeros and NaNs from a pandas time series. It finds last (most recent) zero or NaN in
time series and omits all elements before and including this last zero or NaN. Returns the remaining
time series which is free of zeros and NaN.
pd.Series([0,0,0,0,1,2,0,0,3,6]) -> pd.Series([3,6])
'''
zeroindices = ts[(ts==0) | ts.isna()].index
if len(zeroindices)==0:
return ts
else:
successor = np.nonzero((ts.index==zeroindices.max()))[0][0] + 1
return ts[successor:]
def rm_consecutive_early_zeros(ts, keep=1):
'''
Removes first consecutive subsequence of early zeros from a pandas time series
except for the last keep if there are that many.
rm_consecutive_early_zeros(pd.Series([0,0,0,0,1,2,3,6]), 2) -> pd.Series([0,0,1,2,3,6])
'''
zeroindices = ts[ts==0].index
if len(zeroindices)==0:
return ts
else:
first_pos_index = np.nonzero((ts.index==ts[ts>0].index[0]))[0][0]
if first_pos_index <= keep:
return ts
else:
return ts[first_pos_index-keep:]
def separated(s, lang='en', k=3):
'''
Input must be a string. Puts a comma between blocks of k=3 digits:
'1000000' -> '1,000,000'
'''
if lang == 'de':
chr = '.'
else:
chr = ','
if len(s)>=5:
l=list()
for i in range(len(s)//k):
l.insert(0, s[len(s)-(i+1)*k:len(s)-i*k])
if len(s) % k !=0:
l.insert(0, s[:len(s)-(i+1)*k])
return chr.join(l)
else:
return s
def x2str(x, width):
'''
Rounds a number to tenths. If width is greater than its length, then it pads it with space.
If width<0, then it does no padding.
'''
#if x<0.1 and x>-0.1 and width>=6:
# s = '{:.3f}'.format(x) #str(round(x*1000)/1000)
if x<1 and x>-1 and width>=5:
s = '{:.2f}'.format(x) #str(round(x*100)/100)
elif x<10 and x>-10 and width>=4:
s = '{:.1f}'.format(x) #str(round(x*10)/10)
else:
s = '{:.0f}'.format(x) #str(int(round(x)))
if width > len(s):
return s.rjust(width)
else:
return s
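# Illustrative examples for x2str (added):
#   x2str(0.456, 6)   -> '  0.46'   (two decimals for |x| < 1)
#   x2str(3.14159, 4) -> ' 3.1'     (one decimal for |x| < 10)
#   x2str(123.6, 5)   -> '  124'    (rounded to an integer otherwise)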
def n2str(n, width):
'''
Takes integers. If width is greater than its length, then it pads it with space.
If width<0, then it does no padding.
'''
s = str(n)
if width > len(s):
return s.rjust(width)
else:
return s
def interpolate(df_ts, window_length):
'''
This returns (or interpolates, if not found) from the cumulatives' time series the entry at last entry minus
(window_length-1) days.
'''
# date of interest:
doi = df_ts.index[-1]-pd.Timedelta(f'{window_length-1} days')
if doi in df_ts.index:
return df_ts.loc[doi]
else:
prv = df_ts[df_ts.index<doi]
nxt = df_ts[df_ts.index>doi]
if len(prv)>0 and len(nxt)>0:
i_prv = prv.index[-1]
i_nxt = nxt.index[0]
c_prv = (i_nxt-doi).days/(i_nxt-i_prv).days
c_nxt = (doi-i_prv).days/(i_nxt-i_prv).days
return c_prv*df_ts.loc[i_prv] + c_nxt*df_ts.loc[i_nxt]
elif len(nxt)>0:
return nxt.iloc[0]
elif len(prv)>0: # It can never come this far, df_ts.iloc[-1] exists so nxt is not empty.
return prv.iloc[-1]
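# Worked example for interpolate() (added): if the cumulative series has entries on
# 2020-03-10 and 2020-03-13 but the date of interest is 2020-03-11, the returned value is
# (2/3)*value(2020-03-10) + (1/3)*value(2020-03-13), i.e. a linear interpolation weighted
# by the distance in days to the two neighbouring observations.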
'''
def truncate_before(df_ts, window_length):
#This returns (or interpolates, if not found) from the time series the entry at last entry minus
# (window_length-1) days.
# date of interest:
doi = df_ts.index[-1]-pd.Timedelta(f'{window_length-1} days')
if doi in df_ts.index:
return df_ts.loc[doi:]
else:
prv = df_ts[df_ts.index<doi]
nxt = df_ts[df_ts.index>doi]
if len(prv)>0 and len(nxt)>0:
i_prv = prv.index[-1]
i_nxt = nxt.index[0]
c_prv = (i_nxt-doi).days/(i_nxt-i_prv).days
c_nxt = (doi-i_prv).days/(i_nxt-i_prv).days
df_ts.loc[doi] = c_prv*df_ts.loc[i_prv] + c_nxt*df_ts.loc[i_nxt]
df_ts = df_ts.sort_index(inplace=False)
return df_ts.loc[doi:]
elif len(nxt)>0:
df_ts.loc[doi] = nxt.iloc[0]
df_ts = df_ts.sort_index(inplace=False)
return df_ts.loc[doi:]
elif len(prv)>0: # It can never come this far, df_ts.iloc[-1] exists so nxt is not empty.
df_ts.loc[doi] = prv.iloc[-1]
df_ts = df_ts.sort_index(inplace=False)
return df_ts.loc[doi:]
'''
def truncate_before(df_ts, window_length, fill_all_missing):
'''
This returns (or interpolates, if not found) from the cumulatives' time series the entries from (last entry minus
(window_length-1) days) until the last entry.
When some days are missing from the cumulative time series df_ts, then I could assign them zero increments and
assign all increments to the first day after the gap. Instead, I spread out the growth uniformly across the
missing days. The first solution (0, 0, all increment) would give the fitting a tendency to see quickly
growing cumulatives.
'''
df_ts_new = df_ts.copy()
r = range(window_length-1, 0, -1) if fill_all_missing else [window_length-1]
for i in r:
# date of interest:
#doi = df_ts.index[-1]-pd.Timedelta(f'{window_length-1} days')
doi = df_ts.index[-1]-pd.Timedelta(f'{i} days')
if doi not in df_ts.index:
prv = df_ts[df_ts.index<doi]
nxt = df_ts[df_ts.index>doi]
if len(prv)>0 and len(nxt)>0:
i_prv = prv.index[-1]
i_nxt = nxt.index[0]
c_prv = (i_nxt-doi).days/(i_nxt-i_prv).days
c_nxt = (doi-i_prv).days/(i_nxt-i_prv).days
df_ts_new.loc[doi] = c_prv*df_ts.loc[i_prv] + c_nxt*df_ts.loc[i_nxt]
elif len(nxt)>0:
df_ts_new.loc[doi] = nxt.iloc[0]
elif len(prv)>0: # It can never come this far, df_ts.iloc[-1] exists so nxt is not empty.
df_ts_new.loc[doi] = prv.iloc[-1]
df_ts_new = df_ts_new.sort_index(inplace=False)
return df_ts_new.loc[df_ts.index[-1]-pd.Timedelta(f'{window_length-1} days'):]
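# Example of the gap filling above (added, with made-up numbers): given cumulative counts
#   2020-03-10 -> 100 and 2020-03-13 -> 160
# with 2020-03-11/12 missing inside the window, the missing days are filled as 120 and 140,
# i.e. the 60-case increase is spread uniformly instead of being booked entirely on 03-13.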
def analysis(df_ts, window_length, exp_or_lin, extent='full'):
'''
df_ts: pd.Series, it is a time series, can be totals or no. per e.g. 100,000 ppl
window_length: int
exp_or_lin in ['exp', 'lin']
For 'exp', because of log2, this requires all entries in df_ts to be positive.
For 'lin', because of log2, this requires last entry in df_ts to be positive.
extent in ['full', 'minimal']
'minimal' doesn't compute predictions.
output: results = [
daily increment in natural units (units of df_ts): float,
daily growth rate in percentage: float,
doubling time in days: float or 0 for 'minimal',
current cases (df_ts.iloc[-1]),
projection_lower: type(df_ts.dtype) or 0 for 'minimal',
projection_upper: type(df_ts.dtype) or 0 for 'minimal',
model_score=R^2: float,
difference of model fit on last date and last data point in log space: float
]
model: sklearn.linear_model
#failure: 0 or 1; 1 if it failed due to nonpositive number in exponential fit or too short time series
'''
i_ts = (df_ts - df_ts.shift(1))[1:] # i for increments
#if len(i_ts)<window_length:# or (exp_or_lin=='exp' and (i_ts.iloc[-window_length:]<=0).sum()>=5):
if len(i_ts)==0 or (i_ts.index[-1]-i_ts.index[0]).days<window_length-1:
results = 8 * [0]
results[-1] = 100
return results, None
intl_lo_days = 4
intl_hi_days = 6
results = [None] * 8
results[3] = df_ts.iloc[-1]
model = linear_model.LinearRegression(fit_intercept=True)
if exp_or_lin=='exp':
df_ts_orig = df_ts.copy()
df_ts_0 = truncate_before(df_ts_orig, window_length+1, fill_all_missing=False) # For the fit to increments.
df_ts = truncate_before(df_ts, window_length+1, fill_all_missing=True)
i_ts = (df_ts - df_ts.shift(1))[1:] # i for increments
i_ts[i_ts<=0] = 1
y = i_ts.values
ylog = np.log(y)
model.fit((i_ts.index-i_ts.index[-1]).days.values.reshape(-1, 1), ylog)
results[0] = math.exp(model.intercept_)
# For doubling, the area of the increments is equal to df_ts[-1]
# cf. https://www.wolframalpha.com/input/?i=integrate+%28exp%28a+t+%2Bb%29+dt%29+from+t%3D0+to+x
if model.coef_[0]!=0:
temp2 = math.exp(model.intercept_)/model.coef_[0]
temp = model.coef_[0]*df_ts.iloc[-1]/math.exp(model.intercept_) + 1
if temp>0:
results[2] = math.log(temp)/model.coef_[0]
else:
results[2] = np.inf
else:
results[2] = df_ts.iloc[-1]/math.exp(model.intercept_)
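            # Derivation sketch (added comment): with daily increments modelled as exp(a*t + b),
            # the cumulative growth from t=0 to t=x is exp(b)*(exp(a*x) - 1)/a.  Setting this
            # equal to the current total df_ts.iloc[-1] and solving for x gives
            #   x = log(a*df_ts.iloc[-1]/exp(b) + 1)/a,
            # which is results[2] above, with a = model.coef_[0] and b = model.intercept_.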
if extent == 'full':
if model.coef_[0]!=0:
results[4] = (math.exp(model.coef_[0]*intl_lo_days)-1)*temp2 + df_ts.iloc[-1]
results[5] = (math.exp(model.coef_[0]*intl_hi_days)-1)*temp2 + df_ts.iloc[-1]
else:
results[4] = math.exp(model.intercept_)*intl_lo_days + df_ts.iloc[-1]
results[5] = math.exp(model.intercept_)*intl_hi_days + df_ts.iloc[-1]
#if (i_ts_orig.iloc[-window_length:]>0).all():
#if (truncate_before(i_ts_orig, window_length, fill_all_missing=False)>0).all():
i_ts_0 = (df_ts_0 - df_ts_0.shift(1))[1:]
if (i_ts_0>0).all():
#results[6] = model.score(np.arange(-window_length+1, 1).reshape(-1, 1), ylog)
results[6] = model.score((i_ts_0.index-i_ts_0.index[-1]).days.values.reshape(-1, 1), ylog)
else:
results[6] = 0
#if df_ts.iloc[-1]==df_ts.iloc[-window_length]:
#if df_ts.iloc[-1]==interpolate(df_ts, window_length): # If there is no growth, then exp is not good approx.
first_day = df_ts.index[-1]-pd.Timedelta(f'{window_length-1} days')
if df_ts.iloc[-1]==df_ts.loc[first_day]: # If there is no growth, then exp is not good approx.
results[7] = 100 # Exp overestimates growth by a factor of infinity.
else:
if model.coef_[0]!=0:
#results[7] = temp2*(1-math.exp(model.coef_[0]*(-window_length+1)))/(df_ts.iloc[-1]-df_ts.iloc[-window_length])-1
#results[7] = temp2*(1-math.exp(model.coef_[0]*(-window_length+1)))/(df_ts.iloc[-1]-interpolate(df_ts, window_length))-1
results[7] = temp2*(1-math.exp(model.coef_[0]*(-window_length+1)))/(df_ts.iloc[-1]-df_ts.loc[first_day])-1
else:
#results[7] = math.exp(model.intercept_)*(-window_length+1)/(df_ts.iloc[-1]-df_ts.iloc[-window_length])-1
#results[7] = math.exp(model.intercept_)*(-window_length+1)/(df_ts.iloc[-1]-interpolate(df_ts, window_length))-1
results[7] = math.exp(model.intercept_)*(-window_length+1)/(df_ts.iloc[-1]-df_ts.loc[first_day])-1
elif exp_or_lin=='lin':
df_ts_orig = df_ts.copy()
df_ts_0 = truncate_before(df_ts_orig, window_length+1, fill_all_missing=False) # For the fit to increments.
df_ts = truncate_before(df_ts, window_length+1, fill_all_missing=True)
i_ts = (df_ts - df_ts.shift(1))[1:] # i for increments
y = i_ts.values
model.fit((i_ts.index-i_ts.index[-1]).days.values.reshape(-1, 1), y)
results[0] = model.intercept_
if model.coef_[0]!=0:
if 2*model.coef_[0]*df_ts.iloc[-1] >= - model.intercept_*model.intercept_:
results[2] = (-model.intercept_ + math.sqrt(model.intercept_*model.intercept_ + 2*model.coef_[0]*df_ts.iloc[-1]))/model.coef_[0]
else:
results[2] = np.inf
else:
if model.intercept_!=0:
results[2] = df_ts.iloc[-1]/model.intercept_
else:
if df_ts.iloc[-1]!=0:
results[2] = np.inf
else:
results[2] = 0 # model.coef_[0]==model.intercept_==0
if extent == 'full':
if model.coef_[0]*model.intercept_<0 and\
((model.coef_[0]>0 and -model.intercept_<intl_lo_days*model.coef_)\
or (model.coef_[0]<0 and -model.intercept_>intl_lo_days*model.coef_)):
# there is a zero-crossing until intl_lo_days
results[4] = -model.intercept_*model.intercept_/(2*model.coef_[0]) + df_ts.iloc[-1]
results[5] = results[4]
elif model.coef_[0]*model.intercept_<0 and\
((model.coef_[0]>0 and -model.intercept_<intl_hi_days*model.coef_)\
or (model.coef_[0]<0 and -model.intercept_>intl_hi_days*model.coef_)):
# there is a zero-crossing after intl_lo_days, before intl_hi_days
results[5] = -model.intercept_*model.intercept_/(2*model.coef_[0]) + df_ts.iloc[-1]
if results[4] is None:
results[4] = (model.coef_[0]*intl_lo_days/2+model.intercept_)*intl_lo_days + df_ts.iloc[-1]
if results[5] is None:
results[5] = (model.coef_[0]*intl_hi_days/2+model.intercept_)*intl_hi_days + df_ts.iloc[-1]
#results[6] = model.score(np.arange(-window_length+1, 1).reshape(-1, 1), y)
i_ts_0 = (df_ts_0 - df_ts_0.shift(1))[1:]
results[6] = model.score((i_ts_0.index-i_ts_0.index[-1]).days.values.reshape(-1, 1), y)
#if df_ts.iloc[-1]==df_ts.iloc[-window_length]:
            first_day = df_ts.index[-1]-pd.Timedelta(f'{window_length-1} days')
########################################################################
# Copyright 2020 Battelle Energy Alliance, LLC ALL RIGHTS RESERVED #
# Mobility Systems & Analytics Group, Idaho National Laboratory #
########################################################################
# Location Generalizer
# Release 1.2 8/10/2021
import pyodbc
import pandas as pd
import pickle
from datetime import datetime, timedelta
import time
import math
import yaml
from pathlib import Path
import csv
import numpy as np
from sklearn.cluster import DBSCAN
from shapely import geometry
from shapely.geometry import MultiPoint
from haversine import haversine, Unit
import pynput
from pandasql import sqldf
import time
from location_generalizer.utils import parallel_func_wrapper_update_vlocation
from location_generalizer.dataclasses import StartSEColumnMappings, EndSEColumnMappings
class cfg():
with open('locationGeneralizer.yml') as f:
config = yaml.load(f, Loader=yaml.FullLoader)
odbcConnectionString=config['odbcConnectionString']
inputTableOrCSV= config['inputTableOrCSV']
vehiclesInChunk = config['vehiclesInChunk']
qryVehicleIDList =config['qryVehicleIDList']
qryVehicleInfo = config['qryVehicleInfo']
qryVehicleIDList = qryVehicleIDList.replace('{inputsrc}', inputTableOrCSV)
qryVehicleInfo = qryVehicleInfo.replace('{inputsrc}', inputTableOrCSV)
errorLogFileName = config['errorLogFileName']
heartbeatFileName = config['heartbeatFileName']
locationInfoFileName = config['locationInfoFileName']
homeInfoFileName = config['homeInfoFileName']
pklCensusDivisionsFileName = config['pklCensusDivisionsFileName']
evseLookupFileName = config['evseLookupFileName']
bboxes = config['boundingBoxes']
gpsOdoThreshold_mi = config['gpsOdoThreshold_mi']
minTrips = config['minTrips']
minLastTrips = config['minLastTrips']
minPctParks = config['minPctParks']
numL2Rounding = config['numL2Rounding']
numDCRounding = config['numDCRounding']
doCheck = config['doCheck']
dayEndHours = config['dayEndHours']
dayEndMinutes = config['dayEndMinutes']
dbscan_eps_ft = config['dbscan_eps_ft']
dbscan_min_spls = config['dbscan_min_spls']
evseDistRange_Miles = config['evseDistRange_Miles']
evseLatRange = config['evseLatRange']
evseLonRange = config['evseLonRange']
addClusterIDtoLocationInfo = config['addClusterIDtoLocationInfo']
hdrErrorLogCSV = config['hdrErrorLogCSV']
if addClusterIDtoLocationInfo:
hdrLocationInfoCSV = config['hdrClusterLocationInfoCSV']
else:
hdrLocationInfoCSV = config['hdrLocationInfoCSV']
hdrHomeInfoCSV = config['hdrHomeInfoCSV']
if addClusterIDtoLocationInfo:
colLocationInfo = config['colClusterLocationInfo']
else:
colLocationInfo = config['colLocationInfo']
colHomeInfo = config['colHomeInfo']
verbosity = config['verbosity']
stopProcessing = False
numCores = config['num_cores']
errFilePath = Path(errorLogFileName)
if not errFilePath.exists():
        # ErrorLog output file
hdr = pd.DataFrame(hdrErrorLogCSV)
hdr.to_csv(errorLogFileName, index=False, header=False, mode='w')
# use one line buffering - every line written is flushed to disk
errorFile = open(errorLogFileName, mode='a', buffering=1, newline='')
errorWriter = csv.writer(errorFile)
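# Illustrative locationGeneralizer.yml fragment (added; placeholder values, not shipped
# defaults -- the keys mirror the attributes read in the cfg class above):
#   odbcConnectionString: "DRIVER={ODBC Driver 17 for SQL Server};SERVER=myserver;DATABASE=mydb"
#   inputTableOrCSV: "TripData"
#   vehiclesInChunk: 500
#   dbscan_eps_ft: 100
#   dbscan_min_spls: 5
#   minTrips: 30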
def main():
# trust chained assignments (no warnings)
pd.set_option('mode.chained_assignment', None)
# LocationInfo output file
locationFilePath = Path(cfg.locationInfoFileName)
if not locationFilePath.exists():
hdr = pd.DataFrame(cfg.hdrLocationInfoCSV)
hdr.to_csv(cfg.locationInfoFileName, index=False, header=False, mode='w')
# HomeInfo output file
homeFilePath = Path(cfg.homeInfoFileName)
if not homeFilePath.exists():
hdr = pd.DataFrame(cfg.hdrHomeInfoCSV)
hdr.to_csv(cfg.homeInfoFileName, index=False, header=False, mode='w')
## geopandas can read the shapefile directly, but we pickled it into one file
## a single pickle file simplifies distribution whereas,
## loading a shapefile requires several adjacent accompanying files
divisions = pd.read_pickle(cfg.pklCensusDivisionsFileName)
# get Public EVSE stations
EVSEs = pd.read_csv(cfg.evseLookupFileName)
# pyodbc attempts to turn off autocommit before returning connection and this causes file connections (like CSVs), which do not support transactions, to fail
    # when autocommit is explicitly set, pyodbc will not attempt any changes
cnxn = pyodbc.connect(cfg.odbcConnectionString, autocommit=True)
lastVehicle = 0
hbFilePath = Path(cfg.heartbeatFileName)
if hbFilePath.exists():
with open(hbFilePath, 'r') as hb:
lastVehicle = hb.readline()
cfg.errorWriter.writerow([datetime.now(), lastVehicle, -1,'Restarting after vehicle {}'.format(lastVehicle)])
print('Restarting after vehicle {}'.format(lastVehicle))
# get sorted list of all vehicle IDs
qry = cfg.qryVehicleIDList.replace('{startVehicle}', str(lastVehicle))
df = pd.read_sql(qry, cnxn)
numOfVehicles = cfg.vehiclesInChunk # number of vehicle to process at a time. We can't process all at once due to dataset size, so this is the "chunk size" to process
vehicleList = df['VehicleID'].tolist()
# divide up vehicle ID list into sections of <numOfVehicle> length chunks (we'll read data in one chunk at a time to avoid memory overrun)
chunks = [vehicleList[i * numOfVehicles:(i+1)*numOfVehicles] for i in range((len(vehicleList) + numOfVehicles -1) // numOfVehicles)]
i = 0
vcnt = 0
for chunk in chunks:
chunkList = ','.join(str(e) for e in chunk)
qry = cfg.qryVehicleInfo.format(chunkList) # insert vehicleIDs into "in" list
if cfg.verbosity > 0: print('Fetching data')
chunkData = pd.read_sql(qry, cnxn, parse_dates=['TripStartLocalTime', 'TripEndLocalTime'])
# create new column for flag to exclude bad records
chunkData['Include'] = True
i += 1
print("chunk: {}, vehicle from {} through {}".format(i, chunk[0], chunk[-1]))
# iterate through one vehicle at a time
for v in chunk:
if cfg.stopProcessing: exit()
if cfg.verbosity > 0: print('Vehicle: {}'.format(v))
# create empty LocationInfo data frame
# GPS coordinates are added here for convenience, but will not be carried into LocationInfo output file
locationInfo = pd.DataFrame(columns = cfg.colLocationInfo)
# create empty HomeInfo data frame
homeInfo = pd.DataFrame(columns = cfg.colHomeInfo)
homeInfo['HomeStartLocalTime'] = pd.NaT
homeInfo['HomeEndLocalTime'] = pd.NaT
vcnt += 1
# grab all records in vehicle v
vData = chunkData[chunkData['VehicleID'] == v]
# create new column to check for Odometer gaps, i.e missing trips
vData['resid_Miles'] = vData['TripStartOdometer_Miles'].shift(periods=-1) - vData['TripEndOdometer_Miles']
### Check validity of data, marking invalid records (Include = True/False)
if cfg.verbosity > 1: print(' Check for valid values')
if cfg.doCheck: vData = DoValidityChecking(v, vData)
vData.resid_Miles = vData.resid_Miles.astype(object).where(vData.resid_Miles.notnull(), None) # set NaN to None (becomes Null for DB)
            # toss out rows that failed the validity check
vData = vData[vData.Include == True]
numTrips = len(vData)
if numTrips < cfg.minTrips:
if cfg.verbosity > 1: print(' Not enough trips, vehicle skipped.')
cfg.errorWriter.writerow([datetime.now(), v, -1,'Not enough trips, vehicle skipped. ({} need >= {})'.format(numTrips, cfg.minTrips)])
else:
                # create new column to identify first/last trip of day
vData['TripFlag'] = None
### Identify first and last of trip of day
if cfg.verbosity > 1: print(' Defining first/last trip of day')
vData = flagTrips(v, vData)
### Find clusters of vehicle locations
if cfg.verbosity > 1: print(' Clustering')
vData = clusterData(v, vData)
# # drop rows - remove previous vehicle info
# homeInfo.drop(homeInfo.index, inplace=True)
# locationInfo.drop(locationInfo.index, inplace=True)
# add row to LocationInfo data frame
liList = [vData[['VehicleID', 'TripStartLocalTime', 'TripEndLocalTime', 'TripStartLatitude', 'TripStartLongitude', 'TripEndLatitude','TripEndLongitude', 'TripStartClusterID', 'TripEndClusterID']]]
locationInfo = locationInfo.append(liList, ignore_index=True)
# add ParkEndLocalTime for convenience - its the same as TripStartLocalTime in next row
vData['ParkEndLocalTime'] = vData['TripStartLocalTime'].shift(periods=-1)
vData['ParkDuration_hr'] = (vData['ParkEndLocalTime'] - vData['TripEndLocalTime'])/np.timedelta64(1,'h')
########################
                #### FIND HOME DECISION TREE (may return more than one home; it returns an array to conform with other methods that may return more than one)
if cfg.verbosity > 1: print(' Identifying home location')
vData = findHome_DecisionTree(v, vData)
homeClusters = list(set(vData[vData['location'] == 'home']['TripEndClusterID']))
########################
if len(homeClusters) == 0:
cfg.errorWriter.writerow([datetime.now(), v, -1,'No home clusters found - vehicle skipped.'])
continue # continue with next vehicle
########################
#### PROCESS HOME AND LOCATION INFO returns the data we want to push as output files
if cfg.verbosity > 1: print(' Calculating output metrics')
isOK, locationInfo, homeInfo = processHome(v, divisions, vData, locationInfo, homeInfo, homeClusters, EVSEs)
if not isOK:
continue
########################
############################# IMPORTANT #########################################
# CLEANUP HOMEINFO AND LOCATIONINFO FOR EXPORT (remove sensitive data)
homeInfo.drop(homeInfo[homeInfo['Primary'].isnull()].index, inplace=True)
homeInfo.drop(['CentroidLatitude', 'CentroidLongitude', 'Primary'], axis=1, inplace=True)
locationInfo.drop(['TripStartLatitude','TripStartLongitude','TripEndLatitude','TripEndLongitude'], axis=1, inplace=True)
if not cfg.addClusterIDtoLocationInfo:
locationInfo.drop(['TripStartClusterID','TripEndClusterID'], axis=1, inplace=True)
#################################################################################
# write to output files
if cfg.verbosity > 1: print(' Writing to output files')
locationInfo.to_csv(cfg.locationInfoFileName, index=False, header=False, mode='a')
homeInfo.to_csv(cfg.homeInfoFileName, index=False, header=False, mode='a')
# # use one line buffering - every line written is flushed to disk
with open(cfg.heartbeatFileName, mode='w', buffering=1, newline='') as hb:
hb.write(str(v))
def findHome_DecisionTree(v, vData):
vData['location'] = 'unknown'
## apply filter rules to get qualifying clusters
vQual = rule1(vData) # this does not find good clusters, but checks if there is enough vehicle data to continue
if vQual.empty:
cfg.errorWriter.writerow([datetime.now(), v, -1,'Vehicle failed at rule 1'])
return vData # leave all clusters marked unknown
vQual = rule2(vData) # vData (the complete datset) is passed and we start eliminating clusters to find only those of interest
if vQual.empty:
cfg.errorWriter.writerow([datetime.now(), v, -1,'Vehicle failed at rule 2'])
return vData # leave all clusters marked unknown
vQual = rule3(vQual) # vQual is passed and further filtered
if vQual.empty:
cfg.errorWriter.writerow([datetime.now(), v, -1,'Vehicle failed at rule 3'])
return vData # leave all clusters marked unknown
qualClusters = vQual['TripEndClusterID']
vData['location'] = 'away'
# apply decision tree to get home clusters
homeClusters = decision1(vData, qualClusters) # find home by 65% rule
if not homeClusters.empty:
vData.loc[vData.TripEndClusterID.isin(homeClusters.TripEndClusterID), ['location']] = 'home'
homeClusters = decision2(vData, qualClusters) # look for home in remaining clusters
if not homeClusters.empty:
vData.loc[vData.TripEndClusterID.isin(homeClusters.tolist()), ['location']] = 'home'
return vData
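# Summary of the home-detection heuristic above (comment added for readability; the
# thresholds are taken from the rule/decision functions below):
#   rule1: the vehicle needs >= 30 days whose last trip is known (no odometer gap),
#   rule2: keep clusters whose first and last park are more than 21 days apart,
#   rule3: keep clusters with at least 15 parks in that period,
#   decision1: a qualifying cluster is 'home' if more than 65% of driving days with a
#              known last trip end there,
#   decision2: additionally, a qualifying cluster is 'home' if the vehicle is parked there
#              more than 9 hours per driving day without missing trips.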
def rule1(vQual):
# 1. vehicle must have at least 30 days of known last trips
ids = vQual[((vQual['TripFlag'] == 'L') | (vQual['TripFlag'] == 'FL')) & (vQual['resid_Miles'] > -1) & (vQual['resid_Miles'] < 1)]['TripEndClusterID']
if len(ids) < 30:
vQual = vQual[0:0]
return vQual
def rule2(vQual):
# 2. get clusters with > 21 days between first and last parks (i.e. 'cluster period')
#### get number of days, grouped by cluster id, between first and last park
ids = vQual.groupby('TripEndClusterID')['TripEndDateOffset'].max() - vQual.groupby('TripEndClusterID')['TripEndDateOffset'].min()
if ids.empty:
vQual = vQual[0:0]
else:
ids = ids.dt.days
#### add one day to max-min range
ids = ids.add(1)
#### get cluster ids with more than 21 days between first and last park
ids = ids[ids > 21]
if ids.empty:
vQual = vQual[0:0]
else:
#### get data belonging to qualifying clusters
vQual = vQual[vQual['TripEndClusterID'].isin(ids.index.tolist())]
return vQual
def rule3(vQual):
# 3. get clusters with >= 15 parks during the cluster period
#### get number of parks, grouped by cluster id
ids = vQual.groupby('TripEndClusterID')['TripEndClusterID'].count()
#### get cluster ids having at least 15 parks
ids = ids[ids >= 15]
if ids.empty:
vQual = vQual[0:0]
else:
#### get data belonging to qualifying clusters
vQual = vQual[vQual['TripEndClusterID'].isin(ids.index.tolist())]
return vQual
def decision1(vData, qualClusters):
# how many days in period, grouped by cluster
ids = vData[((vData['TripFlag'] == 'L') | (vData['TripFlag'] == 'FL')) & (vData['resid_Miles'] > -1) & (vData['resid_Miles'] < 1)][['TripEndClusterID','TripEndDateOffset']]
daysInPeriod = ids.groupby('TripEndClusterID').count() # NumDaysInPeriodWithParkAKLTInCluster
# how many driving days in period, grouped by cluster
vMin = vData.groupby('TripEndClusterID')['TripEndDateOffset'].min()
vMax = vData.groupby('TripEndClusterID')['TripEndDateOffset'].max()
vRange = pd.concat([vMin, vMax], axis=1)
vRange.columns=['minDate', 'maxDate']
q = '''
select TripEndClusterId, count(*) numDrivingDays
from (
select distinct v.TripEndClusterID, v.minDate, i.TripEndDateOffset, v.maxDate
from ids i
inner join vRange v
on i.TripEndDateOffset >= v.minDate
and i.TripEndDateOffset <= v.maxDate
) a
group by TripEndClusterID
'''
drivingDays = sqldf(q, locals()) # NumDrivingDaysInPeriodWithKnownLastTrip
# percent days with park, grouped by cluster
q = '''
select a.TripEndClusterID, a.TripEndDateOffset*1.0 / b.numDrivingDays parkDays_pct
from daysInPeriod a
inner join drivingDays b
on a.TripEndClusterID = b.TripEndClusterID
'''
parkDaysPct = sqldf(q, locals()) # PercDaysWithKLTWithParkAKLT
homeClusters = parkDaysPct[parkDaysPct['parkDays_pct'] > 0.65]
homeClusters = homeClusters[homeClusters['TripEndClusterID'].isin(qualClusters.to_list())]
return homeClusters
def decision2(vData, qualClusters):
parkDistinctTimes = set(vData['TripEndDateOffset']) #### matches check query
missingTrips = set(vData[(vData['resid_Miles'] > 1) | (vData['resid_Miles'] < -1)]['TripEndDateOffset']) #### matches check query
goodTimes = parkDistinctTimes.difference(list(missingTrips)) #### matches check query
goodTimes = pd.DataFrame(list(goodTimes), columns=['TripEndDateOffset']) # convert set to dataframe
vDataGood = vData[vData['TripEndDateOffset'].isin(goodTimes['TripEndDateOffset'])]
totalParkedTimeHr = vDataGood.groupby('TripEndClusterID')['ParkDuration_hr'].sum()
vMin = vData.groupby('TripEndClusterID')['TripEndDateOffset'].min()
vMax = vData.groupby('TripEndClusterID')['TripEndDateOffset'].max()
vRange = pd.concat([vMin, vMax], axis=1)
vRange.columns=['minDate', 'maxDate']
qry = '''
Select A.TripEndClusterID, COUNT(*) NumDrivingDaysWithNoMissingTripsInPeriod
from
( Select distinct B.TripEndClusterID, B.MinDate, A.TripEndDateOffset, B.MaxDate
from goodTimes A
inner join vRange B
on A.TripEndDateOffset >= B.minDate
and A.TripEndDateOffset <= B.maxDate
) A
group by TripEndClusterID
'''
numDrivingDays = sqldf(qry, locals()) # NumDrivingDaysWithNoMissingTripsInPeriod
totalParkedTimeHr = pd.DataFrame(totalParkedTimeHr)
qry = '''
select b.TripEndClusterID, ParkDuration_hr / NumDrivingDaysWithNoMissingTripsInPeriod TotalParkedTimeInPeriodInCluster_hr_PerDrivingDayWNMT
from totalParkedTimeHr a
inner join numDrivingDays b
on a.TripEndClusterID = b.TripEndClusterID
'''
totalParked = sqldf(qry, locals())
homeClusters = totalParked[totalParked['TotalParkedTimeInPeriodInCluster_hr_PerDrivingDayWNMT'] > 9]['TripEndClusterID']
homeClusters = homeClusters[homeClusters.isin(set(qualClusters))]
return homeClusters
def getEVSEDistance(row, homeLat, homeLong):
dist = haversine((row.Latitude, row.Longitude), (homeLat, homeLong), unit=Unit.MILES)
return dist
def getStartLocationDistance(row, homeLat, homeLong, homeStart, homeEnd):
if (homeStart <= row['TripEndLocalTime'] <= homeEnd):
startDist = round(haversine((row['TripStartLatitude'], row['TripStartLongitude']), (homeLat, homeLong), unit=Unit.MILES))
else:
startDist = row['TripStartDistanceFromHome_Miles']
return startDist
def getEndLocationDistance(row, homeLat, homeLong, homeStart, homeEnd):
if (homeStart <= row['TripEndLocalTime'] <= homeEnd):
endDist = round(haversine((row['TripEndLatitude'], row['TripEndLongitude']), (homeLat, homeLong), unit=Unit.MILES))
else:
endDist = row['TripEndDistanceFromHome_Miles']
return endDist
def getLocationInfoData(row, HomeInfo, HomeClusterID):
return 1
def isInHomeCluster(se, TripClusterID, TripLocaltTime, homeInfo):
homeID = -1
inRange = False
## is park in a home cluster (i.e. is park's cluster ID equal to a home cluster ID)
if TripClusterID in set(homeInfo['HomeID']):
homeID = TripClusterID
## in a home cluster, but is park time in home cluster period
inRange = isInRangeSet(se, homeInfo[homeInfo['HomeID'] == homeID][['HomeStartLocalTime', 'HomeEndLocalTime']], TripLocaltTime)
#inRange = True
return homeID, inRange
def isInRangeSet(se, homeStartEnd, locationTime):
if se == 'Start':
if len(homeStartEnd[(homeStartEnd['HomeStartLocalTime'] < locationTime) & (homeStartEnd['HomeEndLocalTime'] >= locationTime)]) > 0:
return True
return False
else:
if len(homeStartEnd[(homeStartEnd['HomeStartLocalTime'] <= locationTime) & (homeStartEnd['HomeEndLocalTime'] > locationTime)]) > 0:
return True
return False
def isInRange(se, homeStartEnd, locationTime):
if se == 'Start':
if homeStartEnd['HomeStartLocalTime'] < locationTime <= homeStartEnd['HomeEndLocalTime']:
return True
return False
else:
if homeStartEnd['HomeStartLocalTime'] <= locationTime < homeStartEnd['HomeEndLocalTime']:
return True
return False
def isInTupleRange(se, start, end, locationTime):
if se == 'Start':
if start < locationTime <= end:
return True
return False
else:
if start <= locationTime < end:
return True
return False
def youHome(row, se, locationTime):
# flag whether the location time falls inside this row's home period
if se == 'Start':
row['locIn'] = row['HomeStartLocalTime'] < locationTime <= row['HomeEndLocalTime']
else:
row['locIn'] = row['HomeStartLocalTime'] <= locationTime < row['HomeEndLocalTime']
return row
def processHome(v, divisions, vData, vLocationInfo, homeInfo, homeClusters, EVSEs):
for cID in homeClusters:
dfPts = vData[vData['TripEndClusterID'] == cID][['TripEndLatitude', 'TripEndLongitude']]
mpPts = MultiPoint(dfPts.to_numpy())
CP = mpPts.centroid
CP = geometry.Point(CP.y, CP.x)
for i, division in divisions.iterrows():
if division.geometry.contains(CP):
st = EVSEs[(EVSEs['Latitude'] > (CP.y - cfg.evseLatRange)) &
(EVSEs['Latitude'] < (CP.y + cfg.evseLatRange)) &
(EVSEs['Longitude'] > (CP.x - cfg.evseLonRange)) &
(EVSEs['Longitude'] < (CP.x + cfg.evseLonRange))]
if not st.empty:
st['hMiles'] = st.apply(getEVSEDistance, args=(CP.y, CP.x), axis=1)
st = st[st['hMiles'] <= cfg.evseDistRange_Miles]
l2Cnt = 0
dcCnt = 0
if not st.empty:
l2Cnt = st['L2'].sum()
dcCnt = st['DCFC'].sum()
l2Cnt = round(l2Cnt, cfg.numL2Rounding)
dcCnt = round(dcCnt, cfg.numDCRounding)
if l2Cnt == 0: l2Cnt = 1
if dcCnt == 0: dcCnt = 1
homeStart = vData[vData['TripEndClusterID'] == cID]['TripEndLocalTime'].min()
homeEnd = vData[vData['TripEndClusterID'] == cID]['TripEndLocalTime'].max()
newRow = {'VehicleID':int(v), 'HomeID':cID,
'HomeStartLocalTime':homeStart, 'HomeEndLocalTime':homeEnd,
'HomeRegion':division['NAME'], 'PublicChargingDensityL2':l2Cnt, 'PublicChargingDensityDCFC':dcCnt,
'CentroidLatitude':CP.y, 'CentroidLongitude':CP.x, 'Primary': False}
homeInfo = homeInfo.append(newRow, ignore_index=True)
break # exit the division loop
cfg.errorWriter.writerow([datetime.now(), v, -1,'No census division found for cluster.'])
if homeInfo.empty:
cfg.errorWriter.writerow([datetime.now(), v, -1,'No usable clusters found for homeInfo - vehicle skipped.'])
return False, vLocationInfo, homeInfo
# vehicles with only one home are marked as the primary home and primary home detection below is skipped
if len(homeInfo) == 1:
homeInfo['Primary'] = True
else:
# collect period start and period end date for each cluster
# get cluster period start dates of each range
sranges = vData[vData['TripEndClusterID'].isin(homeInfo['HomeID'])].groupby('TripEndClusterID')[['TripEndDateOffset', 'TripEndLocalTime']].min()
eranges = vData[vData['TripEndClusterID'].isin(homeInfo['HomeID'])].groupby('TripEndClusterID')[['TripEndDateOffset', 'TripEndLocalTime']].max()
sranges['period'] = 's'
eranges['period'] = 'e'
# assemble period start/end dates into sorted list of dates
ranges = sranges.append(eranges)
numDates = len(ranges['TripEndDateOffset'])
numUniqDates = len(set(ranges['TripEndDateOffset']))
if numDates != numUniqDates:
cfg.errorWriter.writerow([datetime.now(), v, -1,'Range dates are not unique - vehicle skipped.'])
return False, vLocationInfo, homeInfo
ranges = ranges.sort_values(by=['TripEndDateOffset'])
# make date ranges from first date to second date, second date to third date, etc.
rangesEnd = ranges.shift(-1)
rangesEnd.rename(columns={'TripEndDateOffset': 'End', 'TripEndLocalTime': 'HomeEnd', 'period': 'endperiod'}, inplace=True)
ranges.rename(columns={'TripEndDateOffset': 'Start', 'TripEndLocalTime': 'HomeStart', 'period': 'startperiod'}, inplace=True)
ranges = pd.concat([ranges, rangesEnd], axis=1)
ranges = ranges[:-1] # remove last row
ranges = ranges.reset_index()
# when a range start date originates from a cluster period end, it should be incremented
eidxs = ranges[ranges['startperiod']=='e'].index
erows = ranges[ranges.index.isin(eidxs)]
erows['Start'] += timedelta(days=1)
ranges[ranges.index.isin(eidxs)] = erows
# when a range end date originates from a cluster period start, it should be decremented
sidxs = ranges[ranges['endperiod']=='s'].index
srows = ranges[ranges.index.isin(sidxs)]
srows['End'] += timedelta(days=-1)
ranges[ranges.index.isin(sidxs)] = srows
# create a column per home cluster to hold the number of days parked "after known last trip of day" (AKLT)
for x in list(homeInfo['HomeID']): ranges[x] = 0
ranges.drop(['TripEndClusterID'], axis=1, inplace=True)
# get cluster id and end date (offset) for every AKLT
aklt = vData[((vData['TripFlag'] == 'L') | (vData['TripFlag'] == 'FL')) & (vData['resid_Miles'] > -1) & (vData['resid_Miles'] < 1)][['TripEndClusterID','TripEndDateOffset']]
# loop through ranges, counting days parked in each cluster
for ridx, rng in ranges.iterrows():
st = rng['Start']
en = rng['End']
# get number of days parked in given range for each home cluster (returned as a Series)
akltrow = aklt[(aklt['TripEndClusterID'].isin(homeInfo['HomeID'])) & (aklt['TripEndDateOffset'] >= st) & (aklt['TripEndDateOffset'] <= en)].groupby('TripEndClusterID')['TripEndClusterID'].count()
# write days parked to ranges dataframe
for cid, numdays in akltrow.iteritems():
rng[cid] = numdays
ranges.iloc[ridx] = rng
# initialize dataframes
homeInRange = pd.DataFrame(columns = ['cID'])
homeNumDays = pd.DataFrame(columns = ['cID'])
for idx in ranges.index:
homeInRange[idx] = 0
homeNumDays[idx] = 0
for cID in homeInfo['HomeID']:
homeNumDays.loc[len(homeNumDays)] = [cID] + list(ranges[cID])
# find ranges that are within each cluster's period
for idx, homeID in homeInfo['HomeID'].iteritems():
# initialize cluster's within range flag to 0
homeInRange.loc[len(homeInRange.index)] = [homeID] + ([0] * len(ranges))
# index list of ranges within homeID's period
rin = list(ranges[(ranges['HomeStart'] >= homeInfo.iloc[idx]['HomeStartLocalTime']) & (ranges['HomeEnd'] <= homeInfo.iloc[idx]['HomeEndLocalTime'])].index)
# set flag to 1 for ranges that are within the homeID's period
row = homeInRange[homeInRange['cID'] == homeID]
row[rin] = 1
homeInRange[homeInRange['cID'] == homeID] = row
# check the number of clusters within each range - no cluster = no primary home, one cluster = that cID is the primary home, else fall through to the number-of-days check
# get row indexes of homeInRange that have more than 1 home in range
r = homeInRange.iloc[:,1:].sum(axis=0)
multiInRangeidxs = r[r > 1]
ranges['Primary'] = False
# set ranges with single home to primary
primaryIdxs = homeInRange.sum(axis=0)
primaryIdxs = primaryIdxs[primaryIdxs==1]
for idx in primaryIdxs.index:
homeIDIdx = homeInRange[idx].idxmax(axis=0)
homeID = homeInRange.iloc[homeIDIdx]['cID']
row = ranges.iloc[idx]
row['Primary'] = homeID
ranges.iloc[idx] = row
# in ranges with multiple homes, if a primary home can be determined, set it
for col in list(multiInRangeidxs.index):
# if the range has multiple homes, set the one with the single largest number of days to primary; otherwise leave it not primary
if len(homeNumDays[col][homeNumDays[col] == homeNumDays[col].max()]) == 1:
idx = homeNumDays[col][homeNumDays[col] == homeNumDays[col].max()].index
homeID = homeNumDays.iloc[idx]['cID'].item()
row = ranges.iloc[col]
row['Primary'] = homeID
ranges.iloc[col] = row
# create an add-on to homeInfo of ranges showing which HomeID was the primary home in that range
newHomeInfo = pd.DataFrame()
for i, row in ranges.iterrows():
# copy the homeInfo row as starting point, then update the other fields in the new row
nh = homeInfo[homeInfo['HomeID'] == row['Primary']]
nh['HomeStartLocalTime'] = row['HomeStart']
nh['HomeEndLocalTime'] = row['HomeEnd']
nh['Primary'] = True
newHomeInfo = newHomeInfo.append(nh, ignore_index=True)
homeInfo['Primary'] = None
homeInfo = homeInfo.append(newHomeInfo)
homeInfo = homeInfo.reset_index(drop=True)
### Update vLocationInfo
for se in ['Start', 'End']:
if se == 'Start':
SEColumnMappings = StartSEColumnMappings()
else:
SEColumnMappings = EndSEColumnMappings()
vLocationInfo = parallel_func_wrapper_update_vlocation(vLocationInfo, vectorizedUpdateVehicleLocationInfo, cfg.numCores, SEColumnMappings=SEColumnMappings, homeInfo=homeInfo)
return True, vLocationInfo, homeInfo
def vectorizedUpdateVehicleLocationInfo(vLocationInfo, SEColumnMappings=None, homeInfo=None):
vLocationInfo = vLocationInfo.copy()
vUpdateVehicleLocationInfo = np.vectorize(updateVehicleLocationInfo, excluded=['homeInfo'])
result = vUpdateVehicleLocationInfo(SEColumnMappings.mapType,
vLocationInfo[SEColumnMappings.tripClusterID],
vLocationInfo[SEColumnMappings.tripTime],
vLocationInfo[SEColumnMappings.tripLocation],
vLocationInfo[SEColumnMappings.tripLatitude],
vLocationInfo[SEColumnMappings.tripLongitude],
homeInfo=homeInfo)
vLocationInfo[SEColumnMappings.tripLocation] = result[0]
vLocationInfo[SEColumnMappings.tripHomeID] = result[1]
vLocationInfo[SEColumnMappings.tripDistance] = result[2]
vLocationInfo.loc[vLocationInfo[SEColumnMappings.tripHomeID] == -1, SEColumnMappings.tripHomeID] = np.nan
vLocationInfo.loc[vLocationInfo[SEColumnMappings.tripDistance] == -1, SEColumnMappings.tripDistance] = np.nan
return vLocationInfo
def updateVehicleLocationInfo(mapType, tripClusterID, tripLocaltTime, tripLocation, tripLatitude, tripLongitude, homeInfo=None):
### Set Trip(Start/End)LocationCategory and Trip(Start/End)HomeID
# Does the row match a HomeID, and does it fall between any Home(Start/End)LocalTime range
homeID, inRange = isInHomeCluster(mapType, tripClusterID, tripLocaltTime, homeInfo)
## did not match a home
if homeID == -1:
category = 'unknown'
# is the park time within any home's period (if so, classify as 'away' rather than 'unknown')
if isInRangeSet(mapType, homeInfo[['HomeStartLocalTime', 'HomeEndLocalTime']], tripLocaltTime):
category = 'away'
newTripLocation = category
newTripHomeID = -1
## matched a home and is in home cluster period
if homeID != -1 and inRange:
newTripLocation = 'home'
newTripHomeID = homeID
## matched a home, but is not in home cluster period
if homeID != -1 and inRange == False:
category = 'unknown'
# is the park time within any home's period (if so, classify as 'away' rather than 'unknown')
if isInRangeSet(mapType, homeInfo[['HomeStartLocalTime', 'HomeEndLocalTime']], tripLocaltTime):
category = 'away'
newTripLocation = category
newTripHomeID = -1
homeLoc = []
for homerow in homeInfo[homeInfo['Primary'] == True].itertuples():
if isInTupleRange(mapType, homerow.HomeStartLocalTime, homerow.HomeEndLocalTime, tripLocaltTime):
homeLoc.extend([homerow[0]])
# no homes
if len(homeLoc) == 0:
newTripDistance = -1
# one distinct home
if len(homeLoc) == 1:
if tripLocation == 'home':
newTripDistance = 0
else:
hm = homeInfo.iloc[homeLoc[0]]
newTripDistance = math.ceil(haversine((tripLatitude, tripLongitude), (hm['CentroidLatitude'], hm['CentroidLongitude']), unit=Unit.MILES))
# in range with multiple homes
if len(homeLoc) > 1:
hm = homeInfo[homeInfo.index.isin(homeLoc) & (homeInfo['Primary'])]
newTripDistance = math.ceil(haversine((tripLatitude, tripLongitude), (hm['CentroidLatitude'], hm['CentroidLongitude']), unit=Unit.MILES))
return (newTripLocation, newTripHomeID, newTripDistance)
def flagTrips(v, vData):
# use offset as end/start of day, e.g. 3:30 AM
vData['TripStartDateOffset'] = (vData['TripStartLocalTime'] - timedelta(hours=cfg.dayEndHours, minutes=cfg.dayEndMinutes)).dt.date
vData['TripEndDateOffset']= (vData['TripEndLocalTime'] - timedelta(hours=cfg.dayEndHours, minutes=cfg.dayEndMinutes)).dt.date
lastIdx = len(vData) - 1
curParkEndDate = vData['TripStartDateOffset'][0:1].item()
vData['TripFlag'][0:1] = 'F'
tripsCnt = 0
# find first and last trips in the day
for i in range(1, lastIdx):
tripsCnt += 1
# compare current (i) record to endDate
if vData['TripEndDateOffset'][i:i+1].item() != curParkEndDate:
vData['TripFlag'][i-1:i] = 'FL' if vData['TripFlag'][i-1:i].item() == 'F' else 'L'
vData['TripFlag'][i:i+1] = 'F'
curParkEndDate = vData['TripEndDateOffset'][i:i+1].item()
tripsCnt = 0
vData['TripFlag'][-1:] = 'FL' if vData['TripFlag'][lastIdx-1:lastIdx].item() == 'L' else 'L'
return vData
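# --- Illustrative sketch (not part of the pipeline) ---------------------------
# flagTrips shifts each timestamp back by the configured day-end offset so that
# late-night trips count toward the previous driving day. With a hypothetical
# 3:30 AM boundary, a 1:00 AM trip on June 2nd still belongs to June 1st:
def _example_day_offset():
    from datetime import datetime, timedelta
    tripEnd = datetime(2020, 6, 2, 1, 0)                      # 1:00 AM, June 2nd
    return (tripEnd - timedelta(hours=3, minutes=30)).date()  # -> date(2020, 6, 1)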
def InBoundingBox(vd, colLat, colLon):
"""Check a value (latitude or longitude) to see if it is within the given range"""
if math.isnan(vd[colLat]) or math.isnan(vd[colLon]):
vd['Include'] = False
return vd
x = vd[colLat]
y = vd[colLon]
isFound = False
for k in cfg.bboxes.keys():
x1 = cfg.bboxes[k][0][0] # upper-left latitude
y1 = cfg.bboxes[k][0][1] # upper-left longitude
x2 = cfg.bboxes[k][1][0] # lower-right latitude
y2 = cfg.bboxes[k][1][1] # lower-right longitude
if x > x2 and x < x1 and y > y1 and y < y2: # note: latitude (x) decreases from the upper-left to the lower-right corner
isFound = True
break
# don't change any previously "falsed" flags
if not isFound:
vd['Include'] = False
return vd
# check that dates and times are sane
def CheckDateTime(vd, colname):
try:
if pd.isnull(vd[colname]):
vd['Include'] = False
return vd
curdt = datetime.today()
if vd[colname].year < 2011 or vd[colname] > curdt:
vd['Include'] = False
return vd
except ValueError:
vd['Include'] = False
return vd
# check that the Odometer mileage is not less than the calculated mileage from the GPS coordinates
def CompareOdometerToGPS(vd, tripStart, tripEnd, stLat, stLng, enLat, enLng, threshold):
odoDist = vd[tripEnd] - vd[tripStart]
GPSDist = haversine((vd[stLat], vd[stLng]), (vd[enLat], vd[enLng]), unit=Unit.MILES)
if (odoDist - GPSDist) < threshold:
vd.Include = False
return vd
# run the various validity checks against the vehicle data DataFrame and return it with the Include flag set
def DoValidityChecking(v, vData):
incl = vData
incl = incl.apply(lambda x: CheckDateTime(x, 'TripStartLocalTime'), axis=1)
startErrs = incl['Include'][incl['Include'] == False].count()
if startErrs > 0: cfg.errorWriter.writerow([datetime.now(), v, -1, 'TripStartLocalTimes ({})'.format(startErrs)])
incl = incl.apply(lambda x: CheckDateTime(x, 'TripEndLocalTime'), axis=1)
endErrs = incl['Include'][incl['Include'] == False].count() - startErrs
if endErrs > 0: cfg.errorWriter.writerow([datetime.now(), v, -1, 'TripEndLocalTime is bad ({})'.format(endErrs)])
incl = incl.apply(lambda x: InBoundingBox(x, 'TripStartLatitude', 'TripStartLongitude'), axis=1)
startPtErrs = incl['Include'][incl['Include'] == False].count() - startErrs - endErrs
if startPtErrs > 0: cfg.errorWriter.writerow([datetime.now(), v, -1, 'TripStartLatitude is bad ({})'.format(startPtErrs)])
incl = incl.apply(lambda x: InBoundingBox(x, 'TripEndLatitude', 'TripEndLongitude'), axis=1)
endPtErrs = incl['Include'][incl['Include'] == False].count() - startErrs - endErrs - startPtErrs
if endPtErrs > 0: cfg.errorWriter.writerow([datetime.now(), v, -1, 'TripEndLongitude is bad ({})'.format(endPtErrs)])
incl = incl.apply(lambda x: CompareOdometerToGPS(x, 'TripStartOdometer_Miles', 'TripEndOdometer_Miles',
'TripStartLatitude', 'TripStartLongitude',
'TripEndLatitude', 'TripEndLongitude', cfg.gpsOdoThreshold_mi), axis=1)
odoErrs = incl['Include'][incl['Include'] == False].count() - startErrs - endErrs - startPtErrs - endPtErrs
if odoErrs > 0: cfg.errorWriter.writerow([datetime.now(), v, -1, 'Trip ODO < straight line distance ({})'.format(odoErrs)])
return incl
# find clusters of data using latitude and longitude of trip start and end
def clusterData(v,vData):
kms_per_radian = 6371.0088
epsilon = (cfg.dbscan_eps_ft / 3281) / kms_per_radian
minSamples = cfg.dbscan_min_spls
startPts = vData[['TripStartLatitude', 'TripStartLongitude']].to_numpy()
endPts = vData[['TripEndLatitude', 'TripEndLongitude']].to_numpy()
coords = np.append(startPts, endPts, axis=0)
coordsSet = np.unique(coords, axis=0)
db = DBSCAN(eps=epsilon, min_samples=minSamples, algorithm='ball_tree', metric='haversine').fit(np.radians(coordsSet))
clusterLbls = db.labels_ # db.labels_ holds one cluster label per row of coordsSet (-1 marks noise points)
coordsClusters = | pd.DataFrame(coordsSet) | pandas.DataFrame |
# -*- coding: utf-8 -*-
# Copyright (c) 2016 by University of Kassel and Fraunhofer Institute for Wind Energy and Energy
# System Technology (IWES), Kassel. All rights reserved. Use of this source code is governed by a
# BSD-style license that can be found in the LICENSE file.
import copy
import numpy as np
import pandas as pd
import math
from functools import partial
from pypower.idx_brch import F_BUS, T_BUS, BR_R, BR_X, BR_B, TAP, SHIFT, BR_STATUS, RATE_A, QT
from pypower.idx_bus import BASE_KV
from pandapower.auxiliary import get_indices, get_values
def _build_branch_ppc(net, ppc, is_elems, bus_lookup, calculate_voltage_angles, trafo_model, set_opf_constraints=False):
"""
Takes the empty ppc network and fills it with the branch values. The branch
datatype will be np.complex128 afterwards.
.. note:: The order of branches in the ppc is:
1. Lines
2. Transformers
3. 3W Transformers (each 3W Transformer takes up three branches)
4. Impedances
5. Internal branch for extended ward
**INPUT**:
**net** -The Pandapower format network
**ppc** - The PYPOWER format network to fill in values
"""
# if len(net["trafo3w"]) > 0:
# _one_3w_to_three_2w(net)
line_end = len(net["line"])
trafo_end = line_end + len(net["trafo"])
trafo3w_end = trafo_end + len(net["trafo3w"]) * 3
impedance_end = trafo3w_end + len(net["impedance"])
xward_end = impedance_end + len(net["xward"])
ppc["branch"] = np.zeros(shape=(xward_end, QT + 1), dtype=np.complex128)
ppc["branch"][:, :13] = np.array([0, 0, 0, 0, 0, 250, 250, 250, 1, 0, 1, -360, 360])
if line_end > 0:
ppc["branch"][:line_end, [F_BUS, T_BUS, BR_R, BR_X, BR_B, BR_STATUS, RATE_A]] = \
_calc_line_parameter(net, ppc, bus_lookup, set_opf_constraints)
if trafo_end > line_end:
ppc["branch"][line_end:trafo_end,
[F_BUS, T_BUS, BR_R, BR_X, BR_B, TAP, SHIFT, BR_STATUS, RATE_A]] = \
_calc_trafo_parameter(net, ppc, bus_lookup, calculate_voltage_angles,
trafo_model, set_opf_constraints)
if trafo3w_end > trafo_end:
ppc["branch"][trafo_end:trafo3w_end, [F_BUS, T_BUS, BR_R, BR_X, BR_B, TAP, SHIFT, BR_STATUS]] = \
_calc_trafo3w_parameter(net, ppc, bus_lookup, calculate_voltage_angles, trafo_model)
if impedance_end > trafo3w_end:
ppc["branch"][trafo3w_end:impedance_end, [F_BUS, T_BUS, BR_R, BR_X, BR_STATUS]] = \
_calc_impedance_parameter(net, bus_lookup)
if xward_end > impedance_end:
ppc["branch"][impedance_end:xward_end, [F_BUS, T_BUS, BR_R, BR_X, BR_STATUS]] = \
_calc_xward_parameter(net, ppc, is_elems, bus_lookup)
def _calc_trafo3w_parameter(net, ppc, bus_lookup, calculate_voltage_angles, trafo_model):
trafo_df = _trafo_df_from_trafo3w(net)
temp_para = np.zeros(shape=(len(trafo_df), 8), dtype=np.complex128)
temp_para[:, 0] = get_indices(trafo_df["hv_bus"].values, bus_lookup)
temp_para[:, 1] = get_indices(trafo_df["lv_bus"].values, bus_lookup)
temp_para[:, 2:6] = _calc_branch_values_from_trafo_df(
net, ppc, bus_lookup, trafo_model, trafo_df)
if calculate_voltage_angles:
temp_para[:, 6] = trafo_df["shift_degree"].values
else:
temp_para[:, 6] = np.zeros(shape=(len(trafo_df.index),), dtype=np.complex128)
temp_para[:, 7] = trafo_df["in_service"].values
return temp_para
def _calc_line_parameter(net, ppc, bus_lookup, set_opf_constraints=False):
"""
calculates the line parameter in per unit.
**INPUT**:
**net** -The Pandapower format network
**RETURN**:
**t** - Temporary line parameter: a np.complex128
NumPy array with the following order:
0:bus_a; 1:bus_b; 2:r_pu; 3:x_pu; 4:b_pu
"""
# baseR converts Ohm to p.u.: baseR = U^2 / Sref with Sref = 1 MVA. Since vn_kv
# is given in kV, U^2 * ((10^3 V)^2 / 10^6 VA) = U^2, so the division by 1 MVA
# drops out and baseR is simply the squared bus voltage in kV.
line = net["line"]
fb = get_indices(line["from_bus"], bus_lookup)
tb = get_indices(line["to_bus"], bus_lookup)
length = line["length_km"].values
parallel = line["parallel"]
baseR = np.square(ppc["bus"][fb, BASE_KV])
t = np.zeros(shape=(len(line.index), 7), dtype=np.complex128)
t[:, 0] = fb
t[:, 1] = tb
t[:, 2] = line["r_ohm_per_km"] * length / baseR / parallel
t[:, 3] = line["x_ohm_per_km"] * length / baseR / parallel
t[:, 4] = 2 * net.f_hz * math.pi * line["c_nf_per_km"] * 1e-9 * baseR * length * parallel
t[:, 5] = line["in_service"]
if set_opf_constraints:
max_load = line.max_loading_percent if "max_loading_percent" in line else 1000
vr = net.bus.vn_kv[fb].values * np.sqrt(3)
t[:, 6] = max_load / 100 * line.imax_ka * line.df * parallel * vr
return t
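# --- Illustrative sketch (not part of pandapower) -----------------------------
# The per-unit conversion used above, with hypothetical line data: a 20 kV line
# with r = 0.1 Ohm/km over 2 km gives baseR = 20**2 = 400 Ohm (Sref = 1 MVA),
# hence r_pu = 0.1 * 2 / 400 = 0.0005 for a single parallel system.
def _example_line_per_unit(r_ohm_per_km=0.1, length_km=2.0, vn_kv=20.0, parallel=1):
    baseR = vn_kv ** 2                      # U^2 / Sref with Sref = 1 MVA
    return r_ohm_per_km * length_km / baseR / parallel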
def _calc_trafo_parameter(net, ppc, bus_lookup, calculate_voltage_angles, trafo_model,
set_opf_constraints=False):
'''
Calculates the transformer parameter in per unit.
**INPUT**:
**net** - The Pandapower format network
**RETURN**:
**temp_para** -
Temporary transformer parameter: a np.complex128
NumPy array with the following order:
0:hv_bus; 1:lv_bus; 2:r_pu; 3:x_pu; 4:b_pu; 5:tab, 6:shift
'''
temp_para = np.zeros(shape=(len(net["trafo"].index), 9), dtype=np.complex128)
trafo = net["trafo"]
temp_para[:, 0] = get_indices(trafo["hv_bus"].values, bus_lookup)
temp_para[:, 1] = get_indices(trafo["lv_bus"].values, bus_lookup)
temp_para[:, 2:6] = _calc_branch_values_from_trafo_df(net, ppc, bus_lookup, trafo_model)
if calculate_voltage_angles:
temp_para[:, 6] = trafo["shift_degree"].values
else:
temp_para[:, 6] = np.zeros(shape=(len(trafo.index),), dtype=np.complex128)
temp_para[:, 7] = trafo["in_service"].values
if set_opf_constraints:
max_load = trafo.max_loading_percent if "max_loading_percent" in trafo else 1000
temp_para[:, 8] = max_load / 100 * trafo.sn_kva / 1000
return temp_para
def _calc_branch_values_from_trafo_df(net, ppc, bus_lookup, trafo_model, trafo_df=None):
"""
Calculates the MAT/PYPOWER-branch-attributes from the pandapower trafo dataframe.
PYPOWER and MATPOWER uses the PI-model to model transformers.
This function calculates the resistance r, reactance x, complex susceptance c and the tap ratio
according to the given parameters.
.. warning:: This function returns the susceptance b as a complex number
**(-img + -re*i)**. MAT/PYPOWER only expects the imaginary part of the
susceptance, but internally c is multiplied by i. Using the susceptance
in this way makes it possible to account for the ferromagnetic loss of
the coil, which would otherwise be neglected.
.. warning:: The tap changer position affects the calculation as follows:
On the **high-voltage** side (=1) -> only the **tap** ratio gets adapted.
On the **low-voltage** side (=2) -> **tap, x, r** get adapted.
This is consistent with Sincal, although the Sincal method is questionable
in this case.
**INPUT**:
**pd_trafo** - The Pandapower format Transformer Dataframe.
The transformer model will only be read from pd_net
**RETURN**:
**temp_para** - Temporary transformer parameter: a np.complex128
NumPy array with the following order:
0:r_pu; 1:x_pu; 2:b_pu; 3:tab;
"""
if trafo_df is None:
trafo_df = net["trafo"]
baseR = np.square(get_values(ppc["bus"][:, BASE_KV], trafo_df["lv_bus"].values,
bus_lookup))
### Construct np.array to parse results in ###
# 0:r_pu; 1:x_pu; 2:b_pu; 3:tab;
temp_para = np.zeros(shape=(len(trafo_df), 4), dtype=np.complex128)
unh, unl = _calc_vn_from_dataframe(trafo_df)
r, x, y = _calc_r_x_y_from_dataframe(trafo_df, unl, baseR, trafo_model)
temp_para[:, 0] = r
temp_para[:, 1] = x
temp_para[:, 2] = y
temp_para[:, 3] = _calc_tap_from_dataframe(ppc, trafo_df, unh, unl, bus_lookup)
return temp_para
def _calc_r_x_y_from_dataframe(trafo_df, unl, baseR, trafo_model):
y = _calc_y_from_dataframe(trafo_df, baseR)
r, x = _calc_r_x_from_dataframe(trafo_df)
if trafo_model == "pi":
return r, x, y
elif trafo_model == "t":
return _wye_delta(r, x, y)
else:
raise ValueError("Unkonwn Transformer Model %s - valid values ar 'pi' or 't'" % trafo_model)
def _wye_delta(r, x, y):
"""
20.05.2016 added by <NAME>
Calculate transformer Pi-Data based on T-Data
"""
tidx = np.where(y != 0)
za_star = (r[tidx] + x[tidx] * 1j) / 2
zc_star = -1j / y[tidx]
zSum_triangle = za_star * za_star + 2 * za_star * zc_star
zab_triangle = zSum_triangle / zc_star
zbc_triangle = zSum_triangle / za_star
r[tidx] = zab_triangle.real
x[tidx] = zab_triangle.imag
y[tidx] = -2j / zbc_triangle
return r, x, y
def _calc_y_from_dataframe(trafo_df, baseR):
"""
Calculate the susceptance y from the transformer dataframe.
INPUT:
**trafo** (Dataframe) - The dataframe in net.trafo
which contains transformer calculation values.
RETURN:
**susceptance** (1d array, np.complex128) - The susceptance in pu in
the form (-b_img, -b_real)
"""
### Calculate susceptance ###
unl_squared = trafo_df["vn_lv_kv"].values**2
b_real = trafo_df["pfe_kw"].values / (1000. * unl_squared) * baseR
b_img = (trafo_df["i0_percent"].values / 100. * trafo_df["sn_kva"].values / 1000.)**2 \
- (trafo_df["pfe_kw"].values / 1000.)**2
b_img[b_img < 0] = 0
b_img = np.sqrt(b_img) * baseR / unl_squared
return -b_real * 1j - b_img
def _calc_vn_from_dataframe(trafo_df):
"""
Adjust the nominal voltages vnh and vnl to the active tap position "tp_pos".
If "side" is 1 (high-voltage side) the high voltage vnh is adjusted.
If "side" is 2 (low-voltage side) the low voltage vnl is adjusted
INPUT:
**trafo** (Dataframe) - The dataframe in pd_net["structure"]["trafo"]
which contains transformer calculation values.
RETURN:
**vn_hv_kv** (1d array, float) - The adjusted high voltages
**vn_lv_kv** (1d array, float) - The adjusted low voltages
"""
# Changing Voltage on high-voltage side
unh = copy.copy(trafo_df["vn_hv_kv"].values)
m = (trafo_df["tp_side"] == "hv").values
tap_os = np.isfinite(trafo_df["tp_pos"].values) & m
if any(tap_os):
unh[tap_os] *= np.ones((tap_os.sum()), dtype=np.float) + \
(trafo_df["tp_pos"].values[tap_os] - trafo_df["tp_mid"].values[tap_os]) * \
trafo_df["tp_st_percent"].values[tap_os] / 100.
# Changing voltage on the low-voltage side
unl = copy.copy(trafo_df["vn_lv_kv"].values)
tap_us = np.logical_and(np.isfinite(trafo_df["tp_pos"].values),
(trafo_df["tp_side"] == "lv").values)
if any(tap_us):
unl[tap_us] *= np.ones((tap_us.sum()), dtype=np.float) \
+ (trafo_df["tp_pos"].values[tap_us] - trafo_df["tp_mid"].values[tap_us]) \
* trafo_df["tp_st_percent"].values[tap_us] / 100.
return unh, unl
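# --- Illustrative sketch (not part of pandapower) -----------------------------
# The tap adjustment applied above, with hypothetical values: tap position +2
# around neutral position 0 with 1.25 % per step scales a 110 kV rated voltage
# to 110 * (1 + 2 * 0.0125) = 112.75 kV.
def _example_tap_adjusted_vn(vn_kv=110.0, tp_pos=2, tp_mid=0, tp_st_percent=1.25):
    return vn_kv * (1 + (tp_pos - tp_mid) * tp_st_percent / 100.)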
def _calc_r_x_from_dataframe(trafo_df):
"""
Calculates (vectorized) the resistance and reactance according to the
transformer values
"""
z_sc = trafo_df["vsc_percent"].values / 100. / trafo_df.sn_kva.values * 1000.
r_sc = trafo_df["vscr_percent"].values / 100. / trafo_df.sn_kva.values * 1000.
x_sc = np.sqrt(z_sc**2 - r_sc**2)
return r_sc, x_sc
def _calc_tap_from_dataframe(ppc, trafo_df, vn_hv_kv, vn_lv_kv, bus_lookup):
"""
Calculates (Vectorized) the off nominal tap ratio::
(vn_hv_kv / vn_lv_kv) / (ub1_in_kv / ub2_in_kv)
INPUT:
**net** (Dataframe) - The net for which to calc the tap ratio.
**vn_hv_kv** (1d array, float) - The adjusted nominal high voltages
**vn_lv_kv** (1d array, float) - The adjusted nominal low voltages
RETURN:
**tab** (1d array, float) - The off-nominal tap ratio
"""
# Calculating tap (transformer off-nominal turns ratio)
tap_rat = vn_hv_kv / vn_lv_kv
nom_rat = get_values(ppc["bus"][:, BASE_KV], trafo_df["hv_bus"].values, bus_lookup) / \
get_values(ppc["bus"][:, BASE_KV], trafo_df["lv_bus"].values, bus_lookup)
return tap_rat / nom_rat
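# --- Illustrative sketch (not part of pandapower) -----------------------------
# The off-nominal tap ratio formula above, with hypothetical values: a 110/20 kV
# transformer whose tap raised the rated HV voltage to 112.2 kV, connected
# between 110 kV and 20 kV buses, gives tab = (112.2/20) / (110/20) = 1.02.
def _example_off_nominal_tap(vn_hv_kv=112.2, vn_lv_kv=20.0, bus_hv_kv=110.0, bus_lv_kv=20.0):
    tap_rat = vn_hv_kv / vn_lv_kv
    nom_rat = bus_hv_kv / bus_lv_kv
    return tap_rat / nom_rat                # -> 1.02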
def z_br_to_bus(z, s):
zbr_n = s[0] * np.array([z[0] / min(s[0], s[1]), z[1] /
min(s[1], s[2]), z[2] / min(s[0], s[2])])
return .5 * s / s[0] * np.array([(zbr_n[0] + zbr_n[2] - zbr_n[1]),
(zbr_n[1] + zbr_n[0] - zbr_n[2]),
(zbr_n[2] + zbr_n[1] - zbr_n[0])])
def _trafo_df_from_trafo3w(net):
trafos2w = {}
nr_trafos = len(net["trafo3w"])
tap_variables = ("tp_pos", "tp_mid", "tp_max", "tp_min", "tp_st_percent")
i = 0
for _, ttab in net["trafo3w"].iterrows():
uk = np.array([ttab.vsc_hv_percent, ttab.vsc_mv_percent, ttab.vsc_lv_percent])
ur = np.array([ttab.vscr_hv_percent, ttab.vscr_mv_percent, ttab.vscr_lv_percent])
sn = np.array([ttab.sn_hv_kva, ttab.sn_mv_kva, ttab.sn_lv_kva])
uk_2w = z_br_to_bus(uk, sn)
ur_2w = z_br_to_bus(ur, sn)
taps = [{tv: np.nan for tv in tap_variables} for _ in range(3)]
for k in range(3):
taps[k]["tp_side"] = None
if pd.notnull(ttab.tp_side):
if ttab.tp_side == "hv":
tp_trafo = 0
elif ttab.tp_side == "mv":
tp_trafo = 1
elif ttab.tp_side == "lv":
tp_trafo = 2
for tv in tap_variables:
taps[tp_trafo][tv] = ttab[tv]
taps[tp_trafo]["tp_side"] = "hv" if tp_trafo == 0 else "lv"
trafos2w[i] = {"hv_bus": ttab.hv_bus, "lv_bus": ttab.ad_bus, "sn_kva": ttab.sn_hv_kva,
"vn_hv_kv": ttab.vn_hv_kv, "vn_lv_kv": ttab.vn_hv_kv, "vscr_percent": ur_2w[0],
"vsc_percent": uk_2w[0], "pfe_kw": ttab.pfe_kw,
"i0_percent": ttab.i0_percent, "tp_side": taps[0]["tp_side"],
"tp_mid": taps[0]["tp_mid"], "tp_max": taps[0]["tp_max"],
"tp_min": taps[0]["tp_min"], "tp_pos": taps[0]["tp_pos"],
"tp_st_percent": taps[0]["tp_st_percent"],
"in_service": ttab.in_service, "shift_degree": 0}
trafos2w[i + nr_trafos] = {"hv_bus": ttab.ad_bus, "lv_bus": ttab.mv_bus,
"sn_kva": ttab.sn_mv_kva, "vn_hv_kv": ttab.vn_hv_kv, "vn_lv_kv": ttab.vn_mv_kv,
"vscr_percent": ur_2w[1], "vsc_percent": uk_2w[1], "pfe_kw": 0,
"i0_percent": 0, "tp_side": taps[1]["tp_side"],
"tp_mid": taps[1]["tp_mid"], "tp_max": taps[1]["tp_max"],
"tp_min": taps[1]["tp_min"], "tp_pos": taps[1]["tp_pos"],
"tp_st_percent": taps[1]["tp_st_percent"],
"in_service": ttab.in_service, "shift_degree": ttab.shift_mv_degree}
trafos2w[i + 2 * nr_trafos] = {"hv_bus": ttab.ad_bus, "lv_bus": ttab.lv_bus,
"sn_kva": ttab.sn_lv_kva,
"vn_hv_kv": ttab.vn_hv_kv, "vn_lv_kv": ttab.vn_lv_kv, "vscr_percent": ur_2w[2],
"vsc_percent": uk_2w[2], "pfe_kw": 0, "i0_percent": 0,
"tp_side": taps[2]["tp_side"], "tp_mid": taps[2]["tp_mid"],
"tp_max": taps[2]["tp_max"], "tp_min": taps[2]["tp_min"],
"tp_pos": taps[2]["tp_pos"], "tp_st_percent": taps[2]["tp_st_percent"],
"in_service": ttab.in_service, "shift_degree": ttab.shift_lv_degree}
i += 1
trafo_df = | pd.DataFrame(trafos2w) | pandas.DataFrame |
# -*- coding: utf-8 -*-
# @Author: <NAME>
# @Email: <EMAIL>
import os
import sys
import codecs
import numpy as np
import pandas as pd
import torch
import jieba
from gensim.models import KeyedVectors
from torch.utils.data import TensorDataset, DataLoader, Dataset
# DOCKER data file
data_dir = "/data"
jieba.load_userdict(data_dir+"/cut_dict_uniq.txt")
STOPWORDS = [line.strip() for line in codecs.open(data_dir+"/stopwords_1009.txt", "r", "utf-8").readlines()]
# =================================
# Char Data Loader (Embedding)
# =================================
def load_embedding():
char_vectors = KeyedVectors.load_word2vec_format(data_dir+"/embedding_char_300.bin", binary=True)
char2index = {}
zeros = np.zeros(char_vectors.vectors.shape[1], dtype=np.float32)
embedding = np.insert(char_vectors.vectors, 0, zeros, axis=0)
print("Char Embedding: ", embedding.shape)
padding_value = 0
for i, w in enumerate(char_vectors.index2word):
char2index[w] = i + 1
return embedding, char2index, padding_value
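# --- Illustrative sketch (not part of the loader) -----------------------------
# How the char2index mapping returned above is typically consumed: characters
# are looked up one at a time, unknown characters fall back to the padding
# index 0, and the sequence is padded/truncated to a fixed length (the helper
# name and values here are hypothetical).
def _example_chars_to_ids(text, char2index, max_len=8, padding_value=0):
    ids = [char2index.get(ch, padding_value) for ch in text[:max_len]]
    ids += [padding_value] * (max_len - len(ids))
    return ids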
class OnlineQA(Dataset):
def __init__(self, max_len, data_fn, char2index):
self.char2index = char2index
self.max_len = max_len
self.load(data_fn)
self.y = torch.LongTensor(self.df["label"].tolist())
def load(self, data_fn):
self.df = pd.read_csv(data_dir+"/{}".format(data_fn)).reset_index(drop=True)
self.label = | pd.unique(self.df["label"]) | pandas.unique |
# -*- coding: utf-8 -*-
import csv
import os
import platform
import codecs
import re
import sys
from datetime import datetime
import pytest
import numpy as np
from pandas._libs.lib import Timestamp
import pandas as pd
import pandas.util.testing as tm
from pandas import DataFrame, Series, Index, MultiIndex
from pandas import compat
from pandas.compat import (StringIO, BytesIO, PY3,
range, lrange, u)
from pandas.errors import DtypeWarning, EmptyDataError, ParserError
from pandas.io.common import URLError
from pandas.io.parsers import TextFileReader, TextParser
class ParserTests(object):
"""
Want to be able to test either C+Cython or Python+Cython parsers
"""
data1 = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
def test_empty_decimal_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
# Parsers support only length-1 decimals
msg = 'Only length-1 decimal markers supported'
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(data), decimal='')
def test_bad_stream_exception(self):
# Issue 13652:
# This test validates that both python engine
# and C engine will raise UnicodeDecodeError instead of
# c engine raising ParserError and swallowing exception
# that caused read to fail.
handle = open(self.csv_shiftjs, "rb")
codec = codecs.lookup("utf-8")
utf8 = codecs.lookup('utf-8')
# stream must be binary UTF8
stream = codecs.StreamRecoder(
handle, utf8.encode, utf8.decode, codec.streamreader,
codec.streamwriter)
if compat.PY3:
msg = "'utf-8' codec can't decode byte"
else:
msg = "'utf8' codec can't decode byte"
with tm.assert_raises_regex(UnicodeDecodeError, msg):
self.read_csv(stream)
stream.close()
def test_read_csv(self):
if not compat.PY3:
if compat.is_platform_windows():
prefix = u("file:///")
else:
prefix = u("file://")
fname = prefix + compat.text_type(self.csv1)
self.read_csv(fname, index_col=0, parse_dates=True)
def test_1000_sep(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334, 13],
'C': [5, 10.]
})
df = self.read_csv(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
def test_squeeze(self):
data = """\
a,1
b,2
c,3
"""
idx = Index(['a', 'b', 'c'], name=0)
expected = Series([1, 2, 3], name=1, index=idx)
result = self.read_table(StringIO(data), sep=',', index_col=0,
header=None, squeeze=True)
assert isinstance(result, Series)
tm.assert_series_equal(result, expected)
def test_squeeze_no_view(self):
# see gh-8217
# Series should not be a view
data = """time,data\n0,10\n1,11\n2,12\n4,14\n5,15\n3,13"""
result = self.read_csv(StringIO(data), index_col='time', squeeze=True)
assert not result._is_view
def test_malformed(self):
# see gh-6607
# all
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
"""
msg = 'Expected 3 fields in line 4, saw 5'
with tm.assert_raises_regex(Exception, msg):
self.read_table(StringIO(data), sep=',',
header=1, comment='#')
# first chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
msg = 'Expected 3 fields in line 6, saw 5'
with tm.assert_raises_regex(Exception, msg):
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#',
iterator=True, chunksize=1,
skiprows=[2])
it.read(5)
# middle chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
msg = 'Expected 3 fields in line 6, saw 5'
with tm.assert_raises_regex(Exception, msg):
it = self.read_table(StringIO(data), sep=',', header=1,
comment='#', iterator=True, chunksize=1,
skiprows=[2])
it.read(3)
# last chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
msg = 'Expected 3 fields in line 6, saw 5'
with tm.assert_raises_regex(Exception, msg):
it = self.read_table(StringIO(data), sep=',', header=1,
comment='#', iterator=True, chunksize=1,
skiprows=[2])
it.read()
# skipfooter is not supported with the C parser yet
if self.engine == 'python':
# skipfooter
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
footer
"""
msg = 'Expected 3 fields in line 4, saw 5'
with tm.assert_raises_regex(Exception, msg):
self.read_table(StringIO(data), sep=',',
header=1, comment='#',
skipfooter=1)
def test_quoting(self):
bad_line_small = """printer\tresult\tvariant_name
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jacob
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jakob
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\t"Furststiftische Hofdruckerei, <Kempten""
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tGaller, Alois
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tHochfurstliche Buchhandlung <Kempten>""" # noqa
pytest.raises(Exception, self.read_table, StringIO(bad_line_small),
sep='\t')
good_line_small = bad_line_small + '"'
df = self.read_table(StringIO(good_line_small), sep='\t')
assert len(df) == 3
def test_unnamed_columns(self):
data = """A,B,C,,
1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
expected = np.array([[1, 2, 3, 4, 5],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]], dtype=np.int64)
df = self.read_table(StringIO(data), sep=',')
tm.assert_almost_equal(df.values, expected)
tm.assert_index_equal(df.columns,
Index(['A', 'B', 'C', 'Unnamed: 3',
'Unnamed: 4']))
def test_csv_mixed_type(self):
data = """A,B,C
a,1,2
b,3,4
c,4,5
"""
expected = DataFrame({'A': ['a', 'b', 'c'],
'B': [1, 3, 4],
'C': [2, 4, 5]})
out = self.read_csv(StringIO(data))
tm.assert_frame_equal(out, expected)
def test_read_csv_dataframe(self):
df = self.read_csv(self.csv1, index_col=0, parse_dates=True)
df2 = self.read_table(self.csv1, sep=',', index_col=0,
parse_dates=True)
tm.assert_index_equal(df.columns, pd.Index(['A', 'B', 'C', 'D']))
assert df.index.name == 'index'
assert isinstance(
df.index[0], (datetime, np.datetime64, Timestamp))
assert df.values.dtype == np.float64
tm.assert_frame_equal(df, df2)
def test_read_csv_no_index_name(self):
df = self.read_csv(self.csv2, index_col=0, parse_dates=True)
df2 = self.read_table(self.csv2, sep=',', index_col=0,
parse_dates=True)
tm.assert_index_equal(df.columns,
pd.Index(['A', 'B', 'C', 'D', 'E']))
assert isinstance(df.index[0], (datetime, np.datetime64, Timestamp))
assert df.loc[:, ['A', 'B', 'C', 'D']].values.dtype == np.float64
tm.assert_frame_equal(df, df2)
def test_read_table_unicode(self):
fin = BytesIO(u('\u0141aski, Jan;1').encode('utf-8'))
df1 = self.read_table(fin, sep=";", encoding="utf-8", header=None)
assert isinstance(df1[0].values[0], compat.text_type)
def test_read_table_wrong_num_columns(self):
# too few!
data = """A,B,C,D,E,F
1,2,3,4,5,6
6,7,8,9,10,11,12
11,12,13,14,15,16
"""
pytest.raises(ValueError, self.read_csv, StringIO(data))
def test_read_duplicate_index_explicit(self):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
result = self.read_csv(StringIO(data), index_col=0)
expected = self.read_csv(StringIO(data)).set_index(
'index', verify_integrity=False)
tm.assert_frame_equal(result, expected)
result = self.read_table(StringIO(data), sep=',', index_col=0)
expected = self.read_table(StringIO(data), sep=',', ).set_index(
'index', verify_integrity=False)
tm.assert_frame_equal(result, expected)
def test_read_duplicate_index_implicit(self):
data = """A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
# make sure an error isn't thrown
self.read_csv(StringIO(data))
self.read_table(StringIO(data), sep=',')
def test_parse_bools(self):
data = """A,B
True,1
False,2
True,3
"""
data = self.read_csv(StringIO(data))
assert data['A'].dtype == np.bool_
data = """A,B
YES,1
no,2
yes,3
No,3
Yes,3
"""
data = self.read_csv(StringIO(data),
true_values=['yes', 'Yes', 'YES'],
false_values=['no', 'NO', 'No'])
assert data['A'].dtype == np.bool_
data = """A,B
TRUE,1
FALSE,2
TRUE,3
"""
data = self.read_csv(StringIO(data))
assert data['A'].dtype == np.bool_
data = """A,B
foo,bar
bar,foo"""
result = self.read_csv(StringIO(data), true_values=['foo'],
false_values=['bar'])
expected = DataFrame({'A': [True, False], 'B': [False, True]})
tm.assert_frame_equal(result, expected)
def test_int_conversion(self):
data = """A,B
1.0,1
2.0,2
3.0,3
"""
data = self.read_csv(StringIO(data))
assert data['A'].dtype == np.float64
assert data['B'].dtype == np.int64
def test_read_nrows(self):
expected = self.read_csv(StringIO(self.data1))[:3]
df = self.read_csv(StringIO(self.data1), nrows=3)
tm.assert_frame_equal(df, expected)
# see gh-10476
df = self.read_csv(StringIO(self.data1), nrows=3.0)
tm.assert_frame_equal(df, expected)
msg = r"'nrows' must be an integer >=0"
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(self.data1), nrows=1.2)
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(self.data1), nrows='foo')
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(self.data1), nrows=-1)
def test_read_chunksize(self):
reader = self.read_csv(StringIO(self.data1), index_col=0, chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunks = list(reader)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
# with invalid chunksize value:
msg = r"'chunksize' must be an integer >=1"
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(self.data1), chunksize=1.3)
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(self.data1), chunksize='foo')
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(self.data1), chunksize=0)
def test_read_chunksize_and_nrows(self):
# gh-15755
# With nrows
reader = self.read_csv(StringIO(self.data1), index_col=0,
chunksize=2, nrows=5)
df = self.read_csv(StringIO(self.data1), index_col=0, nrows=5)
tm.assert_frame_equal(pd.concat(reader), df)
# chunksize > nrows
reader = self.read_csv(StringIO(self.data1), index_col=0,
chunksize=8, nrows=5)
df = self.read_csv(StringIO(self.data1), index_col=0, nrows=5)
tm.assert_frame_equal(pd.concat(reader), df)
# with changing "size":
reader = self.read_csv(StringIO(self.data1), index_col=0,
chunksize=8, nrows=5)
df = self.read_csv(StringIO(self.data1), index_col=0, nrows=5)
tm.assert_frame_equal(reader.get_chunk(size=2), df.iloc[:2])
tm.assert_frame_equal(reader.get_chunk(size=4), df.iloc[2:5])
with pytest.raises(StopIteration):
reader.get_chunk(size=3)
def test_read_chunksize_named(self):
reader = self.read_csv(
StringIO(self.data1), index_col='index', chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col='index')
chunks = list(reader)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
def test_get_chunk_passed_chunksize(self):
data = """A,B,C
1,2,3
4,5,6
7,8,9
1,2,3"""
result = self.read_csv(StringIO(data), chunksize=2)
piece = result.get_chunk()
assert len(piece) == 2
def test_read_chunksize_generated_index(self):
# GH 12185
reader = self.read_csv(StringIO(self.data1), chunksize=2)
df = self.read_csv(StringIO(self.data1))
tm.assert_frame_equal(pd.concat(reader), df)
reader = self.read_csv(StringIO(self.data1), chunksize=2, index_col=0)
df = self.read_csv(StringIO(self.data1), index_col=0)
tm.assert_frame_equal(pd.concat(reader), df)
def test_read_text_list(self):
data = """A,B,C\nfoo,1,2,3\nbar,4,5,6"""
as_list = [['A', 'B', 'C'], ['foo', '1', '2', '3'], ['bar',
'4', '5', '6']]
df = self.read_csv(StringIO(data), index_col=0)
parser = TextParser(as_list, index_col=0, chunksize=2)
chunk = parser.read(None)
tm.assert_frame_equal(chunk, df)
def test_iterator(self):
# See gh-6607
reader = self.read_csv(StringIO(self.data1), index_col=0,
iterator=True)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunk = reader.read(3)
tm.assert_frame_equal(chunk, df[:3])
last_chunk = reader.read(5)
tm.assert_frame_equal(last_chunk, df[3:])
# pass list
lines = list(csv.reader(StringIO(self.data1)))
parser = TextParser(lines, index_col=0, chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunks = list(parser)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
# pass skiprows
parser = TextParser(lines, index_col=0, chunksize=2, skiprows=[1])
chunks = list(parser)
tm.assert_frame_equal(chunks[0], df[1:3])
treader = self.read_table(StringIO(self.data1), sep=',', index_col=0,
iterator=True)
assert isinstance(treader, TextFileReader)
# gh-3967: stopping iteration when chunksize is specified
data = """A,B,C
foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
reader = self.read_csv(StringIO(data), iterator=True)
result = list(reader)
expected = DataFrame(dict(A=[1, 4, 7], B=[2, 5, 8], C=[
3, 6, 9]), index=['foo', 'bar', 'baz'])
tm.assert_frame_equal(result[0], expected)
# chunksize = 1
reader = self.read_csv(StringIO(data), chunksize=1)
result = list(reader)
expected = DataFrame(dict(A=[1, 4, 7], B=[2, 5, 8], C=[
3, 6, 9]), index=['foo', 'bar', 'baz'])
assert len(result) == 3
tm.assert_frame_equal(pd.concat(result), expected)
# skipfooter is not supported with the C parser yet
if self.engine == 'python':
# test bad parameter (skipfooter)
reader = self.read_csv(StringIO(self.data1), index_col=0,
iterator=True, skipfooter=1)
pytest.raises(ValueError, reader.read, 3)
def test_pass_names_with_index(self):
lines = self.data1.split('\n')
no_header = '\n'.join(lines[1:])
# regular index
names = ['index', 'A', 'B', 'C', 'D']
df = self.read_csv(StringIO(no_header), index_col=0, names=names)
expected = self.read_csv(StringIO(self.data1), index_col=0)
tm.assert_frame_equal(df, expected)
# multi index
data = """index1,index2,A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
lines = data.split('\n')
no_header = '\n'.join(lines[1:])
names = ['index1', 'index2', 'A', 'B', 'C', 'D']
df = self.read_csv(StringIO(no_header), index_col=[0, 1],
names=names)
expected = self.read_csv(StringIO(data), index_col=[0, 1])
tm.assert_frame_equal(df, expected)
df = self.read_csv(StringIO(data), index_col=['index1', 'index2'])
tm.assert_frame_equal(df, expected)
def test_multi_index_no_level_names(self):
data = """index1,index2,A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
data2 = """A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
lines = data.split('\n')
no_header = '\n'.join(lines[1:])
names = ['A', 'B', 'C', 'D']
df = self.read_csv(StringIO(no_header), index_col=[0, 1],
header=None, names=names)
expected = self.read_csv(StringIO(data), index_col=[0, 1])
tm.assert_frame_equal(df, expected, check_names=False)
# 2 implicit first cols
df2 = self.read_csv(StringIO(data2))
tm.assert_frame_equal(df2, df)
# reverse order of index
df = self.read_csv(StringIO(no_header), index_col=[1, 0], names=names,
header=None)
expected = self.read_csv(StringIO(data), index_col=[1, 0])
tm.assert_frame_equal(df, expected, check_names=False)
def test_multi_index_blank_df(self):
# GH 14545
data = """a,b
"""
df = self.read_csv(StringIO(data), header=[0])
expected = DataFrame(columns=['a', 'b'])
tm.assert_frame_equal(df, expected)
round_trip = self.read_csv(StringIO(
expected.to_csv(index=False)), header=[0])
tm.assert_frame_equal(round_trip, expected)
data_multiline = """a,b
c,d
"""
df2 = self.read_csv(StringIO(data_multiline), header=[0, 1])
cols = MultiIndex.from_tuples([('a', 'c'), ('b', 'd')])
expected2 = DataFrame(columns=cols)
tm.assert_frame_equal(df2, expected2)
round_trip = self.read_csv(StringIO(
expected2.to_csv(index=False)), header=[0, 1])
tm.assert_frame_equal(round_trip, expected2)
def test_no_unnamed_index(self):
data = """ id c0 c1 c2
0 1 0 a b
1 2 0 c d
2 2 2 e f
"""
df = self.read_table(StringIO(data), sep=' ')
assert df.index.name is None
def test_read_csv_parse_simple_list(self):
text = """foo
bar baz
qux foo
foo
bar"""
df = self.read_csv(StringIO(text), header=None)
expected = DataFrame({0: ['foo', 'bar baz', 'qux foo',
'foo', 'bar']})
tm.assert_frame_equal(df, expected)
@tm.network
def test_url(self):
# HTTP(S)
url = ('https://raw.github.com/pandas-dev/pandas/master/'
'pandas/tests/io/parser/data/salaries.csv')
url_table = self.read_table(url)
dirpath = tm.get_data_path()
localtable = os.path.join(dirpath, 'salaries.csv')
local_table = self.read_table(localtable)
tm.assert_frame_equal(url_table, local_table)
# TODO: ftp testing
@pytest.mark.slow
def test_file(self):
dirpath = tm.get_data_path()
localtable = os.path.join(dirpath, 'salaries.csv')
local_table = self.read_table(localtable)
try:
url_table = self.read_table('file://localhost/' + localtable)
except URLError:
# fails on some systems
pytest.skip("failing on %s" %
' '.join(platform.uname()).strip())
tm.assert_frame_equal(url_table, local_table)
def test_path_pathlib(self):
df = tm.makeDataFrame()
result = tm.round_trip_pathlib(df.to_csv,
lambda p: self.read_csv(p, index_col=0))
tm.assert_frame_equal(df, result)
def test_path_localpath(self):
df = tm.makeDataFrame()
result = tm.round_trip_localpath(
df.to_csv,
lambda p: self.read_csv(p, index_col=0))
tm.assert_frame_equal(df, result)
def test_nonexistent_path(self):
# gh-2428: pls no segfault
# gh-14086: raise more helpful FileNotFoundError
path = '%s.csv' % tm.rands(10)
pytest.raises(compat.FileNotFoundError, self.read_csv, path)
def test_missing_trailing_delimiters(self):
data = """A,B,C,D
1,2,3,4
1,3,3,
1,4,5"""
result = self.read_csv(StringIO(data))
assert result['D'].isna()[1:].all()
def test_skipinitialspace(self):
s = ('"09-Apr-2012", "01:10:18.300", 2456026.548822908, 12849, '
'1.00361, 1.12551, 330.65659, 0355626618.16711, 73.48821, '
'314.11625, 1917.09447, 179.71425, 80.000, 240.000, -350, '
'70.06056, 344.98370, 1, 1, -0.689265, -0.692787, '
'0.212036, 14.7674, 41.605, -9999.0, -9999.0, '
'-9999.0, -9999.0, -9999.0, -9999.0, 000, 012, 128')
sfile = StringIO(s)
# it's 33 columns
result = self.read_csv(sfile, names=lrange(33), na_values=['-9999.0'],
header=None, skipinitialspace=True)
assert pd.isna(result.iloc[0, 29])
def test_utf16_bom_skiprows(self):
# #2298
data = u("""skip this
skip this too
A\tB\tC
1\t2\t3
4\t5\t6""")
data2 = u("""skip this
skip this too
A,B,C
1,2,3
4,5,6""")
path = '__%s__.csv' % tm.rands(10)
with tm.ensure_clean(path) as path:
for sep, dat in [('\t', data), (',', data2)]:
for enc in ['utf-16', 'utf-16le', 'utf-16be']:
bytes = dat.encode(enc)
with open(path, 'wb') as f:
f.write(bytes)
s = BytesIO(dat.encode('utf-8'))
if compat.PY3:
# somewhat False since the code never sees bytes
from io import TextIOWrapper
s = TextIOWrapper(s, encoding='utf-8')
result = self.read_csv(path, encoding=enc, skiprows=2,
sep=sep)
expected = self.read_csv(s, encoding='utf-8', skiprows=2,
sep=sep)
s.close()
tm.assert_frame_equal(result, expected)
def test_utf16_example(self):
path = tm.get_data_path('utf16_ex.txt')
# it works! and is the right length
result = self.read_table(path, encoding='utf-16')
assert len(result) == 50
if not compat.PY3:
buf = BytesIO(open(path, 'rb').read())
result = self.read_table(buf, encoding='utf-16')
assert len(result) == 50
def test_unicode_encoding(self):
pth = tm.get_data_path('unicode_series.csv')
result = self.read_csv(pth, header=None, encoding='latin-1')
result = result.set_index(0)
got = result[1][1632]
expected = u('\xc1 k\xf6ldum klaka (Cold Fever) (1994)')
assert got == expected
def test_trailing_delimiters(self):
# #2442. grumble grumble
data = """A,B,C
1,2,3,
4,5,6,
7,8,9,"""
result = self.read_csv(StringIO(data), index_col=False)
expected = DataFrame({'A': [1, 4, 7], 'B': [2, 5, 8],
'C': [3, 6, 9]})
tm.assert_frame_equal(result, expected)
def test_escapechar(self):
# http://stackoverflow.com/questions/13824840/feature-request-for-
# pandas-read-csv
data = '''SEARCH_TERM,ACTUAL_URL
"bra tv bord","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"
"tv p\xc3\xa5 hjul","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"
"SLAGBORD, \\"Bergslagen\\", IKEA:s 1700-tals serie","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"''' # noqa
result = self.read_csv(StringIO(data), escapechar='\\',
quotechar='"', encoding='utf-8')
assert result['SEARCH_TERM'][2] == ('SLAGBORD, "Bergslagen", '
'IKEA:s 1700-tals serie')
tm.assert_index_equal(result.columns,
Index(['SEARCH_TERM', 'ACTUAL_URL']))
def test_int64_min_issues(self):
# #2599
data = 'A,B\n0,0\n0,'
result = self.read_csv(StringIO(data))
expected = DataFrame({'A': [0, 0], 'B': [0, np.nan]})
tm.assert_frame_equal(result, expected)
def test_parse_integers_above_fp_precision(self):
data = """Numbers
17007000002000191
17007000002000191
17007000002000191
17007000002000191
17007000002000192
17007000002000192
17007000002000192
17007000002000192
17007000002000192
17007000002000194"""
result = self.read_csv(StringIO(data))
expected = DataFrame({'Numbers': [17007000002000191,
17007000002000191,
17007000002000191,
17007000002000191,
17007000002000192,
17007000002000192,
17007000002000192,
17007000002000192,
17007000002000192,
17007000002000194]})
tm.assert_series_equal(result['Numbers'], expected['Numbers'])
def test_chunks_have_consistent_numerical_type(self):
integers = [str(i) for i in range(499999)]
data = "a\n" + "\n".join(integers + ["1.0", "2.0"] + integers)
with tm.assert_produces_warning(False):
df = self.read_csv(StringIO(data))
# Assert that types were coerced.
assert type(df.a[0]) is np.float64
assert df.a.dtype == np.float
def test_warn_if_chunks_have_mismatched_type(self):
warning_type = False
integers = [str(i) for i in range(499999)]
data = "a\n" + "\n".join(integers + ['a', 'b'] + integers)
# see gh-3866: if chunks are different types and can't
# be coerced using numerical types, then issue warning.
if self.engine == 'c' and self.low_memory:
warning_type = DtypeWarning
with tm.assert_produces_warning(warning_type):
df = self.read_csv(StringIO(data))
assert df.a.dtype == np.object
def test_integer_overflow_bug(self):
# see gh-2601
data = "65248E10 11\n55555E55 22\n"
result = self.read_csv(StringIO(data), header=None, sep=' ')
assert result[0].dtype == np.float64
result = self.read_csv(StringIO(data), header=None, sep=r'\s+')
assert result[0].dtype == np.float64
def test_catch_too_many_names(self):
# see gh-5156
data = """\
1,2,3
4,,6
7,8,9
10,11,12\n"""
pytest.raises(ValueError, self.read_csv, StringIO(data),
header=0, names=['a', 'b', 'c', 'd'])
def test_ignore_leading_whitespace(self):
# see gh-3374, gh-6607
data = ' a b c\n 1 2 3\n 4 5 6\n 7 8 9'
result = self.read_table(StringIO(data), sep=r'\s+')
expected = DataFrame({'a': [1, 4, 7], 'b': [2, 5, 8], 'c': [3, 6, 9]})
tm.assert_frame_equal(result, expected)
def test_chunk_begins_with_newline_whitespace(self):
# see gh-10022
data = '\n hello\nworld\n'
result = self.read_csv(StringIO(data), header=None)
assert len(result) == 2
# see gh-9735: this issue is C parser-specific (bug when
# parsing whitespace and characters at chunk boundary)
if self.engine == 'c':
chunk1 = 'a' * (1024 * 256 - 2) + '\na'
chunk2 = '\n a'
result = self.read_csv(StringIO(chunk1 + chunk2), header=None)
expected = DataFrame(['a' * (1024 * 256 - 2), 'a', ' a'])
tm.assert_frame_equal(result, expected)
def test_empty_with_index(self):
# see gh-10184
data = 'x,y'
result = self.read_csv(StringIO(data), index_col=0)
expected = DataFrame([], columns=['y'], index=Index([], name='x'))
tm.assert_frame_equal(result, expected)
def test_empty_with_multiindex(self):
# see gh-10467
data = 'x,y,z'
result = self.read_csv(StringIO(data), index_col=['x', 'y'])
expected = DataFrame([], columns=['z'],
index=MultiIndex.from_arrays(
[[]] * 2, names=['x', 'y']))
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_empty_with_reversed_multiindex(self):
data = 'x,y,z'
result = self.read_csv(StringIO(data), index_col=[1, 0])
expected = DataFrame([], columns=['z'],
index=MultiIndex.from_arrays(
[[]] * 2, names=['y', 'x']))
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_float_parser(self):
# see gh-9565
data = '45e-1,4.5,45.,inf,-inf'
result = self.read_csv(StringIO(data), header=None)
expected = DataFrame([[float(s) for s in data.split(',')]])
tm.assert_frame_equal(result, expected)
def test_scientific_no_exponent(self):
# see gh-12215
df = DataFrame.from_items([('w', ['2e']), ('x', ['3E']),
('y', ['42e']), ('z', ['632E'])])
data = df.to_csv(index=False)
for prec in self.float_precision_choices:
df_roundtrip = self.read_csv(
StringIO(data), float_precision=prec)
tm.assert_frame_equal(df_roundtrip, df)
def test_int64_overflow(self):
data = """ID
00013007854817840016671868
00013007854817840016749251
00013007854817840016754630
00013007854817840016781876
00013007854817840017028824
00013007854817840017963235
00013007854817840018860166"""
# 13007854817840016671868 > UINT64_MAX, so this
# will overflow and return object as the dtype.
result = self.read_csv(StringIO(data))
assert result['ID'].dtype == object
# 13007854817840016671868 > UINT64_MAX, so attempts
# to cast to either int64 or uint64 will result in
# an OverflowError being raised.
for conv in (np.int64, np.uint64):
pytest.raises(OverflowError, self.read_csv,
StringIO(data), converters={'ID': conv})
# These numbers fall right inside the int64-uint64 range,
# so they should be parsed as integers (int64/uint64), not strings.
ui_max = np.iinfo(np.uint64).max
i_max = np.iinfo(np.int64).max
i_min = np.iinfo(np.int64).min
for x in [i_max, i_min, ui_max]:
result = self.read_csv(StringIO(str(x)), header=None)
expected = DataFrame([x])
tm.assert_frame_equal(result, expected)
# These numbers fall just outside the int64-uint64 range,
# so they should be parsed as string.
too_big = ui_max + 1
too_small = i_min - 1
for x in [too_big, too_small]:
result = self.read_csv(StringIO(str(x)), header=None)
expected = DataFrame([str(x)])
tm.assert_frame_equal(result, expected)
# No numerical dtype can hold both negative and uint64 values,
# so they should be cast as string.
data = '-1\n' + str(2**63)
expected = DataFrame([str(-1), str(2**63)])
result = self.read_csv(StringIO(data), header=None)
tm.assert_frame_equal(result, expected)
data = str(2**63) + '\n-1'
expected = DataFrame([str(2**63), str(-1)])
result = self.read_csv(StringIO(data), header=None)
tm.assert_frame_equal(result, expected)
def test_empty_with_nrows_chunksize(self):
# see gh-9535
expected = DataFrame([], columns=['foo', 'bar'])
result = self.read_csv(StringIO('foo,bar\n'), nrows=10)
tm.assert_frame_equal(result, expected)
result = next(iter(self.read_csv(
StringIO('foo,bar\n'), chunksize=10)))
tm.assert_frame_equal(result, expected)
with tm.assert_produces_warning(
FutureWarning, check_stacklevel=False):
result = self.read_csv(StringIO('foo,bar\n'),
nrows=10, as_recarray=True)
result = DataFrame(result[2], columns=result[1],
index=result[0])
tm.assert_frame_equal(DataFrame.from_records(
result), expected, check_index_type=False)
with tm.assert_produces_warning(
FutureWarning, check_stacklevel=False):
result = next(iter(self.read_csv(StringIO('foo,bar\n'),
chunksize=10, as_recarray=True)))
result = DataFrame(result[2], columns=result[1], index=result[0])
tm.assert_frame_equal(DataFrame.from_records(result), expected,
check_index_type=False)
def test_eof_states(self):
# see gh-10728, gh-10548
# With skip_blank_lines = True
expected = DataFrame([[4, 5, 6]], columns=['a', 'b', 'c'])
# gh-10728: WHITESPACE_LINE
data = 'a,b,c\n4,5,6\n '
result = self.read_csv(StringIO(data))
tm.assert_frame_equal(result, expected)
# gh-10548: EAT_LINE_COMMENT
data = 'a,b,c\n4,5,6\n#comment'
result = self.read_csv(StringIO(data), comment='#')
tm.assert_frame_equal(result, expected)
# EAT_CRNL_NOP
data = 'a,b,c\n4,5,6\n\r'
result = self.read_csv(StringIO(data))
tm.assert_frame_equal(result, expected)
# EAT_COMMENT
data = 'a,b,c\n4,5,6#comment'
result = self.read_csv(StringIO(data), comment='#')
tm.assert_frame_equal(result, expected)
# SKIP_LINE
data = 'a,b,c\n4,5,6\nskipme'
result = self.read_csv(StringIO(data), skiprows=[2])
tm.assert_frame_equal(result, expected)
# With skip_blank_lines = False
# EAT_LINE_COMMENT
data = 'a,b,c\n4,5,6\n#comment'
result = self.read_csv(
StringIO(data), comment='#', skip_blank_lines=False)
expected = DataFrame([[4, 5, 6]], columns=['a', 'b', 'c'])
tm.assert_frame_equal(result, expected)
# IN_FIELD
data = 'a,b,c\n4,5,6\n '
result = self.read_csv(StringIO(data), skip_blank_lines=False)
expected = DataFrame(
[['4', 5, 6], [' ', None, None]], columns=['a', 'b', 'c'])
tm.assert_frame_equal(result, expected)
# EAT_CRNL
data = 'a,b,c\n4,5,6\n\r'
result = self.read_csv(StringIO(data), skip_blank_lines=False)
expected = DataFrame(
[[4, 5, 6], [None, None, None]], columns=['a', 'b', 'c'])
tm.assert_frame_equal(result, expected)
# Should produce exceptions
# ESCAPED_CHAR
data = "a,b,c\n4,5,6\n\\"
pytest.raises(Exception, self.read_csv,
StringIO(data), escapechar='\\')
# ESCAPE_IN_QUOTED_FIELD
data = 'a,b,c\n4,5,6\n"\\'
pytest.raises(Exception, self.read_csv,
StringIO(data), escapechar='\\')
# IN_QUOTED_FIELD
data = 'a,b,c\n4,5,6\n"'
pytest.raises(Exception, self.read_csv,
StringIO(data), escapechar='\\')
def test_uneven_lines_with_usecols(self):
# See gh-12203
csv = r"""a,b,c
0,1,2
3,4,5,6,7
8,9,10
"""
# make sure that an error is still thrown
# when the 'usecols' parameter is not provided
msg = r"Expected \d+ fields in line \d+, saw \d+"
with tm.assert_raises_regex(ValueError, msg):
df = self.read_csv(StringIO(csv))
expected = DataFrame({
'a': [0, 3, 8],
'b': [1, 4, 9]
})
usecols = [0, 1]
df = self.read_csv(StringIO(csv), usecols=usecols)
tm.assert_frame_equal(df, expected)
usecols = ['a', 'b']
df = self.read_csv(StringIO(csv), usecols=usecols)
tm.assert_frame_equal(df, expected)
def test_read_empty_with_usecols(self):
# See gh-12493
names = ['Dummy', 'X', 'Dummy_2']
usecols = names[1:2] # ['X']
# first, check that the parser, when faced with no
# provided columns,
# throws the correct error, with or without usecols
errmsg = "No columns to parse from file"
with tm.assert_raises_regex(EmptyDataError, errmsg):
self.read_csv(StringIO(''))
with tm.assert_raises_regex(EmptyDataError, errmsg):
self.read_csv(StringIO(''), usecols=usecols)
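# ',,' parses as a single row of NaNs, so selecting only 'X' should
# yield a single float64 NaN value.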
expected = DataFrame(columns=usecols, index=[0], dtype=np.float64)
df = self.read_csv(StringIO(',,'), names=names, usecols=usecols)
tm.assert_frame_equal(df, expected)
expected = DataFrame(columns=usecols)
df = self.read_csv(StringIO(''), names=names, usecols=usecols)
tm.assert_frame_equal(df, expected)
def test_trailing_spaces(self):
data = "A B C \nrandom line with trailing spaces \nskip\n1,2,3\n1,2.,4.\nrandom line with trailing tabs\t\t\t\n \n5.1,NaN,10.0\n" # noqa
expected = DataFrame([[1., 2., 4.],
[5.1, np.nan, 10.]])
# gh-8661, gh-8679: this should ignore six lines including
# lines with trailing whitespace and blank lines
df = self.read_csv(StringIO(data.replace(',', ' ')),
header=None, delim_whitespace=True,
skiprows=[0, 1, 2, 3, 5, 6], skip_blank_lines=True)
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data.replace(',', ' ')),
header=None, delim_whitespace=True,
skiprows=[0, 1, 2, 3, 5, 6],
skip_blank_lines=True)
tm.assert_frame_equal(df, expected)
# gh-8983: test skipping set of rows after a row with trailing spaces
expected = DataFrame({"A": [1., 5.1], "B": [2., np.nan],
"C": [4., 10]})
df = self.read_table(StringIO(data.replace(',', ' ')),
delim_whitespace=True,
skiprows=[1, 2, 3, 5, 6], skip_blank_lines=True)
tm.assert_frame_equal(df, expected)
def test_raise_on_sep_with_delim_whitespace(self):
# see gh-6607
data = 'a b c\n1 2 3'
with tm.assert_raises_regex(ValueError,
'you can only specify one'):
self.read_table(StringIO(data), sep=r'\s', delim_whitespace=True)
def test_single_char_leading_whitespace(self):
# see gh-9710
data = """\
MyColumn
a
b
a
b\n"""
expected = DataFrame({'MyColumn': list('abab')})
result = self.read_csv(StringIO(data), delim_whitespace=True,
skipinitialspace=True)
tm.assert_frame_equal(result, expected)
result = self.read_csv(StringIO(data), skipinitialspace=True)
tm.assert_frame_equal(result, expected)
def test_empty_lines(self):
data = """\
A,B,C
1,2.,4.
5.,NaN,10.0
-70,.4,1
"""
expected = np.array([[1., 2., 4.],
[5., np.nan, 10.],
[-70., .4, 1.]])
df = self.read_csv(StringIO(data))
tm.assert_numpy_array_equal(df.values, expected)
df = self.read_csv(StringIO(data.replace(',', ' ')), sep=r'\s+')
tm.assert_numpy_array_equal(df.values, expected)
expected = np.array([[1., 2., 4.],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[5., np.nan, 10.],
[np.nan, np.nan, np.nan],
[-70., .4, 1.]])
df = self.read_csv(StringIO(data), skip_blank_lines=False)
tm.assert_numpy_array_equal(df.values, expected)
def test_whitespace_lines(self):
data = """
\t \t\t
\t
A,B,C
\t 1,2.,4.
5.,NaN,10.0
"""
expected = np.array([[1, 2., 4.],
[5., np.nan, 10.]])
df = self.read_csv(StringIO(data))
tm.assert_numpy_array_equal(df.values, expected)
def test_regex_separator(self):
# see gh-6607
data = """ A B C D
a 1 2 3 4
b 1 2 3 4
c 1 2 3 4
"""
df = self.read_table(StringIO(data), sep=r'\s+')
expected = self.read_csv(StringIO(re.sub('[ ]+', ',', data)),
index_col=0)
assert expected.index.name is None
tm.assert_frame_equal(df, expected)
data = ' a b c\n1 2 3 \n4 5 6\n 7 8 9'
result = self.read_table(StringIO(data), sep=r'\s+')
expected = DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]],
columns=['a', 'b', 'c'])
tm.assert_frame_equal(result, expected)
@tm.capture_stdout
def test_verbose_import(self):
text = """a,b,c,d
one,1,2,3
one,1,2,3
,1,2,3
one,1,2,3
,1,2,3
,1,2,3
one,1,2,3
two,1,2,3"""
# Engines are verbose in different ways.
self.read_csv(StringIO(text), verbose=True)
output = sys.stdout.getvalue()
if self.engine == 'c':
assert 'Tokenization took:' in output
assert 'Parser memory cleanup took:' in output
else: # Python engine
assert output == 'Filled 3 NA values in column a\n'
# Reset the stdout buffer.
sys.stdout = StringIO()
text = """a,b,c,d
one,1,2,3
two,1,2,3
three,1,2,3
four,1,2,3
five,1,2,3
,1,2,3
seven,1,2,3
eight,1,2,3"""
self.read_csv(StringIO(text), verbose=True, index_col=0)
output = sys.stdout.getvalue()
# Engines are verbose in different ways.
if self.engine == 'c':
assert 'Tokenization took:' in output
assert 'Parser memory cleanup took:' in output
else: # Python engine
assert output == 'Filled 1 NA values in column a\n'
def test_iteration_open_handle(self):
if PY3:
pytest.skip(
"won't work in Python 3 {0}".format(sys.version_info))
with tm.ensure_clean() as path:
with open(path, 'wb') as f:
f.write('AAA\nBBB\nCCC\nDDD\nEEE\nFFF\nGGG')
with open(path, 'rb') as f:
for line in f:
if 'CCC' in line:
break
if self.engine == 'c':
pytest.raises(Exception, self.read_table,
f, squeeze=True, header=None)
else:
result = self.read_table(f, squeeze=True, header=None)
expected = Series(['DDD', 'EEE', 'FFF', 'GGG'], name=0)
tm.assert_series_equal(result, expected)
def test_1000_sep_with_decimal(self):
data = """A|B|C
1|2,334.01|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334.01, 13],
'C': [5, 10.]
})
assert expected.A.dtype == 'int64'
assert expected.B.dtype == 'float'
assert expected.C.dtype == 'float'
df = self.read_csv(StringIO(data), sep='|', thousands=',', decimal='.')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|',
thousands=',', decimal='.')
tm.assert_frame_equal(df, expected)
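# Same values written with European-style separators: '.' for
# thousands and ',' for decimals.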
data_with_odd_sep = """A|B|C
1|2.334,01|5
10|13|10,
"""
df = self.read_csv(StringIO(data_with_odd_sep),
sep='|', thousands='.', decimal=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data_with_odd_sep),
sep='|', thousands='.', decimal=',')
tm.assert_frame_equal(df, expected)
def test_euro_decimal_format(self):
data = """Id;Number1;Number2;Text1;Text2;Number3
1;1521,1541;187101,9543;ABC;poi;4,738797819
2;121,12;14897,76;DEF;uyt;0,377320872
3;878,158;108013,434;GHI;rez;2,735694704"""
df2 = self.read_csv(StringIO(data), sep=';', decimal=',')
assert df2['Number1'].dtype == float
assert df2['Number2'].dtype == float
assert df2['Number3'].dtype == float
def test_inf_parsing(self):
data = """\
,A
a,inf
b,-inf
c,+Inf
d,-Inf
e,INF
f,-INF
g,+INf
h,-INf
i,inF
j,-inF"""
inf = float('inf')
expected = Series([inf, -inf] * 5)
df = self.read_csv(StringIO(data), index_col=0)
tm.assert_almost_equal(df['A'].values, expected.values)
df = self.read_csv(StringIO(data), index_col=0, na_filter=False)
tm.assert_almost_equal(df['A'].values, expected.values)
def test_raise_on_no_columns(self):
# single newline
data = "\n"
pytest.raises(EmptyDataError, self.read_csv, StringIO(data))
# test with more than a single newline
data = "\n\n\n"
pytest.raises(EmptyDataError, self.read_csv, StringIO(data))
def test_compact_ints_use_unsigned(self):
# see gh-13323
data = 'a,b,c\n1,9,258'
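# 258 does not fit in 8 bits, so compact_ints can only downcast the
# 'c' column to int16/uint16, while 'a' and 'b' fit in int8/uint8.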
# sanity check
expected = DataFrame({
'a': np.array([1], dtype=np.int64),
'b': np.array([9], dtype=np.int64),
'c': np.array([258], dtype=np.int64),
})
out = self.read_csv(StringIO(data))
tm.assert_frame_equal(out, expected)
expected = DataFrame({
'a': np.array([1], dtype=np.int8),
'b': np.array([9], dtype=np.int8),
'c': np.array([258], dtype=np.int16),
})
# default behaviour for 'use_unsigned'
with tm.assert_produces_warning(
FutureWarning, check_stacklevel=False):
out = self.read_csv(StringIO(data), compact_ints=True)
tm.assert_frame_equal(out, expected)
with tm.assert_produces_warning(
FutureWarning, check_stacklevel=False):
out = self.read_csv(StringIO(data), compact_ints=True,
use_unsigned=False)
tm.assert_frame_equal(out, expected)
expected = DataFrame({
'a': np.array([1], dtype=np.uint8),
'b': np.array([9], dtype=np.uint8),
'c': np.array([258], dtype=np.uint16),
})
with tm.assert_produces_warning(
FutureWarning, check_stacklevel=False):
out = self.read_csv(StringIO(data), compact_ints=True,
use_unsigned=True)
tm.assert_frame_equal(out, expected)
def test_compact_ints_as_recarray(self):
data = ('0,1,0,0\n'
'1,1,0,0\n'
'0,1,0,1')
with tm.assert_produces_warning(
FutureWarning, check_stacklevel=False):
result = self.read_csv(StringIO(data), delimiter=',', header=None,
compact_ints=True, as_recarray=True)
ex_dtype = np.dtype([(str(i), 'i1') for i in range(4)])
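# 'i1' is the NumPy typecode for int8; the use_unsigned=True case
# below expects 'u1' (uint8) instead.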
assert result.dtype == ex_dtype
with tm.assert_produces_warning(
FutureWarning, check_stacklevel=False):
result = self.read_csv(StringIO(data), delimiter=',', header=None,
as_recarray=True, compact_ints=True,
use_unsigned=True)
ex_dtype = np.dtype([(str(i), 'u1') for i in range(4)])
assert result.dtype == ex_dtype
def test_as_recarray(self):
# basic test
with tm.assert_produces_warning(
FutureWarning, check_stacklevel=False):
data = 'a,b\n1,a\n2,b'
expected = np.array([(1, 'a'), (2, 'b')],
dtype=[('a', '=i8'), ('b', 'O')])
out = self.read_csv(StringIO(data), as_recarray=True)
tm.assert_numpy_array_equal(out, expected)
# index_col ignored
with tm.assert_produces_warning(
FutureWarning, check_stacklevel=False):
data = 'a,b\n1,a\n2,b'
expected = np.array([(1, 'a'), (2, 'b')],
dtype=[('a', '=i8'), ('b', 'O')])
out = self.read_csv(StringIO(data), as_recarray=True, index_col=0)
tm.assert_numpy_array_equal(out, expected)
# respects names
with tm.assert_produces_warning(
FutureWarning, check_stacklevel=False):
data = '1,a\n2,b'
expected = np.array([(1, 'a'), (2, 'b')],
dtype=[('a', '=i8'), ('b', 'O')])
out = self.read_csv(StringIO(data), names=['a', 'b'],
header=None, as_recarray=True)
tm.assert_numpy_array_equal(out, expected)
# header order is respected even though it conflicts
# with the natural ordering of the column names
with tm.assert_produces_warning(
FutureWarning, check_stacklevel=False):
data = 'b,a\n1,a\n2,b'
expected = np.array([(1, 'a'), (2, 'b')],
dtype=[('b', '=i8'), ('a', 'O')])
out = self.read_csv(StringIO(data), as_recarray=True)
tm.assert_numpy_array_equal(out, expected)
# overrides the squeeze parameter
with tm.assert_produces_warning(
FutureWarning, check_stacklevel=False):
data = 'a\n1'
expected = np.array([(1,)], dtype=[('a', '=i8')])
out = self.read_csv(StringIO(data), as_recarray=True, squeeze=True)
tm.assert_numpy_array_equal(out, expected)
# does data conversions before doing recarray conversion
with tm.assert_produces_warning(
FutureWarning, check_stacklevel=False):
data = 'a,b\n1,a\n2,b'
conv = lambda x: int(x) + 1
expected = np.array([(2, 'a'), (3, 'b')],
dtype=[('a', '=i8'), ('b', 'O')])
out = self.read_csv(StringIO(data), as_recarray=True,
converters={'a': conv})
tm.assert_numpy_array_equal(out, expected)
# filters by usecols before doing recarray conversion
with tm.assert_produces_warning(
FutureWarning, check_stacklevel=False):
data = 'a,b\n1,a\n2,b'
expected = np.array([(1,), (2,)], dtype=[('a', '=i8')])
out = self.read_csv(StringIO(data), as_recarray=True,
usecols=['a'])
tm.assert_numpy_array_equal(out, expected)
def test_memory_map(self):
mmap_file = os.path.join(self.dirpath, 'test_mmap.csv')
expected = DataFrame({
'a': [1, 2, 3],
'b': ['one', 'two', 'three'],
'c': ['I', 'II', 'III']
})
out = self.read_csv(mmap_file, memory_map=True)
tm.assert_frame_equal(out, expected)
def test_null_byte_char(self):
# see gh-2741
data = '\x00,foo'
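# The C parser turns the NUL byte into NaN, while the Python parser
# raises ParserError, as checked below.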
cols = ['a', 'b']
expected = DataFrame([[np.nan, 'foo']],
columns=cols)
if self.engine == 'c':
out = self.read_csv(StringIO(data), names=cols)
tm.assert_frame_equal(out, expected)
else:
msg = "NULL byte detected"
with tm.assert_raises_regex(ParserError, msg):
self.read_csv(StringIO(data), names=cols)
def test_utf8_bom(self):
# see gh-4793
bom = u('\ufeff')
utf8 = 'utf-8'
def _encode_data_with_bom(_data):
bom_data = (bom + _data).encode(utf8)
return BytesIO(bom_data)
# basic test
data = 'a\n1'
expected = DataFrame({'a': [1]})
out = self.read_csv(_encode_data_with_bom(data),
encoding=utf8)
tm.assert_frame_equal(out, expected)
# test with "regular" quoting
data = '"a"\n1'
expected = DataFrame({'a': [1]})
out = self.read_csv(_encode_data_with_bom(data),
encoding=utf8, quotechar='"')
tm.assert_frame_equal(out, expected)
# test in a data row instead of header
data = 'b\n1'
expected = DataFrame({'a': ['b', '1']})
out = self.read_csv(_encode_data_with_bom(data),
encoding=utf8, names=['a'])
tm.assert_frame_equal(out, expected)
# test in empty data row with skipping
data = '\n1'
expected = DataFrame({'a': [1]})
out = self.read_csv(_encode_data_with_bom(data),
encoding=utf8, names=['a'],
skip_blank_lines=True)
tm.assert_frame_equal(out, expected)
# test in empty data row without skipping
data = '\n1'
expected = DataFrame({'a': [np.nan, 1.0]})
out = self.read_csv(_encode_data_with_bom(data),
encoding=utf8, names=['a'],
skip_blank_lines=False)
tm.assert_frame_equal(out, expected)
def test_temporary_file(self):
# see gh-13398
data1 = "0 0"
from tempfile import TemporaryFile
new_file = TemporaryFile("w+")
new_file.write(data1)
new_file.flush()
new_file.seek(0)
result = self.read_csv(new_file, sep=r'\s+', header=None)
new_file.close()
expected = DataFrame([[0, 0]])
tm.assert_frame_equal(result, expected)
def test_read_csv_utf_aliases(self):
# see gh-13549
expected = pd.DataFrame({'mb_num': [4.8], 'multibyte': ['test']})
for byte in [8, 16]:
for fmt in ['utf-{0}', 'utf_{0}', 'UTF-{0}', 'UTF_{0}']:
encoding = fmt.format(byte)
data = 'mb_num,multibyte\n4.8,test'.encode(encoding)
result = self.read_csv(BytesIO(data), encoding=encoding)
tm.assert_frame_equal(result, expected)
def test_internal_eof_byte(self):
# see gh-5500
data = "a,b\n1\x1a,2"
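# \x1a (Ctrl-Z) inside a field should be kept as data rather than
# treated as an end-of-file marker.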
expected = pd.DataFrame([["1\x1a", 2]], columns=['a', 'b'])
result = self.read_csv(StringIO(data))
| tm.assert_frame_equal(result, expected) | pandas.util.testing.assert_frame_equal |
# -*- coding: utf-8 -*-
import numpy as np, pandas as pd, arviz as az, seaborn as sns, matplotlib.pyplot as plt
from cmdstanpy import CmdStanModel
raw_data = pd.read_csv("data/challenger_data.csv")
raw_data["Date"] = | pd.to_datetime(raw_data["Date"], infer_datetime_format=True) | pandas.to_datetime |
from pyspark import SparkConf, SparkContext
from pyspark.sql import SparkSession
import string, sys, re
import pandas as pd
import geopandas as gpd
from pyspark.sql.types import *
from geospark.register import upload_jars
from geospark.register import GeoSparkRegistrator
# Create Spark Session
spark = SparkSession.builder.\
appName("SparkSessionExample").\
getOrCreate()
# Uses the findspark Python package to upload jar files to the executors and nodes.
upload_jars()
# Registers all GeoSparkSQL functions
GeoSparkRegistrator.registerAll(spark)
# Load matrix of coordinates and US county data into Spark and GeoPandas
original_matrix_df = spark.read.format("csv").option("header", "true").load("geospark_matrix.csv")
original_geo_df = gpd.read_file("cb_2018_us_county_500k/cb_2018_us_county_500k.shp")
# Map each Polygon in the geometry field of original_geo_df to WKT (well-known-text) format and rename as counties_df
wkts = map(lambda g: str(g.to_wkt()), original_geo_df.geometry)
original_geo_df['wkt'] = | pd.Series(wkts) | pandas.Series |
#!/usr/bin/env python3
"""A tool to plot cumulative time taken for each planner to solve a collection
of problems. Really needs to be renamed to avoid confusion with
solution_time_plot.py."""
from argparse import ArgumentParser
from collections import OrderedDict
from json import load
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from asnets.scripts.solution_time_plot import add_common_parser_opts
parser = ArgumentParser(
description="plots of total running time for ICAPS/JFPDA slides (and "
"maybe JAIR too)")
parser.add_argument('--save',
metavar='PATH',
type=str,
default=None,
help="destination file for graph")
parser.add_argument('--no-legend',
dest='add_legend',
default="brief",
action='store_false',
help='disable legend')
parser.add_argument('--dims',
nargs=2,
type=float,
metavar=('WIDTH', 'HEIGHT'),
default=[7, 3],
help="dimensions (in inches) for saved plot")
parser.add_argument('--xmax',
type=int,
help='maximum time to show along x-axis')
parser.add_argument('--title', help='title for the plot')
parser.add_argument(
'--presentation',
default=False,
action='store_true',
help='use typeface/display choices appropriate for presentation (rather '
'than printed work)')
add_common_parser_opts(parser)
def _load_inner_df(expt_str, args):
"""Load single experiment outcome as dataframe."""
try:
label, path = expt_str.split(':', 1)
except ValueError as e:
print('Could not parse label:path pair "%s"' % expt_str)
raise e
# this is for data that we don't have because one method took forever to
# solve anything :(
no_data = path == 'EMPTY'
if no_data:
data = {'eval_names': [], 'eval_sizes': [], 'eval_runs': []}
else:
# load data
with open(path, 'r') as fp:
data = load(fp)
# sometimes we have more names than sizes for some reason (probably
# collate_data is broken)
num_runs_set = set(
    map(len, [data['eval_sizes'], data['eval_runs'], data['eval_names']]))
# all three lists must have the same length, i.e. the set of lengths has one element
assert len(num_runs_set) == 1, "inconsistent sizes (%s)" % (num_runs_set, )
# this is used for ASNets
train_time = data.get('train_time', 0)
# These are sometimes used by the calling code. name_arch is the name of
# the architecture module (e.g. actprop_2l), and name_expt is the name of
# the experiment module (e.g. ex_blocksworld).
name_arch = data.get('name_arch', 'EMPTY')
name_expt = data.get('name_expt', 'EMPTY')
# record format:
# {
# "problem": <problem-name>,
# "problem_size": <problem-size>,
# "method": <algorithm-name>,
# "goal_reached": <goal-reached>,
# "cost": <cost-or-deadend-cost>,
# "time": <time-or-timeout>,
# "time_raw": <raw-time-maybe-none>,
# "run_seq_num": <run-num-in-sequence>,
# }
# We'll make a DataFrame out of those!
records = []
for name, size, data_dict in zip(data['eval_names'], data['eval_sizes'],
data['eval_runs']):
for seq_num, (goal_reached, cost, time) in enumerate(
zip(data_dict['goal_reached'], data_dict['cost'],
data_dict['time'])):
if time is None:
time_or_timeout = args.timeout
else:
# always add in training time
time_or_timeout = time + train_time
record = {
"problem": name,
"problem_size": size,
"method": label,
"goal_reached": goal_reached,
"cost": cost,
"time": time_or_timeout,
# use nans instead of None to hint to Pandas that this series
# should be float, and not object
"time_raw": time if time is not None else float('nan'),
"train_time": train_time,
"run_seq_num": seq_num,
"name_arch": name_arch,
"name_expt": name_expt,
}
records.append(record)
frame = | pd.DataFrame.from_records(records) | pandas.DataFrame.from_records |
# Preprocess the second-round competition data
import os
import pandas as pd
import lightgbm as lgb
from sklearn.model_selection import StratifiedKFold
import numpy as np
from sklearn.metrics import f1_score
path = './'
w2v_path = path + '/w2v'
train = pd.read_csv(path + '/train_2.csv')
test = pd.read_csv(path + '/test_2.csv')
train_stacking = | pd.read_csv(path + '/stack/train.csv') | pandas.read_csv |
# pylint: disable=missing-docstring
from copy import copy
from unittest.mock import patch, call
from datetime import datetime, timedelta
from django.test import TestCase
from django.utils import timezone
from django.db.utils import DataError
from django.core.exceptions import ValidationError
import pandas as pd
from server.models import Match, Team
from server.tests.fixtures import data_factories
from server.tests.fixtures.factories import FullMatchFactory
class TestMatch(TestCase):
fixtures = ["ml_models.json"]
def setUp(self):
match_datetime = timezone.make_aware(datetime(2018, 5, 5))
self.match = Match.objects.create(
start_date_time=match_datetime, round_number=5, venue="Corporate Stadium"
)
self.home_team = Team.objects.create(name="Richmond")
self.away_team = Team.objects.create(name="Melbourne")
self.match.teammatch_set.create(
team=self.home_team, match=self.match, at_home=True, score=50
)
self.match.teammatch_set.create(
team=self.away_team, match=self.match, at_home=False, score=100
)
def test_get_or_create_from_raw_data(self):
fixture_data = data_factories.fake_fixture_data().to_dict("records")[0]
match_count = Match.objects.count()
with self.subTest("with validation error"):
invalid_fixture_data = copy(fixture_data)
invalid_fixture_data["venue"] = "venue" * 25
with self.assertRaises(DataError):
Match.get_or_create_from_raw_data(invalid_fixture_data)
self.assertEqual(Match.objects.count(), match_count)
created_match = Match.get_or_create_from_raw_data(fixture_data)
self.assertIsInstance(created_match, Match)
self.assertEqual(Match.objects.count(), match_count + 1)
with self.subTest("with existing match record"):
gotten_match = Match.get_or_create_from_raw_data(fixture_data)
self.assertEqual(gotten_match, created_match)
self.assertEqual(Match.objects.count(), match_count + 1)
def test_played_without_results(self):
FullMatchFactory(
start_date_time=timezone.localtime() - timedelta(days=1),
home_team_match__score=0,
away_team_match__score=0,
)
FullMatchFactory(
start_date_time=timezone.localtime() - timedelta(days=1),
home_team_match__score=50,
away_team_match__score=80,
)
FullMatchFactory(
start_date_time=timezone.localtime() + timedelta(days=1),
home_team_match__score=0,
away_team_match__score=0,
)
played_matches_without_results = Match.played_without_results()
self.assertEqual(played_matches_without_results.count(), 1)
def test_earliest_date_time_without_results(self):
FullMatchFactory(
start_date_time=timezone.localtime() - timedelta(days=1),
home_team_match__score=50,
away_team_match__score=80,
)
FullMatchFactory(
start_date_time=timezone.localtime() + timedelta(days=1),
home_team_match__score=0,
away_team_match__score=0,
)
with self.subTest("when all matches have results or haven't been played"):
earliest_date_time_without_results = (
Match.earliest_date_time_without_results()
)
self.assertIsNone(earliest_date_time_without_results)
played_resultless = FullMatchFactory(
start_date_time=timezone.localtime() - timedelta(days=1),
home_team_match__score=0,
away_team_match__score=0,
)
earliest_date_time_without_results = Match.earliest_date_time_without_results()
self.assertEqual(
played_resultless.start_date_time, earliest_date_time_without_results
)
@patch("server.models.match.Match.update_result")
def test_update_results(self, mock_update_result):
match_results = data_factories.fake_match_results_data()
calls = []
for _idx, match_result in match_results.iterrows():
FullMatchFactory(
home_team_match__score=0,
away_team_match__score=0,
start_date_time=match_result["date"],
round_number=match_result["round_number"],
home_team_match__team__name=match_result["home_team"],
away_team_match__team__name=match_result["away_team"],
venue=match_result["venue"],
)
calls.append(call(match_result))
Match.update_results(match_results)
self.assertEqual(mock_update_result.call_count, len(match_results))
def test_update_result(self):
with self.subTest("When the match hasn't been played yet"):
match = FullMatchFactory(
future=True,
with_predictions=True,
home_team_match__score=0,
away_team_match__score=0,
)
match.update_result(pd.DataFrame())
# It doesn't update match scores
score_sum = sum(match.teammatch_set.values_list("score", flat=True))
self.assertEqual(score_sum, 0)
# It doesn't update prediction correctness
self.assertEqual(
match.prediction_set.filter(is_correct__in=[True, False]).count(),
0,
)
# It doesn't update match winner or margin
self.assertIsNone(match.winner)
self.assertIsNone(match.margin)
with self.subTest("When the match doesn't have results yet"):
with self.subTest("and has been played within the last week"):
yesterday = timezone.now() - timedelta(days=1)
match = FullMatchFactory(
with_predictions=True,
start_date_time=yesterday,
home_team_match__score=0,
away_team_match__score=0,
prediction__is_correct=None,
prediction_two__is_correct=None,
)
match.winner = None
match.margin = None
match.update_result( | pd.DataFrame() | pandas.DataFrame |
import os
import numpy as np
import pandas as pd
import nltk
class Sentiment():
def __init__(self, data_rpath='data/'):
# download nltk tokenizer
nltk.download('punkt')
# load data
self._load_data(os.path.join(data_rpath, 'corpus.csv'))
# build dictionary from data
dictionary_path = os.path.join(data_rpath, 'dictionary.csv')
# if not os.path.exists(dictionary_path):
self._build_dictionary(np.concatenate([self.x_train, self.x_test, self.x_val]), dictionary_path)
self.dictionary = self._load_dictionary(dictionary_path)
def _load_data(self, path, val_size=100, test_size=100):
data = | pd.read_csv(path, sep='\t', header=None) | pandas.read_csv |
# Copyright 2019, by the California Institute of Technology.
# ALL RIGHTS RESERVED. United States Government Sponsorship acknowledged.
# Any commercial use must be negotiated with the Office of Technology
# Transfer at the California Institute of Technology.
#
# This software may be subject to U.S. export control laws. By accepting
# this software, the user agrees to comply with all applicable U.S. export
# laws and regulations. User has the responsibility to obtain export
# licenses, or other export authority as may be required before exporting
# such information to foreign countries or providing access to foreign
# persons.
"""
==============
test_subset.py
==============
Test the subsetter functionality.
"""
import json
import operator
import os
import shutil
import tempfile
import unittest
from os import listdir
from os.path import dirname, join, realpath, isfile, basename
import geopandas as gpd
import importlib_metadata
import netCDF4 as nc
import numpy as np
import pandas as pd
import pytest
import xarray as xr
from jsonschema import validate
from shapely.geometry import Point
from podaac.subsetter import subset
from podaac.subsetter.subset import SERVICE_NAME
from podaac.subsetter import xarray_enhancements as xre
class TestSubsetter(unittest.TestCase):
"""
Unit tests for the L2 subsetter. These tests are all related to the
subsetting functionality itself, and should provide coverage on the
following files:
- podaac.subsetter.subset.py
- podaac.subsetter.xarray_enhancements.py
"""
@classmethod
def setUpClass(cls):
cls.test_dir = dirname(realpath(__file__))
cls.test_data_dir = join(cls.test_dir, 'data')
cls.subset_output_dir = tempfile.mkdtemp(dir=cls.test_data_dir)
cls.test_files = [f for f in listdir(cls.test_data_dir)
if isfile(join(cls.test_data_dir, f)) and f.endswith(".nc")]
cls.history_json_schema = {
"$schema": "https://json-schema.org/draft/2020-12/schema",
"$id": "https://harmony.earthdata.nasa.gov/history.schema.json",
"title": "Data Processing History",
"description": "A history record of processing that produced a given data file. For more information, see: https://wiki.earthdata.nasa.gov/display/TRT/In-File+Provenance+Metadata+-+TRT-42",
"type": ["array", "object"],
"items": {"$ref": "#/definitions/history_record"},
"definitions": {
"history_record": {
"type": "object",
"properties": {
"date_time": {
"description": "A Date/Time stamp in ISO-8601 format, including time-zone, GMT (or Z) preferred",
"type": "string",
"format": "date-time"
},
"derived_from": {
"description": "List of source data files used in the creation of this data file",
"type": ["array", "string"],
"items": {"type": "string"}
},
"program": {
"description": "The name of the program which generated this data file",
"type": "string"
},
"version": {
"description": "The version identification of the program which generated this data file",
"type": "string"
},
"parameters": {
"description": "The list of parameters to the program when generating this data file",
"type": ["array", "string"],
"items": {"type": "string"}
},
"program_ref": {
"description": "A URL reference that defines the program, e.g., a UMM-S reference URL",
"type": "string"
},
"$schema": {
"description": "The URL to this schema",
"type": "string"
}
},
"required": ["date_time", "program"],
"additionalProperties": False
}
}
}
@classmethod
def tearDownClass(cls):
# Remove the temporary directories used to house subset data
shutil.rmtree(cls.subset_output_dir)
def test_subset_variables(self):
"""
Test that all variables present in the original NetCDF file
are present after the subset takes place, and with the same
attributes.
"""
bbox = np.array(((-180, 90), (-90, 90)))
for file in self.test_files:
output_file = "{}_{}".format(self._testMethodName, file)
subset.subset(
file_to_subset=join(self.test_data_dir, file),
bbox=bbox,
output_file=join(self.subset_output_dir, output_file)
)
in_ds = xr.open_dataset(join(self.test_data_dir, file),
decode_times=False,
decode_coords=False)
out_ds = xr.open_dataset(join(self.subset_output_dir, output_file),
decode_times=False,
decode_coords=False)
for in_var, out_var in zip(in_ds.data_vars.items(), out_ds.data_vars.items()):
# compare names
assert in_var[0] == out_var[0]
# compare attributes
np.testing.assert_equal(in_var[1].attrs, out_var[1].attrs)
# compare type and dimension names
assert in_var[1].dtype == out_var[1].dtype
assert in_var[1].dims == out_var[1].dims
in_ds.close()
out_ds.close()
def test_subset_bbox(self):
"""
Test that all data present is within the bounding box given,
and that the correct bounding box is used. This test assumes
that the scanline *is* being cut.
"""
# pylint: disable=too-many-locals
bbox = np.array(((-180, 90), (-90, 90)))
for file in self.test_files:
output_file = "{}_{}".format(self._testMethodName, file)
subset.subset(
file_to_subset=join(self.test_data_dir, file),
bbox=bbox,
output_file=join(self.subset_output_dir, output_file)
)
out_ds = xr.open_dataset(join(self.subset_output_dir, output_file),
decode_times=False,
decode_coords=False,
mask_and_scale=False)
lat_var_name, lon_var_name = subset.get_coord_variable_names(out_ds)
lat_var_name = lat_var_name[0]
lon_var_name = lon_var_name[0]
lon_bounds, lat_bounds = subset.convert_bbox(bbox, out_ds, lat_var_name, lon_var_name)
lats = out_ds[lat_var_name].values
lons = out_ds[lon_var_name].values
np.warnings.filterwarnings('ignore')
# Step 1: Get mask of values which aren't in the bounds.
# For lon spatial condition, need to consider the
# lon_min > lon_max case. If that's the case, should do
# an 'or' instead.
oper = operator.and_ if lon_bounds[0] < lon_bounds[1] else operator.or_
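# e.g. a bbox crossing the antimeridian with lon bounds (170, -170)
# keeps points with lon >= 170 or lon <= -170, hence the 'or'.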
# In these two masks, True == valid and False == invalid
lat_truth = np.ma.masked_where((lats >= lat_bounds[0])
& (lats <= lat_bounds[1]), lats).mask
lon_truth = np.ma.masked_where(oper((lons >= lon_bounds[0]),
(lons <= lon_bounds[1])), lons).mask
# combine masks
spatial_mask = np.bitwise_and(lat_truth, lon_truth)
# Create a mask which represents the valid matrix bounds of
# the spatial mask. This is used in the case where a var
# has no _FillValue.
if lon_truth.ndim == 1:
bound_mask = spatial_mask
else:
rows = np.any(spatial_mask, axis=1)
cols = np.any(spatial_mask, axis=0)
bound_mask = np.array([[r & c for c in cols] for r in rows])
# If all the lat/lon values are valid, the file is valid and
# there is no need to check individual variables.
if np.all(spatial_mask):
continue
# Step 2: Get mask of values which are NaN or "_FillValue in
# each variable.
for _, var in out_ds.data_vars.items():
# remove dimension of '1' if necessary
vals = np.squeeze(var.values)
# Get the Fill Value
fill_value = var.attrs.get('_FillValue')
# If _FillValue isn't provided, check that all values
# are in the valid matrix bounds, then go to the next variable
if fill_value is None:
combined_mask = np.ma.mask_or(spatial_mask, bound_mask)
np.testing.assert_equal(bound_mask, combined_mask)
continue
# If the shapes of this var doesn't match the mask,
# reshape the var so the comparison can be made. Take
# the first index of the unknown dims. This makes
# assumptions about the ordering of the dimensions.
if vals.shape != out_ds[lat_var_name].shape and vals.shape:
slice_list = []
for dim in var.dims:
if dim in out_ds[lat_var_name].dims:
slice_list.append(slice(None))
else:
slice_list.append(slice(0, 1))
vals = np.squeeze(vals[tuple(slice_list)])
# In this mask, False == NaN and True = valid
var_mask = np.invert(np.ma.masked_invalid(vals).mask)
fill_mask = np.invert(np.ma.masked_values(vals, fill_value).mask)
var_mask = np.bitwise_and(var_mask, fill_mask)
# Step 3: Combine the spatial and var mask with 'or'
combined_mask = np.ma.mask_or(var_mask, spatial_mask)
# Step 4: compare the newly combined mask and the
# spatial mask created from the lat/lon masks. They
# should be equal, because the 'or' of the two masks
# where out-of-bounds values are 'False' will leave
# those values unchanged, assuming there are only NaN values
# in the data at those locations.
np.testing.assert_equal(spatial_mask, combined_mask)
out_ds.close()
@pytest.mark.skip(reason="This is being tested currently. Temporarily skipped.")
def test_subset_no_bbox(self):
"""
Test that the subsetted file is identical to the given file
when a 'full' bounding box is given.
"""
bbox = np.array(((-180, 180), (-90, 90)))
for file in self.test_files:
output_file = "{}_{}".format(self._testMethodName, file)
subset.subset(
file_to_subset=join(self.test_data_dir, file),
bbox=bbox,
output_file=join(self.subset_output_dir, output_file)
)
# pylint: disable=no-member
in_nc = nc.Dataset(join(self.test_data_dir, file), 'r')
out_nc = nc.Dataset(join(self.subset_output_dir, output_file), 'r')
# Make sure the output dimensions match the input
# dimensions, which means the full file was returned.
for name, dimension in in_nc.dimensions.items():
assert dimension.size == out_nc.dimensions[name].size
in_nc.close()
out_nc.close()
def test_subset_empty_bbox(self):
"""
Test that an empty file is returned when the bounding box
contains no data.
"""
bbox = np.array(((120, 125), (-90, -85)))
for file in self.test_files:
output_file = "{}_{}".format(self._testMethodName, file)
subset.subset(
file_to_subset=join(self.test_data_dir, file),
bbox=bbox,
output_file=join(self.subset_output_dir, output_file)
)
empty_dataset = xr.open_dataset(
join(self.subset_output_dir, output_file),
decode_times=False,
decode_coords=False,
mask_and_scale=False
)
# Ensure all variables are present but empty.
for variable_name, variable in empty_dataset.data_vars.items():
assert not variable.data
def test_bbox_conversion(self):
"""
Test that the bounding box conversion returns expected
results. Expected results are hand-calculated.
"""
ds_180 = xr.open_dataset(join(self.test_data_dir,
"MODIS_A-JPL-L2P-v2014.0.nc"),
decode_times=False,
decode_coords=False)
ds_360 = xr.open_dataset(join(
self.test_data_dir,
"ascat_20150702_084200_metopa_45145_eps_o_250_2300_ovw.l2.nc"),
decode_times=False,
decode_coords=False)
# Elements in each tuple are:
# ds type, lon_range, expected_result
test_bboxes = [
(ds_180, (-180, 180), (-180, 180)),
(ds_360, (-180, 180), (0, 360)),
(ds_180, (-180, 0), (-180, 0)),
(ds_360, (-180, 0), (180, 360)),
(ds_180, (-80, 80), (-80, 80)),
(ds_360, (-80, 80), (280, 80)),
(ds_180, (0, 180), (0, 180)),
(ds_360, (0, 180), (0, 180)),
(ds_180, (80, -80), (80, -80)),
(ds_360, (80, -80), (80, 280)),
(ds_180, (-80, -80), (-180, 180)),
(ds_360, (-80, -80), (0, 360))
]
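# On 0-360 datasets negative longitudes are shifted by +360
# (e.g. -80 -> 280); a degenerate range such as (-80, -80) expands to
# the full longitude span.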
lat_var = 'lat'
lon_var = 'lon'
for test_bbox in test_bboxes:
dataset = test_bbox[0]
lon_range = test_bbox[1]
expected_result = test_bbox[2]
actual_result, _ = subset.convert_bbox(np.array([lon_range, [0, 0]]),
dataset, lat_var, lon_var)
np.testing.assert_equal(actual_result, expected_result)
def compare_java(self, java_files, cut):
"""
Run the L2 subsetter and compare the result to the equivalent
legacy (Java) subsetter result.
Parameters
----------
java_files : list of strings
List of paths to each subsetted Java file.
cut : boolean
True if the subsetter should cut the scanline (compact output).
"""
bbox_map = [("ascat_20150702_084200", ((-180, 0), (-90, 0))),
("ascat_20150702_102400", ((-180, 0), (-90, 0))),
("MODIS_A-JPL", ((65.8, 86.35), (40.1, 50.15))),
("MODIS_T-JPL", ((-78.7, -60.7), (-54.8, -44))),
("VIIRS", ((-172.3, -126.95), (62.3, 70.65))),
("AMSR2-L2B_v08_r38622", ((-180, 0), (-90, 0)))]
for file_str, bbox in bbox_map:
java_file = [file for file in java_files if file_str in file][0]
test_file = [file for file in self.test_files if file_str in file][0]
output_file = "{}_{}".format(self._testMethodName, test_file)
subset.subset(
file_to_subset=join(self.test_data_dir, test_file),
bbox=np.array(bbox),
output_file=join(self.subset_output_dir, output_file),
cut=cut
)
j_ds = xr.open_dataset(join(self.test_data_dir, java_file),
decode_times=False,
decode_coords=False,
mask_and_scale=False)
py_ds = xr.open_dataset(join(self.subset_output_dir, output_file),
decode_times=False,
decode_coords=False,
mask_and_scale=False)
for var_name, var in j_ds.data_vars.items():
# Compare shape
np.testing.assert_equal(var.shape, py_ds[var_name].shape)
# Compare meta
np.testing.assert_equal(var.attrs, py_ds[var_name].attrs)
# Compare data
np.testing.assert_equal(var.values, py_ds[var_name].values)
# Compare meta. History will always be different, so remove
# from the headers for comparison.
del j_ds.attrs['history']
del py_ds.attrs['history']
del py_ds.attrs['history_json']
np.testing.assert_equal(j_ds.attrs, py_ds.attrs)
def test_compare_java_compact(self):
"""
Tests that the results of the subsetting operation are
equivalent to the Java subsetting result on the same bounding
box. For simplicity the subsetted Java granules have been
manually run and copied into this project. This test DOES
cut the scanline.
"""
java_result_files = [join("java_results", "cut", f) for f in
listdir(join(self.test_data_dir, "java_results", "cut")) if
isfile(join(self.test_data_dir, "java_results", "cut", f))
and f.endswith(".nc")]
self.compare_java(java_result_files, cut=True)
def test_compare_java(self):
"""
Tests that the results of the subsetting operation are
equivalent to the Java subsetting result on the same bounding
box. For simplicity the subsetted Java granules have been
manually run and copied into this project. This runs does NOT
cut the scanline.
"""
java_result_files = [join("java_results", "uncut", f) for f in
listdir(join(self.test_data_dir, "java_results", "uncut")) if
isfile(join(self.test_data_dir, "java_results", "uncut", f))
and f.endswith(".nc")]
self.compare_java(java_result_files, cut=False)
def test_history_metadata_append(self):
"""
Tests that the history metadata header is appended to when it
already exists.
"""
test_file = next(filter(
lambda f: '20180101005944-REMSS-L2P_GHRSST-SSTsubskin-AMSR2-L2B_rt_r29918-v02.0-fv01.0.nc' in f
, self.test_files))
output_file = "{}_{}".format(self._testMethodName, test_file)
subset.subset(
file_to_subset=join(self.test_data_dir, test_file),
bbox=np.array(((-180, 180), (-90.0, 90))),
output_file=join(self.subset_output_dir, output_file)
)
in_nc = xr.open_dataset(join(self.test_data_dir, test_file))
out_nc = xr.open_dataset(join(self.subset_output_dir, output_file))
# Assert that the original granule contains history
assert in_nc.attrs.get('history') is not None
# Assert that input and output files have different history
self.assertNotEqual(in_nc.attrs['history'], out_nc.attrs['history'])
# Assert that last line of history was created by this service
assert SERVICE_NAME in out_nc.attrs['history'].split('\n')[-1]
# Assert that the old history is still in the subsetted granule
assert in_nc.attrs['history'] in out_nc.attrs['history']
def test_history_metadata_create(self):
"""
Tests that the history metadata header is created when it does
not exist. All test granules contain this header already, so
for this test the header will be removed manually from a granule.
"""
test_file = next(filter(
lambda f: '20180101005944-REMSS-L2P_GHRSST-SSTsubskin-AMSR2-L2B_rt_r29918-v02.0-fv01.0.nc' in f
, self.test_files))
output_file = "{}_{}".format(self._testMethodName, test_file)
# Remove the 'history' metadata from the granule
in_nc = xr.open_dataset(join(self.test_data_dir, test_file))
del in_nc.attrs['history']
in_nc.to_netcdf(join(self.subset_output_dir, 'int_{}'.format(output_file)), 'w')
subset.subset(
file_to_subset=join(self.subset_output_dir, "int_{}".format(output_file)),
bbox=np.array(((-180, 180), (-90.0, 90))),
output_file=join(self.subset_output_dir, output_file)
)
out_nc = xr.open_dataset(join(self.subset_output_dir, output_file))
# Assert that the input granule contains no history
assert in_nc.attrs.get('history') is None
# Assert that the history was created by this service
assert SERVICE_NAME in out_nc.attrs['history']
# Assert that the history created by this service is the only
# line present in the history.
assert '\n' not in out_nc.attrs['history']
def test_specified_variables(self):
"""
Test that the variables which are specified when calling the subset
operation are present in the resulting subsetted data file,
and that the variables which are not specified are absent.
"""
bbox = np.array(((-180, 90), (-90, 90)))
for file in self.test_files:
output_file = "{}_{}".format(self._testMethodName, file)
in_ds = xr.open_dataset(join(self.test_data_dir, file),
decode_times=False,
decode_coords=False)
included_variables = set([variable[0] for variable in in_ds.data_vars.items()][::2])
included_variables = list(included_variables)
excluded_variables = list(set(variable[0] for variable in in_ds.data_vars.items())
- set(included_variables))
subset.subset(
file_to_subset=join(self.test_data_dir, file),
bbox=bbox,
output_file=join(self.subset_output_dir, output_file),
variables=included_variables
)
# Get coord variables
lat_var_names, lon_var_names = subset.get_coord_variable_names(in_ds)
lat_var_name = lat_var_names[0]
lon_var_name = lon_var_names[0]
time_var_name = subset.get_time_variable_name(in_ds, in_ds[lat_var_name])
included_variables.append(lat_var_name)
included_variables.append(lon_var_name)
included_variables.append(time_var_name)
included_variables.extend(in_ds.coords.keys())
if lat_var_name in excluded_variables:
excluded_variables.remove(lat_var_name)
if lon_var_name in excluded_variables:
excluded_variables.remove(lon_var_name)
if time_var_name in excluded_variables:
excluded_variables.remove(time_var_name)
out_ds = xr.open_dataset(join(self.subset_output_dir, output_file),
decode_times=False,
decode_coords=False)
out_vars = [out_var for out_var in out_ds.data_vars.keys()]
out_vars.extend(out_ds.coords.keys())
assert set(out_vars) == set(included_variables)
assert set(out_vars).isdisjoint(excluded_variables)
in_ds.close()
out_ds.close()
def test_calculate_chunks(self):
"""
Test that the calculate chunks function in the subset module
correctly calculates and returns the chunks dims dictionary.
"""
rs = np.random.RandomState(0)
dataset = xr.DataArray(
rs.randn(2, 4000, 4001),
dims=['x', 'y', 'z']
).to_dataset(name='foo')
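# Based on the assertions below, only the dimension larger than 4000
# elements ('z', size 4001) should be assigned a chunk size.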
chunk_dict = subset.calculate_chunks(dataset)
assert chunk_dict.get('x') is None
assert chunk_dict.get('y') is None
assert chunk_dict.get('z') == 4000
def test_missing_coord_vars(self):
"""
As of right now, the subsetter expects the data to contain lat
and lon variables. If not present, an error is thrown.
"""
file = 'MODIS_T-JPL-L2P-v2014.0.nc'
ds = xr.open_dataset(join(self.test_data_dir, file),
decode_times=False,
decode_coords=False,
mask_and_scale=False)
# Manually remove var which will cause error when attempting
# to subset.
ds = ds.drop_vars(['lat'])
output_file = '{}_{}'.format('missing_coords', file)
ds.to_netcdf(join(self.subset_output_dir, output_file))
bbox = np.array(((-180, 180), (-90, 90)))
with pytest.raises(ValueError):
subset.subset(
file_to_subset=join(self.subset_output_dir, output_file),
bbox=bbox,
output_file=''
)
def test_data_1D(self):
"""
Test that subsetting a 1-D granule does not result in failure.
"""
merged_jason_filename = 'JA1_GPN_2PeP001_002_20020115_060706_20020115_070316.nc'
output_file = "{}_{}".format(self._testMethodName, merged_jason_filename)
subset.subset(
file_to_subset=join(self.test_data_dir, merged_jason_filename),
bbox=np.array(((-180, 0), (-90, 0))),
output_file=join(self.subset_output_dir, output_file)
)
xr.open_dataset(join(self.subset_output_dir, output_file))
def test_get_coord_variable_names(self):
"""
Test that the expected coord variable names are returned
"""
file = 'MODIS_T-JPL-L2P-v2014.0.nc'
ds = xr.open_dataset(join(self.test_data_dir, file),
decode_times=False,
decode_coords=False,
mask_and_scale=False)
old_lat_var_name = 'lat'
old_lon_var_name = 'lon'
lat_var_name, lon_var_name = subset.get_coord_variable_names(ds)
assert lat_var_name[0] == old_lat_var_name
assert lon_var_name[0] == old_lon_var_name
new_lat_var_name = 'latitude'
new_lon_var_name = 'x'
ds = ds.rename({old_lat_var_name: new_lat_var_name,
old_lon_var_name: new_lon_var_name})
lat_var_name, lon_var_name = subset.get_coord_variable_names(ds)
assert lat_var_name[0] == new_lat_var_name
assert lon_var_name[0] == new_lon_var_name
def test_cannot_get_coord_variable_names(self):
"""
Test that, when given a dataset with coord vars which are not
expected, a ValueError is raised.
"""
file = 'MODIS_T-JPL-L2P-v2014.0.nc'
ds = xr.open_dataset(join(self.test_data_dir, file),
decode_times=False,
decode_coords=False,
mask_and_scale=False)
old_lat_var_name = 'lat'
new_lat_var_name = 'foo'
ds = ds.rename({old_lat_var_name: new_lat_var_name})
# Remove 'coordinates' attribute
for var_name, var in ds.items():
if 'coordinates' in var.attrs:
del var.attrs['coordinates']
self.assertRaises(ValueError, subset.get_coord_variable_names, ds)
def test_get_spatial_bounds(self):
"""
Test that the get_spatial_bounds function works as expected.
The get_spatial_bounds function should return lat/lon min/max
which are masked and scaled for both variables. The values
should also be adjusted for -180,180/-90,90 coordinate types
"""
ascat_filename = 'ascat_20150702_084200_metopa_45145_eps_o_250_2300_ovw.l2.nc'
ghrsst_filename = '20190927000500-JPL-L2P_GHRSST-SSTskin-MODIS_A-D-v02.0-fv01.0.nc'
ascat_dataset = xr.open_dataset(
join(self.test_data_dir, ascat_filename),
decode_times=False,
decode_coords=False,
mask_and_scale=False
)
ghrsst_dataset = xr.open_dataset(
join(self.test_data_dir, ghrsst_filename),
decode_times=False,
decode_coords=False,
mask_and_scale=False
)
# ascat longitude is 0-360, ghrsst modis A is -180 to 180
# Both have metadata for valid_min
# Manually calculated spatial bounds
ascat_expected_lat_min = -89.4
ascat_expected_lat_max = 89.2
ascat_expected_lon_min = -180.0
ascat_expected_lon_max = 180.0
ghrsst_expected_lat_min = -77.2
ghrsst_expected_lat_max = -53.6
ghrsst_expected_lon_min = -170.5
ghrsst_expected_lon_max = -101.7
min_lon, max_lon, min_lat, max_lat = subset.get_spatial_bounds(
dataset=ascat_dataset,
lat_var_names=['lat'],
lon_var_names=['lon']
).flatten()
assert np.isclose(min_lat, ascat_expected_lat_min)
assert np.isclose(max_lat, ascat_expected_lat_max)
assert np.isclose(min_lon, ascat_expected_lon_min)
assert np.isclose(max_lon, ascat_expected_lon_max)
# Remove the label from the dataset coordinate variables indicating the valid_min.
del ascat_dataset['lat'].attrs['valid_min']
del ascat_dataset['lon'].attrs['valid_min']
min_lon, max_lon, min_lat, max_lat = subset.get_spatial_bounds(
dataset=ascat_dataset,
lat_var_names=['lat'],
lon_var_names=['lon']
).flatten()
assert np.isclose(min_lat, ascat_expected_lat_min)
assert np.isclose(max_lat, ascat_expected_lat_max)
assert np.isclose(min_lon, ascat_expected_lon_min)
assert np.isclose(max_lon, ascat_expected_lon_max)
# Repeat test, but with GHRSST granule
min_lon, max_lon, min_lat, max_lat = subset.get_spatial_bounds(
dataset=ghrsst_dataset,
lat_var_names=['lat'],
lon_var_names=['lon']
).flatten()
assert np.isclose(min_lat, ghrsst_expected_lat_min)
assert np.isclose(max_lat, ghrsst_expected_lat_max)
assert np.isclose(min_lon, ghrsst_expected_lon_min)
assert np.isclose(max_lon, ghrsst_expected_lon_max)
# Remove the label from the dataset coordinate variables indicating the valid_min.
del ghrsst_dataset['lat'].attrs['valid_min']
del ghrsst_dataset['lon'].attrs['valid_min']
min_lon, max_lon, min_lat, max_lat = subset.get_spatial_bounds(
dataset=ghrsst_dataset,
lat_var_names=['lat'],
lon_var_names=['lon']
).flatten()
assert np.isclose(min_lat, ghrsst_expected_lat_min)
assert np.isclose(max_lat, ghrsst_expected_lat_max)
assert np.isclose(min_lon, ghrsst_expected_lon_min)
assert np.isclose(max_lon, ghrsst_expected_lon_max)
def test_shapefile_subset(self):
"""
Test that using a shapefile to subset data instead of a bbox
works as expected
"""
shapefile = 'test.shp'
ascat_filename = 'ascat_20150702_084200_metopa_45145_eps_o_250_2300_ovw.l2.nc'
output_filename = f'{self._testMethodName}_{ascat_filename}'
shapefile_file_path = join(self.test_data_dir, 'test_shapefile_subset', shapefile)
ascat_file_path = join(self.test_data_dir, ascat_filename)
output_file_path = join(self.subset_output_dir, output_filename)
subset.subset(
file_to_subset=ascat_file_path,
bbox=None,
output_file=output_file_path,
shapefile=shapefile_file_path
)
# Check that each point of data is within the shapefile
shapefile_df = gpd.read_file(shapefile_file_path)
with xr.open_dataset(output_file_path) as result_dataset:
def in_shape(lon, lat):
if np.isnan(lon) or np.isnan(lat):
return
point = Point(lon, lat)
point_in_shapefile = shapefile_df.contains(point)
assert point_in_shapefile[0]
in_shape_vec = np.vectorize(in_shape)
in_shape_vec(result_dataset.lon, result_dataset.lat)
def test_variable_subset_oco2(self):
"""
variable subsets for groups and root group using a '/'
"""
oco2_file_name = 'oco2_LtCO2_190201_B10206Ar_200729175909s.nc4'
output_file_name = 'oco2_test_out.nc'
shutil.copyfile(os.path.join(self.test_data_dir, 'OCO2', oco2_file_name),
os.path.join(self.subset_output_dir, oco2_file_name))
bbox = np.array(((-180,180),(-90.0,90)))
variables = ['/xco2','/xco2_quality_flag','/Retrieval/water_height','/sounding_id']
subset.subset(
file_to_subset=join(self.test_data_dir, 'OCO2',oco2_file_name),
bbox=bbox,
variables=variables,
output_file=join(self.subset_output_dir, output_file_name),
)
out_nc = nc.Dataset(join(self.subset_output_dir, output_file_name))
var_listout = list(out_nc.groups['Retrieval'].variables.keys())
assert ('water_height' in var_listout)
def test_variable_subset_oco3(self):
"""
multiple variable subset of variables in different groups in oco3
"""
oco3_file_name = 'oco3_LtSIF_200226_B10206r_200709053505s.nc4'
output_file_name = 'oco3_test_out.nc'
shutil.copyfile(os.path.join(self.test_data_dir, 'OCO3/OCO3_L2_LITE_SIF.EarlyR', oco3_file_name),
os.path.join(self.subset_output_dir, oco3_file_name))
bbox = np.array(((-180,180),(-90.0,90)))
variables = ['/Science/IGBP_index', '/Offset/SIF_Relative_SDev_757nm','/Meteo/temperature_skin']
subset.subset(
file_to_subset=join(self.test_data_dir, 'OCO3/OCO3_L2_LITE_SIF.EarlyR',oco3_file_name),
bbox=bbox,
variables=variables,
output_file=join(self.subset_output_dir, output_file_name),
)
out_nc = nc.Dataset(join(self.subset_output_dir, output_file_name))
var_listout =list(out_nc.groups['Science'].variables.keys())
var_listout.extend(list(out_nc.groups['Offset'].variables.keys()))
var_listout.extend(list(out_nc.groups['Meteo'].variables.keys()))
assert ('IGBP_index' in var_listout)
assert ('SIF_Relative_SDev_757nm' in var_listout)
assert ('temperature_skin' in var_listout)
def test_variable_subset_s6(self):
"""
multiple variable subset of variables in different groups in Sentinel-6
"""
s6_file_name = 'S6A_P4_2__LR_STD__ST_002_140_20201207T011501_20201207T013023_F00.nc'
output_file_name = 's6_test_out.nc'
shutil.copyfile(os.path.join(self.test_data_dir, 'sentinel_6', s6_file_name),
os.path.join(self.subset_output_dir, s6_file_name))
bbox = np.array(((-180,180),(-90.0,90)))
variables = ['/data_01/ku/range_ocean_mle3_rms', '/data_20/ku/range_ocean']
subset.subset(
file_to_subset=join(self.test_data_dir, 'sentinel_6',s6_file_name),
bbox=bbox,
variables=variables,
output_file=join(self.subset_output_dir, output_file_name),
)
out_nc = nc.Dataset(join(self.subset_output_dir, output_file_name))
var_listout =list(out_nc.groups['data_01'].groups['ku'].variables.keys())
var_listout.extend(list(out_nc.groups['data_20'].groups['ku'].variables.keys()))
assert ('range_ocean_mle3_rms' in var_listout)
assert ('range_ocean' in var_listout)
def test_transform_grouped_dataset(self):
"""
Test that the transformation function results in a correctly
formatted dataset.
"""
s6_file_name = 'S6A_P4_2__LR_STD__ST_002_140_20201207T011501_20201207T013023_F00.nc'
shutil.copyfile(os.path.join(self.test_data_dir, 'sentinel_6', s6_file_name),
os.path.join(self.subset_output_dir, s6_file_name))
nc_ds = nc.Dataset(os.path.join(self.test_data_dir, 'sentinel_6', s6_file_name))
nc_ds_transformed = subset.transform_grouped_dataset(
nc.Dataset(os.path.join(self.subset_output_dir, s6_file_name), 'r'),
os.path.join(self.subset_output_dir, s6_file_name)
)
# The original ds has groups
assert nc_ds.groups
# There should be no groups in the new ds
assert not nc_ds_transformed.groups
# The original ds has no variables in the root group
assert not nc_ds.variables
# The new ds has variables in the root group
assert nc_ds_transformed.variables
# Each var in the new ds should map to a variable in the old ds
for var_name, var in nc_ds_transformed.variables.items():
path = var_name.strip('__').split('__')
group = nc_ds[path[0]]
for g in path[1:-1]:
group = group[g]
assert var_name.strip('__').split('__')[-1] in group.variables.keys()
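# Illustrative note (not part of the original test): transform_grouped_dataset flattens the
# group hierarchy into the root group by joining the group path with the GROUP_DELIM string
# '__', so a variable such as /data_01/ku/range_ocean is expected to appear in the flattened
# dataset under a name like '__data_01__ku__range_ocean'.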
def test_group_subset(self):
"""
Ensure a subset function can be run on a granule that contains
groups without errors, and that the subsetted data is within
the given spatial bounds.
"""
s6_file_name = 'S6A_P4_2__LR_STD__ST_002_140_20201207T011501_20201207T013023_F00.nc'
s6_output_file_name = 'SS_S6A_P4_2__LR_STD__ST_002_140_20201207T011501_20201207T013023_F00.nc'
# Copy S6 file to temp dir
shutil.copyfile(
os.path.join(self.test_data_dir, 'sentinel_6', s6_file_name),
os.path.join(self.subset_output_dir, s6_file_name)
)
# Make sure it runs without errors
bbox = np.array(((150, 180), (-90, -50)))
bounds = subset.subset(
file_to_subset=os.path.join(self.subset_output_dir, s6_file_name),
bbox=bbox,
output_file=os.path.join(self.subset_output_dir, s6_output_file_name)
)
# Check that bounds are within requested bbox
assert bounds[0][0] >= bbox[0][0]
assert bounds[0][1] <= bbox[0][1]
assert bounds[1][0] >= bbox[1][0]
assert bounds[1][1] <= bbox[1][1]
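# Note (assumption for readability): in these tests the bbox array is ordered as
# ((lon_min, lon_max), (lat_min, lat_max)), so bounds[0] is compared against the
# longitude range and bounds[1] against the latitude range.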
def test_json_history_metadata_append(self):
"""
Tests that the json history metadata header is appended to when it
already exists. First we create a fake json_history header for the input file.
"""
test_file = next(filter(
lambda f: '20180101005944-REMSS-L2P_GHRSST-SSTsubskin-AMSR2-L2B_rt_r29918-v02.0-fv01.0.nc' in f
, self.test_files))
output_file = "{}_{}".format(self._testMethodName, test_file)
input_file_subset = join(self.subset_output_dir, "int_{}".format(output_file))
fake_history = [
{
"date_time": "2021-05-10T14:30:24.553263",
"derived_from": basename(input_file_subset),
"program": SERVICE_NAME,
"version": importlib_metadata.distribution(SERVICE_NAME).version,
"parameters": "bbox=[[-180.0, 180.0], [-90.0, 90.0]] cut=True",
"program_ref": "https://cmr.earthdata.nasa.gov:443/search/concepts/S1962070864-POCLOUD",
"$schema": "https://harmony.earthdata.nasa.gov/schemas/history/0.1.0/history-v0.1.0.json"
}
]
in_nc = xr.open_dataset(join(self.test_data_dir, test_file))
in_nc.attrs['history_json'] = json.dumps(fake_history)
in_nc.to_netcdf(join(self.subset_output_dir, 'int_{}'.format(output_file)), 'w')
subset.subset(
file_to_subset=input_file_subset,
bbox=np.array(((-180, 180), (-90.0, 90))),
output_file=join(self.subset_output_dir, output_file)
)
out_nc = xr.open_dataset(join(self.subset_output_dir, output_file))
history_json = json.loads(out_nc.attrs['history_json'])
assert len(history_json) == 2
is_valid_schema = validate(instance=history_json, schema=self.history_json_schema)
assert is_valid_schema is None
for history in history_json:
assert "date_time" in history
assert history.get('program') == SERVICE_NAME
assert history.get('derived_from') == basename(input_file_subset)
assert history.get('version') == importlib_metadata.distribution(SERVICE_NAME).version
assert history.get('parameters') == 'bbox=[[-180.0, 180.0], [-90.0, 90.0]] cut=True'
assert history.get(
'program_ref') == "https://cmr.earthdata.nasa.gov:443/search/concepts/S1962070864-POCLOUD"
assert history.get(
'$schema') == "https://harmony.earthdata.nasa.gov/schemas/history/0.1.0/history-v0.1.0.json"
def test_json_history_metadata_create(self):
"""
Tests that the json history metadata header is created when it does
not exist. None of the test granules contain this header.
"""
test_file = next(filter(
lambda f: '20180101005944-REMSS-L2P_GHRSST-SSTsubskin-AMSR2-L2B_rt_r29918-v02.0-fv01.0.nc' in f
, self.test_files))
output_file = "{}_{}".format(self._testMethodName, test_file)
# Remove the 'history' metadata from the granule
in_nc = xr.open_dataset(join(self.test_data_dir, test_file))
in_nc.to_netcdf(join(self.subset_output_dir, 'int_{}'.format(output_file)), 'w')
input_file_subset = join(self.subset_output_dir, "int_{}".format(output_file))
subset.subset(
file_to_subset=input_file_subset,
bbox=np.array(((-180, 180), (-90.0, 90))),
output_file=join(self.subset_output_dir, output_file)
)
out_nc = xr.open_dataset(join(self.subset_output_dir, output_file))
history_json = json.loads(out_nc.attrs['history_json'])
assert len(history_json) == 1
is_valid_schema = validate(instance=history_json, schema=self.history_json_schema)
assert is_valid_schema is None
for history in history_json:
assert "date_time" in history
assert history.get('program') == SERVICE_NAME
assert history.get('derived_from') == basename(input_file_subset)
assert history.get('version') == importlib_metadata.distribution(SERVICE_NAME).version
assert history.get('parameters') == 'bbox=[[-180.0, 180.0], [-90.0, 90.0]] cut=True'
assert history.get(
'program_ref') == "https://cmr.earthdata.nasa.gov:443/search/concepts/S1962070864-POCLOUD"
assert history.get(
'$schema') == "https://harmony.earthdata.nasa.gov/schemas/history/0.1.0/history-v0.1.0.json"
def test_json_history_metadata_create_origin_source(self):
"""
Tests that the json history metadata header is created when it does
not exist. None of the test granules contain this header.
"""
test_file = next(filter(
lambda f: '20180101005944-REMSS-L2P_GHRSST-SSTsubskin-AMSR2-L2B_rt_r29918-v02.0-fv01.0.nc' in f
, self.test_files))
output_file = "{}_{}".format(self._testMethodName, test_file)
# Remove the 'history' metadata from the granule
in_nc = xr.open_dataset(join(self.test_data_dir, test_file))
in_nc.to_netcdf(join(self.subset_output_dir, 'int_{}'.format(output_file)), 'w')
input_file_subset = join(self.subset_output_dir, "int_{}".format(output_file))
subset.subset(
file_to_subset=input_file_subset,
bbox=np.array(((-180, 180), (-90.0, 90))),
output_file=join(self.subset_output_dir, output_file),
origin_source="fake_original_file.nc"
)
out_nc = xr.open_dataset(join(self.subset_output_dir, output_file))
history_json = json.loads(out_nc.attrs['history_json'])
assert len(history_json) == 1
is_valid_schema = validate(instance=history_json, schema=self.history_json_schema)
assert is_valid_schema is None
for history in history_json:
assert "date_time" in history
assert history.get('program') == SERVICE_NAME
assert history.get('derived_from') == "fake_original_file.nc"
assert history.get('version') == importlib_metadata.distribution(SERVICE_NAME).version
assert history.get('parameters') == 'bbox=[[-180.0, 180.0], [-90.0, 90.0]] cut=True'
assert history.get(
'program_ref') == "https://cmr.earthdata.nasa.gov:443/search/concepts/S1962070864-POCLOUD"
assert history.get(
'$schema') == "https://harmony.earthdata.nasa.gov/schemas/history/0.1.0/history-v0.1.0.json"
def test_temporal_subset_ascat(self):
"""
Test that a temporal subset results in a granule that only
contains times within the given bounds.
"""
bbox = np.array(((-180, 180), (-90, 90)))
file = 'ascat_20150702_084200_metopa_45145_eps_o_250_2300_ovw.l2.nc'
output_file = "{}_{}".format(self._testMethodName, file)
min_time = '2015-07-02T09:00:00'
max_time = '2015-07-02T10:00:00'
subset.subset(
file_to_subset=join(self.test_data_dir, file),
bbox=bbox,
output_file=join(self.subset_output_dir, output_file),
min_time=min_time,
max_time=max_time
)
in_ds = xr.open_dataset(join(self.test_data_dir, file),
decode_times=False,
decode_coords=False)
out_ds = xr.open_dataset(join(self.subset_output_dir, output_file),
decode_times=False,
decode_coords=False)
# Check that 'time' types match
assert in_ds.time.dtype == out_ds.time.dtype
in_ds.close()
out_ds.close()
# Check that all times are within the given bounds. Open
# dataset using 'decode_times=True' for auto-conversions to
# datetime
out_ds = xr.open_dataset(join(self.subset_output_dir, output_file),
decode_coords=False)
start_dt = subset.translate_timestamp(min_time)
end_dt = subset.translate_timestamp(max_time)
# All dates should be within the given temporal bounds.
assert (out_ds.time >= pd.to_datetime(start_dt)).all()
assert (out_ds.time <= pd.to_datetime(end_dt)).all()
def test_temporal_subset_modis_a(self):
"""
Test that a temporal subset results in a granule that only
contains times within the given bounds.
"""
bbox = np.array(((-180, 180), (-90, 90)))
file = 'MODIS_A-JPL-L2P-v2014.0.nc'
output_file = "{}_{}".format(self._testMethodName, file)
min_time = '2019-08-05T06:57:00'
max_time = '2019-08-05T06:58:00'
# Actual min is 2019-08-05T06:55:01.000000000
# Actual max is 2019-08-05T06:59:57.000000000
subset.subset(
file_to_subset=join(self.test_data_dir, file),
bbox=bbox,
output_file=join(self.subset_output_dir, output_file),
min_time=min_time,
max_time=max_time
)
in_ds = xr.open_dataset(join(self.test_data_dir, file),
decode_times=False,
decode_coords=False)
out_ds = xr.open_dataset(join(self.subset_output_dir, output_file),
decode_times=False,
decode_coords=False)
# Check that 'time' types match
assert in_ds.time.dtype == out_ds.time.dtype
in_ds.close()
out_ds.close()
# Check that all times are within the given bounds. Open
# dataset using 'decode_times=True' for auto-conversions to
# datetime
out_ds = xr.open_dataset(join(self.subset_output_dir, output_file),
decode_coords=False)
start_dt = subset.translate_timestamp(min_time)
end_dt = subset.translate_timestamp(max_time)
epoch_dt = out_ds['time'].values[0]
# All timedelta + epoch should be within the given temporal bounds.
assert out_ds.sst_dtime.min() + epoch_dt >= np.datetime64(start_dt)
assert out_ds.sst_dtime.min() + epoch_dt <= np.datetime64(end_dt)
def test_temporal_subset_s6(self):
"""
Test that a temporal subset results in a granule that only
contains times within the given bounds.
"""
bbox = np.array(((-180, 180), (-90, 90)))
file = 'S6A_P4_2__LR_STD__ST_002_140_20201207T011501_20201207T013023_F00.nc'
# Copy S6 file to temp dir
shutil.copyfile(
os.path.join(self.test_data_dir, 'sentinel_6', file),
os.path.join(self.subset_output_dir, file)
)
output_file = "{}_{}".format(self._testMethodName, file)
min_time = '2020-12-07T01:20:00'
max_time = '2020-12-07T01:25:00'
# Actual min is 2020-12-07T01:15:01.000000000
# Actual max is 2020-12-07T01:30:23.000000000
subset.subset(
file_to_subset=join(self.subset_output_dir, file),
bbox=bbox,
output_file=join(self.subset_output_dir, output_file),
min_time=min_time,
max_time=max_time
)
# Check that all times are within the given bounds. Open
# dataset using 'decode_times=True' for auto-conversions to
# datetime
out_ds = xr.open_dataset(
join(self.subset_output_dir, output_file),
decode_coords=False,
group='data_01'
)
start_dt = subset.translate_timestamp(min_time)
end_dt = subset.translate_timestamp(max_time)
# All dates should be within the given temporal bounds.
assert (out_ds.time >= pd.to_datetime(start_dt)).all()
assert (out_ds.time <= pd.to_datetime(end_dt)).all()
def test_get_time_variable_name(self):
for test_file in self.test_files:
args = {
'decode_coords': False,
'mask_and_scale': False,
'decode_times': True
}
ds = xr.open_dataset(os.path.join(self.test_data_dir, test_file), **args)
lat_var_name = subset.get_coord_variable_names(ds)[0][0]
time_var_name = subset.get_time_variable_name(ds, ds[lat_var_name])
assert time_var_name is not None
assert 'time' in time_var_name
def test_subset_jason(self):
bbox = np.array(((-180, 0), (-90, 90)))
file = 'JA1_GPN_2PeP001_002_20020115_060706_20020115_070316.nc'
output_file = "{}_{}".format(self._testMethodName, file)
min_time = "2002-01-15T06:07:06Z"
max_time = "2002-01-15T06:30:16Z"
subset.subset(
file_to_subset=os.path.join(self.test_data_dir, file),
bbox=bbox,
min_time=min_time,
max_time=max_time,
output_file=os.path.join(self.subset_output_dir, output_file)
)
def test_subset_size(self):
for file in self.test_files:
bbox = np.array(((-180, 0), (-30, 90)))
output_file = "{}_{}".format(self._testMethodName, file)
input_file_path = os.path.join(self.test_data_dir, file)
output_file_path = os.path.join(self.subset_output_dir, output_file)
subset.subset(
file_to_subset=input_file_path,
bbox=bbox,
output_file=output_file_path
)
original_file_size = os.path.getsize(input_file_path)
subset_file_size = os.path.getsize(output_file_path)
assert subset_file_size < original_file_size
def test_root_group(self):
"""test that the GROUP_DELIM string, '__', is added to variables in the root group"""
sndr_file_name = 'SNDR.SNPP.CRIMSS.20200118T0024.m06.g005.L2_CLIMCAPS_RET.std.v02_28.G.200314032326.nc'
shutil.copyfile(os.path.join(self.test_data_dir, 'SNDR', sndr_file_name),
os.path.join(self.subset_output_dir, sndr_file_name))
nc_dataset = nc.Dataset(os.path.join(self.subset_output_dir, sndr_file_name))
args = {
'decode_coords': False,
'mask_and_scale': False,
'decode_times': False
}
nc_dataset = subset.transform_grouped_dataset(nc_dataset, os.path.join(self.subset_output_dir, sndr_file_name))
with xr.open_dataset(
xr.backends.NetCDF4DataStore(nc_dataset),
**args
) as dataset:
var_list = list(dataset.variables)
assert (var_list[0][0:2] == subset.GROUP_DELIM)
group_lst = []
for var_name in dataset.variables.keys(): #need logic if there is data in the top level not in a group
group_lst.append('/'.join(var_name.split(subset.GROUP_DELIM)[:-1]))
group_lst = ['/' if group=='' else group for group in group_lst]
groups = set(group_lst)
expected_group = {'/mw', '/ave_kern', '/', '/mol_lay', '/aux'}
assert (groups == expected_group)
def test_get_time_squeeze(self):
"""test builtin squeeze method on the lat and time variables so
when the two have the same shape with a time and delta time in
the tropomi product granuales the get_time_variable_name returns delta time as well"""
tropomi_file_name = 'S5P_OFFL_L2__SO2____20200713T002730_20200713T020900_14239_01_020103_20200721T191355_subset.nc4'
shutil.copyfile(os.path.join(self.test_data_dir, 'tropomi', tropomi_file_name),
os.path.join(self.subset_output_dir, tropomi_file_name))
nc_dataset = nc.Dataset(os.path.join(self.subset_output_dir, tropomi_file_name))
args = {
'decode_coords': False,
'mask_and_scale': False,
'decode_times': False
}
nc_dataset = subset.transform_grouped_dataset(nc_dataset, os.path.join(self.subset_output_dir, tropomi_file_name))
with xr.open_dataset(
xr.backends.NetCDF4DataStore(nc_dataset),
**args
) as dataset:
lat_var_name = subset.get_coord_variable_names(dataset)[0][0]
time_var_name = subset.get_time_variable_name(dataset, dataset[lat_var_name])
lat_dims = dataset[lat_var_name].squeeze().dims
time_dims = dataset[time_var_name].squeeze().dims
assert (lat_dims == time_dims)
def test_get_indexers_nd(self):
"""test that the time coordinate is not included in the indexers. Also test that the dimensions are the same for
a global box subset"""
tropomi_file_name = 'S5P_OFFL_L2__SO2____20200713T002730_20200713T020900_14239_01_020103_20200721T191355_subset.nc4'
shutil.copyfile(os.path.join(self.test_data_dir, 'tropomi', tropomi_file_name),
os.path.join(self.subset_output_dir, tropomi_file_name))
nc_dataset = nc.Dataset(os.path.join(self.subset_output_dir, tropomi_file_name))
args = {
'decode_coords': False,
'mask_and_scale': False,
'decode_times': False
}
nc_dataset = subset.transform_grouped_dataset(nc_dataset, os.path.join(self.subset_output_dir, tropomi_file_name))
with xr.open_dataset(
xr.backends.NetCDF4DataStore(nc_dataset),
**args
) as dataset:
lat_var_name = subset.get_coord_variable_names(dataset)[0][0]
lon_var_name = subset.get_coord_variable_names(dataset)[1][0]
time_var_name = subset.get_time_variable_name(dataset, dataset[lat_var_name])
oper = operator.and_
cond = oper(
(dataset[lon_var_name] >= -180),
(dataset[lon_var_name] <= 180)
) & (dataset[lat_var_name] >= -90) & (dataset[lat_var_name] <= 90) & True
indexers = xre.get_indexers_from_nd(cond, True)
indexed_cond = cond.isel(**indexers)
indexed_ds = dataset.isel(**indexers)
new_dataset = indexed_ds.where(indexed_cond)
assert ((time_var_name not in indexers.keys()) == True) #time can't be in the index
assert (new_dataset.dims == dataset.dims)
def test_variable_type_string_oco2(self):
"""Code passes a ceating a variable that is type object in oco2 file"""
oco2_file_name = 'oco2_LtCO2_190201_B10206Ar_200729175909s.nc4'
output_file_name = 'oco2_test_out.nc'
shutil.copyfile(os.path.join(self.test_data_dir, 'OCO2', oco2_file_name),
os.path.join(self.subset_output_dir, oco2_file_name))
bbox = np.array(((-180,180),(-90.0,90)))
subset.subset(
file_to_subset=join(self.test_data_dir, 'OCO2',oco2_file_name),
bbox=bbox,
output_file=join(self.subset_output_dir, output_file_name),
)
in_nc = xr.open_dataset(join(self.test_data_dir, 'OCO2',oco2_file_name))
out_nc = xr.open_dataset(join(self.subset_output_dir, output_file_name))
assert (in_nc.variables['source_files'].dtype == out_nc.variables['source_files'].dtype)
def test_variable_dims_matched_tropomi(self):
"""
Code must match the dimensions for each variable rather than
assume all dimensions in a group are the same
"""
tropomi_file_name = 'S5P_OFFL_L2__SO2____20200713T002730_20200713T020900_14239_01_020103_20200721T191355_subset.nc4'
output_file_name = 'tropomi_test_out.nc'
shutil.copyfile(os.path.join(self.test_data_dir, 'tropomi', tropomi_file_name),
os.path.join(self.subset_output_dir, tropomi_file_name))
in_nc = nc.Dataset(os.path.join(self.subset_output_dir, tropomi_file_name))
# Get variable dimensions from input dataset
in_var_dims = {
var_name: [dim.split(subset.GROUP_DELIM)[-1] for dim in var.dimensions]
for var_name, var in in_nc.groups['PRODUCT'].variables.items()
}
# Include PRODUCT>SUPPORT_DATA>GEOLOCATIONS location
in_var_dims.update(
{
var_name: [dim.split(subset.GROUP_DELIM)[-1] for dim in var.dimensions]
for var_name, var in in_nc.groups['PRODUCT'].groups['SUPPORT_DATA'].groups['GEOLOCATIONS'].variables.items()
}
)
out_nc = subset.transform_grouped_dataset(
in_nc, os.path.join(self.subset_output_dir, tropomi_file_name)
)
# Get variable dimensions from output dataset
out_var_dims = {
var_name.split(subset.GROUP_DELIM)[-1]: [dim.split(subset.GROUP_DELIM)[-1] for dim in var.dimensions]
for var_name, var in out_nc.variables.items()
}
self.assertDictEqual(in_var_dims, out_var_dims)
def test_temporal_merged_topex(self):
"""
Test that a temporal subset results in a granule that only
contains times within the given bounds.
"""
bbox = np.array(((-180, 180), (-90, 90)))
file = 'Merged_TOPEX_Jason_OSTM_Jason-3_Cycle_002.V4_2.nc'
# Copy merged TOPEX/Jason file to temp dir
shutil.copyfile(
os.path.join(self.test_data_dir, file),
os.path.join(self.subset_output_dir, file)
)
output_file = "{}_{}".format(self._testMethodName, file)
min_time = '1992-01-01T00:00:00'
max_time = '1992-11-01T00:00:00'
subset.subset(
file_to_subset=join(self.subset_output_dir, file),
bbox=bbox,
output_file=join(self.subset_output_dir, output_file),
min_time=min_time,
max_time=max_time
)
# Check that all times are within the given bounds. Open
# dataset using 'decode_times=True' for auto-conversions to
# datetime
out_ds = xr.open_dataset(
join(self.subset_output_dir, output_file),
decode_coords=False
)
start_dt = subset.translate_timestamp(min_time)
end_dt = subset.translate_timestamp(max_time)
# delta time from the MJD of this data collection
mjd_dt = np.datetime64("1992-01-01")
start_delta_dt = np.datetime64(start_dt) - mjd_dt
end_delta_dt = np.datetime64(end_dt) - mjd_dt
# All dates should be within the given temporal bounds.
assert (out_ds.time.values >= start_delta_dt).all()
assert (out_ds.time.values <= end_delta_dt).all()
def test_temporal_variable_subset(self):
"""
Test that both a temporal and variable subset can be executed
on a granule, and that all of the data within that granule is
subsetted as expected.
"""
bbox = np.array(((-180, 180), (-90, 90)))
file = 'ascat_20150702_084200_metopa_45145_eps_o_250_2300_ovw.l2.nc'
output_file = "{}_{}".format(self._testMethodName, file)
min_time = '2015-07-02T09:00:00'
max_time = '2015-07-02T10:00:00'
variables = [
'wind_speed',
'wind_dir'
]
subset.subset(
file_to_subset=join(self.test_data_dir, file),
bbox=bbox,
output_file=join(self.subset_output_dir, output_file),
min_time=min_time,
max_time=max_time,
variables=variables
)
in_ds = xr.open_dataset(join(self.test_data_dir, file),
decode_times=False,
decode_coords=False)
out_ds = xr.open_dataset(join(self.subset_output_dir, output_file),
decode_times=False,
decode_coords=False)
# Check that 'time' types match
assert in_ds.time.dtype == out_ds.time.dtype
in_ds.close()
out_ds.close()
# Check that all times are within the given bounds. Open
# dataset using 'decode_times=True' for auto-conversions to
# datetime
out_ds = xr.open_dataset(join(self.subset_output_dir, output_file),
decode_coords=False)
start_dt = subset.translate_timestamp(min_time)
end_dt = subset.translate_timestamp(max_time)
# All dates should be within the given temporal bounds.
assert (out_ds.time >= pd.to_datetime(start_dt)).all()
assert (out_ds.time <= pd.to_datetime(end_dt)).all()
"""
Clean and format the texts in the data folder for learning.
Prerequisites:
nltk.download('stopwords')
nltk.download('punkt')
"""
from pathlib import Path
import numpy as np
import pandas as pd
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
STOP_WORDS = set(stopwords.words('english'))
DATA_PATH = Path('.') / '..' / 'data'
DATA_SOURCES = [
DATA_PATH / 'amaz' / 'amaz_neg.csv',
DATA_PATH / 'amaz' / 'amaz_pos.csv',
DATA_PATH / 'news' / 'news_neg.csv',
DATA_PATH / 'news' / 'news_pos.csv',
DATA_PATH / 'yelp' / 'yelp_neg.csv',
DATA_PATH / 'yelp' / 'yelp_pos.csv',
]
def clean(text: str) -> str:
# Build word tokenizations
words = word_tokenize(text)
# Remove non-alphabetic characters.
words = [w for w in words if w.isalpha()]
# Transform all words to lowercase.
words = [w.lower() for w in words]
# Remove all stopwords from the text.
words = [w for w in words if w not in STOP_WORDS]
# Join the text with spaces and return as a single string.
return ' '.join(words)
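# Illustrative example of the cleaning pipeline above (assumes 'the' is in the stopword set):
#   clean("The quick, brown fox jumps!")  ->  "quick brown fox jumps"
# (tokenise, drop non-alphabetic tokens, lowercase, drop stopwords, re-join with spaces)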
if __name__ == '__main__':
neg = pd.DataFrame() # negative sentences
pos = pd.DataFrame() # positive sentences
# Clean the data from each data source.
for src in DATA_SOURCES:
# Read in the data source as a DataFrame with Pandas.
df = pd.read_csv(src, header=None)
# Clean and replace each row of the data source.
for i, row in enumerate(df.values):
df.at[i, 0] = clean(row[0])  # single text column (read with header=None)
# Append the cleaned data set to the collection of negative or positive
# data depending on its sentiment.
if 'neg' in str(src):
neg = pd.concat([neg, df])
else:
pos = pd.concat([pos, df])
import h5py
import numpy as np
import pandas as pd
import os
import sys
import argparse
"""
Convert CME Trade Tick Data to hdf5 files indexed by date-time
Format allows for rapid read into pandas
"""
class ToHDF5:
def __init__(self):
self.usecols = [0,1,2,3,4]
self.names = ["date","time","price","qty","status"]
self.dtypes={'date':'str','time':'str','price':'float','qty':'int','status':'str'}
def convert(self, infile, outdir, lines):
if infile.split(".")[-1] == "h5":
print("ignoring file: %s already in h5 format" % infile)
return
df = pd.read_csv(infile,
usecols=self.usecols,
header=None,
names=self.names,
dtype=self.dtypes,
sep=",")
df['dt'] = df['date'] + " " + df['time']
df=df.drop(['date','time'], axis=1)
df['dt'] = pd.to_datetime(df['dt'],format="%m/%d/%Y %H:%M:%S.%f")
df = df.set_index('dt')
outfiles = self.chunkDataFrame(df, ToHDF5.getSymbol(infile), outdir, lines)
df = None
return outfiles
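# Illustrative usage sketch (file names are assumptions, not from the original script):
#   converter = ToHDF5()
#   converter.convert("ES_20200101_trades.csv", "out/", lines=1000000)
# convert() parses the date and time columns into a single 'dt' datetime index column
# before handing the DataFrame to chunkDataFrame for writing.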
@staticmethod
def getSymbol(infile):
return os.path.basename(infile).split("_")[0]
def splitFile(self, infile, outdir, lines, forceWrite=True):
df = pd.read_hdf(infile, 'table')
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from preprocessor import Preprocessor #importing task1 file
from character import CharacterAnalyser #importing task2 file
from word import WordAnalyser #importing task3 file
from visualiser import AnalysisVisualiser # importing task4 file
import sys
#Making the objects of the respective classes
p=Preprocessor()
c=CharacterAnalyser()
w=WordAnalyser()
#function to convert normal df into relative df
def rel_func(accept_df):
accept_df=accept_df
total=sum(accept_df['count']) #counts the total of the column with counts
accept_df['rel_freq']= (accept_df['count']/total) #calculate the relative frequency
return accept_df
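# Worked example for rel_func: if the 'count' column holds [2, 3, 5], then total = 10 and the
# added 'rel_freq' column is [0.2, 0.3, 0.5].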
#Below method performs required processing of data and return the relative frequency df
def perform_processing(input_file):
input_file=input_file
p.tokenise_word(input_file) #send the words to get tokenised
tokenised_punc_char_list=p.get_tokenised_punc_char_list() # retrieves the tokenised list which contains punctuation and characters
c.analyse_characters(tokenised_punc_char_list) #retrieves the punctuation frequency
#Required for task 4
pun_freq=c.get_punctuation_frequency() #store the punctuation frequency
letter_freq=c.get_letter_frequency() #store the letter frequency
analyse_words=p.get_tokenised_word_list() #get the tokensied word list
w.analyse_words(analyse_words) #send it for processing
#Required for task 4
stopword_freq=w.get_stopword_frequency() #store the stopword frequency
word_length_freq=w.get_word_length_frequency() #store the word length frequency
#relative freq of pun
pun_rel_freq=rel_func(pun_freq) #convert normal df into relative frequency df
pun_rel_freq.set_index('char', inplace=True)
pun_rel_freq=pun_rel_freq[['rel_freq']]
#relative freq of letter
letter_rel_freq=rel_func(letter_freq) #convert normal df into relative frequency df
letter_rel_freq.set_index('char', inplace=True)
letter_rel_freq=letter_rel_freq[['rel_freq']]
#relative freq of stop word
stopword_rel_freq=rel_func(stopword_freq) #convert normal df into relative frequency df
stopword_rel_freq.set_index('stop_word', inplace=True)
stopword_rel_freq=stopword_rel_freq[['rel_freq']]
#relative freq of word length
wordlen_rel_freq=rel_func(word_length_freq) #convert normal df into relative frequency df
wordlen_rel_freq.set_index('wordlen', inplace=True)
wordlen_rel_freq=wordlen_rel_freq[['rel_freq']]
return pun_rel_freq,letter_rel_freq,stopword_rel_freq,wordlen_rel_freq
#Below method is used to implement the visualisation
def visualise(selection,accept_stats_df):
if selection == 'pun': # if the visualisation is punctuation then proceed
a=AnalysisVisualiser(accept_stats_df)
a.visualise_punctuation_frequency()
elif selection == 'letter': # if the visualisation is letter then proceed
a=AnalysisVisualiser(accept_stats_df)
a.visualise_character_frequency()
elif selection == 'stopword': # if the visualisation is stop word then proceed
a=AnalysisVisualiser(accept_stats_df)
a.visualise_stopword_frequency()
else: # else the visualisation is word length and proceed
a=AnalysisVisualiser(accept_stats_df)
a.visualise_word_length_frequency()
def main():
try:
#Read the 6 files and store them
with open('Edward_II_Marlowe.tok', 'r') as input_file:
edward_inputfile = input_file.read()
input_file.close()
with open('Hamlet_Shakespeare.tok', 'r') as input_file:
hamplet_inputfile = input_file.read()
input_file.close()
with open('Henry_VI_Part1_Shakespeare.tok', 'r') as input_file:
henry_part1_inputfile = input_file.read()
input_file.close()
with open('Henry_VI_Part2_Shakespeare.tok', 'r') as input_file:
henry_part2_inputfile = input_file.read()
input_file.close()
with open('Jew_of_Malta_Marlowe.tok', 'r') as input_file:
jew_inputfile = input_file.read()
input_file.close()
with open('Richard_II_Shakespeare.tok', 'r') as input_file:
richard_inputfile = input_file.read()
input_file.close()
#in below step send the individual input file to processing and the return has respective frequency of statistics
edward_pun,edward_letter,edward_stopword,edward_wordlen=perform_processing(edward_inputfile)
hamlet_pun,hamlet_letter,hamlet_stopword,hamlet_wordlen=perform_processing(hamplet_inputfile)
henry_part1_pun,henry_part1_letter,henry_part1_stopword,henry_part1_wordlen=perform_processing(henry_part1_inputfile)
henry_part2_pun,henry_part2_letter,henry_part2_stopword,henry_part2_wordlen=perform_processing(henry_part2_inputfile)
jew_pun,jew_letter,jew_stopword,jew_wordlen=perform_processing(jew_inputfile)
richard_pun,richard_letter,richard_stopword,richard_wordlen=perform_processing(richard_inputfile)
# Merge total Letter from 6 files into single df and print
total_letter_df=pd.DataFrame()
total_letter_df=pd.concat([edward_letter,hamlet_letter,henry_part1_letter,henry_part2_letter,jew_letter,richard_letter],axis=1)
total_letter_df=total_letter_df.fillna(0) #converting all nan into 0
total_letter_df.columns=['Edward_II_Marlowe','Hamlet_Shakespeare','Henry_VI_Part1_Shakespeare',
'Henry_VI_Part2_Shakespeare','Jew_of_Malta_Marlowe','Richard_II_Shakespeare']
print("\n ---------Comparison of all letter types---------\n",total_letter_df)
# Merge total punctuation from 6 files into single df and print
total_pun_df=pd.DataFrame()
"""
Unit tests for draft.py
"""
# Standard libraries
import json
import random
# Third-party libraries
import pandas as pd
# Local libraries
from turkey_bowl.draft import Draft
def test_Draft_instantiation():
# Setup - none necessary
# Exercise
draft = Draft(2020)
# Verify
assert draft.year == 2020
assert draft.output_dir.as_posix() == "archive/2020"
assert draft.draft_order_path.as_posix() == "archive/2020/2020_draft_order.json"
assert draft.draft_sheet_path.as_posix() == "archive/2020/2020_draft_sheet.xlsx"
assert draft.__repr__() == "Draft(2020)"
assert draft.__str__() == "Turkey Bowl Draft: 2020"
# Cleanup - none necessary
def test_Draft_setup_nothing_exists(tmp_path, monkeypatch, capsys):
""" Test Draft.setup() when no directories exist in root. """
# Setup - create temp archive dir (assumed to always exist)
tmp_archive_dir = tmp_path.joinpath("archive")
tmp_archive_dir.mkdir()
# Ensure nothing exists prior to Draft.setup() call
assert tmp_archive_dir.joinpath("2020").exists() is False
assert tmp_archive_dir.joinpath("2020/2020_draft_order.json").exists() is False
assert tmp_archive_dir.joinpath("2020/2020_draft_sheet.xlsx").exists() is False
# Exercise
draft = Draft(2020)
# Override input() func to always return same list of participants
monkeypatch.setattr("builtins.input", lambda _: "logan, becca, dodd")
# Override output dirs to temp path created for testing
draft.output_dir = tmp_archive_dir.joinpath("2020")
draft.draft_order_path = tmp_archive_dir.joinpath("2020/2020_draft_order.json")
draft.draft_sheet_path = tmp_archive_dir.joinpath("2020/2020_draft_sheet.xlsx")
# Set random seed for draft order consistency in testing
random.seed(42)
draft.setup()
# Verify
assert draft.year == 2020
assert draft.output_dir == tmp_archive_dir.joinpath("2020")
assert draft.draft_order_path == tmp_archive_dir.joinpath(
"2020/2020_draft_order.json"
)
assert draft.draft_sheet_path == tmp_archive_dir.joinpath(
"2020/2020_draft_sheet.xlsx"
)
assert tmp_archive_dir.joinpath("2020").exists() is True
assert tmp_archive_dir.joinpath("2020/2020_draft_order.json").exists() is True
assert tmp_archive_dir.joinpath("2020/2020_draft_sheet.xlsx").exists() is True
assert draft.participant_list == ["logan", "becca", "dodd"]
assert draft.draft_order == ["dodd", "logan", "becca"]
with open(
tmp_archive_dir.joinpath("2020/2020_draft_order.json"), "r"
) as written_json:
loaded_json = json.load(written_json)
assert list(loaded_json.keys()) == ["dodd", "logan", "becca"]
draft_sheet_data = pd.read_excel(
tmp_archive_dir.joinpath("2020/2020_draft_sheet.xlsx"),
sheet_name=None,
engine="xlrd",
)
assert list(draft_sheet_data) == ["Dodd", "Logan", "Becca"]
for participant_draft_info in draft_sheet_data.values():
assert list(participant_draft_info.columns) == ["Position", "Player", "Team"]
assert participant_draft_info["Position"].equals(
pd.Series(
[
"QB",
"RB_1",
"RB_2",
"WR_1",
"WR_2",
"TE",
"Flex (RB/WR/TE)",
"K",
"Defense (Team Name)",
"Bench (RB/WR/TE)",
]
)
)
captured = capsys.readouterr()
assert captured.out == (
"\nDrafting in slot 1...\ndodd\n"
+ "\nDrafting in slot 2...\nlogan\n"
+ "\nDrafting in slot 3...\nbecca\n\n"
+ "\n\tDraft Order: ['dodd', 'logan', 'becca']\n"
+ f"\tSaved draft order to {tmp_archive_dir.joinpath('2020/2020_draft_order.json')}\n"
)
# Cleanup - none necessary
def test_Draft_setup_already_exists(tmp_path, capsys):
""" Test Draft.setup() when files exist in root. """
# Setup
tmp_archive_dir = tmp_path.joinpath("archive")
tmp_archive_dir.mkdir()
tmp_year_dir = tmp_archive_dir.joinpath("2020")
tmp_year_dir.mkdir()
existing_draft_order = ["yeager", "emily", "dodd", "logan", "becca_hud", "cindy"]
existing_draft_order_dict = {
"yeager": 1,
"emily": 2,
"dodd": 3,
"logan": 4,
"becca_hud": 5,
"cindy": 6,
}
with open(tmp_year_dir.joinpath("2020_draft_order.json"), "w") as written_json:
json.dump(existing_draft_order_dict, written_json)
draft_info = {
"Position": [
"QB",
"RB_1",
"RB_2",
"WR_1",
"WR_2",
"TE",
"Flex (RB/WR/TE)",
"K",
"Defense (Team Name)",
"Bench (RB/WR/TE)",
],
"Player": [" "] * 10,
"Team": [" "] * 10,
}
draft_df = pd.DataFrame(draft_info)
with pd.ExcelWriter(tmp_year_dir.joinpath("2020_draft_sheet.xlsx")) as writer:
for participant in existing_draft_order:
draft_df.to_excel(writer, sheet_name=participant.title(), index=False)
# Ensure everything exists prior to Draft.setup() call
assert tmp_archive_dir.joinpath("2020").exists() is True
assert tmp_archive_dir.joinpath("2020/2020_draft_order.json").exists() is True
assert tmp_archive_dir.joinpath("2020/2020_draft_sheet.xlsx").exists() is True
# Exercise
draft = Draft(2020)
# Override output dirs to temp path created for testing
draft.output_dir = tmp_archive_dir.joinpath("2020")
draft.draft_order_path = tmp_archive_dir.joinpath("2020/2020_draft_order.json")
draft.draft_sheet_path = tmp_archive_dir.joinpath("2020/2020_draft_sheet.xlsx")
draft.setup()
# Verify
assert draft.year == 2020
assert draft.output_dir == tmp_archive_dir.joinpath("2020")
assert draft.__repr__() == "Draft(2020)"
assert draft.__str__() == "Turkey Bowl Draft: 2020"
assert draft.draft_order_path == tmp_archive_dir.joinpath(
"2020/2020_draft_order.json"
)
assert draft.draft_sheet_path == tmp_archive_dir.joinpath(
"2020/2020_draft_sheet.xlsx"
)
assert tmp_archive_dir.joinpath("2020").exists() is True
assert tmp_archive_dir.joinpath("2020/2020_draft_order.json").exists() is True
assert tmp_archive_dir.joinpath("2020/2020_draft_sheet.xlsx").exists() is True
assert draft.participant_list == existing_draft_order
assert draft.draft_order == existing_draft_order
captured = capsys.readouterr()
assert captured.out == (
f"\nDraft order already exists at {tmp_archive_dir.joinpath('2020/2020_draft_order.json')}\n"
+ f"\n\tDraft Order: {existing_draft_order}\n"
)
# Cleanup - none necessary
def test_Draft_load(tmp_path):
# Setup
tmp_archive_dir = tmp_path.joinpath("archive")
tmp_archive_dir.mkdir()
tmp_year_dir = tmp_archive_dir.joinpath("2005")
tmp_year_dir.mkdir()
existing_draft_order = ["yeager", "emily", "dodd", "logan", "becca_hud", "cindy"]
existing_draft_order_dict = {
"yeager": 1,
"emily": 2,
"dodd": 3,
"logan": 4,
"becca_hud": 5,
"cindy": 6,
}
with open(tmp_year_dir.joinpath("2005_draft_order.json"), "w") as written_json:
json.dump(existing_draft_order_dict, written_json)
draft_info = {
"Position": [
"QB",
"RB_1",
"RB_2",
"WR_1",
"WR_2",
"TE",
"Flex (RB/WR/TE)",
"K",
"Defense (Team Name)",
"Bench (RB/WR/TE)",
],
"Player": ["test"] * 10,
"Team": ["test"] * 10,
}
draft_df = pd.DataFrame(draft_info)
with pd.ExcelWriter(tmp_year_dir.joinpath("2005_draft_sheet.xlsx")) as writer:
for participant in existing_draft_order:
draft_df.to_excel(writer, sheet_name=participant.title(), index=False)
# Ensure everything exists prior to Draft.setup() call
assert tmp_archive_dir.joinpath("2005").exists() is True
assert tmp_archive_dir.joinpath("2005/2005_draft_order.json").exists() is True
assert tmp_archive_dir.joinpath("2005/2005_draft_sheet.xlsx").exists() is True
# Exercise
draft = Draft(2005)
# Override output dirs to temp path created for testing
draft.output_dir = tmp_archive_dir.joinpath("2005")
draft.draft_order_path = tmp_archive_dir.joinpath("2005/2005_draft_order.json")
draft.draft_sheet_path = tmp_archive_dir.joinpath("2005/2005_draft_sheet.xlsx")
draft.setup()
result = draft.load()
# Verify
assert list(result.keys()) == [
"Yeager",
"Emily",
"Dodd",
"Logan",
"Becca_Hud",
"Cindy",
]
assert draft.year == 2005
assert draft.output_dir == tmp_archive_dir.joinpath("2005")
assert draft.__repr__() == "Draft(2005)"
assert draft.__str__() == "Turkey Bowl Draft: 2005"
assert draft.draft_order_path == tmp_archive_dir.joinpath(
"2005/2005_draft_order.json"
)
assert draft.draft_sheet_path == tmp_archive_dir.joinpath(
"2005/2005_draft_sheet.xlsx"
)
assert tmp_archive_dir.joinpath("2005").exists() is True
assert tmp_archive_dir.joinpath("2005/2005_draft_order.json").exists() is True
assert tmp_archive_dir.joinpath("2005/2005_draft_sheet.xlsx").exists() is True
assert draft.participant_list == existing_draft_order
assert draft.draft_order == existing_draft_order
# Cleanup - none necessary
def test_Draft_load_stripping_whitespace(tmp_path):
# Setup
expected_players = [
"QB with spaces",
"RB_1 with spaces",
"RB_2 with spaces",
"WR_1",
"WR_2",
"TE",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
]
expected_teams = [
"MIA",
"DAL",
"CHI",
"NE",
"HOU",
"GB",
"PIT",
"<NAME>",
"<NAME>",
"<NAME>",
]
tmp_archive_dir = tmp_path.joinpath("archive")
tmp_archive_dir.mkdir()
tmp_year_dir = tmp_archive_dir.joinpath("2005")
tmp_year_dir.mkdir()
existing_draft_order = ["yeager", "emily", "dodd", "logan", "becca_hud", "cindy"]
existing_draft_order_dict = {
"yeager": 1,
"emily": 2,
"dodd": 3,
"logan": 4,
"becca_hud": 5,
"cindy": 6,
}
with open(tmp_year_dir.joinpath("2005_draft_order.json"), "w") as written_json:
json.dump(existing_draft_order_dict, written_json)
draft_info = {
"Position": [
"QB",
"RB_1",
"RB_2",
"WR_1",
"WR_2",
"TE",
"Flex (RB/WR/TE)",
"K",
"Defense (Team Name)",
"Bench (RB/WR/TE)",
],
"Player": [
" QB with spaces ",
" RB_1 with spaces ",
"RB_2 with spaces ",
"WR_1 ",
" WR_2",
"TE ",
" <NAME> ",
" <NAME> ",
" Chicago Bears ",
" <NAME> ",
],
"Team": [
" MIA ",
" DAL ",
"CHI ",
"NE ",
" HOU",
"GB ",
" PIT ",
" <NAME> ",
" Chicago Bears ",
" <NAME> ",
],
}
draft_df = pd.DataFrame(draft_info)
with pd.ExcelWriter(tmp_year_dir.joinpath("2005_draft_sheet.xlsx")) as writer:
for participant in existing_draft_order:
draft_df.to_excel(writer, sheet_name=participant.title(), index=False)
# Exercise
draft = Draft(2005)
# Override output dirs to temp path crated for testing
draft.output_dir = tmp_archive_dir.joinpath("2005")
draft.draft_order_path = tmp_archive_dir.joinpath("2005/2005_draft_order.json")
draft.draft_sheet_path = tmp_archive_dir.joinpath("2005/2005_draft_sheet.xlsx")
draft.setup()
result = draft.load()
# Verify
assert list(result.keys()) == [
"Yeager",
"Emily",
"Dodd",
"Logan",
"Becca_Hud",
"Cindy",
]
for participant, participant_team in result.items():
assert participant_team["Player"].tolist() == expected_players
assert participant_team["Team"].tolist() == expected_teams
def test_Draft_check_players_have_been_drafted_false():
# Setup
mock_player_data = {"Player": ["", "", ""]}
mock_participant_teams = {
"Logan": pd.DataFrame(mock_player_data),
"Dodd": pd.DataFrame(mock_player_data),
"Becca": pd.DataFrame(mock_player_data),
}
# Exercise
draft = Draft(2020)
result = draft.check_players_have_been_drafted(mock_participant_teams)
# Verify
assert result is False
# Cleanup - none necessary
def test_Draft_check_players_have_been_drafted_true():
# Setup
mock_player_data = {"Player": ["<NAME>", "<NAME>", "Trogdor"]}
mock_participant_teams = {
"Logan": pd.DataFrame(mock_player_data),
"Dodd": | pd.DataFrame(mock_player_data) | pandas.DataFrame |
import re
import datetime
import numpy as np
import pandas as pd
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
# ---------------------------------------------------
# Person data methods
# ---------------------------------------------------
class TransformGenderGetFromName:
"""Gets clients' genders from theirs russian second names.
Parameters:
column_name (str): Column name in InsolverDataFrame containing clients' names, column type is string.
column_gender (str): Column name in InsolverDataFrame for clients' genders.
gender_male (str): Return value for male gender in InsolverDataFrame, 'male' by default.
gender_female (str): Return value for female gender in InsolverDataFrame, 'female' by default.
"""
def __init__(self, column_name, column_gender, gender_male='male', gender_female='female'):
self.priority = 0
self.column_name = column_name
self.column_gender = column_gender
self.gender_male = gender_male
self.gender_female = gender_female
@staticmethod
def _gender(client_name, gender_male, gender_female):
if pd.isnull(client_name):
gender = None
elif len(client_name) < 2:
gender = None
elif client_name.upper().endswith(('ИЧ', 'ОГЛЫ')):
gender = gender_male
elif client_name.upper().endswith(('НА', 'КЫЗЫ')):
gender = gender_female
else:
gender = None
return gender
def __call__(self, df):
df[self.column_gender] = df[self.column_name].apply(self._gender, args=(self.gender_male, self.gender_female,))
return df
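# Illustrative usage sketch for TransformGenderGetFromName (column names are assumptions):
#   transform = TransformGenderGetFromName(column_name='second_name', column_gender='gender')
#   df = transform(df)  # adds a 'gender' column derived from the second-name endings above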
class TransformAgeGetFromBirthday:
"""Gets clients' ages in years from theirs birth dates and policies' start dates.
Parameters:
column_date_birth (str): Column name in InsolverDataFrame containing clients' birth dates, column type is date.
column_date_start (str): Column name in InsolverDataFrame containing policies' start dates, column type is date.
column_age (str): Column name in InsolverDataFrame for clients' ages in years, column type is int.
"""
def __init__(self, column_date_birth, column_date_start, column_age):
self.priority = 0
self.column_date_birth = column_date_birth
self.column_date_start = column_date_start
self.column_age = column_age
@staticmethod
def _age_get(datebirth_datestart):
date_birth = datebirth_datestart[0]
date_start = datebirth_datestart[1]
if pd.isnull(date_birth):
age = None
elif pd.isnull(date_start):
age = None
elif date_birth > datetime.datetime.now():
age = None
elif date_birth.year < datetime.datetime.now().year - 120:
age = None
elif date_birth > date_start:
age = None
else:
age = int((date_start - date_birth).days // 365.25)
return age
def __call__(self, df):
df[self.column_age] = df[[self.column_date_birth, self.column_date_start]].apply(self._age_get, axis=1)
return df
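# Worked example for _age_get: a birth date of 1990-05-01 and a policy start of 2020-06-01
# differ by 10989 days, and int(10989 // 365.25) = 30, so the derived age is 30 years.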
class TransformAge:
"""Transforms values of drivers' minimum ages in years.
Values under 'age_min' are invalid. Values over 'age_max' will be grouped.
Parameters:
column_driver_minage (str): Column name in InsolverDataFrame containing drivers' minimum ages in years,
column type is integer.
age_min (int): Minimum value of drivers' age in years, lower values are invalid, 18 by default.
age_max (int): Maximum value of drivers' age in years, bigger values will be grouped, 70 by default.
"""
def __init__(self, column_driver_minage, age_min=18, age_max=70):
self.priority = 1
self.column_driver_minage = column_driver_minage
self.age_min = age_min
self.age_max = age_max
@staticmethod
def _age(age, age_min, age_max):
if pd.isnull(age):
age = None
elif age < age_min:
age = None
elif age > age_max:
age = age_max
return age
def __call__(self, df):
df[self.column_driver_minage] = df[self.column_driver_minage].apply(self._age,
args=(self.age_min, self.age_max))
return df
class TransformAgeGender:
"""Gets intersections of drivers' minimum ages and genders.
Parameters:
column_age (str): Column name in InsolverDataFrame containing clients' ages in years, column type is integer.
column_gender (str): Column name in InsolverDataFrame containing clients' genders.
column_age_m (str): Column name in InsolverDataFrame for males' ages, for females default value is applied,
column type is integer.
column_age_f (str): Column name in InsolverDataFrame for females' ages, for males default value is applied,
column type is integer.
age_default (int): Default value of the age in years, 18 by default.
gender_male: Value for male gender in InsolverDataFrame, 'male' by default.
gender_female: Value for female gender in InsolverDataFrame, 'female' by default.
"""
def __init__(self, column_age, column_gender, column_age_m, column_age_f, age_default=18,
gender_male='male', gender_female='female'):
self.priority = 2
self.column_age = column_age
self.column_gender = column_gender
self.column_age_m = column_age_m
self.column_age_f = column_age_f
self.age_default = age_default
self.gender_male = gender_male
self.gender_female = gender_female
@staticmethod
def _age_gender(age_gender, age_default, gender_male, gender_female):
age = age_gender[0]
gender = age_gender[1]
if pd.isnull(age):
age_m = None
age_f = None
elif pd.isnull(gender):
age_m = None
age_f = None
# Copyright 2015-present The Scikit Flow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from sklearn import metrics
import pandas
import tensorflow as tf
from tensorflow.contrib import learn
### Training data
# Downloads, unpacks and reads DBpedia dataset.
dbpedia = learn.datasets.load_dataset('dbpedia')
X_train, y_train = pandas.DataFrame(dbpedia.train.data)[1], pandas.Series(dbpedia.train.target)
X_test, y_test = pandas.DataFrame(dbpedia.test.data)[1], pandas.Series(dbpedia.test.target)
# -*- encoding: utf-8 -*-
import random
import warnings
from contextlib import contextmanager
from collections import OrderedDict, Counter, defaultdict
try:
from StringIO import StringIO # py2 (first as py2 also has io.StringIO, but only with unicode support)
except:
from io import StringIO # py3
import h2o
import numpy as np
from h2o.utils.ext_dependencies import get_matplotlib_pyplot
from h2o.exceptions import H2OValueError
def _display(object):
"""
Display the object.
:param object: An object to be displayed.
:returns: the input
"""
import matplotlib.figure
plt = get_matplotlib_pyplot(False, raise_if_not_available=True)
if isinstance(object, matplotlib.figure.Figure) and matplotlib.get_backend().lower() != "agg":
plt.show()
else:
try:
import IPython.display
IPython.display.display(object)
except ImportError:
print(object)
if isinstance(object, matplotlib.figure.Figure):
plt.close(object)
print("\n")
return object
def _dont_display(object):
"""
Don't display the object
:param object: An object that should not be displayed.
:returns: input
"""
import matplotlib.figure
plt = get_matplotlib_pyplot(False, raise_if_not_available=True)
if isinstance(object, matplotlib.figure.Figure):
plt.close()
return object
# UTILS
class Header:
"""
Class representing a Header with pretty printing for IPython.
"""
def __init__(self, content, level=1):
self.content = content
self.level = level
def _repr_html_(self):
return "<h{level}>{content}</h{level}>".format(level=self.level, content=self.content)
def _repr_markdown_(self):
return "\n\n{} {}".format("#" * self.level, self.content)
def _repr_pretty_(self, p, cycle):
p.text(str(self))
def __str__(self):
return self._repr_markdown_()
class Description:
"""
Class representing a Description with pretty printing for IPython.
"""
DESCRIPTIONS = dict(
leaderboard="Leaderboard shows models with their metrics. When provided with H2OAutoML object, "
"the leaderboard shows 5-fold cross-validated metrics by default (depending on the "
"H2OAutoML settings), otherwise it shows metrics computed on the frame. "
"At most 20 models are shown by default.",
leaderboard_row="Leaderboard shows models with their metrics and their predictions for a given row. "
"When provided with H2OAutoML object, the leaderboard shows 5-fold cross-validated "
"metrics by default (depending on the H2OAutoML settings), otherwise it shows "
"metrics computed on the frame. At most 20 models are shown by default.",
confusion_matrix="Confusion matrix shows a predicted class vs an actual class.",
residual_analysis="Residual Analysis plots the fitted values vs residuals on a test dataset. Ideally, "
"residuals should be randomly distributed. Patterns in this plot can indicate potential "
"problems with the model selection, e.g., using simpler model than necessary, not accounting "
"for heteroscedasticity, autocorrelation, etc. Note that if you see \"striped\" lines of "
"residuals, that is an artifact of having an integer valued (vs a real valued) "
"response variable.",
variable_importance="The variable importance plot shows the relative importance of the most "
"important variables in the model.",
varimp_heatmap="Variable importance heatmap shows variable importance across multiple models. "
"Some models in H2O return variable importance for one-hot (binary indicator) "
"encoded versions of categorical columns (e.g. Deep Learning, XGBoost). "
"In order for the variable importance of categorical columns to be compared "
"across all model types we compute a summarization of the the variable importance "
"across all one-hot encoded features and return a single variable importance for the "
"original categorical feature. By default, the models and variables are ordered by "
"their similarity.",
model_correlation_heatmap="This plot shows the correlation between the predictions of the models. "
"For classification, frequency of identical predictions is used. By default, "
"models are ordered by their similarity (as computed by hierarchical clustering). "
"Interpretable models, such as GAM, GLM, and RuleFit are highlighted using "
"red colored text.",
shap_summary="SHAP summary plot shows the contribution of the features for each instance (row of data). "
"The sum of the feature contributions and the bias term is equal to the raw prediction of "
"the model, i.e., prediction before applying inverse link function.",
pdp="Partial dependence plot (PDP) gives a graphical depiction of the marginal effect of a variable on "
"the response. The effect of a variable is measured in change in the mean response. PDP assumes "
"independence between the feature for which is the PDP computed and the rest.",
ice="An Individual Conditional Expectation (ICE) plot gives a graphical depiction of the marginal effect "
"of a variable on the response. ICE plots are similar to partial dependence plots (PDP); PDP shows the "
"average effect of a feature while ICE plot shows the effect for a single instance. This function will "
"plot the effect for each decile. In contrast to the PDP, ICE plots can provide more insight, especially "
"when there is stronger feature interaction.",
ice_row="Individual conditional expectations (ICE) plot gives a graphical depiction of the marginal "
"effect of a variable on the response for a given row. ICE plot is similar to partial "
"dependence plot (PDP), PDP shows the average effect of a feature while ICE plot shows "
"the effect for a single instance.",
shap_explain_row="SHAP explanation shows contribution of features for a given instance. The sum "
"of the feature contributions and the bias term is equal to the raw prediction of "
"the model, i.e., prediction before applying inverse link function. H2O implements "
"TreeSHAP which when the features are correlated, can increase contribution of a feature "
"that had no influence on the prediction.",
)
def __init__(self, for_what):
self.content = self.DESCRIPTIONS[for_what]
def _repr_html_(self):
return "<blockquote>{}</blockquote>".format(self.content)
def _repr_markdown_(self):
return "\n> {}".format(self.content)
def _repr_pretty_(self, p, cycle):
p.text(str(self))
def __str__(self):
return self._repr_markdown_()
class H2OExplanation(OrderedDict):
def _ipython_display_(self):
from IPython.display import display
for v in self.values():
display(v)
@contextmanager
def no_progress():
"""
A context manager that temporarily blocks showing the H2O's progress bar.
Used when a multiple models are evaluated.
"""
progress = h2o.job.H2OJob.__PROGRESS_BAR__
if progress:
h2o.no_progress()
try:
yield
finally:
if progress:
h2o.show_progress()
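# Illustrative usage sketch (model/frame names are assumptions):
#   with no_progress():
#       preds = [model.predict(test_frame) for model in models]
# The progress bar is restored automatically when the block exits, if it was enabled before.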
class NumpyFrame:
"""
Simple class that very vaguely emulates Pandas DataFrame.
Its main purpose is to keep the parsing from the list-of-lists format into numpy in one place.
This class is meant to be used just in the explain module.
Because of that, it encodes factor variables similarly to R/pandas -
factors are mapped to a numeric column, which in turn makes them easier to plot.
"""
def __init__(self, h2o_frame):
# type: ("NumpyFrame", Union[h2o.H2OFrame, h2o.two_dim_table.H2OTwoDimTable]) -> None
if isinstance(h2o_frame, h2o.two_dim_table.H2OTwoDimTable):
self._columns = h2o_frame.col_header
_is_numeric = np.array([type_ in ["double", "float", "long", "integer"]
for type_ in h2o_frame.col_types], dtype=bool)
_is_factor = np.array([type_ in ["string"] for type_ in h2o_frame.col_types],
dtype=bool)
df = h2o_frame.cell_values
self._factors = dict()
for col in range(len(self._columns)):
if _is_factor[col]:
levels = set(row[col] for row in df)
self._factors[self._columns[col]] = list(levels)
self._data = np.empty((len(df), len(self._columns)))
df = [self._columns] + df
elif isinstance(h2o_frame, h2o.H2OFrame):
_is_factor = np.array(h2o_frame.isfactor(), dtype=np.bool) | np.array(
h2o_frame.ischaracter(), dtype=np.bool)
_is_numeric = h2o_frame.isnumeric()
self._columns = h2o_frame.columns
self._factors = {col: h2o_frame[col].asfactor().levels()[0] for col in
np.array(h2o_frame.columns)[_is_factor]}
df = h2o_frame.as_data_frame(False)
self._data = np.empty((h2o_frame.nrow, h2o_frame.ncol))
else:
raise RuntimeError("Unexpected type of \"h2o_frame\": {}".format(type(h2o_frame)))
for idx, col in enumerate(df[0]):
if _is_factor[idx]:
convertor = self.from_factor_to_num(col)
self._data[:, idx] = np.array(
[float(convertor.get(
row[idx] if not (len(row) == 0 or row[idx] == "") else "nan", "nan"))
for row in df[1:]], dtype=np.float32)
elif _is_numeric[idx]:
self._data[:, idx] = np.array(
[float(row[idx] if not (len(row) == 0 or row[idx] == "") else "nan") for row in
df[1:]],
dtype=np.float32)
else:
try:
self._data[:, idx] = np.array([row[idx] if not (len(row) == 0 or row[idx] == "")
else "nan" for row in df[1:]],
dtype=np.datetime64)
except Exception:
raise RuntimeError("Unexpected type of column {}!".format(col))
def isfactor(self, column):
# type: ("NumpyFrame", str) -> bool
"""
Is column a factor/categorical column?
:param column: string containing the column name
:returns: boolean
"""
return column in self._factors or self._get_column_and_factor(column)[0] in self._factors
def from_factor_to_num(self, column):
# type: ("NumpyFrame", str) -> Dict[str, int]
"""
Get a dictionary mapping a factor to its numerical representation in the NumpyFrame
:param column: string containing the column name
:returns: dictionary
"""
fact = self._factors[column]
return dict(zip(fact, range(len(fact))))
def from_num_to_factor(self, column):
# type: ("NumpyFrame", str) -> Dict[int, str]
"""
Get a dictionary mapping numerical representation of a factor to the category names.
:param column: string containing the column name
:returns: dictionary
"""
fact = self._factors[column]
return dict(zip(range(len(fact)), fact))
def _get_column_and_factor(self, column):
# type: ("NumpyFrame", str) -> Tuple[str, Optional[float]]
"""
Get a column name and possibly a factor name.
This is used to get proper column name and factor name when provided
with the output of some algos such as XGBoost which encode factor
columns to "column_name.category_name".
:param column: string containing the column name
:returns: tuple (column_name: str, factor_name: Optional[str])
"""
if column in self.columns:
return column, None
if column.endswith(".") and column[:-1] in self.columns:
return column[:-1], None
col_parts = column.split(".")
for i in range(1, len(col_parts) + 1):
if ".".join(col_parts[:i]) in self.columns:
column = ".".join(col_parts[:i])
factor_name = ".".join(col_parts[i:])
if factor_name == "missing(NA)":
factor = float("nan")
else:
factor = self.from_factor_to_num(column)[factor_name]
return column, factor
def __getitem__(self, indexer):
# type: ("NumpyFrame", Union[str, Tuple[Union[int,List[int]], str]]) -> np.ndarray
"""
A low level way to get a column or a row within a column.
NOTE: Returns numeric representation even for factors.
:param indexer: string for the whole column or a tuple (row_index, column_name)
:returns: a column or a row within a column
"""
row = slice(None)
if isinstance(indexer, tuple):
row = indexer[0]
column = indexer[1]
else:
column = indexer
if column not in self.columns:
column, factor = self._get_column_and_factor(column)
if factor is not None:
if factor != factor:
return np.asarray(np.isnan(self._data[row, self.columns.index(column)]),
dtype=np.float32)
return np.asarray(self._data[row, self.columns.index(column)] == factor,
dtype=np.float32)
return self._data[row, self.columns.index(column)]
def get(self, column, as_factor=True):
# type: ("NumpyFrame", str, bool) -> np.ndarray
"""
Get a column.
:param column: string containing the column name
:param as_factor: if True (default), factor column will contain string
representation; otherwise numerical representation
:returns: A column represented as numpy ndarray
"""
if as_factor and self.isfactor(column):
column, factor_idx = self._get_column_and_factor(column)
if factor_idx is not None:
return self[column] == factor_idx
convertor = self.from_num_to_factor(column)
return np.array([convertor.get(row, "") for row in self[column]])
return self[column]
def levels(self, column):
# type: ("NumpyFrame", str) -> List[str]
"""
Get levels/categories of a factor column.
:param column: a string containing the column name
:returns: list of levels
"""
return self._factors.get(column, [])
def nlevels(self, column):
# type: ("NumpyFrame", str) -> int
"""
Get number of levels/categories of a factor column.
:param column: string containing the column name
:returns: a number of levels within a factor
"""
return len(self.levels(column))
@property
def columns(self):
# type: ("NumpyFrame") -> List[str]
"""
Columns within the NumpyFrame.
:returns: list of columns
"""
return self._columns
@property
def nrow(self):
# type: ("NumpyFrame") -> int
"""
Number of rows.
:returns: number of rows
"""
return self._data.shape[0]
@property
def ncol(self):
# type: ("NumpyFrame") -> int
"""
Number of columns.
:returns: number of columns
"""
return self._data.shape[1]
@property
def shape(self):
# type: ("NumpyFrame") -> Tuple[int, int]
"""
Shape of the frame.
:returns: tuple (number of rows, number of columns)
"""
return self._data.shape
def sum(self, axis=0):
# type: ("NumpyFrame", int) -> np.ndarray
"""
Calculate the sum of the NumpyFrame elements over the given axis.
WARNING: This method doesn't care if the column is categorical or numeric. Use with care.
:param axis: Axis along which a sum is performed.
:returns: numpy.ndarray with shape same as NumpyFrame with the `axis` removed
"""
return self._data.sum(axis=axis)
def mean(self, axis=0):
# type: ("NumpyFrame", int) -> np.ndarray
"""
Calculate the mean of the NumpyFrame elements over the given axis.
WARNING: This method doesn't care if the column is categorical or numeric. Use with care.
:param axis: Axis along which a mean is performed.
:returns: numpy.ndarray with shape same as NumpyFrame with the `axis` removed
"""
return self._data.mean(axis=axis)
def items(self, with_categorical_names=False):
# type: ("NumpyFrame", bool) -> Generator[Tuple[str, np.ndarray], None, None]
"""
Make a generator that yields column name and ndarray with values.
:param with_categorical_names: if True, factor columns are returned as string columns;
otherwise numerical
:returns: generator to be iterated upon
"""
for col in self.columns:
yield col, self.get(col, with_categorical_names)
def _get_domain_mapping(model):
"""
Get a mapping between columns and their domains.
:return: Dictionary containing a mapping column -> factors
"""
output = model._model_json["output"]
return dict(zip(output["names"], output["domains"]))
def _shorten_model_ids(model_ids):
import re
regexp = re.compile(r"(.*)_AutoML_[\d_]+((?:_.*)?)$") # nested group needed for Py2
shortened_model_ids = [regexp.sub(r"\1\2", model_id) for model_id in model_ids]
if len(set(shortened_model_ids)) == len(set(model_ids)):
return shortened_model_ids
return model_ids
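# Illustration with hypothetical model ids (not taken from a real run):
# ["GBM_1_AutoML_20201015_101010", "GLM_1_AutoML_20201015_101010"] is shortened to
# ["GBM_1", "GLM_1"]; if shortening would create duplicate ids, the original ids are returned unchanged.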
def _get_algorithm(model, treat_xrt_as_algorithm=False):
# type: (Union[str, h2o.model.ModelBase], bool) -> str
"""
Get algorithm type. Use model id to infer it if possible.
:param model: model or a model_id
:param treat_xrt_as_algorithm: boolean used for best_of_family
:returns: string containing algorithm name
"""
if not isinstance(model, h2o.model.ModelBase):
import re
algo = re.search("^(DeepLearning|DRF|GAM|GBM|GLM|NaiveBayes|StackedEnsemble|RuleFit|XGBoost|XRT)(?=_)", model)
if algo is not None:
algo = algo.group(0).lower()
if algo == "xrt" and not treat_xrt_as_algorithm:
algo = "drf"
return algo
else:
model = h2o.get_model(model)
if treat_xrt_as_algorithm and model.algo == "drf":
if model.actual_params.get("histogram_type") == "Random":
return "xrt"
return model.algo
def _first_of_family(models, all_stackedensembles=False):
# type: (Union[str, h2o.model.ModelBase], bool) -> Union[str, h2o.model.ModelBase]
"""
Get the first model of each algorithm family.
:param models: models or model ids
:param all_stackedensembles: if True return all stacked ensembles
:returns: list of models or model ids (the same type as on input)
"""
selected_models = []
included_families = set()
for model in models:
family = _get_algorithm(model, treat_xrt_as_algorithm=True)
if family not in included_families or (all_stackedensembles and "stackedensemble" == family):
selected_models.append(model)
included_families.add(family)
return selected_models
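# Illustration with hypothetical model ids: for ["GBM_1_AutoML_1", "GBM_2_AutoML_1",
# "DRF_1_AutoML_1", "XRT_1_AutoML_1"] the result keeps the first GBM, the DRF and the XRT,
# because XRT is treated as its own family here (treat_xrt_as_algorithm=True).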
def _density(xs, bins=100):
# type: (np.ndarray, int) -> np.ndarray
"""
Make an approximate density estimation by blurring a histogram (used for SHAP summary plot).
:param xs: numpy vector
:param bins: number of bins
:returns: density values
"""
hist = list(np.histogram(xs, bins=bins))
# gaussian blur
hist[0] = np.convolve(hist[0],
[0.00598, 0.060626, 0.241843,
0.383103,
0.241843, 0.060626, 0.00598])[3:-3]
hist[0] = hist[0] / np.max(hist[0])
hist[1] = (hist[1][:-1] + hist[1][1:]) / 2
return np.interp(xs, hist[1], hist[0])
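# A minimal, self-contained usage sketch (not part of the original module; the helper name and
# synthetic data below are hypothetical): the returned vector has the same length as the input
# and is scaled so that the densest region is close to 1.
def _density_usage_sketch():
    xs = np.random.normal(size=500)  # hypothetical numeric column
    dens = _density(xs, bins=50)
    # In shap_summary_plot, `dens` scales the vertical jitter so that dense regions spread out more.
    return dens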
def _uniformize(data, col_name):
# type: (NumpyFrame, str) -> np.ndarray
"""
Convert to quantiles.
:param data: NumpyFrame
:param col_name: string containing a column name
:returns: quantile values of individual points in the column
"""
if col_name not in data.columns or data.isfactor(col_name):
res = data[col_name]
diff = (np.nanmax(res) - np.nanmin(res))
if diff <= 0 or np.isnan(diff):
return res
res = (res - np.nanmin(res)) / diff
return res
col = data[col_name]
xs = np.linspace(0, 1, 100)
quantiles = np.nanquantile(col, xs)
res = np.interp(col, quantiles, xs)
res = (res - np.nanmin(res)) / (np.nanmax(res) - np.nanmin(res))
return res
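# A plain-numpy sketch (not part of the original module; the helper name is hypothetical) of the
# quantile mapping performed above for numeric columns, shown on a raw numpy vector.
def _uniformize_numeric_sketch(col):
    xs = np.linspace(0, 1, 100)
    quantiles = np.nanquantile(col, xs)  # empirical quantiles of the column
    res = np.interp(col, quantiles, xs)  # map raw values to their approximate quantile (0..1)
    return (res - np.nanmin(res)) / (np.nanmax(res) - np.nanmin(res))  # rescale to span [0, 1]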
# PLOTS
def shap_summary_plot(
model, # type: h2o.model.ModelBase
frame, # type: h2o.H2OFrame
columns=None, # type: Optional[Union[List[int], List[str]]]
top_n_features=20, # type: int
samples=1000, # type: int
colorize_factors=True, # type: bool
alpha=1, # type: float
colormap=None, # type: str
figsize=(12, 12), # type: Union[Tuple[float], List[float]]
jitter=0.35 # type: float
): # type: (...) -> plt.Figure
"""
SHAP summary plot
SHAP summary plot shows contribution of features for each instance. The sum
of the feature contributions and the bias term is equal to the raw prediction
of the model, i.e., prediction before applying inverse link function.
:param model: h2o tree model, such as DRF, XRT, GBM, XGBoost
:param frame: H2OFrame
:param columns: either a list of columns or column indices to show. If specified,
the parameter top_n_features will be ignored.
:param top_n_features: a number of columns to pick using variable importance (where applicable).
:param samples: maximum number of observations to use; if lower than number of rows in the
frame, take a random sample
:param colorize_factors: if True, use colors from the colormap to colorize the factors;
otherwise all levels will have same color
:param alpha: transparency of the points
:param colormap: colormap to use instead of the default blue to red colormap
:param figsize: figure size; passed directly to matplotlib
:param jitter: amount of jitter used to show the point density
:returns: a matplotlib figure object
:examples:
>>> import h2o
>>> from h2o.estimators import H2OGradientBoostingEstimator
>>>
>>> h2o.init()
>>>
>>> # Import the wine dataset into H2O:
>>> f = "https://h2o-public-test-data.s3.amazonaws.com/smalldata/wine/winequality-redwhite-no-BOM.csv"
>>> df = h2o.import_file(f)
>>>
>>> # Set the response
>>> response = "quality"
>>>
>>> # Split the dataset into a train and test set:
>>> train, test = df.split_frame([0.8])
>>>
>>> # Train a GBM
>>> gbm = H2OGradientBoostingEstimator()
>>> gbm.train(y=response, training_frame=train)
>>>
>>> # Create SHAP summary plot
>>> gbm.shap_summary_plot(test)
"""
import matplotlib.colors
plt = get_matplotlib_pyplot(False, raise_if_not_available=True)
blue_to_red = matplotlib.colors.LinearSegmentedColormap.from_list("blue_to_red",
["#00AAEE", "#FF1166"])
if colormap is None:
colormap = blue_to_red
else:
colormap = plt.get_cmap(colormap)
if top_n_features < 0:
top_n_features = float("inf")
# to prevent problems with data sorted in some logical way
# (overplotting with latest result which might have different values
# then the rest of the data in a given region)
permutation = list(range(frame.nrow))
random.shuffle(permutation)
if samples is not None:
permutation = sorted(permutation[:min(len(permutation), samples)])
frame = frame[permutation, :]
permutation = list(range(frame.nrow))
random.shuffle(permutation)
with no_progress():
contributions = NumpyFrame(model.predict_contributions(frame))
frame = NumpyFrame(frame)
contribution_names = contributions.columns
feature_importance = sorted(
{k: np.abs(v).mean() for k, v in contributions.items() if "BiasTerm" != k}.items(),
key=lambda kv: kv[1])
if columns is None:
top_n = min(top_n_features, len(feature_importance))
top_n_features = [fi[0] for fi in feature_importance[-top_n:]]
else:
picked_cols = []
columns = [frame.columns[col] if isinstance(col, int) else col for col in columns]
for feature in columns:
if feature in contribution_names:
picked_cols.append(feature)
else:
for contrib in contribution_names:
if contrib.startswith(feature + "."):
picked_cols.append(contrib)
top_n_features = picked_cols
plt.figure(figsize=figsize)
plt.grid(True)
plt.axvline(0, c="black")
for i in range(len(top_n_features)):
col_name = top_n_features[i]
col = contributions[permutation, col_name]
dens = _density(col)
plt.scatter(
col,
i + dens * np.random.uniform(-jitter, jitter, size=len(col)),
alpha=alpha,
c=_uniformize(frame, col_name)[permutation]
if colorize_factors or not frame.isfactor(col_name)
else np.full(frame.nrow, 0.5),
cmap=colormap
)
plt.clim(0, 1)
cbar = plt.colorbar()
cbar.set_label('Normalized feature value', rotation=270)
cbar.ax.get_yaxis().labelpad = 15
plt.yticks(range(len(top_n_features)), top_n_features)
plt.xlabel("SHAP value")
plt.ylabel("Feature")
plt.title("SHAP Summary plot for \"{}\"".format(model.model_id))
plt.tight_layout()
fig = plt.gcf()
return fig
def shap_explain_row_plot(
model, # type: h2o.model.ModelBase
frame, # type: h2o.H2OFrame
row_index, # type: int
columns=None, # type: Optional[Union[List[int], List[str]]]
top_n_features=10, # type: int
figsize=(16, 9), # type: Union[List[float], Tuple[float]]
plot_type="barplot", # type: str
contribution_type="both" # type: str
): # type: (...) -> plt.Figure
"""
SHAP local explanation
SHAP explanation shows contribution of features for a given instance. The sum
of the feature contributions and the bias term is equal to the raw prediction
of the model, i.e., prediction before applying inverse link function. H2O implements
TreeSHAP which, when the features are correlated, can increase the contribution of a feature
that had no influence on the prediction.
:param model: h2o tree model, such as DRF, XRT, GBM, XGBoost
:param frame: H2OFrame
:param row_index: row index of the instance to inspect
:param columns: either a list of columns or column indices to show. If specified,
the parameter top_n_features will be ignored.
:param top_n_features: a number of columns to pick using variable importance (where applicable).
When plot_type="barplot", top_n_features features are picked for each contribution_type.
:param figsize: figure size; passed directly to matplotlib
:param plot_type: either "barplot" or "breakdown"
:param contribution_type: One of "positive", "negative", or "both".
Used only for plot_type="barplot".
:returns: a matplotlib figure object
:examples:
>>> import h2o
>>> from h2o.estimators import H2OGradientBoostingEstimator
>>>
>>> h2o.init()
>>>
>>> # Import the wine dataset into H2O:
>>> f = "https://h2o-public-test-data.s3.amazonaws.com/smalldata/wine/winequality-redwhite-no-BOM.csv"
>>> df = h2o.import_file(f)
>>>
>>> # Set the response
>>> response = "quality"
>>>
>>> # Split the dataset into a train and test set:
>>> train, test = df.split_frame([0.8])
>>>
>>> # Train a GBM
>>> gbm = H2OGradientBoostingEstimator()
>>> gbm.train(y=response, training_frame=train)
>>>
>>> # Create SHAP row explanation plot
>>> gbm.shap_explain_row_plot(test, row_index=0)
"""
plt = get_matplotlib_pyplot(False, raise_if_not_available=True)
if top_n_features < 0:
top_n_features = float("inf")
row = frame[row_index, :]
with no_progress():
contributions = NumpyFrame(model.predict_contributions(row))
contribution_names = contributions.columns
prediction = float(contributions.sum(axis=1))
bias = float(contributions["BiasTerm"])
contributions = sorted(filter(lambda pair: pair[0] != "BiasTerm", contributions.items()),
key=lambda pair: -abs(pair[1]))
if plot_type == "barplot":
with no_progress():
prediction = model.predict(row)[0, "predict"]
row = NumpyFrame(row)
if contribution_type == "both":
contribution_type = ["positive", "negative"]
else:
contribution_type = [contribution_type]
if columns is None:
picked_features = []
if "positive" in contribution_type:
positive_features = sorted(filter(lambda pair: pair[1] >= 0, contributions),
key=lambda pair: pair[1])
picked_features.extend(positive_features[-min(top_n_features, len(positive_features)):])
if "negative" in contribution_type:
negative_features = sorted(filter(lambda pair: pair[1] < 0, contributions),
key=lambda pair: pair[1])
picked_features.extend(negative_features[:min(top_n_features, len(negative_features))])
else:
columns = [frame.columns[col] if isinstance(col, int) else col for col in columns]
picked_cols = []
for feature in columns:
if feature in contribution_names:
picked_cols.append(feature)
else:
for contrib in contribution_names:
if contrib.startswith(feature + "."):
picked_cols.append(contrib)
picked_features = [pair for pair in contributions if pair[0] in picked_cols]
picked_features = sorted(picked_features, key=lambda pair: pair[1])
if len(picked_features) < len(contributions):
contribution_subset_note = " using {} out of {} contributions".format(
len(picked_features), len(contributions))
else:
contribution_subset_note = ""
contributions = dict(
feature=np.array(
["{}={}".format(pair[0], str(row.get(pair[0])[0])) for pair in picked_features]),
value=np.array([pair[1][0] for pair in picked_features])
)
plt.figure(figsize=figsize)
plt.barh(range(contributions["feature"].shape[0]), contributions["value"], fc="#b3ddf2")
plt.grid(True)
plt.axvline(0, c="black")
plt.xlabel("SHAP value")
plt.ylabel("Feature")
plt.yticks(range(contributions["feature"].shape[0]), contributions["feature"])
plt.title("SHAP explanation for \"{}\" on row {}{}\nprediction: {}".format(
model.model_id,
row_index,
contribution_subset_note,
prediction
))
plt.gca().set_axisbelow(True)
plt.tight_layout()
fig = plt.gcf()
return fig
elif plot_type == "breakdown":
if columns is None:
if top_n_features + 1 < len(contributions):
contributions = contributions[:top_n_features] + [
("Remaining Features", sum(map(lambda pair: pair[1], contributions[top_n_features:])))]
else:
picked_cols = []
columns = [frame.columns[col] if isinstance(col, int) else col for col in columns]
for feature in columns:
if feature in contribution_names:
picked_cols.append(feature)
else:
for contrib in contribution_names:
if contrib.startswith(feature + "."):
picked_cols.append(contrib)
rest = np.array(sum(pair[1] for pair in contributions if pair[0] not in picked_cols))
contributions = [pair for pair in contributions if pair[0] in picked_cols]
if len(contribution_names) - 1 > len(picked_cols): # Contribution names contain "BiasTerm" as well
contributions += [("Remaining Features", rest)]
contributions = contributions[::-1]
contributions = dict(
feature=np.array([pair[0] for pair in contributions]),
value=np.array([pair[1][0] for pair in contributions]),
color=np.array(["g" if pair[1] >= 0 else "r" for pair in contributions])
)
contributions["cummulative_value"] = [bias] + list(
contributions["value"].cumsum()[:-1] + bias)
plt.figure(figsize=figsize)
plt.barh(contributions["feature"], contributions["value"],
left=contributions["cummulative_value"],
color=contributions["color"])
plt.axvline(prediction, label="Prediction")
plt.axvline(bias, linestyle="dotted", color="gray", label="Bias")
plt.vlines(contributions["cummulative_value"][1:],
ymin=[y - 0.4 for y in range(contributions["value"].shape[0] - 1)],
ymax=[y + 1.4 for y in range(contributions["value"].shape[0] - 1)],
color="k")
plt.legend()
plt.grid(True)
xlim = plt.xlim()
xlim_diff = xlim[1] - xlim[0]
plt.xlim((xlim[0] - 0.02 * xlim_diff, xlim[1] + 0.02 * xlim_diff))
plt.xlabel("SHAP value")
plt.ylabel("Feature")
plt.gca().set_axisbelow(True)
plt.tight_layout()
fig = plt.gcf()
return fig
def _get_top_n_levels(column, top_n):
# type: (h2o.H2OFrame, int) -> List[str]
"""
Get top_n levels from factor column based on their frequency.
:param column: a single-column H2OFrame containing the factor column
:param top_n: maximum number of levels to be returned
:returns: list of levels
"""
counts = column.table().sort("Count", ascending=[False])[:, 0]
return [
level[0]
for level in counts[:min(counts.nrow, top_n), :].as_data_frame(
use_pandas=False, header=False)
]
def _factor_mapper(mapping):
# type: (Dict) -> Callable
"""
Helper higher order function returning function that applies mapping to each element.
:param mapping: dictionary that maps factor names to floats (for NaN; other values are integers)
:returns: function to be applied on iterable
"""
def _(column):
return [mapping.get(entry, float("nan")) for entry in column]
return _
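# Illustration (hypothetical mapping): _factor_mapper({"a": 0, "b": 1})(["a", "c", "b"])
# returns [0, nan, 1]; levels missing from the mapping are encoded as NaN.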
def _add_histogram(frame, column, add_rug=True, add_histogram=True, levels_order=None):
# type: (h2o.H2OFrame, str, bool, bool, Optional[List[str]]) -> None
"""
Helper function to add rug and/or histogram to a plot
:param frame: H2OFrame
:param column: string containing column name
:param add_rug: if True, adds rug
:param add_histogram: if True, adds histogram
:param levels_order: optional list of factor levels determining their order on the x axis
:returns: None
"""
plt = get_matplotlib_pyplot(False, raise_if_not_available=True)
ylims = plt.ylim()
nf = NumpyFrame(frame[column])
if nf.isfactor(column) and levels_order is not None:
new_mapping = dict(zip(levels_order, range(len(levels_order))))
mapping = _factor_mapper({k: new_mapping[v] for k, v in nf.from_num_to_factor(column).items()})
else:
def mapping(x):
return x
if add_rug:
plt.plot(mapping(nf[column]),
[ylims[0] for _ in range(frame.nrow)],
"|", color="k", alpha=0.2, ms=20)
if add_histogram:
if nf.isfactor(column):
cnt = Counter(nf[column][np.isfinite(nf[column])])
hist_x = np.array(list(cnt.keys()), dtype=float)
hist_y = np.array(list(cnt.values()), dtype=float)
width = 1
else:
hist_y, hist_x = np.histogram(
mapping(nf[column][np.isfinite(nf[column])]),
bins=20)
hist_x = hist_x[:-1].astype(float)
hist_y = hist_y.astype(float)
width = hist_x[1] - hist_x[0]
plt.bar(mapping(hist_x),
hist_y / hist_y.max() * ((ylims[1] - ylims[0]) / 1.618), # ~ golden ratio
bottom=ylims[0],
align="center" if nf.isfactor(column) else "edge",
width=width, color="gray", alpha=0.2)
if nf.isfactor(column):
plt.xticks(mapping(range(nf.nlevels(column))), nf.levels(column))
plt.ylim(ylims)
def pd_plot(
model, # type: h2o.model.model_base.ModelBase
frame, # type: h2o.H2OFrame
column, # type: str
row_index=None, # type: Optional[int]
target=None, # type: Optional[str]
max_levels=30, # type: int
figsize=(16, 9), # type: Union[Tuple[float], List[float]]
colormap="Dark2", # type: str
):
"""
Plot partial dependence plot.
Partial dependence plot (PDP) gives a graphical depiction of the marginal effect of a variable
on the response. The effect of a variable is measured in change in the mean response.
PDP assumes independence between the feature for which the PDP is computed and the rest of the features.
:param model: H2O Model object
:param frame: H2OFrame
:param column: string containing column name
:param row_index: if None, do partial dependence, if integer, do individual
conditional expectation for the row specified by this integer
:param target: (only for multinomial classification) the target class for which the plot should be created
:param max_levels: maximum number of factor levels to show
:param figsize: figure size; passed directly to matplotlib
:param colormap: colormap name; used to get just the first color, to keep the API and color scheme consistent with
pd_multi_plot
:returns: a matplotlib figure object
:examples:
>>> import h2o
>>> from h2o.estimators import H2OGradientBoostingEstimator
>>>
>>> h2o.init()
>>>
>>> # Import the wine dataset into H2O:
>>> f = "https://h2o-public-test-data.s3.amazonaws.com/smalldata/wine/winequality-redwhite-no-BOM.csv"
>>> df = h2o.import_file(f)
>>>
>>> # Set the response
>>> response = "quality"
>>>
>>> # Split the dataset into a train and test set:
>>> train, test = df.split_frame([0.8])
>>>
>>> # Train a GBM
>>> gbm = H2OGradientBoostingEstimator()
>>> gbm.train(y=response, training_frame=train)
>>>
>>> # Create partial dependence plot
>>> gbm.pd_plot(test, column="alcohol")
"""
plt = get_matplotlib_pyplot(False, raise_if_not_available=True)
is_factor = frame[column].isfactor()[0]
if is_factor:
if frame[column].nlevels()[0] > max_levels:
levels = _get_top_n_levels(frame[column], max_levels)
if row_index is not None:
levels = list(set(levels + [frame[row_index, column]]))
frame = frame[(frame[column].isin(levels)), :]
# decrease the number of levels to the actual number of levels in the subset
frame[column] = frame[column].ascharacter().asfactor()
if target is not None and not isinstance(target, list):
target = [target]
if frame.type(column) == "string":
raise ValueError("String columns are not supported!")
color = plt.get_cmap(colormap)(0)
with no_progress():
plt.figure(figsize=figsize)
is_factor = frame[column].isfactor()[0]
if is_factor:
factor_map = _factor_mapper(NumpyFrame(frame[column]).from_factor_to_num(column))
tmp = NumpyFrame(
model.partial_plot(frame, cols=[column], plot=False,
row_index=row_index, targets=target,
nbins=20 if not is_factor else 1 + frame[column].nlevels()[0])[0])
encoded_col = tmp.columns[0]
if is_factor:
plt.errorbar(factor_map(tmp.get(encoded_col)), tmp["mean_response"],
yerr=tmp["stddev_response"], fmt='o', color=color,
ecolor=color, elinewidth=3, capsize=0, markersize=10)
else:
plt.plot(tmp[encoded_col], tmp["mean_response"], color=color)
plt.fill_between(tmp[encoded_col], tmp["mean_response"] - tmp["stddev_response"],
tmp["mean_response"] + tmp["stddev_response"], color=color, alpha=0.2)
_add_histogram(frame, column)
if row_index is None:
plt.title("Partial Dependence plot for \"{}\"{}".format(
column,
" with target = \"{}\"".format(target[0]) if target else ""
))
plt.ylabel("Mean Response")
else:
if is_factor:
plt.axvline(factor_map([frame[row_index, column]]), c="k", linestyle="dotted",
label="Instance value")
else:
plt.axvline(frame[row_index, column], c="k", linestyle="dotted",
label="Instance value")
plt.title("Individual Conditional Expectation for column \"{}\" and row {}{}".format(
column,
row_index,
" with target = \"{}\"".format(target[0]) if target else ""
))
plt.ylabel("Response")
ax = plt.gca()
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
plt.xlabel(column)
plt.grid(True)
if is_factor:
plt.xticks(rotation=45, rotation_mode="anchor", ha="right")
plt.tight_layout()
fig = plt.gcf()
return fig
def pd_multi_plot(
models, # type: Union[h2o.automl._base.H2OAutoMLBaseMixin, h2o.H2OFrame, List[h2o.model.model_base]]
frame, # type: h2o.H2OFrame
column, # type: str
best_of_family=True, # type: bool
row_index=None, # type: Optional[int]
target=None, # type: Optional[str]
max_levels=30, # type: int
figsize=(16, 9), # type: Union[Tuple[float], List[float]]
colormap="Dark2", # type: str
markers=["o", "v", "s", "P", "*", "D", "X", "^", "<", ">", "."] # type: List[str]
): # type: (...) -> plt.Figure
"""
Plot partial dependencies of a variable across multiple models.
Partial dependence plot (PDP) gives a graphical depiction of the marginal effect of a variable
on the response. The effect of a variable is measured in change in the mean response.
PDP assumes independence between the feature for which the PDP is computed and the rest of the features.
:param models: a list of H2O models, an H2O AutoML instance, or an H2OFrame with a 'model_id' column (e.g. H2OAutoML leaderboard)
:param frame: H2OFrame
:param column: string containing column name
:param best_of_family: if True, show only the best models per family
:param row_index: if None, do partial dependence, if integer, do individual
conditional expectation for the row specified by this integer
:param target: (only for multinomial classification) the target class for which the plot should be created
:param max_levels: maximum number of factor levels to show
:param figsize: figure size; passed directly to matplotlib
:param colormap: colormap name
:param markers: list of markers to use for factors; when the available markers run out, the last marker in
this list is reused
:returns: a matplotlib figure object
:examples:
>>> import h2o
>>> from h2o.automl import H2OAutoML
>>>
>>> h2o.init()
>>>
>>> # Import the wine dataset into H2O:
>>> f = "https://h2o-public-test-data.s3.amazonaws.com/smalldata/wine/winequality-redwhite-no-BOM.csv"
>>> df = h2o.import_file(f)
>>>
>>> # Set the response
>>> response = "quality"
>>>
>>> # Split the dataset into a train and test set:
>>> train, test = df.split_frame([0.8])
>>>
>>> # Train an H2OAutoML
>>> aml = H2OAutoML(max_models=10)
>>> aml.train(y=response, training_frame=train)
>>>
>>> # Create a partial dependence plot
>>> aml.pd_multi_plot(test, column="alcohol")
"""
plt = get_matplotlib_pyplot(False, raise_if_not_available=True)
if target is not None:
if isinstance(target, (list, tuple)):
if len(target) > 1:
raise ValueError("Only one target can be specified!")
target = target[0]
target = [target]
if frame.type(column) == "string":
raise ValueError("String columns are not supported!")
if _is_automl_or_leaderboard(models):
all_models = _get_model_ids_from_automl_or_leaderboard(models)
else:
all_models = models
is_factor = frame[column].isfactor()[0]
if is_factor:
if frame[column].nlevels()[0] > max_levels:
levels = _get_top_n_levels(frame[column], max_levels)
if row_index is not None:
levels = list(set(levels + [frame[row_index, column]]))
frame = frame[(frame[column].isin(levels)), :]
# decrease the number of levels to the actual number of levels in the subset
frame[column] = frame[column].ascharacter().asfactor()
if best_of_family:
models = _first_of_family(all_models)
else:
models = all_models
models = [m if isinstance(m, h2o.model.ModelBase) else h2o.get_model(m) for m in models]
colors = plt.get_cmap(colormap, len(models))(list(range(len(models))))
with no_progress():
plt.figure(figsize=figsize)
is_factor = frame[column].isfactor()[0]
if is_factor:
factor_map = _factor_mapper(NumpyFrame(frame[column]).from_factor_to_num(column))
marker_map = dict(zip(range(len(markers) - 1), markers[:-1]))
model_ids = _shorten_model_ids([model.model_id for model in models])
for i, model in enumerate(models):
tmp = NumpyFrame(
model.partial_plot(frame, cols=[column], plot=False,
row_index=row_index, targets=target,
nbins=20 if not is_factor else 1 + frame[column].nlevels()[0])[0])
encoded_col = tmp.columns[0]
if is_factor:
plt.scatter(factor_map(tmp.get(encoded_col)), tmp["mean_response"],
color=[colors[i]], label=model_ids[i],
marker=marker_map.get(i, markers[-1]))
else:
plt.plot(tmp[encoded_col], tmp["mean_response"], color=colors[i],
label=model_ids[i])
_add_histogram(frame, column)
if row_index is None:
plt.title("Partial Dependence plot for \"{}\"{}".format(
column,
" with target = \"{}\"".format(target[0]) if target else ""
))
plt.ylabel("Mean Response")
else:
if is_factor:
plt.axvline(factor_map([frame[row_index, column]]), c="k", linestyle="dotted",
label="Instance value")
else:
plt.axvline(frame[row_index, column], c="k", linestyle="dotted",
label="Instance value")
plt.title("Individual Conditional Expectation for column \"{}\" and row {}{}".format(
column,
row_index,
" with target = \"{}\"".format(target[0]) if target else ""
))
plt.ylabel("Response")
ax = plt.gca()
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.xlabel(column)
plt.grid(True)
if is_factor:
plt.xticks(rotation=45, rotation_mode="anchor", ha="right")
plt.tight_layout(rect=[0, 0, 0.8, 1])
fig = plt.gcf()
return fig
def ice_plot(
model, # type: h2o.model.ModelBase
frame, # type: h2o.H2OFrame
column, # type: str
target=None, # type: Optional[str]
max_levels=30, # type: int
figsize=(16, 9), # type: Union[Tuple[float], List[float]]
colormap="plasma", # type: str
): # type: (...) -> plt.Figure
"""
Plot Individual Conditional Expectations (ICE) for each decile
Individual conditional expectations (ICE) plot gives a graphical depiction of the marginal
effect of a variable on the response. An ICE plot is similar to a partial dependence plot (PDP):
the PDP shows the average effect of a feature, while the ICE plot shows the effect for a single
instance. The following plot shows the effect for each decile. In contrast to a partial
dependence plot, an ICE plot can provide more insight, especially when there is stronger feature interaction.
:param model: H2OModel
:param frame: H2OFrame
:param column: string containing column name
:param target: (only for multinomial classification) the target class for which the plot should be created
:param max_levels: maximum number of factor levels to show
:param figsize: figure size; passed directly to matplotlib
:param colormap: colormap name
:returns: a matplotlib figure object
:examples:
>>> import h2o
>>> from h2o.estimators import H2OGradientBoostingEstimator
>>>
>>> h2o.init()
>>>
>>> # Import the wine dataset into H2O:
>>> f = "https://h2o-public-test-data.s3.amazonaws.com/smalldata/wine/winequality-redwhite-no-BOM.csv"
>>> df = h2o.import_file(f)
>>>
>>> # Set the response
>>> response = "quality"
>>>
>>> # Split the dataset into a train and test set:
>>> train, test = df.split_frame([0.8])
>>>
>>> # Train a GBM
>>> gbm = H2OGradientBoostingEstimator()
>>> gbm.train(y=response, training_frame=train)
>>>
>>> # Create the individual conditional expectations plot
>>> gbm.ice_plot(test, column="alcohol")
"""
plt = get_matplotlib_pyplot(False, raise_if_not_available=True)
if target is not None:
if isinstance(target, (list, tuple)):
if len(target) > 1:
raise ValueError("Only one target can be specified!")
target = target[0]
target = [target]
if frame.type(column) == "string":
raise ValueError("String columns are not supported!")
with no_progress():
frame = frame.sort(model.actual_params["response_column"])
is_factor = frame[column].isfactor()[0]
if is_factor:
if frame[column].nlevels()[0] > max_levels:
levels = _get_top_n_levels(frame[column], max_levels)
frame = frame[(frame[column].isin(levels)), :]
# decrease the number of levels to the actual number of levels in the subset
frame[column] = frame[column].ascharacter().asfactor()
factor_map = _factor_mapper(NumpyFrame(frame[column]).from_factor_to_num(column))
deciles = [int(round(frame.nrow * dec / 10)) for dec in range(11)]
colors = plt.get_cmap(colormap, 11)(list(range(11)))
plt.figure(figsize=figsize)
for i, index in enumerate(deciles):
tmp = NumpyFrame(
model.partial_plot(
frame,
cols=[column],
plot=False,
row_index=index,
targets=target,
nbins=20 if not is_factor else 1 + frame[column].nlevels()[0]
)[0]
)
encoded_col = tmp.columns[0]
if is_factor:
plt.scatter(factor_map(tmp.get(encoded_col)), tmp["mean_response"],
color=[colors[i]],
label="{}th Percentile".format(i * 10))
else:
plt.plot(tmp[encoded_col], tmp["mean_response"], color=colors[i],
label="{}th Percentile".format(i * 10))
tmp = NumpyFrame(
model.partial_plot(
frame,
cols=[column],
plot=False,
targets=target,
nbins=20 if not is_factor else 1 + frame[column].nlevels()[0]
)[0]
)
if is_factor:
plt.scatter(factor_map(tmp.get(encoded_col)), tmp["mean_response"], color="k",
label="Partial Dependence")
else:
plt.plot(tmp[encoded_col], tmp["mean_response"], color="k", linestyle="dashed",
label="Partial Dependence")
_add_histogram(frame, column)
plt.title("Individual Conditional Expectation for \"{}\"\non column \"{}\"{}".format(
model.model_id,
column,
" with target = \"{}\"".format(target[0]) if target else ""
))
ax = plt.gca()
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.grid(True)
if is_factor:
plt.xticks(rotation=45, rotation_mode="anchor", ha="right")
plt.tight_layout(rect=[0, 0, 0.85, 1])
fig = plt.gcf()
return fig
def _has_varimp(model):
# type: (h2o.model.ModelBase) -> bool
"""
Does model have varimp?
:param model: model or a string containing model_id
:returns: bool
"""
assert isinstance(model, h2o.model.ModelBase)
# check for cases when variable importance is disabled or
# when a model is stopped sooner than calculating varimp (xgboost can rarely have no varimp).
output = model._model_json["output"]
return output.get("variable_importances") is not None
def _is_automl_or_leaderboard(obj):
# type: (object) -> bool
"""
Is obj an H2OAutoML object or a leaderboard?
:param obj: object to test
:return: bool
"""
return (
isinstance(obj, h2o.automl._base.H2OAutoMLBaseMixin) or
(isinstance(obj, h2o.H2OFrame) and "model_id" in obj.columns)
)
def _get_model_ids_from_automl_or_leaderboard(automl_or_leaderboard, filter_=lambda _: True):
# type: (object) -> List[str]
"""
Get model ids from H2OAutoML object or leaderboard
:param automl_or_leaderboard: an H2OAutoML object or a leaderboard frame
:param filter_: a predicate used to filter model ids; signature (model_id) -> bool
:return: List[str]
"""
leaderboard = (automl_or_leaderboard.leaderboard
if isinstance(automl_or_leaderboard, h2o.automl._base.H2OAutoMLBaseMixin)
else automl_or_leaderboard)
return [model_id[0] for model_id in leaderboard[:, "model_id"].as_data_frame(use_pandas=False, header=False)
if filter_(model_id[0])]
def _get_models_from_automl_or_leaderboard(automl_or_leaderboard, filter_=lambda _: True):
# type: (object) -> Generator[h2o.model.ModelBase, None, None]
"""
Get models from H2OAutoML object or leaderboard
:param automl_or_leaderboard: AutoML
:param filter_: a predicate used to filter models. Signature of the filter is (model) -> bool.
:return: Generator[h2o.model.ModelBase, None, None]
"""
models = (h2o.get_model(model_id) for model_id in _get_model_ids_from_automl_or_leaderboard(automl_or_leaderboard))
return (model for model in models if filter_(model))
def _get_xy(model):
# type: (h2o.model.ModelBase) -> Tuple[List[str], str]
"""
Get features (x) and the response column (y).
:param model: H2O Model
:returns: tuple (x, y)
"""
names = model._model_json["output"]["original_names"] or model._model_json["output"]["names"]
y = model.actual_params["response_column"]
not_x = [
y,
# if there is no fold column, fold_column is set to None, thus using "or {}" instead of the second argument of dict.get
(model.actual_params.get("fold_column") or {}).get("column_name"),
(model.actual_params.get("weights_column") or {}).get("column_name"),
(model.actual_params.get("offset_column") or {}).get("column_name"),
] + (model.actual_params.get("ignored_columns") or [])
x = [feature for feature in names if feature not in not_x]
return x, y
def _consolidate_varimps(model):
# type (h2o.model.ModelBase) -> Dict
"""
Get variable importances just for the columns that are present in the data set, i.e.,
when an encoded variables such as "column_name.level_name" are encountered, those variable
importances are summed to "column_name" variable.
:param model: H2O Model
:returns: dictionary with variable importances
"""
x, y = _get_xy(model)
varimp = {line[0]: line[3] for line in model.varimp()}
consolidated_varimps = {k: v for k, v in varimp.items() if k in x}
to_process = {k: v for k, v in varimp.items() if k not in x}
domain_mapping = _get_domain_mapping(model)
encoded_cols = ["{}.{}".format(name, domain)
for name, domains in domain_mapping.items()
if domains is not None
for domain in domains + ["missing(NA)"]]
if len(encoded_cols) > len(set(encoded_cols)):
duplicates = encoded_cols[:]
for x in set(encoded_cols):
duplicates.remove(x)
warnings.warn("Ambiguous encoding of the column x category pairs: {}".format(set(duplicates)))
varimp_to_col = {"{}.{}".format(name, domain): name
for name, domains in domain_mapping.items()
if domains is not None
for domain in domains + ["missing(NA)"]
}
for feature in to_process.keys():
if feature in varimp_to_col:
column = varimp_to_col[feature]
consolidated_varimps[column] = consolidated_varimps.get(column, 0) + to_process[feature]
else:
raise RuntimeError("Cannot find feature {}".format(feature))
total_value = sum(consolidated_varimps.values())
if total_value != 1:
consolidated_varimps = {k: v / total_value for k, v in consolidated_varimps.items()}
for col in x:
if col not in consolidated_varimps:
consolidated_varimps[col] = 0
return consolidated_varimps
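# Illustration with hypothetical importances: if the percentage importances from model.varimp()
# are {"color.red": 0.2, "color.blue": 0.1, "size": 0.7} and "color" is a factor column, the
# consolidated result is {"color": 0.3, "size": 0.7} (already summing to 1, so no rescaling happens).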
# This plot is meant to be used only in the explain module.
# It provides the same capabilities as `model.varimp_plot` but without
# forcing the "Agg" backend or showing the plot.
# It also mimics the look and feel of the rest of the explain plots.
def _varimp_plot(model, figsize, num_of_features=None):
# type: (h2o.model.ModelBase, Tuple[float, float], Optional[int]) -> matplotlib.pyplot.Figure
"""
Variable importance plot.
:param model: H2O model
:param figsize: Figure size
:param num_of_features: Maximum number of variables to plot. Defaults to 10.
:return: a matplotlib figure object
"""
plt = get_matplotlib_pyplot(False, raise_if_not_available=True)
importances = model.varimp(use_pandas=False)
feature_labels = [tup[0] for tup in importances]
val = [tup[2] for tup in importances]
pos = range(len(feature_labels))[::-1]
if num_of_features is None:
num_of_features = min(len(val), 10)
plt.figure(figsize=figsize)
plt.barh(pos[0:num_of_features], val[0:num_of_features], align="center",
height=0.8, color="#1F77B4", edgecolor="none")
plt.yticks(pos[0:num_of_features], feature_labels[0:num_of_features])
plt.ylim([min(pos[0:num_of_features]) - 1, max(pos[0:num_of_features]) + 1])
plt.title("Variable Importance for \"{}\"".format(model.model_id))
plt.xlabel("Variable Importance")
plt.ylabel("Variable")
plt.grid()
plt.gca().set_axisbelow(True)
plt.tight_layout()
fig = plt.gcf()
return fig
def _interpretable(model):
# type: (Union[str, h2o.model.ModelBase]) -> bool
"""
Returns True if model_id is easily interpretable.
:param model: model or a string containing a model_id
:returns: bool
"""
return _get_algorithm(model) in ["glm", "gam", "rulefit"]
def _flatten_list(items):
# type: (list) -> Generator[Any, None, None]
"""
Flatten nested lists.
:param items: a list potentially containing other lists
:returns: flattened list
"""
for x in items:
if isinstance(x, list):
for xx in _flatten_list(x):
yield xx
else:
yield x
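# Usage example: list(_flatten_list([1, [2, [3, 4]], 5])) == [1, 2, 3, 4, 5]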
def _calculate_clustering_indices(matrix):
# type: (np.ndarray) -> list
"""
Get a hierarchical clustering leaves order calculated from the clustering of columns.
:param matrix: numpy.ndarray
:returns: list of indices of columns
"""
cols = matrix.shape[1]
dist = np.zeros((cols, cols))
for x in range(cols):
for y in range(cols):
if x < y:
dist[x, y] = np.sum(np.power(matrix[:, x] - matrix[:, y], 2))
dist[y, x] = dist[x, y]
elif x == y:
dist[x, x] = float("inf")
indices = [[i] for i in range(cols)]
for i in range(cols - 1):
idx = np.argmin(dist)
x = idx % cols
y = idx // cols
assert x != y
indices[x].append(indices[y])
indices[y] = []
dist[x, :] = np.min(dist[[x, y], :], axis=0)
dist[y, :] = float("inf")
dist[:, y] = float("inf")
dist[x, x] = float("inf")
result = list(_flatten_list(indices))
assert len(result) == cols
return result
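# A small illustration (not part of the original module; the helper name and matrix are
# hypothetical): columns 0 and 2 below are identical, so the returned leaf order always
# places those two column indices next to each other.
def _clustering_indices_sketch():
    matrix = np.array([[1.0, 0.0, 1.0],
                       [2.0, 5.0, 2.0],
                       [3.0, 1.0, 3.0]])
    return _calculate_clustering_indices(matrix)  # a permutation of [0, 1, 2]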
def varimp_heatmap(
models, # type: Union[h2o.automl._base.H2OAutoMLBaseMixin, h2o.H2OFrame, List[h2o.model.ModelBase]]
top_n=None, # type: Option[int]
figsize=(16, 9), # type: Tuple[float]
cluster=True, # type: bool
colormap="RdYlBu_r" # type: str
):
# type: (...) -> plt.Figure
"""
Variable Importance Heatmap across a group of models
Variable importance heatmap shows variable importance across multiple models.
Some models in H2O return variable importance for one-hot (binary indicator)
encoded versions of categorical columns (e.g. Deep Learning, XGBoost). In order
for the variable importance of categorical columns to be compared across all model
types we compute a summarization of the variable importance across all one-hot
encoded features and return a single variable importance for the original categorical
feature. By default, the models and variables are ordered by their similarity.
:param models: a list of H2O models, an H2O AutoML instance, or an H2OFrame with a 'model_id' column (e.g. H2OAutoML leaderboard)
:param top_n: DEPRECATED. use just top n models (applies only when used with H2OAutoML)
:param figsize: figure size; passed directly to matplotlib
:param cluster: if True, cluster the models and variables
:param colormap: colormap to use
:returns: a matplotlib figure object
:examples:
>>> import h2o
>>> from h2o.automl import H2OAutoML
>>>
>>> h2o.init()
>>>
>>> # Import the wine dataset into H2O:
>>> f = "https://h2o-public-test-data.s3.amazonaws.com/smalldata/wine/winequality-redwhite-no-BOM.csv"
>>> df = h2o.import_file(f)
>>>
>>> # Set the response
>>> response = "quality"
>>>
>>> # Split the dataset into a train and test set:
>>> train, test = df.split_frame([0.8])
>>>
>>> # Train an H2OAutoML
>>> aml = H2OAutoML(max_models=10)
>>> aml.train(y=response, training_frame=train)
>>>
>>> # Create the variable importance heatmap
>>> aml.varimp_heatmap()
"""
plt = get_matplotlib_pyplot(False, raise_if_not_available=True)
if isinstance(models, h2o.automl._base.H2OAutoMLBaseMixin):
models = _check_deprecated_top_n_argument(models, top_n)
varimps, model_ids, x = varimp(models=models, cluster=cluster, use_pandas=False)
plt.figure(figsize=figsize)
plt.imshow(varimps, cmap=plt.get_cmap(colormap))
plt.xticks(range(len(model_ids)), model_ids,
rotation=45, rotation_mode="anchor", ha="right")
plt.yticks(range(len(x)), x)
plt.colorbar()
plt.xlabel("Model Id")
plt.ylabel("Feature")
plt.title("Variable Importance Heatmap")
plt.grid(False)
fig = plt.gcf()
return fig
def varimp(
models, # type: Union[h2o.automl._base.H2OAutoMLBaseMixin, h2o.H2OFrame, List[h2o.model.ModelBase]]
cluster=True, # type: bool
use_pandas=True # type: bool
):
# type: (...) -> Union[pandas.DataFrame, Tuple[numpy.ndarray, List[str], List[str]]]
"""
Get data that are used to build varimp_heatmap plot.
:param models: a list of H2O models, an H2O AutoML instance, or an H2OFrame with a 'model_id' column (e.g. H2OAutoML leaderboard)
:param cluster: if True, cluster the models and variables
:param use_pandas: if True, try to return pandas DataFrame. Otherwise return a triple (varimps, model_ids, variable_names)
:returns: either pandas DataFrame (if use_pandas == True) or a triple (varimps, model_ids, variable_names)
"""
if _is_automl_or_leaderboard(models):
models = list(_get_models_from_automl_or_leaderboard(models, filter_=_has_varimp))
else:
# Filter out models that don't have varimp
models = [model for model in models if _has_varimp(model)]
if len(models) == 0:
raise RuntimeError("No model with variable importance")
varimps = [_consolidate_varimps(model) for model in models]
x, y = _get_xy(models[0])
varimps = np.array([[varimp[col] for col in x] for varimp in varimps])
if cluster and len(models) > 2:
order = _calculate_clustering_indices(varimps)
x = [x[i] for i in order]
varimps = varimps[:, order]
varimps = varimps.transpose()
order = _calculate_clustering_indices(varimps)
models = [models[i] for i in order]
varimps = varimps[:, order]
else:
varimps = varimps.transpose()
model_ids = _shorten_model_ids([model.model_id for model in models])
if use_pandas:
import pandas
return pandas.DataFrame(varimps, columns=model_ids, index=x)
"""
July 2021
This code calculates the sand use for concrete and glass production in the building sector in 26 global regions. For the original code & latest updates, see: https://github.com/
The dynamic material model is based on the BUMA model developed by <NAME>, Leiden University, the Netherlands. For the original code & latest updates, see: https://github.com/SPDeetman/BUMA
The dynamic stock model is based on the ODYM model developed by <NAME>, Uni Freiburg, Germany. For the original code & latest updates, see: https://github.com/IndEcol/ODYM
*NOTE: Insert location of GloBus-main folder in 'dir_path' (line 23) before running the code
Software version: Python 3.7
"""
#%% GENERAL SETTING & STATEMENTS
import pandas as pd
import numpy as np
import os
import ctypes
import math
# set current directory
dir_path = ""
os.chdir(dir_path)
# Set general constants
regions = 26 #26 IMAGE regions
building_types = 4 #4 building types: detached, semi-detached, appartments & high-rise
area = 2 #2 areas: rural & urban
materials = 2 #2 materials: Concrete, Glass
inflation = 1.2423 #gdp/cap inflation correction between 2005 (IMAGE data) & 2016 (commercial calibration) according to https://www.bls.gov/data/inflation_calculator.htm
# Set Flags for sensitivity analysis
flag_alpha = 0 # switch for the sensitivity analysis on alpha, if 1 the maximum alpha is 10% above the maximum found in the data
flag_ExpDec = 0 # switch to choose between Gompertz and Exponential Decay function for commercial floorspace demand (0 = Gompertz, 1 = Expdec)
flag_Normal = 0 # switch to choose between Weibull and Normal lifetime distributions (0 = Weibull, 1 = Normal)
flag_Mean = 0 # switch to choose between material intensity settings (0 = regular regional, 1 = mean, 2 = high, 3 = low, 4 = median)
#%%Load files & arrange tables ----------------------------------------------------
if flag_Mean == 0:
file_addition = ''
elif flag_Mean == 1:
file_addition = '_mean'
elif flag_Mean ==2:
file_addition = '_high'
elif flag_Mean ==3:
file_addition = '_low'
else:
file_addition = '_median'
# Load Population, Floor area, and Service value added (SVA) Database csv-files
pop = pd.read_csv('files_population/pop.csv', index_col = [0]) # Pop; unit: million of people; meaning: global population (over time, by region)
rurpop = pd.read_csv('files_population/rurpop.csv', index_col = [0]) # rurpop; unit: %; meaning: the share of people living in rural areas (over time, by region)
housing_type = pd.read_csv('files_population/Housing_type.csv') # Housing_type; unit: %; meaning: the share of the NUMBER OF PEOPLE living in a particular building type (by region & by area)
floorspace = pd.read_csv('files_floor_area/res_Floorspace.csv') # Floorspace; unit: m2/capita; meaning: the average m2 per capita (over time, by region & area)
floorspace = floorspace[floorspace.Region != regions + 1] # Remove empty region 27
avg_m2_cap = pd.read_csv('files_floor_area/Average_m2_per_cap.csv') # Avg_m2_cap; unit: m2/capita; meaning: average square meters per person (by region & area (rural/urban) & building type)
sva_pc_2005 = pd.read_csv('files_GDP/sva_pc.csv', index_col = [0])
sva_pc = sva_pc_2005 * inflation # we use the inflation corrected SVA to adjust for the fact that IMAGE provides gdp/cap in 2005 US$
# load material density data csv-files
building_materials_concrete = pd.read_csv('files_material_density/Building_materials_concrete' + file_addition + '.csv') # Building_materials; unit: kg/m2; meaning: the average material use per square meter (by building type, by region & by area)
building_materials_glass = pd.read_csv('files_material_density/Building_materials_glass' + file_addition + '.csv') # Building_materials; unit: kg/m2; meaning: the average material use per square meter (by building type, by region & by area)
materials_commercial_concrete = pd.read_csv('files_material_density/materials_commercial_concrete' + file_addition + '.csv', index_col = [0]) # 7 building materials in 4 commercial building types; unit: kg/m2; meaning: the average material use per square meter (by commercial building type)
materials_commercial_glass = pd.read_csv('files_material_density/materials_commercial_glass' + file_addition + '.csv', index_col = [0]) # 7 building materials in 4 commercial building types; unit: kg/m2; meaning: the average material use per square meter (by commercial building type)
# Load fitted regression parameters for comercial floor area estimate
if flag_alpha == 0:
gompertz = pd.read_csv('files_floor_area/files_commercial/Gompertz_parameters.csv', index_col = [0])
else:
gompertz = pd.read_csv('files_floor_area/files_commercial/Gompertz_parameters_alpha.csv', index_col = [0])
# Ensure full time series for pop & rurpop (interpolation, some years are missing)
rurpop2 = rurpop.reindex(list(range(1970,2061,1))).interpolate()
pop2 = pop.reindex(list(range(1970,2061,1))).interpolate()
# Remove 1st year, to ensure same Table size as floorspace data (from 1971)
pop2 = pop2.iloc[1:]
rurpop2 = rurpop2.iloc[1:]
#pre-calculate urban population
urbpop = 1 - rurpop2 # urban population is 1 - the fraction of people living in rural areas (rurpop)
# Restructure the tables to regions as columns; for floorspace
floorspace_rur = floorspace.pivot(index="t", columns="Region", values="Rural")
floorspace_urb = floorspace.pivot(index="t", columns="Region", values="Urban")
# Restructuring for square meters (m2/cap)
avg_m2_cap_urb = avg_m2_cap.loc[avg_m2_cap['Area'] == 'Urban'].drop('Area', 1).T # Remove area column & Transpose
avg_m2_cap_urb.columns = list(map(int,avg_m2_cap_urb.iloc[0])) # name columns according to the row containing the region-labels
avg_m2_cap_urb2 = avg_m2_cap_urb.drop(['Region']) # Remove idle row
avg_m2_cap_rur = avg_m2_cap.loc[avg_m2_cap['Area'] == 'Rural'].drop('Area', 1).T # Remove area column & Transpose
avg_m2_cap_rur.columns = list(map(int,avg_m2_cap_rur.iloc[0])) # name columns according to the row containing the region-labels
avg_m2_cap_rur2 = avg_m2_cap_rur.drop(['Region']) # Remove idle row
# Restructuring for the Housing types (% of population living in them)
housing_type_urb = housing_type.loc[housing_type['Area'] == 'Urban'].drop('Area', 1).T # Remove area column & Transpose
housing_type_urb.columns = list(map(int,housing_type_urb.iloc[0])) # name columns according to the row containing the region-labels
housing_type_urb2 = housing_type_urb.drop(['Region']) # Remove idle row
housing_type_rur = housing_type.loc[housing_type['Area'] == 'Rural'].drop('Area', 1).T # Remove area column & Transpose
housing_type_rur.columns = list(map(int,housing_type_rur.iloc[0])) # name columns according to the row containing the region-labels
housing_type_rur2 = housing_type_rur.drop(['Region']) # Remove idle row
#%% COMMERCIAL building space demand (stock) calculated from Gomperz curve (fitted, using separate regression model)
# Select Gompertz curve parameters for the total commercial m2 demand (stock)
alpha = gompertz['All']['a'] if flag_ExpDec == 0 else 25.601
beta = gompertz['All']['b'] if flag_ExpDec == 0 else 28.431
gamma = gompertz['All']['c'] if flag_ExpDec == 0 else 0.0415
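# Sketch of the saturation curves used in the loop below (restating the code; SVA per capita in $/cap):
# Gompertz: m2_cap(SVA) = alpha * exp(-beta * exp(-(gamma/1000) * SVA))
# Exponential decay variant (flag_ExpDec == 1): m2_cap(SVA) = max(0.542, alpha - beta * exp(-(gamma/1000) * SVA))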
# find the total commercial m2 stock (in Millions of m2)
commercial_m2_cap = pd.DataFrame(index=range(1971,2061), columns=range(1,27))
for year in range(1971,2061):
for region in range(1,27):
if flag_ExpDec == 0:
commercial_m2_cap[region][year] = alpha * math.exp(-beta * math.exp((-gamma/1000) * sva_pc[str(region)][year]))
else:
commercial_m2_cap[region][year] = max(0.542, alpha - beta * math.exp((-gamma/1000) * sva_pc[str(region)][year]))
# Subdivide the total across Offices, Retail+, Govt+ & Hotels+
commercial_m2_cap_office = pd.DataFrame(index=range(1971,2061), columns=range(1,27)) # Offices
commercial_m2_cap_retail = pd.DataFrame(index=range(1971,2061), columns=range(1,27)) # Retail & Warehouses
commercial_m2_cap_hotels = pd.DataFrame(index=range(1971,2061), columns=range(1,27)) # Hotels & Restaurants
commercial_m2_cap_govern = pd.DataFrame(index=range(1971,2061), columns=range(1,27)) # Hospitals, Education, Government & Transportation
minimum_com_office = 25
minimum_com_retail = 25
minimum_com_hotels = 25
minimum_com_govern = 25
for year in range(1971,2061):
for region in range(1,27):
# get the square meter per capita floorspace for 4 commercial applications
office = gompertz['Office']['a'] * math.exp(-gompertz['Office']['b'] * math.exp((-gompertz['Office']['c']/1000) * sva_pc[str(region)][year]))
retail = gompertz['Retail+']['a'] * math.exp(-gompertz['Retail+']['b'] * math.exp((-gompertz['Retail+']['c']/1000) * sva_pc[str(region)][year]))
hotels = gompertz['Hotels+']['a'] * math.exp(-gompertz['Hotels+']['b'] * math.exp((-gompertz['Hotels+']['c']/1000) * sva_pc[str(region)][year]))
govern = gompertz['Govt+']['a'] * math.exp(-gompertz['Govt+']['b'] * math.exp((-gompertz['Govt+']['c']/1000) * sva_pc[str(region)][year]))
# calculate minimum values for later use in the historic tail (Region 20: China @ 134 $/cap SVA)
minimum_com_office = office if office < minimum_com_office else minimum_com_office
minimum_com_retail = retail if retail < minimum_com_retail else minimum_com_retail
minimum_com_hotels = hotels if hotels < minimum_com_hotels else minimum_com_hotels
minimum_com_govern = govern if govern < minimum_com_govern else minimum_com_govern
# Then use the ratio's to subdivide the total commercial floorspace into 4 categories
commercial_sum = office + retail + hotels + govern
commercial_m2_cap_office[region][year] = commercial_m2_cap[region][year] * (office/commercial_sum)
commercial_m2_cap_retail[region][year] = commercial_m2_cap[region][year] * (retail/commercial_sum)
commercial_m2_cap_hotels[region][year] = commercial_m2_cap[region][year] * (hotels/commercial_sum)
commercial_m2_cap_govern[region][year] = commercial_m2_cap[region][year] * (govern/commercial_sum)
#%% Add historic tail (1720-1970) + 100 yr initial --------------------------------------------
# load historic population development
hist_pop = pd.read_csv('files_initial_stock/hist_pop.csv', index_col = [0]) # initial population as a percentage of the 1970 population; unit: %; according to the Maddison Project Database (MPD) 2018 (Groningen University)
# Determine the historical average global trend in floorspace/cap & the regional rural population share based on the first 10 years of IMAGE data
floorspace_urb_trend_by_region = [0 for j in range(0,26)]
floorspace_rur_trend_by_region = [0 for j in range(0,26)]
rurpop_trend_by_region = [0 for j in range(0,26)]
commercial_m2_cap_office_trend = [0 for j in range(0,26)]
commercial_m2_cap_retail_trend = [0 for j in range(0,26)]
commercial_m2_cap_hotels_trend = [0 for j in range(0,26)]
commercial_m2_cap_govern_trend = [0 for j in range(0,26)]
# For the RESIDENTIAL & COMMERCIAL floorspace: Derive the annual trend (in m2/cap) over the initial 10 years of IMAGE data
for region in range(1,27):
floorspace_urb_trend_by_year = [0 for i in range(0,10)]
floorspace_rur_trend_by_year = [0 for i in range(0,10)]
commercial_m2_cap_office_trend_by_year = [0 for j in range(0,10)]
commercial_m2_cap_retail_trend_by_year = [0 for i in range(0,10)]
commercial_m2_cap_hotels_trend_by_year = [0 for j in range(0,10)]
commercial_m2_cap_govern_trend_by_year = [0 for i in range(0,10)]
# Get the growth by year (for the first 10 years)
for year in range(1970,1980):
floorspace_urb_trend_by_year[year-1970] = floorspace_urb[region][year+1]/floorspace_urb[region][year+2]
floorspace_rur_trend_by_year[year-1970] = floorspace_rur[region][year+1]/floorspace_rur[region][year+2]
commercial_m2_cap_office_trend_by_year[year-1970] = commercial_m2_cap_office[region][year+1]/commercial_m2_cap_office[region][year+2]
commercial_m2_cap_retail_trend_by_year[year-1970] = commercial_m2_cap_retail[region][year+1]/commercial_m2_cap_retail[region][year+2]
commercial_m2_cap_hotels_trend_by_year[year-1970] = commercial_m2_cap_hotels[region][year+1]/commercial_m2_cap_hotels[region][year+2]
commercial_m2_cap_govern_trend_by_year[year-1970] = commercial_m2_cap_govern[region][year+1]/commercial_m2_cap_govern[region][year+2]
rurpop_trend_by_region[region-1] = ((1-(rurpop[str(region)][1980]/rurpop[str(region)][1970]))/10)*100
floorspace_urb_trend_by_region[region-1] = sum(floorspace_urb_trend_by_year)/10
floorspace_rur_trend_by_region[region-1] = sum(floorspace_rur_trend_by_year)/10
commercial_m2_cap_office_trend[region-1] = sum(commercial_m2_cap_office_trend_by_year)/10
commercial_m2_cap_retail_trend[region-1] = sum(commercial_m2_cap_retail_trend_by_year)/10
commercial_m2_cap_hotels_trend[region-1] = sum(commercial_m2_cap_hotels_trend_by_year)/10
commercial_m2_cap_govern_trend[region-1] = sum(commercial_m2_cap_govern_trend_by_year)/10
# Average global annual decline in floorspace/cap in %, rural: 1%; urban 1.2%; commercial: 1.26-2.18% /yr
floorspace_urb_trend_global = (1-(sum(floorspace_urb_trend_by_region)/26))*100 # in % decrease per annum
floorspace_rur_trend_global = (1-(sum(floorspace_rur_trend_by_region)/26))*100 # in % decrease per annum
commercial_m2_cap_office_trend_global = (1-(sum(commercial_m2_cap_office_trend)/26))*100 # in % decrease per annum
commercial_m2_cap_retail_trend_global = (1-(sum(commercial_m2_cap_retail_trend)/26))*100 # in % decrease per annum
commercial_m2_cap_hotels_trend_global = (1-(sum(commercial_m2_cap_hotels_trend)/26))*100 # in % decrease per annum
commercial_m2_cap_govern_trend_global = (1-(sum(commercial_m2_cap_govern_trend)/26))*100 # in % decrease per annum
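# Worked example of the arithmetic above (comment only): if the average year-on-
# year ratio over 1971-1980 is 0.99, the implied historic decline used for the
# back-cast below is (1 - 0.99) * 100 = 1% per annum.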
# define historic floorspace (1820-1970) in m2/cap
floorspace_urb_1820_1970 = pd.DataFrame(index=range(1820,1971), columns=floorspace_urb.columns)
floorspace_rur_1820_1970 = pd.DataFrame(index=range(1820,1971), columns=floorspace_rur.columns)
rurpop_1820_1970 = pd.DataFrame(index=range(1820,1971), columns=rurpop.columns)
pop_1820_1970 = pd.DataFrame(index=range(1820,1971), columns=pop2.columns)
commercial_m2_cap_office_1820_1970 = pd.DataFrame(index=range(1820,1971), columns=commercial_m2_cap_office.columns)
commercial_m2_cap_retail_1820_1970 = pd.DataFrame(index=range(1820,1971), columns=commercial_m2_cap_retail.columns)
commercial_m2_cap_hotels_1820_1970 = pd.DataFrame(index=range(1820,1971), columns=commercial_m2_cap_hotels.columns)
commercial_m2_cap_govern_1820_1970 = pd.DataFrame(index=range(1820,1971), columns=commercial_m2_cap_govern.columns)
# Find minimum or maximum values in the original IMAGE data (just for residential; commercial minimum values were calculated above)
minimum_urb_fs = floorspace_urb.values.min() # Region 20: China
minimum_rur_fs = floorspace_rur.values.min() # Region 20: China
maximum_rurpop = rurpop.values.max() # Region 9 : Eastern Africa
# Calculate the actual values used between 1820 & 1970, given the trends & the min/max values
for region in range(1,regions+1):
for year in range(1820,1971):
# MAX of 1) the MINimum value & 2) the calculated value
floorspace_urb_1820_1970[region][year] = max(minimum_urb_fs, floorspace_urb[region][1971] * ((100-floorspace_urb_trend_global)/100)**(1971-year)) # single global value for average annual Decrease
floorspace_rur_1820_1970[region][year] = max(minimum_rur_fs, floorspace_rur[region][1971] * ((100-floorspace_rur_trend_global)/100)**(1971-year)) # single global value for average annual Decrease
commercial_m2_cap_office_1820_1970[region][year] = max(minimum_com_office, commercial_m2_cap_office[region][1971] * ((100-commercial_m2_cap_office_trend_global)/100)**(1971-year)) # single global value for average annual Decrease
commercial_m2_cap_retail_1820_1970[region][year] = max(minimum_com_retail, commercial_m2_cap_retail[region][1971] * ((100-commercial_m2_cap_retail_trend_global)/100)**(1971-year)) # single global value for average annual Decrease
commercial_m2_cap_hotels_1820_1970[region][year] = max(minimum_com_hotels, commercial_m2_cap_hotels[region][1971] * ((100-commercial_m2_cap_hotels_trend_global)/100)**(1971-year)) # single global value for average annual Decrease
commercial_m2_cap_govern_1820_1970[region][year] = max(minimum_com_govern, commercial_m2_cap_govern[region][1971] * ((100-commercial_m2_cap_govern_trend_global)/100)**(1971-year)) # single global value for average annual Decrease
# MIN of 1) the MAXimum value & 2) the calculated value
rurpop_1820_1970[str(region)][year] = min(maximum_rurpop, rurpop[str(region)][1970] * ((100+rurpop_trend_by_region[region-1])/100)**(1970-year)) # average annual INcrease by region
# just add the tail to the population (no min/max & trend is pre-calculated in hist_pop)
pop_1820_1970[str(region)][year] = hist_pop[str(region)][year] * pop[str(region)][1970]
urbpop_1820_1970 = 1 - rurpop_1820_1970
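# The exponential back-cast used in the loops above can be expressed as a small
# helper; a sketch only, with a hypothetical name, while the explicit statements
# above remain the version actually used.
def backcast_m2_cap(value_1971, annual_decline_pct, year, floor_value):
    # value in the given year, assuming a constant annual decline (%) since 1971
    # and a lower bound taken from the minimum observed in the IMAGE data
    return max(floor_value, value_1971 * ((100 - annual_decline_pct) / 100) ** (1971 - year))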
# To avoid a full model start-up shock in 1820 (all required stock built in year 1), we assume another tail that increases linearly to the 1820 value over a 100-year period, so that 1720 = 0
floorspace_urb_1721_1820 = pd.DataFrame(index=range(1721,1820), columns=floorspace_urb.columns)
floorspace_rur_1721_1820 = pd.DataFrame(index=range(1721,1820), columns=floorspace_rur.columns)
rurpop_1721_1820 = pd.DataFrame(index=range(1721,1820), columns=rurpop.columns)
urbpop_1721_1820 = pd.DataFrame(index=range(1721,1820), columns=urbpop.columns)
pop_1721_1820 = pd.DataFrame(index=range(1721,1820), columns=pop2.columns)
commercial_m2_cap_office_1721_1820 = pd.DataFrame(index=range(1721,1820), columns=commercial_m2_cap_office.columns)
commercial_m2_cap_retail_1721_1820 = pd.DataFrame(index=range(1721,1820), columns=commercial_m2_cap_retail.columns)
commercial_m2_cap_hotels_1721_1820 = pd.DataFrame(index=range(1721,1820), columns=commercial_m2_cap_hotels.columns)
commercial_m2_cap_govern_1721_1820 = pd.DataFrame(index=range(1721,1820), columns=commercial_m2_cap_govern.columns)
for region in range(1,27):
for time in range(1721,1820):
        # MAX(0, ...) guards against floating-point deviations that would otherwise lead to negative stock in some cases
floorspace_urb_1721_1820[int(region)][time] = max(0.0, floorspace_urb_1820_1970[int(region)][1820] - (floorspace_urb_1820_1970[int(region)][1820]/100)*(1820-time))
floorspace_rur_1721_1820[int(region)][time] = max(0.0, floorspace_rur_1820_1970[int(region)][1820] - (floorspace_rur_1820_1970[int(region)][1820]/100)*(1820-time))
rurpop_1721_1820[str(region)][time] = max(0.0, rurpop_1820_1970[str(region)][1820] - (rurpop_1820_1970[str(region)][1820]/100)*(1820-time))
urbpop_1721_1820[str(region)][time] = max(0.0, urbpop_1820_1970[str(region)][1820] - (urbpop_1820_1970[str(region)][1820]/100)*(1820-time))
pop_1721_1820[str(region)][time] = max(0.0, pop_1820_1970[str(region)][1820] - (pop_1820_1970[str(region)][1820]/100)*(1820-time))
commercial_m2_cap_office_1721_1820[int(region)][time] = max(0.0, commercial_m2_cap_office_1820_1970[region][1820] - (commercial_m2_cap_office_1820_1970[region][1820]/100)*(1820-time))
commercial_m2_cap_retail_1721_1820[int(region)][time] = max(0.0, commercial_m2_cap_retail_1820_1970[region][1820] - (commercial_m2_cap_retail_1820_1970[region][1820]/100)*(1820-time))
commercial_m2_cap_hotels_1721_1820[int(region)][time] = max(0.0, commercial_m2_cap_hotels_1820_1970[region][1820] - (commercial_m2_cap_hotels_1820_1970[region][1820]/100)*(1820-time))
commercial_m2_cap_govern_1721_1820[int(region)][time] = max(0.0, commercial_m2_cap_govern_1820_1970[region][1820] - (commercial_m2_cap_govern_1820_1970[region][1820]/100)*(1820-time))
# combine the historic tails (1721-1820 & 1820-1970) with the IMAGE data here
rurpop_tail = pd.concat([rurpop_1721_1820, rurpop_1820_1970, rurpop2])
urbpop_tail = pd.concat([urbpop_1721_1820, urbpop_1820_1970, urbpop])
pop_tail = pd.concat([pop_1721_1820, pop_1820_1970, pop2])
floorspace_urb_tail = pd.concat([floorspace_urb_1721_1820, floorspace_urb_1820_1970, floorspace_urb])
floorspace_rur_tail = pd.concat([floorspace_rur_1721_1820, floorspace_rur_1820_1970, floorspace_rur])
commercial_m2_cap_office_tail = pd.concat([commercial_m2_cap_office_1721_1820, commercial_m2_cap_office_1820_1970, commercial_m2_cap_office])
commercial_m2_cap_retail_tail = pd.concat([commercial_m2_cap_retail_1721_1820, commercial_m2_cap_retail_1820_1970, commercial_m2_cap_retail])
commercial_m2_cap_hotels_tail = pd.concat([commercial_m2_cap_hotels_1721_1820, commercial_m2_cap_hotels_1820_1970, commercial_m2_cap_hotels])
commercial_m2_cap_govern_tail = pd.concat([commercial_m2_cap_govern_1721_1820, commercial_m2_cap_govern_1820_1970, commercial_m2_cap_govern])
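# Quick consistency check (sketch, assuming the IMAGE data start in 1971): the
# combined tails should cover one row per year from 1721 onwards, with no
# duplicated or unordered years.
if pop_tail.index.duplicated().any() or not pop_tail.index.is_monotonic_increasing:
    print('Warning: combined population tail has duplicated or unordered years')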
#%% SQUARE METER Calculations -----------------------------------------------------------
# adjust the shares for urban/rural separately (the shares in the csv are percentages of the total (Rur + Urb); the urban shares are rescaled to sum to 1, and likewise for rural)
housing_type_rur3 = housing_type_rur2/housing_type_rur2.sum()
housing_type_urb3 = housing_type_urb2/housing_type_urb2.sum()
# calculate the total rural/urban population (pop2 = millions of people, rurpop2 = % of people living in rural areas)
people_rur = pd.DataFrame(rurpop_tail.values*pop_tail.values, columns=pop_tail.columns, index=pop_tail.index)
people_urb = pd.DataFrame(urbpop_tail.values*pop_tail.values, columns=pop_tail.columns, index=pop_tail.index)
# calculate the total number of people (urban/rural) BY HOUSING TYPE (the sum of det,sem,app & hig equals the total population e.g. people_rur)
people_det_rur = pd.DataFrame(housing_type_rur3.iloc[0].values*people_rur.values, columns=people_rur.columns, index=people_rur.index)
people_sem_rur = pd.DataFrame(housing_type_rur3.iloc[1].values*people_rur.values, columns=people_rur.columns, index=people_rur.index)
people_app_rur = pd.DataFrame(housing_type_rur3.iloc[2].values*people_rur.values, columns=people_rur.columns, index=people_rur.index)
people_hig_rur = pd.DataFrame(housing_type_rur3.iloc[3].values*people_rur.values, columns=people_rur.columns, index=people_rur.index)
people_det_urb = pd.DataFrame(housing_type_urb3.iloc[0].values*people_urb.values, columns=people_urb.columns, index=people_urb.index)
people_sem_urb = pd.DataFrame(housing_type_urb3.iloc[1].values*people_urb.values, columns=people_urb.columns, index=people_urb.index)
people_app_urb = pd.DataFrame(housing_type_urb3.iloc[2].values*people_urb.values, columns=people_urb.columns, index=people_urb.index)
people_hig_urb = pd.DataFrame(housing_type_urb3.iloc[3].values*people_urb.values, columns=people_urb.columns, index=people_urb.index)
# calculate the total m2 (urban/rural) BY HOUSING TYPE (= nr. of people * OWN avg m2, so not based on IMAGE)
m2_unadjusted_det_rur = pd.DataFrame(avg_m2_cap_rur2.iloc[0].values * people_det_rur.values, columns=people_det_rur.columns, index=people_det_rur.index)
m2_unadjusted_sem_rur = pd.DataFrame(avg_m2_cap_rur2.iloc[1].values * people_sem_rur.values, columns=people_sem_rur.columns, index=people_sem_rur.index)
m2_unadjusted_app_rur = pd.DataFrame(avg_m2_cap_rur2.iloc[2].values * people_app_rur.values, columns=people_app_rur.columns, index=people_app_rur.index)
m2_unadjusted_hig_rur = pd.DataFrame(avg_m2_cap_rur2.iloc[3].values * people_hig_rur.values, columns=people_hig_rur.columns, index=people_hig_rur.index)
m2_unadjusted_det_urb = pd.DataFrame(avg_m2_cap_urb2.iloc[0].values * people_det_urb.values, columns=people_det_urb.columns, index=people_det_urb.index)
m2_unadjusted_sem_urb = pd.DataFrame(avg_m2_cap_urb2.iloc[1].values * people_sem_urb.values, columns=people_sem_urb.columns, index=people_sem_urb.index)
m2_unadjusted_app_urb = pd.DataFrame(avg_m2_cap_urb2.iloc[2].values * people_app_urb.values, columns=people_app_urb.columns, index=people_app_urb.index)
m2_unadjusted_hig_urb = pd.DataFrame(avg_m2_cap_urb2.iloc[3].values * people_hig_urb.values, columns=people_hig_urb.columns, index=people_hig_urb.index)
# Define empty dataframes for m2 adjustments
total_m2_adj_rur = pd.DataFrame(index=m2_unadjusted_det_rur.index, columns=m2_unadjusted_det_rur.columns)
total_m2_adj_urb = pd.DataFrame(index=m2_unadjusted_det_urb.index, columns=m2_unadjusted_det_urb.columns)
# Sum all square meters in Rural area
for j in range(1721,2061,1):
for i in range(1,27,1):
total_m2_adj_rur.loc[j,str(i)] = m2_unadjusted_det_rur.loc[j,str(i)] + m2_unadjusted_sem_rur.loc[j,str(i)] + m2_unadjusted_app_rur.loc[j,str(i)] + m2_unadjusted_hig_rur.loc[j,str(i)]
# Sum all square meters in Urban area
for j in range(1721,2061,1):
for i in range(1,27,1):
total_m2_adj_urb.loc[j,str(i)] = m2_unadjusted_det_urb.loc[j,str(i)] + m2_unadjusted_sem_urb.loc[j,str(i)] + m2_unadjusted_app_urb.loc[j,str(i)] + m2_unadjusted_hig_urb.loc[j,str(i)]
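# A vectorized alternative to the two nested loops above (sketch only, assuming
# all eight m2_unadjusted_* frames share the same year index & region columns);
# the loop results above remain the values used below.
total_m2_adj_rur_alt = (m2_unadjusted_det_rur + m2_unadjusted_sem_rur
                        + m2_unadjusted_app_rur + m2_unadjusted_hig_rur)
total_m2_adj_urb_alt = (m2_unadjusted_det_urb + m2_unadjusted_sem_urb
                        + m2_unadjusted_app_urb + m2_unadjusted_hig_urb)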
# average square meter per person implied by our OWN data
avg_m2_cap_adj_rur = pd.DataFrame(total_m2_adj_rur.values / people_rur.values, columns=people_rur.columns, index=people_rur.index)
avg_m2_cap_adj_urb = pd.DataFrame(total_m2_adj_urb.values / people_urb.values, columns=people_urb.columns, index=people_urb.index)
# factor to correct square meters per capita so that we respect the IMAGE data in terms of total m2, but we use our own distinction between Building types
m2_cap_adj_fact_rur = pd.DataFrame(floorspace_rur_tail.values / avg_m2_cap_adj_rur.values, columns=floorspace_rur_tail.columns, index=floorspace_rur_tail.index)
m2_cap_adj_fact_urb = pd.DataFrame(floorspace_urb_tail.values / avg_m2_cap_adj_urb.values, columns=floorspace_urb_tail.columns, index=floorspace_urb_tail.index)
# All m2 by region (in millions), Building_type & year (using the correction factor, to comply with IMAGE avg m2/cap)
m2_det_rur = pd.DataFrame(m2_unadjusted_det_rur.values * m2_cap_adj_fact_rur.values, columns=m2_cap_adj_fact_rur.columns, index=m2_cap_adj_fact_rur.index)
m2_sem_rur = pd.DataFrame(m2_unadjusted_sem_rur.values * m2_cap_adj_fact_rur.values, columns=m2_cap_adj_fact_rur.columns, index=m2_cap_adj_fact_rur.index)
m2_app_rur = pd.DataFrame(m2_unadjusted_app_rur.values * m2_cap_adj_fact_rur.values, columns=m2_cap_adj_fact_rur.columns, index=m2_cap_adj_fact_rur.index)
m2_hig_rur = pd.DataFrame(m2_unadjusted_hig_rur.values * m2_cap_adj_fact_rur.values, columns=m2_cap_adj_fact_rur.columns, index=m2_cap_adj_fact_rur.index)
m2_det_urb = pd.DataFrame(m2_unadjusted_det_urb.values * m2_cap_adj_fact_urb.values, columns=m2_cap_adj_fact_urb.columns, index=m2_cap_adj_fact_urb.index)
m2_sem_urb = pd.DataFrame(m2_unadjusted_sem_urb.values * m2_cap_adj_fact_urb.values, columns=m2_cap_adj_fact_urb.columns, index=m2_cap_adj_fact_urb.index)
m2_app_urb = pd.DataFrame(m2_unadjusted_app_urb.values * m2_cap_adj_fact_urb.values, columns=m2_cap_adj_fact_urb.columns, index=m2_cap_adj_fact_urb.index)
m2_hig_urb = pd.DataFrame(m2_unadjusted_hig_urb.values * m2_cap_adj_fact_urb.values, columns=m2_cap_adj_fact_urb.columns, index=m2_cap_adj_fact_urb.index)
# Add a checksum to see if calculations based on adjusted OWN avg m2 (by building type) now match the total m2 according to IMAGE.
m2_sum_rur_OWN = m2_det_rur + m2_sem_rur + m2_app_rur + m2_hig_rur
m2_sum_rur_IMAGE = pd.DataFrame(floorspace_rur_tail.values*people_rur.values, columns=m2_sum_rur_OWN.columns, index=m2_sum_rur_OWN.index)
m2_checksum = m2_sum_rur_OWN - m2_sum_rur_IMAGE
if abs(m2_checksum.sum().sum()) > 0.0000001:  # tolerance for floating-point error
    ctypes.windll.user32.MessageBoxW(0, "IMAGE & OWN m2 sums do not match", "Warning", 1)  # Windows-only message box
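# Cross-platform alternative to the Windows-only message box above (sketch,
# using the same tolerance); emits a console warning instead of a pop-up.
import warnings
if abs(m2_checksum.sum().sum()) > 0.0000001:
    warnings.warn('IMAGE & OWN m2 sums do not match')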
# total RESIDENTIAL square meters by region
m2 = m2_det_rur + m2_sem_rur + m2_app_rur + m2_hig_rur + m2_det_urb + m2_sem_urb + m2_app_urb + m2_hig_urb
# Total m2 for COMMERCIAL Buildings
commercial_m2_office = pd.DataFrame(commercial_m2_cap_office_tail.values * pop_tail.values, columns=m2_cap_adj_fact_urb.columns, index=m2_cap_adj_fact_urb.index)
commercial_m2_retail = pd.DataFrame(commercial_m2_cap_retail_tail.values * pop_tail.values, columns=m2_cap_adj_fact_urb.columns, index=m2_cap_adj_fact_urb.index)
commercial_m2_hotels = pd.DataFrame(commercial_m2_cap_hotels_tail.values * pop_tail.values, columns=m2_cap_adj_fact_urb.columns, index=m2_cap_adj_fact_urb.index)
commercial_m2_govern = pd.DataFrame(commercial_m2_cap_govern_tail.values * pop_tail.values, columns=m2_cap_adj_fact_urb.columns, index=m2_cap_adj_fact_urb.index)
#%% MATERIAL STOCK CALCULATIONS
#rural concrete stock
material_concrete_det=building_materials_concrete.loc[(building_materials_concrete['Building_type']=='Detached')]
material_concrete_det=material_concrete_det.set_index('Region')
material_concrete_det=material_concrete_det.drop(['Building_type'],axis=1)
material_concrete_det=pd.DataFrame(material_concrete_det.values.T, index=material_concrete_det.columns, columns=material_concrete_det.index)
a=m2_det_rur.index
material_concrete_det=material_concrete_det.set_index(a)
kg_det_rur_concrete=m2_det_rur*material_concrete_det
material_concrete_sem=building_materials_concrete.loc[(building_materials_concrete['Building_type']=='Semi-detached')]
material_concrete_sem=material_concrete_sem.set_index('Region')
material_concrete_sem=material_concrete_sem.drop(['Building_type'],axis=1)
material_concrete_sem=pd.DataFrame(material_concrete_sem.values.T, index=material_concrete_sem.columns, columns=material_concrete_sem.index)
a=m2_sem_rur.index
material_concrete_sem=material_concrete_sem.set_index(a)
kg_sem_rur_concrete=m2_sem_rur*material_concrete_sem
material_concrete_app=building_materials_concrete.loc[(building_materials_concrete['Building_type']=='Appartments')]
material_concrete_app=material_concrete_app.set_index('Region')
material_concrete_app=material_concrete_app.drop(['Building_type'],axis=1)
material_concrete_app=pd.DataFrame(material_concrete_app.values.T, index=material_concrete_app.columns, columns=material_concrete_app.index)
a=m2_app_rur.index
material_concrete_app=material_concrete_app.set_index(a)
kg_app_rur_concrete=m2_app_rur*material_concrete_app
material_concrete_hig=building_materials_concrete.loc[(building_materials_concrete['Building_type']=='High-rise')]
material_concrete_hig=material_concrete_hig.set_index('Region')
material_concrete_hig=material_concrete_hig.drop(['Building_type'],axis=1)
material_concrete_hig=pd.DataFrame(material_concrete_hig.values.T, index=material_concrete_hig.columns, columns=material_concrete_hig.index)
a=m2_hig_rur.index
material_concrete_hig=material_concrete_hig.set_index(a)
kg_hig_rur_concrete=m2_hig_rur*material_concrete_hig
#urban concrete stock
material_concrete_det=building_materials_concrete.loc[(building_materials_concrete['Building_type']=='Detached')]
material_concrete_det=material_concrete_det.set_index('Region')
material_concrete_det=material_concrete_det.drop(['Building_type'],axis=1)
material_concrete_det=pd.DataFrame(material_concrete_det.values.T, index=material_concrete_det.columns, columns=material_concrete_det.index)
a=m2_det_urb.index
material_concrete_det=material_concrete_det.set_index(a)
kg_det_urb_concrete=m2_det_urb*material_concrete_det
material_concrete_sem=building_materials_concrete.loc[(building_materials_concrete['Building_type']=='Semi-detached')]
material_concrete_sem=material_concrete_sem.set_index('Region')
material_concrete_sem=material_concrete_sem.drop(['Building_type'],axis=1)
material_concrete_sem=pd.DataFrame(material_concrete_sem.values.T, index=material_concrete_sem.columns, columns=material_concrete_sem.index)
a=m2_sem_urb.index
material_concrete_sem=material_concrete_sem.set_index(a)
kg_sem_urb_concrete=m2_sem_urb*material_concrete_sem
material_concrete_app=building_materials_concrete.loc[(building_materials_concrete['Building_type']=='Appartments')]
material_concrete_app=material_concrete_app.set_index('Region')
material_concrete_app=material_concrete_app.drop(['Building_type'],axis=1)
material_concrete_app=pd.DataFrame(material_concrete_app.values.T, index=material_concrete_app.columns, columns=material_concrete_app.index)
a=m2_app_urb.index
material_concrete_app=material_concrete_app.set_index(a)
kg_app_urb_concrete=m2_app_urb*material_concrete_app
material_concrete_hig=building_materials_concrete.loc[(building_materials_concrete['Building_type']=='High-rise')]
material_concrete_hig=material_concrete_hig.set_index('Region')
material_concrete_hig=material_concrete_hig.drop(['Building_type'],axis=1)
material_concrete_hig=pd.DataFrame(material_concrete_hig.values.T, index=material_concrete_hig.columns, columns=material_concrete_hig.index)
a=m2_hig_urb.index
material_concrete_hig=material_concrete_hig.set_index(a)
kg_hig_urb_concrete=m2_hig_urb*material_concrete_hig
#rural glass stock
material_glass_det=building_materials_glass.loc[(building_materials_glass['Building_type']=='Detached')]
material_glass_det=material_glass_det.set_index('Region')
material_glass_det=material_glass_det.drop(['Building_type'],axis=1)
material_glass_det=pd.DataFrame(material_glass_det.values.T, index=material_glass_det.columns, columns=material_glass_det.index)
a=m2_det_rur.index
material_glass_det=material_glass_det.set_index(a)
kg_det_rur_glass=m2_det_rur*material_glass_det
material_glass_sem=building_materials_glass.loc[(building_materials_glass['Building_type']=='Semi-detached')]
material_glass_sem=material_glass_sem.set_index('Region')
material_glass_sem=material_glass_sem.drop(['Building_type'],axis=1)
material_glass_sem=pd.DataFrame(material_glass_sem.values.T, index=material_glass_sem.columns, columns=material_glass_sem.index)
a=m2_sem_rur.index
material_glass_sem=material_glass_sem.set_index(a)
kg_sem_rur_glass=m2_sem_rur*material_glass_sem
material_glass_app=building_materials_glass.loc[(building_materials_glass['Building_type']=='Appartments')]
material_glass_app=material_glass_app.set_index('Region')
material_glass_app=material_glass_app.drop(['Building_type'],axis=1)
material_glass_app=pd.DataFrame(material_glass_app.values.T, index=material_glass_app.columns, columns=material_glass_app.index)
a=m2_app_rur.index
material_glass_app=material_glass_app.set_index(a)
kg_app_rur_glass=m2_app_rur*material_glass_app
material_glass_hig=building_materials_glass.loc[(building_materials_glass['Building_type']=='High-rise')]
material_glass_hig=material_glass_hig.set_index('Region')
material_glass_hig=material_glass_hig.drop(['Building_type'],axis=1)
material_glass_hig=pd.DataFrame(material_glass_hig.values.T, index=material_glass_hig.columns, columns=material_glass_hig.index)
a=m2_hig_rur.index
material_glass_hig=material_glass_hig.set_index(a)
kg_hig_rur_glass=m2_hig_rur*material_glass_hig
#urban glass stock
material_glass_det=building_materials_glass.loc[(building_materials_glass['Building_type']=='Detached')]
material_glass_det=material_glass_det.set_index('Region')
material_glass_det=material_glass_det.drop(['Building_type'],axis=1)
material_glass_det=pd.DataFrame(material_glass_det.values.T, index=material_glass_det.columns, columns=material_glass_det.index)
a=m2_det_urb.index
material_glass_det=material_glass_det.set_index(a)
kg_det_urb_glass=m2_det_urb*material_glass_det
material_glass_sem=building_materials_glass.loc[(building_materials_glass['Building_type']=='Semi-detached')]
material_glass_sem=material_glass_sem.set_index('Region')
material_glass_sem=material_glass_sem.drop(['Building_type'],axis=1)
material_glass_sem=pd.DataFrame(material_glass_sem.values.T, index=material_glass_sem.columns, columns=material_glass_sem.index)
a=m2_sem_urb.index
material_glass_sem=material_glass_sem.set_index(a)
kg_sem_urb_glass=m2_sem_urb*material_glass_sem
material_glass_app=building_materials_glass.loc[(building_materials_glass['Building_type']=='Appartments')]
material_glass_app=material_glass_app.set_index('Region')
material_glass_app=material_glass_app.drop(['Building_type'],axis=1)
material_glass_app=pd.DataFrame(material_glass_app.values.T, index=material_glass_app.columns, columns=material_glass_app.index)
a=m2_app_urb.index
material_glass_app=material_glass_app.set_index(a)
kg_app_urb_glass=m2_app_urb*material_glass_app
material_glass_hig=building_materials_glass.loc[(building_materials_glass['Building_type']=='High-rise')]
material_glass_hig=material_glass_hig.set_index('Region')
material_glass_hig=material_glass_hig.drop(['Building_type'],axis=1)
material_glass_hig=pd.DataFrame(material_glass_hig.values.T, index=material_glass_hig.columns, columns=material_glass_hig.index)
a=m2_hig_urb.index
material_glass_hig=material_glass_hig.set_index(a)
kg_hig_urb_glass=m2_hig_urb*material_glass_hig
# Commercial Building materials (in Million kg)
#commercial concrete stock
materials_concrete_office=materials_commercial_concrete.loc[(materials_commercial_concrete['Building_type']=='Offices')]
materials_concrete_office=materials_concrete_office.drop(['Building_type'],axis=1)
materials_concrete_office=pd.DataFrame(materials_concrete_office.values.T, index=materials_concrete_office.columns, columns=materials_concrete_office.index)
a= commercial_m2_office.index
materials_concrete_office=materials_concrete_office.set_index(a)
kg_office_concrete=commercial_m2_office*materials_concrete_office
materials_concrete_retail=materials_commercial_concrete.loc[(materials_commercial_concrete['Building_type']=='Retail+')]
materials_concrete_retail=materials_concrete_retail.drop(['Building_type'],axis=1)
materials_concrete_retail=pd.DataFrame(materials_concrete_retail.values.T, index=materials_concrete_retail.columns, columns=materials_concrete_retail.index)
a= commercial_m2_retail.index
materials_concrete_retail=materials_concrete_retail.set_index(a)
kg_retail_concrete=commercial_m2_retail*materials_concrete_retail
materials_concrete_hotels=materials_commercial_concrete.loc[(materials_commercial_concrete['Building_type']=='Hotels+')]
materials_concrete_hotels=materials_concrete_hotels.drop(['Building_type'],axis=1)
materials_concrete_hotels=pd.DataFrame(materials_concrete_hotels.values.T, index=materials_concrete_hotels.columns, columns=materials_concrete_hotels.index)
a= commercial_m2_hotels.index
materials_concrete_hotels=materials_concrete_hotels.set_index(a)
kg_hotels_concrete=commercial_m2_hotels*materials_concrete_hotels
materials_concrete_govern=materials_commercial_concrete.loc[(materials_commercial_concrete['Building_type']=='Govt+')]
materials_concrete_govern=materials_concrete_govern.drop(['Building_type'],axis=1)
materials_concrete_govern=pd.DataFrame(materials_concrete_govern.values.T, index=materials_concrete_govern.columns, columns=materials_concrete_govern.index)
a= commercial_m2_govern.index
materials_concrete_govern=materials_concrete_govern.set_index(a)
kg_govern_concrete=commercial_m2_govern*materials_concrete_govern
#commercial glass stock
materials_glass_office=materials_commercial_glass.loc[(materials_commercial_glass['Building_type']=='Offices')]
materials_glass_office=materials_glass_office.drop(['Building_type'],axis=1)
materials_glass_office=pd.DataFrame(materials_glass_office.values.T, index=materials_glass_office.columns, columns=materials_glass_office.index)
a= commercial_m2_office.index
materials_glass_office=materials_glass_office.set_index(a)
kg_office_glass=commercial_m2_office*materials_glass_office
materials_glass_retail=materials_commercial_glass.loc[(materials_commercial_glass['Building_type']=='Retail+')]
materials_glass_retail=materials_glass_retail.drop(['Building_type'],axis=1)
materials_glass_retail=pd.DataFrame(materials_glass_retail.values.T, index=materials_glass_retail.columns, columns=materials_glass_retail.index)
a= commercial_m2_retail.index
materials_glass_retail=materials_glass_retail.set_index(a)
kg_retail_glass=commercial_m2_retail*materials_glass_retail
materials_glass_hotels=materials_commercial_glass.loc[(materials_commercial_glass['Building_type']=='Hotels+')]
materials_glass_hotels=materials_glass_hotels.drop(['Building_type'],axis=1)
materials_glass_hotels=pd.DataFrame(materials_glass_hotels.values.T, index=materials_glass_hotels.columns, columns=materials_glass_hotels.index)
a= commercial_m2_hotels.index
materials_glass_hotels=materials_glass_hotels.set_index(a)
kg_hotels_glass=commercial_m2_hotels*materials_glass_hotels
materials_glass_govern=materials_commercial_glass.loc[(materials_commercial_glass['Building_type']=='Govt+')]
materials_glass_govern=materials_glass_govern.drop(['Building_type'],axis=1)
materials_glass_govern=pd.DataFrame(materials_glass_govern.values.T, index=materials_glass_govern.columns, columns=materials_glass_govern.index)
a= commercial_m2_govern.index
materials_glass_govern=materials_glass_govern.set_index(a)
kg_govern_glass=commercial_m2_govern*materials_glass_govern
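# The repeated reshaping above (filter one building type, put regions in the
# columns and years in the index) could be wrapped in a single helper; a sketch
# only, with a hypothetical name, assuming each intensity table has a
# 'Building_type' column (and, for the residential tables, a 'Region' column):
def intensity_by_year(intensity_df, building_type, year_index):
    sel = intensity_df.loc[intensity_df['Building_type'] == building_type]
    if 'Region' in sel.columns:
        sel = sel.set_index('Region')
    sel = sel.drop(['Building_type'], axis=1)
    sel = pd.DataFrame(sel.values.T, index=sel.columns, columns=sel.index)
    return sel.set_index(year_index)
# e.g. material_concrete_det could equivalently be built as:
# intensity_by_year(building_materials_concrete, 'Detached', m2_det_rur.index)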
# Summing commercial material stock (Million kg)
kg_concrete_comm = kg_office_concrete + kg_retail_concrete + kg_hotels_concrete + kg_govern_concrete
kg_glass_comm = kg_office_glass + kg_retail_glass + kg_hotels_glass + kg_govern_glass
# Summing across RESIDENTIAL building types (millions of kg, in stock)
kg_concrete_urb = kg_hig_urb_concrete + kg_app_urb_concrete + kg_sem_urb_concrete + kg_det_urb_concrete
kg_concrete_rur = kg_hig_rur_concrete + kg_app_rur_concrete + kg_sem_rur_concrete + kg_det_rur_concrete
kg_glass_urb = kg_hig_urb_glass + kg_app_urb_glass + kg_sem_urb_glass + kg_det_urb_glass
kg_glass_rur = kg_hig_rur_glass + kg_app_rur_glass + kg_sem_rur_glass + kg_det_rur_glass
# Sums for total building material use (in-stock, millions of kg)
kg_concrete = kg_concrete_urb + kg_concrete_rur + kg_concrete_comm
kg_glass = kg_glass_urb + kg_glass_rur + kg_glass_comm
#%% INFLOW & OUTFLOW
import sys
sys.path.append(dir_path)
import dynamic_stock_model
from dynamic_stock_model import DynamicStockModel as DSM
idx = pd.IndexSlice # needed for slicing multi-index
#if flag_Normal == 0:
# lifetimes_DB = pd.read_csv('files_lifetimes\lifetimes.csv') # Weibull parameter database (shape & scale parameters given by region, area & building-type)
#else:
# lifetimes_DB = pd.read_csv('files_lifetimes\lifetimes_normal.csv') # Normal distribution database (Mean & StDev parameters given by region, area & building-type, though only defined by region for now)
lifetimes_DB_shape = pd.read_csv(dir_path + '/files_lifetimes/lifetimes_shape.csv')
lifetimes_DB_scale= pd.read_csv(dir_path + '/files_lifetimes/lifetimes_scale.csv')
# actual inflow calculations
def inflow_outflown(shape, scale, stock, length): # length is the number of years in the entire period
out_oc_reg = pd.DataFrame(index=range(1721,2061), columns= pd.MultiIndex.from_product([list(range(1,27)), list(range(1721,2061))])) # Multi-index columns (region & years), to contain a matrix of years*years for each region
out_i_reg = pd.DataFrame(index=range(1721,2061), columns=range(1,27))
out_s_reg = pd.DataFrame(index=range(1721,2061), columns=range(1,27))
out_o_reg = pd.DataFrame(index=range(1721,2061), columns=range(1,27))
for region in range(1,27):
shape_list = shape.loc[region]
scale_list = scale.loc[region]
if flag_Normal == 0:
DSMforward = DSM(t = np.arange(0,length,1), s=np.array(stock[region]), lt = {'Type': 'Weibull', 'Shape': np.array(shape_list), 'Scale': np.array(scale_list)})
else:
DSMforward = DSM(t = np.arange(0,length,1), s=np.array(stock[region]), lt = {'Type': 'FoldNorm', 'Mean': np.array(shape_list), 'StdDev': np.array(scale_list)}) # shape & scale list are actually Mean & StDev here
out_sc, out_oc, out_i = DSMforward.compute_stock_driven_model(NegativeInflowCorrect = True)
out_i_reg[region] = out_i
out_oc[out_oc < 0] = 0 # remove negative outflow, replace by 0
out_oc_reg.loc[:,idx[region,:]] = out_oc
# If you are only interested in the total outflow, you can sum the outflow by cohort
out_o_reg[region] = out_oc.sum(axis=1)
out_o_reg_corr = out_o_reg._get_numeric_data()
out_o_reg_corr[out_o_reg_corr < 0] = 0
out_s_reg[region] = out_sc.sum(axis=1) #Stock
return out_i_reg, out_oc_reg
length = len(m2_hig_urb[1]) # = 340
nindex=np.arange(0,26)
shape_selection_m2_det_rur = lifetimes_DB_shape.loc[(lifetimes_DB_shape['Area'] == 'Rural') & (lifetimes_DB_shape['Type'] == 'Detached')]
scale_selection_m2_det_rur = lifetimes_DB_scale.loc[(lifetimes_DB_scale['Area'] == 'Rural') & (lifetimes_DB_scale['Type'] == 'Detached')]
shape_selection_m2_sem_rur = lifetimes_DB_shape.loc[(lifetimes_DB_shape['Area'] == 'Rural') & (lifetimes_DB_shape['Type'] == 'Semi-detached')]
scale_selection_m2_sem_rur = lifetimes_DB_scale.loc[(lifetimes_DB_scale['Area'] == 'Rural') & (lifetimes_DB_scale['Type'] == 'Semi-detached')]
shape_selection_m2_app_rur = lifetimes_DB_shape.loc[(lifetimes_DB_shape['Area'] == 'Rural') & (lifetimes_DB_shape['Type'] == 'Appartments')]
scale_selection_m2_app_rur = lifetimes_DB_scale.loc[(lifetimes_DB_scale['Area'] == 'Rural') & (lifetimes_DB_scale['Type'] == 'Appartments')]
shape_selection_m2_hig_rur = lifetimes_DB_shape.loc[(lifetimes_DB_shape['Area'] == 'Rural') & (lifetimes_DB_shape['Type'] == 'High-rise')]
scale_selection_m2_hig_rur = lifetimes_DB_scale.loc[(lifetimes_DB_scale['Area'] == 'Rural') & (lifetimes_DB_scale['Type'] == 'High-rise')]
shape_selection_m2_det_rur=shape_selection_m2_det_rur.set_index('Region')
shape_selection_m2_det_rur=shape_selection_m2_det_rur.drop(['Type', 'Area'],axis=1)
scale_selection_m2_det_rur=scale_selection_m2_det_rur.set_index('Region')
scale_selection_m2_det_rur=scale_selection_m2_det_rur.drop(['Type', 'Area'],axis=1)
shape_selection_m2_sem_rur=shape_selection_m2_sem_rur.set_index('Region')
shape_selection_m2_sem_rur=shape_selection_m2_sem_rur.drop(['Type', 'Area'],axis=1)
scale_selection_m2_sem_rur=scale_selection_m2_sem_rur.set_index('Region')
scale_selection_m2_sem_rur=scale_selection_m2_sem_rur.drop(['Type', 'Area'],axis=1)
shape_selection_m2_app_rur=shape_selection_m2_app_rur.set_index('Region')
shape_selection_m2_app_rur=shape_selection_m2_app_rur.drop(['Type', 'Area'],axis=1)
scale_selection_m2_app_rur=scale_selection_m2_app_rur.set_index('Region')
scale_selection_m2_app_rur=scale_selection_m2_app_rur.drop(['Type', 'Area'],axis=1)
shape_selection_m2_hig_rur=shape_selection_m2_hig_rur.set_index('Region')
shape_selection_m2_hig_rur=shape_selection_m2_hig_rur.drop(['Type', 'Area'],axis=1)
scale_selection_m2_hig_rur=scale_selection_m2_hig_rur.set_index('Region')
scale_selection_m2_hig_rur=scale_selection_m2_hig_rur.drop(['Type', 'Area'],axis=1)
shape_selection_m2_det_urb=lifetimes_DB_shape.loc[(lifetimes_DB_shape['Area'] == 'Urban') & (lifetimes_DB_shape['Type'] == 'Detached')]
scale_selection_m2_det_urb=lifetimes_DB_scale.loc[(lifetimes_DB_scale['Area'] == 'Urban') & (lifetimes_DB_scale['Type'] == 'Detached')]
shape_selection_m2_sem_urb = lifetimes_DB_shape.loc[(lifetimes_DB_shape['Area'] == 'Urban') & (lifetimes_DB_shape['Type'] == 'Semi-detached')]
scale_selection_m2_sem_urb = lifetimes_DB_scale.loc[(lifetimes_DB_scale['Area'] == 'Urban') & (lifetimes_DB_scale['Type'] == 'Semi-detached')]
shape_selection_m2_app_urb =lifetimes_DB_shape.loc[(lifetimes_DB_shape['Area'] == 'Urban') & (lifetimes_DB_shape['Type'] == 'Appartments')]
scale_selection_m2_app_urb =lifetimes_DB_scale.loc[(lifetimes_DB_scale['Area'] == 'Urban') & (lifetimes_DB_scale['Type'] == 'Appartments')]
shape_selection_m2_hig_urb = lifetimes_DB_shape.loc[(lifetimes_DB_shape['Area'] == 'Urban') & (lifetimes_DB_shape['Type'] == 'High-rise')]
scale_selection_m2_hig_urb = lifetimes_DB_scale.loc[(lifetimes_DB_scale['Area'] == 'Urban') & (lifetimes_DB_scale['Type'] == 'High-rise')]
shape_selection_m2_det_urb=shape_selection_m2_det_urb.set_index('Region')
shape_selection_m2_det_urb=shape_selection_m2_det_urb.drop(['Type', 'Area'],axis=1)
scale_selection_m2_det_urb=scale_selection_m2_det_urb.set_index('Region')
scale_selection_m2_det_urb=scale_selection_m2_det_urb.drop(['Type', 'Area'],axis=1)
shape_selection_m2_sem_urb=shape_selection_m2_sem_urb.set_index('Region')
shape_selection_m2_sem_urb=shape_selection_m2_sem_urb.drop(['Type', 'Area'],axis=1)
scale_selection_m2_sem_urb=scale_selection_m2_sem_urb.set_index('Region')
scale_selection_m2_sem_urb=scale_selection_m2_sem_urb.drop(['Type', 'Area'],axis=1)
shape_selection_m2_app_urb=shape_selection_m2_app_urb.set_index('Region')
shape_selection_m2_app_urb=shape_selection_m2_app_urb.drop(['Type', 'Area'],axis=1)
scale_selection_m2_app_urb=scale_selection_m2_app_urb.set_index('Region')
scale_selection_m2_app_urb=scale_selection_m2_app_urb.drop(['Type', 'Area'],axis=1)
shape_selection_m2_hig_urb=shape_selection_m2_hig_urb.set_index('Region')
shape_selection_m2_hig_urb=shape_selection_m2_hig_urb.drop(['Type', 'Area'],axis=1)
scale_selection_m2_hig_urb=scale_selection_m2_hig_urb.set_index('Region')
scale_selection_m2_hig_urb=scale_selection_m2_hig_urb.drop(['Type', 'Area'],axis=1)
##
# Lifetime parameters for COMMERCIAL buildings (average lifetime = 45 yr), loaded from CSV
lifetimes_comm_shape = pd.read_csv(dir_path + '/files_lifetimes/lifetimes_shape_comm.csv')
lifetimes_comm_scale = pd.read_csv(dir_path + '/files_lifetimes/lifetimes_scale_comm.csv')
shape_comm = lifetimes_comm_shape.set_index('Region')
scale_comm = lifetimes_comm_scale.set_index('Region')
#Define the calculation of material outflow by cohort
def material_outflow(m2_outflow_cohort, material_density):
    emp = []
    for i in range(0,26):
        md = material_density.iloc[:,i]                      # material intensity of region i+1, indexed by (cohort) year
        m2 = m2_outflow_cohort.loc[:,(i+1,1721):(i+1,2060)]  # m2 outflow of region i+1; MultiIndex columns = (region, cohort year)
        m2.columns = md.index                                # relabel the cohort columns so they align with the intensity index
        material_outflow_cohort = m2*md                      # scale each cohort's outflow by the intensity of its construction year
        material_outflow_cohort_sum = material_outflow_cohort.sum(1)  # sum over cohorts -> total material outflow per year
        emp.append(material_outflow_cohort_sum)
    result = pd.DataFrame(emp)
    result.index = range(1, 27)
    return result.T
# call the actual stock model to derive inflow & outflow based on stock & lifetime
m2_det_rur_i, m2_det_rur_oc = inflow_outflown(shape_selection_m2_det_rur, scale_selection_m2_det_rur, m2_det_rur, length)
m2_sem_rur_i, m2_sem_rur_oc = inflow_outflown(shape_selection_m2_sem_rur, scale_selection_m2_sem_rur, m2_sem_rur, length)
m2_app_rur_i, m2_app_rur_oc = inflow_outflown(shape_selection_m2_app_rur, scale_selection_m2_app_rur, m2_app_rur, length)
m2_hig_rur_i, m2_hig_rur_oc = inflow_outflown(shape_selection_m2_hig_rur, scale_selection_m2_hig_rur, m2_hig_rur, length)
m2_det_urb_i, m2_det_urb_oc = inflow_outflown(shape_selection_m2_det_urb, scale_selection_m2_det_urb, m2_det_urb, length)
m2_sem_urb_i, m2_sem_urb_oc = inflow_outflown(shape_selection_m2_sem_urb, scale_selection_m2_sem_urb, m2_sem_urb, length)
m2_app_urb_i, m2_app_urb_oc = inflow_outflown(shape_selection_m2_app_urb, scale_selection_m2_app_urb, m2_app_urb, length)
m2_hig_urb_i, m2_hig_urb_oc = inflow_outflown(shape_selection_m2_hig_urb, scale_selection_m2_hig_urb, m2_hig_urb, length)
m2_office_i, m2_office_oc = inflow_outflown(shape_comm, scale_comm, commercial_m2_office, length)
m2_retail_i, m2_retail_oc = inflow_outflown(shape_comm, scale_comm, commercial_m2_retail, length)
m2_hotels_i, m2_hotels_oc = inflow_outflown(shape_comm, scale_comm, commercial_m2_hotels, length)
m2_govern_i, m2_govern_oc = inflow_outflown(shape_comm, scale_comm, commercial_m2_govern, length)
# total MILLIONS of square meters inflow
m2_res_i = m2_det_rur_i + m2_sem_rur_i + m2_app_rur_i + m2_hig_rur_i + m2_det_urb_i + m2_sem_urb_i + m2_app_urb_i + m2_hig_urb_i
m2_comm_i = m2_office_i + m2_retail_i + m2_hotels_i + m2_govern_i
kg_det_rur_concrete_i = m2_det_rur_i * material_concrete_det
kg_det_rur_glass_i = m2_det_rur_i * material_glass_det
kg_sem_rur_concrete_i = m2_sem_rur_i * material_concrete_sem
kg_sem_rur_glass_i = m2_sem_rur_i * material_glass_sem
kg_app_rur_concrete_i = m2_app_rur_i * material_concrete_app
kg_app_rur_glass_i = m2_app_rur_i * material_glass_app
kg_hig_rur_concrete_i = m2_hig_rur_i * material_concrete_hig
kg_hig_rur_glass_i = m2_hig_rur_i * material_glass_hig
# URBAN material inflow (millions of kgs)
kg_det_urb_concrete_i = m2_det_urb_i * material_concrete_det
kg_det_urb_glass_i = m2_det_urb_i * material_glass_det
kg_sem_urb_concrete_i = m2_sem_urb_i * material_concrete_sem
kg_sem_urb_glass_i = m2_sem_urb_i * material_glass_sem
kg_app_urb_concrete_i = m2_app_urb_i * material_concrete_app
kg_app_urb_glass_i = m2_app_urb_i * material_glass_app
kg_hig_urb_concrete_i = m2_hig_urb_i * material_concrete_hig
kg_hig_urb_glass_i = m2_hig_urb_i * material_glass_hig
# Commercial Building materials INFLOW (in Million kg)
kg_office_concrete_i = m2_office_i * materials_concrete_office
kg_office_glass_i = m2_office_i * materials_glass_office
kg_retail_concrete_i = m2_retail_i * materials_concrete_retail
kg_retail_glass_i = m2_retail_i * materials_glass_retail
kg_hotels_concrete_i = m2_hotels_i * materials_concrete_hotels
kg_hotels_glass_i = m2_hotels_i * materials_glass_hotels
kg_govern_concrete_i = m2_govern_i * materials_concrete_govern
kg_govern_glass_i = m2_govern_i * materials_glass_govern
#%% Material outflow
# RURAL material OUTflow (Millions of kgs = *1000 tons)
kg_det_rur_concrete_o = material_outflow(m2_det_rur_oc,material_concrete_det)
kg_det_rur_glass_o = material_outflow(m2_det_rur_oc, material_glass_det)
kg_sem_rur_concrete_o = material_outflow(m2_sem_rur_oc, material_concrete_sem)
kg_sem_rur_glass_o = material_outflow(m2_sem_rur_oc, material_glass_sem)
kg_app_rur_concrete_o = material_outflow(m2_app_rur_oc, material_concrete_app)
kg_app_rur_glass_o = material_outflow(m2_app_rur_oc, material_glass_app)
kg_hig_rur_concrete_o = material_outflow(m2_hig_rur_oc, material_concrete_hig)
kg_hig_rur_glass_o = material_outflow(m2_hig_rur_oc, material_glass_hig)
# URBAN material OUTflow (millions of kgs)
kg_det_urb_concrete_o = material_outflow(m2_det_urb_oc, material_concrete_det)
kg_det_urb_glass_o = material_outflow(m2_det_urb_oc, material_glass_det)
kg_sem_urb_concrete_o = material_outflow(m2_sem_urb_oc, material_concrete_sem)
kg_sem_urb_glass_o = material_outflow(m2_sem_urb_oc, material_glass_sem)
kg_app_urb_concrete_o = material_outflow(m2_app_urb_oc, material_concrete_app)
kg_app_urb_glass_o = material_outflow(m2_app_urb_oc, material_glass_app)
kg_hig_urb_concrete_o = material_outflow(m2_hig_urb_oc, material_concrete_hig)
kg_hig_urb_glass_o = material_outflow(m2_hig_urb_oc, material_glass_hig)
# Commercial Building materials OUTFLOW (in Million kg)
kg_office_concrete_o = material_outflow(m2_office_oc, materials_concrete_office)
kg_office_glass_o = material_outflow(m2_office_oc, materials_glass_office)
kg_retail_concrete_o = material_outflow(m2_retail_oc, materials_concrete_retail)
kg_retail_glass_o = material_outflow(m2_retail_oc, materials_glass_retail)
kg_hotels_concrete_o = material_outflow(m2_hotels_oc, materials_concrete_hotels)
kg_hotels_glass_o = material_outflow(m2_hotels_oc, materials_glass_hotels)
kg_govern_concrete_o = material_outflow(m2_govern_oc, materials_concrete_govern)
kg_govern_glass_o = material_outflow(m2_govern_oc, materials_glass_govern)
#%% CSV output (material stock, inflow & outflow)
length = 3 # number of flow categories (stock, inflow, outflow); note: this overwrites the time-series length used above
tag = ['stock', 'inflow', 'outflow']
# RURAL
kg_det_rur_concrete_out = [[]] * length
kg_det_rur_concrete_out[0] = kg_det_rur_concrete.transpose()
kg_det_rur_concrete_out[1] = kg_det_rur_concrete_i.transpose()
kg_det_rur_concrete_out[2] = kg_det_rur_concrete_o.transpose()
for item in range(0,length):
kg_det_rur_concrete_out[item].insert(0,'material', ['concrete'] * 26)
kg_det_rur_concrete_out[item].insert(0,'area', ['rural'] * 26)
kg_det_rur_concrete_out[item].insert(0,'type', ['detached'] * 26)
kg_det_rur_concrete_out[item].insert(0,'flow', [tag[item]] * 26)
kg_det_rur_glass_out = [[]] * length
kg_det_rur_glass_out[0] = kg_det_rur_glass.transpose()
kg_det_rur_glass_out[1] = kg_det_rur_glass_i.transpose()
kg_det_rur_glass_out[2] = kg_det_rur_glass_o.transpose()
for item in range(0,length):
kg_det_rur_glass_out[item].insert(0,'material', ['glass'] * 26)
kg_det_rur_glass_out[item].insert(0,'area', ['rural'] * 26)
kg_det_rur_glass_out[item].insert(0,'type', ['detached'] * 26)
kg_det_rur_glass_out[item].insert(0,'flow', [tag[item]] * 26)
kg_sem_rur_concrete_out = [[]] * length
kg_sem_rur_concrete_out[0] = kg_sem_rur_concrete.transpose()
kg_sem_rur_concrete_out[1] = kg_sem_rur_concrete_i.transpose()
kg_sem_rur_concrete_out[2] = kg_sem_rur_concrete_o.transpose()
for item in range(0,length):
kg_sem_rur_concrete_out[item].insert(0,'material', ['concrete'] * 26)
kg_sem_rur_concrete_out[item].insert(0,'area', ['rural'] * 26)
kg_sem_rur_concrete_out[item].insert(0,'type', ['semi-detached'] * 26)
kg_sem_rur_concrete_out[item].insert(0,'flow', [tag[item]] * 26)
kg_sem_rur_glass_out = [[]] * length
kg_sem_rur_glass_out[0] = kg_sem_rur_glass.transpose()
kg_sem_rur_glass_out[1] = kg_sem_rur_glass_i.transpose()
kg_sem_rur_glass_out[2] = kg_sem_rur_glass_o.transpose()
for item in range(0,length):
kg_sem_rur_glass_out[item].insert(0,'material', ['glass'] * 26)
kg_sem_rur_glass_out[item].insert(0,'area', ['rural'] * 26)
kg_sem_rur_glass_out[item].insert(0,'type', ['semi-detached'] * 26)
kg_sem_rur_glass_out[item].insert(0,'flow', [tag[item]] * 26)
kg_app_rur_concrete_out = [[]] * length
kg_app_rur_concrete_out[0] = kg_app_rur_concrete.transpose()
kg_app_rur_concrete_out[1] = kg_app_rur_concrete_i.transpose()
kg_app_rur_concrete_out[2] = kg_app_rur_concrete_o.transpose()
for item in range(0,length):
kg_app_rur_concrete_out[item].insert(0,'material', ['concrete'] * 26)
kg_app_rur_concrete_out[item].insert(0,'area', ['rural'] * 26)
kg_app_rur_concrete_out[item].insert(0,'type', ['appartments'] * 26)
kg_app_rur_concrete_out[item].insert(0,'flow', [tag[item]] * 26)
kg_app_rur_glass_out = [[]] * length
kg_app_rur_glass_out[0] = kg_app_rur_glass.transpose()
kg_app_rur_glass_out[1] = kg_app_rur_glass_i.transpose()
kg_app_rur_glass_out[2] = kg_app_rur_glass_o.transpose()
for item in range(0,length):
kg_app_rur_glass_out[item].insert(0,'material', ['glass'] * 26)
kg_app_rur_glass_out[item].insert(0,'area', ['rural'] * 26)
kg_app_rur_glass_out[item].insert(0,'type', ['appartments'] * 26)
kg_app_rur_glass_out[item].insert(0,'flow', [tag[item]] * 26)
kg_hig_rur_concrete_out = [[]] * length
kg_hig_rur_concrete_out[0] = kg_hig_rur_concrete.transpose()
kg_hig_rur_concrete_out[1] = kg_hig_rur_concrete_i.transpose()
kg_hig_rur_concrete_out[2] = kg_hig_rur_concrete_o.transpose()
for item in range(0,length):
kg_hig_rur_concrete_out[item].insert(0,'material', ['concrete'] * 26)
kg_hig_rur_concrete_out[item].insert(0,'area', ['rural'] * 26)
kg_hig_rur_concrete_out[item].insert(0,'type', ['high-rise'] * 26)
kg_hig_rur_concrete_out[item].insert(0,'flow', [tag[item]] * 26)
kg_hig_rur_glass_out = [[]] * length
kg_hig_rur_glass_out[0] = kg_hig_rur_glass.transpose()
kg_hig_rur_glass_out[1] = kg_hig_rur_glass_i.transpose()
kg_hig_rur_glass_out[2] = kg_hig_rur_glass_o.transpose()
for item in range(0,length):
kg_hig_rur_glass_out[item].insert(0,'material', ['glass'] * 26)
kg_hig_rur_glass_out[item].insert(0,'area', ['rural'] * 26)
kg_hig_rur_glass_out[item].insert(0,'type', ['high-rise'] * 26)
kg_hig_rur_glass_out[item].insert(0,'flow', [tag[item]] * 26)
# URBAN
kg_det_urb_concrete_out = [[]] * length
kg_det_urb_concrete_out[0] = kg_det_urb_concrete.transpose()
kg_det_urb_concrete_out[1] = kg_det_urb_concrete_i.transpose()
kg_det_urb_concrete_out[2] = kg_det_urb_concrete_o.transpose()
for item in range(0,length):
kg_det_urb_concrete_out[item].insert(0,'material', ['concrete'] * 26)
kg_det_urb_concrete_out[item].insert(0,'area', ['urban'] * 26)
kg_det_urb_concrete_out[item].insert(0,'type', ['detached'] * 26)
kg_det_urb_concrete_out[item].insert(0,'flow', [tag[item]] * 26)
kg_det_urb_glass_out = [[]] * length
kg_det_urb_glass_out[0] = kg_det_urb_glass.transpose()
kg_det_urb_glass_out[1] = kg_det_urb_glass_i.transpose()
kg_det_urb_glass_out[2] = kg_det_urb_glass_o.transpose()
for item in range(0,length):
kg_det_urb_glass_out[item].insert(0,'material', ['glass'] * 26)
kg_det_urb_glass_out[item].insert(0,'area', ['urban'] * 26)
kg_det_urb_glass_out[item].insert(0,'type', ['detached'] * 26)
kg_det_urb_glass_out[item].insert(0,'flow', [tag[item]] * 26)
kg_sem_urb_concrete_out = [[]] * length
kg_sem_urb_concrete_out[0] = kg_sem_urb_concrete.transpose()
kg_sem_urb_concrete_out[1] = kg_sem_urb_concrete_i.transpose()
kg_sem_urb_concrete_out[2] = kg_sem_urb_concrete_o.transpose()
for item in range(0,length):
kg_sem_urb_concrete_out[item].insert(0,'material', ['concrete'] * 26)
kg_sem_urb_concrete_out[item].insert(0,'area', ['urban'] * 26)
kg_sem_urb_concrete_out[item].insert(0,'type', ['semi-detached'] * 26)
kg_sem_urb_concrete_out[item].insert(0,'flow', [tag[item]] * 26)
kg_sem_urb_glass_out = [[]] * length
kg_sem_urb_glass_out[0] = kg_sem_urb_glass.transpose()
kg_sem_urb_glass_out[1] = kg_sem_urb_glass_i.transpose()
kg_sem_urb_glass_out[2] = kg_sem_urb_glass_o.transpose()
for item in range(0,length):
kg_sem_urb_glass_out[item].insert(0,'material', ['glass'] * 26)
kg_sem_urb_glass_out[item].insert(0,'area', ['urban'] * 26)
kg_sem_urb_glass_out[item].insert(0,'type', ['semi-detached'] * 26)
kg_sem_urb_glass_out[item].insert(0,'flow', [tag[item]] * 26)
#
kg_app_urb_concrete_out = [[]] * length
kg_app_urb_concrete_out[0] = kg_app_urb_concrete.transpose()
kg_app_urb_concrete_out[1] = kg_app_urb_concrete_i.transpose()
kg_app_urb_concrete_out[2] = kg_app_urb_concrete_o.transpose()
for item in range(0,length):
kg_app_urb_concrete_out[item].insert(0,'material', ['concrete'] * 26)
kg_app_urb_concrete_out[item].insert(0,'area', ['urban'] * 26)
kg_app_urb_concrete_out[item].insert(0,'type', ['appartments'] * 26)
kg_app_urb_concrete_out[item].insert(0,'flow', [tag[item]] * 26)
kg_app_urb_glass_out = [[]] * length
kg_app_urb_glass_out[0] = kg_app_urb_glass.transpose()
kg_app_urb_glass_out[1] = kg_app_urb_glass_i.transpose()
kg_app_urb_glass_out[2] = kg_app_urb_glass_o.transpose()
for item in range(0,length):
kg_app_urb_glass_out[item].insert(0,'material', ['glass'] * 26)
kg_app_urb_glass_out[item].insert(0,'area', ['urban'] * 26)
kg_app_urb_glass_out[item].insert(0,'type', ['appartments'] * 26)
kg_app_urb_glass_out[item].insert(0,'flow', [tag[item]] * 26)
kg_hig_urb_concrete_out = [[]] * length
kg_hig_urb_concrete_out[0] = kg_hig_urb_concrete.transpose()
kg_hig_urb_concrete_out[1] = kg_hig_urb_concrete_i.transpose()
kg_hig_urb_concrete_out[2] = kg_hig_urb_concrete_o.transpose()
for item in range(0,length):
kg_hig_urb_concrete_out[item].insert(0,'material', ['concrete'] * 26)
kg_hig_urb_concrete_out[item].insert(0,'area', ['urban'] * 26)
kg_hig_urb_concrete_out[item].insert(0,'type', ['high-rise'] * 26)
kg_hig_urb_concrete_out[item].insert(0,'flow', [tag[item]] * 26)
kg_hig_urb_glass_out = [[]] * length
kg_hig_urb_glass_out[0] = kg_hig_urb_glass.transpose()
kg_hig_urb_glass_out[1] = kg_hig_urb_glass_i.transpose()
kg_hig_urb_glass_out[2] = kg_hig_urb_glass_o.transpose()
for item in range(0,length):
kg_hig_urb_glass_out[item].insert(0,'material', ['glass'] * 26)
kg_hig_urb_glass_out[item].insert(0,'area', ['urban'] * 26)
kg_hig_urb_glass_out[item].insert(0,'type', ['high-rise'] * 26)
kg_hig_urb_glass_out[item].insert(0,'flow', [tag[item]] * 26)
# COMMERCIAL ------------------------------------------------------------------
# offices
kg_office_concrete_out = [[]] * length
kg_office_concrete_out[0] = kg_office_concrete.transpose()
kg_office_concrete_out[1] = kg_office_concrete_i.transpose()
kg_office_concrete_out[2] = kg_office_concrete_o.transpose()
for item in range(0,length):
kg_office_concrete_out[item].insert(0,'material', ['concrete'] * 26)
kg_office_concrete_out[item].insert(0,'area', ['commercial'] * 26)
kg_office_concrete_out[item].insert(0,'type', ['office'] * 26)
kg_office_concrete_out[item].insert(0,'flow', [tag[item]] * 26)
kg_office_glass_out = [[]] * length
kg_office_glass_out[0] = kg_office_glass.transpose()
kg_office_glass_out[1] = kg_office_glass_i.transpose()
kg_office_glass_out[2] = kg_office_glass_o.transpose()
for item in range(0,length):
kg_office_glass_out[item].insert(0,'material', ['glass'] * 26)
kg_office_glass_out[item].insert(0,'area', ['commercial'] * 26)
kg_office_glass_out[item].insert(0,'type', ['office'] * 26)
kg_office_glass_out[item].insert(0,'flow', [tag[item]] * 26)
# shops & retail
kg_retail_concrete_out = [[]] * length
kg_retail_concrete_out[0] = kg_retail_concrete.transpose()
kg_retail_concrete_out[1] = kg_retail_concrete_i.transpose()
kg_retail_concrete_out[2] = kg_retail_concrete_o.transpose()
for item in range(0,length):
kg_retail_concrete_out[item].insert(0,'material', ['concrete'] * 26)
kg_retail_concrete_out[item].insert(0,'area', ['commercial'] * 26)
kg_retail_concrete_out[item].insert(0,'type', ['retail'] * 26)
kg_retail_concrete_out[item].insert(0,'flow', [tag[item]] * 26)
kg_retail_glass_out = [[]] * length
kg_retail_glass_out[0] = kg_retail_glass.transpose()
kg_retail_glass_out[1] = kg_retail_glass_i.transpose()
kg_retail_glass_out[2] = kg_retail_glass_o.transpose()
for item in range(0,length):
kg_retail_glass_out[item].insert(0,'material', ['glass'] * 26)
kg_retail_glass_out[item].insert(0,'area', ['commercial'] * 26)
kg_retail_glass_out[item].insert(0,'type', ['retail'] * 26)
kg_retail_glass_out[item].insert(0,'flow', [tag[item]] * 26)
# hotels & restaurants
kg_hotels_concrete_out = [[]] * length
kg_hotels_concrete_out[0] = kg_hotels_concrete.transpose()
kg_hotels_concrete_out[1] = kg_hotels_concrete_i.transpose()
kg_hotels_concrete_out[2] = kg_hotels_concrete_o.transpose()
for item in range(0,length):
kg_hotels_concrete_out[item].insert(0,'material', ['concrete'] * 26)
kg_hotels_concrete_out[item].insert(0,'area', ['commercial'] * 26)
kg_hotels_concrete_out[item].insert(0,'type', ['hotels'] * 26)
kg_hotels_concrete_out[item].insert(0,'flow', [tag[item]] * 26)
kg_hotels_glass_out = [[]] * length
kg_hotels_glass_out[0] = kg_hotels_glass.transpose()
kg_hotels_glass_out[1] = kg_hotels_glass_i.transpose()
kg_hotels_glass_out[2] = kg_hotels_glass_o.transpose()
for item in range(0,length):
kg_hotels_glass_out[item].insert(0,'material', ['glass'] * 26)
kg_hotels_glass_out[item].insert(0,'area', ['commercial'] * 26)
kg_hotels_glass_out[item].insert(0,'type', ['hotels'] * 26)
kg_hotels_glass_out[item].insert(0,'flow', [tag[item]] * 26)
# government (schools, government, public transport, hospitals)
kg_govern_concrete_out = [[]] * length
kg_govern_concrete_out[0] = kg_govern_concrete.transpose()
kg_govern_concrete_out[1] = kg_govern_concrete_i.transpose()
kg_govern_concrete_out[2] = kg_govern_concrete_o.transpose()
for item in range(0,length):
kg_govern_concrete_out[item].insert(0,'material', ['concrete'] * 26)
kg_govern_concrete_out[item].insert(0,'area', ['commercial'] * 26)
kg_govern_concrete_out[item].insert(0,'type', ['govern'] * 26)
kg_govern_concrete_out[item].insert(0,'flow', [tag[item]] * 26)
kg_govern_glass_out = [[]] * length
kg_govern_glass_out[0] = kg_govern_glass.transpose()
kg_govern_glass_out[1] = kg_govern_glass_i.transpose()
kg_govern_glass_out[2] = kg_govern_glass_o.transpose()
for item in range(0,length):
kg_govern_glass_out[item].insert(0,'material', ['glass'] * 26)
kg_govern_glass_out[item].insert(0,'area', ['commercial'] * 26)
kg_govern_glass_out[item].insert(0,'type', ['govern'] * 26)
kg_govern_glass_out[item].insert(0,'flow', [tag[item]] * 26)
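# The repeated transpose/insert pattern above could be wrapped in a helper; a
# sketch only, with a hypothetical name, assuming every kg_* frame is shaped
# years (rows) x 26 regions (columns):
def label_flows(stock_df, inflow_df, outflow_df, material, area, btype, n_regions=26):
    frames = [stock_df.transpose(), inflow_df.transpose(), outflow_df.transpose()]
    for flow_tag, frame in zip(['stock', 'inflow', 'outflow'], frames):
        frame.insert(0, 'material', [material] * n_regions)
        frame.insert(0, 'area', [area] * n_regions)
        frame.insert(0, 'type', [btype] * n_regions)
        frame.insert(0, 'flow', [flow_tag] * n_regions)
    return frames
# e.g. kg_det_rur_concrete_out could equivalently be built as:
# label_flows(kg_det_rur_concrete, kg_det_rur_concrete_i, kg_det_rur_concrete_o,
#             'concrete', 'rural', 'detached')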
# stack into 1 dataframe
frames = [kg_det_rur_concrete_out[0], kg_det_rur_glass_out[0],
kg_sem_rur_concrete_out[0], kg_sem_rur_glass_out[0],
kg_app_rur_concrete_out[0], kg_app_rur_glass_out[0],
kg_hig_rur_concrete_out[0], kg_hig_rur_glass_out[0],
kg_det_urb_concrete_out[0], kg_det_urb_glass_out[0],
kg_sem_urb_concrete_out[0], kg_sem_urb_glass_out[0],
kg_app_urb_concrete_out[0], kg_app_urb_glass_out[0],
kg_hig_urb_concrete_out[0], kg_hig_urb_glass_out[0],
kg_office_concrete_out[0], kg_office_glass_out[0],
kg_retail_concrete_out[0], kg_retail_glass_out[0],
kg_hotels_concrete_out[0], kg_hotels_glass_out[0],
kg_govern_concrete_out[0], kg_govern_glass_out[0],
kg_det_rur_concrete_out[1], kg_det_rur_glass_out[1],
kg_sem_rur_concrete_out[1], kg_sem_rur_glass_out[1],
kg_app_rur_concrete_out[1], kg_app_rur_glass_out[1],
kg_hig_rur_concrete_out[1], kg_hig_rur_glass_out[1],
kg_det_urb_concrete_out[1], kg_det_urb_glass_out[1],
kg_sem_urb_concrete_out[1], kg_sem_urb_glass_out[1],
kg_app_urb_concrete_out[1], kg_app_urb_glass_out[1],
kg_hig_urb_concrete_out[1], kg_hig_urb_glass_out[1],
kg_office_concrete_out[1], kg_office_glass_out[1],
kg_retail_concrete_out[1], kg_retail_glass_out[1],
kg_hotels_concrete_out[1], kg_hotels_glass_out[1],
kg_govern_concrete_out[1], kg_govern_glass_out[1],
kg_det_rur_concrete_out[2], kg_det_rur_glass_out[2],
kg_sem_rur_concrete_out[2], kg_sem_rur_glass_out[2],
kg_app_rur_concrete_out[2], kg_app_rur_glass_out[2],
kg_hig_rur_concrete_out[2], kg_hig_rur_glass_out[2],
kg_det_urb_concrete_out[2], kg_det_urb_glass_out[2],
kg_sem_urb_concrete_out[2], kg_sem_urb_glass_out[2],
kg_app_urb_concrete_out[2], kg_app_urb_glass_out[2],
kg_hig_urb_concrete_out[2], kg_hig_urb_glass_out[2],
kg_office_concrete_out[2], kg_office_glass_out[2],
kg_retail_concrete_out[2], kg_retail_glass_out[2],
kg_hotels_concrete_out[2], kg_hotels_glass_out[2],
kg_govern_concrete_out[2], kg_govern_glass_out[2] ]
material_output = pd.concat(frames)
material_output.to_csv('output_material\\concrete_glass_output.csv')
##sand in concrete and glass
building_materials = pd.read_csv('output_material//concrete_glass_output.csv') # Building_materials_inflow; unit: kg; meaning: the materials inflow (by building type, by region & by area)
#building_materials = material_output # Building_materials_inflow; unit: kg; meaning: the materials inflow (by building type, by region & by area)
building_materials_inflow = building_materials.loc[(building_materials['flow']=='inflow')]
building_materials_inflow = building_materials_inflow.set_index('Unnamed: 0')
building_materials_outflow = building_materials.loc[(building_materials['flow']=='outflow')]
building_materials_outflow = building_materials_outflow.set_index('Unnamed: 0')
# recovery and reuse
recovery_rate = pd.read_csv('files_recovery_rate//recovery_rate.csv')
recovery_rate = recovery_rate.set_index('Unnamed: 0')
reuse_rate = pd.read_csv('files_recovery_rate//reuse_rate.csv')
reuse_rate = reuse_rate.set_index('Unnamed: 0')
materials_recovery_available = building_materials_outflow.iloc[:,4:] * recovery_rate.iloc[:,4:]
materials_reuse_available = building_materials_outflow.iloc[:,4:] * reuse_rate.iloc[:,4:]
# secondary material
#materials_secondary = materials_recovery - materials_reuse # not good!
a = building_materials_inflow.iloc[:,4:].values
b = materials_recovery_available.values
c = materials_reuse_available.values
materials_recovery = pd.DataFrame(np.where(a < b, a, b), index=materials_recovery_available.index, columns=materials_recovery_available.columns)
materials_reuse = pd.DataFrame(np.where(a < c, a, c), index=materials_reuse_available.index, columns=materials_reuse_available.columns)
materials_secondary = materials_recovery - materials_reuse
# primary material input equals the inflow minus recovery
materials_primary = building_materials_inflow.iloc[:,4:] - materials_recovery
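# Hedged illustration (not part of the original pipeline; toy numbers only): the
# np.where(a < b, a, b) pattern above is an element-wise minimum, capping the
# recoverable/reusable mass at the inflow of the same year.
inflow_toy = np.array([10.0, 5.0])
recoverable_toy = np.array([8.0, 7.0])
capped_toy = np.where(inflow_toy < recoverable_toy, inflow_toy, recoverable_toy)  # -> [8., 5.]
assert np.array_equal(capped_toy, np.minimum(inflow_toy, recoverable_toy))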
sand_primary_per_kg = pd.read_csv('files_sand_factor//sand_primary_per_kg.csv')
sand_primary_per_kg = sand_primary_per_kg.set_index('Unnamed: 0')
sand_secondary_per_kg = | pd.read_csv('files_sand_factor//sand_secondary_per_kg.csv') | pandas.read_csv |
"""
Preprocess sites scripts.
Written by <NAME>.
Winter 2020
"""
import os
import configparser
import json
import csv
import math
import glob
import pandas as pd
import geopandas as gpd
import pyproj
from shapely.geometry import Polygon, MultiPolygon, mapping, shape, MultiLineString, LineString
from shapely.ops import transform, unary_union, nearest_points
import fiona
from fiona.crs import from_epsg
import rasterio
from rasterio.mask import mask
from rasterstats import zonal_stats
import networkx as nx
from rtree import index
import numpy as np
import random
CONFIG = configparser.ConfigParser()
CONFIG.read(os.path.join(os.path.dirname(__file__), 'script_config.ini'))
BASE_PATH = CONFIG['file_locations']['base_path']
DATA_RAW = os.path.join(BASE_PATH, 'raw')
DATA_INTERMEDIATE = os.path.join(BASE_PATH, 'intermediate')
DATA_PROCESSED = os.path.join(BASE_PATH, 'processed')
def find_country_list(continent_list):
"""
This function produces country information by continent.
Parameters
----------
continent_list : list
Contains the name of the desired continent, e.g. ['Africa']
Returns
-------
countries : list of dicts
Contains all desired country information for countries in
the stated continent.
"""
glob_info_path = os.path.join(BASE_PATH, 'global_information.csv')
countries = pd.read_csv(glob_info_path, encoding = "ISO-8859-1")
countries = countries[countries.exclude != 1]
if len(continent_list) > 0:
data = countries.loc[countries['continent'].isin(continent_list)]
else:
data = countries
output = []
for index, country in data.iterrows():
output.append({
'country_name': country['country'],
'iso3': country['ISO_3digit'],
'iso2': country['ISO_2digit'],
'regional_level': country['lowest'],
'region': country['region']
})
return output
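# Hedged usage sketch (not part of the original script; the helper name below is hypothetical):
def _example_find_country_list():
    """Print a few countries for a continent; assumes global_information.csv is present."""
    for c in find_country_list(['Africa'])[:3]:
        print(c['iso3'], c['country_name'], c['regional_level'])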
def process_coverage_shapes(country):
"""
Load in coverage maps, process and export for each country.
Parameters
----------
    country : dict
        Contains country information, including the 'iso3' and 'iso2' codes.
"""
iso3 = country['iso3']
iso2 = country['iso2']
technologies = [
'GSM',
'3G',
'4G'
]
for tech in technologies:
folder_coverage = os.path.join(DATA_INTERMEDIATE, iso3, 'coverage')
filename = 'coverage_{}.shp'.format(tech)
path_output = os.path.join(folder_coverage, filename)
if os.path.exists(path_output):
continue
print('----')
print('Working on {} in {}'.format(tech, iso3))
filename = 'Inclusions_201812_{}.shp'.format(tech)
folder = os.path.join(DATA_RAW, 'mobile_coverage_explorer',
'Data_MCE')
inclusions = gpd.read_file(os.path.join(folder, filename))
        if iso2 in inclusions['CNTRY_ISO2'].values:
filename = 'MCE_201812_{}.shp'.format(tech)
folder = os.path.join(DATA_RAW, 'mobile_coverage_explorer',
'Data_MCE')
coverage = gpd.read_file(os.path.join(folder, filename))
coverage = coverage.loc[coverage['CNTRY_ISO3'] == iso3]
else:
filename = 'OCI_201812_{}.shp'.format(tech)
folder = os.path.join(DATA_RAW, 'mobile_coverage_explorer',
'Data_OCI')
coverage = gpd.read_file(os.path.join(folder, filename))
coverage = coverage.loc[coverage['CNTRY_ISO3'] == iso3]
if len(coverage) > 0:
print('Dissolving polygons')
coverage['dissolve'] = 1
coverage = coverage.dissolve(by='dissolve', aggfunc='sum')
coverage = coverage.to_crs('epsg:3857')
print('Excluding small shapes')
coverage['geometry'] = coverage.apply(clean_coverage,axis=1)
print('Removing empty and null geometries')
coverage = coverage[~(coverage['geometry'].is_empty)]
coverage = coverage[coverage['geometry'].notnull()]
print('Simplifying geometries')
coverage['geometry'] = coverage.simplify(
tolerance = 0.005,
preserve_topology=True).buffer(0.0001).simplify(
tolerance = 0.005,
preserve_topology=True
)
coverage = coverage.to_crs('epsg:4326')
if not os.path.exists(folder_coverage):
os.makedirs(folder_coverage)
coverage.to_file(path_output, driver='ESRI Shapefile')
return #print('Processed coverage shapes')
def process_regional_coverage(country):
"""
This functions estimates the area covered by each cellular
technology.
Parameters
----------
country : dict
Contains specific country parameters.
Returns
-------
output : dict
Results for cellular coverage by each technology for
each region.
"""
level = country['regional_level']
iso3 = country['iso3']
gid_level = 'GID_{}'.format(level)
filename = 'regions_{}_{}.shp'.format(level, iso3)
folder = os.path.join(DATA_INTERMEDIATE, iso3, 'regions')
path = os.path.join(folder, filename)
regions = gpd.read_file(path)
technologies = [
'GSM',
'3G',
'4G'
]
output = {}
for tech in technologies:
folder = os.path.join(DATA_INTERMEDIATE, iso3, 'coverage')
path = os.path.join(folder, 'coverage_{}.shp'.format(tech))
if os.path.exists(path):
coverage = gpd.read_file(path, encoding="utf-8")
segments = gpd.overlay(regions, coverage, how='intersection')
tech_coverage = {}
for idx, region in segments.iterrows():
area_km2 = round(area_of_polygon(region['geometry']) / 1e6)
tech_coverage[region[gid_level]] = area_km2
output[tech] = tech_coverage
return output
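# The returned structure is a nested dict keyed by technology and then by region id,
# for example (illustrative values only): {'GSM': {'KEN.1_1': 1250}, '3G': {...}, '4G': {...}}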
def get_regional_data(country):
"""
Extract regional data including luminosity and population.
Parameters
----------
    country : dict
        Contains country information, including the 'iso3' code and regional level.
"""
iso3 = country['iso3']
level = country['regional_level']
gid_level = 'GID_{}'.format(level)
path_output = os.path.join(DATA_INTERMEDIATE, iso3, 'regional_coverage.csv')
if os.path.exists(path_output):
return #print('Regional data already exists')
path_country = os.path.join(DATA_INTERMEDIATE, iso3,
'national_outline.shp')
coverage = process_regional_coverage(country)
single_country = gpd.read_file(path_country)
# print('----')
# print('working on {}'.format(iso3))
path_settlements = os.path.join(DATA_INTERMEDIATE, iso3,
'settlements.tif')
filename = 'regions_{}_{}.shp'.format(level, iso3)
folder = os.path.join(DATA_INTERMEDIATE, iso3, 'regions')
path = os.path.join(folder, filename)
regions = gpd.read_file(path)
results = []
for index, region in regions.iterrows():
with rasterio.open(path_settlements) as src:
affine = src.transform
array = src.read(1)
array[array <= 0] = 0
population_summation = [d['sum'] for d in zonal_stats(
region['geometry'],
array,
stats=['sum'],
nodata=0,
affine=affine)][0]
area_km2 = round(area_of_polygon(region['geometry']) / 1e6)
if 'GSM' in [c for c in coverage.keys()]:
if region[gid_level] in coverage['GSM']:
coverage_GSM_km2 = coverage['GSM'][region[gid_level]]
else:
coverage_GSM_km2 = 0
else:
coverage_GSM_km2 = 0
if '3G' in [c for c in coverage.keys()]:
if region[gid_level] in coverage['3G']:
coverage_3G_km2 = coverage['3G'][region[gid_level]]
else:
coverage_3G_km2 = 0
else:
coverage_3G_km2 = 0
if '4G' in [c for c in coverage.keys()]:
if region[gid_level] in coverage['4G']:
coverage_4G_km2 = coverage['4G'][region[gid_level]]
else:
coverage_4G_km2 = 0
else:
coverage_4G_km2 = 0
results.append({
'GID_0': region['GID_0'],
'GID_id': region[gid_level],
'GID_level': gid_level,
# 'mean_luminosity_km2': luminosity_summation / area_km2 if luminosity_summation else 0,
'population': population_summation,
# 'pop_under_10_pop': pop_under_10_pop,
'area_km2': area_km2,
'population_km2': population_summation / area_km2 if population_summation else 0,
# 'pop_adults_km2': ((population_summation - pop_under_10_pop) /
# area_km2 if pop_under_10_pop else 0),
'coverage_GSM_percent': round(coverage_GSM_km2 / area_km2 * 100 if coverage_GSM_km2 else 0, 1),
'coverage_3G_percent': round(coverage_3G_km2 / area_km2 * 100 if coverage_3G_km2 else 0, 1),
'coverage_4G_percent': round(coverage_4G_km2 / area_km2 * 100 if coverage_4G_km2 else 0, 1),
})
# print('Working on backhaul')
backhaul_lut = estimate_backhaul(iso3, country['region'], '2025')
# print('Working on estimating sites')
results = estimate_sites(results, iso3, backhaul_lut)
results_df = pd.DataFrame(results)
results_df.to_csv(path_output, index=False)
# print('Completed {}'.format(single_country.NAME_0.values[0]))
return #print('Completed night lights data querying')
def find_pop_under_10(region, iso3):
"""
Find the estimated population under 10 years old.
Parameters
----------
region : pandas series
The region being modeled.
iso3 : string
ISO3 country code.
Returns
-------
population : int
Population sum under 10 years of age.
"""
path = os.path.join(DATA_INTERMEDIATE, iso3, 'under_10')
all_paths = glob.glob(path + '/*.tif')
population = []
for path in all_paths:
with rasterio.open(path) as src:
affine = src.transform
array = src.read(1)
array[array <= 0] = 0
population_summation = [d['sum'] for d in zonal_stats(
region['geometry'],
array,
stats=['sum'],
nodata=0,
affine=affine)][0]
if population_summation is not None:
population.append(population_summation)
return sum(population)
def estimate_sites(data, iso3, backhaul_lut):
"""
Estimate the sites by region.
Parameters
----------
    data : list of dicts
        Regional results produced in get_regional_data().
iso3 : string
ISO3 country code.
backhaul_lut : dict
Lookup table of backhaul composition.
Returns
-------
output : list of dicts
All regional data with estimated sites.
"""
output = []
existing_site_data_path = os.path.join(DATA_INTERMEDIATE, iso3, 'sites', 'sites.csv')
existing_site_data = {}
if os.path.exists(existing_site_data_path):
site_data = pd.read_csv(existing_site_data_path)
site_data = site_data.to_dict('records')
for item in site_data:
existing_site_data[item['GID_id']] = item['sites']
population = 0
for region in data:
if region['population'] == None:
continue
population += int(region['population'])
path = os.path.join(DATA_RAW, 'wb_mobile_coverage', 'wb_population_coverage_2G.csv')
coverage = pd.read_csv(path, encoding='latin-1')
coverage = coverage.loc[coverage['Country ISO3'] == iso3]
    if len(coverage) > 0:
coverage = coverage['2020'].values[0]
else:
coverage = 0
population_covered = population * (coverage / 100)
path = os.path.join(DATA_RAW, 'real_site_data', 'site_counts.csv')
towers = pd.read_csv(path, encoding = "ISO-8859-1")
towers = towers.loc[towers['iso3'] == iso3]
towers = towers['sites'].values[0]
if np.isnan(towers):
towers = 0
towers_per_pop = 0
else:
towers_per_pop = towers / population_covered
tower_backhaul_lut = estimate_backhaul_type(backhaul_lut)
data = sorted(data, key=lambda k: k['population_km2'], reverse=True)
covered_pop_so_far = 0
for region in data:
#first try to use actual data
if len(existing_site_data) > 0:
sites_estimated_total = existing_site_data[region['GID_id']]
if region['area_km2'] > 0:
sites_estimated_km2 = sites_estimated_total / region['area_km2']
else:
sites_estimated_km2 = 0
        #or, if we don't have site data, estimate sites from the national towers-per-person rate
else:
if covered_pop_so_far < population_covered:
sites_estimated_total = region['population'] * towers_per_pop
sites_estimated_km2 = region['population_km2'] * towers_per_pop
else:
sites_estimated_total = 0
sites_estimated_km2 = 0
backhaul_fiber = 0
backhaul_copper = 0
backhaul_wireless = 0
backhaul_satellite = 0
for i in range(1, int(round(sites_estimated_total)) + 1):
num = random.uniform(0, 1)
if num <= tower_backhaul_lut['fiber']:
backhaul_fiber += 1
elif tower_backhaul_lut['fiber'] < num <= tower_backhaul_lut['copper']:
backhaul_copper += 1
elif tower_backhaul_lut['copper'] < num <= tower_backhaul_lut['microwave']:
backhaul_wireless += 1
elif tower_backhaul_lut['microwave'] < num:
backhaul_satellite += 1
output.append({
'GID_0': region['GID_0'],
'GID_id': region['GID_id'],
'GID_level': region['GID_level'],
# 'mean_luminosity_km2': region['mean_luminosity_km2'],
'population': region['population'],
# 'pop_under_10_pop': region['pop_under_10_pop'],
'area_km2': region['area_km2'],
'population_km2': region['population_km2'],
# 'pop_adults_km2': region['pop_adults_km2'],
'coverage_GSM_percent': region['coverage_GSM_percent'],
'coverage_3G_percent': region['coverage_3G_percent'],
'coverage_4G_percent': region['coverage_4G_percent'],
'total_estimated_sites': sites_estimated_total,
'total_estimated_sites_km2': sites_estimated_km2,
'sites_3G': sites_estimated_total * (region['coverage_3G_percent'] /100),
'sites_4G': sites_estimated_total * (region['coverage_4G_percent'] /100),
'backhaul_fiber': backhaul_fiber,
'backhaul_copper': backhaul_copper,
'backhaul_wireless': backhaul_wireless,
'backhaul_satellite': backhaul_satellite,
})
if region['population'] == None:
continue
covered_pop_so_far += region['population']
return output
def estimate_backhaul(iso3, region, year):
"""
Get the correct backhaul composition for the region.
Parameters
----------
iso3 : string
ISO3 country code.
region : string
The continent the country is part of.
year : int
The year of the backhaul composition desired.
Returns
-------
    output : list of dicts
        Backhaul composition for the given region and year (technology and percentage).
"""
output = []
path = os.path.join(BASE_PATH, 'raw', 'gsma', 'backhaul.csv')
backhaul_lut = pd.read_csv(path)
backhaul_lut = backhaul_lut.to_dict('records')
for item in backhaul_lut:
if region == item['Region'] and int(item['Year']) == int(year):
output.append({
'tech': item['Technology'],
'percentage': int(item['Value']),
})
return output
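# Illustrative return value (placeholder numbers, not taken from the GSMA file):
# [{'tech': 'fiber', 'percentage': 20}, {'tech': 'copper', 'percentage': 10},
#  {'tech': 'microwave', 'percentage': 60}, {'tech': 'satellite', 'percentage': 10}]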
def estimate_backhaul_type(backhaul_lut):
"""
Process the tower backhaul lut.
Parameters
----------
backhaul_lut : dict
Lookup table of backhaul composition.
Returns
-------
output : dict
Tower backhaul lookup table.
"""
output = {}
preference = [
'fiber',
'copper',
'microwave',
'satellite'
]
perc_so_far = 0
for tech in preference:
for item in backhaul_lut:
if tech == item['tech'].lower():
perc = item['percentage']
output[tech] = (perc + perc_so_far) / 100
perc_so_far += perc
return output
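# Hedged sketch (hypothetical helper, made-up percentages) showing how the cumulative
# thresholds used for random backhaul assignment are built:
def _example_estimate_backhaul_type():
    lut = [
        {'tech': 'Fiber', 'percentage': 20},
        {'tech': 'Copper', 'percentage': 10},
        {'tech': 'Microwave', 'percentage': 60},
        {'tech': 'Satellite', 'percentage': 10},
    ]
    # expected result: {'fiber': 0.2, 'copper': 0.3, 'microwave': 0.9, 'satellite': 1.0}
    return estimate_backhaul_type(lut)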
def area_of_polygon(geom):
"""
    Returns the geodesic area of a polygon assumed to be in WGS84
    coordinates (no reprojection is performed).
Parameters
----------
geom : shapely geometry
A shapely geometry object.
Returns
-------
    poly_area : int
        Area of the polygon in square meters.
"""
geod = pyproj.Geod(ellps="WGS84")
poly_area, poly_perimeter = geod.geometry_area_perimeter(
geom
)
return abs(int(poly_area))
def length_of_line(geom):
"""
Returns the length of a linestring. Assume WGS84 as crs.
Parameters
----------
geom : shapely geometry
A shapely geometry object.
Returns
-------
    total_length : int
        Length of the linestring in meters.
"""
geod = pyproj.Geod(ellps="WGS84")
total_length = geod.line_length(*geom.xy)
return abs(int(total_length))
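# Hedged sanity-check sketch (hypothetical helper): both functions above return
# metre-based values (square meters and meters respectively).
def _example_geodesic_measures():
    square = Polygon([(0, 0), (0.1, 0), (0.1, 0.1), (0, 0.1)])  # ~0.1 x 0.1 degrees at the equator
    line = LineString([(0, 0), (1, 0)])  # roughly 111 km along the equator
    return area_of_polygon(square), length_of_line(line)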
def estimate_numers_of_sites(linear_regressor, x_value):
"""
Function to predict the y value from the stated x value.
Parameters
----------
linear_regressor : object
Linear regression object.
x_value : float
The stated x value we want to use to predict y.
Returns
-------
result : float
The predicted y value.
"""
if not x_value == 0:
result = linear_regressor.predict(x_value)
result = result[0,0]
else:
result = 0
return result
def exclude_small_shapes(x):
"""
Remove small multipolygon shapes.
Parameters
---------
x : polygon
Feature to simplify.
Returns
-------
MultiPolygon : MultiPolygon
Shapely MultiPolygon geometry without tiny shapes.
"""
    # if it's a single polygon, just return the polygon geometry
if x.geometry.geom_type == 'Polygon':
return x.geometry
    # if it's a multipolygon, try to simplify it by
    # removing component shapes that are too small.
elif x.geometry.geom_type == 'MultiPolygon':
area1 = 0.01
area2 = 50
        # don't remove shapes if the total area is already very small
if x.geometry.area < area1:
return x.geometry
        # raise the area threshold for very large countries
if x['GID_0'] in ['CHL','IDN']:
threshold = 0.01
elif x['GID_0'] in ['RUS','GRL','CAN','USA']:
threshold = 0.01
elif x.geometry.area > area2:
threshold = 0.1
else:
threshold = 0.001
# save remaining polygons as new multipolygon for
# the specific country
new_geom = []
for y in x.geometry:
if y.area > threshold:
new_geom.append(y)
return MultiPolygon(new_geom)
def clean_coverage(x):
"""
Cleans the coverage polygons by remove small multipolygon shapes.
Parameters
---------
x : polygon
Feature to simplify.
Returns
-------
MultiPolygon : MultiPolygon
Shapely MultiPolygon geometry without tiny shapes.
"""
    # if it's a single polygon, just return the polygon geometry
if x.geometry.geom_type == 'Polygon':
if x.geometry.area > 1e7:
return x.geometry
    # if it's a multipolygon, simplify it by removing
    # component shapes that are too small.
elif x.geometry.geom_type == 'MultiPolygon':
threshold = 1e7
# save remaining polygons as new multipolygon for
# the specific country
new_geom = []
for y in x.geometry:
if y.area > threshold:
new_geom.append(y)
return MultiPolygon(new_geom)
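# Note: geometries at or below the 1e7 threshold (projected epsg:3857 units, roughly m^2)
# fall through and return None, which is why process_coverage_shapes() drops empty/null
# geometries afterwards.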
def estimate_core_nodes(iso3, pop_density_km2, settlement_size):
"""
This function identifies settlements which exceed a desired settlement
size. It is assumed fiber exists at settlements over, for example,
20,000 inhabitants.
Parameters
----------
iso3 : string
ISO 3 digit country code.
pop_density_km2 : int
Population density threshold for identifying built up areas.
settlement_size : int
        Overall settlement size assumption, e.g. 20,000 inhabitants.
Returns
-------
output : list of dicts
Identified major settlements as Geojson objects.
"""
path = os.path.join(DATA_INTERMEDIATE, iso3, 'settlements.tif')
with rasterio.open(path) as src:
data = src.read()
threshold = pop_density_km2
data[data < threshold] = 0
data[data >= threshold] = 1
polygons = rasterio.features.shapes(data, transform=src.transform)
shapes_df = gpd.GeoDataFrame.from_features(
[
{'geometry': poly, 'properties':{'value':value}}
for poly, value in polygons
if value > 0
],
crs='epsg:4326'
)
stats = zonal_stats(shapes_df['geometry'], path, stats=['count', 'sum'])
stats_df = pd.DataFrame(stats)
nodes = pd.concat([shapes_df, stats_df], axis=1).drop(columns='value')
nodes = nodes[nodes['sum'] >= settlement_size]
nodes['geometry'] = nodes['geometry'].centroid
nodes = get_points_inside_country(nodes, iso3)
output = []
for index, item in enumerate(nodes.to_dict('records')):
output.append({
'type': 'Feature',
'geometry': mapping(item['geometry']),
'properties': {
'network_layer': 'core',
'id': 'core_{}'.format(index),
'node_number': index,
}
})
return output
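# Each element of `output` is a GeoJSON-style feature, for example (illustrative only):
# {'type': 'Feature', 'geometry': {'type': 'Point', 'coordinates': (36.8, -1.3)},
#  'properties': {'network_layer': 'core', 'id': 'core_0', 'node_number': 0}}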
def get_points_inside_country(nodes, iso3):
"""
Check settlement locations lie inside target country.
Parameters
----------
nodes : dataframe
A geopandas dataframe containing settlement nodes.
iso3 : string
ISO 3 digit country code.
Returns
-------
nodes : dataframe
A geopandas dataframe containing settlement nodes.
"""
filename = 'national_outline.shp'
path = os.path.join(DATA_INTERMEDIATE, iso3, filename)
national_outline = gpd.read_file(path)
bool_list = nodes.intersects(national_outline.unary_union)
nodes = pd.concat([nodes, bool_list], axis=1)
nodes = nodes[nodes[0] == True].drop(columns=0)
return nodes
def generate_agglomeration_lut(country):
"""
Generate a lookup table of agglomerations.
Parameters
----------
country : dict
        Contains all country specific information.
"""
iso3 = country['iso3']
regional_level = country['regional_level']
GID_level = 'GID_{}'.format(regional_level)
folder = os.path.join(DATA_INTERMEDIATE, iso3, 'agglomerations')
if not os.path.exists(folder):
os.makedirs(folder)
path_output = os.path.join(folder, 'agglomerations.shp')
if os.path.exists(path_output):
return print('Agglomeration processing has already completed')
print('Working on {} agglomeration lookup table'.format(iso3))
filename = 'regions_{}_{}.shp'.format(regional_level, iso3)
folder = os.path.join(DATA_INTERMEDIATE, iso3, 'regions')
path = os.path.join(folder, filename)
regions = gpd.read_file(path, crs="epsg:4326")
path_settlements = os.path.join(DATA_INTERMEDIATE, iso3, 'settlements.tif')
settlements = rasterio.open(path_settlements, 'r+')
settlements.nodata = 255
    settlements.crs = rasterio.crs.CRS.from_epsg(4326)
folder_tifs = os.path.join(DATA_INTERMEDIATE, iso3, 'agglomerations', 'tifs')
if not os.path.exists(folder_tifs):
os.makedirs(folder_tifs)
for idx, region in regions.iterrows():
bbox = region['geometry'].envelope
geo = gpd.GeoDataFrame()
geo = gpd.GeoDataFrame({'geometry': bbox}, index=[idx])
coords = [json.loads(geo.to_json())['features'][0]['geometry']]
#chop on coords
out_img, out_transform = mask(settlements, coords, crop=True)
# Copy the metadata
out_meta = settlements.meta.copy()
out_meta.update({"driver": "GTiff",
"height": out_img.shape[1],
"width": out_img.shape[2],
"transform": out_transform,
"crs": 'epsg:4326'})
path_output = os.path.join(folder_tifs, region[GID_level] + '.tif')
with rasterio.open(path_output, "w", **out_meta) as dest:
dest.write(out_img)
print('Completed settlement.tif regional segmentation')
nodes, missing_nodes = find_nodes(country, regions)
missing_nodes = get_missing_nodes(country, regions, missing_nodes, 10, 10)
nodes = nodes + missing_nodes
nodes = gpd.GeoDataFrame.from_features(nodes, crs='epsg:4326')
bool_list = nodes.intersects(regions['geometry'].unary_union)
nodes = pd.concat([nodes, bool_list], axis=1)
nodes = nodes[nodes[0] == True].drop(columns=0)
agglomerations = []
print('Identifying agglomerations')
for idx1, region in regions.iterrows():
seen = set()
for idx2, node in nodes.iterrows():
if node['geometry'].intersects(region['geometry']):
agglomerations.append({
'type': 'Feature',
'geometry': mapping(node['geometry']),
'properties': {
'id': idx1,
'GID_0': region['GID_0'],
GID_level: region[GID_level],
'population': node['sum'],
}
})
seen.add(region[GID_level])
if len(seen) == 0:
agglomerations.append({
'type': 'Feature',
'geometry': mapping(region['geometry'].centroid),
'properties': {
'id': 'regional_node',
'GID_0': region['GID_0'],
GID_level: region[GID_level],
'population': 1,
}
})
agglomerations = gpd.GeoDataFrame.from_features(
[
{
'geometry': item['geometry'],
'properties': {
'id': item['properties']['id'],
'GID_0':item['properties']['GID_0'],
GID_level: item['properties'][GID_level],
'population': item['properties']['population'],
}
}
for item in agglomerations
],
crs='epsg:4326'
)
folder = os.path.join(DATA_INTERMEDIATE, iso3, 'agglomerations')
path_output = os.path.join(folder, 'agglomerations' + '.shp')
agglomerations.to_file(path_output)
agglomerations['lon'] = agglomerations['geometry'].x
agglomerations['lat'] = agglomerations['geometry'].y
agglomerations = agglomerations[['lon', 'lat', GID_level, 'population']]
agglomerations.to_csv(os.path.join(folder, 'agglomerations.csv'), index=False)
return print('Agglomerations layer complete')
def process_existing_fiber(country):
"""
Load and process existing fiber data.
Parameters
----------
country : dict
        Contains all country specific information.
"""
iso3 = country['iso3']
iso2 = country['iso2'].lower()
folder = os.path.join(DATA_INTERMEDIATE, iso3, 'network_existing')
if not os.path.exists(folder):
os.makedirs(folder)
filename = 'core_edges_existing.shp'
path_output = os.path.join(folder, filename)
if os.path.exists(path_output):
return print('Existing fiber already processed')
path = os.path.join(DATA_RAW, 'afterfiber', 'afterfiber.shp')
shape = fiona.open(path)
data = []
for item in shape:
if item['properties']['iso2'].lower() == iso2.lower():
if item['geometry']['type'] == 'LineString':
if int(item['properties']['live']) == 1:
data.append({
'type': 'Feature',
'geometry': {
'type': 'LineString',
'coordinates': item['geometry']['coordinates'],
},
'properties': {
'operators': item['properties']['operator'],
'source': 'existing'
}
})
if item['geometry']['type'] == 'MultiLineString':
if int(item['properties']['live']) == 1:
try:
geom = MultiLineString(item['geometry']['coordinates'])
for line in geom:
data.append({
'type': 'Feature',
'geometry': mapping(line),
'properties': {
'operators': item['properties']['operator'],
'source': 'existing'
}
})
except:
# some geometries are incorrect from data source
# exclude to avoid issues
pass
if len(data) == 0:
return print('No existing infrastructure')
data = gpd.GeoDataFrame.from_features(data)
data.to_file(path_output, crs='epsg:4326')
return print('Existing fiber processed')
def find_nodes_on_existing_infrastructure(country):
"""
Find those agglomerations which are within a buffered zone of
existing fiber links.
Parameters
----------
country : dict
        Contains all country specific information.
"""
iso3 = country['iso3']
folder = os.path.join(DATA_INTERMEDIATE, iso3, 'network_existing')
filename = 'core_nodes_existing.shp'
path_output = os.path.join(folder, filename)
if os.path.exists(path_output):
return print('Already found nodes on existing infrastructure')
else:
        if not os.path.exists(os.path.dirname(path_output)):
            os.makedirs(os.path.dirname(path_output))
path = os.path.join(folder, 'core_edges_existing.shp')
if not os.path.exists(path):
return print('No existing infrastructure')
existing_infra = gpd.read_file(path, crs='epsg:4326')
existing_infra = existing_infra.to_crs(epsg=3857)
existing_infra['geometry'] = existing_infra['geometry'].buffer(5000)
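    # buffer distance of 5000 is in Web Mercator units (roughly metres) because of the epsg:3857 reprojection above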
existing_infra = existing_infra.to_crs(epsg=4326)
# shape_output = os.path.join(DATA_INTERMEDIATE, iso3, 'network', 'core_edges_buffered.shp')
# existing_infra.to_file(shape_output, crs='epsg:4326')
path = os.path.join(DATA_INTERMEDIATE, iso3, 'agglomerations', 'agglomerations.shp')
agglomerations = gpd.read_file(path, crs='epsg:4326')
bool_list = agglomerations.intersects(existing_infra.unary_union)
agglomerations = pd.concat([agglomerations, bool_list], axis=1)
agglomerations = agglomerations[agglomerations[0] == True].drop(columns=0)
agglomerations['source'] = 'existing'
agglomerations.to_file(path_output, crs='epsg:4326')
return print('Found nodes on existing infrastructure')
def find_nodes(country, regions):
"""
Find key nodes.
Parameters
----------
country : dict
        Contains all country specific information.
regions : dataframe
All regions to be assessed.
Returns
-------
interim : list of dicts
Contains geojson dicts for nodes.
missing_nodes : list
Contains the id of regions with missing nodes.
"""
iso3 = country['iso3']
regional_level = country['regional_level']
GID_level = 'GID_{}'.format(regional_level)
threshold = country['pop_density_km2']
settlement_size = country['settlement_size']
folder_tifs = os.path.join(DATA_INTERMEDIATE, iso3, 'agglomerations', 'tifs')
interim = []
missing_nodes = set()
print('Working on gathering data from regional rasters')
for idx, region in regions.iterrows():
path = os.path.join(folder_tifs, region[GID_level] + '.tif')
with rasterio.open(path) as src:
data = src.read()
data[data < threshold] = 0
data[data >= threshold] = 1
polygons = rasterio.features.shapes(data, transform=src.transform)
shapes_df = gpd.GeoDataFrame.from_features(
[
{'geometry': poly, 'properties':{'value':value}}
for poly, value in polygons
if value > 0
],
crs='epsg:4326'
)
geojson_region = [
{
'geometry': region['geometry'],
'properties': {
GID_level: region[GID_level]
}
}
]
gpd_region = gpd.GeoDataFrame.from_features(
[
{'geometry': poly['geometry'],
'properties':{
GID_level: poly['properties'][GID_level]
}}
for poly in geojson_region
], crs='epsg:4326'
)
if len(shapes_df) == 0:
continue
nodes = gpd.overlay(shapes_df, gpd_region, how='intersection')
stats = zonal_stats(shapes_df['geometry'], path, stats=['count', 'sum'])
stats_df = pd.DataFrame(stats)
nodes = pd.concat([shapes_df, stats_df], axis=1).drop(columns='value')
nodes_subset = nodes[nodes['sum'] >= settlement_size]
if len(nodes_subset) == 0:
missing_nodes.add(region[GID_level])
for idx, item in nodes_subset.iterrows():
interim.append({
'geometry': item['geometry'].centroid,
'properties': {
GID_level: region[GID_level],
'count': item['count'],
'sum': item['sum']
}
})
return interim, missing_nodes
def get_missing_nodes(country, regions, missing_nodes, threshold, settlement_size):
"""
Find any missing nodes.
Parameters
----------
country : dict
        Contains all country specific information.
regions : dataframe
All regions to be assessed.
missing_nodes : list
Contains the id of regions with missing nodes.
threshold : int
Population density threshold in persons per square kilometer.
settlement_size : int
Overall settlement size threshold.
Returns
-------
interim : list of dicts
Contains geojson dicts for nodes.
"""
iso3 = country['iso3']
regional_level = country['regional_level']
GID_level = 'GID_{}'.format(regional_level)
folder_tifs = os.path.join(DATA_INTERMEDIATE, iso3, 'agglomerations', 'tifs')
interim = []
for idx, region in regions.iterrows():
if not region[GID_level] in list(missing_nodes):
continue
path = os.path.join(folder_tifs, region[GID_level] + '.tif')
with rasterio.open(path) as src:
data = src.read()
data[data < threshold] = 0
data[data >= threshold] = 1
polygons = rasterio.features.shapes(data, transform=src.transform)
shapes_df = gpd.GeoDataFrame.from_features(
[
{'geometry': poly, 'properties':{'value':value}}
for poly, value in polygons
if value > 0
],
crs='epsg:4326'
)
geojson_region = [
{
'geometry': region['geometry'],
'properties': {
GID_level: region[GID_level]
}
}
]
gpd_region = gpd.GeoDataFrame.from_features(
[
{'geometry': poly['geometry'],
'properties':{
GID_level: poly['properties'][GID_level]
}}
for poly in geojson_region
], crs='epsg:4326'
)
nodes = gpd.overlay(shapes_df, gpd_region, how='intersection')
stats = zonal_stats(shapes_df['geometry'], path, stats=['count', 'sum'])
stats_df = | pd.DataFrame(stats) | pandas.DataFrame |
# -*- coding: utf-8 -*-
import pandas
from dateutils import relativedelta
from datetime import date
from sqlalchemy import func, desc, asc
from mako.template import Template
from bokeh.charts import Bar, TimeSeries
from bokeh.embed import components
from models import Key, Signature
class HTMLOutput():
def __init__(self, session, ca_key, domain):
self.session = session
self.ca_key = ca_key
self.domain = domain
def domain_query(self, *args):
if self.domain is not None:
return self.session.query(*args).filter(Key.email.like('%{}'.format(self.domain)))
else:
return self.session.query(*args)
@property
def total_sigs(self):
return self.session.query(Signature).count()
@property
def total_sigs_this_month(self):
return self.session.query(Signature).filter(Signature.sign_date >= date.today().replace(day=1)).count()
@property
def total_ca_auto_sigs(self):
return self.session.query(Signature).filter(Signature.signer_key == self.ca_key).count()
@property
def total_ca_auto_sigs_this_month(self):
return self.session.query(Signature).filter(
Signature.signer_key == self.ca_key,
Signature.sign_date >= date.today().replace(day=1)
).count()
@property
def total_keys_and_sigs(self):
sigs = self.domain_query(
func.COUNT(Signature.id).label('num_sigs'),
Signature.sign_date
).join(Key).filter(
Signature.sign_date > date.today()-relativedelta(years=2)
).group_by(Signature.sign_date).order_by(asc(Signature.sign_date)).all()
current_num_sigs = self.session.query(Signature).filter(Signature.sign_date <= date.today()-relativedelta(years=2)).count()
if self.ca_key is not None:
ca_sigs = self.domain_query(
func.COUNT(Signature.id).label('num_sigs'),
Signature.sign_date
).join(Key).filter(
Signature.sign_date > date.today()-relativedelta(years=2),
Signature.signer_key == self.ca_key
).group_by(Signature.sign_date).order_by(asc(Signature.sign_date)).all()
current_num_ca_sigs = self.session.query(Signature).filter(
Signature.sign_date <= date.today()-relativedelta(years=2), Signature.signer_key == self.ca_key).count()
else:
ca_sigs = []
current_num_ca_sigs = 0
keys = self.domain_query(
func.COUNT(Key.id).label('num_keys'),
Key.created
).filter(
Key.created > date.today()-relativedelta(years=2)
).group_by(Key.created).order_by(asc(Key.created)).all()
current_num_keys = self.session.query(Key).filter(Key.created <= date.today()-relativedelta(years=2)).count()
sig_dates = [pandas.Timestamp(sig.sign_date) for sig in sigs]
ca_sig_dates = [pandas.Timestamp(sig.sign_date) for sig in ca_sigs]
key_dates = [ | pandas.Timestamp(key.created) | pandas.Timestamp |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import json
import folium
import requests
import plotly.graph_objects as go
from sklearn.linear_model import LinearRegression
import streamlit as st
from streamlit_folium import folium_static
import streamlit.components.v1 as components
from bs4 import BeautifulSoup
import regex
with st.echo(code_location='below'):
# st.set_page_config(layout="wide")
    st.write('The goal of this project is to look at statistics on offenses '
             '(crimes and infractions) in the USA over the last ten years.')
# #BLOCK1
# entrypoint = "https://api.usa.gov/crime/fbi/sapi/api/agencies"
# query = {'api_key': 'e8vEnIM7V1Msff37SGU86c4r27dVzZOUow7LFCiM'}
# r = requests.get(entrypoint, params=query)
# data = r.json()
# columns_all = ['ori', 'agency_name', 'agency_type_name', 'state_name', 'state_abbr', 'division_name', 'region_name',
# 'region_desc', 'county_name', 'nibrs', 'latitude', 'longitude', 'nibrs_start_date']
# summ_all = pd.DataFrame(columns=columns_all)
# for i in data:
# for j in data[i]:
# a = (data[i][j])
# new = []
# for k in a:
# new += [a[k]]
# summ_all.loc[len(summ_all)] = new
# print(summ_all)
summ_all = pd.read_csv("summ_all.csv")
# BLOCK2
summ_all = (summ_all).dropna()
    st.write(
        'This map shows every agency connected to NIBRS (the National Incident-Based Reporting System). '
        'The system is used heavily in the eastern part of the country, while in the west there are '
        'entire states in which not a single agency uses NIBRS. '
        'For example, Pennsylvania has more than 1,500 agencies, yet only 25 of them use the system. ')
m = folium.Map([41.75215, -97.61819], zoom_start=4)
for ind, row in summ_all.iterrows():
folium.Circle([row.latitude, row.longitude],
radius=10, control_scale=True).add_to(m)
folium_static(m)
# ct = summ_all[(summ_all['state_abbr'] == "KS")].reset_index().dropna()
# ct["Cases"] = np.nan
# for ori in ct['ori']:
# entrypoint = "https://api.usa.gov/crime/fbi/sapi/api/data/arrest/agencies/offense/" + ori + "/all/2019/2019"
# query = {'api_key': 'e8vEnIM7V1Msff37SGU86c4r27dVzZOUow7LFCiM'}
# data2 = requests.get(entrypoint, params=query).json()
# for h in data2:
# if type(data2[h]) == list and data2[h] != []:
# data2[h][0].pop("data_year")
# data2[h][0].pop("csv_header")
# values = data2[h][0].values()
# ct["Cases"][ct['ori'] == ori] = sum(values)
ct = pd.read_csv("ct.csv")
# BLOCK3
    st.write(
        'Let us look at one state in more detail. The map shows every agency in the state of Kansas. '
        'The size of each point reflects the number of offenses registered in 2019. ')
    st.write("Number of agencies in Kansas:")
st.write(pd.value_counts(summ_all['state_abbr'])["KS"])
ct = ct.dropna()
ct = ct.sort_values(by="Cases")
fig = go.Figure()
ct['text'] = "Number of registered offenses in " + ct['agency_name'] + " is " + (ct["Cases"]).astype(str)
limits = [(0, 10), (10, 100), (100, 1000), (1000, 3000), (3000, 15000)]
colors = ["royalblue", "crimson", "lightseagreen", "orange", "lightgrey"]
cities = []
scale = 5
fig = go.Figure()
# print(sum(ct["Cases"]))
for i in range(len(limits)):
lim = limits[i]
df_sub = ct[(ct["Cases"] >= lim[0]) & (ct["Cases"] < lim[1])]
fig.add_trace(go.Scattergeo(
locationmode='USA-states',
lon=df_sub['longitude'],
lat=df_sub['latitude'],
text=df_sub['text'],
marker=dict(
size=df_sub['Cases'] / scale,
color=colors[i],
line_color='rgb(40,40,40)',
line_width=0.5,
sizemode='area'
),
name='{0} - {1}'.format(lim[0], lim[1])))
fig.update_layout(width=800, height=400,
geo=dict(
scope='north america',
showland=True,
landcolor="rgb(212, 212, 212)",
subunitcolor="rgb(255, 255, 255)",
center_lon=-98.0,
center_lat=38.45,
resolution=50,
coastlinecolor="white",
lonaxis=dict(
range=[-102.0, -93.0]
),
lataxis=dict(
range=[36.8, 40.2]
),
domain=dict(x=[0, 1], y=[0, 1])),
title='Agencies by offenses, Kansas, 2019',
)
st.plotly_chart(fig)
# #BLOCK4
# # state_data=((summ_all['state_abbr'].unique()))
# state_data = ['HI', 'DE', 'PR', 'TX', 'MA', 'MD', 'ME', 'IA', 'ID', 'MI', 'UT', 'MN', 'MO', 'IL',
# 'IN', 'MS', 'MT', 'AK', 'VA', 'AL', 'AR', 'VI', 'NC', 'ND', 'RI', 'NE', 'AZ', 'NH',
# 'NJ', 'VT', 'NM', 'FL', 'NV', 'WA', 'NY', 'SC', 'SD', 'WI', 'OH', 'GA', 'OK', 'CA',
# 'WV', 'WY', 'OR', 'GM', 'KS', 'CO', 'KY', 'PA', 'CT', 'LA', 'TN', 'DC']
# # EXCLUDE "PR"
# offenses = ["aggravated-assault", "burglary", "larceny", "motor-vehicle-theft", "homicide", "rape", "robbery",
# "arson",
# "violent-crime", "property-crime"]
# col_det = ["ori", "data_year", "offense", "state_abbr", "cleared", "actual"]
# years = ['2010', '2011', '2012', '2013', '2014', '2015', '2016', '2017', '2018', '2019']
# summ_off = pd.DataFrame()
# for abbr in state_data:
# state_alloff = pd.DataFrame()
# for off in offenses:
# state_off = pd.DataFrame(np.nan, index=[abbr], columns=years)
# entrypoint1 = "https://api.usa.gov/crime/fbi/sapi/api/nibrs/" + off + "/victim/states/" + abbr + "/count"
# print(entrypoint1)
# query = {'api_key': 'e8vEnIM7V1Msff37SGU86c4r27dVzZOUow7LFCiM'}
# r1 = requests.get(entrypoint1, params=query)
# data1 = r1.json()
# for i in data1:
# if type(data1[i]) == list and data1[i] != ['Count'] and data1[i] != []:
# for j in data1[i]:
# if years.count(str(j['data_year'])) == 1:
# state_off[str(j['data_year'])] = j['value']
# state_off["Offense"] = off
#
# state_alloff = state_alloff.append(state_off)
# summ_off = summ_off.append(state_alloff)
summ_off = pd.read_csv("summ_off.csv")
# print(summ_off)
# BLOCK5
state_data = ['HI', 'DE', 'PR', 'TX', 'MA', 'MD', 'ME', 'IA', 'ID', 'MI', 'UT', 'MN', 'MO', 'IL',
'IN', 'MS', 'MT', 'AK', 'VA', 'AL', 'AR', 'VI', 'NC', 'ND', 'RI', 'NE', 'AZ', 'NH',
'NJ', 'VT', 'NM', 'FL', 'NV', 'WA', 'NY', 'SC', 'SD', 'WI', 'OH', 'GA', 'OK', 'CA',
'WV', 'WY', 'OR', 'GM', 'KS', 'CO', 'KY', 'PA', 'CT', 'LA', 'TN', 'DC']
columns_all = summ_off.columns
agr_off = pd.DataFrame()
agr_off = pd.DataFrame(columns=columns_all)
for i in range(len(state_data)):
agr_off.loc[len(agr_off)] = summ_off.iloc[range(i, i + 10), :].sum(numeric_only=True)
agr_off["Offense"] = state_data
agr_off = (agr_off.set_index("Offense"))
sort = agr_off.sort_values(by="2019", ascending=False).head(10)
# sort.to_csv("sorted_by_state.csv")
# BLOCK6
name = "https://datausa.io/profile/geo/kansas"
r = requests.get(name)
soup = BeautifulSoup(r.text)
ans = soup.find("head")
cont = ((soup.find("head")).find_all("meta", {"name": "description"}))[0]['content']
    st.write('Let us take information about the population of Kansas from datausa.io: ')
st.write(cont)
pop = (regex.findall(r"(?<=[^\Wm])\s[\d]+\W+[\d]+\w[M]", cont))
    st.write("Using regular expressions, we extract the population of Kansas in 2018: ")
st.write(pop)
# BLOCK7
    st.write('Next, let us look at offenses committed since 2010, computing the total number of '
             'offenses for each year. One can see that the yearly total is declining. '
             'Based on these data we build a prediction for 2020. ')
ind_list = range(460, 470)
ks_off = (summ_off.iloc[ind_list, :])
ks_total = ks_off.sum(numeric_only=True)
usa_total = summ_off.sum(numeric_only=True)
regr = LinearRegression()
X = np.array([2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017, 2018, 2019]).reshape((-1, 1))
y = [0]
y[0] = ks_total
regr.fit(X, y[0])
figpr = plt.figure()
plt.plot(X, y[0])
plt.title("Offenses in Kansas, 2010-2019")
plt.plot(X, regr.predict(X), color='C1')
st.pyplot(figpr)
    st.write('Prediction for 2020')
st.write(regr.predict(np.array([[2020]])))
# BLOCK8
    st.write('Let us compute the correlation between the unemployment rate, GDP per capita '
             'and the number of offenses.')
unemp = np.array((pd.read_csv("KSURN.csv"))["KSURN"])
gdp_h = np.array((pd.read_csv("MEHOINUSKSA672N.csv"))["MEHOINUSKSA672N"])
u, u_sd = (np.around(np.mean(unemp), decimals=3), np.around(np.std(unemp), decimals=3))
g, g_sd = (np.around(np.mean(gdp_h / 1000), decimals=3), np.around(np.std(gdp_h / 1000), decimals=3))
    st.write('Unemployment and GDP per capita in Kansas, 2009-2019:')
st.write( | pd.read_csv("KSURN.csv") | pandas.read_csv |
# Copyright WillianFuks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Uses the posterior distribution to prepare inferences for the Causal Impact summary and
plotting functionalities.
"""
from typing import List, Optional, Tuple, Union
import numpy as np
import pandas as pd
import tensorflow as tf
import tensorflow_probability as tfp
from causalimpact.misc import maybe_unstandardize
tfd = tfp.distributions
def get_lower_upper_percentiles(alpha: float) -> List[float]:
"""
Returns the lower and upper quantile values for the chosen `alpha` value.
Args
----
alpha: float
Sets the size of the credible interval. If `alpha=0.05` then extracts the
95% credible interval for forecasts.
Returns
-------
List[float]
First value is the lower quantile and second value is upper.
"""
return [alpha * 100. / 2., 100 - alpha * 100. / 2.]
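# For example, alpha=0.05 yields [2.5, 97.5], the bounds of a 95% credible interval.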
def compile_posterior_inferences(
original_index: pd.core.indexes.base.Index,
pre_data: pd.DataFrame,
post_data: pd.DataFrame,
one_step_dist: tfd.Distribution,
posterior_dist: tfd.Distribution,
mu_sig: Optional[Tuple[float, float]],
alpha: float = 0.05,
niter: int = 1000
) -> pd.DataFrame:
"""
Uses the posterior distribution of the structural time series probabilistic
model to run predictions and forecasts for observed data. Results are stored for
later usage on the summary and plotting functionalities.
Args
----
original_index: pd.core.indexes.base.Index
Original index from input data. If it's a `RangeIndex` then cast inferences
index to be of the same type.
pre_data: pd.DataFrame
This is the original input data, that is, it's not standardized.
post_data: pd.DataFrame
Same as `pre_data`.
This is the original input data, that is, it's not standardized.
one_step_dist: tfd.Distribution
Uses posterior parameters to run one-step-prediction on past observed data.
posterior_dist: tfd.Distribution
Uses posterior parameters to run forecasts on post intervention data.
mu_sig: Optional[Tuple[float, float]]
First value is the mean used for standardization and second value is the
standard deviation.
alpha: float
Sets credible interval size.
niter: int
Total mcmc samples to sample from the posterior structural model.
Returns
-------
inferences: pd.DataFrame
Final dataframe with all data related to one-step predictions and forecasts.
"""
lower_percen, upper_percen = get_lower_upper_percentiles(alpha)
# Integrates pre and post index for cumulative index data.
cum_index = build_cum_index(pre_data.index, post_data.index)
# We create a pd.Series with a single 0 (zero) value to work as the initial value
# when computing the cumulative inferences. Without this value the plotting of
# cumulative data breaks at the initial point.
zero_series = pd.Series([0])
simulated_pre_ys = one_step_dist.sample(niter) # shape (niter, n_train_timestamps, 1)
simulated_pre_ys = maybe_unstandardize(
np.squeeze(simulated_pre_ys.numpy()),
mu_sig
) # shape (niter, n_forecasts)
simulated_post_ys = posterior_dist.sample(niter) # shape (niter, n_forecasts, 1)
simulated_post_ys = maybe_unstandardize(
np.squeeze(simulated_post_ys.numpy()),
mu_sig
) # shape (niter, n_forecasts)
# Pre inference
pre_preds_means = one_step_dist.mean()
pre_preds_means = pd.Series(
np.squeeze(
maybe_unstandardize(pre_preds_means, mu_sig)
),
index=pre_data.index
)
pre_preds_lower, pre_preds_upper = np.percentile(
simulated_pre_ys,
[lower_percen, upper_percen],
axis=0
)
pre_preds_lower = pd.Series(pre_preds_lower, index=pre_data.index)
pre_preds_upper = pd.Series(pre_preds_upper, index=pre_data.index)
# Post inference
post_preds_means = posterior_dist.mean()
post_preds_means = pd.Series(
np.squeeze(
maybe_unstandardize(post_preds_means, mu_sig)
),
index=post_data.index
)
post_preds_lower, post_preds_upper = np.percentile(
simulated_post_ys,
[lower_percen, upper_percen],
axis=0
)
post_preds_lower = pd.Series(post_preds_lower, index=post_data.index)
post_preds_upper = pd.Series(post_preds_upper, index=post_data.index)
# Concatenations
complete_preds_means = pd.concat([pre_preds_means, post_preds_means])
complete_preds_lower = | pd.concat([pre_preds_lower, post_preds_lower]) | pandas.concat |
# -*- coding: utf-8 -*-
import six
import numpy as np
import pandas as pd
from functools import lru_cache
from .utils import wrap_formula_exc, FormulaException, func_counter
from .context import ExecutionContext
__updated__ = "2021-06-11"
@func_counter
def get_bars(freq):
@lru_cache(maxsize=256)
def _check_return_none(order_book_id, data_backend, current_date, start_date, freq):
# if security is suspend, just skip
trading_dates = ExecutionContext.get_data_backend(
).get_trading_dates(start=start_date, end=current_date)
if data_backend.skip_suspended and bars["datetime"][-1] // 1000000 != trading_dates[-1] and freq not in (
"W", "M"):
return order_book_id
else:
return ""
data_backend = ExecutionContext.get_data_backend()
current_date = ExecutionContext.get_current_date()
order_book_id = ExecutionContext.get_current_security()
start_date = ExecutionContext.get_start_date()
try:
bars = data_backend.get_price(
order_book_id, start=start_date, end=current_date, freq=freq)
except KeyError:
return np.array([])
if len(bars) > 0 and _check_return_none(order_book_id, data_backend, current_date, start_date, freq):
return np.array([])
return bars
@func_counter
def fit_series(*series_list):
size = min(len(series) for series in series_list)
if size == 0:
raise FormulaException("series size == 0")
new_series_list = [series[-size:] for series in series_list]
return new_series_list
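# e.g. fit_series(np.arange(5), np.arange(3)) returns [array([2, 3, 4]), array([0, 1, 2])]:
# every series is truncated to the length of the shortest one, keeping the most recent values.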
def get_value(val):
if isinstance(val, TimeSeries):
return val.value
else:
return val
@func_counter
def get_series(val, size=640000):
    """TODO: if the original array is no longer needed, call copy() after slicing."""
if isinstance(val, TimeSeries):
return val.series
else:
return DuplicateNumericSeries(val, size).series
@func_counter
def ensure_timeseries(series):
if isinstance(series, TimeSeries):
return series
else:
return DuplicateNumericSeries(series)
class TimeSeries(object):
'''
https://docs.python.org/3/library/operator.html
'''
@property
def series(self):
raise NotImplementedError
@property
@wrap_formula_exc
def value(self):
try:
return self.series[-1]
except IndexError:
raise FormulaException("DATA UNAVAILABLE")
def __len__(self):
return len(self.series)
@wrap_formula_exc
def __lt__(self, other):
s1, s2 = fit_series(self.series, get_series(other))
with np.errstate(invalid='ignore'):
series = s1 < s2
return BoolSeries(series)
@wrap_formula_exc
def __gt__(self, other):
s1, s2 = fit_series(self.series, get_series(other))
with np.errstate(invalid='ignore'):
series = s1 > s2
return BoolSeries(series)
@wrap_formula_exc
def __eq__(self, other):
s1, s2 = fit_series(self.series, get_series(other))
with np.errstate(invalid='ignore'):
series = s1 == s2
return BoolSeries(series)
@wrap_formula_exc
def __ne__(self, other):
s1, s2 = fit_series(self.series, get_series(other))
with np.errstate(invalid='ignore'):
series = s1 != s2
return BoolSeries(series)
@wrap_formula_exc
def __ge__(self, other):
s1, s2 = fit_series(self.series, get_series(other))
with np.errstate(invalid='ignore'):
series = s1 >= s2
return BoolSeries(series)
@wrap_formula_exc
def __le__(self, other):
s1, s2 = fit_series(self.series, get_series(other))
with np.errstate(invalid='ignore'):
series = s1 <= s2
return BoolSeries(series)
@wrap_formula_exc
def __sub__(self, other):
s1, s2 = fit_series(self.series, get_series(other))
with np.errstate(invalid='ignore'):
series = s1 - s2
return NumericSeries(series)
@wrap_formula_exc
def __rsub__(self, other):
s1, s2 = fit_series(self.series, get_series(other))
with np.errstate(invalid='ignore'):
series = s2 - s1
return NumericSeries(series)
@wrap_formula_exc
def __add__(self, other):
s1, s2 = fit_series(self.series, get_series(other))
with np.errstate(invalid='ignore'):
series = s1 + s2
return NumericSeries(series)
@wrap_formula_exc
def __radd__(self, other):
s1, s2 = fit_series(self.series, get_series(other))
with np.errstate(invalid='ignore'):
series = s2 + s1
return NumericSeries(series)
@wrap_formula_exc
def __mul__(self, other):
s1, s2 = fit_series(self.series, get_series(other))
with np.errstate(invalid='ignore'):
series = s1 * s2
return NumericSeries(series)
@wrap_formula_exc
def __rmul__(self, other):
s1, s2 = fit_series(self.series, get_series(other))
with np.errstate(invalid='ignore'):
series = s2 * s1
return NumericSeries(series)
@wrap_formula_exc
def __truediv__(self, other):
s1, s2 = fit_series(self.series, get_series(other))
with np.errstate(invalid='ignore'):
series = s1 / s2
return NumericSeries(series)
@wrap_formula_exc
def __rtruediv__(self, other):
s1, s2 = fit_series(self.series, get_series(other))
with np.errstate(invalid='ignore'):
series = s2 / s1
return NumericSeries(series)
__div__ = __truediv__
def __bool__(self):
return len(self) > 0 and bool(self.value)
def __and__(self, other):
s1, s2 = fit_series(self.series, get_series(other))
if s1.dtype != bool:
s1 = s1 > 0
if s2.dtype != bool:
s2 = s2 > 0
return BoolSeries(s1 & s2)
def __or__(self, other):
s1, s2 = fit_series(self.series, get_series(other))
return BoolSeries(s1 | s2)
@wrap_formula_exc
def __invert__(self):
with np.errstate(invalid='ignore'):
series = ~self.series
return BoolSeries(series)
# fix bug in python 2
# __nonzero__ = __bool__
def __repr__(self):
if len(self.series) == 0:
return ''
return str(self.value)
def __int__(self):
return int(self.value)
def shift(self, n: int=1, fill_value=np.nan):
from .utils import shift
return self.__class__(shift(self.series, n, fill_value))
class NumericSeries(TimeSeries):
def __init__(self, series=[]):
super(NumericSeries, self).__init__()
self._series = series
self.extra_create_kwargs = {}
@property
@func_counter
def series(self):
return self._series
    def to_list(self):
        """Return the series as a list."""
if self.series is not None:
return self.series.to_list()
else:
return []
    def to_df(self):
        """Return the series as a pd.DataFrame."""
if self.series is not None:
return | pd.DataFrame(self.series) | pandas.DataFrame |
# pylint: disable-msg=W0402
from datetime import datetime
import random
import string
from numpy.random import randn
import numpy as np
from pandas.core.api import (DateRange, Index, Series, DataFrame,
DataMatrix, WidePanel)
N = 30
K = 4
def rands(n):
choices = string.letters + string.digits
return ''.join([random.choice(choices) for _ in xrange(n)])
def equalContents(arr1, arr2):
"""Checks if the set of unique elements of arr1 and arr2 are equivalent.
"""
return frozenset(arr1) == frozenset(arr2)
def isiterable(obj):
return hasattr(obj, '__iter__')
def assert_almost_equal(a, b):
if isiterable(a):
np.testing.assert_(isiterable(b))
np.testing.assert_equal(len(a), len(b))
for i in xrange(len(a)):
assert_almost_equal(a[i], b[i])
return True
err_msg = lambda a, b: 'expected %.5f but got %.5f' % (a, b)
if np.isnan(a):
np.testing.assert_(np.isnan(b))
return
# case for zero
if abs(a) < 1e-5:
np.testing.assert_almost_equal(
a, b, decimal=5, err_msg=err_msg(a, b), verbose=False)
else:
np.testing.assert_almost_equal(
1, a/b, decimal=5, err_msg=err_msg(a, b), verbose=False)
def is_sorted(seq):
return assert_almost_equal(seq, np.sort(np.array(seq)))
def assert_dict_equal(a, b, compare_keys=True):
a_keys = frozenset(a.keys())
b_keys = frozenset(b.keys())
if compare_keys:
assert(a_keys == b_keys)
for k in a_keys:
assert_almost_equal(a[k], b[k])
def assert_series_equal(left, right):
assert_almost_equal(left, right)
assert(np.array_equal(left.index, right.index))
def assert_frame_equal(left, right):
for col, series in left.iteritems():
assert(col in right)
assert_series_equal(series, right[col])
for col in right:
assert(col in left)
def assert_contains_all(iterable, dic):
for k in iterable:
assert(k in dic)
def getCols(k):
return string.ascii_uppercase[:k]
def makeStringIndex(k):
return Index([rands(10) for _ in xrange(k)])
def makeIntIndex(k):
return Index(np.arange(k))
def makeDateIndex(k):
dates = list(DateRange(datetime(2000, 1, 1), periods=k))
return Index(dates)
def makeFloatSeries():
index = makeStringIndex(N)
return Series(randn(N), index=index)
def makeStringSeries():
index = makeStringIndex(N)
return Series(randn(N), index=index)
def makeObjectSeries():
dateIndex = makeDateIndex(N)
index = makeStringIndex(N)
return | Series(dateIndex, index=index) | pandas.core.api.Series |
import logging
import math
import warnings
import numpy as np
import pandas as pd
import pytest
import scipy.stats
from dask import array as da, dataframe as dd
from distributed.utils_test import ( # noqa: F401
captured_logger,
cluster,
gen_cluster,
loop,
)
from sklearn.linear_model import SGDClassifier
from dask_ml._compat import DISTRIBUTED_2_5_0
from dask_ml.datasets import make_classification
from dask_ml.model_selection import (
HyperbandSearchCV,
IncrementalSearchCV,
SuccessiveHalvingSearchCV,
)
from dask_ml.model_selection._hyperband import _get_hyperband_params
from dask_ml.utils import ConstantFunction
from dask_ml.wrappers import Incremental
pytestmark = pytest.mark.skipif(not DISTRIBUTED_2_5_0, reason="hangs")
@pytest.mark.parametrize(
"array_type, library, max_iter",
[
("dask.array", "dask-ml", 9),
("numpy", "sklearn", 9),
("numpy", "ConstantFunction", 15),
("numpy", "ConstantFunction", 20),
],
)
def test_basic(array_type, library, max_iter):
@gen_cluster(client=True)
def _test_basic(c, s, a, b):
rng = da.random.RandomState(42)
n, d = (50, 2)
# create observations we know linear models can fit
X = rng.normal(size=(n, d), chunks=n // 2)
coef_star = rng.uniform(size=d, chunks=d)
y = da.sign(X.dot(coef_star))
if array_type == "numpy":
X, y = yield c.compute((X, y))
params = {
"loss": ["hinge", "log", "modified_huber", "squared_hinge", "perceptron"],
"average": [True, False],
"learning_rate": ["constant", "invscaling", "optimal"],
"eta0": np.logspace(-2, 0, num=1000),
}
model = SGDClassifier(
tol=-np.inf, penalty="elasticnet", random_state=42, eta0=0.1
)
if library == "dask-ml":
model = Incremental(model)
params = {"estimator__" + k: v for k, v in params.items()}
elif library == "ConstantFunction":
model = ConstantFunction()
params = {"value": np.linspace(0, 1, num=1000)}
search = HyperbandSearchCV(model, params, max_iter=max_iter, random_state=42)
classes = c.compute(da.unique(y))
yield search.fit(X, y, classes=classes)
if library == "dask-ml":
X, y = yield c.compute((X, y))
score = search.best_estimator_.score(X, y)
assert score == search.score(X, y)
assert 0 <= score <= 1
if library == "ConstantFunction":
assert score == search.best_score_
else:
# These are not equal because IncrementalSearchCV uses a train/test
# split and we're testing on the entire train dataset, not only the
# validation/test set.
assert abs(score - search.best_score_) < 0.1
assert type(search.best_estimator_) == type(model)
assert isinstance(search.best_params_, dict)
num_fit_models = len(set(search.cv_results_["model_id"]))
num_pf_calls = sum(
[v[-1]["partial_fit_calls"] for v in search.model_history_.values()]
)
models = {9: 17, 15: 17, 20: 17, 27: 49, 30: 49, 81: 143}
pf_calls = {9: 69, 15: 101, 20: 144, 27: 357, 30: 379, 81: 1581}
assert num_fit_models == models[max_iter]
assert num_pf_calls == pf_calls[max_iter]
best_idx = search.best_index_
if isinstance(model, ConstantFunction):
assert search.cv_results_["test_score"][best_idx] == max(
search.cv_results_["test_score"]
)
model_ids = {h["model_id"] for h in search.history_}
if math.log(max_iter, 3) % 1.0 == 0:
# log(max_iter, 3) % 1.0 == 0 is the good case when max_iter is a
# power of search.aggressiveness
# In this case, assert that more models are tried than max_iter
assert len(model_ids) > max_iter
else:
# Otherwise, give some padding "almost as many estimators are tried
# as max_iter". 3 is a fudge number chosen to be the minimum; when
# max_iter=20, len(model_ids) == 17.
assert len(model_ids) + 3 >= max_iter
assert all("bracket" in id_ for id_ in model_ids)
_test_basic()
@pytest.mark.parametrize("max_iter,aggressiveness", [(27, 3), (30, 4)])
def test_hyperband_mirrors_paper_and_metadata(max_iter, aggressiveness):
@gen_cluster(client=True)
def _test_mirrors_paper(c, s, a, b):
X, y = make_classification(n_samples=10, n_features=4, chunks=10)
model = ConstantFunction()
params = {"value": np.random.rand(max_iter)}
alg = HyperbandSearchCV(
model,
params,
max_iter=max_iter,
random_state=0,
aggressiveness=aggressiveness,
)
yield alg.fit(X, y)
assert alg.metadata == alg.metadata_
assert isinstance(alg.metadata["brackets"], list)
assert set(alg.metadata.keys()) == {"n_models", "partial_fit_calls", "brackets"}
# Looping over alg.metadata["bracketes"] is okay because alg.metadata
# == alg.metadata_
for bracket in alg.metadata["brackets"]:
assert set(bracket.keys()) == {
"n_models",
"partial_fit_calls",
"bracket",
"SuccessiveHalvingSearchCV params",
"decisions",
}
if aggressiveness == 3:
assert alg.best_score_ == params["value"].max()
_test_mirrors_paper()
@gen_cluster(client=True)
def test_hyperband_patience(c, s, a, b):
# Test to make sure that specifying patience=True results in less
# computation
X, y = make_classification(n_samples=10, n_features=4, chunks=10)
model = ConstantFunction()
params = {"value": scipy.stats.uniform(0, 1)}
max_iter = 27
alg = HyperbandSearchCV(
model, params, max_iter=max_iter, patience=True, random_state=0
)
yield alg.fit(X, y)
alg_patience = max_iter // alg.aggressiveness
actual_decisions = [b.pop("decisions") for b in alg.metadata_["brackets"]]
paper_decisions = [b.pop("decisions") for b in alg.metadata["brackets"]]
for paper_iter, actual_iter in zip(paper_decisions, actual_decisions):
trimmed_paper_iter = {k for k in paper_iter if k <= alg_patience}
# This makes sure that the algorithm is executed faithfully when
# patience=True (and the proper decision points are preserved even if
# other stop-on-plateau points are added)
assert trimmed_paper_iter.issubset(set(actual_iter))
# This makes sure models aren't trained for too long
assert all(x <= alg_patience + 1 for x in actual_iter)
assert alg.metadata_["partial_fit_calls"] <= alg.metadata["partial_fit_calls"]
assert alg.best_score_ >= 0.9
max_iter = 6
kwargs = dict(max_iter=max_iter, aggressiveness=2)
alg = HyperbandSearchCV(model, params, patience=2, **kwargs)
with pytest.warns(UserWarning, match="The goal of `patience`"):
yield alg.fit(X, y)
alg = HyperbandSearchCV(model, params, patience=2, tol=np.nan, **kwargs)
yield alg.fit(X, y)
assert pd.DataFrame(alg.history_)
import numpy as np
import pandas as pd
import time, copy
import pickle as pickle
import sklearn
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import log_loss
from scipy.special import expit
import matplotlib.pyplot as plt
from sklearn.ensemble import AdaBoostClassifier
import statsmodels.api as sm
import tensorflow as tf
from tensorflow.keras.models import Model, Sequential
from tensorflow.keras.layers import Dense, Dropout, Input
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.callbacks import LearningRateScheduler
from tensorflow.python.eager.context import num_gpus
from imblearn.over_sampling import SMOTE
from imblearn.under_sampling import RandomUnderSampler
from sub_utils import exp_decay_scheduler, keras_count_nontrainable_params, resample_and_shuffle, create_tf_dataset, reshape_model_input
class Naive_Classifier:
'''
Create naive baseline classifier, that assigns a constant surrender rate, regardsless of the feature configuration.
Parameters
----------
rate: Constant probability prediction
'''
def __init__(self, rate, ):
self.rate = rate
def predict_proba(self, X):
pred = np.zeros(shape=(len(X),2))
pred[:,0] = 1-self.rate
pred[:,1]= self.rate
return pred
def predict(self, X):
return self.predict_proba(X)
def predict_class(self, X, threshold=0.5):
return self.predict_proba(X)>threshold
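# Hypothetical usage sketch (not part of the original module): the naive baseline
# ignores the features entirely and returns the same [1-rate, rate] row for every sample.
def _naive_classifier_example():
    baseline = Naive_Classifier(rate=0.05)
    X_dummy = np.zeros((4, 7))                 # any feature matrix works; values are ignored
    probs = baseline.predict_proba(X_dummy)    # shape (4, 2) with columns [0.95, 0.05]
    labels = baseline.predict_class(X_dummy)   # boolean array, thresholded at 0.5
    return probs, labels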
def create_ann(widths: list, actv: list, dropout: float, n_input: int, lrate: float):
'''
Create individual ANNs for ANN_bagging.
'''
model = Sequential()
for j in range(len(widths)):
if j==0: # Specify input size for first layer
model.add(Dense(units = widths[j], activation = actv[j], input_dim = n_input))
else:
model.add(Dense(units = widths[j], activation = actv[j]))
if j<(len(widths)-1): # No dropout after output layer
model.add(Dropout(rate = dropout))
model.compile(loss = 'binary_crossentropy', metrics= ['acc'], optimizer=Adam(lr=lrate))
return model
def hpsearch_ann(**params):
'''
Use params obtained via a hpsearch to create an ann.
This function is a helper function, to simplify the varying notation.
'''
widths = [params['width_{}'.format(1+i)] for i in range(params['depth'])]+[1]
actv = params['depth']*[params['actv']]+['sigmoid']
dropout = params['dropout']
n_input = params['n_input']
lrate = params['lrate']
model = create_ann(widths=widths, actv=actv, dropout=dropout, n_input= n_input, lrate = lrate)
return model
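# Hedged example (the concrete values below are made up): hpsearch_ann expects the flat
# key layout produced by the hyper-parameter search, i.e. 'width_1' ... 'width_<depth>'
# plus a shared activation, dropout rate, input size and learning rate. A dict such as
# the following would yield a network with hidden layers [32, 16] and a sigmoid output:
_example_hparams = {
    'depth': 2, 'width_1': 32, 'width_2': 16,
    'actv': 'relu', 'dropout': 0.1, 'n_input': 10, 'lrate': 1e-3,
}
# model = hpsearch_ann(**_example_hparams)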
def hpsearch_boost_ann(resampler ='None', tf_dist_strat = None, **params):
'''
Helper function to map params to ANN_boost object initialization.
'''
N_boosting = params['n_boosting']
n_input = params['n_input']
boost_width = params['width']
actv = params['actv']
lrate = params['lrate']
return ANN_boost(N_models = N_boosting, N_input = n_input, width=boost_width, act_fct=actv, lr = lrate, resampler = resampler, tf_dist_strat=tf_dist_strat)
class Logit_model:
'''
A bagged version of the sklearn LogisticRegression model.
'''
def __init__(self, params, poly_degrees, N_bag = 5, resampler = 'None'):
self.poly_degrees = poly_degrees
self.resampler = resampler
self.N_bag = N_bag
try:
del params['random_state']
except:
pass
self.models = [LogisticRegression(**params) for _ in range(self.N_bag)]
def fit(self, X_train, y_train):
'''
Fit all individual models independently for data X, y.
'''
for i in range(self.N_bag):
# optional resampling
if self.resampler == 'undersampling':
X,y = RandomUnderSampler(sampling_strategy= 'majority').fit_resample(X=X_train, y=y_train)
# shuffle data, otherwise all oversampled data are appended
X,y = sklearn.utils.shuffle(X,y)
elif self.resampler == 'SMOTE':
X,y = SMOTE().fit_resample(X=X_train, y=y_train)
# shuffle data, otherwise all oversampled data are appended
X,y = sklearn.utils.shuffle(X,y)
else:
X,y = X_train, y_train
X,y = sklearn.utils.shuffle(X,y)
# polynomial feature engineering
X_logit, y_logit = reshape_model_input(X, degrees_lst = self.poly_degrees), y
# fit model
self.models[i].fit(X_logit, y_logit)
# [self.models[i].fit(*shuffle(X_logit, y_logit, random_state=i)) for i in range(self.N_bag)]
return self # allow for one-line notation of creating and fitting the model
def predict_proba(self, X):
'''
Predict probabilities using the full ensembles of self.N_bag individual models.
'''
X_logit = reshape_model_input(X, degrees_lst = self.poly_degrees)
return np.sum(np.array([self.models[i].predict_proba(X_logit) for i in range(self.N_bag)]), axis = 0)/self.N_bag
def predict_proba_running_avg(self, X):
'''
Predict probabilities for all individual logit-models and report rolling average results, i.e. the benefit of adding more individual models to the ensemble.
'''
X_logit = reshape_model_input(X, degrees_lst = self.poly_degrees)
return np.cumsum(np.array([self.models[i].predict_proba(X_logit) for i in range(self.N_bag)]), axis = 0)/np.arange(1, self.N_bag+1).reshape((-1,1,1))
def predict_proba_individual(self, X):
'''
Predict probabilities for all individual logit-models and report them as an array of shape (N_bag, len(X), 2).
'''
X_logit = reshape_model_input(X, degrees_lst = self.poly_degrees)
return np.array([self.models[i].predict_proba(X_logit) for i in range(self.N_bag)])
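# Hypothetical usage sketch (parameter values are assumptions and reshape_model_input
# comes from sub_utils): the bag averages the predicted probabilities of N_bag
# independently fitted logistic regressions on polynomial features.
def _logit_bag_example(X_train, y_train, X_test):
    params = {'C': 1.0, 'max_iter': 1000}      # plain sklearn LogisticRegression kwargs
    bag = Logit_model(params, poly_degrees=[1, 2], N_bag=5, resampler='None')
    bag.fit(X_train, y_train)
    return bag.predict_proba(X_test)           # shape (len(X_test), 2)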
class ANN_bagging:
"""
Purpose: Build multiple ANN models, use the bagged predictor in combination with an optional resampling procedure to reduce the variance of a predictor.
New version - compatible with hpsklearn optimized parameter values as input
Initialize the architecture of all individual models in the bagging procedure.
Inputs:
-------
N_models: Number of models to be included in bagging procedure
N_input: Number of input nodes
width_lst: List containing the width for all layers, and hence implicitely also the depth of the network
act_fct_lst: List containing the activation function for all layers
dropout_rate: Dropout rate applied to all layers (except output layer)
dropout_rate = 0 will effectively disable dropout
resampler: 'None': No resampling
'SMOTE': SMOTE resampling
'undersampling': RandomUndersampling
loss: loss function which the model will be compiled with. Standard option: 'binary_crossentropy'
optimizer: loss function which the model will be compiled with. Standard option: 'adam'
Outputs:
--------
None. Creates self.model object with type(object) = dict
"""
def __init__(self, N_models: int, hparams:dict, tf_dist_strat, resampler = 'None'):
self.resampler = resampler
self.model = {}
self.hparams = hparams
self.lr = hparams['lrate']
self.tf_dist_strat = tf_dist_strat
for i in range(N_models):
# create model i
try:
with self.tf_dist_strat.scope():
self.model[i] = hpsearch_ann(**hparams)
except:
self.model[i] = hpsearch_ann(**hparams)
# set ensemble model
try:
with self.tf_dist_strat.scope():
INPUT = Input(shape = (self.hparams['n_input'],))
self.ensemble = Model(inputs=INPUT, outputs = tf.keras.layers.Average()([self.model[i](INPUT) for i in range(len(self.model))]))
# reduce learning rate for final fine-tuning of collective bagged model
self.ensemble.compile(optimizer = Adam(learning_rate=self.lr/2), loss = 'binary_crossentropy', metrics = ['acc'])
except:
INPUT = Input(shape = (self.hparams['n_input'],))
self.ensemble = Model(inputs=INPUT, outputs = tf.keras.layers.Average()([self.model[i](INPUT) for i in range(len(self.model))]))
# reduce learning rate for final fine-tuning of collective bagged model
self.ensemble.compile(optimizer = Adam(learning_rate=self.lr/2), loss = 'binary_crossentropy', metrics = ['acc'])
def re_init_ensemble(self):
'''
Note: If we load old parametrizations by setting self.model[i] = value, the self.ensemble does not update automatically.
Hence, we need this value for consistently loading old values.
'''
# re-set ensemble model
try:
with self.tf_dist_strat.scope():
INPUT = Input(shape = (self.hparams['n_input'],))
self.ensemble = Model(inputs=INPUT, outputs = tf.keras.layers.Average()([self.model[i](INPUT) for i in range(len(self.model))]))
# reduce learning rate for final fine-tuning of collective bagged model
self.ensemble.compile(optimizer = Adam(learning_rate=self.lr/2), loss = 'binary_crossentropy', metrics = ['acc'])
except:
INPUT = Input(shape = (self.hparams['n_input'],))
self.ensemble = Model(inputs=INPUT, outputs = tf.keras.layers.Average()([self.model[i](INPUT) for i in range(len(self.model))]))
# reduce learning rate for final fine-tuning of collective bagged model
self.ensemble.compile(optimizer = Adam(learning_rate=self.lr/2), loss = 'binary_crossentropy', metrics = ['acc'])
def fit(self, X_train, y_train, callbacks = [], val_share = 0.3, N_epochs = 200):
"""
Purpose: Train all model instances in the bagging procedure.
output:
\t None. Updates parameters of all models in self.model
input
\t X_train, y_train: \t Training data
\t resampling_option: \t 'None': No resampling is performed
\t \t 'undersampling': random undersampling of the majority class
\t \t 'SMOTE': SMOTE methodology applied
\t callbacks: \t callbacks for training
\t val_share, N_epochs, N_batch: \t Additional arguments for training
"""
# handle pandas-datatype
if type(X_train)==type(pd.DataFrame([1])):
X_train=X_train.values
if type(y_train) == type(pd.DataFrame([1])):
y_train=y_train.values
# check if GPUs are available
try:
N_GPUs = self.tf_dist_strat.num_replicas_in_sync  # num_replicas_in_sync is a property, not a method
except:
N_GPUs = 1
for i in range(len(self.model)):
# utilze concept of resampling
X,y = resample_and_shuffle(X_train, y_train, self.resampler)
# transform into tf.data.Dataset
try:
train_data, val_data = create_tf_dataset(X, y, val_share, self.hparams['batch_size']*num_gpus())
except:
# go on with regular, numpy-data-type
print('tf.data.Dataset could not be constructed. Continuing with numpy-data.')
pass
if len(self.model)==1:
try:
self.model[i].fit(x=train_data, batch_size= N_GPUs*self.hparams['batch_size'], epochs = N_epochs,
validation_data = val_data, verbose = 2, callbacks=callbacks)
except:
print('using non-tf.data-format')
self.model[i].fit(x=X, y = y, batch_size= N_GPUs*self.hparams['batch_size'], epochs = N_epochs,
validation_split= val_share, verbose = 2, callbacks=callbacks)
else:
if i==0:
# More compact view on models' training progress
print('Data of shape {} '.format(X.shape) + 'and balance factor {}'.format(sum(y)/len(y)))
# Start training of model
print('Training Model {}'.format(i))
t_start = time.time()
try:
self.model[i].fit(x=train_data, batch_size= N_GPUs*self.hparams['batch_size'], epochs = N_epochs,
validation_data= val_data, verbose = 2, callbacks=callbacks+[LearningRateScheduler(exp_decay_scheduler)])
except:
print('using non-tf.data-format')
self.model[i].fit(x=X, y = y, batch_size= N_GPUs*self.hparams['batch_size'], epochs = N_epochs,
validation_split= val_share, verbose = 2, callbacks=callbacks+[LearningRateScheduler(exp_decay_scheduler)])
n_epochs_trained = len(self.model[i].history.history['loss'])
print('\t ... {} epochs'.format(n_epochs_trained))
# plt.plot(self.model[i].history.history['loss'], label='loss')
# plt.plot(self.model[i].history.history['val_loss'], label='val_loss')
# plt.legend()
# plt.show()
for _ in range(3):
print('\t ... Fine tuning')
# reduce learning rate
self.model[i].optimizer.learning_rate = self.model[i].optimizer.learning_rate/2
try:
self.model[i].fit(x=train_data, batch_size= N_GPUs*self.hparams['batch_size'], epochs = N_epochs,
validation_data= val_data, verbose = 2, callbacks=callbacks+[LearningRateScheduler(exp_decay_scheduler)])#, initial_epoch= n_epochs_trained)
except:
print('using non-tf.data-format')
self.model[i].fit(x=X, y = y, batch_size= N_GPUs*self.hparams['batch_size'], epochs = N_epochs,
validation_split= val_share, verbose = 2, callbacks=callbacks+[LearningRateScheduler(exp_decay_scheduler)])#, initial_epoch= n_epochs_trained)
# print(self.model[i].history.history)
# n_epochs_trained += len(self.model[i].history.history['loss'])
print('\t ... Overall time: {} sec.'.format(time.time()-t_start))
print('\t ... Done!')
# plt.plot(self.model[i].history.history['loss'], label='loss')
# plt.plot(self.model[i].history.history['val_loss'], label='val_loss')
# plt.legend()
# plt.show()
print('Final fine tuning of whole bagged estimator:')
t_start = time.time()
try:
self.ensemble.fit(x=train_data, batch_size= N_GPUs*self.hparams['batch_size'], epochs = N_epochs, validation_data= val_data, verbose = 0, callbacks=callbacks)
except:
print('using non-tf.data-format')
self.ensemble.fit(x=X, y = y, batch_size= N_GPUs*self.hparams['batch_size'], epochs = N_epochs, validation_split= val_share, verbose = 0, callbacks=callbacks)
print('\t ... {} epochs'.format(len(self.ensemble.history.history['val_loss'])))
print('\t ... {} sec.'.format(time.time()-t_start))
print('\t ... Done!')
# Return object to allow for shorter/ single-line notation, i.e. ANN_bagging().fit()
return self
def predict(self, X):
"""
Purpose: Predict event probability for data
Inputs:
-------
\t X: \t Input data
Outputs:
--------
\t Predictions for all input data
"""
# handle pandas-datatype
if type(X)==type(pd.DataFrame([1])):
X=X.values
return self.ensemble.predict(X)
def predict_proba(self, X):
"""
Purpose: Predict event probability for data
Replicate predict_proba method of Sequential() or Model() class to unify notation.
See documentation of self.predict() method.
"""
# handle pandas-datatype
if type(X)==type(pd.DataFrame([1])):
X=X.values
return self.predict(X)
def predict_classes(self, X, threshold = 0.5):
"""
Purpose: Predict class memberships/ labels for data
Replicate predict_classes method of Sequential() or Model() class to unify notation.
"""
# handle pandas-datatype
if type(X)==type(pd.DataFrame([1])):
X=X.values
return (self.predict(X)>= threshold)
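# Hypothetical usage sketch (hyper-parameter values are assumptions): the bagging wrapper
# builds N_models identical ANNs from the hpsearch-style dict, trains each on an
# (optionally resampled) copy of the data and finally fine-tunes the averaged ensemble.
def _ann_bagging_example(X_train, y_train):
    hparams = {'depth': 2, 'width_1': 32, 'width_2': 16, 'actv': 'relu', 'dropout': 0.1,
               'n_input': X_train.shape[1], 'lrate': 1e-3, 'batch_size': 64}
    bag = ANN_bagging(N_models=3, hparams=hparams, tf_dist_strat=None, resampler='None')
    bag.fit(X_train, y_train, N_epochs=10)
    return bag.predict_proba(X_train)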
class ANN_boost:
'''
Create a boosting instance with neural networks as weak learner instances.
As we add a new weak learner it will train primarily on errors of previous models. Boost rate equal 1, i.e. weak learners added by summation.
For the purpose of binary classification we impose a binary_crossentropy loss.
'''
def __init__(self, N_models, N_input, width: int, act_fct: str, lr = 0.001, tf_dist_strat = None, resampler = 'None'):
"""
Initialize the architecture of all individual models in the bagging procedure.
Model style of weak learner: input->hidden_layer-> actv_fct-> single output (incl linear actv) -> sigmoid actv (to be carved off when combining multiple weak learners)
Inputs:
-------
N_models: Number of models to be included in bagging procedure
N_input: Number of input nodes
width_lst: List containing the width for all layers, and hence implicitely also the depth of the network
act_fct_lst: List containing the activation function for all layers.
Last entry should be linear, as boosting models add a final sigmoid activation to the added weak learners to ensure a proper probability distribution.
dropout_rate: Dropout rate applied to all layers (except output layer)
dropout_rate = 0 will effectively disable dropout
loss: loss function which the model will be compiled with. Standard option: 'binary_crossentropy'
optimizer: loss function which the model will be compiled with. Standard option: 'adam'
Outputs:
--------
None. Creates self.model_base objects with type(object) = dict
"""
self.N_models = N_models
self.loss = 'binary_crossentropy'
self.N_input = N_input
self.width = width
self.act_fct = act_fct
self.tf_dist = tf_dist_strat
# self.dropout_rate = dropout_rate # canceled; not useful with only one hidden layer of which we tune its width
self.lr_init = lr
self.optimizer = Adam(learning_rate=self.lr_init)
self.resampler = resampler
self.history_val = []
self.history_train = []
self.training_steps = 0
# boosted models will be assigned during fitting procedure
#self.model_boost = [None]*self.N_models # depreciated version
self.model_boost = None # Save memory by reusing file-space, i.e. not saving each intermediate boosting step separately as they are recorded by self.model_base
# Create list of weak learner instances (compilation happens in creating functions)
# try:
# with self.tf_dist.scope():
# self.model_base = [self.create_model_prior()]+[self.create_model_learner() for _ in range(self.N_models-1)]
# except Exception as e:
# print('Leaners not created within tf-distribution-strategy due to:')
# print(e)
self.model_base = [self.create_model_prior()]+[self.create_model_learner() for _ in range(self.N_models-1)]
def fit(self, x, y, callbacks = [], val_share = 0.3, N_epochs = 200, N_batch = 64, correction_freq = 5):
'''
Fitting procedure for the ANN_boost object.
Inputs:
-------
x: Input Data
y: Targets
callbacks: list of tf.keras.callbacks objects, e.g. earlyStopping
val_share: share of (x,y) used for validation of the model during training and for potential callback options
N_epochs: number of epochs for training
N_batch: batch size for training
correction_freq: frequency in which a corrective step is performed, e.g. 0: never, 1: every epoch, 5: every 5 epochs, ...
'''
# handle pandas-datatype
if type(x)==type(pd.DataFrame([1])):
x=x.values
#print('ANN_boost.fit: x values changed from pandas.DataFrame to numpy.array')
if type(y) == type(pd.DataFrame([1])):
y=y.values
#print('ANN_boost.fit: y values changed from pandas.DataFrame to numpy.array')
# optional resampling
x,y = resample_and_shuffle(x, y, self.resampler)
# transform into tf.data.Dataset (important: transformation after optional resampling)
try:
train_data, val_data = create_tf_dataset(x,y,val_share, N_batch*num_gpus())
except:
# go on with regular, numpy-data-type
print('tf.data.Dataset could not be constructed. Continuing with numpy-data.')
pass
if self.N_input != x.shape[1]:
raise ValueError('Error: Invalid input shape. Expected ({},) but given ({},)'.format(self.N_input, x.shape[1]))
# iterate over number of weak learners included in boosting
INPUT = Input(shape= (self.N_input,)) # re-use this input layer to avoid more cache-intensive multi-input models
for n in range(1,self.N_models+1):
try:
with self.tf_dist.scope():
if n == 1:
# Note: Average Layer expects >= 2 inputs
# Add final sigmoid Activation for classification
self.model_boost = Model(inputs = INPUT, outputs = tf.keras.layers.Activation(tf.keras.activations.sigmoid)(self.model_base[0](INPUT)))
else:
self.model_boost = Model(inputs = INPUT,#[self.model_base[i].input for i in range(n)],
# Note: Average() needs list as input; use .output, not .outputs (-> list of lists)
outputs = tf.keras.layers.Activation(tf.keras.activations.sigmoid)(
tf.keras.layers.Add()(
[self.model_base[i](INPUT) for i in range(n)]# .output for i in range(n)]
)
)
)
# set trainable = True for newly added weak learner (relevant if we retrain model)
self.model_base[n-1].trainable = True
# compile model
self.model_boost.compile(optimizer = self.optimizer, loss = self.loss, metrics = ['acc'])
except Exception as e:
print('Booster not created within distribution strategy due to:')
print(e)
if n == 1:
# Note: Average Layer expects >= 2 inputs
# Add final sigmoid Activation for classification
self.model_boost = Model(inputs = INPUT, outputs = tf.keras.layers.Activation(tf.keras.activations.sigmoid)(self.model_base[0](INPUT)))#.output))
else:
self.model_boost = Model(inputs = INPUT,#[self.model_base[i].input for i in range(n)],
# Note: Average() needs list as input; use .output, not .outputs (-> list of lists)
outputs = tf.keras.layers.Activation(tf.keras.activations.sigmoid)(
tf.keras.layers.Add()(
[self.model_base[i](INPUT) for i in range(n)]# .output for i in range(n)]
)
)
)
# set trainable = True for newly added weak learner (relevant if we retrain model)
self.model_base[n-1].trainable = True
# compile model
self.model_boost.compile(optimizer = self.optimizer, loss = self.loss, metrics = ['acc'])
# train boosting model
print('Training Model {}'.format(n))
print('\t trainable params: '+ str(keras_count_nontrainable_params(self.model_boost, trainable=True)))
print('\t nontrainable params: '+ str(keras_count_nontrainable_params(self.model_boost, trainable=False)))
t_start = time.time()
if (n==1):
# set weights = 0 and bias = sigmoid^-1(baseline_hazard)
try:
with self.tf_dist.scope():
self.model_boost.layers[1].set_weights([np.array([0]*self.N_input).reshape((-1,1)), np.array([np.log(y.mean()/(1-y.mean()))])])
except Exception as e:
print('Setting weights of baseline-learner not performed within tf-distribution-strategy due to:')
print(e)
self.model_boost.layers[1].set_weights([np.array([0]*self.N_input).reshape((-1,1)), np.array([np.log(y.mean()/(1-y.mean()))])])
else:
try:
# if data in tf.data.Dataset format available
print('\t .. training on tf.data.Dataset')
self.model_boost.fit(x=train_data, validation_data = val_data, epochs = N_epochs, verbose = 2, callbacks=callbacks)
except Exception as e:
print('Leaners not created within tf-distribution-strategy due to:')
print(e)
self.model_boost.fit(x=x, y = y, batch_size= N_batch, epochs = N_epochs, validation_split= val_share, verbose = 0, callbacks=callbacks)
self.history_val += self.model_boost.history.history['val_loss']
self.history_train += self.model_boost.history.history['loss']
# evolutionary fitting of boosting model
#self.fit_evolutionary(x=x, y=y, batch_size=N_batch, epochs=N_epochs, epochs_per_it=25, validation_split=val_share, callbacks=callbacks)
print('\t ... {} epochs'.format(len(self.history_val)-self.training_steps))
self.training_steps = len(self.history_val)
print('\t ... {} sec.'.format(time.time()-t_start))
#print('\t ... eval.: ', self.model_boost.evaluate(x,y, verbose=0)) # optional: display to observe progress of training; however, slows down training.
print('\t ... Done!')
# decaying influence of weak learners
#self.optimizer.lr = self.lr_init*0.9**n
# corrective step: set all parameters as trainable and update them using SGD
if n>1:
if (correction_freq > 0) & (n%correction_freq ==0):
self.corrective_step(model = self.model_boost, x=x, y=y, callbacks=callbacks,
val_share=val_share, N_epochs = N_epochs, N_batch= N_batch)
# set trainable = False for weak learner that has been included in the boosting model
self.model_base[n-1].trainable = False
def fit_evolutionary(self, x, y, batch_size, epochs, epochs_per_it, validation_split, callbacks):
'''
Customized training scheme, using early stopping/ callbacks and a iterative reduction of the initial learning rate.
## DEPRECATED, as it is not very effective in the given scenario
'''
self.model_boost.fit(x=x, y = y, batch_size= batch_size, epochs = epochs_per_it, validation_split=validation_split, verbose = 0, callbacks=callbacks)
self.history_train += self.model_boost.history.history['loss']
self.history_val += self.model_boost.history.history['val_loss']
#print(self.history_train)
#print(type(self.history_train))
val_loss = min(self.history_val)
#print('minimum val_loss: ', val_loss)
evol_patience = 0
for ep in range(epochs//epochs_per_it):
self.optimizer.lr= self.lr_init*1.2**(1+ep%4)
# compile to effectively update lr
self.model_boost.compile(optimizer = self.optimizer, loss = self.loss, metrics = ['acc'])
print(' \t Fine tuning step ', ep, '...', ' (val_loss: ', np.round_(val_loss,4), ')')
self.model_boost.fit(x=x, y = y, batch_size=batch_size, epochs = epochs_per_it, validation_split=validation_split, verbose = 0, callbacks=callbacks)
# record training/ validation history
self.history_train += self.model_boost.history.history['loss']
self.history_val += self.model_boost.history.history['val_loss']
if min(self.history_val) < val_loss*0.99:
val_loss = min(self.history_val)
else:
evol_patience += 1
if evol_patience > 3:
break
def corrective_step(self, model, x, y, callbacks = [], val_share = 0.3, N_epochs = 200, N_batch = 64):
'''
Perform a corrective step by updating all parameters of boosting model, i.e. all included weak learners.
'''
# handle pandas-datatype
if type(x)==type(pd.DataFrame([1])):
x=x.values
#print('ANN_boost.fit: x values changed from pandas.DataFrame to numpy.array')
if type(y) == type(pd.DataFrame([1])):
y=y.values
#print('ANN_boost.fit: y values changed from pandas.DataFrame to numpy.array')
# transform into tf.data.Dataset
try:
train_data, val_data = create_tf_dataset(x,y,val_share, N_batch*num_gpus())
except:
# go on with regular, numpy-data-type
print('tf.data.Dataset could not be constructed. Continuing with numpy-data.')
pass
# allow updating of all parameters
try:
with self.tf_dist.scope():
model.trainable = True
model.compile(optimizer = Adam(lr=self.lr_init/2), loss = self.loss, metrics = ['acc'])
except Exception as e:
print('Leaners not created within tf-distribution-strategy due to:')
print(e)
model.trainable = True
model.compile(optimizer = Adam(lr=self.lr_init/2), loss = self.loss, metrics = ['acc'])
print('Corrective Step ... ')
print('\t trainable params: '+ str(keras_count_nontrainable_params(model, trainable=True)))
print('\t nontrainable params: '+ str(keras_count_nontrainable_params(model, trainable=False)))
t_start = time.time()
#self.fit_evolutionary(x=x, y=y, batch_size=N_batch, epochs=N_epochs, epochs_per_it=25, validation_split=val_share, callbacks=callbacks)
try:
# train with tf.data.dataset; explicitly indicate val_data; batch_size indicated in tf.data.dataset
model.fit(x=train_data, epochs = N_epochs, validation_data= val_data, verbose = 2, callbacks=callbacks)
except Exception as e:
print('Model not created within tf-distribution-strategy due to:')
print(e)
model.fit(x=x, y = y, batch_size= N_batch, epochs = N_epochs, validation_split= val_share, verbose = 2, callbacks=callbacks)
print('\t ... {} epochs'.format(len(model.history.history['val_loss'])))
run_time = time.time()-t_start
print('\t ... {} sec.'.format(run_time))
print('\t ... Correction performed!')
# Lock updates
model.trainable = False
return run_time
def save_object(self, path):
'''
Function to save the ANN_boost object.
Required, as e.g. Sequential()-Object in self.model_base[i] cannot be pickled or dilled.
Hence, we save only the respective weights and provide a function load_object to restore the fully functional ANN_boost object.
Note: load_ANN_boost_object is not a member function of ANN_boost. However, the loaded ANN_boost object uses object.restore_learners() to restore learners and boosted models.
'''
# save weights of learners
#self.model_base = [self.model_base[i].get_weights() for i in range(self.N_models)]
# delete boosted models temporarily for pickling; can be restored with weights of (trained) learners
#cache = clone_model(self.model_boost)
#cache.compile(optimizer = self.optimizer, loss = self.loss, metrics = ['acc'])
model_backup = ANN_boost(N_models= self.N_models, N_input= self.N_input, width = self.width, act_fct = self.act_fct)
model_backup.model_base = [sub_model.get_weights() for sub_model in self.model_base] # save only weights -> to be restored in self.restore_learners()
# Note: Adam-object cannot be pickled in tf 2.4.
# workaround: switch to string-information and restore full optimizer (incl. learning_rate) in restore_learners
model_backup.optimizer = 'adam'
#self.model_boost = None#*self.N_models
with open( path, "wb" ) as file:
pickle.dump(model_backup, file)
print('ANN object dumped to ', path)
#self.model_boost = cache
def restore_learners(self):
'''
Restore the full Sequential() architecture of self.model_base[i] and self.model_boost[i] which were replaced by their weights to pickle dump the object.
'''
weights = copy.copy(self.model_base)
self.model_base = [self.create_model_prior()]+[self.create_model_learner() for _ in range(1,self.N_models)]
[self.model_base[i].set_weights(weights[i]) for i in range(self.N_models)]
#print(self.model_base)
# iterate over number of weak learners included in boosting
for n in range(1,self.N_models+1):
INPUT = Input(shape= (self.N_input,))
if n == 1:
# Note: Average Layer expects >= 2 inputs
# Add final sigmoid Activation for classification
#self.model_boost[n-1] = Model(inputs = self.model_base[0].input,
# outputs = tf.keras.layers.Activation(tf.keras.activations.sigmoid)(self.model_base[0].output))
self.model_boost = Model(inputs = INPUT,#self.model_base[0].input,
outputs = tf.keras.layers.Activation(tf.keras.activations.sigmoid)(self.model_base[0](INPUT)))#.output))
else:
#self.model_boost[n-1]
self.model_boost = Model(inputs = INPUT,#[self.model_base[i].input for i in range(n)],
# Note: Average() needs list as input; use .output, not .outputs (-> list of lists)
outputs = tf.keras.layers.Activation(tf.keras.activations.sigmoid)(
tf.keras.layers.Add()(
[self.model_base[i](INPUT) for i in range(n)]# .output for i in range(n)]
)
)
)
# set trainable = True for newly added weak learner (relevant if we retrain model)
self.model_base[n-1].trainable = True
# compile model
self.model_boost.compile(optimizer = self.optimizer, loss = self.loss, metrics = ['acc'])
def create_model_prior(self):
'''
Base model 0 in boosting structure; expresses a prior estimate (here constant rate) that will be improved by subsequent model created by create_model_learner.
'''
model = Sequential()
model.add(Dense(1, activation= 'linear', input_dim = self.N_input))
model.compile(optimizer = self.optimizer, loss = self.loss, metrics = ['acc'])
return model
def create_model_learner(self):
'''
Create architecture for weak learners in boosting strategy.
'''
model = Sequential()
# Hidden layer
try:
model.add(Dense(units = self.width, activation = self.act_fct, input_dim = self.N_input))
except:
# old implementation
model.add(Dense(units = self.width_lst[0], activation = self.act_fct_lst[0], input_dim = self.N_input))
print('sub_surrender_models, create_model_learner(): attributes width_lst and act_fct_lst are deprecated!')
# Output layer
model.add(Dense(units = 1, activation = 'linear'))
model.compile(optimizer = self.optimizer, loss = self.loss, metrics = ['acc'])
return model
def prune_booster(self, n_learners:int):
'''
Take user input how many weak learners should be utilized. The rest will be discarded.
'''
assert n_learners<= self.N_models
assert n_learners > 1
INPUT = Input(shape= (self.N_input,)) # re-use this input layer to avoid more cache-intensive multi-input models
self.model_boost = Model(inputs = INPUT,#[self.model_base[i].input for i in range(n)],
# Note: Average() needs list as input; use .output, not .outputs (-> list of lists)
outputs = tf.keras.layers.Activation(tf.keras.activations.sigmoid)(
tf.keras.layers.Add()(
[self.model_base[i](INPUT) for i in range(n_learners)]# .output for i in range(n)]
)
)
)
# compile model
self.model_boost.compile(optimizer = self.optimizer, loss = self.loss, metrics = ['acc'])
def evaluate(self, x, y=None):
try:
# x is tf.data.Dataset
return self.model_boost.evaluate(x, verbose=0)
except:
return self.model_boost.evaluate(x,y, verbose=0)
def predict_proba(self, x):
"""
Purpose: Predict event probability for data
output:
\t Predictions for all input data
input:
\t X: \t Input data
"""
# handle pandas-datatype
if type(x)==type(pd.DataFrame([1])):
x=x.values
#print('ANN_boost.fit: x values changed from pandas.DataFrame to numpy.array')
# Use last iteration of boosting procedure
# Note: tf.keras.models.Model() does not posses .predict_proba(), but only .predict()
return self.model_boost.predict(x)
def predict(self, x):
"""
Purpose: Predict event probability for data
output:
\t Predictions for all input data
input:
\t X: \t Input data
"""
# handle pandas-datatype
if type(x)==type(pd.DataFrame([1])):
    x=x.values
# (reconstructed to mirror predict_proba above; the original file is truncated here)
return self.model_boost.predict(x)
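# Hypothetical usage sketch (argument values are assumptions): every weak learner is a
# one-hidden-layer network with a linear output; the booster sums the raw outputs of all
# learners added so far and squashes the sum through a final sigmoid.
def _ann_boost_example(X_train, y_train):
    booster = ANN_boost(N_models=5, N_input=X_train.shape[1], width=16, act_fct='relu', lr=1e-3)
    booster.fit(X_train, y_train, N_epochs=10, N_batch=64, correction_freq=5)
    return booster.predict_proba(X_train)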
"""
This script reads the long-form dataset and trims it. It
does this by grouping the dataset by free-flowing status, continent, and
ocean connectivity. The script then individually sums the length,
volume, and discharge of the river reaches within each category.
"""
import pandas as pd
#%%
# Set to the directory that your csv file is in
# data_dir = curr_dir + "RiverReaches.csv"
data_dir = ""
RiverReaches = pd.read_csv(data_dir + "RiverReaches.csv", low_memory=False)
river_df = pd.DataFrame(RiverReaches)
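# Hedged sketch (the column names below are made up, since the schema of RiverReaches.csv
# is not shown in this script): the trimming described in the docstring is a groupby over
# the three categorical columns followed by a sum of the numeric ones.
def _groupby_sum_example():
    toy = pd.DataFrame({
        "free_flowing": ["yes", "yes", "no"],
        "continent": ["Africa", "Africa", "Asia"],
        "ocean_connectivity": ["connected", "connected", "isolated"],
        "length_km": [10.0, 5.0, 7.0],
        "volume_km3": [1.0, 0.5, 0.2],
        "discharge_cms": [100.0, 50.0, 20.0],
    })
    return toy.groupby(["free_flowing", "continent", "ocean_connectivity"]).sum()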
from typing import Type, Callable, Tuple, Union
import numpy as np
import pandas as pd
import pytest
from py4j.java_gateway import JVMView
from keanu import set_deterministic_state
from keanu.context import KeanuContext
from keanu.vartypes import tensor_arg_types, primitive_types, numpy_types, pandas_types
from keanu.vertex import Gaussian, Const, UniformInt, Bernoulli, IntegerProxy, Double
from keanu.vertex.base import Vertex
@pytest.fixture
def jvm_view():
from py4j.java_gateway import java_import
jvm_view = KeanuContext().jvm_view()
java_import(jvm_view, "io.improbable.keanu.vertices.tensor.number.floating.dbl.probabilistic.GaussianVertex")
return jvm_view
def assert_vertex_value_equals_scalar(vertex: Vertex, expected_type: Type, scalar: primitive_types) -> None:
vertex_value = vertex.get_value()
assert vertex_value == scalar
assert type(vertex_value) == numpy_types
assert vertex_value.shape == ()
assert vertex_value.dtype == expected_type
def assert_vertex_value_equals_ndarray(vertex: Vertex, expected_type: Type, ndarray: numpy_types) -> None:
vertex_value = vertex.get_value()
expected_value = ndarray.astype(expected_type)
assert np.array_equal(vertex_value, expected_value)
assert np.issubdtype(vertex_value.dtype, expected_type)
def assert_vertex_value_equals_pandas(vertex: Vertex, expected_type: Type, pandas: pandas_types) -> None:
get_value = vertex.get_value()
expected_value = pandas.values.astype(expected_type).reshape(get_value.shape)
assert np.array_equal(get_value, expected_value)
assert np.issubdtype(get_value.dtype, expected_type)
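# Grounded aside (not part of the original test-suite): the helpers above rely on
# np.issubdtype, which walks the dtype hierarchy instead of requiring an exact match.
def _issubdtype_demo():
    assert np.issubdtype(np.dtype("int32"), np.integer)
    assert np.issubdtype(np.dtype("float64"), np.floating)
    assert not np.issubdtype(np.dtype("float64"), np.integer)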
def test_can_pass_scalar_to_vertex() -> None:
gaussian = Gaussian(0., 1.)
sample = gaussian.sample()
assert type(sample) == numpy_types
assert sample.shape == ()
assert sample.dtype == float
def test_can_pass_ndarray_to_vertex() -> None:
gaussian = Gaussian(np.array([0.1, 0.4]), np.array([0.4, 0.5]))
sample = gaussian.sample()
assert sample.shape == (2,)
def test_can_pass_pandas_dataframe_to_vertex() -> None:
gaussian = Gaussian(pd.DataFrame(data=[0.1, 0.4]), pd.DataFrame(data=[0.1, 0.4]))
sample = gaussian.sample()
assert sample.shape == (2, 1)
def test_can_pass_pandas_series_to_vertex() -> None:
gaussian = Gaussian(pd.Series(data=[0.1, 0.4]), pd.Series(data=[0.1, 0.4]))
sample = gaussian.sample()
assert sample.shape == (2,)
def test_can_pass_vertex_to_vertex(jvm_view: JVMView) -> None:
mu = Gaussian(0., 1.)
gaussian = Vertex(jvm_view.GaussianVertex, "gaussian", mu, Const(1.))
sample = gaussian.sample()
assert type(sample) == numpy_types
assert sample.shape == ()
assert sample.dtype == float
def test_can_pass_array_to_vertex(jvm_view: JVMView) -> None:
gaussian = Vertex(jvm_view.GaussianVertex, "gaussian", [3, 3], Const(0.), Const(1.))
sample = gaussian.sample()
assert sample.shape == (3, 3)
def test_cannot_pass_generic_to_vertex(jvm_view: JVMView) -> None:
class GenericExampleClass:
pass
with pytest.raises(ValueError, match=r"Can't parse generic argument. Was given {}".format(GenericExampleClass)):
Vertex( # type: ignore # this is expected to fail mypy
jvm_view.GaussianVertex, "gaussian", GenericExampleClass(), GenericExampleClass())
def test_int_vertex_value_is_a_numpy_array() -> None:
ndarray = np.array([[1, 2], [3, 4]])
vertex = Const(ndarray)
value = vertex.get_value()
assert type(value) == np.ndarray
assert value.dtype == np.int64 or value.dtype == np.int32
assert (value == ndarray).all()
def test_float_vertex_value_is_a_numpy_array() -> None:
ndarray = np.array([[1., 2.], [3., 4.]])
vertex = Const(ndarray)
value = vertex.get_value()
assert type(value) == np.ndarray
assert value.dtype == np.float64
assert (value == ndarray).all()
def test_boolean_vertex_value_is_a_numpy_array() -> None:
ndarray = np.array([[True, True], [False, True]])
vertex = Const(ndarray)
value = vertex.get_value()
assert type(value) == np.ndarray
assert value.dtype == np.bool_
assert (value == ndarray).all()
def test_scalar_vertex_value_is_a_numpy_array() -> None:
scalar = 1.
vertex = Const(scalar)
value = vertex.get_value()
assert type(value) == numpy_types
assert value.shape == ()
assert value.dtype == float
assert value == scalar
def test_vertex_sample_is_a_numpy_array() -> None:
mu = np.array([[1., 2.], [3., 4.]])
sigma = np.array([[.1, .2], [.3, .4]])
vertex = Gaussian(mu, sigma)
value = vertex.sample()
assert type(value) == np.ndarray
assert value.dtype == np.float64
assert value.shape == (2, 2)
def test_get_connected_graph() -> None:
gaussian = Gaussian(0., 1.)
connected_graph = set(gaussian.iter_connected_graph())
assert len(connected_graph) == 3
def test_id_str_of_downstream_vertex_is_higher_than_upstream() -> None:
hyper_params = Gaussian(0., 1.)
gaussian = Gaussian(0., hyper_params)
hyper_params_id = hyper_params.get_id()
gaussian_id = gaussian.get_id()
assert type(hyper_params_id) == tuple
assert type(gaussian_id) == tuple
assert hyper_params_id < gaussian_id
def test_construct_vertex_with_java_vertex() -> None:
java_vertex = Gaussian(0., 1.).unwrap()
python_vertex = Vertex._from_java_vertex(java_vertex)
assert tuple(java_vertex.getId().getValue()) == python_vertex.get_id()
def test_java_collections_to_generator() -> None:
gaussian = Gaussian(0., 1.)
java_collections = gaussian.unwrap().getConnectedGraph()
python_list = list(Vertex._to_generator(java_collections))
java_vertex_ids = [Vertex._get_python_id(java_vertex) for java_vertex in java_collections]
assert java_collections.size() == len(python_list)
assert all(type(element) == Double and element.get_id() in java_vertex_ids for element in python_list)
def test_get_vertex_id() -> None:
gaussian = Gaussian(0., 1.)
java_id = gaussian.unwrap().getId().getValue()
python_id = gaussian.get_id()
assert all(value in python_id for value in java_id)
def test_ids_are_reset() -> None:
gaussian = Gaussian(0., 1.)
set_deterministic_state()
gaussian2 = Gaussian(0., 1.)
assert gaussian.get_id() == gaussian2.get_id()
@pytest.mark.parametrize("vertex, expected_type", [(Gaussian(0., 1.), np.floating), (UniformInt(0, 10), np.integer),
(Bernoulli(0.5), np.bool_)])
@pytest.mark.parametrize("value, assert_vertex_value_equals",
[(np.array([[4]]), assert_vertex_value_equals_ndarray),
(np.array([[5.]]), assert_vertex_value_equals_ndarray),
(np.array([[True]]), assert_vertex_value_equals_ndarray),
(np.array([[1, 2], [3, 4]]), assert_vertex_value_equals_ndarray),
(pd.Series(data=[4]), assert_vertex_value_equals_pandas),
(pd.Series(data=[5.]), assert_vertex_value_equals_pandas),
(pd.Series(data=[True]), assert_vertex_value_equals_pandas),
(pd.Series(data=[1, 2, 3]), assert_vertex_value_equals_pandas),
(pd.Series(data=[1., 2., 3.]), assert_vertex_value_equals_pandas),
(pd.Series(data=[True, False, False]), assert_vertex_value_equals_pandas),
(pd.DataFrame(data=[[4]]), assert_vertex_value_equals_pandas),
(pd.DataFrame(data=[[5.]]), assert_vertex_value_equals_pandas),
(pd.DataFrame(data=[[True]]), assert_vertex_value_equals_pandas),
(pd.DataFrame(data=[[1, 2, 3]]), assert_vertex_value_equals_pandas),
(pd.DataFrame(data=[[1., 2., 3.]]), assert_vertex_value_equals_pandas),
(pd.DataFrame(data=[[True, False, False]]), assert_vertex_value_equals_pandas)])
def test_you_can_set_value(vertex: Vertex, expected_type: Type, value: tensor_arg_types,
assert_vertex_value_equals: Callable) -> None:
vertex.set_value(value)
assert_vertex_value_equals(vertex, expected_type, value)
@pytest.mark.parametrize("vertex, expected_type, value", [(Gaussian(0., 1.), float, 4.), (UniformInt(0, 10), int, 5),
(Bernoulli(0.5), bool, True)])
def test_you_can_set_scalar_value(vertex, expected_type, value):
vertex.set_value(value)
assert_vertex_value_equals_scalar(vertex, expected_type, value)
@pytest.mark.parametrize("ctor, args, expected_type", [(Gaussian, (0., 1.), np.floating),
(UniformInt, (0, 10), np.integer), (Bernoulli,
(0.5,), np.bool_)])
@pytest.mark.parametrize("value, assert_vertex_value_equals",
[(np.array([[4]]), assert_vertex_value_equals_ndarray),
(np.array([[5.]]), assert_vertex_value_equals_ndarray),
(np.array([[True]]), assert_vertex_value_equals_ndarray),
(np.array([[1, 2], [3, 4]]), assert_vertex_value_equals_ndarray),
(pd.Series(data=[4]), assert_vertex_value_equals_pandas),
(pd.Series(data=[5.]), assert_vertex_value_equals_pandas),
(pd.Series(data=[True]), assert_vertex_value_equals_pandas),
(pd.Series(data=[1, 2, 3]), assert_vertex_value_equals_pandas),
(pd.Series(data=[1., 2., 3.]), assert_vertex_value_equals_pandas),
(pd.Series(data=[True, False, False]), assert_vertex_value_equals_pandas),
(pd.DataFrame(data=[[4]]), assert_vertex_value_equals_pandas),
(pd.DataFrame(data=[[5.]]), assert_vertex_value_equals_pandas),
(pd.DataFrame(data=[[True]]), assert_vertex_value_equals_pandas),
(pd.DataFrame(data=[[1, 2, 3]]), assert_vertex_value_equals_pandas),
(pd.DataFrame(data=[[1., 2., 3.]]), assert_vertex_value_equals_pandas),
(pd.DataFrame(data=[[True, False, False]]), assert_vertex_value_equals_pandas)])
def test_you_can_set_and_cascade(ctor: Callable, args: Union[Tuple[float, ...], Tuple[int, ...]], expected_type: Type,
value: tensor_arg_types, assert_vertex_value_equals: Callable) -> None:
vertex1 = ctor(*args)
vertex2 = ctor(*args)
equal_vertex = vertex1 == vertex2
not_equal_vertex = vertex1 != vertex2
vertex1.set_value(value)
vertex2.set_and_cascade(value)
assert_vertex_value_equals(vertex1, expected_type, value)
assert_vertex_value_equals(vertex2, expected_type, value)
two_values_are_equal = equal_vertex.get_value()
is_scalar = type(two_values_are_equal) == bool
assert type(two_values_are_equal) == bool or two_values_are_equal.dtype == np.bool_
if not is_scalar:
assert two_values_are_equal.shape == vertex1.get_value().shape == vertex2.get_value().shape
assert np.all(two_values_are_equal)
two_values_are_not_equal = not_equal_vertex.get_value()
assert type(two_values_are_equal) == bool or two_values_are_not_equal.dtype == np.bool_
if not is_scalar:
assert two_values_are_not_equal.shape == vertex1.get_value().shape == vertex2.get_value().shape
assert np.all(np.invert(two_values_are_not_equal))
@pytest.mark.parametrize("ctor, args, expected_type, value", [(Gaussian, (0., 1.), float, 4.),
(UniformInt, (0, 10), int, 5),
(Bernoulli, (0.5,), bool, True)])
def test_you_can_set_and_cascade_scalar(ctor, args, expected_type, value) -> None:
test_you_can_set_and_cascade(ctor, args, expected_type, value, assert_vertex_value_equals_scalar)
@pytest.mark.parametrize("ctor, args, expected_type", [(Gaussian, (0., 1.), np.floating),
(UniformInt, (0, 10), np.integer), (Bernoulli,
(0.5,), np.bool_)])
@pytest.mark.parametrize("value, assert_vertex_value_equals",
[(np.array([[4]]), assert_vertex_value_equals_ndarray),
(np.array([[5.]]), assert_vertex_value_equals_ndarray),
(np.array([[True]]), assert_vertex_value_equals_ndarray),
(np.array([[1, 2], [3, 4]]), assert_vertex_value_equals_ndarray),
(pd.Series(data=[4]), assert_vertex_value_equals_pandas),
(pd.Series(data=[5.]), assert_vertex_value_equals_pandas),
(pd.Series(data=[True]), assert_vertex_value_equals_pandas),
(pd.Series(data=[1, 2, 3]), assert_vertex_value_equals_pandas),
(pd.Series(data=[1., 2., 3.]), assert_vertex_value_equals_pandas),
(pd.Series(data=[True, False, False]), assert_vertex_value_equals_pandas),
(pd.DataFrame(data=[[4]]), assert_vertex_value_equals_pandas),
(pd.DataFrame(data=[[5.]]), assert_vertex_value_equals_pandas),
(pd.DataFrame(data=[[True]]), assert_vertex_value_equals_pandas),
(pd.DataFrame(data=[[1, 2, 3]]), assert_vertex_value_equals_pandas),
(pd.DataFrame(data=[[1., 2., 3.]]), assert_vertex_value_equals_pandas),
(pd.DataFrame(data=[[True, False, False]]), assert_vertex_value_equals_pandas)])
import pandas as pd
def get_zipcode(lat, lon, all_l):
row = all_l[(all_l['latitude'] == lat) & (all_l['longitude'] == lon)]
print(row)
print("*")
if __name__ == "__main__":
root_path = "/Users/shravya/Documents/CMU/Interactive_Data_Science/Assignments/3/Code2/data/"
reviews = {'NYC': pd.read_csv(root_path + 'NYC_reviews.csv')}
NYC_listings = {'01': pd.read_csv(root_path + '2020/NYC/listings_01.csv'),
'02': pd.read_csv(root_path + '2020/NYC/listings_02.csv'),
'03': pd.read_csv(root_path + '2020/NYC/listings_03.csv'),
'04': pd.read_csv(root_path + '2020/NYC/listings_04.csv'),
'05': pd.read_csv(root_path + '2020/NYC/listings_05.csv'),
'06': pd.read_csv(root_path + '2020/NYC/listings_06.csv'),
'07': pd.read_csv(root_path + '2020/NYC/listings_07.csv')}
covid_data = pd.read_csv(root_path + 'data-by-modzcta.csv')
covid_data = covid_data.rename(columns={"MODIFIED_ZCTA": "zipcode"})
for key in NYC_listings.keys():
df = NYC_listings[key]
df = df[['zipcode', 'latitude', 'longitude']]
NYC_listings[key] = df
all_listings = pd.concat([NYC_listings['01'], NYC_listings['02'], NYC_listings['03'],
NYC_listings['04'], NYC_listings['05'], NYC_listings['06'],
NYC_listings['07']], ignore_index=True)
all_listings.drop_duplicates(subset=['zipcode'], inplace=True, keep='last')
all_listings['latitude'] = pd.to_numeric(all_listings['latitude'])
all_listings['longitude'] = pd.to_numeric(all_listings['longitude'])
# Now join zipcode info with covid
covid_data['zipcode'] = covid_data['zipcode'].apply(str)
covid_zipcode = covid_data.merge(all_listings, on='zipcode', how='left')
covid_zipcode = covid_zipcode.dropna(subset=['latitude', 'longitude'])
# Write to a file
# covid_zipcode.to_csv('covid_data_cleaned.csv', index=False)
# Get zipcode for months 8 and 9
month8 = pd.read_csv(root_path + '2020/NYC/listings_08.csv')
month9 = pd.read_csv(root_path + '2020/NYC/listings_09.csv')
NYC_listings = {'01': pd.read_csv(root_path + '2020/NYC/listings_01.csv'),
'02': pd.read_csv(root_path + '2020/NYC/listings_02.csv'),
'03': pd.read_csv(root_path + '2020/NYC/listings_03.csv'),
'04': pd.read_csv(root_path + '2020/NYC/listings_04.csv'),
'05': pd.read_csv(root_path + '2020/NYC/listings_05.csv'),
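# Hedged sketch with toy values (not the real listings/COVID tables): the join performed
# above is a plain left merge on the string-typed 'zipcode' key, so any zipcode without a
# matching listing gets NaN coordinates and is subsequently dropped.
def _zipcode_merge_example():
    covid_toy = pd.DataFrame({"zipcode": ["10001", "10002"], "cases": [5, 7]})
    listings_toy = pd.DataFrame({"zipcode": ["10001"], "latitude": [40.75], "longitude": [-73.99]})
    merged = covid_toy.merge(listings_toy, on="zipcode", how="left")
    return merged.dropna(subset=["latitude", "longitude"])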
import librosa
import numpy as np
import pandas as pd
from os import listdir
from os.path import isfile, join
from audioread import NoBackendError
def extract_features(path, label, emotionId, startid):
"""
Extract features from the audio files under `path`, using the librosa library.
:param path: file path
:param label: emotion type
:param emotionId: numeric id of the emotion
:param startid: starting sequence number
:return: feature matrix, pandas.DataFrame
"""
id = startid # sequence number
feature_set = pd.DataFrame() # feature matrix
# individual feature vectors
labels = pd.Series()
emotion_vector = pd.Series()
songname_vector = pd.Series()
tempo_vector = pd.Series()
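# Hedged sketch (the original loop body is truncated above): one common way to fill
# tempo_vector per audio file with librosa is the beat tracker, e.g.
# y, sr = librosa.load(join(path, f))
# tempo, _ = librosa.beat.beat_track(y=y, sr=sr)
# tempo_vector.loc[id] = tempo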
'''
This method uses these features
['dow', 'year', 'month', 'day_of_week', 'holiday_flg', 'min_visitors', 'mean_visitors', 'median_visitors', 'max_visitors', 'count_observations', 'air_genre_name', 'air_area_name', 'latitude', 'longitude', 'rs1_x', 'rv1_x', 'rs2_x', 'rv2_x', 'rs1_y', 'rv1_y', 'rs2_y', 'rv2_y', 'total_reserv_sum', 'total_reserv_mean', 'total_reserv_dt_diff_mean']
RMSE GradientBoostingRegressor: 0.501477019571
RMSE KNeighborsRegressor: 0.421517079307
'''
import glob, re
import numpy as np
import pandas as pd
from sklearn import *
from datetime import datetime
def RMSLE(y, pred):
return metrics.mean_squared_error(y, pred)**0.5
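# Note (hedged): despite its name, RMSLE above is a plain RMSE of whatever it is given;
# it only behaves like a root-mean-squared-log-error if y and pred are already
# log1p-transformed visitor counts, which is the usual setup for this competition.
def _rmsle_on_log_targets_example():
    y_log = np.log1p([10, 20])
    pred_log = np.log1p([12, 18])
    return RMSLE(y_log, pred_log)   # roughly 0.14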
data = {
'tra': pd.read_csv('./data/air_visit_data.csv'),
'as': pd.read_csv('./data/air_store_info.csv'),
'hs': pd.read_csv('./data/hpg_store_info.csv'),
#!/usr/bin/env python
import argparse
import pandas as pd
import re
#read arguments
parser = argparse.ArgumentParser(description="Recluster the gene clusters by species pairs based on orthopairs")
parser.add_argument("--orthopairs", "-op", required=True)
parser.add_argument("--orthogroups", "-og", required=True)
parser.add_argument("--species1", "-s1", required=True)
parser.add_argument("--species2", "-s2", required=True)
parser.add_argument("--output_file", "-out", required=True)
args = parser.parse_args()
my_orthopairs = args.orthopairs
my_orthogroup = args.orthogroups
species1 = args.species1
species2 = args.species2
my_output = args.output_file
#### Main
#read input
orthopairs_df = pd.read_table(my_orthopairs, sep="\t", header=0, names=["GeneID1", "GeneID2"])
orthopairs_df["GeneID1"] = [re.sub(".*\\|", "", element) for element in list(orthopairs_df["GeneID1"])]
orthopairs_df["GeneID2"] = [re.sub(".*\\|", "", element) for element in list(orthopairs_df["GeneID2"])]
orthogroups_df = pd.read_table(my_orthogroup, sep="\t", header=0, names=["ClusterID", "Species", "GeneID", ""], index_col=None)
#add Species to orthopairs
geneID_species_dict = pd.Series(orthogroups_df.Species.values, index=orthogroups_df.GeneID).to_dict()
orthopairs_df["Species1"] = orthopairs_df["GeneID1"].map(geneID_species_dict)
orthopairs_df["Species2"] = orthopairs_df["GeneID2"].map(geneID_species_dict)
#add ClusterID to orthopairs
geneID_clusterID_dict = pd.Series(orthogroups_df.ClusterID.values, index=orthogroups_df.GeneID).to_dict()
orthopairs_df["ClusterID"] = orthopairs_df["GeneID1"].map(geneID_clusterID_dict) #GeneID1 and GeneID2 would do the same job here.
#filter only for species1 and species2 orthologs.
species_pair_df = orthopairs_df.loc[orthopairs_df.Species1.isin([species1, species2])]
species_pair_df = species_pair_df.loc[orthopairs_df.Species2.isin([species1, species2])]
#group by gene cluster
species_pair_grouped_df = species_pair_df.groupby("ClusterID")
final_df = pd.DataFrame()
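# Hedged illustration with toy data: the .map() calls above use the
# pd.Series(values, index=keys).to_dict() pattern as a vectorised lookup table.
def _map_lookup_example():
    lookup = pd.Series(["cluster1", "cluster2"], index=["geneA", "geneB"]).to_dict()
    genes = pd.Series(["geneA", "geneB", "geneC"])
    return genes.map(lookup)   # -> ["cluster1", "cluster2", NaN]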
# Environment Setup
# ----------------------------------------------------------------
# Dependencies
import csv
import pandas as pd
import random
import numpy as np
# Output File Name
file_output_players = "generated_data/players_complete.csv"
file_output_items = "generated_data/items_complete.csv"
# file_output_purchases_json = "generated_data/purchase_data_3.json"
file_output_purchases_csv = "generated_data/purchase_data.csv"
# Convert the Players List to a Data Frame
players = pd.read_csv("raw_data/players.csv", dtype="str", header=0)
total_players = len(players)
items = pd.read_table("raw_data/items.txt", delimiter="\t", dtype="str")
total_items = len(items)
# Generator Conditions (Change as Needed)
# ----------------------------------------------------------------
# Population Counts
total_purchase_count = 780
player_count = len(players) - 27
item_count = len(items) - 6
# Player Weight
genders = ["Male", "Female", "Other / Non-Disclosed"]
gender_weights = [0.82, 0.16, 0.02]
age_ranges = [7, 15, 20, 25, 30, 35, 40, 45]
age_weights = [0.01, 0.09, 0.20, 0.46, 0.10, 0.08, 0.05, 0.01]
# Item Prices
low_price = 1
high_price = 5
# Generate Players
# ----------------------------------------------------------------
# Generate all gender probabilities
gender_probabilities = zip(genders, gender_weights)
gender_profiles = []
# Generate a sufficient number of genders
for gender in gender_probabilities:
gender_profiles = gender_profiles + \
[gender[0]] * int(gender[1] * total_players)
# Generate random ages
age_probabilities = zip(age_ranges, age_weights)
age_counts = []
age_profiles = []
for age in age_probabilities:
age_counts = age_counts + [int(age[1] * total_players)]
age_probabilities = zip(age_counts, age_ranges)
# Generate right number of random numbers
prev_age = age_ranges[0]
for age in age_probabilities:
for x in range(age[0]):
age_profiles = age_profiles + [random.randint(prev_age, age[1])]
prev_age = age[1]
random.shuffle(gender_profiles)
random.shuffle(age_profiles)
# Convert lists into pandas data frames
gender_profiles_pd = pd.Series(gender_profiles)
age_profiles_pd = pd.Series(age_profiles)
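# Hedged aside (not part of the original generator): the manual weights -> repeated-list
# -> shuffle construction above yields exact proportions; random.choices samples with
# replacement according to the weights, so proportions are approximate instead.
def _weighted_sampling_example(k=10):
    genders_demo = ["Male", "Female", "Other / Non-Disclosed"]
    weights_demo = [0.82, 0.16, 0.02]
    return random.choices(genders_demo, weights=weights_demo, k=k)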
# -*- coding: utf-8 -*-
"""
@author: meslami
"""
# Multilayer Perceptron
import pandas
import numpy
# fix random seed for reproducibility
seed = 7
numpy.random.seed(seed)
import tensorflow.keras
import math
from tensorflow.keras.utils import plot_model
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Conv2DTranspose,Input, Reshape, Conv2D, Flatten
from tensorflow.keras.layers import Dense,concatenate
from sklearn.metrics import mean_squared_error
import argparse
#from tensorflow.keras.utils.np_utils import to_categorical
import tensorflow as tf
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
from tensorflow.keras.layers import Dropout
import numpy as np
from sklearn.metrics import r2_score
from sklearn.metrics import accuracy_score
from sklearn.metrics import classification_report
from sklearn.model_selection import StratifiedKFold
from scipy.stats import pearsonr, spearmanr
####################
####### Functions #############
####################
def custom_loss_2 (y_true, y_pred):
A = tensorflow.keras.losses.mean_absolute_error(y_true, y_pred)
return A
def lrelu(x): #from pix2pix code
a=0.2
# adding these together creates the leak part and linear part
# then cancels them out by subtracting/adding an absolute value term
# leak: a*x/2 - a*abs(x)/2
# linear: x/2 + abs(x)/2
# this block looks like it has 2 inputs on the graph unless we do this
x = tf.identity(x)
return (0.5 * (1 + a)) * x + (0.5 * (1 - a)) * tf.abs(x)
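# Added worked check (illustrative only): with a = 0.2 the expression reduces to
# 0.6*x + 0.4*abs(x), so lrelu(1.0) == 1.0 and lrelu(-1.0) == -0.2, i.e. max(x, a*x),
# which is the expected leaky-ReLU behaviour.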
def lrelu_output_shape(input_shape):
shape = list(input_shape)
return tuple(shape)
from tensorflow.keras.layers import Lambda
layer_lrelu=Lambda(lrelu, output_shape=lrelu_output_shape)
def FnCreateTargetImages(Labels):
OutputImages=np.zeros(shape=(len(Labels),45,45,3))
lables_int=Labels.astype(int)
for i in range(len(Labels)):
OutputImages[i,10:35,:lables_int[i],0]=0
OutputImages[i,10:35,:lables_int[i],1]=1
OutputImages[i,10:35,:lables_int[i],2]=1
return OutputImages
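# Added descriptive note: each target is a 45x45x3 array that is zero everywhere except for a
# horizontal bar over rows 10-34 whose width equals the integer label; the bar's channel values
# are (0, 1, 1), i.e. cyan if the array is interpreted as an RGB image.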
def FnCreateValidLabes(Labels):
return range(len(Labels))
####################
###### End of functions ##############
####################
####################
###### Reading input arguments ##############
#########################################################
######### Hyper paramters configurations ##################
########### ##########################################################
parser = argparse.ArgumentParser()
parser.add_argument("--output", )
parser.add_argument("--max_epochs", )
parser.add_argument("--BatchSize", )
parser.add_argument("--k", )
parser.add_argument("--m", )
a = parser.parse_args()
a.max_epochs=300
a.BatchSize=1000
a.output='./1/'
import os
try:
os.stat(a.output)
except:
os.mkdir(a.output)
####################
###### Preparing Data ##############
###### code from https://github.com/daniel-codes/hospital-los-predictor ##############
####################
###### Reading Data
####################
# Primary Admissions information
df = pandas.read_csv('./data/ADMISSIONS.csv')
import base64
import datetime
import io
import os
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
from xlrd.xldate import xldate_as_datetime
from yattag import Doc
plt.rcParams.update({"figure.autolayout": True})
import matplotlib.gridspec as gridspec
import pandas as pd
import scipy.stats
import tensorflow as tf
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
import logging
"""
TF_CPP_MIN_LOG_LEVEL:
Defaults to 0, so all logs are shown. Set TF_CPP_MIN_LOG_LEVEL to 1 to filter out INFO logs, 2 to additionally filter out WARNING, 3 to additionally filter out ERROR.
"""
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "1"
from tensorflow import keras
class NNetwork(object):
def __init__(self, network_count=200, epochs=1000):
logging.getLogger().setLevel(logging.INFO)
self.xl_dateformat = r"%Y-%m-%dT%H:%M"
self.model = None
self.pretrained_networks = []
self.software_version = "2.0.1"
self.input_filename = None
self.today = str(datetime.date.today())
self.avg_time_elapsed = 0
self.predictors_scaler = MinMaxScaler(feature_range=(-1, 1))
self.targets_scaler = MinMaxScaler(feature_range=(-1, 1))
self.history = None
self.file = None
self.skipped_rows = []
self.ruleset = []
self.layer1_neurons = 12
self.network_count = network_count
self.epochs = epochs
self.predictors = None
self.targets = None
self.predictions = None
self.avg_case_results_am = None
self.avg_case_results_pm = None
self.worst_case_results_am = None
self.worst_case_results_pm = None
self.WB_bandwidth = None
self.post_process_check = False # Is post-processed better than raw. If False, uses raw results, if true, uses post-processed results
self.optimizer = keras.optimizers.Nadam(lr=0.01, beta_1=0.9, beta_2=0.999)
self.model = keras.models.Sequential()
self.model.add(
keras.layers.Dense(self.layer1_neurons, input_dim=5, activation="tanh")
)
self.model.add(keras.layers.Dense(1, activation="linear"))
self.model.compile(loss="mse", optimizer=self.optimizer, metrics=["mse"])
def import_data_from_csv(self, filename):
"""
        Imports data into the network from a comma-separated values (CSV) file.
        Data loaded by this method can be used both for training and for making
        predictions.
:param filename: String containing the filename of the .csv file containing the input data (e.g "input_data.csv")
"""
df = pd.read_csv(filename)
self.file = df.copy()
global FRC_IN
global FRC_OUT
global WATTEMP
global COND
# Locate the fields used as inputs/predictors and outputs in the loaded file
# and split them
if "se1_frc" in self.file.columns:
FRC_IN = "se1_frc"
WATTEMP = "se1_wattemp"
COND = "se1_cond"
FRC_OUT = "se4_frc"
elif "ts_frc1" in self.file.columns:
FRC_IN = "ts_frc1"
WATTEMP = "ts_wattemp"
COND = "ts_cond"
FRC_OUT = "hh_frc1"
elif "ts_frc" in self.file.columns:
FRC_IN = "ts_frc"
WATTEMP = "ts_wattemp"
COND = "ts_cond"
FRC_OUT = "hh_frc"
# Standardize the DataFrame by specifying rules
# To add a new rule, call the method execute_rule with the parameters (description, affected_column, query)
self.execute_rule("Invalid tapstand FRC", FRC_IN, self.file[FRC_IN].isnull())
self.execute_rule("Invalid household FRC", FRC_OUT, self.file[FRC_OUT].isnull())
self.execute_rule(
"Invalid tapstand date/time",
"ts_datetime",
self.valid_dates(self.file["ts_datetime"]),
)
self.execute_rule(
"Invalid household date/time",
"hh_datetime",
self.valid_dates(self.file["hh_datetime"]),
)
self.skipped_rows = df.loc[df.index.difference(self.file.index)]
self.file.reset_index(drop=True, inplace=True) # fix dropped indices in pandas
# Locate the rows of the missing data
drop_threshold = 0.90 * len(self.file.loc[:, [FRC_IN]])
nan_rows_watt = self.file.loc[self.file[WATTEMP].isnull()]
if len(nan_rows_watt) < drop_threshold:
self.execute_rule(
"Missing Water Temperature Measurement",
WATTEMP,
self.file[WATTEMP].isnull(),
)
nan_rows_cond = self.file.loc[self.file[COND].isnull()]
if len(nan_rows_cond) < drop_threshold:
self.execute_rule("Missing EC Measurement", COND, self.file[COND].isnull())
self.skipped_rows = df.loc[df.index.difference(self.file.index)]
self.file.reset_index(drop=True, inplace=True)
start_date = self.file["ts_datetime"]
end_date = self.file["hh_datetime"]
durations = []
all_dates = []
collection_time = []
for i in range(len(start_date)):
try:
# excel type
start = float(start_date[i])
end = float(end_date[i])
start = xldate_as_datetime(start, datemode=0)
if start.hour > 12:
collection_time = np.append(collection_time, 1)
else:
collection_time = np.append(collection_time, 0)
end = xldate_as_datetime(end, datemode=0)
except ValueError:
# kobo type
start = start_date[i][:16].replace("/", "-")
end = end_date[i][:16].replace("/", "-")
start = datetime.datetime.strptime(start, self.xl_dateformat)
if start.hour > 12:
collection_time = np.append(collection_time, 1)
else:
collection_time = np.append(collection_time, 0)
end = datetime.datetime.strptime(end, self.xl_dateformat)
durations.append((end - start).total_seconds())
all_dates.append(datetime.datetime.strftime(start, self.xl_dateformat))
self.durations = durations
self.time_of_collection = collection_time
self.avg_time_elapsed = np.mean(durations)
# Extract the column of dates for all data and put them in YYYY-MM-DD format
self.file["formatted_date"] = all_dates
predictors = {
FRC_IN: self.file[FRC_IN],
"elapsed time": (np.array(self.durations) / 3600),
"time of collection (0=AM, 1=PM)": self.time_of_collection,
}
self.targets = self.file.loc[:, FRC_OUT]
self.var_names = [
"Tapstand FRC (mg/L)",
"Elapsed Time",
"time of collection (0=AM, 1=PM)",
]
self.predictors = pd.DataFrame(predictors)
if len(nan_rows_watt) < drop_threshold:
self.predictors[WATTEMP] = self.file[WATTEMP]
self.var_names.append("Water Temperature(" + r"$\degree$" + "C)")
self.median_wattemp = np.median(self.file[WATTEMP].dropna().to_numpy())
self.upper95_wattemp = np.percentile(
self.file[WATTEMP].dropna().to_numpy(), 95
)
if len(nan_rows_cond) < drop_threshold:
self.predictors[COND] = self.file[COND]
self.var_names.append("EC (" + r"$\mu$" + "s/cm)")
self.median_cond = np.median(self.file[COND].dropna().to_numpy())
self.upper95_cond = np.percentile(self.file[COND].dropna().to_numpy(), 95)
self.targets = self.targets.values.reshape(-1, 1)
self.datainputs = self.predictors
self.dataoutputs = self.targets
self.input_filename = filename
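    # Illustrative usage sketch (added; the filename and directory below are hypothetical):
    #   net = NNetwork(network_count=200, epochs=1000)
    #   net.import_data_from_csv("swot_input.csv")
    #   net.set_up_model()
    #   net.train_SWOT_network("trained_models")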
def set_up_model(self):
self.optimizer = keras.optimizers.Nadam(lr=0.01, beta_1=0.9, beta_2=0.999)
self.model = keras.models.Sequential()
self.model.add(
keras.layers.Dense(
self.layer1_neurons,
input_dim=len(self.datainputs.columns),
activation="tanh",
)
)
self.model.add(keras.layers.Dense(1, activation="linear"))
self.model.compile(loss="mse", optimizer=self.optimizer)
def train_SWOT_network(self, directory):
"""Train the set of 200 neural networks on SWOT data
Trains an ensemble of 200 neural networks on se1_frc, water temperature,
water conductivity."""
if not os.path.exists(directory):
os.makedirs(directory)
self.predictors_scaler = self.predictors_scaler.fit(self.predictors)
self.targets_scaler = self.targets_scaler.fit(self.targets)
x = self.predictors
t = self.targets
self.calibration_predictions = []
self.trained_models = {}
for i in range(self.network_count):
logging.info('Training Network ' + str(i))
model_out = self.train_network(x, t, directory)
self.trained_models.update({'model_' + str(i): model_out})
def train_network(self, x, t, directory):
"""
Trains a single Neural Network on imported data.
        This method trains a single neural network on data that have previously been
        imported using the import_data_from_csv() method.
        The network used is a multilayer perceptron (MLP). Input and output data are
        normalized using min-max normalization.
        The input dataset is split into training and validation subsets: each ensemble
        member is trained on a random one-third of the data and validated on the
        remaining two-thirds.
The training history is stored in a variable called self.history (see keras documentation:
keras.model.history object)
Performance metrics are calculated and stored for evaluating the network performance.
"""
tf.keras.backend.clear_session()
early_stopping_monitor = keras.callbacks.EarlyStopping(monitor='val_loss', min_delta=0, patience=10,
restore_best_weights=True)
x_norm = self.predictors_scaler.transform(x)
t_norm = self.targets_scaler.transform(t)
trained_model = keras.models.clone_model(self.model)
x_norm_train, x_norm_val, t_norm_train, t_norm_val = train_test_split(x_norm, t_norm, train_size=0.333,
shuffle=True)
new_weights = [np.random.uniform(-0.05, 0.05, w.shape) for w in trained_model.get_weights()]
trained_model.set_weights(new_weights)
trained_model.compile(loss='mse', optimizer=self.optimizer)
trained_model.fit(x_norm_train, t_norm_train, epochs=self.epochs, validation_data=(x_norm_val, t_norm_val),
callbacks=[early_stopping_monitor], verbose=0, batch_size=len(t_norm_train))
self.calibration_predictions.append(self.targets_scaler.inverse_transform(trained_model.predict(x_norm)))
return trained_model
def calibration_performance_evaluation(self, filename):
Y_true = np.array(self.targets)
Y_pred = np.array(self.calibration_predictions)
FRC_X = self.datainputs[FRC_IN].to_numpy()
capture_all = (
np.less_equal(Y_true, np.max(Y_pred, axis=0))
* np.greater_equal(Y_true, np.min(Y_pred, axis=0))
* 1
)
capture_90 = (
np.less_equal(Y_true, np.percentile(Y_pred, 95, axis=0))
* np.greater_equal(Y_true, np.percentile(Y_pred, 5, axis=0))
* 1
)
capture_80 = (
np.less_equal(Y_true, np.percentile(Y_pred, 90, axis=0))
* np.greater_equal(Y_true, np.percentile(Y_pred, 10, axis=0))
* 1
)
capture_70 = (
np.less_equal(Y_true, np.percentile(Y_pred, 85, axis=0))
* np.greater_equal(Y_true, np.percentile(Y_pred, 15, axis=0))
* 1
)
capture_60 = (
np.less_equal(Y_true, np.percentile(Y_pred, 80, axis=0))
* np.greater_equal(Y_true, np.percentile(Y_pred, 20, axis=0))
* 1
)
capture_50 = (
np.less_equal(Y_true, np.percentile(Y_pred, 75, axis=0))
* np.greater_equal(Y_true, np.percentile(Y_pred, 25, axis=0))
* 1
)
capture_40 = (
np.less_equal(Y_true, np.percentile(Y_pred, 70, axis=0))
* np.greater_equal(Y_true, np.percentile(Y_pred, 30, axis=0))
* 1
)
capture_30 = (
np.less_equal(Y_true, np.percentile(Y_pred, 65, axis=0))
* np.greater_equal(Y_true, np.percentile(Y_pred, 35, axis=0))
* 1
)
capture_20 = (
np.less_equal(Y_true, np.percentile(Y_pred, 60, axis=0))
* np.greater_equal(Y_true, np.percentile(Y_pred, 40, axis=0))
* 1
)
capture_10 = (
np.less_equal(Y_true, np.percentile(Y_pred, 55, axis=0))
* np.greater_equal(Y_true, np.percentile(Y_pred, 45, axis=0))
* 1
)
capture_all_20 = capture_all * np.less(Y_true, 0.2)
capture_90_20 = capture_90 * np.less(Y_true, 0.2)
capture_80_20 = capture_80 * np.less(Y_true, 0.2)
capture_70_20 = capture_70 * np.less(Y_true, 0.2)
capture_60_20 = capture_60 * np.less(Y_true, 0.2)
capture_50_20 = capture_50 * np.less(Y_true, 0.2)
capture_40_20 = capture_40 * np.less(Y_true, 0.2)
capture_30_20 = capture_30 * np.less(Y_true, 0.2)
capture_20_20 = capture_20 * np.less(Y_true, 0.2)
capture_10_20 = capture_10 * np.less(Y_true, 0.2)
length_20 = np.sum(np.less(Y_true, 0.2))
test_len = len(Y_true)
capture_all_sum = np.sum(capture_all)
capture_90_sum = np.sum(capture_90)
capture_80_sum = np.sum(capture_80)
capture_70_sum = np.sum(capture_70)
capture_60_sum = np.sum(capture_60)
capture_50_sum = np.sum(capture_50)
capture_40_sum = np.sum(capture_40)
capture_30_sum = np.sum(capture_30)
capture_20_sum = np.sum(capture_20)
capture_10_sum = np.sum(capture_10)
capture_all_20_sum = np.sum(capture_all_20)
capture_90_20_sum = np.sum(capture_90_20)
capture_80_20_sum = np.sum(capture_80_20)
capture_70_20_sum = np.sum(capture_70_20)
capture_60_20_sum = np.sum(capture_60_20)
capture_50_20_sum = np.sum(capture_50_20)
capture_40_20_sum = np.sum(capture_40_20)
capture_30_20_sum = np.sum(capture_30_20)
capture_20_20_sum = np.sum(capture_20_20)
capture_10_20_sum = np.sum(capture_10_20)
capture = [
capture_10_sum / test_len,
capture_20_sum / test_len,
capture_30_sum / test_len,
capture_40_sum / test_len,
capture_50_sum / test_len,
capture_60_sum / test_len,
capture_70_sum / test_len,
capture_80_sum / test_len,
capture_90_sum / test_len,
capture_all_sum / test_len,
]
capture_20 = [
capture_10_20_sum / length_20,
capture_20_20_sum / length_20,
capture_30_20_sum / length_20,
capture_40_20_sum / length_20,
capture_50_20_sum / length_20,
capture_60_20_sum / length_20,
capture_70_20_sum / length_20,
capture_80_20_sum / length_20,
capture_90_20_sum / length_20,
capture_all_20_sum / length_20,
]
self.percent_capture_cal = capture_all_sum / test_len
self.percent_capture_02_cal = capture_all_20_sum / length_20
self.CI_reliability_cal = (
(0.1 - capture_10_sum / test_len) ** 2
+ (0.2 - capture_20_sum / test_len) ** 2
+ (0.3 - capture_30_sum / test_len) ** 2
+ (0.4 - capture_40_sum / test_len) ** 2
+ (0.5 - capture_50_sum / test_len) ** 2
+ (0.6 - capture_60_sum / test_len) ** 2
+ (0.7 - capture_70_sum / test_len) ** 2
+ (0.8 - capture_80_sum / test_len) ** 2
+ (0.9 - capture_90_sum / test_len) ** 2
+ (1 - capture_all_sum / test_len) ** 2
)
self.CI_reliability_02_cal = (
(0.1 - capture_10_20_sum / length_20) ** 2
+ (0.2 - capture_20_20_sum / length_20) ** 2
+ (0.3 - capture_30_20_sum / length_20) ** 2
+ (0.4 - capture_40_20_sum / length_20) ** 2
+ (0.5 - capture_50_20_sum / length_20) ** 2
+ (0.6 - capture_60_20_sum / length_20) ** 2
+ (0.7 - capture_70_20_sum / length_20) ** 2
+ (0.8 - capture_80_20_sum / length_20) ** 2
+ (0.9 - capture_90_20_sum / length_20) ** 2
+ (1 - capture_all_20_sum / length_20) ** 2
)
# Rank Histogram
rank = []
for a in range(0, len(Y_true)):
n_lower = np.sum(np.greater(Y_true[a], Y_pred[:, a]))
n_equal = np.sum(np.equal(Y_true[a], Y_pred[:, a]))
            deviate_rank = np.random.randint(0, n_equal + 1)  # inclusive upper bound; replaces deprecated random_integers
rank = np.append(rank, n_lower + deviate_rank)
rank_hist = np.histogram(rank, bins=self.network_count + 1)
delta = np.sum((rank_hist[0] - (test_len / ((self.network_count + 1)))) ** 2)
delta_0 = self.network_count * test_len / (self.network_count + 1)
self.delta_score_cal = delta / delta_0
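        # Added note: delta_0 is the expected sum of squared deviations for a perfectly flat
        # (uniform) rank histogram, so delta_score_cal values near 1 are consistent with a
        # reliable ensemble, while values well above 1 indicate bias or spread problems.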
c = self.network_count
alpha = np.zeros((test_len, (c + 1)))
beta = np.zeros((test_len, (c + 1)))
low_outlier = 0
high_outlier = 0
for a in range(0, test_len):
observation = Y_true[a]
forecast = np.sort(Y_pred[:, a])
for b in range(1, c):
if observation > forecast[b]:
alpha[a, b] = forecast[b] - forecast[b - 1]
beta[a, b] = 0
elif forecast[b] > observation > forecast[b - 1]:
alpha[a, b] = observation - forecast[b - 1]
beta[a, b] = forecast[b] - observation
else:
alpha[a, b] = 0
beta[a, b] = forecast[b] - forecast[b - 1]
# overwrite boundaries in case of outliers
if observation < forecast[0]:
beta[a, 0] = forecast[0] - observation
low_outlier += 1
if observation > forecast[c - 1]:
alpha[a, c] = observation - forecast[c - 1]
high_outlier += 1
alpha_bar = np.mean(alpha, axis=0)
beta_bar = np.mean(beta, axis=0)
g_bar = alpha_bar + beta_bar
o_bar = beta_bar / (alpha_bar + beta_bar)
if low_outlier > 0:
o_bar[0] = low_outlier / test_len
g_bar[0] = beta_bar[0] / o_bar[0]
else:
o_bar[0] = 0
g_bar[0] = 0
if high_outlier > 0:
o_bar[c] = high_outlier / test_len
g_bar[c] = alpha_bar[c] / o_bar[c]
else:
o_bar[c] = 0
g_bar[c] = 0
p_i = np.arange(0 / c, (c + 1) / c, 1 / c)
self.CRPS_cal = np.sum(
g_bar * ((1 - o_bar) * (p_i**2) + o_bar * ((1 - p_i) ** 2))
)
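        # Added note: this is the Hersbach (2000) ensemble decomposition of the CRPS, with g_bar the
        # mean width of each inter-member bin, o_bar the frequency of the observation falling below
        # that bin, and p_i = i / network_count the nominal cumulative probability.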
CI_x = [0.10, 0.20, 0.30, 0.40, 0.50, 0.60, 0.70, 0.80, 0.90, 1.00]
fig = plt.figure(figsize=(15, 10), dpi=100)
gridspec.GridSpec(2, 3)
plt.subplot2grid((2, 3), (0, 0), colspan=2, rowspan=2)
plt.axhline(0.2, c="k", ls="--", label="Point-of-consumption FRC = 0.2 mg/L")
plt.scatter(
FRC_X, Y_true, edgecolors="k", facecolors="None", s=20, label="Observed"
)
plt.scatter(
FRC_X,
np.median(Y_pred, axis=0),
facecolors="r",
edgecolors="None",
s=10,
label="Forecast Median",
)
plt.vlines(
FRC_X,
np.min(Y_pred, axis=0),
np.max(Y_pred, axis=0),
color="r",
label="Forecast Range",
)
plt.xlabel("Point-of-Distribution FRC (mg/L)")
plt.ylabel("Point-of-Consumption FRC (mg/L)")
plt.xlim([0, np.max(FRC_X)])
plt.legend(
bbox_to_anchor=(0.001, 0.999),
shadow=False,
labelspacing=0.1,
fontsize="small",
handletextpad=0.1,
loc="upper left",
)
ax1 = fig.axes[0]
ax1.set_title("(a)", y=0.88, x=0.05)
plt.subplot2grid((2, 3), (0, 2), colspan=1, rowspan=1)
plt.plot(CI_x, CI_x, c="k")
plt.scatter(CI_x, capture, label="All observations")
plt.scatter(CI_x, capture_20, label="Point-of-Consumption FRC below 0.2 mg/L")
plt.xlabel("Ensemble Confidence Interval")
plt.ylabel("Percent Capture")
plt.ylim([0, 1])
plt.xlim([0, 1])
plt.legend(
bbox_to_anchor=(0.001, 0.999),
shadow=False,
labelspacing=0.1,
fontsize="small",
handletextpad=0.1,
loc="upper left",
)
ax2 = fig.axes[1]
ax2.set_title("(b)", y=0.88, x=0.05)
plt.subplot2grid((2, 3), (1, 2), colspan=1, rowspan=1)
plt.hist(rank, bins=(self.network_count + 1), density=True)
plt.xlabel("Rank")
plt.ylabel("Probability")
ax3 = fig.axes[2]
ax3.set_title("(c)", y=0.88, x=0.05)
plt.savefig(
os.path.splitext(filename)[0] + "_Calibration_Diagnostic_Figs.png",
format="png",
bbox_inches="tight",
)
plt.close()
myStringIOBytes = io.BytesIO()
plt.savefig(myStringIOBytes, format="png", bbox_inches="tight")
myStringIOBytes.seek(0)
my_base_64_pngData = base64.b64encode(myStringIOBytes.read())
return my_base_64_pngData
def get_bw(self):
Y_true = np.array(self.targets)
Y_pred = np.array(self.calibration_predictions)[:, :, 0]
s2 = []
xt_yt = []
for a in range(0, len(Y_true)):
observation = Y_true[a]
forecast = np.sort(Y_pred[:, a])
s2 = np.append(s2, np.var(forecast))
xt_yt = np.append(xt_yt, (np.mean(forecast) - observation) ** 2)
WB_bw = np.mean(xt_yt) - (1 + 1 / self.network_count) * np.mean(s2)
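        # Added note: this bandwidth is the mean squared error of the ensemble mean minus the
        # (inflated) mean ensemble variance, i.e. the variance deficit used to dress the ensemble;
        # the "WB" name presumably refers to the Wang & Bishop (2005) ensemble-dressing kernel.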
return WB_bw
def post_process_performance_eval(self, bandwidth):
Y_true = np.squeeze(np.array(self.targets))
Y_pred = np.array(self.calibration_predictions)[:, :, 0]
test_len = len(Y_true)
min_CI = []
max_CI = []
CI_90_Lower = []
CI_90_Upper = []
CI_80_Lower = []
CI_80_Upper = []
CI_70_Lower = []
CI_70_Upper = []
CI_60_Lower = []
CI_60_Upper = []
CI_50_Lower = []
CI_50_Upper = []
CI_40_Lower = []
CI_40_Upper = []
CI_30_Lower = []
CI_30_Upper = []
CI_20_Lower = []
CI_20_Upper = []
CI_10_Lower = []
CI_10_Upper = []
CI_median = []
CRPS = []
Kernel_Risk = []
evaluation_range = np.arange(-10, 10.001, 0.001)
# compute CRPS as well as the confidence intervals of each ensemble forecast
for a in range(0, test_len):
scipy_kde = scipy.stats.gaussian_kde(Y_pred[:, a], bw_method=bandwidth)
scipy_pdf = scipy_kde.evaluate(evaluation_range) * 0.001
scipy_cdf = np.cumsum(scipy_pdf)
min_CI = np.append(
min_CI, evaluation_range[np.max(np.where(scipy_cdf == 0)[0])]
)
max_CI = np.append(max_CI, evaluation_range[np.argmax(scipy_cdf)])
CI_90_Lower = np.append(
CI_90_Lower, evaluation_range[np.argmin(np.abs((scipy_cdf - 0.05)))]
)
CI_90_Upper = np.append(
CI_90_Upper, evaluation_range[np.argmin(np.abs((scipy_cdf - 0.95)))]
)
CI_80_Lower = np.append(
CI_80_Lower, evaluation_range[np.argmin(np.abs((scipy_cdf - 0.1)))]
)
CI_80_Upper = np.append(
CI_80_Upper, evaluation_range[np.argmin(np.abs((scipy_cdf - 0.9)))]
)
CI_70_Lower = np.append(
CI_70_Lower, evaluation_range[np.argmin(np.abs((scipy_cdf - 0.15)))]
)
CI_70_Upper = np.append(
CI_70_Upper, evaluation_range[np.argmin(np.abs((scipy_cdf - 0.85)))]
)
CI_60_Lower = np.append(
CI_60_Lower, evaluation_range[np.argmin(np.abs((scipy_cdf - 0.2)))]
)
CI_60_Upper = np.append(
CI_60_Upper, evaluation_range[np.argmin(np.abs((scipy_cdf - 0.8)))]
)
CI_50_Lower = np.append(
CI_50_Lower, evaluation_range[np.argmin(np.abs((scipy_cdf - 0.25)))]
)
CI_50_Upper = np.append(
CI_50_Upper, evaluation_range[np.argmin(np.abs((scipy_cdf - 0.75)))]
)
CI_40_Lower = np.append(
CI_40_Lower, evaluation_range[np.argmin(np.abs((scipy_cdf - 0.3)))]
)
CI_40_Upper = np.append(
CI_40_Upper, evaluation_range[np.argmin(np.abs((scipy_cdf - 0.7)))]
)
CI_30_Lower = np.append(
CI_30_Lower, evaluation_range[np.argmin(np.abs((scipy_cdf - 0.35)))]
)
CI_30_Upper = np.append(
CI_30_Upper, evaluation_range[np.argmin(np.abs((scipy_cdf - 0.65)))]
)
CI_20_Lower = np.append(
CI_20_Lower, evaluation_range[np.argmin(np.abs((scipy_cdf - 0.4)))]
)
CI_20_Upper = np.append(
CI_20_Upper, evaluation_range[np.argmin(np.abs((scipy_cdf - 0.6)))]
)
CI_10_Lower = np.append(
CI_10_Lower, evaluation_range[np.argmin(np.abs((scipy_cdf - 0.45)))]
)
CI_10_Upper = np.append(
CI_10_Upper, evaluation_range[np.argmin(np.abs((scipy_cdf - 0.55)))]
)
CI_median = np.append(
CI_median, evaluation_range[np.argmin(np.abs((scipy_cdf - 0.50)))]
)
Kernel_Risk = np.append(Kernel_Risk, scipy_kde.integrate_box_1d(-10, 0.2))
Heaviside = (evaluation_range >= Y_true[a]).astype(int)
CRPS_dif = (scipy_cdf - Heaviside) ** 2
CRPS = np.append(CRPS, np.sum(CRPS_dif * 0.001))
mean_CRPS = np.mean(CRPS)
capture_all = (
np.less_equal(Y_true, max_CI) * np.greater_equal(Y_true, min_CI) * 1
)
capture_90 = (
np.less_equal(Y_true, CI_90_Upper)
* np.greater_equal(Y_true, CI_90_Lower)
* 1
)
capture_80 = (
np.less_equal(Y_true, CI_80_Upper)
* np.greater_equal(Y_true, CI_80_Lower)
* 1
)
capture_70 = (
np.less_equal(Y_true, CI_70_Upper)
* np.greater_equal(Y_true, CI_70_Lower)
* 1
)
capture_60 = (
np.less_equal(Y_true, CI_60_Upper)
* np.greater_equal(Y_true, CI_60_Lower)
* 1
)
capture_50 = (
np.less_equal(Y_true, CI_50_Upper)
* np.greater_equal(Y_true, CI_50_Lower)
* 1
)
capture_40 = (
np.less_equal(Y_true, CI_40_Upper)
* np.greater_equal(Y_true, CI_40_Lower)
* 1
)
capture_30 = (
np.less_equal(Y_true, CI_30_Upper)
* np.greater_equal(Y_true, CI_30_Lower)
* 1
)
capture_20 = (
np.less_equal(Y_true, CI_20_Upper)
* np.greater_equal(Y_true, CI_20_Lower)
* 1
)
capture_10 = (
np.less_equal(Y_true, CI_10_Upper)
* np.greater_equal(Y_true, CI_10_Lower)
* 1
)
length_20 = np.sum(np.less(Y_true, 0.2))
capture_all_20 = capture_all * np.less(Y_true, 0.2)
capture_90_20 = capture_90 * np.less(Y_true, 0.2)
capture_80_20 = capture_80 * np.less(Y_true, 0.2)
capture_70_20 = capture_70 * np.less(Y_true, 0.2)
capture_60_20 = capture_60 * np.less(Y_true, 0.2)
capture_50_20 = capture_50 * np.less(Y_true, 0.2)
capture_40_20 = capture_40 * np.less(Y_true, 0.2)
capture_30_20 = capture_30 * np.less(Y_true, 0.2)
capture_20_20 = capture_20 * np.less(Y_true, 0.2)
capture_10_20 = capture_10 * np.less(Y_true, 0.2)
capture_all_sum = np.sum(capture_all)
capture_90_sum = np.sum(capture_90)
capture_80_sum = np.sum(capture_80)
capture_70_sum = np.sum(capture_70)
capture_60_sum = np.sum(capture_60)
capture_50_sum = np.sum(capture_50)
capture_40_sum = np.sum(capture_40)
capture_30_sum = np.sum(capture_30)
capture_20_sum = np.sum(capture_20)
capture_10_sum = np.sum(capture_10)
capture_all_20_sum = np.sum(capture_all_20)
capture_90_20_sum = np.sum(capture_90_20)
capture_80_20_sum = np.sum(capture_80_20)
capture_70_20_sum = np.sum(capture_70_20)
capture_60_20_sum = np.sum(capture_60_20)
capture_50_20_sum = np.sum(capture_50_20)
capture_40_20_sum = np.sum(capture_40_20)
capture_30_20_sum = np.sum(capture_30_20)
capture_20_20_sum = np.sum(capture_20_20)
capture_10_20_sum = np.sum(capture_10_20)
capture_sum_squares = (
(0.1 - capture_10_sum / test_len) ** 2
+ (0.2 - capture_20_sum / test_len) ** 2
+ (0.3 - capture_30_sum / test_len) ** 2
+ (0.4 - capture_40_sum / test_len) ** 2
+ (0.5 - capture_50_sum / test_len) ** 2
+ (0.6 - capture_60_sum / test_len) ** 2
+ (0.7 - capture_70_sum / test_len) ** 2
+ (0.8 - capture_80_sum / test_len) ** 2
+ (0.9 - capture_90_sum / test_len) ** 2
+ (1 - capture_all_sum / test_len) ** 2
)
capture_20_sum_squares = (
(0.1 - capture_10_20_sum / length_20) ** 2
+ (0.2 - capture_20_20_sum / length_20) ** 2
+ (0.3 - capture_30_20_sum / length_20) ** 2
+ (0.4 - capture_40_20_sum / length_20) ** 2
+ (0.5 - capture_50_20_sum / length_20) ** 2
+ (0.6 - capture_60_20_sum / length_20) ** 2
+ (0.7 - capture_70_20_sum / length_20) ** 2
+ (0.8 - capture_80_20_sum / length_20) ** 2
+ (0.9 - capture_90_20_sum / length_20) ** 2
+ (1 - capture_all_20_sum / length_20) ** 2
)
return (
mean_CRPS,
capture_sum_squares,
capture_20_sum_squares,
capture_all_sum / test_len,
capture_all_20_sum / length_20,
)
def post_process_cal(self):
self.WB_bandwidth = self.get_bw()
(
self.CRPS_post_cal,
self.CI_reliability_post_cal,
self.CI_reliability_02_post_cal,
self.percent_capture_post_cal,
self.percent_capture_02_post_cal,
) = self.post_process_performance_eval(self.WB_bandwidth)
CRPS_Skill = (self.CRPS_post_cal - self.CRPS_cal) / (0 - self.CRPS_cal)
CI_Skill = (self.CI_reliability_post_cal - self.CI_reliability_cal) / (
0 - self.CI_reliability_cal
)
CI_20_Skill = (self.CI_reliability_02_post_cal - self.CI_reliability_02_cal) / (
0 - self.CI_reliability_02_cal
)
PC_Skill = (self.percent_capture_post_cal - self.percent_capture_cal) / (
1 - self.percent_capture_cal
)
PC_20_Skill = (
self.percent_capture_02_post_cal - self.percent_capture_02_cal
) / (1 - self.percent_capture_02_cal)
Net_Score = CRPS_Skill + CI_Skill + CI_20_Skill + PC_Skill + PC_20_Skill
if Net_Score > 0:
self.post_process_check = True
else:
self.post_process_check = False
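        # Added note: each skill term above is positive when the kernel-smoothed (post-processed)
        # ensemble beats the raw ensemble on that metric, so a positive Net_Score makes the class
        # use the post-processed predictions downstream (post_process_check = True).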
def full_performance_evaluation(self, directory):
x_norm = self.predictors_scaler.transform(self.predictors)
t_norm = self.targets_scaler.transform(self.targets)
base_model = self.model
base_model.save(directory + "\\base_network.h5")
x_cal_norm, x_test_norm, t_cal_norm, t_test_norm = train_test_split(
x_norm, t_norm, test_size=0.25, shuffle=False, random_state=10
)
self.verifying_observations = self.targets_scaler.inverse_transform(t_test_norm)
self.test_x_data = self.predictors_scaler.inverse_transform(x_test_norm)
early_stopping_monitor = keras.callbacks.EarlyStopping(
monitor="val_loss", min_delta=0, patience=10, restore_best_weights=True
)
self.verifying_predictions = []
for i in range(0, self.network_count):
tf.keras.backend.clear_session()
self.model = keras.models.load_model(directory + "\\base_network.h5")
x_norm_train, x_norm_val, t_norm_train, t_norm_val = train_test_split(
x_cal_norm,
t_cal_norm,
train_size=1 / 3,
shuffle=True,
random_state=i**2,
)
new_weights = [
np.random.uniform(-0.05, 0.05, w.shape)
for w in self.model.get_weights()
]
self.model.set_weights(new_weights)
self.model.fit(
x_norm_train,
t_norm_train,
epochs=self.epochs,
validation_data=(x_norm_val, t_norm_val),
callbacks=[early_stopping_monitor],
verbose=0,
batch_size=len(t_norm_train),
)
self.verifying_predictions.append(self.targets_scaler.inverse_transform(self.model.predict(x_test_norm)))
Y_true = np.array(self.verifying_observations)
Y_pred = np.array(self.verifying_predictions)
FRC_X = self.test_x_data[:, 0]
capture_all = (
np.less_equal(Y_true, np.max(Y_pred, axis=0))
* np.greater_equal(Y_true, np.min(Y_pred, axis=0))
* 1
)
capture_90 = (
np.less_equal(Y_true, np.percentile(Y_pred, 95, axis=0))
* np.greater_equal(Y_true, np.percentile(Y_pred, 5, axis=0))
* 1
)
capture_80 = (
np.less_equal(Y_true, np.percentile(Y_pred, 90, axis=0))
* np.greater_equal(Y_true, np.percentile(Y_pred, 10, axis=0))
* 1
)
capture_70 = (
np.less_equal(Y_true, np.percentile(Y_pred, 85, axis=0))
* np.greater_equal(Y_true, np.percentile(Y_pred, 15, axis=0))
* 1
)
capture_60 = (
np.less_equal(Y_true, np.percentile(Y_pred, 80, axis=0))
* np.greater_equal(Y_true, np.percentile(Y_pred, 20, axis=0))
* 1
)
capture_50 = (
np.less_equal(Y_true, np.percentile(Y_pred, 75, axis=0))
* np.greater_equal(Y_true, np.percentile(Y_pred, 25, axis=0))
* 1
)
capture_40 = (
np.less_equal(Y_true, np.percentile(Y_pred, 70, axis=0))
* np.greater_equal(Y_true, np.percentile(Y_pred, 30, axis=0))
* 1
)
capture_30 = (
np.less_equal(Y_true, np.percentile(Y_pred, 65, axis=0))
* np.greater_equal(Y_true, np.percentile(Y_pred, 35, axis=0))
* 1
)
capture_20 = (
np.less_equal(Y_true, np.percentile(Y_pred, 60, axis=0))
* np.greater_equal(Y_true, np.percentile(Y_pred, 40, axis=0))
* 1
)
capture_10 = (
np.less_equal(Y_true, np.percentile(Y_pred, 55, axis=0))
* np.greater_equal(Y_true, np.percentile(Y_pred, 45, axis=0))
* 1
)
capture_all_20 = capture_all * np.less(Y_true, 0.2)
capture_90_20 = capture_90 * np.less(Y_true, 0.2)
capture_80_20 = capture_80 * np.less(Y_true, 0.2)
capture_70_20 = capture_70 * np.less(Y_true, 0.2)
capture_60_20 = capture_60 * np.less(Y_true, 0.2)
capture_50_20 = capture_50 * np.less(Y_true, 0.2)
capture_40_20 = capture_40 * np.less(Y_true, 0.2)
capture_30_20 = capture_30 * np.less(Y_true, 0.2)
capture_20_20 = capture_20 * np.less(Y_true, 0.2)
capture_10_20 = capture_10 * np.less(Y_true, 0.2)
length_20 = np.sum(np.less(Y_true, 0.2))
test_len = len(Y_true)
capture_all_sum = np.sum(capture_all)
capture_90_sum = np.sum(capture_90)
capture_80_sum = np.sum(capture_80)
capture_70_sum = np.sum(capture_70)
capture_60_sum = np.sum(capture_60)
capture_50_sum = np.sum(capture_50)
capture_40_sum = np.sum(capture_40)
capture_30_sum = np.sum(capture_30)
capture_20_sum = np.sum(capture_20)
capture_10_sum = np.sum(capture_10)
capture_all_20_sum = np.sum(capture_all_20)
capture_90_20_sum = np.sum(capture_90_20)
capture_80_20_sum = np.sum(capture_80_20)
capture_70_20_sum = np.sum(capture_70_20)
capture_60_20_sum = np.sum(capture_60_20)
capture_50_20_sum = np.sum(capture_50_20)
capture_40_20_sum = np.sum(capture_40_20)
capture_30_20_sum = np.sum(capture_30_20)
capture_20_20_sum = np.sum(capture_20_20)
capture_10_20_sum = np.sum(capture_10_20)
capture = [
capture_10_sum / test_len,
capture_20_sum / test_len,
capture_30_sum / test_len,
capture_40_sum / test_len,
capture_50_sum / test_len,
capture_60_sum / test_len,
capture_70_sum / test_len,
capture_80_sum / test_len,
capture_90_sum / test_len,
capture_all_sum / test_len,
]
capture_20 = [
capture_10_20_sum / length_20,
capture_20_20_sum / length_20,
capture_30_20_sum / length_20,
capture_40_20_sum / length_20,
capture_50_20_sum / length_20,
capture_60_20_sum / length_20,
capture_70_20_sum / length_20,
capture_80_20_sum / length_20,
capture_90_20_sum / length_20,
capture_all_20_sum / length_20,
]
self.percent_capture_cal = capture_all_sum / test_len
self.percent_capture_02_cal = capture_all_20_sum / length_20
self.CI_reliability_cal = (
(0.1 - capture_10_sum / test_len) ** 2
+ (0.2 - capture_20_sum / test_len) ** 2
+ (0.3 - capture_30_sum / test_len) ** 2
+ (0.4 - capture_40_sum / test_len) ** 2
+ (0.5 - capture_50_sum / test_len) ** 2
+ (0.6 - capture_60_sum / test_len) ** 2
+ (0.7 - capture_70_sum / test_len) ** 2
+ (0.8 - capture_80_sum / test_len) ** 2
+ (0.9 - capture_90_sum / test_len) ** 2
+ (1 - capture_all_sum / test_len) ** 2
)
self.CI_reliability_02_cal = (
(0.1 - capture_10_20_sum / length_20) ** 2
+ (0.2 - capture_20_20_sum / length_20) ** 2
+ (0.3 - capture_30_20_sum / length_20) ** 2
+ (0.4 - capture_40_20_sum / length_20) ** 2
+ (0.5 - capture_50_20_sum / length_20) ** 2
+ (0.6 - capture_60_20_sum / length_20) ** 2
+ (0.7 - capture_70_20_sum / length_20) ** 2
+ (0.8 - capture_80_20_sum / length_20) ** 2
+ (0.9 - capture_90_20_sum / length_20) ** 2
+ (1 - capture_all_20_sum / length_20) ** 2
)
# Rank Histogram
rank = []
for a in range(0, len(Y_true)):
n_lower = np.sum(np.greater(Y_true[a], Y_pred[:, a]))
n_equal = np.sum(np.equal(Y_true[a], Y_pred[:, a]))
            deviate_rank = np.random.randint(0, n_equal + 1)  # inclusive upper bound; replaces deprecated random_integers
rank = np.append(rank, n_lower + deviate_rank)
rank_hist = np.histogram(rank, bins=self.network_count + 1)
delta = np.sum((rank_hist[0] - (test_len / ((self.network_count + 1)))) ** 2)
delta_0 = self.network_count * test_len / (self.network_count + 1)
self.delta_score_cal = delta / delta_0
CI_x = [0.10, 0.20, 0.30, 0.40, 0.50, 0.60, 0.70, 0.80, 0.90, 1.00]
fig = plt.figure(figsize=(15, 10), dpi=100)
gridspec.GridSpec(2, 3)
plt.subplot2grid((2, 3), (0, 0), colspan=2, rowspan=2)
plt.axhline(0.2, c="k", ls="--", label="Point-of-consumption FRC = 0.2 mg/L")
plt.scatter(
FRC_X, Y_true, edgecolors="k", facecolors="None", s=20, label="Observed"
)
plt.scatter(
FRC_X,
np.median(Y_pred, axis=0),
facecolors="r",
edgecolors="None",
s=10,
label="Forecast Median",
)
plt.vlines(
FRC_X,
np.min(Y_pred, axis=0),
np.max(Y_pred, axis=0),
color="r",
label="Forecast Range",
)
plt.xlabel("Point-of-Distribution FRC (mg/L)")
plt.ylabel("Point-of-Consumption FRC (mg/L)")
plt.subplot2grid((2, 3), (0, 2), colspan=1, rowspan=1)
plt.plot(CI_x, CI_x, c='k')
plt.scatter(CI_x, capture)
plt.scatter(CI_x, capture_20)
plt.xlabel("Ensemble Confidence Interval")
plt.ylabel("Percent Capture")
plt.ylim([0, 1])
plt.xlim([0, 1])
plt.subplot2grid((2, 3), (1, 2), colspan=1, rowspan=1)
plt.hist(rank, bins=(self.network_count + 1), density=True)
plt.xlabel('Rank')
plt.ylabel('Probability')
plt.savefig(directory + "\\Verification_Diagnostic_Figs.png", format='png')
plt.close()
myStringIOBytes = io.BytesIO()
plt.savefig(myStringIOBytes, format='png')
myStringIOBytes.seek(0)
my_base_64_pngData = base64.b64encode(myStringIOBytes.read())
return my_base_64_pngData
def set_inputs_for_table(self, storage_target):
frc = np.arange(0.20, 2.05, 0.05)
lag_time = [storage_target for i in range(0, len(frc))]
am_collect = [0 for i in range(0, len(frc))]
pm_collect = [1 for i in range(0, len(frc))]
temp_med_am = {
"ts_frc": frc,
"elapsed time": lag_time,
"time of collection (0=AM, 1=PM)": am_collect,
}
temp_med_pm = {
"ts_frc": frc,
"elapsed time": lag_time,
"time of collection (0=AM, 1=PM)": pm_collect,
}
temp_95_am = {
"ts_frc": frc,
"elapsed time": lag_time,
"time of collection (0=AM, 1=PM)": am_collect,
}
temp_95_pm = {
"ts_frc": frc,
"elapsed time": lag_time,
"time of collection (0=AM, 1=PM)": pm_collect,
}
if WATTEMP in self.datainputs.columns:
watt_med = [self.median_wattemp for i in range(0, len(frc))]
watt_95 = [self.upper95_wattemp for i in range(0, len(frc))]
temp_med_am.update({"ts_wattemp": watt_med})
temp_med_pm.update({"ts_wattemp": watt_med})
temp_95_am.update({"ts_wattemp": watt_95})
temp_95_pm.update({"ts_wattemp": watt_95})
if COND in self.datainputs.columns:
cond_med = [self.median_cond for i in range(0, len(frc))]
cond_95 = [self.upper95_cond for i in range(0, len(frc))]
temp_med_am.update({"ts_cond": cond_med})
temp_med_pm.update({"ts_cond": cond_med})
temp_95_am.update({"ts_cond": cond_95})
temp_95_pm.update({"ts_cond": cond_95})
        self.avg_case_predictors_am = pd.DataFrame(temp_med_am)
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/05-orchestrator.ipynb (unless otherwise specified).
__all__ = ['retry_request', 'if_possible_parse_local_datetime', 'SP_and_date_request', 'handle_capping',
'date_range_request', 'year_request', 'construct_year_month_pairs', 'year_and_month_request',
'clean_year_week', 'construct_year_week_pairs', 'year_and_week_request', 'non_temporal_request',
'query_orchestrator']
# Cell
import pandas as pd
from tqdm import tqdm
from warnings import warn
from requests.models import Response
from . import utils, raw
# Cell
def retry_request(raw, method, kwargs, n_attempts=3):
attempts = 0
success = False
while (attempts < n_attempts) and (success == False):
try:
r = getattr(raw, method)(**kwargs)
utils.check_status(r)
success = True
except Exception as e:
attempts += 1
if attempts == n_attempts:
raise e
return r
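# Illustrative call (added; the method name and kwargs are hypothetical):
#   r = retry_request(raw, 'get_B1770',
#                     {'APIKey': api_key, 'ServiceType': 'xml', 'SettlementDate': '2020-01-01'},
#                     n_attempts=3)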
def if_possible_parse_local_datetime(df):
dt_cols_with_period_in_name = ['startTimeOfHalfHrPeriod', 'initialForecastPublishingPeriodCommencingTime', 'latestForecastPublishingPeriodCommencingTime', 'outTurnPublishingPeriodCommencingTime']
dt_cols = [col for col in df.columns if 'date' in col.lower() or col in dt_cols_with_period_in_name]
sp_cols = [col for col in df.columns if 'period' in col.lower() and col not in dt_cols_with_period_in_name]
if len(dt_cols)==1 and len(sp_cols)==1:
df = utils.parse_local_datetime(df, dt_col=dt_cols[0], SP_col=sp_cols[0])
return df
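# Added note: a local datetime is only derived when the frame has exactly one date-like column and
# exactly one settlement-period column (e.g. hypothetical columns 'settlementDate' and
# 'settlementPeriod'); otherwise the frame is returned unchanged.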
def SP_and_date_request(
method: str,
kwargs_map: dict,
func_params: list,
api_key: str,
start_date: str,
end_date: str,
n_attempts: int=3,
**kwargs
):
assert start_date is not None, '`start_date` must be specified'
assert end_date is not None, '`end_date` must be specified'
df = pd.DataFrame()
stream = '_'.join(method.split('_')[1:])
kwargs.update({
'APIKey': api_key,
'ServiceType': 'xml'
})
df_dates_SPs = utils.dt_rng_to_SPs(start_date, end_date)
date_SP_tuples = list(df_dates_SPs.reset_index().itertuples(index=False, name=None))[:-1]
for datetime, query_date, SP in tqdm(date_SP_tuples, desc=stream, total=len(date_SP_tuples)):
kwargs.update({
kwargs_map['date']: datetime.strftime('%Y-%m-%d'),
kwargs_map['SP']: SP,
})
missing_kwargs = list(set(func_params) - set(['SP', 'date'] + list(kwargs.keys())))
assert len(missing_kwargs) == 0, f"The following kwargs are missing: {', '.join(missing_kwargs)}"
r = retry_request(raw, method, kwargs, n_attempts=n_attempts)
df_SP = utils.parse_xml_response(r)
df = pd.concat([df, df_SP])
df = utils.expand_cols(df)
df = if_possible_parse_local_datetime(df)
return df
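# Illustrative usage sketch (added; the method name and kwargs_map values are hypothetical):
#   df = SP_and_date_request(method='get_B1770',
#                            kwargs_map={'date': 'SettlementDate', 'SP': 'Period'},
#                            func_params=['APIKey', 'date', 'SP', 'ServiceType'],
#                            api_key=api_key, start_date='2020-01-01', end_date='2020-01-02')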
# Cell
def handle_capping(
r: Response,
df: pd.DataFrame,
method: str,
kwargs_map: dict,
func_params: list,
api_key: str,
end_date: str,
request_type: str,
**kwargs
):
capping_applied = utils.check_capping(r)
assert capping_applied != None, 'No information on whether or not capping limits had been breached could be found in the response metadata'
if capping_applied == True: # only subset of date range returned
dt_cols_with_period_in_name = ['startTimeOfHalfHrPeriod']
dt_cols = [col for col in df.columns if ('date' in col.lower() or col in dt_cols_with_period_in_name) and ('end' not in col.lower())]
if len(dt_cols) == 1:
start_date = pd.to_datetime(df[dt_cols[0]]).max().strftime('%Y-%m-%d')
if 'start_time' in kwargs.keys():
kwargs['start_time'] = '00:00'
if pd.to_datetime(start_date) >= pd.to_datetime(end_date):
                warn(f'The `end_date` ({end_date}) was earlier than `start_date` ({start_date})\nThe `start_date` will be set one day earlier than the `end_date`.')
                start_date = (pd.to_datetime(end_date) - pd.Timedelta(days=1)).strftime('%Y-%m-%d')
# Read Matches and Find Players
# Modules
from espncricinfo.player import Player
from espncricinfo.match import Match
from espncricinfo.series import Series
import json
import pdb
from collections import Counter
from tqdm import tqdm
import pandas as pd
import os
## TO ADD: OUTPUT TOTAL MATCHES AND NUMBER OF RAIN AFFECTED MATCHES
path = os.getcwd()
# Read CSVs
all_matches = pd.read_csv('c_champ_matches.csv')
rain_matches = pd.read_csv('rain_matches.csv')
import logging
import os
import re
import xml.etree.ElementTree as ET
from pathlib import Path
from typing import Any, Tuple, Optional
import pandas as pd
from python import TOPIC_ID, SUBTOPIC, DOCUMENT_NUMBER, DOCUMENT_ID, SENTENCE_IDX, TOKEN_IDX, TOKEN_IDX_TO, \
TOKEN_IDX_FROM, TOKEN, MENTION_ID, EVENT, MENTION_TYPE, DESCRIPTION, MENTION_TYPES_ACTION
logger = logging.getLogger()
def read_xml(xml_path) -> Tuple[Any, Any, Any, Any, Any]:
tree = ET.parse(xml_path)
# 1: read document info
root = tree.getroot()
assert root.tag == "Document"
doc_filename = root.attrib["doc_name"]
doc_id = root.attrib["doc_id"]
m = re.match(r"(?P<topic_id>\d+)_(?P<document_number>\d+)(?P<subtopic>\w+)\.xml", doc_filename)
topic_id = m.group("topic_id")
subtopic = m.group("subtopic")
document_number = int(m.group("document_number"))
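    # Added example: an ECB+-style filename such as "1_10ecbplus.xml" parses to topic_id="1",
    # document_number=10 and subtopic="ecbplus".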
documents_index = pd.MultiIndex.from_tuples([(topic_id, subtopic, doc_id)],
names=[TOPIC_ID, SUBTOPIC, DOCUMENT_ID])
documents = pd.DataFrame({DOCUMENT_ID: pd.Series(doc_id, index=documents_index),
DOCUMENT_NUMBER: pd.Series(document_number, index=documents_index)})
# 2: read document content
contents_rows = []
contents_index = []
for token_elmt in root.iter("token"):
# index content
sentence_idx = int(token_elmt.attrib["sentence"])
token_idx = int(token_elmt.attrib["number"])
contents_index.append((doc_id, sentence_idx, token_idx))
# content
token = token_elmt.text
contents_rows.append({TOKEN: token})
    contents_index = pd.MultiIndex.from_tuples(contents_index, names=[DOCUMENT_ID, SENTENCE_IDX, TOKEN_IDX])
from datetime import (
datetime,
timedelta,
timezone,
)
import numpy as np
import pytest
import pytz
from pandas import (
Categorical,
DataFrame,
DatetimeIndex,
NaT,
Period,
Series,
Timedelta,
Timestamp,
date_range,
isna,
)
import pandas._testing as tm
class TestSeriesFillNA:
def test_fillna_nat(self):
series = Series([0, 1, 2, NaT.value], dtype="M8[ns]")
filled = series.fillna(method="pad")
filled2 = series.fillna(value=series.values[2])
expected = series.copy()
expected.values[3] = expected.values[2]
tm.assert_series_equal(filled, expected)
tm.assert_series_equal(filled2, expected)
df = DataFrame({"A": series})
filled = df.fillna(method="pad")
filled2 = df.fillna(value=series.values[2])
expected = DataFrame({"A": expected})
tm.assert_frame_equal(filled, expected)
tm.assert_frame_equal(filled2, expected)
series = Series([NaT.value, 0, 1, 2], dtype="M8[ns]")
filled = series.fillna(method="bfill")
filled2 = series.fillna(value=series[1])
expected = series.copy()
expected[0] = expected[1]
tm.assert_series_equal(filled, expected)
tm.assert_series_equal(filled2, expected)
df = DataFrame({"A": series})
filled = df.fillna(method="bfill")
filled2 = df.fillna(value=series[1])
expected = DataFrame({"A": expected})
tm.assert_frame_equal(filled, expected)
tm.assert_frame_equal(filled2, expected)
def test_fillna_value_or_method(self, datetime_series):
msg = "Cannot specify both 'value' and 'method'"
with pytest.raises(ValueError, match=msg):
datetime_series.fillna(value=0, method="ffill")
def test_fillna(self):
ts = Series([0.0, 1.0, 2.0, 3.0, 4.0], index=tm.makeDateIndex(5))
tm.assert_series_equal(ts, ts.fillna(method="ffill"))
ts[2] = np.NaN
exp = Series([0.0, 1.0, 1.0, 3.0, 4.0], index=ts.index)
tm.assert_series_equal(ts.fillna(method="ffill"), exp)
exp = Series([0.0, 1.0, 3.0, 3.0, 4.0], index=ts.index)
tm.assert_series_equal(ts.fillna(method="backfill"), exp)
exp = Series([0.0, 1.0, 5.0, 3.0, 4.0], index=ts.index)
tm.assert_series_equal(ts.fillna(value=5), exp)
msg = "Must specify a fill 'value' or 'method'"
with pytest.raises(ValueError, match=msg):
ts.fillna()
def test_fillna_nonscalar(self):
# GH#5703
s1 = Series([np.nan])
s2 = Series([1])
result = s1.fillna(s2)
expected = Series([1.0])
tm.assert_series_equal(result, expected)
result = s1.fillna({})
tm.assert_series_equal(result, s1)
result = s1.fillna(Series((), dtype=object))
tm.assert_series_equal(result, s1)
result = s2.fillna(s1)
tm.assert_series_equal(result, s2)
result = s1.fillna({0: 1})
tm.assert_series_equal(result, expected)
result = s1.fillna({1: 1})
tm.assert_series_equal(result, Series([np.nan]))
result = s1.fillna({0: 1, 1: 1})
tm.assert_series_equal(result, expected)
result = s1.fillna(Series({0: 1, 1: 1}))
tm.assert_series_equal(result, expected)
result = s1.fillna(Series({0: 1, 1: 1}, index=[4, 5]))
tm.assert_series_equal(result, s1)
def test_fillna_aligns(self):
s1 = Series([0, 1, 2], list("abc"))
s2 = Series([0, np.nan, 2], list("bac"))
result = s2.fillna(s1)
expected = Series([0, 0, 2.0], list("bac"))
tm.assert_series_equal(result, expected)
def test_fillna_limit(self):
ser = Series(np.nan, index=[0, 1, 2])
result = ser.fillna(999, limit=1)
expected = Series([999, np.nan, np.nan], index=[0, 1, 2])
tm.assert_series_equal(result, expected)
result = ser.fillna(999, limit=2)
expected = Series([999, 999, np.nan], index=[0, 1, 2])
tm.assert_series_equal(result, expected)
def test_fillna_dont_cast_strings(self):
# GH#9043
# make sure a string representation of int/float values can be filled
# correctly without raising errors or being converted
vals = ["0", "1.5", "-0.3"]
for val in vals:
ser = Series([0, 1, np.nan, np.nan, 4], dtype="float64")
result = ser.fillna(val)
expected = Series([0, 1, val, val, 4], dtype="object")
tm.assert_series_equal(result, expected)
def test_fillna_consistency(self):
# GH#16402
# fillna with a tz aware to a tz-naive, should result in object
ser = Series([Timestamp("20130101"), NaT])
result = ser.fillna(Timestamp("20130101", tz="US/Eastern"))
expected = Series(
[Timestamp("20130101"), Timestamp("2013-01-01", tz="US/Eastern")],
dtype="object",
)
tm.assert_series_equal(result, expected)
msg = "The 'errors' keyword in "
with tm.assert_produces_warning(FutureWarning, match=msg):
# where (we ignore the errors=)
result = ser.where(
[True, False], Timestamp("20130101", tz="US/Eastern"), errors="ignore"
)
tm.assert_series_equal(result, expected)
with tm.assert_produces_warning(FutureWarning, match=msg):
result = ser.where(
[True, False], Timestamp("20130101", tz="US/Eastern"), errors="ignore"
)
tm.assert_series_equal(result, expected)
# with a non-datetime
result = ser.fillna("foo")
expected = Series([Timestamp("20130101"), "foo"])
tm.assert_series_equal(result, expected)
# assignment
ser2 = ser.copy()
ser2[1] = "foo"
tm.assert_series_equal(ser2, expected)
def test_fillna_downcast(self):
# GH#15277
# infer int64 from float64
ser = Series([1.0, np.nan])
result = ser.fillna(0, downcast="infer")
expected = Series([1, 0])
tm.assert_series_equal(result, expected)
# infer int64 from float64 when fillna value is a dict
ser = Series([1.0, np.nan])
result = ser.fillna({1: 0}, downcast="infer")
expected = Series([1, 0])
tm.assert_series_equal(result, expected)
def test_timedelta_fillna(self, frame_or_series):
# GH#3371
ser = Series(
[
Timestamp("20130101"),
Timestamp("20130101"),
Timestamp("20130102"),
Timestamp("20130103 9:01:01"),
]
)
td = ser.diff()
obj = frame_or_series(td)
# reg fillna
result = obj.fillna(Timedelta(seconds=0))
expected = Series(
[
timedelta(0),
timedelta(0),
timedelta(1),
timedelta(days=1, seconds=9 * 3600 + 60 + 1),
]
)
expected = frame_or_series(expected)
tm.assert_equal(result, expected)
# interpreted as seconds, no longer supported
msg = "value should be a 'Timedelta', 'NaT', or array of those. Got 'int'"
with pytest.raises(TypeError, match=msg):
obj.fillna(1)
result = obj.fillna(Timedelta(seconds=1))
expected = Series(
[
timedelta(seconds=1),
timedelta(0),
timedelta(1),
timedelta(days=1, seconds=9 * 3600 + 60 + 1),
]
)
expected = frame_or_series(expected)
tm.assert_equal(result, expected)
result = obj.fillna(timedelta(days=1, seconds=1))
expected = Series(
[
timedelta(days=1, seconds=1),
timedelta(0),
timedelta(1),
timedelta(days=1, seconds=9 * 3600 + 60 + 1),
]
)
expected = frame_or_series(expected)
tm.assert_equal(result, expected)
result = obj.fillna(np.timedelta64(10 ** 9))
expected = Series(
[
timedelta(seconds=1),
timedelta(0),
timedelta(1),
timedelta(days=1, seconds=9 * 3600 + 60 + 1),
]
)
expected = frame_or_series(expected)
tm.assert_equal(result, expected)
result = obj.fillna(NaT)
expected = Series(
[
NaT,
timedelta(0),
timedelta(1),
timedelta(days=1, seconds=9 * 3600 + 60 + 1),
],
dtype="m8[ns]",
)
expected = frame_or_series(expected)
tm.assert_equal(result, expected)
# ffill
td[2] = np.nan
obj = frame_or_series(td)
result = obj.ffill()
expected = td.fillna(Timedelta(seconds=0))
expected[0] = np.nan
expected = frame_or_series(expected)
tm.assert_equal(result, expected)
# bfill
td[2] = np.nan
obj = frame_or_series(td)
result = obj.bfill()
expected = td.fillna(Timedelta(seconds=0))
expected[2] = timedelta(days=1, seconds=9 * 3600 + 60 + 1)
expected = frame_or_series(expected)
tm.assert_equal(result, expected)
def test_datetime64_fillna(self):
ser = Series(
[
Timestamp("20130101"),
Timestamp("20130101"),
Timestamp("20130102"),
Timestamp("20130103 9:01:01"),
]
)
ser[2] = np.nan
# ffill
result = ser.ffill()
expected = Series(
[
Timestamp("20130101"),
Timestamp("20130101"),
Timestamp("20130101"),
Timestamp("20130103 9:01:01"),
]
)
tm.assert_series_equal(result, expected)
# bfill
result = ser.bfill()
expected = Series(
[
Timestamp("20130101"),
Timestamp("20130101"),
Timestamp("20130103 9:01:01"),
Timestamp("20130103 9:01:01"),
]
)
tm.assert_series_equal(result, expected)
def test_datetime64_fillna_backfill(self):
# GH#6587
# make sure that we are treating as integer when filling
msg = "containing strings is deprecated"
with tm.assert_produces_warning(FutureWarning, match=msg):
# this also tests inference of a datetime-like with NaT's
ser = Series([NaT, NaT, "2013-08-05 15:30:00.000001"])
expected = Series(
[
"2013-08-05 15:30:00.000001",
"2013-08-05 15:30:00.000001",
"2013-08-05 15:30:00.000001",
],
dtype="M8[ns]",
)
result = ser.fillna(method="backfill")
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("tz", ["US/Eastern", "Asia/Tokyo"])
def test_datetime64_tz_fillna(self, tz):
# DatetimeLikeBlock
ser = Series(
[
Timestamp("2011-01-01 10:00"),
NaT,
Timestamp("2011-01-03 10:00"),
NaT,
]
)
null_loc = Series([False, True, False, True])
result = ser.fillna(Timestamp("2011-01-02 10:00"))
expected = Series(
[
Timestamp("2011-01-01 10:00"),
Timestamp("2011-01-02 10:00"),
Timestamp("2011-01-03 10:00"),
Timestamp("2011-01-02 10:00"),
]
)
tm.assert_series_equal(expected, result)
# check s is not changed
tm.assert_series_equal(isna(ser), null_loc)
result = ser.fillna(Timestamp("2011-01-02 10:00", tz=tz))
expected = Series(
[
Timestamp("2011-01-01 10:00"),
Timestamp("2011-01-02 10:00", tz=tz),
Timestamp("2011-01-03 10:00"),
Timestamp("2011-01-02 10:00", tz=tz),
]
)
tm.assert_series_equal(expected, result)
tm.assert_series_equal(isna(ser), null_loc)
result = ser.fillna("AAA")
expected = Series(
[
Timestamp("2011-01-01 10:00"),
"AAA",
Timestamp("2011-01-03 10:00"),
"AAA",
],
dtype=object,
)
tm.assert_series_equal(expected, result)
tm.assert_series_equal(isna(ser), null_loc)
result = ser.fillna(
{
1: Timestamp("2011-01-02 10:00", tz=tz),
3: Timestamp("2011-01-04 10:00"),
}
)
expected = Series(
[
Timestamp("2011-01-01 10:00"),
Timestamp("2011-01-02 10:00", tz=tz),
Timestamp("2011-01-03 10:00"),
Timestamp("2011-01-04 10:00"),
]
)
tm.assert_series_equal(expected, result)
tm.assert_series_equal(isna(ser), null_loc)
result = ser.fillna(
{1: Timestamp("2011-01-02 10:00"), 3: Timestamp("2011-01-04 10:00")}
)
expected = Series(
[
Timestamp("2011-01-01 10:00"),
Timestamp("2011-01-02 10:00"),
Timestamp("2011-01-03 10:00"),
Timestamp("2011-01-04 10:00"),
]
)
tm.assert_series_equal(expected, result)
tm.assert_series_equal(isna(ser), null_loc)
# DatetimeTZBlock
idx = DatetimeIndex(["2011-01-01 10:00", NaT, "2011-01-03 10:00", NaT], tz=tz)
ser = Series(idx)
assert ser.dtype == f"datetime64[ns, {tz}]"
tm.assert_series_equal(isna(ser), null_loc)
result = ser.fillna(Timestamp("2011-01-02 10:00"))
expected = Series(
[
Timestamp("2011-01-01 10:00", tz=tz),
Timestamp("2011-01-02 10:00"),
Timestamp("2011-01-03 10:00", tz=tz),
Timestamp("2011-01-02 10:00"),
]
)
tm.assert_series_equal(expected, result)
tm.assert_series_equal(isna(ser), null_loc)
result = ser.fillna(Timestamp("2011-01-02 10:00", tz=tz))
idx = DatetimeIndex(
[
"2011-01-01 10:00",
"2011-01-02 10:00",
"2011-01-03 10:00",
"2011-01-02 10:00",
],
tz=tz,
)
expected = Series(idx)
tm.assert_series_equal(expected, result)
tm.assert_series_equal(isna(ser), null_loc)
result = ser.fillna(Timestamp("2011-01-02 10:00", tz=tz).to_pydatetime())
idx = DatetimeIndex(
[
"2011-01-01 10:00",
"2011-01-02 10:00",
"2011-01-03 10:00",
"2011-01-02 10:00",
],
tz=tz,
)
expected = Series(idx)
tm.assert_series_equal(expected, result)
tm.assert_series_equal(isna(ser), null_loc)
result = ser.fillna("AAA")
expected = Series(
[
Timestamp("2011-01-01 10:00", tz=tz),
"AAA",
Timestamp("2011-01-03 10:00", tz=tz),
"AAA",
],
dtype=object,
)
tm.assert_series_equal(expected, result)
tm.assert_series_equal(isna(ser), null_loc)
result = ser.fillna(
{
1: Timestamp("2011-01-02 10:00", tz=tz),
3: Timestamp("2011-01-04 10:00"),
}
)
expected = Series(
[
Timestamp("2011-01-01 10:00", tz=tz),
Timestamp("2011-01-02 10:00", tz=tz),
Timestamp("2011-01-03 10:00", tz=tz),
Timestamp("2011-01-04 10:00"),
]
)
tm.assert_series_equal(expected, result)
tm.assert_series_equal(isna(ser), null_loc)
result = ser.fillna(
{
1: Timestamp("2011-01-02 10:00", tz=tz),
3: Timestamp("2011-01-04 10:00", tz=tz),
}
)
expected = Series(
[
Timestamp("2011-01-01 10:00", tz=tz),
Timestamp("2011-01-02 10:00", tz=tz),
Timestamp("2011-01-03 10:00", tz=tz),
Timestamp("2011-01-04 10:00", tz=tz),
]
)
tm.assert_series_equal(expected, result)
tm.assert_series_equal(isna(ser), null_loc)
# filling with a naive/other zone, coerce to object
result = ser.fillna(Timestamp("20130101"))
expected = Series(
[
Timestamp("2011-01-01 10:00", tz=tz),
Timestamp("2013-01-01"),
Timestamp("2011-01-03 10:00", tz=tz),
Timestamp("2013-01-01"),
]
)
tm.assert_series_equal(expected, result)
tm.assert_series_equal(isna(ser), null_loc)
with tm.assert_produces_warning(FutureWarning, match="mismatched timezone"):
result = ser.fillna(Timestamp("20130101", tz="US/Pacific"))
expected = Series(
[
Timestamp("2011-01-01 10:00", tz=tz),
Timestamp("2013-01-01", tz="US/Pacific"),
Timestamp("2011-01-03 10:00", tz=tz),
Timestamp("2013-01-01", tz="US/Pacific"),
]
)
tm.assert_series_equal(expected, result)
tm.assert_series_equal(isna(ser), null_loc)
def test_fillna_dt64tz_with_method(self):
# with timezone
# GH#15855
ser = Series([Timestamp("2012-11-11 00:00:00+01:00"), NaT])
exp = Series(
[
Timestamp("2012-11-11 00:00:00+01:00"),
Timestamp("2012-11-11 00:00:00+01:00"),
]
)
tm.assert_series_equal(ser.fillna(method="pad"), exp)
ser = Series([NaT, Timestamp("2012-11-11 00:00:00+01:00")])
exp = Series(
[
Timestamp("2012-11-11 00:00:00+01:00"),
Timestamp("2012-11-11 00:00:00+01:00"),
]
)
tm.assert_series_equal(ser.fillna(method="bfill"), exp)
def test_fillna_pytimedelta(self):
# GH#8209
ser = Series([np.nan, Timedelta("1 days")], index=["A", "B"])
result = ser.fillna(timedelta(1))
expected = Series(Timedelta("1 days"), index=["A", "B"])
tm.assert_series_equal(result, expected)
def test_fillna_period(self):
# GH#13737
ser = Series([Period("2011-01", freq="M"), Period("NaT", freq="M")])
        res = ser.fillna(Period("2012-01", freq="M"))
# -*- coding: utf-8 -*-
import nose
import numpy as np
import pandas as pd
import pandas.util.testing as tm
import pandas.compat as compat
###############################################################
# Index / Series common tests which may trigger dtype coercions
###############################################################
class TestIndexCoercion(tm.TestCase):
_multiprocess_can_split_ = True
def test_setitem_index_numeric_coercion_int(self):
# tests setitem with non-existing numeric key
s = pd.Series([1, 2, 3, 4])
self.assertEqual(s.index.dtype, np.int64)
# int + int -> int
temp = s.copy()
temp[5] = 5
tm.assert_series_equal(temp, pd.Series([1, 2, 3, 4, 5],
index=[0, 1, 2, 3, 5]))
self.assertEqual(temp.index.dtype, np.int64)
# int + float -> float
temp = s.copy()
temp[1.1] = 5
tm.assert_series_equal(temp, pd.Series([1, 2, 3, 4, 5],
index=[0, 1, 2, 3, 1.1]))
self.assertEqual(temp.index.dtype, np.float64)
def test_setitem_index_numeric_coercion_float(self):
# tests setitem with non-existing numeric key
s = pd.Series([1, 2, 3, 4], index=[1.1, 2.1, 3.1, 4.1])
self.assertEqual(s.index.dtype, np.float64)
# float + int -> int
temp = s.copy()
# TODO_GH12747 The result must be float
with tm.assertRaises(IndexError):
temp[5] = 5
# float + float -> float
temp = s.copy()
temp[5.1] = 5
exp = pd.Series([1, 2, 3, 4, 5], index=[1.1, 2.1, 3.1, 4.1, 5.1])
tm.assert_series_equal(temp, exp)
self.assertEqual(temp.index.dtype, np.float64)
def test_insert_numeric_coercion_int(self):
idx = pd.Int64Index([1, 2, 3, 4])
self.assertEqual(idx.dtype, np.int64)
# int + int -> int
res = idx.insert(1, 1)
tm.assert_index_equal(res, pd.Index([1, 1, 2, 3, 4]))
self.assertEqual(res.dtype, np.int64)
# int + float -> float
res = idx.insert(1, 1.1)
tm.assert_index_equal(res, pd.Index([1, 1.1, 2, 3, 4]))
self.assertEqual(res.dtype, np.float64)
# int + bool -> int
res = idx.insert(1, False)
        tm.assert_index_equal(res, pd.Index([1, 0, 2, 3, 4]))
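
# Hedged standalone sketch (separate from the nose-style tests above): the same
# insert-coercion rules expressed against the plain pd.Index API, since
# pd.Int64Index was removed in pandas 2.0. Assumes pandas >= 2.0.
import numpy as np
import pandas as pd

_idx = pd.Index([1, 2, 3, 4], dtype="int64")
assert _idx.insert(1, 1).dtype == np.int64      # int + int -> int
assert _idx.insert(1, 1.1).dtype == np.float64  # int + float -> float
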
"""Tests for the sdv.constraints.base module."""
import warnings
from unittest.mock import Mock, patch
import pandas as pd
import pytest
from copulas.multivariate.gaussian import GaussianMultivariate
from copulas.univariate import GaussianUnivariate
from rdt.hyper_transformer import HyperTransformer
from sdv.constraints.base import Constraint, _get_qualified_name, get_subclasses, import_object
from sdv.constraints.errors import MissingConstraintColumnError
from sdv.constraints.tabular import ColumnFormula, UniqueCombinations
def test__get_qualified_name_class():
"""Test the ``_get_qualified_name`` function, if a class is passed.
The ``_get_qualified_name`` function is expected to:
- Return the Fully Qualified Name from a class.
Input:
- A class.
Output:
- The class qualified name.
"""
# Run
fully_qualified_name = _get_qualified_name(Constraint)
# Assert
expected_name = 'sdv.constraints.base.Constraint'
assert fully_qualified_name == expected_name
def test__get_qualified_name_function():
"""Test the ``_get_qualified_name`` function, if a function is passed.
The ``_get_qualified_name`` function is expected to:
- Return the Fully Qualified Name from a function.
Input:
- A function.
Output:
- The function qualified name.
"""
# Run
fully_qualified_name = _get_qualified_name(_get_qualified_name)
# Assert
expected_name = 'sdv.constraints.base._get_qualified_name'
assert fully_qualified_name == expected_name
def test_get_subclasses():
"""Test the ``get_subclasses`` function.
The ``get_subclasses`` function is expected to:
- Recursively find subclasses for the class object passed.
Setup:
- Create three classes, Parent, Child and GrandChild,
which inherit of each other hierarchically.
Input:
- The Parent class.
Output:
- Dict of the subclasses of the class: ``Child`` and ``GrandChild`` classes.
"""
# Setup
class Parent:
pass
class Child(Parent):
pass
class GrandChild(Child):
pass
# Run
subclasses = get_subclasses(Parent)
# Assert
expected_subclasses = {
'Child': Child,
'GrandChild': GrandChild
}
assert subclasses == expected_subclasses
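
# Hedged sketch (an assumption about the mechanism, not sdv's actual code):
# recursive subclass discovery of the kind test_get_subclasses describes,
# built on type.__subclasses__(). The helper name is hypothetical.
def _get_subclasses_sketch(cls):
    """Return {name: class} for every direct and indirect subclass of cls."""
    found = {}
    for child in cls.__subclasses__():
        found[child.__name__] = child
        found.update(_get_subclasses_sketch(child))
    return found
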
def test_import_object_class():
"""Test the ``import_object`` function, when importing a class.
The ``import_object`` function is expected to:
    - Import a class from its qualified name.
Input:
- Qualified name of the class.
Output:
- The imported class.
"""
# Run
obj = import_object('sdv.constraints.base.Constraint')
# Assert
assert obj is Constraint
def test_import_object_function():
"""Test the ``import_object`` function, when importing a function.
The ``import_object`` function is expected to:
    - Import a function from its qualified name.
Input:
- Qualified name of the function.
Output:
- The imported function.
"""
# Run
imported = import_object('sdv.constraints.base.import_object')
# Assert
assert imported is import_object
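
# Hedged sketch (an assumption, not sdv's actual implementation): importing an
# object from its qualified name with importlib, which is the behavior the two
# import_object tests above exercise. The helper name is hypothetical.
import importlib

def _import_object_sketch(qualified_name):
    """Split 'package.module.attr' and return the named attribute."""
    module_name, _, attr = qualified_name.rpartition(".")
    return getattr(importlib.import_module(module_name), attr)
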
class TestConstraint():
def test__identity(self):
"""Test ```Constraint._identity`` method.
``_identity`` method should return whatever it is passed.
Input:
- anything
Output:
- Input
"""
# Run
instance = Constraint('all')
output = instance._identity('input')
# Asserts
assert output == 'input'
def test___init___transform(self):
"""Test ```Constraint.__init__`` method when 'transform' is passed.
If 'transform' is given, the ``__init__`` method should replace the ``is_valid`` method
with an identity and leave ``transform`` and ``reverse_transform`` untouched.
Input:
- transform
Side effects:
- is_valid == identity
- transform != identity
- reverse_transform != identity
"""
# Run
instance = Constraint(handling_strategy='transform')
# Asserts
assert instance.filter_valid == instance._identity
assert instance.transform != instance._identity
assert instance.reverse_transform != instance._identity
def test___init___reject_sampling(self):
"""Test ``Constraint.__init__`` method when 'reject_sampling' is passed.
If 'reject_sampling' is given, the ``__init__`` method should replace the ``transform``
and ``reverse_transform`` methods with an identity and leave ``is_valid`` untouched.
Input:
- reject_sampling
Side effects:
- is_valid != identity
- transform == identity
- reverse_transform == identity
"""
# Run
instance = Constraint(handling_strategy='reject_sampling')
# Asserts
assert instance.filter_valid != instance._identity
assert instance.transform == instance._identity
assert instance.reverse_transform == instance._identity
def test___init___all(self):
"""Test ``Constraint.__init__`` method when 'all' is passed.
If 'all' is given, the ``__init__`` method should leave ``transform``,
``reverse_transform`` and ``is_valid`` untouched.
Input:
- all
Side effects:
- is_valid != identity
- transform != identity
- reverse_transform != identity
"""
# Run
instance = Constraint(handling_strategy='all')
# Asserts
assert instance.filter_valid != instance._identity
assert instance.transform != instance._identity
assert instance.reverse_transform != instance._identity
    def test___init___not_known(self):
        """Test the ``Constraint.__init__`` method when an unknown ``handling_strategy`` is passed.
        If an unknown ``handling_strategy`` is given, a ``ValueError`` is raised.
Input:
- not_known
Side effects:
- ValueError
"""
# Run
with pytest.raises(ValueError):
Constraint(handling_strategy='not_known')
def test_fit(self):
"""Test the ``Constraint.fit`` method.
The base ``Constraint.fit`` method is expected to:
- Call ``_fit`` method.
Input:
- Table data (pandas.DataFrame)
"""
# Setup
table_data = pd.DataFrame({
'a': [1, 2, 3]
})
instance = Constraint(handling_strategy='transform', fit_columns_model=False)
instance._fit = Mock()
# Run
instance.fit(table_data)
# Assert
instance._fit.assert_called_once_with(table_data)
@patch('sdv.constraints.base.GaussianMultivariate', spec_set=GaussianMultivariate)
def test_fit_gaussian_multivariate_correct_distribution(self, gm_mock):
"""Test the ``GaussianMultivariate`` from the ``Constraint.fit`` method.
The ``GaussianMultivariate`` is expected to be called with default distribution
set as ``GaussianUnivariate``.
Input:
- Table data (pandas.DataFrame)
"""
# Setup
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [1, 2, 3]
})
instance = Constraint(handling_strategy='transform', fit_columns_model=True)
instance.constraint_columns = ('a', 'b')
# Run
instance.fit(table_data)
# Assert
gm_mock.assert_called_once_with(distribution=GaussianUnivariate)
@patch('sdv.constraints.base.GaussianMultivariate', spec_set=GaussianMultivariate)
@patch('sdv.constraints.base.HyperTransformer', spec_set=HyperTransformer)
def test_fit_trains_column_model(self, ht_mock, gm_mock):
"""Test the ``Constraint.fit`` method trains the column model.
When ``fit_columns_model`` is True and there are multiple ``constraint_columns``,
the ``Constraint.fit`` method is expected to:
- Call ``_fit`` method.
- Create ``_hyper_transformer``.
- Create ``_column_model`` and train it.
Input:
- Table data (pandas.DataFrame)
"""
# Setup
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6]
})
instance = Constraint(handling_strategy='transform', fit_columns_model=True)
instance.constraint_columns = ('a', 'b')
# Run
instance.fit(table_data)
# Assert
gm_mock.return_value.fit.assert_called_once()
calls = ht_mock.return_value.fit_transform.mock_calls
args = calls[0][1]
assert len(calls) == 1
pd.testing.assert_frame_equal(args[0], table_data)
def test_transform(self):
"""Test the ``Constraint.transform`` method.
It is an identity method for completion, to be optionally
overwritten by subclasses.
The ``Constraint.transform`` method is expected to:
- Return the input data unmodified.
Input:
- Anything
Output:
- Input
"""
# Run
instance = Constraint(handling_strategy='transform')
output = instance.transform('input')
# Assert
assert output == 'input'
def test_transform_calls__transform(self):
"""Test that the ``Constraint.transform`` method calls ``_transform``.
The ``Constraint.transform`` method is expected to:
- Return value returned by ``_transform``.
Input:
- Anything
Output:
- Result of ``_transform(input)``
"""
# Setup
constraint_mock = Mock()
constraint_mock.fit_columns_model = False
constraint_mock._transform.return_value = 'the_transformed_data'
constraint_mock._validate_columns.return_value = pd.DataFrame()
# Run
output = Constraint.transform(constraint_mock, 'input')
# Assert
assert output == 'the_transformed_data'
def test_transform_model_disabled_any_columns_missing(self):
"""Test the ``Constraint.transform`` method with invalid data.
If ``table_data`` is missing any columns and ``fit_columns_model``
is False, it should raise a ``MissingConstraintColumnError``.
The ``Constraint.transform`` method is expected to:
- Raise ``MissingConstraintColumnError``.
"""
# Run
instance = Constraint(handling_strategy='transform', fit_columns_model=False)
instance._transform = lambda x: x
instance.constraint_columns = ('a',)
# Assert
with pytest.raises(MissingConstraintColumnError):
instance.transform(pd.DataFrame([[1, 2], [3, 4]], columns=['b', 'c']))
def test_transform_model_enabled_all_columns_missing(self):
"""Test the ``Constraint.transform`` method with missing columns.
If ``table_data`` is missing all of the ``constraint_columns`` and
``fit_columns_model`` is True, it should raise a
``MissingConstraintColumnError``.
The ``Constraint.transform`` method is expected to:
- Raise ``MissingConstraintColumnError``.
"""
# Run
instance = Constraint(handling_strategy='transform')
instance._transform = lambda x: x
instance.constraint_columns = ('a',)
# Assert
with pytest.raises(MissingConstraintColumnError):
instance.transform(pd.DataFrame())
def test_transform_model_enabled_some_columns_missing(self):
"""Test that the ``Constraint.transform`` method uses column model.
If ``table_data`` is missing some of the ``constraint_columns``,
the ``_column_model`` should be used to sample the rest and the
data should be transformed.
Input:
- Table with some missing columns.
Output:
- Transformed data with all columns.
"""
# Setup
instance = Constraint(handling_strategy='transform')
instance._transform = lambda x: x
instance.constraint_columns = ('a', 'b')
instance._hyper_transformer = Mock()
instance._columns_model = Mock()
conditions = [
pd.DataFrame([[5, 1, 2]], columns=['a', 'b', 'c']),
pd.DataFrame([[6, 3, 4]], columns=['a', 'b', 'c'])
]
transformed_conditions = [
pd.DataFrame([[1]], columns=['b']),
pd.DataFrame([[3]], columns=['b'])
]
instance._columns_model.sample.return_value = pd.DataFrame([
[1, 2, 3]
], columns=['b', 'c', 'a'])
instance._hyper_transformer.transform.side_effect = transformed_conditions
instance._hyper_transformer.reverse_transform.side_effect = conditions
# Run
data = pd.DataFrame([[1, 2], [3, 4]], columns=['b', 'c'])
transformed_data = instance.transform(data)
# Assert
        expected_transformed_data = pd.DataFrame([[1, 2, 3]], columns=['b', 'c', 'a'])
expected_result = pd.DataFrame([
[5, 1, 2],
[6, 3, 4]
], columns=['a', 'b', 'c'])
model_calls = instance._columns_model.sample.mock_calls
assert len(model_calls) == 2
instance._columns_model.sample.assert_any_call(num_rows=1, conditions={'b': 1})
instance._columns_model.sample.assert_any_call(num_rows=1, conditions={'b': 3})
reverse_transform_calls = instance._hyper_transformer.reverse_transform.mock_calls
        pd.testing.assert_frame_equal(reverse_transform_calls[0][1][0], expected_transformed_data)
        pd.testing.assert_frame_equal(reverse_transform_calls[1][1][0], expected_transformed_data)
pd.testing.assert_frame_equal(transformed_data, expected_result)
def test_transform_model_enabled_reject_sampling(self):
"""Test the ``Constraint.transform`` method's reject sampling.
If the column model is used but doesn't return valid rows,
reject sampling should be used to get the valid rows.
Setup:
            - The ``_columns_model`` returns some valid rows the first time,
and then the rest with the next call.
Input:
- Table with some missing columns.
Output:
- Transformed data with all columns.
"""
# Setup
instance = Constraint(handling_strategy='transform')
instance._transform = lambda x: x
instance.constraint_columns = ('a', 'b')
instance._hyper_transformer = Mock()
instance._columns_model = Mock()
transformed_conditions = [pd.DataFrame([[1], [1], [1], [1], [1]], columns=['b'])]
instance._columns_model.sample.side_effect = [
pd.DataFrame([
[1, 2],
[1, 3]
], columns=['a', 'b']),
pd.DataFrame([
[1, 4],
[1, 5],
[1, 6],
[1, 7]
], columns=['a', 'b']),
]
instance._hyper_transformer.transform.side_effect = transformed_conditions
instance._hyper_transformer.reverse_transform = lambda x: x
# Run
data = pd.DataFrame([[1], [1], [1], [1], [1]], columns=['b'])
transformed_data = instance.transform(data)
# Assert
expected_result = pd.DataFrame([
[1, 2],
[1, 3],
[1, 4],
[1, 5],
[1, 6]
], columns=['a', 'b'])
model_calls = instance._columns_model.sample.mock_calls
assert len(model_calls) == 2
instance._columns_model.sample.assert_any_call(num_rows=5, conditions={'b': 1})
assert model_calls[1][2]['num_rows'] > 3
pd.testing.assert_frame_equal(transformed_data, expected_result)
def test_transform_model_enabled_reject_sampling_error(self):
"""Test that the ``Constraint.transform`` method raises an error appropriately.
If the column model is used but doesn't return valid rows,
reject sampling should be used to get the valid rows. If it doesn't
get any valid rows in 100 tries, a ``ValueError`` is raised.
Setup:
- The ``_columns_model`` is fixed to always return an empty ``DataFrame``.
Input:
- Table with some missing columns.
Side Effect:
- ``ValueError`` raised.
"""
# Setup
instance = Constraint(handling_strategy='transform')
instance.constraint_columns = ('a', 'b')
instance._hyper_transformer = Mock()
instance._columns_model = Mock()
transformed_conditions = pd.DataFrame([[1]], columns=['b'])
instance._columns_model.sample.return_value = pd.DataFrame()
instance._hyper_transformer.transform.return_value = transformed_conditions
instance._hyper_transformer.reverse_transform.return_value = pd.DataFrame()
# Run / Assert
data = pd.DataFrame([[1, 2], [3, 4]], columns=['b', 'c'])
with pytest.raises(ValueError):
instance.transform(data)
def test_transform_model_enabled_reject_sampling_duplicates_valid_rows(self):
"""Test the ``Constraint.transform`` method's reject sampling fall back.
If the column model is used but doesn't return valid rows,
reject sampling should be used to get the valid rows. If after 100
tries, some valid rows are created but not enough, then the valid rows
are duplicated to meet the ``num_rows`` requirement.
Setup:
- The ``_columns_model`` returns some valid rows the first time, and then
              an empty ``DataFrame`` for every other call.
Input:
- Table with some missing columns.
Output:
- Transformed data with all columns.
"""
# Setup
instance = Constraint(handling_strategy='transform')
instance._transform = lambda x: x
instance.constraint_columns = ('a', 'b')
instance._hyper_transformer = Mock()
instance._columns_model = Mock()
transformed_conditions = [pd.DataFrame([[1], [1], [1], [1], [1]], columns=['b'])]
instance._columns_model.sample.side_effect = [
pd.DataFrame([
[1, 2],
[1, 3]
], columns=['a', 'b'])
] + [pd.DataFrame()] * 100
instance._hyper_transformer.transform.side_effect = transformed_conditions
instance._hyper_transformer.reverse_transform = lambda x: x
# Run
data = pd.DataFrame([[1], [1], [1], [1], [1]], columns=['b'])
transformed_data = instance.transform(data)
# Assert
expected_result = pd.DataFrame([
[1, 2],
[1, 3],
[1, 2],
[1, 3],
[1, 2]
], columns=['a', 'b'])
model_calls = instance._columns_model.sample.mock_calls
assert len(model_calls) == 101
instance._columns_model.sample.assert_any_call(num_rows=5, conditions={'b': 1})
pd.testing.assert_frame_equal(transformed_data, expected_result)
def test_fit_transform(self):
"""Test the ``Constraint.fit_transform`` method.
The ``Constraint.fit_transform`` method is expected to:
- Call the ``fit`` method.
- Call the ``transform`` method.
- Return the input data unmodified.
Input:
- Anything
Output:
- self.transform output
Side Effects:
- self.fit is called with input
- self.transform is called with input
"""
# Setup
constraint_mock = Mock()
constraint_mock.transform.return_value = 'the_transformed_data'
# Run
data = 'my_data'
output = Constraint.fit_transform(constraint_mock, data)
# Assert
assert output == 'the_transformed_data'
constraint_mock.fit.assert_called_once_with('my_data')
constraint_mock.transform.assert_called_once_with('my_data')
def test_reverse_transform(self):
"""Test the ``Constraint.reverse_transform`` method. It is an identity method
for completion, to be optionally overwritten by subclasses.
The ``Constraint.reverse_transform`` method is expected to:
- Return the input data unmodified.
Input:
- Anything
Output:
- Input
"""
# Run
instance = Constraint(handling_strategy='transform')
output = instance.reverse_transform('input')
# Assert
assert output == 'input'
def test_is_valid(self):
"""Test the ``Constraint.is_valid` method. This should be overwritten by all the
subclasses that have a way to decide which rows are valid and which are not.
The ``Constraint.is_valid`` method is expected to:
- Say whether the given table rows are valid.
Input:
- Table data (pandas.DataFrame)
Output:
- Series of ``True`` values (pandas.Series)
"""
# Setup
table_data = pd.DataFrame({
'a': [1, 2, 3]
})
# Run
instance = Constraint(handling_strategy='transform')
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([True, True, True])
pd.testing.assert_series_equal(expected_out, out)
def test_filter_valid(self):
"""Test the ``Constraint.filter_valid`` method.
The ``Constraint.filter_valid`` method is expected to:
- Filter the input data by calling the method ``is_valid``.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data, with only the valid rows (pandas.DataFrame)
"""
# Setup
table_data = pd.DataFrame({
'a': [1, 2, 3]
})
constraint_mock = Mock()
constraint_mock.is_valid.return_value = pd.Series([True, True, False])
# Run
out = Constraint.filter_valid(constraint_mock, table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2]
})
        pd.testing.assert_frame_equal(expected_out, out)
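
# Hedged sketch (an illustration, not sdv's actual code): the reject-sampling
# loop that the transform tests above describe -- sample conditioned rows, keep
# only the valid ones, retry a bounded number of times, and duplicate the valid
# rows if there still are not enough. Names and the exact retry policy are
# assumptions.
import pandas as pd

def _reject_sample_sketch(sample_rows, is_valid, num_rows, max_tries=100):
    """sample_rows(n) -> DataFrame; is_valid(df) -> boolean Series."""
    chunks = []
    total = 0
    for _ in range(max_tries):
        candidates = sample_rows(num_rows)
        valid = candidates[is_valid(candidates)]
        if not valid.empty:
            chunks.append(valid)
            total += len(valid)
        if total >= num_rows:
            break
    if not chunks:
        raise ValueError("Could not get enough valid rows within 100 tries.")
    valid_rows = pd.concat(chunks, ignore_index=True)
    if len(valid_rows) < num_rows:
        # Not enough after max_tries: duplicate valid rows to reach num_rows.
        repeats = -(-num_rows // len(valid_rows))  # ceiling division
        valid_rows = pd.concat([valid_rows] * repeats, ignore_index=True)
    return valid_rows.head(num_rows)
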